Dataset schema (column name, type, min / max value length):

    repository_name              string      7 / 55
    func_path_in_repository      string      4 / 223
    func_name                    string      1 / 134
    whole_func_string            string      75 / 104k
    language                     string      1 distinct value
    func_code_string             string      75 / 104k
    func_code_tokens             sequence    19 / 28.4k
    func_documentation_string    string      1 / 46.9k
    func_documentation_tokens    sequence    1 / 1.97k
    split_name                   string      1 distinct value
    func_code_url                string      87 / 315
praekeltfoundation/seed-stage-based-messaging
subscriptions/tasks.py
calculate_subscription_lifecycle
def calculate_subscription_lifecycle(subscription_id):
    """
    Calculates the expected lifecycle position of the subscription with the
    given ID, and creates a BehindSubscription entry for it.

    Args:
        subscription_id (str): ID of subscription to calculate lifecycle for
    """
    subscription = Subscription.objects.select_related("messageset", "schedule").get(
        id=subscription_id
    )
    behind = subscription.messages_behind()
    if behind == 0:
        return

    current_messageset = subscription.messageset
    current_sequence_number = subscription.next_sequence_number
    end_subscription = Subscription.fast_forward_lifecycle(subscription, save=False)[-1]
    BehindSubscription.objects.create(
        subscription=subscription,
        messages_behind=behind,
        current_messageset=current_messageset,
        current_sequence_number=current_sequence_number,
        expected_messageset=end_subscription.messageset,
        expected_sequence_number=end_subscription.next_sequence_number,
    )
python
def calculate_subscription_lifecycle(subscription_id):
    """
    Calculates the expected lifecycle position of the subscription with the
    given ID, and creates a BehindSubscription entry for it.

    Args:
        subscription_id (str): ID of subscription to calculate lifecycle for
    """
    subscription = Subscription.objects.select_related("messageset", "schedule").get(
        id=subscription_id
    )
    behind = subscription.messages_behind()
    if behind == 0:
        return

    current_messageset = subscription.messageset
    current_sequence_number = subscription.next_sequence_number
    end_subscription = Subscription.fast_forward_lifecycle(subscription, save=False)[-1]
    BehindSubscription.objects.create(
        subscription=subscription,
        messages_behind=behind,
        current_messageset=current_messageset,
        current_sequence_number=current_sequence_number,
        expected_messageset=end_subscription.messageset,
        expected_sequence_number=end_subscription.next_sequence_number,
    )
[ "def", "calculate_subscription_lifecycle", "(", "subscription_id", ")", ":", "subscription", "=", "Subscription", ".", "objects", ".", "select_related", "(", "\"messageset\"", ",", "\"schedule\"", ")", ".", "get", "(", "id", "=", "subscription_id", ")", "behind", "=", "subscription", ".", "messages_behind", "(", ")", "if", "behind", "==", "0", ":", "return", "current_messageset", "=", "subscription", ".", "messageset", "current_sequence_number", "=", "subscription", ".", "next_sequence_number", "end_subscription", "=", "Subscription", ".", "fast_forward_lifecycle", "(", "subscription", ",", "save", "=", "False", ")", "[", "-", "1", "]", "BehindSubscription", ".", "objects", ".", "create", "(", "subscription", "=", "subscription", ",", "messages_behind", "=", "behind", ",", "current_messageset", "=", "current_messageset", ",", "current_sequence_number", "=", "current_sequence_number", ",", "expected_messageset", "=", "end_subscription", ".", "messageset", ",", "expected_sequence_number", "=", "end_subscription", ".", "next_sequence_number", ",", ")" ]
Calculates the expected lifecycle position of the subscription with the given ID, and creates a BehindSubscription entry for it. Args: subscription_id (str): ID of subscription to calculate lifecycle for
[ "Calculates", "the", "expected", "lifecycle", "position", "the", "subscription", "in", "subscription_ids", "and", "creates", "a", "BehindSubscription", "entry", "for", "them", "." ]
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/subscriptions/tasks.py#L597-L622
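A minimal usage sketch for the row above, assuming Celery is configured for this project; the UUID shown is a placeholder, not a real subscription ID:

# Enqueue the lifecycle check on the Celery broker, as find_behind_subscriptions
# (the next row) does; the argument is the subscription UUID as a string.
from subscriptions.tasks import calculate_subscription_lifecycle

calculate_subscription_lifecycle.delay("00000000-0000-0000-0000-000000000000")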
praekeltfoundation/seed-stage-based-messaging
subscriptions/tasks.py
find_behind_subscriptions
def find_behind_subscriptions():
    """
    Finds any subscriptions that are behind according to where they should
    be, and creates a BehindSubscription entry for them.
    """
    subscriptions = Subscription.objects.filter(
        active=True, completed=False, process_status=0
    ).values_list("id", flat=True)

    for subscription_id in subscriptions.iterator():
        calculate_subscription_lifecycle.delay(str(subscription_id))
python
def find_behind_subscriptions():
    """
    Finds any subscriptions that are behind according to where they should
    be, and creates a BehindSubscription entry for them.
    """
    subscriptions = Subscription.objects.filter(
        active=True, completed=False, process_status=0
    ).values_list("id", flat=True)

    for subscription_id in subscriptions.iterator():
        calculate_subscription_lifecycle.delay(str(subscription_id))
[ "def", "find_behind_subscriptions", "(", ")", ":", "subscriptions", "=", "Subscription", ".", "objects", ".", "filter", "(", "active", "=", "True", ",", "completed", "=", "False", ",", "process_status", "=", "0", ")", ".", "values_list", "(", "\"id\"", ",", "flat", "=", "True", ")", "for", "subscription_id", "in", "subscriptions", ".", "iterator", "(", ")", ":", "calculate_subscription_lifecycle", ".", "delay", "(", "str", "(", "subscription_id", ")", ")" ]
Finds any subscriptions that are behind according to where they should be, and creates a BehindSubscription entry for them.
[ "Finds", "any", "subscriptions", "that", "are", "behind", "according", "to", "where", "they", "should", "be", "and", "creates", "a", "BehindSubscription", "entry", "for", "them", "." ]
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/subscriptions/tasks.py#L626-L635
praekeltfoundation/seed-stage-based-messaging
contentstore/views.py
ScheduleViewSet.send
def send(self, request, pk=None):
    """
    Sends all the subscriptions for the specified schedule
    """
    schedule = self.get_object()
    queue_subscription_send.delay(str(schedule.id))
    return Response({}, status=status.HTTP_202_ACCEPTED)
python
def send(self, request, pk=None):
    """
    Sends all the subscriptions for the specified schedule
    """
    schedule = self.get_object()
    queue_subscription_send.delay(str(schedule.id))
    return Response({}, status=status.HTTP_202_ACCEPTED)
[ "def", "send", "(", "self", ",", "request", ",", "pk", "=", "None", ")", ":", "schedule", "=", "self", ".", "get_object", "(", ")", "queue_subscription_send", ".", "delay", "(", "str", "(", "schedule", ".", "id", ")", ")", "return", "Response", "(", "{", "}", ",", "status", "=", "status", ".", "HTTP_202_ACCEPTED", ")" ]
Sends all the subscriptions for the specified schedule
[ "Sends", "all", "the", "subscriptions", "for", "the", "specified", "schedule" ]
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/contentstore/views.py#L37-L44
jreese/tasky
tasky/config.py
Config.get
def get(self, key: Any, default: Any = None) -> Any:
    '''Return the configured value for the given key name, or `default`
    if no value is available or key is invalid.'''

    return self.data.get(key, default)
python
def get(self, key: Any, default: Any = None) -> Any:
    '''Return the configured value for the given key name, or `default`
    if no value is available or key is invalid.'''

    return self.data.get(key, default)
[ "def", "get", "(", "self", ",", "key", ":", "Any", ",", "default", ":", "Any", "=", "None", ")", "->", "Any", ":", "return", "self", ".", "data", ".", "get", "(", "key", ",", "default", ")" ]
Return the configured value for the given key name, or `default` if no value is available or key is invalid.
[ "Return", "the", "configured", "value", "for", "the", "given", "key", "name", "or", "default", "if", "no", "value", "is", "available", "or", "key", "is", "invalid", "." ]
train
https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/config.py#L28-L32
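A short usage sketch for Config.get; `cfg` is assumed to be an already-initialised Config whose `data` dict has been loaded (the constructor is not shown in this row):

# Both calls delegate to dict.get, so a missing or invalid key never raises.
timeout = cfg.get('timeout', default=30)  # 30 if the key is absent
missing = cfg.get('no-such-key')          # None, the default default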
jreese/tasky
tasky/config.py
Config.task_config
def task_config(self, task: Task) -> Any:
    '''Return the task-specific configuration.'''

    return self.get(task.__class__.__name__)
python
def task_config(self, task: Task) -> Any:
    '''Return the task-specific configuration.'''

    return self.get(task.__class__.__name__)
[ "def", "task_config", "(", "self", ",", "task", ":", "Task", ")", "->", "Any", ":", "return", "self", ".", "get", "(", "task", ".", "__class__", ".", "__name__", ")" ]
Return the task-specific configuration.
[ "Return", "the", "task", "-", "specific", "configuration", "." ]
train
https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/config.py#L39-L42
jreese/tasky
tasky/config.py
JsonConfig.init
async def init(self) -> None:
    '''Load configuration in JSON format from either a file or a raw
    data string.'''

    if self.data:
        return

    if self.json_data:
        try:
            self.data = json.loads(self.json_data)
        except Exception:
            Log.exception('Failed to load raw configuration')

    else:
        try:
            with open(self.json_path, 'r') as f:
                self.data = json.load(f)
        except Exception:
            Log.exception('Failed to load configuration from %s',
                          self.json_path)
            self.data = {}
python
async def init(self) -> None:
    '''Load configuration in JSON format from either a file or a raw
    data string.'''

    if self.data:
        return

    if self.json_data:
        try:
            self.data = json.loads(self.json_data)
        except Exception:
            Log.exception('Failed to load raw configuration')

    else:
        try:
            with open(self.json_path, 'r') as f:
                self.data = json.load(f)
        except Exception:
            Log.exception('Failed to load configuration from %s',
                          self.json_path)
            self.data = {}
[ "async", "def", "init", "(", "self", ")", "->", "None", ":", "if", "self", ".", "data", ":", "return", "if", "self", ".", "json_data", ":", "try", ":", "self", ".", "data", "=", "json", ".", "loads", "(", "self", ".", "json_data", ")", "except", "Exception", ":", "Log", ".", "exception", "(", "'Falied to load raw configuration'", ")", "else", ":", "try", ":", "with", "open", "(", "self", ".", "json_path", ",", "'r'", ")", "as", "f", ":", "self", ".", "data", "=", "json", ".", "load", "(", "f", ")", "except", "Exception", ":", "Log", ".", "exception", "(", "'Failed to load configuration from %s'", ",", "self", ".", "json_path", ")", "self", ".", "data", "=", "{", "}" ]
Load configuration in JSON format from either a file or a raw data string.
[ "Load", "configuration", "in", "JSON", "format", "from", "either", "a", "file", "or", "a", "raw", "data", "string", "." ]
train
https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/config.py#L72-L94
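A hypothetical driver for JsonConfig.init(); the row only shows init(), so passing json_data (or json_path) through the constructor is an assumption about the class:

import asyncio

config = JsonConfig(json_data='{"timeout": 30}')  # assumed constructor kwarg
asyncio.run(config.init())                        # parses the raw JSON into config.data
print(config.get('timeout'))                      # -> 30, via Config.get above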
jreese/tasky
tasky/tasks/timer.py
TimerTask.run_task
async def run_task(self) -> None:
    '''Execute the task inside the asyncio event loop after `DELAY` seconds.
    Track the time it takes to run, and log when it starts/stops. If/when
    `reset()` is called, reset the wait time to `DELAY` seconds.'''

    self.last_run = 0.0
    self.target = self.time() + self.DELAY

    while self.running:
        try:
            now = self.time()

            if now < self.target:
                sleep = self.target - now
                await self.sleep(sleep)

            elif self.last_run < self.target:
                Log.debug('executing timer task %s', self.name)
                self.last_run = self.time()
                await self.run()
                total = self.time() - self.last_run
                Log.debug('finished timer task %s in %.1f seconds',
                          self.name, total)

            else:
                sleep = min(5.0, self.DELAY)
                await self.sleep(sleep)

        except CancelledError:
            Log.debug('cancelled timer task %s', self.name)
            raise

        except Exception:
            Log.exception('exception in timer task %s', self.name)
python
async def run_task(self) -> None:
    '''Execute the task inside the asyncio event loop after `DELAY` seconds.
    Track the time it takes to run, and log when it starts/stops. If/when
    `reset()` is called, reset the wait time to `DELAY` seconds.'''

    self.last_run = 0.0
    self.target = self.time() + self.DELAY

    while self.running:
        try:
            now = self.time()

            if now < self.target:
                sleep = self.target - now
                await self.sleep(sleep)

            elif self.last_run < self.target:
                Log.debug('executing timer task %s', self.name)
                self.last_run = self.time()
                await self.run()
                total = self.time() - self.last_run
                Log.debug('finished timer task %s in %.1f seconds',
                          self.name, total)

            else:
                sleep = min(5.0, self.DELAY)
                await self.sleep(sleep)

        except CancelledError:
            Log.debug('cancelled timer task %s', self.name)
            raise

        except Exception:
            Log.exception('exception in timer task %s', self.name)
[ "async", "def", "run_task", "(", "self", ")", "->", "None", ":", "self", ".", "last_run", "=", "0.0", "self", ".", "target", "=", "self", ".", "time", "(", ")", "+", "self", ".", "DELAY", "while", "self", ".", "running", ":", "try", ":", "now", "=", "self", ".", "time", "(", ")", "if", "now", "<", "self", ".", "target", ":", "sleep", "=", "self", ".", "target", "-", "now", "await", "self", ".", "sleep", "(", "sleep", ")", "elif", "self", ".", "last_run", "<", "self", ".", "target", ":", "Log", ".", "debug", "(", "'executing timer task %s'", ",", "self", ".", "name", ")", "self", ".", "last_run", "=", "self", ".", "time", "(", ")", "await", "self", ".", "run", "(", ")", "total", "=", "self", ".", "time", "(", ")", "-", "self", ".", "last_run", "Log", ".", "debug", "(", "'finished timer task %s in %.1f seconds'", ",", "self", ".", "name", ",", "total", ")", "else", ":", "sleep", "=", "min", "(", "5.0", ",", "self", ".", "DELAY", ")", "await", "self", ".", "sleep", "(", "sleep", ")", "except", "CancelledError", ":", "Log", ".", "debug", "(", "'cancelled timer task %s'", ",", "self", ".", "name", ")", "raise", "except", "Exception", ":", "Log", ".", "exception", "(", "'exception in timer task %s'", ",", "self", ".", "name", ")" ]
Execute the task inside the asyncio event loop after `DELAY` seconds. Track the time it takes to run, and log when it starts/stops. If/when `reset()` is called, reset the wait time to `DELAY` seconds.
[ "Execute", "the", "task", "inside", "the", "asyncio", "event", "loop", "after", "DELAY", "seconds", ".", "Track", "the", "time", "it", "takes", "to", "run", "and", "log", "when", "it", "starts", "/", "stops", ".", "If", "/", "when", "reset", "()", "is", "called", "reset", "the", "wait", "time", "to", "DELAY", "seconds", "." ]
train
https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/timer.py#L20-L53
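A sketch of a concrete timer built on the loop above, assuming the tasky Task base class supplies the name/sleep/time plumbing that run_task() uses; the subclass name and body are illustrative:

class CacheWarmTask(TimerTask):  # hypothetical subclass
    DELAY = 30.0                 # first run 30 seconds after the task starts

    async def run(self) -> None:
        # do the actual work here; calling reset() elsewhere postpones
        # the next run by another DELAY seconds
        ...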
jreese/tasky
tasky/tasks/timer.py
TimerTask.reset
def reset(self) -> None:
    '''Reset task execution to `DELAY` seconds from now.'''

    Log.debug('resetting timer task %s', self.name)  # pass the name the format string expects
    self.target = self.time() + self.DELAY
python
def reset(self) -> None:
    '''Reset task execution to `DELAY` seconds from now.'''

    Log.debug('resetting timer task %s', self.name)  # pass the name the format string expects
    self.target = self.time() + self.DELAY
[ "def", "reset", "(", "self", ")", "->", "None", ":", "Log", ".", "debug", "(", "'resetting timer task %s'", ")", "self", ".", "target", "=", "self", ".", "time", "(", ")", "+", "self", ".", "DELAY" ]
Reset task execution to `DELAY` seconds from now.
[ "Reset", "task", "execution", "to", "DELAY", "seconds", "from", "now", "." ]
train
https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/timer.py#L55-L59
praekeltfoundation/seed-stage-based-messaging
seed_stage_based_messaging/decorators.py
internal_only
def internal_only(view_func):
    """
    A view decorator which blocks access for requests coming through the
    load balancer.
    """

    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        forwards = request.META.get("HTTP_X_FORWARDED_FOR", "").split(",")
        # The nginx in the docker container adds the loadbalancer IP to the
        # list inside X-Forwarded-For, so if the list contains more than a
        # single item, we know that it went through our loadbalancer
        if len(forwards) > 1:
            raise PermissionDenied()
        return view_func(request, *args, **kwargs)

    return wrapper
python
def internal_only(view_func):
    """
    A view decorator which blocks access for requests coming through the
    load balancer.
    """

    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        forwards = request.META.get("HTTP_X_FORWARDED_FOR", "").split(",")
        # The nginx in the docker container adds the loadbalancer IP to the
        # list inside X-Forwarded-For, so if the list contains more than a
        # single item, we know that it went through our loadbalancer
        if len(forwards) > 1:
            raise PermissionDenied()
        return view_func(request, *args, **kwargs)

    return wrapper
[ "def", "internal_only", "(", "view_func", ")", ":", "@", "functools", ".", "wraps", "(", "view_func", ")", "def", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "forwards", "=", "request", ".", "META", ".", "get", "(", "\"HTTP_X_FORWARDED_FOR\"", ",", "\"\"", ")", ".", "split", "(", "\",\"", ")", "# The nginx in the docker container adds the loadbalancer IP to the list inside", "# X-Forwarded-For, so if the list contains more than a single item, we know", "# that it went through our loadbalancer", "if", "len", "(", "forwards", ")", ">", "1", ":", "raise", "PermissionDenied", "(", ")", "return", "view_func", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
A view decorator which blocks access for requests coming through the load balancer.
[ "A", "view", "decorator", "which", "blocks", "access", "for", "requests", "coming", "through", "the", "load", "balancer", "." ]
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/seed_stage_based_messaging/decorators.py#L6-L21
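A usage sketch applying the decorator to a plain Django view; the endpoint is hypothetical, but the X-Forwarded-For behaviour is exactly as the comment in the row describes:

from django.http import JsonResponse

@internal_only
def health_detail(request):  # hypothetical internal-only endpoint
    # Reachable when hit directly; a request that traversed the load balancer
    # carries more than one X-Forwarded-For entry and gets PermissionDenied.
    return JsonResponse({"ok": True})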
praekeltfoundation/seed-stage-based-messaging
subscriptions/views.py
SubscriptionSend.post
def post(self, request, *args, **kwargs):
    """
    Validates subscription data before creating Outbound message
    """
    schedule_disable.delay(kwargs["subscription_id"])
    return Response({"accepted": True}, status=201)
python
def post(self, request, *args, **kwargs):
    """
    Validates subscription data before creating Outbound message
    """
    schedule_disable.delay(kwargs["subscription_id"])
    return Response({"accepted": True}, status=201)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "schedule_disable", ".", "delay", "(", "kwargs", "[", "\"subscription_id\"", "]", ")", "return", "Response", "(", "{", "\"accepted\"", ":", "True", "}", ",", "status", "=", "201", ")" ]
Validates subscription data before creating Outbound message
[ "Validates", "subscription", "data", "before", "creating", "Outbound", "message" ]
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/subscriptions/views.py#L86-L90
praekeltfoundation/seed-stage-based-messaging
subscriptions/views.py
SubscriptionResend.post
def post(self, request, *args, **kwargs):
    """
    Validates subscription data before creating Outbound message
    """
    # Look up subscriber
    subscription_id = kwargs["subscription_id"]
    if Subscription.objects.filter(id=subscription_id).exists():
        status = 202
        accepted = {"accepted": True}
        store_resend_request.apply_async(args=[subscription_id])
    else:
        status = 400
        accepted = {
            "accepted": False,
            "reason": "Cannot find subscription with ID {}".format(subscription_id),
        }

    return Response(accepted, status=status)
python
def post(self, request, *args, **kwargs):
    """
    Validates subscription data before creating Outbound message
    """
    # Look up subscriber
    subscription_id = kwargs["subscription_id"]
    if Subscription.objects.filter(id=subscription_id).exists():
        status = 202
        accepted = {"accepted": True}
        store_resend_request.apply_async(args=[subscription_id])
    else:
        status = 400
        accepted = {
            "accepted": False,
            "reason": "Cannot find subscription with ID {}".format(subscription_id),
        }

    return Response(accepted, status=status)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Look up subscriber", "subscription_id", "=", "kwargs", "[", "\"subscription_id\"", "]", "if", "Subscription", ".", "objects", ".", "filter", "(", "id", "=", "subscription_id", ")", ".", "exists", "(", ")", ":", "status", "=", "202", "accepted", "=", "{", "\"accepted\"", ":", "True", "}", "store_resend_request", ".", "apply_async", "(", "args", "=", "[", "subscription_id", "]", ")", "else", ":", "status", "=", "400", "accepted", "=", "{", "\"accepted\"", ":", "False", ",", "\"reason\"", ":", "\"Cannot find subscription with ID {}\"", ".", "format", "(", "subscription_id", ")", ",", "}", "return", "Response", "(", "accepted", ",", "status", "=", "status", ")" ]
Validates subscription data before creating Outbound message
[ "Validates", "subscription", "data", "before", "creating", "Outbound", "message" ]
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/subscriptions/views.py#L100-L115
praekeltfoundation/seed-stage-based-messaging
subscriptions/views.py
SubscriptionRequest.post
def post(self, request, *args, **kwargs):
    """
    Validates subscription data before creating Subscription message
    """
    # Ensure that we check for the 'data' key in the request object before
    # attempting to reference it
    if "data" in request.data:
        # This is a workaround for JSONField not liking blank/null refs
        if "metadata" not in request.data["data"]:
            request.data["data"]["metadata"] = {}
        if "initial_sequence_number" not in request.data["data"]:
            request.data["data"]["initial_sequence_number"] = request.data[
                "data"
            ].get("next_sequence_number")
        subscription = SubscriptionSerializer(data=request.data["data"])
        if subscription.is_valid():
            subscription.save()
            # Return
            status = 201
            accepted = {"accepted": True}
            return Response(accepted, status=status)
        else:
            status = 400
            return Response(subscription.errors, status=status)
    else:
        status = 400
        message = {"data": ["This field is required."]}
        return Response(message, status=status)
python
def post(self, request, *args, **kwargs):
    """
    Validates subscription data before creating Subscription message
    """
    # Ensure that we check for the 'data' key in the request object before
    # attempting to reference it
    if "data" in request.data:
        # This is a workaround for JSONField not liking blank/null refs
        if "metadata" not in request.data["data"]:
            request.data["data"]["metadata"] = {}
        if "initial_sequence_number" not in request.data["data"]:
            request.data["data"]["initial_sequence_number"] = request.data[
                "data"
            ].get("next_sequence_number")
        subscription = SubscriptionSerializer(data=request.data["data"])
        if subscription.is_valid():
            subscription.save()
            # Return
            status = 201
            accepted = {"accepted": True}
            return Response(accepted, status=status)
        else:
            status = 400
            return Response(subscription.errors, status=status)
    else:
        status = 400
        message = {"data": ["This field is required."]}
        return Response(message, status=status)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Ensure that we check for the 'data' key in the request object before", "# attempting to reference it", "if", "\"data\"", "in", "request", ".", "data", ":", "# This is a workaround for JSONField not liking blank/null refs", "if", "\"metadata\"", "not", "in", "request", ".", "data", "[", "\"data\"", "]", ":", "request", ".", "data", "[", "\"data\"", "]", "[", "\"metadata\"", "]", "=", "{", "}", "if", "\"initial_sequence_number\"", "not", "in", "request", ".", "data", "[", "\"data\"", "]", ":", "request", ".", "data", "[", "\"data\"", "]", "[", "\"initial_sequence_number\"", "]", "=", "request", ".", "data", "[", "\"data\"", "]", ".", "get", "(", "\"next_sequence_number\"", ")", "subscription", "=", "SubscriptionSerializer", "(", "data", "=", "request", ".", "data", "[", "\"data\"", "]", ")", "if", "subscription", ".", "is_valid", "(", ")", ":", "subscription", ".", "save", "(", ")", "# Return", "status", "=", "201", "accepted", "=", "{", "\"accepted\"", ":", "True", "}", "return", "Response", "(", "accepted", ",", "status", "=", "status", ")", "else", ":", "status", "=", "400", "return", "Response", "(", "subscription", ".", "errors", ",", "status", "=", "status", ")", "else", ":", "status", "=", "400", "message", "=", "{", "\"data\"", ":", "[", "\"This field is required.\"", "]", "}", "return", "Response", "(", "message", ",", "status", "=", "status", ")" ]
Validates subscription data before creating Subscription message
[ "Validates", "subscription", "data", "before", "creating", "Subscription", "message" ]
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/subscriptions/views.py#L125-L153
praekeltfoundation/seed-stage-based-messaging
subscriptions/views.py
BehindSubscriptionViewSet.find_behind_subscriptions
def find_behind_subscriptions(self, request):
    """
    Starts a celery task that looks through active subscriptions to find
    any subscriptions that are behind where they should be, and adds a
    BehindSubscription for them.
    """
    task_id = find_behind_subscriptions.delay()
    return Response(
        {"accepted": True, "task_id": str(task_id)}, status=status.HTTP_202_ACCEPTED
    )
python
def find_behind_subscriptions(self, request):
    """
    Starts a celery task that looks through active subscriptions to find
    any subscriptions that are behind where they should be, and adds a
    BehindSubscription for them.
    """
    task_id = find_behind_subscriptions.delay()
    return Response(
        {"accepted": True, "task_id": str(task_id)}, status=status.HTTP_202_ACCEPTED
    )
[ "def", "find_behind_subscriptions", "(", "self", ",", "request", ")", ":", "task_id", "=", "find_behind_subscriptions", ".", "delay", "(", ")", "return", "Response", "(", "{", "\"accepted\"", ":", "True", ",", "\"task_id\"", ":", "str", "(", "task_id", ")", "}", ",", "status", "=", "status", ".", "HTTP_202_ACCEPTED", ")" ]
Starts a celery task that looks through active subscriptions to find any subscriptions that are behind where they should be, and adds a BehindSubscription for them.
[ "Starts", "a", "celery", "task", "that", "looks", "through", "active", "subscriptions", "to", "find", "and", "subscriptions", "that", "are", "behind", "where", "they", "should", "be", "and", "adds", "a", "BehindSubscription", "for", "them", "." ]
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/subscriptions/views.py#L290-L300
inspirehep/inspire-query-parser
examples/demo_parser.py
repl
def repl():
    """Read-Eval-Print-Loop for reading the query, printing it and its parse tree.

    Exit the loop either with an interrupt or "quit".
    """
    while True:
        try:
            sys.stdout.write("Type in next query: \n> ")
            import locale
            query_str = raw_input().decode(
                sys.stdin.encoding or locale.getpreferredencoding(True)
            )
        except KeyboardInterrupt:
            break

        if u'quit' in query_str:
            break

        print_query_and_parse_tree(query_str)
python
def repl():
    """Read-Eval-Print-Loop for reading the query, printing it and its parse tree.

    Exit the loop either with an interrupt or "quit".
    """
    while True:
        try:
            sys.stdout.write("Type in next query: \n> ")
            import locale
            query_str = raw_input().decode(
                sys.stdin.encoding or locale.getpreferredencoding(True)
            )
        except KeyboardInterrupt:
            break

        if u'quit' in query_str:
            break

        print_query_and_parse_tree(query_str)
[ "def", "repl", "(", ")", ":", "while", "True", ":", "try", ":", "sys", ".", "stdout", ".", "write", "(", "\"Type in next query: \\n> \"", ")", "import", "locale", "query_str", "=", "raw_input", "(", ")", ".", "decode", "(", "sys", ".", "stdin", ".", "encoding", "or", "locale", ".", "getpreferredencoding", "(", "True", ")", ")", "except", "KeyboardInterrupt", ":", "break", "if", "u'quit'", "in", "query_str", ":", "break", "print_query_and_parse_tree", "(", "query_str", ")" ]
Read-Eval-Print-Loop for reading the query, printing it and its parse tree. Exit the loop either with an interrupt or "quit".
[ "Read", "-", "Eval", "-", "Print", "-", "Loop", "for", "reading", "the", "query", "printing", "it", "and", "its", "parse", "tree", "." ]
train
https://github.com/inspirehep/inspire-query-parser/blob/9dde20d7caef89a48bb419b866f4535c88cfc00d/examples/demo_parser.py#L33-L49
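The row above is Python 2 (raw_input plus manual decoding). A rough Python 3 equivalent, assuming print_query_and_parse_tree is importable from the same module:

def repl_py3():
    while True:
        try:
            query_str = input("Type in next query: \n> ")  # input() already returns str
        except (KeyboardInterrupt, EOFError):
            break
        if 'quit' in query_str:
            break
        print_query_and_parse_tree(query_str)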
jreese/tasky
tasky/tasks/queue.py
QueueTask.run_task
async def run_task(self) -> None:
    '''Initialize the queue and spawn extra worker tasks if this is the
    first task. Then wait for work items to enter the task queue, and
    execute the `run()` method with the current work item.'''

    while self.running:
        try:
            item = self.QUEUE.get_nowait()
            Log.debug('%s processing work item', self.name)
            await self.run(item)
            Log.debug('%s completed work item', self.name)
            self.QUEUE.task_done()

        except asyncio.QueueEmpty:
            if self.OPEN:
                await self.sleep(0.05)
            else:
                Log.debug('%s queue closed and empty, stopping', self.name)
                return

        except CancelledError:
            Log.debug('%s cancelled, dropping work item', self.name)  # supply the missing name argument
            self.QUEUE.task_done()
            raise

        except Exception:
            Log.exception('%s failed work item', self.name)
            self.QUEUE.task_done()
python
async def run_task(self) -> None:
    '''Initialize the queue and spawn extra worker tasks if this is the
    first task. Then wait for work items to enter the task queue, and
    execute the `run()` method with the current work item.'''

    while self.running:
        try:
            item = self.QUEUE.get_nowait()
            Log.debug('%s processing work item', self.name)
            await self.run(item)
            Log.debug('%s completed work item', self.name)
            self.QUEUE.task_done()

        except asyncio.QueueEmpty:
            if self.OPEN:
                await self.sleep(0.05)
            else:
                Log.debug('%s queue closed and empty, stopping', self.name)
                return

        except CancelledError:
            Log.debug('%s cancelled, dropping work item', self.name)  # supply the missing name argument
            self.QUEUE.task_done()
            raise

        except Exception:
            Log.exception('%s failed work item', self.name)
            self.QUEUE.task_done()
[ "async", "def", "run_task", "(", "self", ")", "->", "None", ":", "while", "self", ".", "running", ":", "try", ":", "item", "=", "self", ".", "QUEUE", ".", "get_nowait", "(", ")", "Log", ".", "debug", "(", "'%s processing work item'", ",", "self", ".", "name", ")", "await", "self", ".", "run", "(", "item", ")", "Log", ".", "debug", "(", "'%s completed work item'", ",", "self", ".", "name", ")", "self", ".", "QUEUE", ".", "task_done", "(", ")", "except", "asyncio", ".", "QueueEmpty", ":", "if", "self", ".", "OPEN", ":", "await", "self", ".", "sleep", "(", "0.05", ")", "else", ":", "Log", ".", "debug", "(", "'%s queue closed and empty, stopping'", ",", "self", ".", "name", ")", "return", "except", "CancelledError", ":", "Log", ".", "debug", "(", "'%s cancelled, dropping work item'", ")", "self", ".", "QUEUE", ".", "task_done", "(", ")", "raise", "except", "Exception", ":", "Log", ".", "exception", "(", "'%s failed work item'", ",", "self", ".", "name", ")", "self", ".", "QUEUE", ".", "task_done", "(", ")" ]
Initialize the queue and spawn extra worker tasks if this is the first task. Then wait for work items to enter the task queue, and execute the `run()` method with the current work item.
[ "Initialize", "the", "queue", "and", "spawn", "extra", "worker", "tasks", "if", "this", "if", "the", "first", "task", ".", "Then", "wait", "for", "work", "items", "to", "enter", "the", "task", "queue", "and", "execute", "the", "run", "()", "method", "with", "the", "current", "work", "item", "." ]
train
https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/queue.py#L62-L92
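A hypothetical worker built on the queue loop above; QUEUE and OPEN are the class attributes run_task() consults, and everything else here is an illustrative assumption:

class ThumbnailWorker(QueueTask):  # hypothetical subclass
    async def run(self, item) -> None:
        # process one work item pulled from self.QUEUE; an exception raised
        # here is logged by run_task() and the item is still marked done
        ...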
praekeltfoundation/seed-stage-based-messaging
contentstore/signals.py
schedule_saved
def schedule_saved(sender, instance, **kwargs):
    """
    Fires off the celery task to ensure that this schedule is in the scheduler

    Arguments:
        sender {class} -- The model class, always Schedule
        instance {Schedule} -- The instance of the Schedule that we want to sync
    """
    from contentstore.tasks import sync_schedule

    sync_schedule.delay(str(instance.id))
python
def schedule_saved(sender, instance, **kwargs):
    """
    Fires off the celery task to ensure that this schedule is in the scheduler

    Arguments:
        sender {class} -- The model class, always Schedule
        instance {Schedule} -- The instance of the Schedule that we want to sync
    """
    from contentstore.tasks import sync_schedule

    sync_schedule.delay(str(instance.id))
[ "def", "schedule_saved", "(", "sender", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "from", "contentstore", ".", "tasks", "import", "sync_schedule", "sync_schedule", ".", "delay", "(", "str", "(", "instance", ".", "id", ")", ")" ]
Fires off the celery task to ensure that this schedule is in the scheduler Arguments: sender {class} -- The model class, always Schedule instance {Schedule} -- The instance of the Schedule that we want to sync
[ "Fires", "off", "the", "celery", "task", "to", "ensure", "that", "this", "schedule", "is", "in", "the", "scheduler" ]
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/contentstore/signals.py#L12-L23
praekeltfoundation/seed-stage-based-messaging
contentstore/signals.py
schedule_deleted
def schedule_deleted(sender, instance, **kwargs):
    """
    Fires off the celery task to ensure that this schedule is deactivated

    Arguments:
        sender {class} -- The model class, always Schedule
        instance {Schedule} -- The instance of the schedule that we want to
            deactivate
    """
    from contentstore.tasks import deactivate_schedule

    deactivate_schedule.delay(str(instance.scheduler_schedule_id))
python
def schedule_deleted(sender, instance, **kwargs):
    """
    Fires off the celery task to ensure that this schedule is deactivated

    Arguments:
        sender {class} -- The model class, always Schedule
        instance {Schedule} -- The instance of the schedule that we want to
            deactivate
    """
    from contentstore.tasks import deactivate_schedule

    deactivate_schedule.delay(str(instance.scheduler_schedule_id))
[ "def", "schedule_deleted", "(", "sender", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "from", "contentstore", ".", "tasks", "import", "deactivate_schedule", "deactivate_schedule", ".", "delay", "(", "str", "(", "instance", ".", "scheduler_schedule_id", ")", ")" ]
Fires off the celery task to ensure that this schedule is deactivated Arguments: sender {class} -- The model class, always Schedule instance {Schedule} -- The instance of the schedule that we want to deactivate
[ "Fires", "off", "the", "celery", "task", "to", "ensure", "that", "this", "schedule", "is", "deactivated" ]
train
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/contentstore/signals.py#L27-L38
jreese/tasky
tasky/tasks/periodic.py
PeriodicTask.run_task
async def run_task(self) -> None:
    '''Execute the task inside the asyncio event loop. Track the time it
    takes to run, and log when it starts/stops. After `INTERVAL` seconds,
    if/once the task has finished running, run it again until `stop()`
    is called.'''

    while self.running:
        try:
            Log.debug('executing periodic task %s', self.name)
            before = self.time()
            await self.run()
            total = self.time() - before
            Log.debug('finished periodic task %s in %.1f seconds',
                      self.name, total)

            sleep = self.INTERVAL - total
            if sleep > 0:
                await self.sleep(sleep)

        except CancelledError:
            Log.debug('cancelled periodic task %s', self.name)
            raise

        except Exception:
            Log.exception('exception in periodic task %s', self.name)
python
async def run_task(self) -> None:
    '''Execute the task inside the asyncio event loop. Track the time it
    takes to run, and log when it starts/stops. After `INTERVAL` seconds,
    if/once the task has finished running, run it again until `stop()`
    is called.'''

    while self.running:
        try:
            Log.debug('executing periodic task %s', self.name)
            before = self.time()
            await self.run()
            total = self.time() - before
            Log.debug('finished periodic task %s in %.1f seconds',
                      self.name, total)

            sleep = self.INTERVAL - total
            if sleep > 0:
                await self.sleep(sleep)

        except CancelledError:
            Log.debug('cancelled periodic task %s', self.name)
            raise

        except Exception:
            Log.exception('exception in periodic task %s', self.name)
[ "async", "def", "run_task", "(", "self", ")", "->", "None", ":", "while", "self", ".", "running", ":", "try", ":", "Log", ".", "debug", "(", "'executing periodic task %s'", ",", "self", ".", "name", ")", "before", "=", "self", ".", "time", "(", ")", "await", "self", ".", "run", "(", ")", "total", "=", "self", ".", "time", "(", ")", "-", "before", "Log", ".", "debug", "(", "'finished periodic task %s in %.1f seconds'", ",", "self", ".", "name", ",", "total", ")", "sleep", "=", "self", ".", "INTERVAL", "-", "total", "if", "sleep", ">", "0", ":", "await", "self", ".", "sleep", "(", "sleep", ")", "except", "CancelledError", ":", "Log", ".", "debug", "(", "'cancelled periodic task %s'", ",", "self", ".", "name", ")", "raise", "except", "Exception", ":", "Log", ".", "exception", "(", "'exception in periodic task %s'", ",", "self", ".", "name", ")" ]
Execute the task inside the asyncio event loop. Track the time it takes to run, and log when it starts/stops. After `INTERVAL` seconds, if/once the task has finished running, run it again until `stop()` is called.
[ "Execute", "the", "task", "inside", "the", "asyncio", "event", "loop", ".", "Track", "the", "time", "it", "takes", "to", "run", "and", "log", "when", "it", "starts", "/", "stops", ".", "After", "INTERVAL", "seconds", "if", "/", "once", "the", "task", "has", "finished", "running", "run", "it", "again", "until", "stop", "()", "is", "called", "." ]
train
https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/periodic.py#L18-L42
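A sketch of a periodic job on top of this loop; the subclass is illustrative, and INTERVAL is the attribute the loop subtracts the run time from before sleeping:

class MetricsFlushTask(PeriodicTask):  # hypothetical subclass
    INTERVAL = 60.0  # aim for one run per minute, minus however long run() took

    async def run(self) -> None:
        ...  # a run longer than INTERVAL simply skips the sleep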
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
group_by
def group_by(keys, values=None, reduction=None, axis=0):
    """construct a grouping object on the given keys, optionally performing the given reduction on the given values

    Parameters
    ----------
    keys : indexable object
        keys to group by
    values : array_like, optional
        sequence of values, of the same length as keys
        if a reduction function is provided, the given values are reduced by key
        if no reduction is provided, the given values are grouped and split by key
    reduction : lambda, optional
        reduction function to apply to the values in each group
    axis : int, optional
        axis to regard as the key-sequence, in case keys is multi-dimensional

    Returns
    -------
    iterable
        if values is None, a GroupBy object of the given keys object
        if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
        else, a sequence of tuples of unique keys and reductions of values over that key-group

    See Also
    --------
    numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
    """
    g = GroupBy(keys, axis)
    if values is None:
        return g
    groups = g.split(values)
    if reduction is None:
        return g.unique, groups
    return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
python
def group_by(keys, values=None, reduction=None, axis=0):
    """construct a grouping object on the given keys, optionally performing the given reduction on the given values

    Parameters
    ----------
    keys : indexable object
        keys to group by
    values : array_like, optional
        sequence of values, of the same length as keys
        if a reduction function is provided, the given values are reduced by key
        if no reduction is provided, the given values are grouped and split by key
    reduction : lambda, optional
        reduction function to apply to the values in each group
    axis : int, optional
        axis to regard as the key-sequence, in case keys is multi-dimensional

    Returns
    -------
    iterable
        if values is None, a GroupBy object of the given keys object
        if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
        else, a sequence of tuples of unique keys and reductions of values over that key-group

    See Also
    --------
    numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
    """
    g = GroupBy(keys, axis)
    if values is None:
        return g
    groups = g.split(values)
    if reduction is None:
        return g.unique, groups
    return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
[ "def", "group_by", "(", "keys", ",", "values", "=", "None", ",", "reduction", "=", "None", ",", "axis", "=", "0", ")", ":", "g", "=", "GroupBy", "(", "keys", ",", "axis", ")", "if", "values", "is", "None", ":", "return", "g", "groups", "=", "g", ".", "split", "(", "values", ")", "if", "reduction", "is", "None", ":", "return", "g", ".", "unique", ",", "groups", "return", "[", "(", "key", ",", "reduction", "(", "group", ")", ")", "for", "key", ",", "group", "in", "zip", "(", "g", ".", "unique", ",", "groups", ")", "]" ]
construct a grouping object on the given keys, optionally performing the given reduction on the given values Parameters ---------- keys : indexable object keys to group by values : array_like, optional sequence of values, of the same length as keys if a reduction function is provided, the given values are reduced by key if no reduction is provided, the given values are grouped and split by key reduction : lambda, optional reduction function to apply to the values in each group axis : int, optional axis to regard as the key-sequence, in case keys is multi-dimensional Returns ------- iterable if values is None, a GroupBy object of the given keys object if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values else, a sequence of tuples of unique keys and reductions of values over that key-group See Also -------- numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
[ "construct", "a", "grouping", "object", "on", "the", "given", "keys", "optionally", "performing", "the", "given", "reduction", "on", "the", "given", "values" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L576-L609
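A worked example of the three call forms described in the docstring, using plain numpy arrays:

import numpy as np
from numpy_indexed import group_by

keys = np.array(['a', 'b', 'a', 'b'])
values = np.array([1, 2, 3, 4])

g = group_by(keys)                        # GroupBy object, no values given
unique, groups = group_by(keys, values)   # ['a' 'b'], [array([1, 3]), array([2, 4])]
pairs = group_by(keys, values, np.sum)    # [('a', 4), ('b', 6)]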
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.split_iterable_as_iterable
def split_iterable_as_iterable(self, values):
    """Group iterable into iterables, in the order of the keys

    Parameters
    ----------
    values : iterable of length equal to keys
        iterable of values to be grouped

    Yields
    ------
    iterable of items in values

    Notes
    -----
    Memory consumption depends on the amount of sorting required
    Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
    before we can start yielding any output
    But to the extent that the keys are already sorted, the grouping is lazy
    """
    values = iter(enumerate(values))
    cache = dict()

    def get_value(ti):
        try:
            return cache.pop(ti)
        except KeyError:  # catch only the cache miss, not arbitrary errors
            while True:
                i, v = next(values)
                if i == ti:
                    return v
                cache[i] = v

    s = iter(self.index.sorter)
    for c in self.count:
        yield (get_value(i) for i in itertools.islice(s, int(c)))
python
def split_iterable_as_iterable(self, values):
    """Group iterable into iterables, in the order of the keys

    Parameters
    ----------
    values : iterable of length equal to keys
        iterable of values to be grouped

    Yields
    ------
    iterable of items in values

    Notes
    -----
    Memory consumption depends on the amount of sorting required
    Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
    before we can start yielding any output
    But to the extent that the keys are already sorted, the grouping is lazy
    """
    values = iter(enumerate(values))
    cache = dict()

    def get_value(ti):
        try:
            return cache.pop(ti)
        except KeyError:  # catch only the cache miss, not arbitrary errors
            while True:
                i, v = next(values)
                if i == ti:
                    return v
                cache[i] = v

    s = iter(self.index.sorter)
    for c in self.count:
        yield (get_value(i) for i in itertools.islice(s, int(c)))
[ "def", "split_iterable_as_iterable", "(", "self", ",", "values", ")", ":", "values", "=", "iter", "(", "enumerate", "(", "values", ")", ")", "cache", "=", "dict", "(", ")", "def", "get_value", "(", "ti", ")", ":", "try", ":", "return", "cache", ".", "pop", "(", "ti", ")", "except", ":", "while", "True", ":", "i", ",", "v", "=", "next", "(", "values", ")", "if", "i", "==", "ti", ":", "return", "v", "cache", "[", "i", "]", "=", "v", "s", "=", "iter", "(", "self", ".", "index", ".", "sorter", ")", "for", "c", "in", "self", ".", "count", ":", "yield", "(", "get_value", "(", "i", ")", "for", "i", "in", "itertools", ".", "islice", "(", "s", ",", "int", "(", "c", ")", ")", ")" ]
Group iterable into iterables, in the order of the keys Parameters ---------- values : iterable of length equal to keys iterable of values to be grouped Yields ------ iterable of items in values Notes ----- Memory consumption depends on the amount of sorting required Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable, before we can start yielding any output But to the extent that the keys are already sorted, the grouping is lazy
[ "Group", "iterable", "into", "iterables", "in", "the", "order", "of", "the", "keys" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L57-L89
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.split_iterable_as_unordered_iterable
def split_iterable_as_unordered_iterable(self, values):
    """Group iterable into iterables, without regard for the ordering of self.index.unique
    key-group tuples are yielded as soon as they are complete

    Parameters
    ----------
    values : iterable of length equal to keys
        iterable of values to be grouped

    Yields
    ------
    tuple of key, and a list of corresponding items in values

    Notes
    -----
    This approach is lazy, insofar as grouped values are close in their iterable
    """
    from collections import defaultdict
    cache = defaultdict(list)
    count = self.count
    unique = self.unique
    key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
    for i, v in zip(self.inverse, values):
        cache[i].append(v)
        if len(cache[i]) == count[i]:
            yield key(i), cache.pop(i)
python
def split_iterable_as_unordered_iterable(self, values):
    """Group iterable into iterables, without regard for the ordering of self.index.unique
    key-group tuples are yielded as soon as they are complete

    Parameters
    ----------
    values : iterable of length equal to keys
        iterable of values to be grouped

    Yields
    ------
    tuple of key, and a list of corresponding items in values

    Notes
    -----
    This approach is lazy, insofar as grouped values are close in their iterable
    """
    from collections import defaultdict
    cache = defaultdict(list)
    count = self.count
    unique = self.unique
    key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
    for i, v in zip(self.inverse, values):
        cache[i].append(v)
        if len(cache[i]) == count[i]:
            yield key(i), cache.pop(i)
[ "def", "split_iterable_as_unordered_iterable", "(", "self", ",", "values", ")", ":", "from", "collections", "import", "defaultdict", "cache", "=", "defaultdict", "(", "list", ")", "count", "=", "self", ".", "count", "unique", "=", "self", ".", "unique", "key", "=", "(", "lambda", "i", ":", "unique", "[", "i", "]", ")", "if", "isinstance", "(", "unique", ",", "np", ".", "ndarray", ")", "else", "(", "lambda", "i", ":", "tuple", "(", "c", "[", "i", "]", "for", "c", "in", "unique", ")", ")", "for", "i", ",", "v", "in", "zip", "(", "self", ".", "inverse", ",", "values", ")", ":", "cache", "[", "i", "]", ".", "append", "(", "v", ")", "if", "len", "(", "cache", "[", "i", "]", ")", "==", "count", "[", "i", "]", ":", "yield", "key", "(", "i", ")", ",", "cache", ".", "pop", "(", "i", ")" ]
Group iterable into iterables, without regard for the ordering of self.index.unique key-group tuples are yielded as soon as they are complete Parameters ---------- values : iterable of length equal to keys iterable of values to be grouped Yields ------ tuple of key, and a list of corresponding items in values Notes ----- This approach is lazy, insofar as grouped values are close in their iterable
[ "Group", "iterable", "into", "iterables", "without", "regard", "for", "the", "ordering", "of", "self", ".", "index", ".", "unique", "key", "-", "group", "tuples", "are", "yielded", "as", "soon", "as", "they", "are", "complete" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L91-L116
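A small usage sketch: each key-group is yielded the moment its last member arrives, so already-sorted keys stream with minimal buffering. The GroupBy instance is obtained via group_by from the earlier row:

import numpy as np
from numpy_indexed import group_by

g = group_by(np.array(['a', 'a', 'b']))
for key, items in g.split_iterable_as_unordered_iterable(['x', 'y', 'z']):
    print(key, items)  # a ['x', 'y']  then  b ['z']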
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.split_sequence_as_iterable
def split_sequence_as_iterable(self, values):
    """Group sequence into iterables

    Parameters
    ----------
    values : iterable of length equal to keys
        iterable of values to be grouped

    Yields
    ------
    iterable of items in values

    Notes
    -----
    This is the preferred method if values has random access, but we don't want
    it completely in memory. Like a big memory mapped file, for instance
    """
    s = iter(self.index.sorter)
    for c in self.count:
        yield (values[i] for i in itertools.islice(s, int(c)))
python
def split_sequence_as_iterable(self, values):
    """Group sequence into iterables

    Parameters
    ----------
    values : iterable of length equal to keys
        iterable of values to be grouped

    Yields
    ------
    iterable of items in values

    Notes
    -----
    This is the preferred method if values has random access, but we don't want
    it completely in memory. Like a big memory mapped file, for instance
    """
    s = iter(self.index.sorter)
    for c in self.count:
        yield (values[i] for i in itertools.islice(s, int(c)))
[ "def", "split_sequence_as_iterable", "(", "self", ",", "values", ")", ":", "print", "(", "self", ".", "count", ")", "s", "=", "iter", "(", "self", ".", "index", ".", "sorter", ")", "for", "c", "in", "self", ".", "count", ":", "yield", "(", "values", "[", "i", "]", "for", "i", "in", "itertools", ".", "islice", "(", "s", ",", "int", "(", "c", ")", ")", ")" ]
Group sequence into iterables Parameters ---------- values : iterable of length equal to keys iterable of values to be grouped Yields ------ iterable of items in values Notes ----- This is the preferred method if values has random access, but we don't want it completely in memory. Like a big memory mapped file, for instance
[ "Group", "sequence", "into", "iterables" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L118-L138
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.split_array_as_array
def split_array_as_array(self, values):
    """Group ndarray into ndarray by means of reshaping

    Parameters
    ----------
    values : ndarray_like, [index.size, ...]

    Returns
    -------
    ndarray, [groups, group_size, ...]
        values grouped by key

    Raises
    ------
    ValueError
        This operation is only possible if index.uniform==True
    """
    if not self.index.uniform:
        raise ValueError("Array can only be split as array if all groups have the same size")
    values = np.asarray(values)
    values = values[self.index.sorter]
    return values.reshape(self.groups, -1, *values.shape[1:])
python
def split_array_as_array(self, values):
    """Group ndarray into ndarray by means of reshaping

    Parameters
    ----------
    values : ndarray_like, [index.size, ...]

    Returns
    -------
    ndarray, [groups, group_size, ...]
        values grouped by key

    Raises
    ------
    ValueError
        This operation is only possible if index.uniform==True
    """
    if not self.index.uniform:
        raise ValueError("Array can only be split as array if all groups have the same size")
    values = np.asarray(values)
    values = values[self.index.sorter]
    return values.reshape(self.groups, -1, *values.shape[1:])
[ "def", "split_array_as_array", "(", "self", ",", "values", ")", ":", "if", "not", "self", ".", "index", ".", "uniform", ":", "raise", "ValueError", "(", "\"Array can only be split as array if all groups have the same size\"", ")", "values", "=", "np", ".", "asarray", "(", "values", ")", "values", "=", "values", "[", "self", ".", "index", ".", "sorter", "]", "return", "values", ".", "reshape", "(", "self", ".", "groups", ",", "-", "1", ",", "*", "values", ".", "shape", "[", "1", ":", "]", ")" ]
Group ndarray into ndarray by means of reshaping Parameters ---------- values : ndarray_like, [index.size, ...] Returns ------- ndarray, [groups, group_size, ...] values grouped by key Raises ------ ValueError This operation is only possible if index.uniform==True
[ "Group", "ndarray", "into", "ndarray", "by", "means", "of", "reshaping" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L140-L161
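A worked example: two groups of equal size, so index.uniform holds and the result is a plain (groups, group_size) ndarray:

import numpy as np
from numpy_indexed import group_by

g = group_by(np.array([0, 0, 1, 1]))
g.split_array_as_array(np.array([10, 20, 30, 40]))
# -> array([[10, 20],
#           [30, 40]])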
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.split_array_as_list
def split_array_as_list(self, values):
    """Group values as a list of arrays, or a jagged-array

    Parameters
    ----------
    values : ndarray, [keys, ...]

    Returns
    -------
    list of length self.groups of ndarray, [key_count, ...]
    """
    values = np.asarray(values)
    values = values[self.index.sorter]
    return np.split(values, self.index.slices[1:-1], axis=0)
python
def split_array_as_list(self, values):
    """Group values as a list of arrays, or a jagged-array

    Parameters
    ----------
    values : ndarray, [keys, ...]

    Returns
    -------
    list of length self.groups of ndarray, [key_count, ...]
    """
    values = np.asarray(values)
    values = values[self.index.sorter]
    return np.split(values, self.index.slices[1:-1], axis=0)
[ "def", "split_array_as_list", "(", "self", ",", "values", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "values", "=", "values", "[", "self", ".", "index", ".", "sorter", "]", "return", "np", ".", "split", "(", "values", ",", "self", ".", "index", ".", "slices", "[", "1", ":", "-", "1", "]", ",", "axis", "=", "0", ")" ]
Group values as a list of arrays, or a jagged-array Parameters ---------- values : ndarray, [keys, ...] Returns ------- list of length self.groups of ndarray, [key_count, ...]
[ "Group", "values", "as", "a", "list", "of", "arrays", "or", "a", "jagged", "-", "array" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L163-L176
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.reduce
def reduce(self, values, operator=np.add, axis=0, dtype=None):
    """Reduce the values over identical key groups, using the given ufunc
    reduction is over the first axis, which should have elements corresponding to the keys
    all other axes are treated independently for the sake of this reduction

    Parameters
    ----------
    values : ndarray, [keys, ...]
        values to perform reduction over
    operator : numpy.ufunc
        a numpy ufunc, such as np.add or np.multiply
    axis : int, optional
        the axis to reduce over
    dtype : output dtype

    Returns
    -------
    ndarray, [groups, ...]
        values reduced by operator over the key-groups
    """
    values = np.take(values, self.index.sorter, axis=axis)
    return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
python
def reduce(self, values, operator=np.add, axis=0, dtype=None):
    """Reduce the values over identical key groups, using the given ufunc
    reduction is over the first axis, which should have elements corresponding to the keys
    all other axes are treated independently for the sake of this reduction

    Parameters
    ----------
    values : ndarray, [keys, ...]
        values to perform reduction over
    operator : numpy.ufunc
        a numpy ufunc, such as np.add or np.multiply
    axis : int, optional
        the axis to reduce over
    dtype : output dtype

    Returns
    -------
    ndarray, [groups, ...]
        values reduced by operator over the key-groups
    """
    values = np.take(values, self.index.sorter, axis=axis)
    return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
[ "def", "reduce", "(", "self", ",", "values", ",", "operator", "=", "np", ".", "add", ",", "axis", "=", "0", ",", "dtype", "=", "None", ")", ":", "values", "=", "np", ".", "take", "(", "values", ",", "self", ".", "index", ".", "sorter", ",", "axis", "=", "axis", ")", "return", "operator", ".", "reduceat", "(", "values", ",", "self", ".", "index", ".", "start", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")" ]
Reduce the values over identical key groups, using the given ufunc reduction is over the first axis, which should have elements corresponding to the keys all other axes are treated independently for the sake of this reduction Parameters ---------- values : ndarray, [keys, ...] values to perform reduction over operator : numpy.ufunc a numpy ufunc, such as np.add or np.multiply axis : int, optional the axis to reduce over dtype : output dtype Returns ------- ndarray, [groups, ...] values reduced by operator over the key-groups
[ "Reduce", "the", "values", "over", "identical", "key", "groups", "using", "the", "given", "ufunc", "reduction", "is", "over", "the", "first", "axis", "which", "should", "have", "elements", "corresponding", "to", "the", "keys", "all", "other", "axes", "are", "treated", "indepenently", "for", "the", "sake", "of", "this", "reduction" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L192-L213
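Two worked calls of reduce(); the first uses the np.add default, the second swaps in another ufunc, matching the reduceat call in the row above:

import numpy as np
from numpy_indexed import group_by

g = group_by(np.array([0, 1, 0, 1]))
g.reduce(np.array([1.0, 2.0, 3.0, 4.0]))      # -> array([4., 6.])
g.reduce(np.arange(4), operator=np.minimum)   # -> array([0, 1])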
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.sum
def sum(self, values, axis=0, dtype=None):
    """compute the sum over each group

    Parameters
    ----------
    values : array_like, [keys, ...]
        values to sum per group
    axis : int, optional
        alternative reduction axis for values
    dtype : output dtype

    Returns
    -------
    unique: ndarray, [groups]
        unique keys
    reduced : ndarray, [groups, ...]
        value array, reduced over groups
    """
    values = np.asarray(values)
    return self.unique, self.reduce(values, axis=axis, dtype=dtype)
python
def sum(self, values, axis=0, dtype=None): """compute the sum over each group Parameters ---------- values : array_like, [keys, ...] values to sum per group axis : int, optional alternative reduction axis for values dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, self.reduce(values, axis=axis, dtype=dtype)
[ "def", "sum", "(", "self", ",", "values", ",", "axis", "=", "0", ",", "dtype", "=", "None", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "return", "self", ".", "unique", ",", "self", ".", "reduce", "(", "values", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")" ]
compute the sum over each group Parameters ---------- values : array_like, [keys, ...] values to sum per group axis : int, optional alternative reduction axis for values dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "compute", "the", "sum", "over", "each", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L216-L235
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.prod
def prod(self, values, axis=0, dtype=None): """compute the product over each group Parameters ---------- values : array_like, [keys, ...] values to multiply per group axis : int, optional alternative reduction axis for values dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
python
def prod(self, values, axis=0, dtype=None): """compute the product over each group Parameters ---------- values : array_like, [keys, ...] values to multiply per group axis : int, optional alternative reduction axis for values dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
[ "def", "prod", "(", "self", ",", "values", ",", "axis", "=", "0", ",", "dtype", "=", "None", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "return", "self", ".", "unique", ",", "self", ".", "reduce", "(", "values", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ",", "operator", "=", "np", ".", "multiply", ")" ]
compute the product over each group Parameters ---------- values : array_like, [keys, ...] values to multiply per group axis : int, optional alternative reduction axis for values dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "compute", "the", "product", "over", "each", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L237-L256
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.mean
def mean(self, values, axis=0, weights=None, dtype=None): """compute the mean over each group Parameters ---------- values : array_like, [keys, ...] values to take average of per group axis : int, optional alternative reduction axis for values weights : ndarray, [keys, ...], optional weight to use for each value dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) if weights is None: result = self.reduce(values, axis=axis, dtype=dtype) shape = [1] * values.ndim shape[axis] = self.groups weights = self.count.reshape(shape) else: weights = np.asarray(weights) result = self.reduce(values * weights, axis=axis, dtype=dtype) weights = self.reduce(weights, axis=axis, dtype=dtype) return self.unique, result / weights
python
def mean(self, values, axis=0, weights=None, dtype=None): """compute the mean over each group Parameters ---------- values : array_like, [keys, ...] values to take average of per group axis : int, optional alternative reduction axis for values weights : ndarray, [keys, ...], optional weight to use for each value dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) if weights is None: result = self.reduce(values, axis=axis, dtype=dtype) shape = [1] * values.ndim shape[axis] = self.groups weights = self.count.reshape(shape) else: weights = np.asarray(weights) result = self.reduce(values * weights, axis=axis, dtype=dtype) weights = self.reduce(weights, axis=axis, dtype=dtype) return self.unique, result / weights
[ "def", "mean", "(", "self", ",", "values", ",", "axis", "=", "0", ",", "weights", "=", "None", ",", "dtype", "=", "None", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "if", "weights", "is", "None", ":", "result", "=", "self", ".", "reduce", "(", "values", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")", "shape", "=", "[", "1", "]", "*", "values", ".", "ndim", "shape", "[", "axis", "]", "=", "self", ".", "groups", "weights", "=", "self", ".", "count", ".", "reshape", "(", "shape", ")", "else", ":", "weights", "=", "np", ".", "asarray", "(", "weights", ")", "result", "=", "self", ".", "reduce", "(", "values", "*", "weights", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")", "weights", "=", "self", ".", "reduce", "(", "weights", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")", "return", "self", ".", "unique", ",", "result", "/", "weights" ]
compute the mean over each group Parameters ---------- values : array_like, [keys, ...] values to take average of per group axis : int, optional alternative reduction axis for values weights : ndarray, [keys, ...], optional weight to use for each value dtype : output dtype Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "compute", "the", "mean", "over", "each", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L258-L288
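A short usage sketch of the weighted mean documented above, assuming the numpy_indexed package is importable as npi, the alias its own source uses; data is illustrative:

import numpy as np
import numpy_indexed as npi

keys = np.array([0, 0, 1, 1])
values = np.array([1.0, 3.0, 10.0, 30.0])
weights = np.array([1.0, 3.0, 1.0, 1.0])
unique, means = npi.group_by(keys).mean(values, weights=weights)
print(unique, means)  # [0 1] [ 2.5 20. ]   (group 0: (1*1 + 3*3) / 4)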
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.var
def var(self, values, axis=0, weights=None, dtype=None): """compute the variance over each group Parameters ---------- values : array_like, [keys, ...] values to take variance of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) unique, mean = self.mean(values, axis, weights, dtype) err = values - mean.take(self.inverse, axis) if weights is None: shape = [1] * values.ndim shape[axis] = self.groups group_weights = self.count.reshape(shape) var = self.reduce(err ** 2, axis=axis, dtype=dtype) else: weights = np.asarray(weights) group_weights = self.reduce(weights, axis=axis, dtype=dtype) var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype) return unique, var / group_weights
python
def var(self, values, axis=0, weights=None, dtype=None): """compute the variance over each group Parameters ---------- values : array_like, [keys, ...] values to take variance of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) unique, mean = self.mean(values, axis, weights, dtype) err = values - mean.take(self.inverse, axis) if weights is None: shape = [1] * values.ndim shape[axis] = self.groups group_weights = self.count.reshape(shape) var = self.reduce(err ** 2, axis=axis, dtype=dtype) else: weights = np.asarray(weights) group_weights = self.reduce(weights, axis=axis, dtype=dtype) var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype) return unique, var / group_weights
[ "def", "var", "(", "self", ",", "values", ",", "axis", "=", "0", ",", "weights", "=", "None", ",", "dtype", "=", "None", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "unique", ",", "mean", "=", "self", ".", "mean", "(", "values", ",", "axis", ",", "weights", ",", "dtype", ")", "err", "=", "values", "-", "mean", ".", "take", "(", "self", ".", "inverse", ",", "axis", ")", "if", "weights", "is", "None", ":", "shape", "=", "[", "1", "]", "*", "values", ".", "ndim", "shape", "[", "axis", "]", "=", "self", ".", "groups", "group_weights", "=", "self", ".", "count", ".", "reshape", "(", "shape", ")", "var", "=", "self", ".", "reduce", "(", "err", "**", "2", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")", "else", ":", "weights", "=", "np", ".", "asarray", "(", "weights", ")", "group_weights", "=", "self", ".", "reduce", "(", "weights", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")", "var", "=", "self", ".", "reduce", "(", "weights", "*", "err", "**", "2", ",", "axis", "=", "axis", ",", "dtype", "=", "dtype", ")", "return", "unique", ",", "var", "/", "group_weights" ]
compute the variance over each group Parameters ---------- values : array_like, [keys, ...] values to take variance of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "compute", "the", "variance", "over", "each", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L290-L321
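Note that var above divides the (weighted) sum of squared errors by the full group weight, with no degrees-of-freedom correction, i.e. it returns the population variance. A quick illustrative check (npi as above):

import numpy as np
import numpy_indexed as npi

keys = np.array([0, 0, 0, 1, 1])
values = np.array([1.0, 2.0, 3.0, 4.0, 8.0])
unique, variances = npi.group_by(keys).var(values)
print(variances)  # [ 0.66666667 4. ]  -- squared errors averaged over group sizes 3 and 2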
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.std
def std(self, values, axis=0, weights=None, dtype=None): """standard deviation over each group Parameters ---------- values : array_like, [keys, ...] values to take standard deviation of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ unique, var = self.var(values, axis, weights, dtype) return unique, np.sqrt(var)
python
def std(self, values, axis=0, weights=None, dtype=None): """standard deviation over each group Parameters ---------- values : array_like, [keys, ...] values to take standard deviation of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ unique, var = self.var(values, axis, weights, dtype) return unique, np.sqrt(var)
[ "def", "std", "(", "self", ",", "values", ",", "axis", "=", "0", ",", "weights", "=", "None", ",", "dtype", "=", "None", ")", ":", "unique", ",", "var", "=", "self", ".", "var", "(", "values", ",", "axis", ",", "weights", ",", "dtype", ")", "return", "unique", ",", "np", ".", "sqrt", "(", "var", ")" ]
standard deviation over each group Parameters ---------- values : array_like, [keys, ...] values to take standard deviation of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "standard", "deviation", "over", "each", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L323-L341
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.median
def median(self, values, axis=0, average=True): """compute the median value over each group. Parameters ---------- values : array_like, [keys, ...] values to compute the median of per group axis : int, optional alternative reduction axis for values average : bool, optional when average is true, the average of the two central values is taken for groups with an even key-count Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ mid_2 = self.index.start + self.index.stop hi = (mid_2 ) // 2 lo = (mid_2 - 1) // 2 #need this indirection for lex-index compatibility sorted_group_rank_per_key = self.index.sorted_group_rank_per_key def median1d(slc): #place values at correct keys; preconditions the upcoming lexsort slc = slc[self.index.sorter] #refine value sorting within each keygroup sorter = np.lexsort((slc, sorted_group_rank_per_key)) slc = slc[sorter] return (slc[lo]+slc[hi]) / 2 if average else slc[hi] values = np.asarray(values) if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization? values = np.apply_along_axis(median1d, axis, values) else: values = median1d(values) return self.unique, values
python
def median(self, values, axis=0, average=True): """compute the median value over each group. Parameters ---------- values : array_like, [keys, ...] values to compute the median of per group axis : int, optional alternative reduction axis for values average : bool, optional when average is true, the average of the two central values is taken for groups with an even key-count Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ mid_2 = self.index.start + self.index.stop hi = (mid_2 ) // 2 lo = (mid_2 - 1) // 2 #need this indirection for lex-index compatibility sorted_group_rank_per_key = self.index.sorted_group_rank_per_key def median1d(slc): #place values at correct keys; preconditions the upcoming lexsort slc = slc[self.index.sorter] #refine value sorting within each keygroup sorter = np.lexsort((slc, sorted_group_rank_per_key)) slc = slc[sorter] return (slc[lo]+slc[hi]) / 2 if average else slc[hi] values = np.asarray(values) if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization? values = np.apply_along_axis(median1d, axis, values) else: values = median1d(values) return self.unique, values
[ "def", "median", "(", "self", ",", "values", ",", "axis", "=", "0", ",", "average", "=", "True", ")", ":", "mid_2", "=", "self", ".", "index", ".", "start", "+", "self", ".", "index", ".", "stop", "hi", "=", "(", "mid_2", ")", "//", "2", "lo", "=", "(", "mid_2", "-", "1", ")", "//", "2", "#need this indirection for lex-index compatibility", "sorted_group_rank_per_key", "=", "self", ".", "index", ".", "sorted_group_rank_per_key", "def", "median1d", "(", "slc", ")", ":", "#place values at correct keys; preconditions the upcoming lexsort", "slc", "=", "slc", "[", "self", ".", "index", ".", "sorter", "]", "#refine value sorting within each keygroup", "sorter", "=", "np", ".", "lexsort", "(", "(", "slc", ",", "sorted_group_rank_per_key", ")", ")", "slc", "=", "slc", "[", "sorter", "]", "return", "(", "slc", "[", "lo", "]", "+", "slc", "[", "hi", "]", ")", "/", "2", "if", "average", "else", "slc", "[", "hi", "]", "values", "=", "np", ".", "asarray", "(", "values", ")", "if", "values", ".", "ndim", ">", "1", ":", "#is trying to skip apply_along_axis somewhat premature optimization?", "values", "=", "np", ".", "apply_along_axis", "(", "median1d", ",", "axis", ",", "values", ")", "else", ":", "values", "=", "median1d", "(", "values", ")", "return", "self", ".", "unique", ",", "values" ]
compute the median value over each group. Parameters ---------- values : array_like, [keys, ...] values to compute the median of per group axis : int, optional alternative reduction axis for values average : bool, optional when average is true, the average of the two central values is taken for groups with an even key-count Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "compute", "the", "median", "value", "over", "each", "group", "." ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L343-L382
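Usage sketch for median above: with average=True, a group with an even item count returns the mean of its two central values (illustrative data):

import numpy as np
import numpy_indexed as npi

keys = np.array([0, 0, 0, 1, 1, 1, 1])
values = np.array([3.0, 1.0, 2.0, 5.0, 7.0, 6.0, 8.0])
unique, medians = npi.group_by(keys).median(values)
print(medians)  # [ 2. 6.5]  -- group 1 averages its two central values 6 and 7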
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.mode
def mode(self, values, weights=None): """compute the mode within each group. Parameters ---------- values : array_like, [keys, ...] values to compute the mode of per group weights : array_like, [keys], float, optional optional weight associated with each entry in values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ if weights is None: unique, weights = npi.count((self.index.sorted_group_rank_per_key, values)) else: unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights) x, bin = npi.group_by(unique[0]).argmax(weights) return x, unique[1][bin]
python
def mode(self, values, weights=None): """compute the mode within each group. Parameters ---------- values : array_like, [keys, ...] values to compute the mode of per group weights : array_like, [keys], float, optional optional weight associated with each entry in values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ if weights is None: unique, weights = npi.count((self.index.sorted_group_rank_per_key, values)) else: unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights) x, bin = npi.group_by(unique[0]).argmax(weights) return x, unique[1][bin]
[ "def", "mode", "(", "self", ",", "values", ",", "weights", "=", "None", ")", ":", "if", "weights", "is", "None", ":", "unique", ",", "weights", "=", "npi", ".", "count", "(", "(", "self", ".", "index", ".", "sorted_group_rank_per_key", ",", "values", ")", ")", "else", ":", "unique", ",", "weights", "=", "npi", ".", "group_by", "(", "(", "self", ".", "index", ".", "sorted_group_rank_per_key", ",", "values", ")", ")", ".", "sum", "(", "weights", ")", "x", ",", "bin", "=", "npi", ".", "group_by", "(", "unique", "[", "0", "]", ")", ".", "argmax", "(", "weights", ")", "return", "x", ",", "unique", "[", "1", "]", "[", "bin", "]" ]
compute the mode within each group. Parameters ---------- values : array_like, [keys, ...] values to compute the mode of per group weights : array_like, [keys], float, optional optional weight associated with each entry in values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "compute", "the", "mode", "within", "each", "group", "." ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L384-L407
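Usage sketch for the grouped mode above (illustrative data):

import numpy as np
import numpy_indexed as npi

keys = np.array([0, 0, 0, 1, 1])
values = np.array([7, 9, 7, 4, 4])
unique, modes = npi.group_by(keys).mode(values)
print(unique, modes)  # [0 1] [7 4]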
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.min
def min(self, values, axis=0): """return the minimum within each group Parameters ---------- values : array_like, [keys, ...] values to take minimum of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, self.reduce(values, np.minimum, axis)
python
def min(self, values, axis=0): """return the minimum within each group Parameters ---------- values : array_like, [keys, ...] values to take minimum of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, self.reduce(values, np.minimum, axis)
[ "def", "min", "(", "self", ",", "values", ",", "axis", "=", "0", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "return", "self", ".", "unique", ",", "self", ".", "reduce", "(", "values", ",", "np", ".", "minimum", ",", "axis", ")" ]
return the minimum within each group Parameters ---------- values : array_like, [keys, ...] values to take minimum of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "return", "the", "minimum", "within", "each", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L409-L427
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.max
def max(self, values, axis=0): """return the maximum within each group Parameters ---------- values : array_like, [keys, ...] values to take maximum of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, self.reduce(values, np.maximum, axis)
python
def max(self, values, axis=0): """return the maximum within each group Parameters ---------- values : array_like, [keys, ...] values to take maximum of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, self.reduce(values, np.maximum, axis)
[ "def", "max", "(", "self", ",", "values", ",", "axis", "=", "0", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "return", "self", ".", "unique", ",", "self", ".", "reduce", "(", "values", ",", "np", ".", "maximum", ",", "axis", ")" ]
return the maximum within each group Parameters ---------- values : array_like, [keys, ...] values to take maximum of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "return", "the", "maximum", "within", "each", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L429-L447
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.first
def first(self, values, axis=0): """return values at first occurrence of its associated key Parameters ---------- values : array_like, [keys, ...] values to pick the first value of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
python
def first(self, values, axis=0): """return values at first occurrence of its associated key Parameters ---------- values : array_like, [keys, ...] values to pick the first value of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
[ "def", "first", "(", "self", ",", "values", ",", "axis", "=", "0", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "return", "self", ".", "unique", ",", "np", ".", "take", "(", "values", ",", "self", ".", "index", ".", "sorter", "[", "self", ".", "index", ".", "start", "]", ",", "axis", ")" ]
return values at first occurrence of its associated key Parameters ---------- values : array_like, [keys, ...] values to pick the first value of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "return", "values", "at", "first", "occurance", "of", "its", "associated", "key" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L449-L467
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.last
def last(self, values, axis=0): """return values at last occurrence of its associated key Parameters ---------- values : array_like, [keys, ...] values to pick the last value of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
python
def last(self, values, axis=0): """return values at last occurrence of its associated key Parameters ---------- values : array_like, [keys, ...] values to pick the last value of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups """ values = np.asarray(values) return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
[ "def", "last", "(", "self", ",", "values", ",", "axis", "=", "0", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "return", "self", ".", "unique", ",", "np", ".", "take", "(", "values", ",", "self", ".", "index", ".", "sorter", "[", "self", ".", "index", ".", "stop", "-", "1", "]", ",", "axis", ")" ]
return values at last occurrence of its associated key Parameters ---------- values : array_like, [keys, ...] values to pick the last value of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
[ "return", "values", "at", "last", "occurance", "of", "its", "associated", "key" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L469-L487
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.any
def any(self, values, axis=0): """compute if any item evaluates to true in each group Parameters ---------- values : array_like, [keys, ...] values to take boolean predicate over per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...], bool value array, reduced over groups """ values = np.asarray(values) if not values.dtype == bool: values = values != 0 return self.unique, self.reduce(values, axis=axis) > 0
python
def any(self, values, axis=0): """compute if any item evaluates to true in each group Parameters ---------- values : array_like, [keys, ...] values to take boolean predicate over per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...], bool value array, reduced over groups """ values = np.asarray(values) if not values.dtype == bool: values = values != 0 return self.unique, self.reduce(values, axis=axis) > 0
[ "def", "any", "(", "self", ",", "values", ",", "axis", "=", "0", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "if", "not", "values", ".", "dtype", "==", "np", ".", "bool", ":", "values", "=", "values", "!=", "0", "return", "self", ".", "unique", ",", "self", ".", "reduce", "(", "values", ",", "axis", "=", "axis", ")", ">", "0" ]
compute if any item evaluates to true in each group Parameters ---------- values : array_like, [keys, ...] values to take boolean predicate over per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...], bool value array, reduced over groups
[ "compute", "if", "any", "item", "evaluates", "to", "true", "in", "each", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L489-L509
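any above reuses reduce: it sums the truth values per group and tests > 0 (the all method documented next multiplies instead and tests != 0). Illustrative usage:

import numpy as np
import numpy_indexed as npi

keys = np.array([0, 0, 1, 1])
flags = np.array([False, True, False, False])
unique, any_flags = npi.group_by(keys).any(flags)
print(any_flags)  # [ True False]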
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.all
def all(self, values, axis=0): """compute if all items evaluate to true in each group Parameters ---------- values : array_like, [keys, ...] values to take boolean predicate over per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...], bool value array, reduced over groups """ values = np.asarray(values) return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
python
def all(self, values, axis=0): """compute if all items evaluate to true in each group Parameters ---------- values : array_like, [keys, ...] values to take boolean predicate over per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...], bool value array, reduced over groups """ values = np.asarray(values) return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
[ "def", "all", "(", "self", ",", "values", ",", "axis", "=", "0", ")", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "return", "self", ".", "unique", ",", "self", ".", "reduce", "(", "values", ",", "axis", "=", "axis", ",", "operator", "=", "np", ".", "multiply", ")", "!=", "0" ]
compute if all items evaluate to true in each group Parameters ---------- values : array_like, [keys, ...] values to take boolean predicate over per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...], bool value array, reduced over groups
[ "compute", "if", "all", "items", "evaluates", "to", "true", "in", "each", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L511-L529
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.argmin
def argmin(self, values): """return the index into values corresponding to the minimum value of the group Parameters ---------- values : array_like, [keys] values to pick the argmin of per group Returns ------- unique: ndarray, [groups] unique keys argmin : ndarray, [groups] index into value array, representing the argmin per group """ keys, minima = self.min(values) minima = minima[self.inverse] # select the first occurrence of the minimum in each group index = as_index((self.inverse, values == minima)) return keys, index.sorter[index.start[-self.groups:]]
python
def argmin(self, values): """return the index into values corresponding to the minimum value of the group Parameters ---------- values : array_like, [keys] values to pick the argmin of per group Returns ------- unique: ndarray, [groups] unique keys argmin : ndarray, [groups] index into value array, representing the argmin per group """ keys, minima = self.min(values) minima = minima[self.inverse] # select the first occurrence of the minimum in each group index = as_index((self.inverse, values == minima)) return keys, index.sorter[index.start[-self.groups:]]
[ "def", "argmin", "(", "self", ",", "values", ")", ":", "keys", ",", "minima", "=", "self", ".", "min", "(", "values", ")", "minima", "=", "minima", "[", "self", ".", "inverse", "]", "# select the first occurence of the minimum in each group", "index", "=", "as_index", "(", "(", "self", ".", "inverse", ",", "values", "==", "minima", ")", ")", "return", "keys", ",", "index", ".", "sorter", "[", "index", ".", "start", "[", "-", "self", ".", "groups", ":", "]", "]" ]
return the index into values corresponding to the minimum value of the group Parameters ---------- values : array_like, [keys] values to pick the argmin of per group Returns ------- unique: ndarray, [groups] unique keys argmin : ndarray, [groups] index into value array, representing the argmin per group
[ "return", "the", "index", "into", "values", "corresponding", "to", "the", "minimum", "value", "of", "the", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L531-L550
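Usage sketch for argmin above; the returned indices point into the original (unsorted) values array (illustrative data):

import numpy as np
import numpy_indexed as npi

keys = np.array([0, 1, 0, 1])
values = np.array([5.0, 2.0, 3.0, 4.0])
unique, idx = npi.group_by(keys).argmin(values)
print(values[idx])  # [ 3. 2.]  -- the per-group minima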
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/grouping.py
GroupBy.argmax
def argmax(self, values): """return the index into values corresponding to the maximum value of the group Parameters ---------- values : array_like, [keys] values to pick the argmax of per group Returns ------- unique: ndarray, [groups] unique keys argmax : ndarray, [groups] index into value array, representing the argmax per group """ keys, maxima = self.max(values) maxima = maxima[self.inverse] # select the first occurrence of the maximum in each group index = as_index((self.inverse, values == maxima)) return keys, index.sorter[index.start[-self.groups:]]
python
def argmax(self, values): """return the index into values corresponding to the maximum value of the group Parameters ---------- values : array_like, [keys] values to pick the argmax of per group Returns ------- unique: ndarray, [groups] unique keys argmax : ndarray, [groups] index into value array, representing the argmax per group """ keys, maxima = self.max(values) maxima = maxima[self.inverse] # select the first occurrence of the maximum in each group index = as_index((self.inverse, values == maxima)) return keys, index.sorter[index.start[-self.groups:]]
[ "def", "argmax", "(", "self", ",", "values", ")", ":", "keys", ",", "maxima", "=", "self", ".", "max", "(", "values", ")", "maxima", "=", "maxima", "[", "self", ".", "inverse", "]", "# select the first occurence of the maximum in each group", "index", "=", "as_index", "(", "(", "self", ".", "inverse", ",", "values", "==", "maxima", ")", ")", "return", "keys", ",", "index", ".", "sorter", "[", "index", ".", "start", "[", "-", "self", ".", "groups", ":", "]", "]" ]
return the index into values corresponding to the maximum value of the group Parameters ---------- values : array_like, [keys] values to pick the argmax of per group Returns ------- unique: ndarray, [groups] unique keys argmax : ndarray, [groups] index into value array, representing the argmax per group
[ "return", "the", "index", "into", "values", "corresponding", "to", "the", "maximum", "value", "of", "the", "group" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/grouping.py#L552-L571
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/index.py
as_index
def as_index(keys, axis=semantics.axis_default, base=False, stable=True, lex_as_struct=False): """ casting rules for a keys object to an index object the preferred semantics is that keys is a sequence of key objects, except when keys is an instance of tuple, in which case the zipped elements of the tuple are the key objects the axis keyword specifies the axis which enumerates the keys if axis is None, the keys array is flattened if axis is 0, the first axis enumerates the keys which of these two is the default depends on whether backwards_compatible == True if base==True, the most basic index possible is constructed. this avoids an indirect sort; if it isn't required, this has better performance """ if isinstance(keys, Index): if type(keys) is BaseIndex and base==False: keys = keys.keys #need to upcast to an indirectly sorted index type else: return keys #already done here if isinstance(keys, tuple): if lex_as_struct: keys = as_struct_array(*keys) else: return LexIndex(keys, stable) try: keys = np.asarray(keys) except: raise TypeError('Given object does not form a valid set of keys') if axis is None: keys = keys.flatten() if keys.ndim==1: if base: return BaseIndex(keys) else: return Index(keys, stable=stable) else: return ObjectIndex(keys, axis, stable=stable)
python
def as_index(keys, axis=semantics.axis_default, base=False, stable=True, lex_as_struct=False): """ casting rules for a keys object to an index object the preferred semantics is that keys is a sequence of key objects, except when keys is an instance of tuple, in which case the zipped elements of the tuple are the key objects the axis keyword specifies the axis which enumerates the keys if axis is None, the keys array is flattened if axis is 0, the first axis enumerates the keys which of these two is the default depends on whether backwards_compatible == True if base==True, the most basic index possible is constructed. this avoids an indirect sort; if it isn't required, this has better performance """ if isinstance(keys, Index): if type(keys) is BaseIndex and base==False: keys = keys.keys #need to upcast to an indirectly sorted index type else: return keys #already done here if isinstance(keys, tuple): if lex_as_struct: keys = as_struct_array(*keys) else: return LexIndex(keys, stable) try: keys = np.asarray(keys) except: raise TypeError('Given object does not form a valid set of keys') if axis is None: keys = keys.flatten() if keys.ndim==1: if base: return BaseIndex(keys) else: return Index(keys, stable=stable) else: return ObjectIndex(keys, axis, stable=stable)
[ "def", "as_index", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ",", "base", "=", "False", ",", "stable", "=", "True", ",", "lex_as_struct", "=", "False", ")", ":", "if", "isinstance", "(", "keys", ",", "Index", ")", ":", "if", "type", "(", "keys", ")", "is", "BaseIndex", "and", "base", "==", "False", ":", "keys", "=", "keys", ".", "keys", "#need to upcast to an indirectly sorted index type", "else", ":", "return", "keys", "#already done here", "if", "isinstance", "(", "keys", ",", "tuple", ")", ":", "if", "lex_as_struct", ":", "keys", "=", "as_struct_array", "(", "*", "keys", ")", "else", ":", "return", "LexIndex", "(", "keys", ",", "stable", ")", "try", ":", "keys", "=", "np", ".", "asarray", "(", "keys", ")", "except", ":", "raise", "TypeError", "(", "'Given object does not form a valid set of keys'", ")", "if", "axis", "is", "None", ":", "keys", "=", "keys", ".", "flatten", "(", ")", "if", "keys", ".", "ndim", "==", "1", ":", "if", "base", ":", "return", "BaseIndex", "(", "keys", ")", "else", ":", "return", "Index", "(", "keys", ",", "stable", "=", "stable", ")", "else", ":", "return", "ObjectIndex", "(", "keys", ",", "axis", ",", "stable", "=", "stable", ")" ]
casting rules for a keys object to an index object the preferred semantics is that keys is a sequence of key objects, except when keys is an instance of tuple, in which case the zipped elements of the tuple are the key objects the axis keyword specifies the axis which enumerates the keys if axis is None, the keys array is flattened if axis is 0, the first axis enumerates the keys which of these two is the default depends on whether backwards_compatible == True if base==True, the most basic index possible is constructed. this avoids an indirect sort; if it isn't required, this has better performance
[ "casting", "rules", "for", "a", "keys", "object", "to", "an", "index", "object" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/index.py#L288-L327
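A small sketch of the casting rules above (assuming as_index is importable from the package root, which its use throughout funcs.py suggests; data is illustrative):

import numpy as np
from numpy_indexed import as_index

idx = as_index(np.array([30, 10, 30, 20]))                  # 1d array -> Index
lex = as_index((np.array([0, 0, 1]), np.array([5, 4, 5])))  # tuple of arrays -> LexIndex
same = as_index(idx)                                        # an existing Index passes through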
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/index.py
Index.inverse
def inverse(self): """return index array that maps unique values back to original space. unique[inverse]==keys""" inv = np.empty(self.size, int) inv[self.sorter] = self.sorted_group_rank_per_key return inv
python
def inverse(self): """return index array that maps unique values back to original space. unique[inverse]==keys""" inv = np.empty(self.size, int) inv[self.sorter] = self.sorted_group_rank_per_key return inv
[ "def", "inverse", "(", "self", ")", ":", "inv", "=", "np", ".", "empty", "(", "self", ".", "size", ",", "np", ".", "int", ")", "inv", "[", "self", ".", "sorter", "]", "=", "self", ".", "sorted_group_rank_per_key", "return", "inv" ]
return index array that maps unique values back to original space. unique[inverse]==keys
[ "return", "index", "array", "that", "maps", "unique", "values", "back", "to", "original", "space", ".", "unique", "[", "inverse", "]", "==", "keys" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/index.py#L142-L146
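The defining property stated in the docstring above, unique[inverse] == keys, as a runnable check (same import assumption as before; data is illustrative):

import numpy as np
from numpy_indexed import as_index

keys = np.array([30, 10, 30, 20])
idx = as_index(keys)
assert np.array_equal(idx.unique[idx.inverse], keys)  # reconstructs keys from uniques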
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/index.py
Index.rank
def rank(self): """how high in sorted list each key is. inverse permutation of sorter, such that sorted[rank]==keys""" r = np.empty(self.size, int) r[self.sorter] = np.arange(self.size) return r
python
def rank(self): """how high in sorted list each key is. inverse permutation of sorter, such that sorted[rank]==keys""" r = np.empty(self.size, int) r[self.sorter] = np.arange(self.size) return r
[ "def", "rank", "(", "self", ")", ":", "r", "=", "np", ".", "empty", "(", "self", ".", "size", ",", "np", ".", "int", ")", "r", "[", "self", ".", "sorter", "]", "=", "np", ".", "arange", "(", "self", ".", "size", ")", "return", "r" ]
how high in sorted list each key is. inverse permutation of sorter, such that sorted[rank]==keys
[ "how", "high", "in", "sorted", "list", "each", "key", "is", ".", "inverse", "permutation", "of", "sorter", "such", "that", "sorted", "[", "rank", "]", "==", "keys" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/index.py#L149-L153
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/index.py
LexIndex.unique
def unique(self): """returns a tuple of unique key columns""" return tuple( (array_as_typed(s, k.dtype, k.shape) if k.ndim>1 else s)[self.start] for s, k in zip(self.sorted, self._keys))
python
def unique(self): """returns a tuple of unique key columns""" return tuple( (array_as_typed(s, k.dtype, k.shape) if k.ndim>1 else s)[self.start] for s, k in zip(self.sorted, self._keys))
[ "def", "unique", "(", "self", ")", ":", "return", "tuple", "(", "(", "array_as_typed", "(", "s", ",", "k", ".", "dtype", ",", "k", ".", "shape", ")", "if", "k", ".", "ndim", ">", "1", "else", "s", ")", "[", "self", ".", "start", "]", "for", "s", ",", "k", "in", "zip", "(", "self", ".", "sorted", ",", "self", ".", "_keys", ")", ")" ]
returns a tuple of unique key columns
[ "returns", "a", "tuple", "of", "unique", "key", "columns" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/index.py#L236-L240
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/utility.py
as_struct_array
def as_struct_array(*columns): """pack a sequence of columns into a recarray Parameters ---------- columns : sequence of key objects Returns ------- data : recarray recarray containing the input columns as struct fields """ columns = [np.asarray(c) for c in columns] rows = len(columns[0]) names = ['f'+str(i) for i in range(len(columns))] dtype = [(names[i], c.dtype, c.shape[1:]) for i, c in enumerate(columns)] data = np.empty(rows, dtype) for i, c in enumerate(columns): data[names[i]] = c return data
python
def as_struct_array(*columns): """pack a sequence of columns into a recarray Parameters ---------- columns : sequence of key objects Returns ------- data : recarray recarray containing the input columns as struct fields """ columns = [np.asarray(c) for c in columns] rows = len(columns[0]) names = ['f'+str(i) for i in range(len(columns))] dtype = [(names[i], c.dtype, c.shape[1:]) for i, c in enumerate(columns)] data = np.empty(rows, dtype) for i, c in enumerate(columns): data[names[i]] = c return data
[ "def", "as_struct_array", "(", "*", "columns", ")", ":", "columns", "=", "[", "np", ".", "asarray", "(", "c", ")", "for", "c", "in", "columns", "]", "rows", "=", "len", "(", "columns", "[", "0", "]", ")", "names", "=", "[", "'f'", "+", "str", "(", "i", ")", "for", "i", "in", "range", "(", "len", "(", "columns", ")", ")", "]", "dtype", "=", "[", "(", "names", "[", "i", "]", ",", "c", ".", "dtype", ",", "c", ".", "shape", "[", "1", ":", "]", ")", "for", "i", ",", "c", "in", "enumerate", "(", "columns", ")", "]", "data", "=", "np", ".", "empty", "(", "rows", ",", "dtype", ")", "for", "i", ",", "c", "in", "enumerate", "(", "columns", ")", ":", "data", "[", "names", "[", "i", "]", "]", "=", "c", "return", "data" ]
pack a sequence of columns into a recarray Parameters ---------- columns : sequence of key objects Returns ------- data : recarray recarray containing the input columns as struct fields
[ "pack", "a", "sequence", "of", "columns", "into", "a", "recarray" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/utility.py#L11-L31
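Usage sketch for as_struct_array above, imported via the module path shown; packing columns into one structured array lets each row act as a single comparable item (illustrative data):

import numpy as np
from numpy_indexed.utility import as_struct_array

a = np.array([1, 1, 2])
b = np.array([4.0, 4.0, 5.0])
packed = as_struct_array(a, b)  # one record per row, auto-named fields
print(packed.dtype.names)       # ('f0', 'f1')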
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/utility.py
axis_as_object
def axis_as_object(arr, axis=-1): """cast the given axis of an array to a void object if the axis to be cast is contiguous, a view is returned, otherwise a copy is made this is useful for efficiently sorting by the content of an axis, for instance Parameters ---------- arr : ndarray array to view as void object type axis : int axis to view as a void object type Returns ------- ndarray array with the given axis viewed as a void object """ shape = arr.shape # make axis to be viewed as a void object as contiguous items arr = np.ascontiguousarray(np.rollaxis(arr, axis, arr.ndim)) # number of bytes in each void object nbytes = arr.dtype.itemsize * shape[axis] # void type with the correct number of bytes voidtype = np.dtype((np.void, nbytes)) # return the view as such, with the reduced shape return arr.view(voidtype).reshape(np.delete(shape, axis))
python
def axis_as_object(arr, axis=-1): """cast the given axis of an array to a void object if the axis to be cast is contiguous, a view is returned, otherwise a copy is made this is useful for efficiently sorting by the content of an axis, for instance Parameters ---------- arr : ndarray array to view as void object type axis : int axis to view as a void object type Returns ------- ndarray array with the given axis viewed as a void object """ shape = arr.shape # make axis to be viewed as a void object as contiguous items arr = np.ascontiguousarray(np.rollaxis(arr, axis, arr.ndim)) # number of bytes in each void object nbytes = arr.dtype.itemsize * shape[axis] # void type with the correct number of bytes voidtype = np.dtype((np.void, nbytes)) # return the view as such, with the reduced shape return arr.view(voidtype).reshape(np.delete(shape, axis))
[ "def", "axis_as_object", "(", "arr", ",", "axis", "=", "-", "1", ")", ":", "shape", "=", "arr", ".", "shape", "# make axis to be viewed as a void object as contiguous items", "arr", "=", "np", ".", "ascontiguousarray", "(", "np", ".", "rollaxis", "(", "arr", ",", "axis", ",", "arr", ".", "ndim", ")", ")", "# number of bytes in each void object", "nbytes", "=", "arr", ".", "dtype", ".", "itemsize", "*", "shape", "[", "axis", "]", "# void type with the correct number of bytes", "voidtype", "=", "np", ".", "dtype", "(", "(", "np", ".", "void", ",", "nbytes", ")", ")", "# return the view as such, with the reduced shape", "return", "arr", ".", "view", "(", "voidtype", ")", ".", "reshape", "(", "np", ".", "delete", "(", "shape", ",", "axis", ")", ")" ]
cast the given axis of an array to a void object if the axis to be cast is contiguous, a view is returned, otherwise a copy is made this is useful for efficiently sorting by the content of an axis, for instance Parameters ---------- arr : ndarray array to view as void object type axis : int axis to view as a void object type Returns ------- ndarray array with the given axis viewed as a void object
[ "cast", "the", "given", "axis", "of", "an", "array", "to", "a", "void", "object", "if", "the", "axis", "to", "be", "cast", "is", "contiguous", "a", "view", "is", "returned", "otherwise", "a", "copy", "is", "made", "this", "is", "useful", "for", "efficiently", "sorting", "by", "the", "content", "of", "an", "axis", "for", "instance" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/utility.py#L34-L59
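Usage sketch pairing axis_as_object with its inverse object_as_axis (documented next). Viewing each row as one void scalar is the classic trick for treating rows as single items, e.g. for finding unique rows (illustrative data):

import numpy as np
from numpy_indexed.utility import axis_as_object, object_as_axis

rows = np.array([[3, 4], [1, 2], [3, 4]])
v = axis_as_object(rows, axis=-1)            # shape (3,), one void item per row
u, inv = np.unique(v, return_inverse=True)   # duplicate rows collapse: inv -> [1 0 1]
restored = object_as_axis(v, rows.dtype, axis=-1)
assert np.array_equal(restored, rows)        # round-trip recovers the original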
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/utility.py
object_as_axis
def object_as_axis(arr, dtype, axis=-1): """ cast an array of void objects to a typed axis Parameters ---------- arr : ndarray, [ndim], void array of type np.void dtype : numpy dtype object the output dtype to cast the input array to axis : int position to insert the newly formed axis into Returns ------- ndarray, [ndim+1], dtype output array cast to given dtype """ # view the void objects as typed elements arr = arr.view(dtype).reshape(arr.shape + (-1,)) # put the axis in the specified location return np.rollaxis(arr, -1, axis)
python
def object_as_axis(arr, dtype, axis=-1): """ cast an array of void objects to a typed axis Parameters ---------- arr : ndarray, [ndim], void array of type np.void dtype : numpy dtype object the output dtype to cast the input array to axis : int position to insert the newly formed axis into Returns ------- ndarray, [ndim+1], dtype output array cast to given dtype """ # view the void objects as typed elements arr = arr.view(dtype).reshape(arr.shape + (-1,)) # put the axis in the specified location return np.rollaxis(arr, -1, axis)
[ "def", "object_as_axis", "(", "arr", ",", "dtype", ",", "axis", "=", "-", "1", ")", ":", "# view the void objects as typed elements", "arr", "=", "arr", ".", "view", "(", "dtype", ")", ".", "reshape", "(", "arr", ".", "shape", "+", "(", "-", "1", ",", ")", ")", "# put the axis in the specified location", "return", "np", ".", "rollaxis", "(", "arr", ",", "-", "1", ",", "axis", ")" ]
cast an array of void objects to a typed axis Parameters ---------- arr : ndarray, [ndim], void array of type np.void dtype : numpy dtype object the output dtype to cast the input array to axis : int position to insert the newly formed axis into Returns ------- ndarray, [ndim+1], dtype output array cast to given dtype
[ "cast", "an", "array", "of", "void", "objects", "to", "a", "typed", "axis" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/utility.py#L62-L83
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
count
def count(keys, axis=semantics.axis_default): """count the number of times each key occurs in the input set Arguments --------- keys : indexable object Returns ------- unique : ndarray, [groups, ...] unique keys count : ndarray, [groups], int the number of times each key occurs in the input set Notes ----- Can be seen as numpy work-alike of collections.Counter Alternatively, as sparse equivalent of count_table """ index = as_index(keys, axis, base=True) return index.unique, index.count
python
def count(keys, axis=semantics.axis_default): """count the number of times each key occurs in the input set Arguments --------- keys : indexable object Returns ------- unique : ndarray, [groups, ...] unique keys count : ndarray, [groups], int the number of times each key occurs in the input set Notes ----- Can be seen as numpy work-alike of collections.Counter Alternatively, as sparse equivalent of count_table """ index = as_index(keys, axis, base=True) return index.unique, index.count
[ "def", "count", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "index", "=", "as_index", "(", "keys", ",", "axis", ",", "base", "=", "True", ")", "return", "index", ".", "unique", ",", "index", ".", "count" ]
count the number of times each key occurs in the input set Arguments --------- keys : indexable object Returns ------- unique : ndarray, [groups, ...] unique keys count : ndarray, [groups], int the number of times each key occurs in the input set Notes ----- Can be seen as numpy work-alike of collections.Counter Alternatively, as sparse equivalent of count_table
[ "count", "the", "number", "of", "times", "each", "key", "occurs", "in", "the", "input", "set" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L19-L39
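Usage sketch for count above (illustrative data):

import numpy as np
import numpy_indexed as npi

unique, counts = npi.count(np.array(list('abracadabra')))
print(unique)  # ['a' 'b' 'c' 'd' 'r']
print(counts)  # [5 2 1 1 2]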
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
count_table
def count_table(*keys): """count the number of times each key occurs in the input set Arguments --------- keys : tuple of indexable objects, each having the same number of items Returns ------- unique : tuple of ndarray, [groups, ...] unique keys for each input item they form the axes labels of the table table : ndarray, [keys[0].groups, ... keys[n].groups], int the number of times each key-combination occurs in the input set Notes ----- Equivalent to R's pivot table or pandas 'crosstab' Alternatively, dense equivalent of the count function Should we add weights option? Or better yet; what about general reductions over key-grids? """ indices = [as_index(k, axis=0) for k in keys] uniques = [i.unique for i in indices] inverses = [i.inverse for i in indices] shape = [i.groups for i in indices] table = np.zeros(shape, int) np.add.at(table, inverses, 1) return tuple(uniques), table
python
def count_table(*keys): """count the number of times each key occurs in the input set Arguments --------- keys : tuple of indexable objects, each having the same number of items Returns ------- unique : tuple of ndarray, [groups, ...] unique keys for each input item they form the axes labels of the table table : ndarray, [keys[0].groups, ... keys[n].groups], int the number of times each key-combination occurs in the input set Notes ----- Equivalent to R's pivot table or pandas 'crosstab' Alternatively, dense equivalent of the count function Should we add weights option? Or better yet; what about general reductions over key-grids? """ indices = [as_index(k, axis=0) for k in keys] uniques = [i.unique for i in indices] inverses = [i.inverse for i in indices] shape = [i.groups for i in indices] table = np.zeros(shape, int) np.add.at(table, inverses, 1) return tuple(uniques), table
[ "def", "count_table", "(", "*", "keys", ")", ":", "indices", "=", "[", "as_index", "(", "k", ",", "axis", "=", "0", ")", "for", "k", "in", "keys", "]", "uniques", "=", "[", "i", ".", "unique", "for", "i", "in", "indices", "]", "inverses", "=", "[", "i", ".", "inverse", "for", "i", "in", "indices", "]", "shape", "=", "[", "i", ".", "groups", "for", "i", "in", "indices", "]", "table", "=", "np", ".", "zeros", "(", "shape", ",", "np", ".", "int", ")", "np", ".", "add", ".", "at", "(", "table", ",", "inverses", ",", "1", ")", "return", "tuple", "(", "uniques", ")", ",", "table" ]
count the number of times each key occurs in the input set Arguments --------- keys : tuple of indexable objects, each having the same number of items Returns ------- unique : tuple of ndarray, [groups, ...] unique keys for each input item they form the axes labels of the table table : ndarray, [keys[0].groups, ... keys[n].groups], int the number of times each key-combination occurs in the input set Notes ----- Equivalent to R's pivot table or pandas 'crosstab' Alternatively, dense equivalent of the count function Should we add weights option? Or better yet; what about general reductions over key-grids?
[ "count", "the", "number", "of", "times", "each", "key", "occurs", "in", "the", "input", "set" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L42-L70
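Usage sketch for count_table above; the returned uniques label the table axes (illustrative data):

import numpy as np
import numpy_indexed as npi

sex = np.array(['m', 'f', 'm', 'f'])
smoker = np.array([0, 0, 1, 0])
(u_sex, u_smoker), table = npi.count_table(sex, smoker)
print(u_sex, u_smoker)  # ['f' 'm'] [0 1]
print(table)            # [[2 0]
                        #  [1 1]]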
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
binning
def binning(keys, start, end, count, axes=None): """Perform binning over the given axes of the keys Parameters ---------- keys : indexable or tuple of indexable Examples -------- binning(np.random.rand(100), 0, 1, 10) """ if isinstance(keys, tuple): n_keys = len(keys) else: n_keys = 1 bins = np.linspace(start, end, count+1, endpoint=True) idx = np.searchsorted(bins, keys) if axes is None: axes = [-1]
python
def binning(keys, start, end, count, axes=None): """Perform binning over the given axes of the keys Parameters ---------- keys : indexable or tuple of indexable Examples -------- binning(np.random.rand(100), 0, 1, 10) """ if isinstance(keys, tuple): n_keys = len(keys) else: n_keys = 1 bins = np.linspace(start, end, count+1, endpoint=True) idx = np.searchsorted(bins, keys) if axes is None: axes = [-1]
[ "def", "binning", "(", "keys", ",", "start", ",", "end", ",", "count", ",", "axes", "=", "None", ")", ":", "if", "isinstance", "(", "keys", ",", "tuple", ")", ":", "n_keys", "=", "len", "(", "keys", ")", "else", ":", "n_keys", "=", "1", "bins", "=", "np", ".", "linspace", "(", "start", ",", "end", ",", "count", "+", "1", ",", "endpoint", "=", "True", ")", "idx", "=", "np", ".", "searchsorted", "(", "bins", ",", "keys", ")", "if", "axes", "is", "None", ":", "axes", "=", "[", "-", "1", "]" ]
Perform binning over the given axes of the keys Parameters ---------- keys : indexable or tuple of indexable Examples -------- binning(np.random.rand(100), 0, 1, 10)
[ "Perform", "binning", "over", "the", "given", "axes", "of", "the", "keys" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L73-L92
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
multiplicity
def multiplicity(keys, axis=semantics.axis_default): """return the multiplicity of each key, or how often it occurs in the set Parameters ---------- keys : indexable object Returns ------- ndarray, [keys.size], int the number of times each input item occurs in the set """ index = as_index(keys, axis) return index.count[index.inverse]
python
def multiplicity(keys, axis=semantics.axis_default): """return the multiplicity of each key, or how often it occurs in the set Parameters ---------- keys : indexable object Returns ------- ndarray, [keys.size], int the number of times each input item occurs in the set """ index = as_index(keys, axis) return index.count[index.inverse]
[ "def", "multiplicity", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "index", "=", "as_index", "(", "keys", ",", "axis", ")", "return", "index", ".", "count", "[", "index", ".", "inverse", "]" ]
return the multiplicity of each key, or how often it occurs in the set Parameters ---------- keys : indexable object Returns ------- ndarray, [keys.size], int the number of times each input item occurs in the set
[ "return", "the", "multiplicity", "of", "each", "key", "or", "how", "often", "it", "occurs", "in", "the", "set" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L183-L196
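A short sketch of multiplicity, again assuming the package-level export numpy_indexed:

import numpy as np
import numpy_indexed as npi  # assumed package-level export

keys = np.array([1, 1, 2, 3, 3, 3])
# each element is replaced by how often it occurs in the whole array
print(npi.multiplicity(keys))  # [2 2 1 3 3 3]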
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
rank
def rank(keys, axis=semantics.axis_default): """where each item is in the pecking order. Parameters ---------- keys : indexable object Returns ------- ndarray, [keys.size], int unique integers, ranking the sorting order Notes ----- we should have that index.sorted[index.rank] == keys """ index = as_index(keys, axis) return index.rank
python
def rank(keys, axis=semantics.axis_default): """where each item is in the pecking order. Parameters ---------- keys : indexable object Returns ------- ndarray, [keys.size], int unique integers, ranking the sorting order Notes ----- we should have that index.sorted[index.rank] == keys """ index = as_index(keys, axis) return index.rank
[ "def", "rank", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "index", "=", "as_index", "(", "keys", ",", "axis", ")", "return", "index", ".", "rank" ]
where each item is in the pecking order. Parameters ---------- keys : indexable object Returns ------- ndarray, [keys.size], int unique integers, ranking the sorting order Notes ----- we should have that index.sorted[index.rank] == keys
[ "where", "each", "item", "is", "in", "the", "pecking", "order", "." ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L199-L216
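A sketch of rank and its documented invariant, under the same assumed import:

import numpy as np
import numpy_indexed as npi  # assumed package-level export

keys = np.array([30, 10, 20])
r = npi.rank(keys)
print(r)  # [2 0 1]
# sorted keys, indexed by the rank, reproduce the input
# (the docstring's index.sorted[index.rank] == keys)
assert np.array_equal(np.sort(keys)[r], keys)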
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
mode
def mode(keys, axis=semantics.axis_default, weights=None, return_indices=False): """compute the mode, or most frequently occurring key in a set Parameters ---------- keys : ndarray, [n_keys, ...] input array. elements of 'keys' can have arbitrary shape or dtype weights : ndarray, [n_keys], optional if given, the contribution of each key to the mode is weighted by the given weights return_indices : bool if True, return all indices such that keys[indices]==mode holds Returns ------- mode : ndarray, [...] the most frequently occurring key in the key sequence indices : ndarray, [mode_multiplicity], int, optional if return_indices is True, all indices such that keys[indices]==mode holds """ index = as_index(keys, axis) if weights is None: unique, weights = count(index) else: unique, weights = group_by(index).sum(weights) bin = np.argmax(weights) _mode = unique[bin] # FIXME: replace with index.take for lexindex compatibility? if return_indices: indices = index.sorter[index.start[bin]: index.stop[bin]] return _mode, indices else: return _mode
python
def mode(keys, axis=semantics.axis_default, weights=None, return_indices=False): """compute the mode, or most frequently occurring key in a set Parameters ---------- keys : ndarray, [n_keys, ...] input array. elements of 'keys' can have arbitrary shape or dtype weights : ndarray, [n_keys], optional if given, the contribution of each key to the mode is weighted by the given weights return_indices : bool if True, return all indices such that keys[indices]==mode holds Returns ------- mode : ndarray, [...] the most frequently occurring key in the key sequence indices : ndarray, [mode_multiplicity], int, optional if return_indices is True, all indices such that keys[indices]==mode holds """ index = as_index(keys, axis) if weights is None: unique, weights = count(index) else: unique, weights = group_by(index).sum(weights) bin = np.argmax(weights) _mode = unique[bin] # FIXME: replace with index.take for lexindex compatibility? if return_indices: indices = index.sorter[index.start[bin]: index.stop[bin]] return _mode, indices else: return _mode
[ "def", "mode", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ",", "weights", "=", "None", ",", "return_indices", "=", "False", ")", ":", "index", "=", "as_index", "(", "keys", ",", "axis", ")", "if", "weights", "is", "None", ":", "unique", ",", "weights", "=", "count", "(", "index", ")", "else", ":", "unique", ",", "weights", "=", "group_by", "(", "index", ")", ".", "sum", "(", "weights", ")", "bin", "=", "np", ".", "argmax", "(", "weights", ")", "_mode", "=", "unique", "[", "bin", "]", "# FIXME: replace with index.take for lexindex compatibility?", "if", "return_indices", ":", "indices", "=", "index", ".", "sorter", "[", "index", ".", "start", "[", "bin", "]", ":", "index", ".", "stop", "[", "bin", "]", "]", "return", "_mode", ",", "indices", "else", ":", "return", "_mode" ]
compute the mode, or most frequently occurring key in a set Parameters ---------- keys : ndarray, [n_keys, ...] input array. elements of 'keys' can have arbitrary shape or dtype weights : ndarray, [n_keys], optional if given, the contribution of each key to the mode is weighted by the given weights return_indices : bool if True, return all indices such that keys[indices]==mode holds Returns ------- mode : ndarray, [...] the most frequently occurring key in the key sequence indices : ndarray, [mode_multiplicity], int, optional if return_indices is True, all indices such that keys[indices]==mode holds
[ "compute", "the", "mode", "or", "most", "frequent", "occuring", "key", "in", "a", "set" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L219-L249
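A sketch of mode with and without return_indices, under the same assumed import:

import numpy as np
import numpy_indexed as npi  # assumed package-level export

keys = np.array([4, 7, 4, 7, 4])
print(npi.mode(keys))  # 4, the most frequently occurring key

m, idx = npi.mode(keys, return_indices=True)
# idx holds every position where the mode occurs (order not guaranteed)
assert m == 4 and np.array_equal(np.sort(idx), [0, 2, 4])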
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
incidence
def incidence(boundary): """ given an Nxm matrix containing boundary info between simplices, compute incidence info matrix not very reusable; should probably not be in this lib """ return GroupBy(boundary).split(np.arange(boundary.size) // boundary.shape[1])
python
def incidence(boundary): """ given an Nxm matrix containing boundary info between simplices, compute incidence info matrix not very reusable; should probably not be in this lib """ return GroupBy(boundary).split(np.arange(boundary.size) // boundary.shape[1])
[ "def", "incidence", "(", "boundary", ")", ":", "return", "GroupBy", "(", "boundary", ")", ".", "split", "(", "np", ".", "arange", "(", "boundary", ".", "size", ")", "//", "boundary", ".", "shape", "[", "1", "]", ")" ]
given an Nxm matrix containing boundary info between simplices, compute incidence info matrix not very reusable; should probably not be in this lib
[ "given", "an", "Nxm", "matrix", "containing", "boundary", "info", "between", "simplices", "compute", "indidence", "info", "matrix", "not", "very", "reusable", ";", "should", "probably", "not", "be", "in", "this", "lib" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L267-L273
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
all_unique
def all_unique(keys, axis=semantics.axis_default): """Returns true if all keys are unique""" index = as_index(keys, axis) return index.groups == index.size
python
def all_unique(keys, axis=semantics.axis_default): """Returns true if all keys are unique""" index = as_index(keys, axis) return index.groups == index.size
[ "def", "all_unique", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "index", "=", "as_index", "(", "keys", ",", "axis", ")", "return", "index", ".", "groups", "==", "index", ".", "size" ]
Returns true if all keys are unique
[ "Returns", "true", "if", "all", "keys", "are", "unique" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L276-L279
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
any_unique
def any_unique(keys, axis=semantics.axis_default): """returns true if any of the keys is unique""" index = as_index(keys, axis) return np.any(index.count == 1)
python
def any_unique(keys, axis=semantics.axis_default): """returns true if any of the keys is unique""" index = as_index(keys, axis) return np.any(index.count == 1)
[ "def", "any_unique", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "index", "=", "as_index", "(", "keys", ",", "axis", ")", "return", "np", ".", "any", "(", "index", ".", "count", "==", "1", ")" ]
returns true if any of the keys is unique
[ "returns", "true", "if", "any", "of", "the", "keys", "is", "unique" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L282-L285
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
all_equal
def all_equal(keys, axis=semantics.axis_default): """returns true if all keys are equal""" index = as_index(keys, axis) return index.groups == 1
python
def all_equal(keys, axis=semantics.axis_default): """returns true if all keys are equal""" index = as_index(keys, axis) return index.groups == 1
[ "def", "all_equal", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "index", "=", "as_index", "(", "keys", ",", "axis", ")", "return", "index", ".", "groups", "==", "1" ]
returns true if all keys are equal
[ "returns", "true", "of", "all", "keys", "are", "equal" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L293-L296
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
is_uniform
def is_uniform(keys, axis=semantics.axis_default): """returns true if all keys have equal multiplicity""" index = as_index(keys, axis) return index.uniform
python
def is_uniform(keys, axis=semantics.axis_default): """returns true if all keys have equal multiplicity""" index = as_index(keys, axis) return index.uniform
[ "def", "is_uniform", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "index", "=", "as_index", "(", "keys", ",", "axis", ")", "return", "index", ".", "uniform" ]
returns true if all keys have equal multiplicity
[ "returns", "true", "if", "all", "keys", "have", "equal", "multiplicity" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L299-L302
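The four predicates above (all_unique, any_unique, all_equal, is_uniform) are one-liners over the same Index machinery; a combined sketch, under the same assumed import:

import numpy_indexed as npi  # assumed package-level export

print(npi.all_unique([1, 2, 3]))     # True: every key occurs exactly once
print(npi.any_unique([1, 1, 2]))     # True: the key 2 occurs exactly once
print(npi.all_equal([5, 5, 5]))      # True: only one distinct key
print(npi.is_uniform([1, 1, 2, 2]))  # True: every key has multiplicity 2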
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
Table.get_inverses
def get_inverses(self, keys): """ Returns ------- Tuple of inverse indices """ return tuple([as_index(k, axis=0).inverse for k in keys])
python
def get_inverses(self, keys): """ Returns ------- Tuple of inverse indices """ return tuple([as_index(k, axis=0).inverse for k in keys])
[ "def", "get_inverses", "(", "self", ",", "keys", ")", ":", "return", "tuple", "(", "[", "as_index", "(", "k", ",", "axis", "=", "0", ")", ".", "inverse", "for", "k", "in", "keys", "]", ")" ]
Returns ------- Tuple of inverse indices
[ "Returns", "-------", "Tuple", "of", "inverse", "indices" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L109-L115
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
Table.unique
def unique(self, values): """Place each entry in a table, while asserting that each entry occurs once""" _, count = self.count() if not np.array_equiv(count, 1): raise ValueError("Not every entry in the table is assigned a unique value") return self.sum(values)
python
def unique(self, values): """Place each entry in a table, while asserting that each entry occurs once""" _, count = self.count() if not np.array_equiv(count, 1): raise ValueError("Not every entry in the table is assigned a unique value") return self.sum(values)
[ "def", "unique", "(", "self", ",", "values", ")", ":", "_", ",", "count", "=", "self", ".", "count", "(", ")", "if", "not", "np", ".", "array_equiv", "(", "count", ",", "1", ")", ":", "raise", "ValueError", "(", "\"Not every entry in the table is assigned a unique value\"", ")", "return", "self", ".", "sum", "(", "values", ")" ]
Place each entry in a table, while asserting that each entry occurs once
[ "Place", "each", "entry", "in", "a", "table", "while", "asserting", "that", "each", "entry", "occurs", "once" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L175-L180
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/arraysetops.py
unique
def unique(keys, axis=semantics.axis_default, return_index=False, return_inverse=False, return_count=False): """compute the set of unique keys Parameters ---------- keys : indexable key object keys object to find unique keys within axis : int if keys is a multi-dimensional array, the axis to regard as the sequence of key objects return_index : bool if True, return indexes such that keys[index] == unique return_inverse : bool if True, return the indices such that unique[inverse] == keys return_count : bool if True, return the number of times each unique key occurs in the input Notes ----- The kwargs are there to provide a backwards compatible interface to numpy.unique, but arguably, it is cleaner to call index and its properties directly, should more than unique values be desired as output """ stable = return_index or return_inverse index = as_index(keys, axis, base = not stable, stable = stable) ret = index.unique, if return_index: ret = ret + (index.index,) if return_inverse: ret = ret + (index.inverse,) if return_count: ret = ret + (index.count,) return ret[0] if len(ret) == 1 else ret
python
def unique(keys, axis=semantics.axis_default, return_index=False, return_inverse=False, return_count=False): """compute the set of unique keys Parameters ---------- keys : indexable key object keys object to find unique keys within axis : int if keys is a multi-dimensional array, the axis to regard as the sequence of key objects return_index : bool if True, return indexes such that keys[index] == unique return_inverse : bool if True, return the indices such that unique[inverse] == keys return_count : bool if True, return the number of times each unique key occurs in the input Notes ----- The kwargs are there to provide a backwards compatible interface to numpy.unique, but arguably, it is cleaner to call index and its properties directly, should more than unique values be desired as output """ stable = return_index or return_inverse index = as_index(keys, axis, base = not stable, stable = stable) ret = index.unique, if return_index: ret = ret + (index.index,) if return_inverse: ret = ret + (index.inverse,) if return_count: ret = ret + (index.count,) return ret[0] if len(ret) == 1 else ret
[ "def", "unique", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ",", "return_index", "=", "False", ",", "return_inverse", "=", "False", ",", "return_count", "=", "False", ")", ":", "stable", "=", "return_index", "or", "return_inverse", "index", "=", "as_index", "(", "keys", ",", "axis", ",", "base", "=", "not", "stable", ",", "stable", "=", "stable", ")", "ret", "=", "index", ".", "unique", ",", "if", "return_index", ":", "ret", "=", "ret", "+", "(", "index", ".", "index", ",", ")", "if", "return_inverse", ":", "ret", "=", "ret", "+", "(", "index", ".", "inverse", ",", ")", "if", "return_count", ":", "ret", "=", "ret", "+", "(", "index", ".", "count", ",", ")", "return", "ret", "[", "0", "]", "if", "len", "(", "ret", ")", "==", "1", "else", "ret" ]
compute the set of unique keys Parameters ---------- keys : indexable key object keys object to find unique keys within axis : int if keys is a multi-dimensional array, the axis to regard as the sequence of key objects return_index : bool if True, return indexes such that keys[index] == unique return_inverse : bool if True, return the indices such that unique[inverse] == keys return_count : bool if True, return the number of times each unique key occurs in the input Notes ----- The kwargs are there to provide a backwards compatible interface to numpy.unique, but arguably, it is cleaner to call index and its properties directly, should more than unique values be desired as output
[ "compute", "the", "set", "of", "unique", "keys" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L19-L50
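A sketch of unique with all three optional returns, under the same assumed import:

import numpy as np
import numpy_indexed as npi  # assumed package-level export

keys = np.array(['b', 'a', 'b', 'c', 'a'])
u, index, inverse, count = npi.unique(
    keys, return_index=True, return_inverse=True, return_count=True)
print(u)      # ['a' 'b' 'c']
print(count)  # [2 2 1]
assert np.array_equal(keys[index], u)    # keys[index] == unique
assert np.array_equal(u[inverse], keys)  # unique[inverse] == keys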
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/arraysetops.py
contains
def contains(this, that, axis=semantics.axis_default): """Returns bool for each element of `that`, indicating if it is contained in `this` Parameters ---------- this : indexable key sequence sequence of items to test against that : indexable key sequence sequence of items to test for Returns ------- ndarray, [that.size], bool returns a bool for each element in `that`, indicating if it is contained in `this` Notes ----- Reads as 'this contains that' Similar to 'that in this', but with different performance characteristics """ this = as_index(this, axis=axis, lex_as_struct=True, base=True) that = as_index(that, axis=axis, lex_as_struct=True) left = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='left') right = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='right') flags = np.zeros(that.size + 1, dtype=np.int) np.add.at(flags, left, 1) np.add.at(flags, right, -1) return np.cumsum(flags)[:-1].astype(np.bool)[that.rank]
python
def contains(this, that, axis=semantics.axis_default): """Returns bool for each element of `that`, indicating if it is contained in `this` Parameters ---------- this : indexable key sequence sequence of items to test against that : indexable key sequence sequence of items to test for Returns ------- ndarray, [that.size], bool returns a bool for each element in `that`, indicating if it is contained in `this` Notes ----- Reads as 'this contains that' Similar to 'that in this', but with different performance characteristics """ this = as_index(this, axis=axis, lex_as_struct=True, base=True) that = as_index(that, axis=axis, lex_as_struct=True) left = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='left') right = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='right') flags = np.zeros(that.size + 1, dtype=np.int) np.add.at(flags, left, 1) np.add.at(flags, right, -1) return np.cumsum(flags)[:-1].astype(np.bool)[that.rank]
[ "def", "contains", "(", "this", ",", "that", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "this", "=", "as_index", "(", "this", ",", "axis", "=", "axis", ",", "lex_as_struct", "=", "True", ",", "base", "=", "True", ")", "that", "=", "as_index", "(", "that", ",", "axis", "=", "axis", ",", "lex_as_struct", "=", "True", ")", "left", "=", "np", ".", "searchsorted", "(", "that", ".", "_keys", ",", "this", ".", "_keys", ",", "sorter", "=", "that", ".", "sorter", ",", "side", "=", "'left'", ")", "right", "=", "np", ".", "searchsorted", "(", "that", ".", "_keys", ",", "this", ".", "_keys", ",", "sorter", "=", "that", ".", "sorter", ",", "side", "=", "'right'", ")", "flags", "=", "np", ".", "zeros", "(", "that", ".", "size", "+", "1", ",", "dtype", "=", "np", ".", "int", ")", "np", ".", "add", ".", "at", "(", "flags", ",", "left", ",", "1", ")", "np", ".", "add", ".", "at", "(", "flags", ",", "right", ",", "-", "1", ")", "return", "np", ".", "cumsum", "(", "flags", ")", "[", ":", "-", "1", "]", ".", "astype", "(", "np", ".", "bool", ")", "[", "that", ".", "rank", "]" ]
Returns bool for each element of `that`, indicating if it is contained in `this` Parameters ---------- this : indexable key sequence sequence of items to test against that : indexable key sequence sequence of items to test for Returns ------- ndarray, [that.size], bool returns a bool for each element in `that`, indicating if it is contained in `this` Notes ----- Reads as 'this contains that' Similar to 'that in this', but with different performance characteristics
[ "Returns", "bool", "for", "each", "element", "of", "that", "indicating", "if", "it", "is", "contained", "in", "this" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L53-L83
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/arraysetops.py
in_
def in_(this, that, axis=semantics.axis_default): """Returns bool for each element of `this`, indicating if it is present in `that` Parameters ---------- this : indexable key sequence sequence of items to test for that : indexable key sequence sequence of items to test against Returns ------- ndarray, [this.size], bool returns a bool for each element in `this`, indicating if it is present in `that` Notes ----- Reads as 'this in that' Similar to 'that contains this', but with different performance characteristics """ this = as_index(this, axis=axis, lex_as_struct=True, base=True) that = as_index(that, axis=axis, lex_as_struct=True) left = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='left') right = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='right') return left != right
python
def in_(this, that, axis=semantics.axis_default): """Returns bool for each element of `this`, indicating if it is present in `that` Parameters ---------- this : indexable key sequence sequence of items to test for that : indexable key sequence sequence of items to test against Returns ------- ndarray, [this.size], bool returns a bool for each element in `this`, indicating if it is present in `that` Notes ----- Reads as 'this in that' Similar to 'that contains this', but with different performance characteristics """ this = as_index(this, axis=axis, lex_as_struct=True, base=True) that = as_index(that, axis=axis, lex_as_struct=True) left = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='left') right = np.searchsorted(that._keys, this._keys, sorter=that.sorter, side='right') return left != right
[ "def", "in_", "(", "this", ",", "that", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "this", "=", "as_index", "(", "this", ",", "axis", "=", "axis", ",", "lex_as_struct", "=", "True", ",", "base", "=", "True", ")", "that", "=", "as_index", "(", "that", ",", "axis", "=", "axis", ",", "lex_as_struct", "=", "True", ")", "left", "=", "np", ".", "searchsorted", "(", "that", ".", "_keys", ",", "this", ".", "_keys", ",", "sorter", "=", "that", ".", "sorter", ",", "side", "=", "'left'", ")", "right", "=", "np", ".", "searchsorted", "(", "that", ".", "_keys", ",", "this", ".", "_keys", ",", "sorter", "=", "that", ".", "sorter", ",", "side", "=", "'right'", ")", "return", "left", "!=", "right" ]
Returns bool for each element of `this`, indicating if it is present in `that` Parameters ---------- this : indexable key sequence sequence of items to test for that : indexable key sequence sequence of items to test against Returns ------- ndarray, [this.size], bool returns a bool for each element in `this`, indicating if it is present in `that` Notes ----- Reads as 'this in that' Similar to 'that contains this', but with different performance characteristics
[ "Returns", "bool", "for", "each", "element", "of", "this", "indicating", "if", "it", "is", "present", "in", "that" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L86-L112
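contains and in_ answer mirror-image questions and differ in the shape of the result; a contrasting sketch, under the same assumed import (note that contains, as recorded, also relies on the np.int and np.bool aliases removed in NumPy 1.24):

import numpy as np
import numpy_indexed as npi  # assumed package-level export

this = np.array([1, 2, 3, 4])
that = np.array([3, 5, 3])

# one bool per element of `that`: is it contained in `this`?
print(npi.contains(this, that))  # [ True False  True]

# one bool per element of `this`: is it present in `that`?
print(npi.in_(this, that))       # [False False  True False]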
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/arraysetops.py
indices
def indices(this, that, axis=semantics.axis_default, missing='raise'): """Find indices such that this[indices] == that If multiple indices satisfy this condition, the first index found is returned Parameters ---------- this : indexable object items to search in that : indexable object items to search for axis : int, optional axis to operate on missing : {'raise', 'ignore', 'mask' or int} if `missing` is 'raise', a KeyError is raised if not all elements of `that` are present in `this` if `missing` is 'mask', a masked array is returned, where items of `that` not present in `this` are masked out if `missing` is 'ignore', all elements of `that` are assumed to be present in `this`, and output is undefined otherwise if missing is an integer, this is used as a fill-value Returns ------- indices : ndarray, [that.size], int indices such that this[indices] == that Notes ----- May be regarded as a vectorized numpy equivalent of list.index """ this = as_index(this, axis=axis, lex_as_struct=True) # use this for getting this.keys and that.keys organized the same way; # sorting is superfluous though. make sorting a cached property? # should we be working with cached properties generally? # or we should use sorted values, if searchsorted can exploit this knowledge? that = as_index(that, axis=axis, base=True, lex_as_struct=True) # use raw private keys here, rather than public unpacked keys insertion = np.searchsorted(this._keys, that._keys, sorter=this.sorter, side='left') indices = np.take(this.sorter, insertion, mode='clip') if missing != 'ignore': invalid = this._keys[indices] != that._keys if missing == 'raise': if np.any(invalid): raise KeyError('Not all keys in `that` are present in `this`') elif missing == 'mask': indices = np.ma.masked_array(indices, invalid) else: indices[invalid] = missing return indices
python
def indices(this, that, axis=semantics.axis_default, missing='raise'): """Find indices such that this[indices] == that If multiple indices satisfy this condition, the first index found is returned Parameters ---------- this : indexable object items to search in that : indexable object items to search for axis : int, optional axis to operate on missing : {'raise', 'ignore', 'mask' or int} if `missing` is 'raise', a KeyError is raised if not all elements of `that` are present in `this` if `missing` is 'mask', a masked array is returned, where items of `that` not present in `this` are masked out if `missing` is 'ignore', all elements of `that` are assumed to be present in `this`, and output is undefined otherwise if missing is an integer, this is used as a fill-value Returns ------- indices : ndarray, [that.size], int indices such that this[indices] == that Notes ----- May be regarded as a vectorized numpy equivalent of list.index """ this = as_index(this, axis=axis, lex_as_struct=True) # use this for getting this.keys and that.keys organized the same way; # sorting is superfluous though. make sorting a cached property? # should we be working with cached properties generally? # or we should use sorted values, if searchsorted can exploit this knowledge? that = as_index(that, axis=axis, base=True, lex_as_struct=True) # use raw private keys here, rather than public unpacked keys insertion = np.searchsorted(this._keys, that._keys, sorter=this.sorter, side='left') indices = np.take(this.sorter, insertion, mode='clip') if missing != 'ignore': invalid = this._keys[indices] != that._keys if missing == 'raise': if np.any(invalid): raise KeyError('Not all keys in `that` are present in `this`') elif missing == 'mask': indices = np.ma.masked_array(indices, invalid) else: indices[invalid] = missing return indices
[ "def", "indices", "(", "this", ",", "that", ",", "axis", "=", "semantics", ".", "axis_default", ",", "missing", "=", "'raise'", ")", ":", "this", "=", "as_index", "(", "this", ",", "axis", "=", "axis", ",", "lex_as_struct", "=", "True", ")", "# use this for getting this.keys and that.keys organized the same way;", "# sorting is superfluous though. make sorting a cached property?", "# should we be working with cached properties generally?", "# or we should use sorted values, if searchsorted can exploit this knowledge?", "that", "=", "as_index", "(", "that", ",", "axis", "=", "axis", ",", "base", "=", "True", ",", "lex_as_struct", "=", "True", ")", "# use raw private keys here, rather than public unpacked keys", "insertion", "=", "np", ".", "searchsorted", "(", "this", ".", "_keys", ",", "that", ".", "_keys", ",", "sorter", "=", "this", ".", "sorter", ",", "side", "=", "'left'", ")", "indices", "=", "np", ".", "take", "(", "this", ".", "sorter", ",", "insertion", ",", "mode", "=", "'clip'", ")", "if", "missing", "!=", "'ignore'", ":", "invalid", "=", "this", ".", "_keys", "[", "indices", "]", "!=", "that", ".", "_keys", "if", "missing", "==", "'raise'", ":", "if", "np", ".", "any", "(", "invalid", ")", ":", "raise", "KeyError", "(", "'Not all keys in `that` are present in `this`'", ")", "elif", "missing", "==", "'mask'", ":", "indices", "=", "np", ".", "ma", ".", "masked_array", "(", "indices", ",", "invalid", ")", "else", ":", "indices", "[", "invalid", "]", "=", "missing", "return", "indices" ]
Find indices such that this[indices] == that If multiple indices satisfy this condition, the first index found is returned Parameters ---------- this : indexable object items to search in that : indexable object items to search for axis : int, optional axis to operate on missing : {'raise', 'ignore', 'mask' or int} if `missing` is 'raise', a KeyError is raised if not all elements of `that` are present in `this` if `missing` is 'mask', a masked array is returned, where items of `that` not present in `this` are masked out if `missing` is 'ignore', all elements of `that` are assumed to be present in `this`, and output is undefined otherwise if missing is an integer, this is used as a fill-value Returns ------- indices : ndarray, [that.size], int indices such that this[indices] == that Notes ----- May be regarded as a vectorized numpy equivalent of list.index
[ "Find", "indices", "such", "that", "this", "[", "indices", "]", "==", "that", "If", "multiple", "indices", "satisfy", "this", "condition", "the", "first", "index", "found", "is", "returned" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L115-L164
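A sketch of indices, including the fill-value form of the missing argument, under the same assumed import:

import numpy as np
import numpy_indexed as npi  # assumed package-level export

haystack = np.array([10, 20, 30, 40])
needles = np.array([30, 10])
idx = npi.indices(haystack, needles)
print(idx)  # [2 0]
assert np.array_equal(haystack[idx], needles)

# missing keys can raise (default), be masked out, or be filled with a sentinel
print(npi.indices(haystack, np.array([30, 99]), missing=-1))  # [ 2 -1]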
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/arraysetops.py
remap
def remap(input, keys, values, missing='ignore', inplace=False): """Given an input array, remap its entries corresponding to 'keys' to 'values' equivalent of output = [map.get(i, default=i) for i in input], if map were a dictionary of corresponding keys and values Parameters ---------- input : ndarray, [...] array in which to perform the replacements keys : ndarray, [...] keys of the mapping values : ndarray, [...] values of the mapping missing : {'raise', 'ignore'} if `missing` is 'raise', a KeyError is raised if 'input' contains elements not present in 'keys' if `missing` is 'ignore', only elements of 'input' present in 'keys' are remapped inplace : bool, optional if True, input array is remapped in place if False, a copy is returned Returns ------- output : ndarray, [...] like 'input', but with elements remapped according to the mapping defined by 'keys' and 'values' """ input = np.asarray(input) # FIXME: currently instances of Index are not allowed values = np.asarray(values) if missing == 'ignore': idx = indices(keys, input, missing='mask') mask = np.logical_not(idx.mask) idx = idx.data elif missing == 'raise': idx = indices(keys, input, missing='raise') mask = Ellipsis else: raise ValueError("'missing' should be either 'ignore' or 'raise'") output = input if inplace else input.copy() output[mask] = values[idx[mask]] return output
python
def remap(input, keys, values, missing='ignore', inplace=False): """Given an input array, remap its entries corresponding to 'keys' to 'values' equivalent of output = [map.get(i, default=i) for i in input], if map were a dictionary of corresponding keys and values Parameters ---------- input : ndarray, [...] array in which to perform the replacements keys : ndarray, [...] keys of the mapping values : ndarray, [...] values of the mapping missing : {'raise', 'ignore'} if `missing` is 'raise', a KeyError is raised if 'input' contains elements not present in 'keys' if `missing` is 'ignore', only elements of 'input' present in 'keys' are remapped inplace : bool, optional if True, input array is remapped in place if False, a copy is returned Returns ------- output : ndarray, [...] like 'input', but with elements remapped according to the mapping defined by 'keys' and 'values' """ input = np.asarray(input) # FIXME: currently instances of Index are not allowed values = np.asarray(values) if missing == 'ignore': idx = indices(keys, input, missing='mask') mask = np.logical_not(idx.mask) idx = idx.data elif missing == 'raise': idx = indices(keys, input, missing='raise') mask = Ellipsis else: raise ValueError("'missing' should be either 'ignore' or 'raise'") output = input if inplace else input.copy() output[mask] = values[idx[mask]] return output
[ "def", "remap", "(", "input", ",", "keys", ",", "values", ",", "missing", "=", "'ignore'", ",", "inplace", "=", "False", ")", ":", "input", "=", "np", ".", "asarray", "(", "input", ")", "# FIXME: currently instances of Index are not allowed", "values", "=", "np", ".", "asarray", "(", "values", ")", "if", "missing", "==", "'ignore'", ":", "idx", "=", "indices", "(", "keys", ",", "input", ",", "missing", "=", "'mask'", ")", "mask", "=", "np", ".", "logical_not", "(", "idx", ".", "mask", ")", "idx", "=", "idx", ".", "data", "elif", "missing", "==", "'raise'", ":", "idx", "=", "indices", "(", "keys", ",", "input", ",", "missing", "=", "'raise'", ")", "mask", "=", "Ellipsis", "else", ":", "raise", "ValueError", "(", "\"'missing' should be either 'ignore' or 'raise'\"", ")", "output", "=", "input", "if", "inplace", "else", "input", ".", "copy", "(", ")", "output", "[", "mask", "]", "=", "values", "[", "idx", "[", "mask", "]", "]", "return", "output" ]
Given an input array, remap its entries corresponding to 'keys' to 'values' equivalent of output = [map.get(i, default=i) for i in input], if map were a dictionary of corresponding keys and values Parameters ---------- input : ndarray, [...] array in which to perform the replacements keys : ndarray, [...] keys of the mapping values : ndarray, [...] values of the mapping missing : {'raise', 'ignore'} if `missing` is 'raise', a KeyError is raised if 'input' contains elements not present in 'keys' if `missing` is 'ignore', only elements of 'input' present in 'keys' are remapped inplace : bool, optional if True, input array is remapped in place if False, a copy is returned Returns ------- output : ndarray, [...] like 'input', but with elements remapped according to the mapping defined by 'keys' and 'values'
[ "Given", "an", "input", "array", "remap", "its", "entries", "corresponding", "to", "keys", "to", "values", "equivalent", "of", "output", "=", "[", "map", ".", "get", "(", "i", "default", "=", "i", ")", "for", "i", "in", "input", "]", "if", "map", "were", "a", "dictionary", "of", "corresponding", "keys", "and", "values" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L167-L205
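A sketch of remap in its default 'ignore' mode, under the same assumed import:

import numpy as np
import numpy_indexed as npi  # assumed package-level export

data = np.array([0, 1, 2, 1, 0])
# map 0 -> 10 and 1 -> 11; the unmapped key 2 passes through unchanged
out = npi.remap(data, keys=np.array([0, 1]), values=np.array([10, 11]))
print(out)  # [10 11  2 11 10]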
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/arraysetops.py
_set_preprocess
def _set_preprocess(sets, **kwargs): """upcasts a sequence of indexable objects to Index objects according to the given kwargs Parameters ---------- sets : iterable of indexable objects axis : int, optional axis to view as item sequence assume_unique : bool, optional if we should assume the items sequence does not contain duplicates Returns ------- list of Index objects Notes ----- common preprocessing for all set operations """ axis = kwargs.get('axis', semantics.axis_default) assume_unique = kwargs.get('assume_unique', False) if assume_unique: sets = [as_index(s, axis=axis).unique for s in sets] else: sets = [as_index(s, axis=axis).unique for s in sets] return sets
python
def _set_preprocess(sets, **kwargs): """upcasts a sequence of indexable objects to Index objects according to the given kwargs Parameters ---------- sets : iterable of indexable objects axis : int, optional axis to view as item sequence assume_unique : bool, optional if we should assume the items sequence does not contain duplicates Returns ------- list of Index objects Notes ----- common preprocessing for all set operations """ axis = kwargs.get('axis', semantics.axis_default) assume_unique = kwargs.get('assume_unique', False) if assume_unique: sets = [as_index(s, axis=axis).unique for s in sets] else: sets = [as_index(s, axis=axis).unique for s in sets] return sets
[ "def", "_set_preprocess", "(", "sets", ",", "*", "*", "kwargs", ")", ":", "axis", "=", "kwargs", ".", "get", "(", "'axis'", ",", "semantics", ".", "axis_default", ")", "assume_unique", "=", "kwargs", ".", "get", "(", "'assume_unique'", ",", "False", ")", "if", "assume_unique", ":", "sets", "=", "[", "as_index", "(", "s", ",", "axis", "=", "axis", ")", ".", "unique", "for", "s", "in", "sets", "]", "else", ":", "sets", "=", "[", "as_index", "(", "s", ",", "axis", "=", "axis", ")", ".", "unique", "for", "s", "in", "sets", "]", "return", "sets" ]
upcasts a sequence of indexable objects to Index objects according to the given kwargs Parameters ---------- sets : iterable of indexable objects axis : int, optional axis to view as item sequence assume_unique : bool, optional if we should assume the items sequence does not contain duplicates Returns ------- list of Index objects Notes ----- common preprocessing for all set operations
[ "upcasts", "a", "sequence", "of", "indexable", "objects", "to", "Index", "objets", "according", "to", "the", "given", "kwargs" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L208-L234
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/arraysetops.py
_set_concatenate
def _set_concatenate(sets): """concatenate indexable objects. Parameters ---------- sets : iterable of indexable objects Returns ------- indexable object handles both arrays and tuples of arrays """ def con(set): # if not all(): # raise ValueError('concatenated keys must have the same dtype') try: return np.concatenate([s for s in sets if len(s)]) except ValueError: return set[0] if any(not isinstance(s, tuple) for s in sets): #assume all arrays return con(sets) else: #assume all tuples return tuple(con(s) for s in zip(*sets))
python
def _set_concatenate(sets): """concatenate indexable objects. Parameters ---------- sets : iterable of indexable objects Returns ------- indexable object handles both arrays and tuples of arrays """ def con(set): # if not all(): # raise ValueError('concatenated keys must have the same dtype') try: return np.concatenate([s for s in sets if len(s)]) except ValueError: return set[0] if any(not isinstance(s, tuple) for s in sets): #assume all arrays return con(sets) else: #assume all tuples return tuple(con(s) for s in zip(*sets))
[ "def", "_set_concatenate", "(", "sets", ")", ":", "def", "con", "(", "set", ")", ":", "# if not all():", "# raise ValueError('concatenated keys must have the same dtype')", "try", ":", "return", "np", ".", "concatenate", "(", "[", "s", "for", "s", "in", "sets", "if", "len", "(", "s", ")", "]", ")", "except", "ValueError", ":", "return", "set", "[", "0", "]", "if", "any", "(", "not", "isinstance", "(", "s", ",", "tuple", ")", "for", "s", "in", "sets", ")", ":", "#assume all arrays", "return", "con", "(", "sets", ")", "else", ":", "#assume all tuples", "return", "tuple", "(", "con", "(", "s", ")", "for", "s", "in", "zip", "(", "*", "sets", ")", ")" ]
concatenate indexable objects. Parameters ---------- sets : iterable of indexable objects Returns ------- indexable object handles both arrays and tuples of arrays
[ "concatenate", "indexable", "objects", "." ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L237-L263
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/arraysetops.py
_set_count
def _set_count(sets, n, **kwargs): """return the elements which occur n times over the sequence of sets Parameters ---------- sets : iterable of indexable objects n : int number of sets the element should occur in Returns ------- indexable indexable with all elements that occurred in n of the sets Notes ----- used by both exclusive and intersection """ sets = _set_preprocess(sets, **kwargs) i = as_index(_set_concatenate(sets), axis=0, base=True) # FIXME : this does not work for lex-keys return i.unique[i.count == n]
python
def _set_count(sets, n, **kwargs): """return the elements which occur n times over the sequence of sets Parameters ---------- sets : iterable of indexable objects n : int number of sets the element should occur in Returns ------- indexable indexable with all elements that occurred in n of the sets Notes ----- used by both exclusive and intersection """ sets = _set_preprocess(sets, **kwargs) i = as_index(_set_concatenate(sets), axis=0, base=True) # FIXME : this does not work for lex-keys return i.unique[i.count == n]
[ "def", "_set_count", "(", "sets", ",", "n", ",", "*", "*", "kwargs", ")", ":", "sets", "=", "_set_preprocess", "(", "sets", ",", "*", "*", "kwargs", ")", "i", "=", "as_index", "(", "_set_concatenate", "(", "sets", ")", ",", "axis", "=", "0", ",", "base", "=", "True", ")", "# FIXME : this does not work for lex-keys", "return", "i", ".", "unique", "[", "i", ".", "count", "==", "n", "]" ]
return the elements which occur n times over the sequence of sets Parameters ---------- sets : iterable of indexable objects n : int number of sets the element should occur in Returns ------- indexable indexable with all elements that occurred in n of the sets Notes ----- used by both exclusive and intersection
[ "return", "the", "elements", "which", "occur", "n", "times", "over", "the", "sequence", "of", "sets" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L266-L287
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/arraysetops.py
union
def union(*sets, **kwargs): """all unique items which occur in any one of the sets Parameters ---------- sets : tuple of indexable objects Returns ------- union of all items in all sets """ sets = _set_preprocess(sets, **kwargs) return as_index( _set_concatenate(sets), axis=0, base=True).unique
python
def union(*sets, **kwargs): """all unique items which occur in any one of the sets Parameters ---------- sets : tuple of indexable objects Returns ------- union of all items in all sets """ sets = _set_preprocess(sets, **kwargs) return as_index( _set_concatenate(sets), axis=0, base=True).unique
[ "def", "union", "(", "*", "sets", ",", "*", "*", "kwargs", ")", ":", "sets", "=", "_set_preprocess", "(", "sets", ",", "*", "*", "kwargs", ")", "return", "as_index", "(", "_set_concatenate", "(", "sets", ")", ",", "axis", "=", "0", ",", "base", "=", "True", ")", ".", "unique" ]
all unique items which occur in any one of the sets Parameters ---------- sets : tuple of indexable objects Returns ------- union of all items in all sets
[ "all", "unique", "items", "which", "occur", "in", "any", "one", "of", "the", "sets" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L290-L302
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/arraysetops.py
difference
def difference(*sets, **kwargs): """subtracts all tail sets from the head set Parameters ---------- sets : tuple of indexable objects the first set is the head, from which we subtract; the remaining sets form the tail, which is subtracted from the head Returns ------- items which are in the head but not in any of the tail sets Notes ----- alt implementation: compute union of tail, then union with head, then use set_count(1) """ head, tail = sets[0], sets[1:] idx = as_index(head, **kwargs) lhs = idx.unique rhs = [intersection(idx, s, **kwargs) for s in tail] return exclusive(lhs, *rhs, axis=0, assume_unique=True)
python
def difference(*sets, **kwargs): """subtracts all tail sets from the head set Parameters ---------- sets : tuple of indexable objects the first set is the head, from which we subtract; the remaining sets form the tail, which is subtracted from the head Returns ------- items which are in the head but not in any of the tail sets Notes ----- alt implementation: compute union of tail, then union with head, then use set_count(1) """ head, tail = sets[0], sets[1:] idx = as_index(head, **kwargs) lhs = idx.unique rhs = [intersection(idx, s, **kwargs) for s in tail] return exclusive(lhs, *rhs, axis=0, assume_unique=True)
[ "def", "difference", "(", "*", "sets", ",", "*", "*", "kwargs", ")", ":", "head", ",", "tail", "=", "sets", "[", "0", "]", ",", "sets", "[", "1", ":", "]", "idx", "=", "as_index", "(", "head", ",", "*", "*", "kwargs", ")", "lhs", "=", "idx", ".", "unique", "rhs", "=", "[", "intersection", "(", "idx", ",", "s", ",", "*", "*", "kwargs", ")", "for", "s", "in", "tail", "]", "return", "exclusive", "(", "lhs", ",", "*", "rhs", ",", "axis", "=", "0", ",", "assume_unique", "=", "True", ")" ]
subtracts all tail sets from the head set Parameters ---------- sets : tuple of indexable objects the first set is the head, from which we subtract; the remaining sets form the tail, which is subtracted from the head Returns ------- items which are in the head but not in any of the tail sets Notes ----- alt implementation: compute union of tail, then union with head, then use set_count(1)
[ "subtracts", "all", "tail", "sets", "from", "the", "head", "set" ]
train
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/arraysetops.py#L339-L360
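A sketch of the set operations union and difference, under the same assumed import:

import numpy as np
import numpy_indexed as npi  # assumed package-level export

a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
print(npi.union(a, b))       # [1 2 3 4]: unique items occurring in any set
print(npi.difference(a, b))  # [1]: items in the head set a but in none of the tails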
gtnx/pandas-highcharts
pandas_highcharts/display.py
_generate_div_id_chart
def _generate_div_id_chart(prefix="chart_id", digits=8): """Generate a random id for a div chart. """ choices = (random.randrange(0, 52) for _ in range(digits)) return prefix + "".join((string.ascii_letters[x] for x in choices))
python
def _generate_div_id_chart(prefix="chart_id", digits=8): """Generate a random id for a div chart. """ choices = (random.randrange(0, 52) for _ in range(digits)) return prefix + "".join((string.ascii_letters[x] for x in choices))
[ "def", "_generate_div_id_chart", "(", "prefix", "=", "\"chart_id\"", ",", "digits", "=", "8", ")", ":", "choices", "=", "(", "random", ".", "randrange", "(", "0", ",", "52", ")", "for", "_", "in", "range", "(", "digits", ")", ")", "return", "prefix", "+", "\"\"", ".", "join", "(", "(", "string", ".", "ascii_letters", "[", "x", "]", "for", "x", "in", "choices", ")", ")" ]
Generate a random id for a div chart.
[ "Generate", "a", "random", "id", "for", "div", "chart", "." ]
train
https://github.com/gtnx/pandas-highcharts/blob/bf449b7db8b6966bcf95a0280bf2e4518f3e2419/pandas_highcharts/display.py#L36-L40
gtnx/pandas-highcharts
pandas_highcharts/display.py
display_charts
def display_charts(df, chart_type="default", render_to=None, **kwargs): """Display your DataFrame with Highcharts. df: DataFrame chart_type: str 'default' or 'stock' render_to: str div id for plotting your data """ if chart_type not in ("default", "stock"): raise ValueError("Wrong chart_type: accept 'default' or 'stock'.") chart_id = render_to if render_to is not None else _generate_div_id_chart() json_data = serialize(df, render_to=chart_id, chart_type=chart_type, **kwargs) content = """<div id="{chart_id}"></div> <script type="text/javascript">{data}</script>""" return display(HTML(content.format(chart_id=chart_id, data=json_data)))
python
def display_charts(df, chart_type="default", render_to=None, **kwargs): """Display your DataFrame with Highcharts. df: DataFrame chart_type: str 'default' or 'stock' render_to: str div id for plotting your data """ if chart_type not in ("default", "stock"): raise ValueError("Wrong chart_type: accept 'default' or 'stock'.") chart_id = render_to if render_to is not None else _generate_div_id_chart() json_data = serialize(df, render_to=chart_id, chart_type=chart_type, **kwargs) content = """<div id="{chart_id}"></div> <script type="text/javascript">{data}</script>""" return display(HTML(content.format(chart_id=chart_id, data=json_data)))
[ "def", "display_charts", "(", "df", ",", "chart_type", "=", "\"default\"", ",", "render_to", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "chart_type", "not", "in", "(", "\"default\"", ",", "\"stock\"", ")", ":", "raise", "ValueError", "(", "\"Wrong chart_type: accept 'default' or 'stock'.\"", ")", "chart_id", "=", "render_to", "if", "render_to", "is", "not", "None", "else", "_generate_div_id_chart", "(", ")", "json_data", "=", "serialize", "(", "df", ",", "render_to", "=", "chart_id", ",", "chart_type", "=", "chart_type", ",", "*", "*", "kwargs", ")", "content", "=", "\"\"\"<div id=\"{chart_id}\"</div>\n <script type=\"text/javascript\">{data}</script>\"\"\"", "return", "display", "(", "HTML", "(", "content", ".", "format", "(", "chart_id", "=", "chart_id", ",", "data", "=", "json_data", ")", ")", ")" ]
Display your DataFrame with Highcharts. df: DataFrame chart_type: str 'default' or 'stock' render_to: str div id for plotting your data
[ "Display", "you", "DataFrame", "with", "Highcharts", "." ]
train
https://github.com/gtnx/pandas-highcharts/blob/bf449b7db8b6966bcf95a0280bf2e4518f3e2419/pandas_highcharts/display.py#L43-L60
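A sketch of display_charts inside a Jupyter notebook; the extra title kwarg is assumed to be among the options that serialize forwards to Highcharts:

import pandas as pd
from pandas_highcharts.display import display_charts

df = pd.DataFrame({'y': [1, 3, 2]}, index=[2010, 2011, 2012])
# renders the frame as an interactive Highcharts plot in the notebook output
display_charts(df, title="demo", render_to="my_chart")  # title kwarg is an assumption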
gtnx/pandas-highcharts
pandas_highcharts/display.py
_series_data_filter
def _series_data_filter(data): """Replace each 'data' key in the list stored under 'series' by "[...]". Use this to avoid storing and displaying the series data when you just want to display and modify the Highcharts parameters. data: dict Serialized DataFrame in a dict for Highcharts Returns: a dict with filtered values See also `core.serialize` """ data = copy.deepcopy(data) if "series" in data: for series in data["series"]: series["data"] = "[...]" return data
python
def _series_data_filter(data): """Replace each 'data' key in the list stored under 'series' by "[...]". Use this to avoid storing and displaying the series data when you just want to display and modify the Highcharts parameters. data: dict Serialized DataFrame in a dict for Highcharts Returns: a dict with filtered values See also `core.serialize` """ data = copy.deepcopy(data) if "series" in data: for series in data["series"]: series["data"] = "[...]" return data
[ "def", "_series_data_filter", "(", "data", ")", ":", "data", "=", "copy", ".", "deepcopy", "(", "data", ")", "if", "\"series\"", "in", "data", ":", "for", "series", "in", "data", "[", "\"series\"", "]", ":", "series", "[", "\"data\"", "]", "=", "\"[...]\"", "return", "data" ]
Replace each 'data' key in the list stored under 'series' by "[...]". Use this to avoid storing and displaying the series data when you just want to display and modify the Highcharts parameters. data: dict Serialized DataFrame in a dict for Highcharts Returns: a dict with filtered values See also `core.serialize`
[ "Replace", "each", "data", "key", "in", "the", "list", "stored", "under", "series", "by", "[", "...", "]", "." ]
train
https://github.com/gtnx/pandas-highcharts/blob/bf449b7db8b6966bcf95a0280bf2e4518f3e2419/pandas_highcharts/display.py#L63-L80
gtnx/pandas-highcharts
pandas_highcharts/display.py
pretty_params
def pretty_params(data, indent=2): """Pretty print your Highcharts params (as JSON). data: dict Serialized DataFrame in a dict for Highcharts """ data_to_print = _series_data_filter(data) print(json.dumps(data_to_print, indent=indent))
python
def pretty_params(data, indent=2): """Pretty print your Highcharts params (as JSON). data: dict Serialized DataFrame in a dict for Highcharts """ data_to_print = _series_data_filter(data) print(json.dumps(data_to_print, indent=indent))
[ "def", "pretty_params", "(", "data", ",", "indent", "=", "2", ")", ":", "data_to_print", "=", "_series_data_filter", "(", "data", ")", "print", "(", "json", ".", "dumps", "(", "data_to_print", ",", "indent", "=", "indent", ")", ")" ]
Pretty print your Highcharts params (as JSON). data: dict Serialized DataFrame in a dict for Highcharts
[ "Pretty", "print", "your", "Highcharts", "params", "(", "into", "a", "JSON", ")", "." ]
train
https://github.com/gtnx/pandas-highcharts/blob/bf449b7db8b6966bcf95a0280bf2e4518f3e2419/pandas_highcharts/display.py#L83-L90
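A sketch of pretty_params; the output_type='dict' option of serialize is an assumption here, chosen because pretty_params expects the params as a dict:

import pandas as pd
from pandas_highcharts.core import serialize  # assumed location of serialize
from pandas_highcharts.display import pretty_params

df = pd.DataFrame({'y': [1, 3, 2]})
params = serialize(df, render_to="my_chart", output_type="dict")  # assumed kwarg
pretty_params(params)  # prints the config with each series 'data' masked as "[...]"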
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
Transport.get_additional_params
def get_additional_params(self, **params): """ Filter to get the additional params needed for polling """ # TODO: Move these params to their own vertical if needed. polling_params = [ 'locationschema', 'carrierschema', 'sorttype', 'sortorder', 'originairports', 'destinationairports', 'stops', 'outbounddeparttime', 'outbounddepartstarttime', 'outbounddepartendtime', 'inbounddeparttime', 'inbounddepartstarttime', 'inbounddepartendtime', 'duration', 'includecarriers', 'excludecarriers' ] additional_params = dict( (key, value) for key, value in params.items() if key in polling_params ) return additional_params
python
def get_additional_params(self, **params): """ Filter to get the additional params needed for polling """ # TODO: Move these params to their own vertical if needed. polling_params = [ 'locationschema', 'carrierschema', 'sorttype', 'sortorder', 'originairports', 'destinationairports', 'stops', 'outbounddeparttime', 'outbounddepartstarttime', 'outbounddepartendtime', 'inbounddeparttime', 'inbounddepartstarttime', 'inbounddepartendtime', 'duration', 'includecarriers', 'excludecarriers' ] additional_params = dict( (key, value) for key, value in params.items() if key in polling_params ) return additional_params
[ "def", "get_additional_params", "(", "self", ",", "*", "*", "params", ")", ":", "# TODO: Move these params to their own vertical if needed.", "polling_params", "=", "[", "'locationschema'", ",", "'carrierschema'", ",", "'sorttype'", ",", "'sortorder'", ",", "'originairports'", ",", "'destinationairports'", ",", "'stops'", ",", "'outbounddeparttime'", ",", "'outbounddepartstarttime'", ",", "'outbounddepartendtime'", ",", "'inbounddeparttime'", ",", "'inbounddepartstarttime'", ",", "'inbounddepartendtime'", ",", "'duration'", ",", "'includecarriers'", ",", "'excludecarriers'", "]", "additional_params", "=", "dict", "(", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", "if", "key", "in", "polling_params", ")", "return", "additional_params" ]
Filter to get the additional params needed for polling
[ "Filter", "to", "get", "the", "additional", "params", "needed", "for", "polling" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L102-L132
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
Transport.get_result
def get_result(self, errors=GRACEFUL, **params): """ Get all results, no filtering, etc. by creating and polling the session. """ additional_params = self.get_additional_params(**params) return self.poll_session( self.create_session(**params), errors=errors, **additional_params )
python
def get_result(self, errors=GRACEFUL, **params): """ Get all results, no filtering, etc. by creating and polling the session. """ additional_params = self.get_additional_params(**params) return self.poll_session( self.create_session(**params), errors=errors, **additional_params )
[ "def", "get_result", "(", "self", ",", "errors", "=", "GRACEFUL", ",", "*", "*", "params", ")", ":", "additional_params", "=", "self", ".", "get_additional_params", "(", "*", "*", "params", ")", "return", "self", ".", "poll_session", "(", "self", ".", "create_session", "(", "*", "*", "params", ")", ",", "errors", "=", "errors", ",", "*", "*", "additional_params", ")" ]
Get all results, no filtering, etc. by creating and polling the session.
[ "Get", "all", "results", "no", "filtering", "etc", ".", "by", "creating", "and", "polling", "the", "session", "." ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L134-L144
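A hedged usage sketch for get_result. Flights (defined later in this same module) inherits the method from Transport; the constructor argument and the search parameters below are assumptions drawn from the SDK's documented examples, not from this record:

from skyscanner.skyscanner import Flights

flights = Flights('<your-api-key>')  # assumed: API key passed at construction
result = flights.get_result(
    country='UK', currency='GBP', locale='en-GB',
    originplace='SIN-sky', destinationplace='KUL-sky',
    outbounddate='2017-05-28', adults=1,
    sorttype='price')  # 'sorttype' is picked out by get_additional_params
                       # and forwarded to the poll step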
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
Transport.make_request
def make_request(self, service_url, method='get', headers=None,
                 data=None, callback=None, errors=GRACEFUL, **params):
    """
    Reusable method for performing requests.

    :param service_url - URL to request
    :param method - request method, default is 'get'
    :param headers - request headers
    :param data - post data
    :param callback - callback to be applied to response,
                      default callback will parse response as json object.
    :param errors - specifies communication errors handling mode, possible
                    values are:
                     * strict (default) - throw an error as soon as one
                       occurred
                     * graceful - ignore certain errors, e.g. EmptyResponse
                     * ignore - ignore all errors and return a result in
                       any case.
                       NOTE that it DOES NOT mean that no exceptions can
                       be raised from this method, it mostly ignores
                       communication related errors.
                     * None or empty string equals to default
    :param params - additional query parameters for request
    """
    error_modes = (STRICT, GRACEFUL, IGNORE)
    error_mode = errors or GRACEFUL
    if error_mode.lower() not in error_modes:
        raise ValueError(
            'Possible values for errors argument are: %s'
            % ', '.join(error_modes)
        )

    if callback is None:
        callback = self._default_resp_callback

    if 'apikey' not in service_url.lower():
        params.update({
            'apiKey': self.api_key
        })

    request = getattr(requests, method.lower())

    log.debug('* Request URL: %s' % service_url)
    log.debug('* Request method: %s' % method)
    log.debug('* Request query params: %s' % params)
    log.debug('* Request headers: %s' % headers)

    r = request(service_url, headers=headers, data=data, params=params)
    try:
        r.raise_for_status()
        return callback(r)
    except Exception as e:
        return self._with_error_handling(r, e, error_mode,
                                         self.response_format)
python
[ "def", "make_request", "(", "self", ",", "service_url", ",", "method", "=", "'get'", ",", "headers", "=", "None", ",", "data", "=", "None", ",", "callback", "=", "None", ",", "errors", "=", "GRACEFUL", ",", "*", "*", "params", ")", ":", "error_modes", "=", "(", "STRICT", ",", "GRACEFUL", ",", "IGNORE", ")", "error_mode", "=", "errors", "or", "GRACEFUL", "if", "error_mode", ".", "lower", "(", ")", "not", "in", "error_modes", ":", "raise", "ValueError", "(", "'Possible values for errors argument are: %s'", "%", "', '", ".", "join", "(", "error_modes", ")", ")", "if", "callback", "is", "None", ":", "callback", "=", "self", ".", "_default_resp_callback", "if", "'apikey'", "not", "in", "service_url", ".", "lower", "(", ")", ":", "params", ".", "update", "(", "{", "'apiKey'", ":", "self", ".", "api_key", "}", ")", "request", "=", "getattr", "(", "requests", ",", "method", ".", "lower", "(", ")", ")", "log", ".", "debug", "(", "'* Request URL: %s'", "%", "service_url", ")", "log", ".", "debug", "(", "'* Request method: %s'", "%", "method", ")", "log", ".", "debug", "(", "'* Request query params: %s'", "%", "params", ")", "log", ".", "debug", "(", "'* Request headers: %s'", "%", "headers", ")", "r", "=", "request", "(", "service_url", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "params", "=", "params", ")", "try", ":", "r", ".", "raise_for_status", "(", ")", "return", "callback", "(", "r", ")", "except", "Exception", "as", "e", ":", "return", "self", ".", "_with_error_handling", "(", "r", ",", "e", ",", "error_mode", ",", "self", ".", "response_format", ")" ]
Reusable method for performing requests.

:param service_url - URL to request
:param method - request method, default is 'get'
:param headers - request headers
:param data - post data
:param callback - callback to be applied to response,
                  default callback will parse response as json object.
:param errors - specifies communication errors handling mode, possible
                values are:
                 * strict (default) - throw an error as soon as one occurred
                 * graceful - ignore certain errors, e.g. EmptyResponse
                 * ignore - ignore all errors and return a result in any case.
                   NOTE that it DOES NOT mean that no exceptions can be
                   raised from this method, it mostly ignores communication
                   related errors.
                 * None or empty string equals to default
:param params - additional query parameters for request
[ "Reusable", "method", "for", "performing", "requests", "." ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L146-L200
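The first guard in make_request is a small validate-then-normalize pattern for the errors flag. A self-contained sketch of just that guard; STRICT, GRACEFUL and IGNORE are module-level constants in the SDK, reproduced here as plain strings for illustration:

STRICT, GRACEFUL, IGNORE = 'strict', 'graceful', 'ignore'

def resolve_error_mode(errors=None):
    error_modes = (STRICT, GRACEFUL, IGNORE)
    error_mode = errors or GRACEFUL  # None / '' fall back to the default
    if error_mode.lower() not in error_modes:
        raise ValueError(
            'Possible values for errors argument are: %s'
            % ', '.join(error_modes))
    return error_mode.lower()  # normalization is this sketch's choice

assert resolve_error_mode() == 'graceful'
assert resolve_error_mode('STRICT') == 'strict'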
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
Transport.get_markets
def get_markets(self, market):
    """
    Get the list of markets
    http://business.skyscanner.net/portal/en-GB/Documentation/Markets
    """
    url = "{url}/{market}".format(url=self.MARKET_SERVICE_URL,
                                  market=market)
    return self.make_request(url, headers=self._headers())
python
[ "def", "get_markets", "(", "self", ",", "market", ")", ":", "url", "=", "\"{url}/{market}\"", ".", "format", "(", "url", "=", "self", ".", "MARKET_SERVICE_URL", ",", "market", "=", "market", ")", "return", "self", ".", "make_request", "(", "url", ",", "headers", "=", "self", ".", "_headers", "(", ")", ")" ]
Get the list of markets
http://business.skyscanner.net/portal/en-GB/Documentation/Markets
[ "Get", "the", "list", "of", "markets", "http", ":", "//", "business", ".", "skyscanner", ".", "net", "/", "portal", "/", "en", "-", "GB", "/", "Documentation", "/", "Markets" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L202-L209
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
Transport.location_autosuggest
def location_autosuggest(self, **params):
    """
    Location Autosuggest Services
    Doc URLs:

    http://business.skyscanner.net/portal/en-GB/
    Documentation/Autosuggest

    http://business.skyscanner.net/portal/en-GB/
    Documentation/CarHireAutoSuggest

    http://business.skyscanner.net/portal/en-GB/
    Documentation/HotelsAutoSuggest

    Format:

    Generic - {LOCATION_AUTOSUGGEST_URL}/{market}/
              {currency}/{locale}/?query={query}&apiKey={apiKey}

    CarHire/Hotels - {LOCATION_AUTOSUGGEST_URL}/{market}/
                     {currency}/{locale}/{query}?apiKey={apiKey}
    """
    service_url = "{url}/{params_path}".format(
        url=self.LOCATION_AUTOSUGGEST_URL,
        params_path=self._construct_params(
            params, self.LOCATION_AUTOSUGGEST_PARAMS)
    )
    return self.make_request(
        service_url,
        headers=self._headers(),
        **params
    )
python
[ "def", "location_autosuggest", "(", "self", ",", "*", "*", "params", ")", ":", "service_url", "=", "\"{url}/{params_path}\"", ".", "format", "(", "url", "=", "self", ".", "LOCATION_AUTOSUGGEST_URL", ",", "params_path", "=", "self", ".", "_construct_params", "(", "params", ",", "self", ".", "LOCATION_AUTOSUGGEST_PARAMS", ")", ")", "return", "self", ".", "make_request", "(", "service_url", ",", "headers", "=", "self", ".", "_headers", "(", ")", ",", "*", "*", "params", ")" ]
Location Autosuggest Services
Doc URLs:

http://business.skyscanner.net/portal/en-GB/Documentation/Autosuggest
http://business.skyscanner.net/portal/en-GB/Documentation/CarHireAutoSuggest
http://business.skyscanner.net/portal/en-GB/Documentation/HotelsAutoSuggest

Format:

Generic - {LOCATION_AUTOSUGGEST_URL}/{market}/
          {currency}/{locale}/?query={query}&apiKey={apiKey}

CarHire/Hotels - {LOCATION_AUTOSUGGEST_URL}/{market}/
                 {currency}/{locale}/{query}?apiKey={apiKey}
[ "Location", "Autosuggest", "Services", "Doc", "URLs", ":" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L211-L241
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
Transport.poll_session
def poll_session(self, poll_url, initial_delay=2, delay=1,
                 tries=20, errors=GRACEFUL, **params):
    """
    Poll the URL
    :param poll_url - URL to poll, should be returned by 'create_session'
                      call
    :param initial_delay - specifies how many seconds to wait before
                           the first poll
    :param delay - specifies how many seconds to wait between the polls
    :param tries - number of polls to perform
    :param errors - errors handling mode, see corresponding parameter
                    in 'make_request' method
    :param params - additional query params for each poll request
    """
    time.sleep(initial_delay)
    poll_response = None
    for n in range(tries):
        poll_response = self.make_request(
            poll_url,
            headers=self._headers(),
            errors=errors,
            **params
        )

        if self.is_poll_complete(poll_response):
            return poll_response
        else:
            time.sleep(delay)

    if STRICT == errors:
        raise ExceededRetries(
            "Failed to poll within {0} tries.".format(tries))
    else:
        return poll_response
python
[ "def", "poll_session", "(", "self", ",", "poll_url", ",", "initial_delay", "=", "2", ",", "delay", "=", "1", ",", "tries", "=", "20", ",", "errors", "=", "GRACEFUL", ",", "*", "*", "params", ")", ":", "time", ".", "sleep", "(", "initial_delay", ")", "poll_response", "=", "None", "for", "n", "in", "range", "(", "tries", ")", ":", "poll_response", "=", "self", ".", "make_request", "(", "poll_url", ",", "headers", "=", "self", ".", "_headers", "(", ")", ",", "errors", "=", "errors", ",", "*", "*", "params", ")", "if", "self", ".", "is_poll_complete", "(", "poll_response", ")", ":", "return", "poll_response", "else", ":", "time", ".", "sleep", "(", "delay", ")", "if", "STRICT", "==", "errors", ":", "raise", "ExceededRetries", "(", "\"Failed to poll within {0} tries.\"", ".", "format", "(", "tries", ")", ")", "else", ":", "return", "poll_response" ]
Poll the URL
:param poll_url - URL to poll, should be returned by 'create_session' call
:param initial_delay - specifies how many seconds to wait before the
                       first poll
:param delay - specifies how many seconds to wait between the polls
:param tries - number of polls to perform
:param errors - errors handling mode, see corresponding parameter in
                'make_request' method
:param params - additional query params for each poll request
[ "Poll", "the", "URL", ":", "param", "poll_url", "-", "URL", "to", "poll", "should", "be", "returned", "by", "create_session", "call", ":", "param", "initial_delay", "-", "specifies", "how", "many", "seconds", "to", "wait", "before", "the", "first", "poll", ":", "param", "delay", "-", "specifies", "how", "many", "seconds", "to", "wait", "between", "the", "polls", ":", "param", "tries", "-", "number", "of", "polls", "to", "perform", ":", "param", "errors", "-", "errors", "handling", "mode", "see", "corresponding", "parameter", "in", "make_request", "method", ":", "param", "params", "-", "additional", "query", "params", "for", "each", "poll", "request" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L249-L281
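The loop above is a generic poll-until-complete pattern. A minimal, SDK-independent sketch of the same control flow (names below are illustrative, not from the SDK):

import time

def poll_until(fetch, is_complete, initial_delay=2, delay=1, tries=20,
               strict=False):
    # Fetch repeatedly until the predicate says the result is complete,
    # or the retry budget runs out.
    time.sleep(initial_delay)
    response = None
    for _ in range(tries):
        response = fetch()
        if is_complete(response):
            return response
        time.sleep(delay)
    if strict:
        raise RuntimeError('Failed to poll within %d tries.' % tries)
    return response  # graceful mode: hand back the last partial response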
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
Transport._construct_params
def _construct_params(params, required_keys, opt_keys=None):
    """
    Construct params list in order of given keys.
    """
    try:
        params_list = [params.pop(key) for key in required_keys]
    except KeyError as e:
        raise MissingParameter(
            'Missing expected request parameter: %s' % e)
    if opt_keys:
        params_list.extend([params.pop(key)
                            for key in opt_keys if key in params])
    return '/'.join(str(p) for p in params_list)
python
[ "def", "_construct_params", "(", "params", ",", "required_keys", ",", "opt_keys", "=", "None", ")", ":", "try", ":", "params_list", "=", "[", "params", ".", "pop", "(", "key", ")", "for", "key", "in", "required_keys", "]", "except", "KeyError", "as", "e", ":", "raise", "MissingParameter", "(", "'Missing expected request parameter: %s'", "%", "e", ")", "if", "opt_keys", ":", "params_list", ".", "extend", "(", "[", "params", ".", "pop", "(", "key", ")", "for", "key", "in", "opt_keys", "if", "key", "in", "params", "]", ")", "return", "'/'", ".", "join", "(", "str", "(", "p", ")", "for", "p", "in", "params_list", ")" ]
Construct params list in order of given keys.
[ "Construct", "params", "list", "in", "order", "of", "given", "keys", "." ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L400-L412
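A quick illustration of what _construct_params does to its input, assuming it is exposed as a static helper on Transport (the missing self suggests a staticmethod). Required keys are popped from params in order, so the leftovers are what later become query-string parameters:

from skyscanner.skyscanner import Transport

params = {'market': 'UK', 'currency': 'GBP', 'locale': 'en-GB',
          'query': 'Edinburgh'}
path = Transport._construct_params(params, ('market', 'currency', 'locale'))
print(path)    # 'UK/GBP/en-GB'
print(params)  # {'query': 'Edinburgh'} - left over for the query string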
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
Flights.create_session
def create_session(self, **params):
    """
    Create the session
    date format: YYYY-mm-dd
    location: ISO code
    """
    return self.make_request(self.PRICING_SESSION_URL,
                             method='post',
                             headers=self._session_headers(),
                             callback=lambda resp: resp.headers['location'],
                             data=params)
python
[ "def", "create_session", "(", "self", ",", "*", "*", "params", ")", ":", "return", "self", ".", "make_request", "(", "self", ".", "PRICING_SESSION_URL", ",", "method", "=", "'post'", ",", "headers", "=", "self", ".", "_session_headers", "(", ")", ",", "callback", "=", "lambda", "resp", ":", "resp", ".", "headers", "[", "'location'", "]", ",", "data", "=", "params", ")" ]
Create the session
date format: YYYY-mm-dd
location: ISO code
[ "Create", "the", "session", "date", "format", ":", "YYYY", "-", "mm", "-", "dd", "location", ":", "ISO", "code" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L434-L445
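Worth noting: create_session never parses a response body. The session poll URL comes back in the HTTP Location header, which is all the lambda callback extracts. In isolation:

# What the callback does: given the requests.Response from the session
# POST, return the poll URL carried in the 'Location' header.
extract_poll_url = lambda resp: resp.headers['location']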
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
Flights.request_booking_details
def request_booking_details(self, poll_url, **params):
    """
    Request for booking details
    URL Format:
    {API_HOST}/apiservices/pricing/v1.0/{session key}/booking
    ?apiKey={apiKey}
    """
    return self.make_request("%s/booking" % poll_url,
                             method='put',
                             headers=self._headers(),
                             callback=lambda resp: resp.headers['location'],
                             **params)
python
[ "def", "request_booking_details", "(", "self", ",", "poll_url", ",", "*", "*", "params", ")", ":", "return", "self", ".", "make_request", "(", "\"%s/booking\"", "%", "poll_url", ",", "method", "=", "'put'", ",", "headers", "=", "self", ".", "_headers", "(", ")", ",", "callback", "=", "lambda", "resp", ":", "resp", ".", "headers", "[", "'location'", "]", ",", "*", "*", "params", ")" ]
Request for booking details
URL Format:
{API_HOST}/apiservices/pricing/v1.0/{session key}/booking
?apiKey={apiKey}
[ "Request", "for", "booking", "details", "URL", "Format", ":", "{", "API_HOST", "}", "/", "apiservices", "/", "pricing", "/", "v1", ".", "0", "/", "{", "session", "key", "}", "/", "booking", "?apiKey", "=", "{", "apiKey", "}" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L447-L459
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
FlightsCache.get_cheapest_price_by_date
def get_cheapest_price_by_date(self, **params):
    """
    {API_HOST}/apiservices/browsedates/v1.0/{market}/{currency}/{locale}/
    {originPlace}/{destinationPlace}/
    {outboundPartialDate}/{inboundPartialDate}
    ?apiKey={apiKey}
    """
    service_url = "{url}/{params_path}".format(
        url=self.BROWSE_DATES_SERVICE_URL,
        params_path=self._construct_params(
            params, self._REQ_PARAMS, self._OPT_PARAMS)
    )
    return self.make_request(
        service_url,
        headers=self._headers(),
        **params
    )
python
[ "def", "get_cheapest_price_by_date", "(", "self", ",", "*", "*", "params", ")", ":", "service_url", "=", "\"{url}/{params_path}\"", ".", "format", "(", "url", "=", "self", ".", "BROWSE_DATES_SERVICE_URL", ",", "params_path", "=", "self", ".", "_construct_params", "(", "params", ",", "self", ".", "_REQ_PARAMS", ",", "self", ".", "_OPT_PARAMS", ")", ")", "return", "self", ".", "make_request", "(", "service_url", ",", "headers", "=", "self", ".", "_headers", "(", ")", ",", "*", "*", "params", ")" ]
{API_HOST}/apiservices/browsedates/v1.0/{market}/{currency}/{locale}/
{originPlace}/{destinationPlace}/
{outboundPartialDate}/{inboundPartialDate}
?apiKey={apiKey}
[ "{", "API_HOST", "}", "/", "apiservices", "/", "browsedates", "/", "v1", ".", "0", "/", "{", "market", "}", "/", "{", "currency", "}", "/", "{", "locale", "}", "/", "{", "originPlace", "}", "/", "{", "destinationPlace", "}", "/", "{", "outboundPartialDate", "}", "/", "{", "inboundPartialDate", "}", "?apiKey", "=", "{", "apiKey", "}" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L484-L501
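All four browse-cache methods in this class share one shape: join the required (and any optional) params into a path, then GET it. A hypothetical illustration of the URL get_cheapest_price_by_date would build, assuming _REQ_PARAMS covers market/currency/locale/places/dates (their exact contents are not shown in this record):

params = {'market': 'GB', 'currency': 'GBP', 'locale': 'en-GB',
          'originplace': 'LHR-sky', 'destinationplace': 'JFK-sky',
          'outboundpartialdate': '2017-05', 'inboundpartialdate': '2017-06'}
# -> {BROWSE_DATES_SERVICE_URL}/GB/GBP/en-GB/LHR-sky/JFK-sky/2017-05/2017-06
#    the ?apiKey=... query parameter is appended later by make_request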
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
FlightsCache.get_cheapest_price_by_route
def get_cheapest_price_by_route(self, **params):
    """
    {API_HOST}/apiservices/browseroutes/v1.0/{market}/{currency}/{locale}/
    {originPlace}/{destinationPlace}/
    {outboundPartialDate}/{inboundPartialDate}
    ?apiKey={apiKey}
    """
    service_url = "{url}/{params_path}".format(
        url=self.BROWSE_ROUTES_SERVICE_URL,
        params_path=self._construct_params(
            params, self._REQ_PARAMS, self._OPT_PARAMS)
    )
    return self.make_request(
        service_url,
        headers=self._headers(),
        **params
    )
python
[ "def", "get_cheapest_price_by_route", "(", "self", ",", "*", "*", "params", ")", ":", "service_url", "=", "\"{url}/{params_path}\"", ".", "format", "(", "url", "=", "self", ".", "BROWSE_ROUTES_SERVICE_URL", ",", "params_path", "=", "self", ".", "_construct_params", "(", "params", ",", "self", ".", "_REQ_PARAMS", ",", "self", ".", "_OPT_PARAMS", ")", ")", "return", "self", ".", "make_request", "(", "service_url", ",", "headers", "=", "self", ".", "_headers", "(", ")", ",", "*", "*", "params", ")" ]
{API_HOST}/apiservices/browseroutes/v1.0/{market}/{currency}/{locale}/
{originPlace}/{destinationPlace}/
{outboundPartialDate}/{inboundPartialDate}
?apiKey={apiKey}
[ "{", "API_HOST", "}", "/", "apiservices", "/", "browseroutes", "/", "v1", ".", "0", "/", "{", "market", "}", "/", "{", "currency", "}", "/", "{", "locale", "}", "/", "{", "originPlace", "}", "/", "{", "destinationPlace", "}", "/", "{", "outboundPartialDate", "}", "/", "{", "inboundPartialDate", "}", "?apiKey", "=", "{", "apiKey", "}" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L503-L520
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
FlightsCache.get_cheapest_quotes
def get_cheapest_quotes(self, **params):
    """
    {API_HOST}/apiservices/browsequotes/v1.0/{market}/{currency}/{locale}/
    {originPlace}/{destinationPlace}/
    {outboundPartialDate}/{inboundPartialDate}
    ?apiKey={apiKey}
    """
    service_url = "{url}/{params_path}".format(
        url=self.BROWSE_QUOTES_SERVICE_URL,
        params_path=self._construct_params(
            params, self._REQ_PARAMS, self._OPT_PARAMS)
    )
    return self.make_request(
        service_url,
        headers=self._headers(),
        **params
    )
python
[ "def", "get_cheapest_quotes", "(", "self", ",", "*", "*", "params", ")", ":", "service_url", "=", "\"{url}/{params_path}\"", ".", "format", "(", "url", "=", "self", ".", "BROWSE_QUOTES_SERVICE_URL", ",", "params_path", "=", "self", ".", "_construct_params", "(", "params", ",", "self", ".", "_REQ_PARAMS", ",", "self", ".", "_OPT_PARAMS", ")", ")", "return", "self", ".", "make_request", "(", "service_url", ",", "headers", "=", "self", ".", "_headers", "(", ")", ",", "*", "*", "params", ")" ]
{API_HOST}/apiservices/browsequotes/v1.0/{market}/{currency}/{locale}/
{originPlace}/{destinationPlace}/
{outboundPartialDate}/{inboundPartialDate}
?apiKey={apiKey}
[ "{", "API_HOST", "}", "/", "apiservices", "/", "browsequotes", "/", "v1", ".", "0", "/", "{", "market", "}", "/", "{", "currency", "}", "/", "{", "locale", "}", "/", "{", "originPlace", "}", "/", "{", "destinationPlace", "}", "/", "{", "outboundPartialDate", "}", "/", "{", "inboundPartialDate", "}", "?apiKey", "=", "{", "apiKey", "}" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L522-L538
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
FlightsCache.get_grid_prices_by_date
def get_grid_prices_by_date(self, **params):
    """
    {API_HOST}/apiservices/browsegrid/v1.0/{market}/{currency}/{locale}/
    {originPlace}/{destinationPlace}/
    {outboundPartialDate}/{inboundPartialDate}
    ?apiKey={apiKey}
    """
    service_url = "{url}/{params_path}".format(
        url=self.BROWSE_GRID_SERVICE_URL,
        params_path=self._construct_params(
            params, self._REQ_PARAMS, self._OPT_PARAMS)
    )
    return self.make_request(
        service_url,
        headers=self._headers(),
        **params
    )
python
[ "def", "get_grid_prices_by_date", "(", "self", ",", "*", "*", "params", ")", ":", "service_url", "=", "\"{url}/{params_path}\"", ".", "format", "(", "url", "=", "self", ".", "BROWSE_GRID_SERVICE_URL", ",", "params_path", "=", "self", ".", "_construct_params", "(", "params", ",", "self", ".", "_REQ_PARAMS", ",", "self", ".", "_OPT_PARAMS", ")", ")", "return", "self", ".", "make_request", "(", "service_url", ",", "headers", "=", "self", ".", "_headers", "(", ")", ",", "*", "*", "params", ")" ]
{API_HOST}/apiservices/browsegrid/v1.0/{market}/{currency}/{locale}/
{originPlace}/{destinationPlace}/
{outboundPartialDate}/{inboundPartialDate}
?apiKey={apiKey}
[ "{", "API_HOST", "}", "/", "apiservices", "/", "browsegrid", "/", "v1", ".", "0", "/", "{", "market", "}", "/", "{", "currency", "}", "/", "{", "locale", "}", "/", "{", "originPlace", "}", "/", "{", "destinationPlace", "}", "/", "{", "outboundPartialDate", "}", "/", "{", "inboundPartialDate", "}", "?apiKey", "=", "{", "apiKey", "}" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L540-L556
Skyscanner/skyscanner-python-sdk
skyscanner/skyscanner.py
CarHire.create_session
def create_session(self, **params):
    """
    Create the session
    date format: YYYY-MM-DDThh:mm
    location: ISO code
    """
    required_keys = ('market', 'currency', 'locale',
                     'pickupplace', 'dropoffplace',
                     'pickupdatetime', 'dropoffdatetime',
                     'driverage')
    service_url = "{url}/{params_path}".format(
        url=self.PRICING_SESSION_URL,
        params_path=self._construct_params(params, required_keys)
    )
    poll_path = self.make_request(
        service_url,
        headers=self._session_headers(),
        callback=lambda resp: resp.headers['location'],
        userip=params['userip'])
    return "{url}{path}".format(url=self.API_HOST, path=poll_path)
python
[ "def", "create_session", "(", "self", ",", "*", "*", "params", ")", ":", "required_keys", "=", "(", "'market'", ",", "'currency'", ",", "'locale'", ",", "'pickupplace'", ",", "'dropoffplace'", ",", "'pickupdatetime'", ",", "'dropoffdatetime'", ",", "'driverage'", ")", "service_url", "=", "\"{url}/{params_path}\"", ".", "format", "(", "url", "=", "self", ".", "PRICING_SESSION_URL", ",", "params_path", "=", "self", ".", "_construct_params", "(", "params", ",", "required_keys", ")", ")", "poll_path", "=", "self", ".", "make_request", "(", "service_url", ",", "headers", "=", "self", ".", "_session_headers", "(", ")", ",", "callback", "=", "lambda", "resp", ":", "resp", ".", "headers", "[", "'location'", "]", ",", "userip", "=", "params", "[", "'userip'", "]", ")", "return", "\"{url}{path}\"", ".", "format", "(", "url", "=", "self", ".", "API_HOST", ",", "path", "=", "poll_path", ")" ]
Create the session
date format: YYYY-MM-DDThh:mm
location: ISO code
[ "Create", "the", "session", "date", "format", ":", "YYYY", "-", "MM", "-", "DDThh", ":", "mm", "location", ":", "ISO", "code" ]
train
https://github.com/Skyscanner/skyscanner-python-sdk/blob/26ce4a563f538a689f2a29063f3604731703ddac/skyscanner/skyscanner.py#L574-L596
jlmadurga/django-telegram-bot
telegrambot/templatetags/telegrambot_filters.py
keyboard_field
def keyboard_field(value, args=None):
    """
    Format keyboard /command field.
    """
    qs = QueryDict(args)
    per_line = qs.get('per_line', 1)
    field = qs.get("field", "slug")
    command = qs.get("command")
    convert = lambda element: "/" + command + " " + str(getattr(element, field))
    group = lambda flat, size: [flat[i:i + size]
                                for i in range(0, len(flat), size)]
    grouped = group(value, int(per_line))
    new_list = []
    for line in grouped:
        new_list.append([convert(e) for e in line])
    return str(new_list).encode('utf-8')
python
[ "def", "keyboard_field", "(", "value", ",", "args", "=", "None", ")", ":", "qs", "=", "QueryDict", "(", "args", ")", "per_line", "=", "qs", ".", "get", "(", "'per_line'", ",", "1", ")", "field", "=", "qs", ".", "get", "(", "\"field\"", ",", "\"slug\"", ")", "command", "=", "qs", ".", "get", "(", "\"command\"", ")", "convert", "=", "lambda", "element", ":", "\"/\"", "+", "command", "+", "\" \"", "+", "str", "(", "getattr", "(", "element", ",", "field", ")", ")", "group", "=", "lambda", "flat", ",", "size", ":", "[", "flat", "[", "i", ":", "i", "+", "size", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "flat", ")", ",", "size", ")", "]", "grouped", "=", "group", "(", "value", ",", "int", "(", "per_line", ")", ")", "new_list", "=", "[", "]", "for", "line", "in", "grouped", ":", "new_list", ".", "append", "(", "[", "convert", "(", "e", ")", "for", "e", "in", "line", "]", ")", "return", "str", "(", "new_list", ")", ".", "encode", "(", "'utf-8'", ")" ]
Format keyboard /command field.
[ "Format", "keyboard", "/", "command", "field", "." ]
train
https://github.com/jlmadurga/django-telegram-bot/blob/becbc86a9735c794828eb5da39bd59647104ba34/telegrambot/templatetags/telegrambot_filters.py#L6-L20
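The two lambdas inside keyboard_field do the real work: one renders "/command value" strings, the other chunks a flat list into keyboard rows. The chunking helper in isolation:

group = lambda flat, size: [flat[i:i + size]
                            for i in range(0, len(flat), size)]
print(group(['a', 'b', 'c', 'd', 'e'], 2))
# [['a', 'b'], ['c', 'd'], ['e']] - a short final row is kept as-is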
jlmadurga/django-telegram-bot
telegrambot/bot_views/generic/detail.py
DetailCommandView.get_queryset
def get_queryset(self):
    """
    Return the `QuerySet` that will be used to look up the object.
    Note that this method is called by the default implementation of
    `get_object` and may not be called if `get_object` is overridden.
    """
    if self.queryset is None:
        if self.model:
            return self.model._default_manager.all()
        else:
            raise ImproperlyConfigured(
                "%(cls)s is missing a QuerySet. Define "
                "%(cls)s.model, %(cls)s.queryset, or override "
                "%(cls)s.get_queryset()." % {
                    'cls': self.__class__.__name__
                }
            )
    return self.queryset.all()
python
[ "def", "get_queryset", "(", "self", ")", ":", "if", "self", ".", "queryset", "is", "None", ":", "if", "self", ".", "model", ":", "return", "self", ".", "model", ".", "_default_manager", ".", "all", "(", ")", "else", ":", "raise", "ImproperlyConfigured", "(", "\"%(cls)s is missing a QuerySet. Define \"", "\"%(cls)s.model, %(cls)s.queryset, or override \"", "\"%(cls)s.get_queryset().\"", "%", "{", "'cls'", ":", "self", ".", "__class__", ".", "__name__", "}", ")", "return", "self", ".", "queryset", ".", "all", "(", ")" ]
Return the `QuerySet` that will be used to look up the object. Note that this method is called by the default implementation of `get_object` and may not be called if `get_object` is overridden.
[ "Return", "the", "QuerySet", "that", "will", "be", "used", "to", "look", "up", "the", "object", ".", "Note", "that", "this", "method", "is", "called", "by", "the", "default", "implementation", "of", "get_object", "and", "may", "not", "be", "called", "if", "get_object", "is", "overridden", "." ]
train
https://github.com/jlmadurga/django-telegram-bot/blob/becbc86a9735c794828eb5da39bd59647104ba34/telegrambot/bot_views/generic/detail.py#L20-L37
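A sketch of how a subclass would drive this resolution order; Author here is a hypothetical Django model, not something from this record:

class AuthorDetailCommandView(DetailCommandView):
    model = Author  # used only because queryset is None
    # queryset = Author.objects.filter(public=True)  # would take precedence
    # with neither attribute set, get_queryset() raises ImproperlyConfigured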
jlmadurga/django-telegram-bot
telegrambot/bot_views/decorators.py
login_required
def login_required(view_func):
    """
    Decorator for command views that checks that the chat is authenticated,
    sends message with link for authenticated if necessary.
    """
    @wraps(view_func)
    def wrapper(bot, update, **kwargs):
        chat = Chat.objects.get(id=update.message.chat.id)
        if chat.is_authenticated():
            return view_func(bot, update, **kwargs)
        from telegrambot.bot_views.login import LoginBotView
        login_command_view = LoginBotView.as_command_view()
        bot_model = Bot.objects.get(token=bot.token)
        kwargs['link'] = reverse('telegrambot:auth',
                                 kwargs={'bot': bot_model.user_api.username})
        return login_command_view(bot, update, **kwargs)
    return wrapper
python
[ "def", "login_required", "(", "view_func", ")", ":", "@", "wraps", "(", "view_func", ")", "def", "wrapper", "(", "bot", ",", "update", ",", "*", "*", "kwargs", ")", ":", "chat", "=", "Chat", ".", "objects", ".", "get", "(", "id", "=", "update", ".", "message", ".", "chat", ".", "id", ")", "if", "chat", ".", "is_authenticated", "(", ")", ":", "return", "view_func", "(", "bot", ",", "update", ",", "*", "*", "kwargs", ")", "from", "telegrambot", ".", "bot_views", ".", "login", "import", "LoginBotView", "login_command_view", "=", "LoginBotView", ".", "as_command_view", "(", ")", "bot_model", "=", "Bot", ".", "objects", ".", "get", "(", "token", "=", "bot", ".", "token", ")", "kwargs", "[", "'link'", "]", "=", "reverse", "(", "'telegrambot:auth'", ",", "kwargs", "=", "{", "'bot'", ":", "bot_model", ".", "user_api", ".", "username", "}", ")", "return", "login_command_view", "(", "bot", ",", "update", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Decorator for command views that checks that the chat is authenticated, sends message with link for authenticated if necessary.
[ "Decorator", "for", "command", "views", "that", "checks", "that", "the", "chat", "is", "authenticated", "sends", "message", "with", "link", "for", "authenticated", "if", "necessary", "." ]
train
https://github.com/jlmadurga/django-telegram-bot/blob/becbc86a9735c794828eb5da39bd59647104ba34/telegrambot/bot_views/decorators.py#L6-L21
0101/pipetools
pipetools/decorators.py
pipe_util
def pipe_util(func):
    """
    Decorator that handles X objects and partial application for
    pipe-utils.
    """
    @wraps(func)
    def pipe_util_wrapper(function, *args, **kwargs):
        if isinstance(function, XObject):
            function = ~function

        original_function = function

        if args or kwargs:
            function = xpartial(function, *args, **kwargs)

        name = lambda: '%s(%s)' % (get_name(func), ', '.join(
            filter(None, (get_name(original_function),
                          repr_args(*args, **kwargs)))))

        f = func(function)

        result = pipe | set_name(name, f)

        # if the util defines an 'attrs' mapping, copy it as attributes
        # to the result
        attrs = getattr(f, 'attrs', {})
        for k, v in dict_items(attrs):
            setattr(result, k, v)

        return result

    return pipe_util_wrapper
python
[ "def", "pipe_util", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "pipe_util_wrapper", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "function", ",", "XObject", ")", ":", "function", "=", "~", "function", "original_function", "=", "function", "if", "args", "or", "kwargs", ":", "function", "=", "xpartial", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", "name", "=", "lambda", ":", "'%s(%s)'", "%", "(", "get_name", "(", "func", ")", ",", "', '", ".", "join", "(", "filter", "(", "None", ",", "(", "get_name", "(", "original_function", ")", ",", "repr_args", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", ")", ")", ")", "f", "=", "func", "(", "function", ")", "result", "=", "pipe", "|", "set_name", "(", "name", ",", "f", ")", "# if the util defines an 'attrs' mapping, copy it as attributes", "# to the result", "attrs", "=", "getattr", "(", "f", ",", "'attrs'", ",", "{", "}", ")", "for", "k", ",", "v", "in", "dict_items", "(", "attrs", ")", ":", "setattr", "(", "result", ",", "k", ",", "v", ")", "return", "result", "return", "pipe_util_wrapper" ]
Decorator that handles X objects and partial application for pipe-utils.
[ "Decorator", "that", "handles", "X", "objects", "and", "partial", "application", "for", "pipe", "-", "utils", "." ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/decorators.py#L10-L39
0101/pipetools
pipetools/decorators.py
auto_string_formatter
def auto_string_formatter(func):
    """
    Decorator that handles automatic string formatting.

    By converting a string argument to a function that does formatting
    on said string.
    """
    @wraps(func)
    def auto_string_formatter_wrapper(function, *args, **kwargs):
        if isinstance(function, string_types):
            function = StringFormatter(function)

        return func(function, *args, **kwargs)

    return auto_string_formatter_wrapper
python
[ "def", "auto_string_formatter", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "auto_string_formatter_wrapper", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "function", ",", "string_types", ")", ":", "function", "=", "StringFormatter", "(", "function", ")", "return", "func", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "auto_string_formatter_wrapper" ]
Decorator that handles automatic string formatting. By converting a string argument to a function that does formatting on said string.
[ "Decorator", "that", "handles", "automatic", "string", "formatting", "." ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/decorators.py#L42-L56
0101/pipetools
pipetools/decorators.py
data_structure_builder
def data_structure_builder(func):
    """
    Decorator to handle automatic data structure creation for pipe-utils.
    """
    @wraps(func)
    def ds_builder_wrapper(function, *args, **kwargs):
        try:
            function = DSBuilder(function)
        except NoBuilder:
            pass
        return func(function, *args, **kwargs)

    return ds_builder_wrapper
python
[ "def", "data_structure_builder", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "ds_builder_wrapper", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "function", "=", "DSBuilder", "(", "function", ")", "except", "NoBuilder", ":", "pass", "return", "func", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "ds_builder_wrapper" ]
Decorator to handle automatic data structure creation for pipe-utils.
[ "Decorator", "to", "handle", "automatic", "data", "structure", "creation", "for", "pipe", "-", "utils", "." ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/decorators.py#L59-L71
0101/pipetools
pipetools/decorators.py
regex_condition
def regex_condition(func):
    """
    If a condition is given as string instead of a function, it is turned
    into a regex-matching function.
    """
    @wraps(func)
    def regex_condition_wrapper(condition, *args, **kwargs):
        if isinstance(condition, string_types):
            condition = maybe | partial(re.match, condition)
        return func(condition, *args, **kwargs)

    return regex_condition_wrapper
python
[ "def", "regex_condition", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "regex_condition_wrapper", "(", "condition", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "condition", ",", "string_types", ")", ":", "condition", "=", "maybe", "|", "partial", "(", "re", ".", "match", ",", "condition", ")", "return", "func", "(", "condition", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "regex_condition_wrapper" ]
If a condition is given as string instead of a function, it is turned into a regex-matching function.
[ "If", "a", "condition", "is", "given", "as", "string", "instead", "of", "a", "function", "it", "is", "turned", "into", "a", "regex", "-", "matching", "function", "." ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/decorators.py#L74-L84
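What the decorator does to a string condition, reduced to plain Python. pipetools' maybe pipe short-circuits on None input, which the explicit guard below stands in for:

import re
from functools import partial

condition = partial(re.match, r'foo.*')               # the converted condition
safe = lambda v: None if v is None else condition(v)  # 'maybe' stand-in
print(bool(safe('foobar')))  # True
print(bool(safe('bar')))     # False
print(safe(None))            # None, no TypeError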
0101/pipetools
pipetools/utils.py
sort_by
def sort_by(function):
    """
    Sorts an incoming sequence by using the given `function` as key.

    >>> range(10) > sort_by(-X)
    [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

    Supports automatic data-structure creation::

        users > sort_by([X.last_name, X.first_name])

    There is also a shortcut for ``sort_by(X)`` called ``sort``:

    >>> [4, 5, 8, -3, 0] > sort
    [-3, 0, 4, 5, 8]

    And (as of ``0.2.3``) a shortcut for reversing the sort:

    >>> 'asdfaSfa' > sort_by(X.lower()).descending
    ['s', 'S', 'f', 'f', 'd', 'a', 'a', 'a']
    """
    f = partial(sorted, key=function)
    f.attrs = {'descending': _descending_sort_by(function)}
    return f
python
[ "def", "sort_by", "(", "function", ")", ":", "f", "=", "partial", "(", "sorted", ",", "key", "=", "function", ")", "f", ".", "attrs", "=", "{", "'descending'", ":", "_descending_sort_by", "(", "function", ")", "}", "return", "f" ]
Sorts an incoming sequence by using the given `function` as key.

>>> range(10) > sort_by(-X)
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]

Supports automatic data-structure creation::

    users > sort_by([X.last_name, X.first_name])

There is also a shortcut for ``sort_by(X)`` called ``sort``:

>>> [4, 5, 8, -3, 0] > sort
[-3, 0, 4, 5, 8]

And (as of ``0.2.3``) a shortcut for reversing the sort:

>>> 'asdfaSfa' > sort_by(X.lower()).descending
['s', 'S', 'f', 'f', 'd', 'a', 'a', 'a']
[ "Sorts", "an", "incoming", "sequence", "by", "using", "the", "given", "function", "as", "key", "." ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/utils.py#L76-L99
0101/pipetools
pipetools/utils.py
take_first
def take_first(count):
    """
    Assumes an iterable on the input, returns an iterable with first
    `count` items from the input (or possibly less, if there isn't
    that many).

    >>> range(9000) > where(X % 100 == 0) | take_first(5) | tuple
    (0, 100, 200, 300, 400)
    """
    def _take_first(iterable):
        return islice(iterable, count)
    return pipe | set_name('take_first(%s)' % count, _take_first)
python
[ "def", "take_first", "(", "count", ")", ":", "def", "_take_first", "(", "iterable", ")", ":", "return", "islice", "(", "iterable", ",", "count", ")", "return", "pipe", "|", "set_name", "(", "'take_first(%s)'", "%", "count", ",", "_take_first", ")" ]
Assumes an iterable on the input, returns an iterable with first `count`
items from the input (or possibly less, if there isn't that many).

>>> range(9000) > where(X % 100 == 0) | take_first(5) | tuple
(0, 100, 200, 300, 400)
[ "Assumes", "an", "iterable", "on", "the", "input", "returns", "an", "iterable", "with", "first", "count", "items", "from", "the", "input", "(", "or", "possibly", "less", "if", "there", "isn", "t", "that", "many", ")", "." ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/utils.py#L163-L174
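take_first is a thin, name-preserving wrapper over itertools.islice; the underlying behaviour on its own:

from itertools import islice
print(tuple(islice(range(9000), 5)))  # (0, 1, 2, 3, 4)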
0101/pipetools
pipetools/utils.py
drop_first
def drop_first(count):
    """
    Assumes an iterable on the input, returns an iterable with identical
    items except for the first `count`.

    >>> range(10) > drop_first(5) | tuple
    (5, 6, 7, 8, 9)
    """
    def _drop_first(iterable):
        g = (x for x in range(1, count + 1))
        return dropwhile(
            lambda i: unless(StopIteration, lambda: next(g))(), iterable)
    return pipe | set_name('drop_first(%s)' % count, _drop_first)
python
[ "def", "drop_first", "(", "count", ")", ":", "def", "_drop_first", "(", "iterable", ")", ":", "g", "=", "(", "x", "for", "x", "in", "range", "(", "1", ",", "count", "+", "1", ")", ")", "return", "dropwhile", "(", "lambda", "i", ":", "unless", "(", "StopIteration", ",", "lambda", ":", "next", "(", "g", ")", ")", "(", ")", ",", "iterable", ")", "return", "pipe", "|", "set_name", "(", "'drop_first(%s)'", "%", "count", ",", "_drop_first", ")" ]
Assumes an iterable on the input, returns an iterable with identical
items except for the first `count`.

>>> range(10) > drop_first(5) | tuple
(5, 6, 7, 8, 9)
[ "Assumes", "an", "iterable", "on", "the", "input", "returns", "an", "iterable", "with", "identical", "items", "except", "for", "the", "first", "count", "." ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/utils.py#L177-L189
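The generator-plus-dropwhile dance above counts off the first `count` items lazily; islice with a start offset is the more familiar standard-library spelling of the same lazy drop:

from itertools import islice
print(tuple(islice(range(10), 5, None)))  # (5, 6, 7, 8, 9)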
0101/pipetools
pipetools/utils.py
unless
def unless(exception_class_or_tuple, func, *args, **kwargs):
    """
    When `exception_class_or_tuple` occurs while executing `func`, it will
    be caught and ``None`` will be returned.

    >>> f = where(X > 10) | list | unless(IndexError, X[0])
    >>> f([5, 8, 12, 4])
    12
    >>> f([1, 2, 3])
    None
    """
    @pipe_util
    @auto_string_formatter
    @data_structure_builder
    def construct_unless(function):
        # a wrapper so we can re-use the decorators
        def _unless(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            except exception_class_or_tuple:
                pass
        return _unless

    name = lambda: 'unless(%s, %s)' % (exception_class_or_tuple, ', '.join(
        filter(None, (get_name(func), repr_args(*args, **kwargs)))))

    return set_name(name, construct_unless(func, *args, **kwargs))
python
[ "def", "unless", "(", "exception_class_or_tuple", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "@", "pipe_util", "@", "auto_string_formatter", "@", "data_structure_builder", "def", "construct_unless", "(", "function", ")", ":", "# a wrapper so we can re-use the decorators", "def", "_unless", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "exception_class_or_tuple", ":", "pass", "return", "_unless", "name", "=", "lambda", ":", "'unless(%s, %s)'", "%", "(", "exception_class_or_tuple", ",", "', '", ".", "join", "(", "filter", "(", "None", ",", "(", "get_name", "(", "func", ")", ",", "repr_args", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", ")", ")", ")", "return", "set_name", "(", "name", ",", "construct_unless", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
When `exception_class_or_tuple` occurs while executing `func`, it will
be caught and ``None`` will be returned.

>>> f = where(X > 10) | list | unless(IndexError, X[0])
>>> f([5, 8, 12, 4])
12
>>> f([1, 2, 3])
None
[ "When", "exception_class_or_tuple", "occurs", "while", "executing", "func", "it", "will", "be", "caught", "and", "None", "will", "be", "returned", "." ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/utils.py#L192-L218
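Stripped of the pipetools naming and X-object sugar, unless reduces to a swallow-and-return-None wrapper; a minimal standalone sketch:

def swallow(exc_types, fn):
    def wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except exc_types:
            return None  # the exception is silenced
    return wrapped

first = swallow(IndexError, lambda xs: xs[0])
print(first([5, 8]))  # 5
print(first([]))      # None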
0101/pipetools
pipetools/utils.py
group_by
def group_by(function):
    """
    Groups input sequence by `function`.

    Returns an iterator over a sequence of tuples where the first item
    is a result of `function` and the second one a list of items matching
    this result. Ordering of the resulting iterator is undefined, but
    ordering of the items in the groups is preserved.

    >>> [1, 2, 3, 4, 5, 6] > group_by(X % 2) | list
    [(0, [2, 4, 6]), (1, [1, 3, 5])]
    """
    def _group_by(seq):
        result = {}
        for item in seq:
            result.setdefault(function(item), []).append(item)
        return dict_items(result)
    return _group_by
python
[ "def", "group_by", "(", "function", ")", ":", "def", "_group_by", "(", "seq", ")", ":", "result", "=", "{", "}", "for", "item", "in", "seq", ":", "result", ".", "setdefault", "(", "function", "(", "item", ")", ",", "[", "]", ")", ".", "append", "(", "item", ")", "return", "dict_items", "(", "result", ")", "return", "_group_by" ]
Groups input sequence by `function`.

Returns an iterator over a sequence of tuples where the first item
is a result of `function` and the second one a list of items matching
this result. Ordering of the resulting iterator is undefined, but
ordering of the items in the groups is preserved.

>>> [1, 2, 3, 4, 5, 6] > group_by(X % 2) | list
[(0, [2, 4, 6]), (1, [1, 3, 5])]
[ "Groups", "input", "sequence", "by", "function", "." ]
train
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/utils.py#L254-L274
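The same accumulate-into-a-dict idea with collections.defaultdict; unlike itertools.groupby, no pre-sorting is required because groups are collected by key rather than by adjacency:

from collections import defaultdict

def group_by_plain(key, seq):
    result = defaultdict(list)
    for item in seq:
        result[key(item)].append(item)
    return list(result.items())

print(group_by_plain(lambda x: x % 2, [1, 2, 3, 4, 5, 6]))
# [(1, [1, 3, 5]), (0, [2, 4, 6])] - group order follows first appearance here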