Dataset schema (column: type, observed value range):

repo              string, 7–55 chars
path              string, 4–127 chars
func_name         string, 1–88 chars
original_string   string, 75–19.8k chars
language          string, 1 distinct value
code              string, 75–19.8k chars (identical to original_string in the rows below)
code_tokens       list
docstring         string, 3–17.3k chars
docstring_tokens  list
sha               string, 40 chars
url               string, 87–242 chars
partition         string, 1 distinct value
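A minimal sketch of consuming rows that follow this schema, assuming the split is exported as JSON Lines (the file name and storage format are assumptions, not part of the schema above):

import json

with open('train.jsonl') as f:  # hypothetical export of the 'train' partition
    for line in f:
        row = json.loads(line)
        # Each row pairs a function's source with its docstring and token lists.
        print(row['repo'], row['func_name'], len(row['code_tokens']))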
bradmontgomery/django-blargg
blargg/models.py
TagManager.create_tags
def create_tags(self, entry):
    """Inspects an ``Entry`` instance, and builds and associates ``Tag``
    objects based on the values in the ``Entry``'s ``tag_string``."""
    tag_list = [t.lower().strip() for t in entry.tag_string.split(',')]
    for t in tag_list:
        tag, created = self.get_or_create(name=t)
        entry.tags.add(tag)
python
[ "def", "create_tags", "(", "self", ",", "entry", ")", ":", "tag_list", "=", "[", "t", ".", "lower", "(", ")", ".", "strip", "(", ")", "for", "t", "in", "entry", ".", "tag_string", ".", "split", "(", "','", ")", "]", "for", "t", "in", "tag_list", ":", "tag", ",", "created", "=", "self", ".", "get_or_create", "(", "name", "=", "t", ")", "entry", ".", "tags", ".", "add", "(", "tag", ")" ]
Inspects an ``Entry`` instance, and builds and associates ``Tag`` objects based on the values in the ``Entry``'s ``tag_string``.
[ "Inspects", "an", "Entry", "instance", "and", "builds", "associates", "Tag", "objects", "based", "on", "the", "values", "in", "the", "Entry", "s", "tag_string", "." ]
5d683e04723889a0d1c6d6cf1a67a3d431a2e617
https://github.com/bradmontgomery/django-blargg/blob/5d683e04723889a0d1c6d6cf1a67a3d431a2e617/blargg/models.py#L33-L39
train
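A hedged usage sketch for the manager above, assuming ``TagManager`` is installed as ``Tag.objects`` and that ``Entry`` exposes the ``tag_string`` field and ``tags`` many-to-many seen in the method (any other field names are hypothetical):

entry = Entry.objects.create(title='First post', tag_string='Django, Blogging , python')
Tag.objects.create_tags(entry)
# Names are lowercased and stripped before get_or_create:
print(sorted(t.name for t in entry.tags.all()))  # ['blogging', 'django', 'python']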
bradmontgomery/django-blargg
blargg/models.py
Entry._create_date_slug
def _create_date_slug(self):
    """Prefixes the slug with the ``published_on`` date."""
    if not self.pk:
        # haven't saved this yet, so use today's date
        d = utc_now()
    elif self.published and self.published_on:
        # use the actual published on date
        d = self.published_on
    elif self.updated_on:
        # default to the last-updated date
        d = self.updated_on
    # NOTE: a saved but unpublished entry with no ``updated_on`` value would
    # leave ``d`` unbound and raise an UnboundLocalError on the next line.
    self.date_slug = u"{0}/{1}".format(d.strftime("%Y/%m/%d"), self.slug)
python
[ "def", "_create_date_slug", "(", "self", ")", ":", "if", "not", "self", ".", "pk", ":", "# haven't saved this yet, so use today's date", "d", "=", "utc_now", "(", ")", "elif", "self", ".", "published", "and", "self", ".", "published_on", ":", "# use the actual published on date", "d", "=", "self", ".", "published_on", "elif", "self", ".", "updated_on", ":", "# default to the last-updated date", "d", "=", "self", ".", "updated_on", "self", ".", "date_slug", "=", "u\"{0}/{1}\"", ".", "format", "(", "d", ".", "strftime", "(", "\"%Y/%m/%d\"", ")", ",", "self", ".", "slug", ")" ]
Prefixes the slug with the ``published_on`` date.
[ "Prefixes", "the", "slug", "with", "the", "published_on", "date", "." ]
5d683e04723889a0d1c6d6cf1a67a3d431a2e617
https://github.com/bradmontgomery/django-blargg/blob/5d683e04723889a0d1c6d6cf1a67a3d431a2e617/blargg/models.py#L126-L137
train
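For illustration, the resulting ``date_slug`` is just a ``%Y/%m/%d`` prefix joined to the existing slug; the same formatting in isolation:

from datetime import datetime

d = datetime(2015, 5, 3)
print(u"{0}/{1}".format(d.strftime("%Y/%m/%d"), "hello-world"))
# -> 2015/05/03/hello-world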
bradmontgomery/django-blargg
blargg/models.py
Entry._render_content
def _render_content(self):
    """Renders the content according to the ``content_format``."""
    if self.content_format == "rst" and docutils_publish is not None:
        doc_parts = docutils_publish(
            source=self.raw_content,
            writer_name="html4css1"
        )
        self.rendered_content = doc_parts['fragment']
    elif self.content_format == "rst" and docutils_publish is None:
        raise RuntimeError("Install docutils to publish reStructuredText")
    elif self.content_format == "md" and markdown is not None:
        self.rendered_content = markdown(self.raw_content)
    elif self.content_format == "md" and markdown is None:
        raise RuntimeError("Install Markdown to publish markdown")
    else:
        # Assume we've got html
        self.rendered_content = self.raw_content
python
[ "def", "_render_content", "(", "self", ")", ":", "if", "self", ".", "content_format", "==", "\"rst\"", "and", "docutils_publish", "is", "not", "None", ":", "doc_parts", "=", "docutils_publish", "(", "source", "=", "self", ".", "raw_content", ",", "writer_name", "=", "\"html4css1\"", ")", "self", ".", "rendered_content", "=", "doc_parts", "[", "'fragment'", "]", "elif", "self", ".", "content_format", "==", "\"rs\"", "and", "docutils_publish", "is", "None", ":", "raise", "RuntimeError", "(", "\"Install docutils to pubilsh reStructuredText\"", ")", "elif", "self", ".", "content_format", "==", "\"md\"", "and", "markdown", "is", "not", "None", ":", "self", ".", "rendered_content", "=", "markdown", "(", "self", ".", "raw_content", ")", "elif", "self", ".", "content_format", "==", "\"md\"", "and", "markdown", "is", "None", ":", "raise", "RuntimeError", "(", "\"Install Markdown to pubilsh markdown\"", ")", "else", ":", "# Assume we've got html", "self", ".", "rendered_content", "=", "self", ".", "raw_content" ]
Renders the content according to the ``content_format``.
[ "Renders", "the", "content", "according", "to", "the", "content_format", "." ]
5d683e04723889a0d1c6d6cf1a67a3d431a2e617
https://github.com/bradmontgomery/django-blargg/blob/5d683e04723889a0d1c6d6cf1a67a3d431a2e617/blargg/models.py#L139-L154
train
bradmontgomery/django-blargg
blargg/models.py
Entry.save
def save(self, *args, **kwargs):
    """Auto-generate a slug from the name."""
    self._create_slug()
    self._create_date_slug()
    self._render_content()

    # Call ``_set_published`` the *first* time this Entry is published.
    # NOTE: if this is unpublished, and then republished, this method won't
    # get called; e.g. the date won't get changed and the
    # ``entry_published`` signal won't get re-sent.
    send_published_signal = False
    if self.published and self.published_on is None:
        send_published_signal = self._set_published()

    super(Entry, self).save(*args, **kwargs)

    # We need an ID before we can send this signal.
    if send_published_signal:
        entry_published.send(sender=self, entry=self)
python
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_create_slug", "(", ")", "self", ".", "_create_date_slug", "(", ")", "self", ".", "_render_content", "(", ")", "# Call ``_set_published`` the *first* time this Entry is published.", "# NOTE: if this is unpublished, and then republished, this method won't", "# get called; e.g. the date won't get changed and the", "# ``entry_published`` signal won't get re-sent.", "send_published_signal", "=", "False", "if", "self", ".", "published", "and", "self", ".", "published_on", "is", "None", ":", "send_published_signal", "=", "self", ".", "_set_published", "(", ")", "super", "(", "Entry", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# We need an ID before we can send this signal.", "if", "send_published_signal", ":", "entry_published", ".", "send", "(", "sender", "=", "self", ",", "entry", "=", "self", ")" ]
Auto-generate a slug from the name.
[ "Auto", "-", "generate", "a", "slug", "from", "the", "name", "." ]
5d683e04723889a0d1c6d6cf1a67a3d431a2e617
https://github.com/bradmontgomery/django-blargg/blob/5d683e04723889a0d1c6d6cf1a67a3d431a2e617/blargg/models.py#L163-L181
train
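Since ``save`` emits ``entry_published`` only on the first publish, consumers can hook it like any Django signal; a sketch, where the signal's import path is an assumption:

from django.dispatch import receiver
from blargg.signals import entry_published  # import path is an assumption

@receiver(entry_published)
def notify_on_publish(sender, entry, **kwargs):
    # Fires once per Entry, the first time it is saved as published.
    print('Published:', entry.slug)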
bradmontgomery/django-blargg
blargg/models.py
Entry.get_absolute_url_with_date
def get_absolute_url_with_date(self):
    """URL based on the entry's date & slug."""
    pub_date = self.published_on
    if pub_date and settings.USE_TZ:
        # If TZ is enabled, convert all of these dates from UTC to whatever
        # the project's timezone is set as. Ideally, we'd pull this from
        # some user settings, but the *canonical* publish time is that of
        # the author (assuming author == owner of this project).
        pub_date = make_naive(pub_date, pytz.utc)  # Make naive
        pub_date = pytz.timezone(settings.TIME_ZONE).localize(pub_date)

    if pub_date:
        args = [
            pub_date.strftime("%Y"),
            pub_date.strftime("%m"),
            pub_date.strftime("%d"),
            self.slug
        ]
    else:
        args = [self.slug]
    return reverse('blargg:entry_detail', args=args)
python
[ "def", "get_absolute_url_with_date", "(", "self", ")", ":", "pub_date", "=", "self", ".", "published_on", "if", "pub_date", "and", "settings", ".", "USE_TZ", ":", "# If TZ is enabled, convert all of these dates from UTC to whatever", "# the project's timezone is set as. Ideally, we'd pull this form", "# some user settings, but the *canonical* publish time is that of", "# the author (asssuming author == owner of this project).", "pub_date", "=", "make_naive", "(", "pub_date", ",", "pytz", ".", "utc", ")", "# Make naive", "pub_date", "=", "pytz", ".", "timezone", "(", "settings", ".", "TIME_ZONE", ")", ".", "localize", "(", "pub_date", ")", "if", "pub_date", ":", "args", "=", "[", "pub_date", ".", "strftime", "(", "\"%Y\"", ")", ",", "pub_date", ".", "strftime", "(", "\"%m\"", ")", ",", "pub_date", ".", "strftime", "(", "\"%d\"", ")", ",", "self", ".", "slug", "]", "else", ":", "args", "=", "[", "self", ".", "slug", "]", "return", "reverse", "(", "'blargg:entry_detail'", ",", "args", "=", "args", ")" ]
URL based on the entry's date & slug.
[ "URL", "based", "on", "the", "entry", "s", "date", "&", "slug", "." ]
5d683e04723889a0d1c6d6cf1a67a3d431a2e617
https://github.com/bradmontgomery/django-blargg/blob/5d683e04723889a0d1c6d6cf1a67a3d431a2e617/blargg/models.py#L187-L207
train
bradmontgomery/django-blargg
blargg/models.py
Entry.tag_list
def tag_list(self):
    """Return a plain python list containing all of this Entry's tags."""
    tags = [tag.strip() for tag in self.tag_string.split(",")]
    return sorted(filter(None, tags))
python
[ "def", "tag_list", "(", "self", ")", ":", "tags", "=", "[", "tag", ".", "strip", "(", ")", "for", "tag", "in", "self", ".", "tag_string", ".", "split", "(", "\",\"", ")", "]", "return", "sorted", "(", "filter", "(", "None", ",", "tags", ")", ")" ]
Return a plain python list containing all of this Entry's tags.
[ "Return", "a", "plain", "python", "list", "containing", "all", "of", "this", "Entry", "s", "tags", "." ]
5d683e04723889a0d1c6d6cf1a67a3d431a2e617
https://github.com/bradmontgomery/django-blargg/blob/5d683e04723889a0d1c6d6cf1a67a3d431a2e617/blargg/models.py#L220-L223
train
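Note the case-sensitive sort and the ``filter(None, ...)`` that drops empty segments; the same logic standalone:

tag_string = "python, Django , ,blogging"
tags = [tag.strip() for tag in tag_string.split(",")]
print(sorted(filter(None, tags)))  # ['Django', 'blogging', 'python']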
striglia/stockfighter
stockfighter/stockfighter.py
Stockfighter.heartbeat
def heartbeat(self):
    """Check The API Is Up.

    https://starfighter.readme.io/docs/heartbeat
    """
    url = urljoin(self.base_url, 'heartbeat')
    return self.session.get(url).json()['ok']
python
[ "def", "heartbeat", "(", "self", ")", ":", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "'heartbeat'", ")", "return", "self", ".", "session", ".", "get", "(", "url", ")", ".", "json", "(", ")", "[", "'ok'", "]" ]
Check The API Is Up. https://starfighter.readme.io/docs/heartbeat
[ "Check", "The", "API", "Is", "Up", "." ]
df908f5919d6f861601cd00c906a049d04253d47
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L24-L30
train
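A hedged usage sketch; the ``Stockfighter`` constructor arguments are an assumption inferred from the attributes the methods use (``base_url``, ``session``, ``venue``, ``account``):

api = Stockfighter(venue='TESTEX', account='EXB123456', api_key='...')  # hypothetical signature
if api.heartbeat():
    print('Stockfighter API is up')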
striglia/stockfighter
stockfighter/stockfighter.py
Stockfighter.venue_healthcheck
def venue_healthcheck(self):
    """Check A Venue Is Up.

    https://starfighter.readme.io/docs/venue-healthcheck
    """
    # NOTE: the venue is hard-coded to TESTEX rather than using self.venue.
    url = urljoin(self.base_url, 'venues/TESTEX/heartbeat')
    return self.session.get(url).json()['ok']
python
[ "def", "venue_healthcheck", "(", "self", ")", ":", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "'venues/TESTEX/heartbeat'", ")", "return", "self", ".", "session", ".", "get", "(", "url", ")", ".", "json", "(", ")", "[", "'ok'", "]" ]
Check A Venue Is Up. https://starfighter.readme.io/docs/venue-healthcheck
[ "Check", "A", "Venue", "Is", "Up", "." ]
df908f5919d6f861601cd00c906a049d04253d47
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L32-L38
train
striglia/stockfighter
stockfighter/stockfighter.py
Stockfighter.venue_stocks
def venue_stocks(self):
    """List the stocks available for trading on the venue.

    https://starfighter.readme.io/docs/list-stocks-on-venue
    """
    url = urljoin(self.base_url, 'venues/{0}/stocks'.format(self.venue))
    return self.session.get(url).json()
python
[ "def", "venue_stocks", "(", "self", ")", ":", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "'venues/{0}/stocks'", ".", "format", "(", "self", ".", "venue", ")", ")", "return", "self", ".", "session", ".", "get", "(", "url", ")", ".", "json", "(", ")" ]
List the stocks available for trading on the venue. https://starfighter.readme.io/docs/list-stocks-on-venue
[ "List", "the", "stocks", "available", "for", "trading", "on", "the", "venue", "." ]
df908f5919d6f861601cd00c906a049d04253d47
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L40-L46
train
striglia/stockfighter
stockfighter/stockfighter.py
Stockfighter.orderbook_for_stock
def orderbook_for_stock(self, stock):
    """Get the orderbook for a particular stock.

    https://starfighter.readme.io/docs/get-orderbook-for-stock
    """
    url_fragment = 'venues/{venue}/stocks/{stock}'.format(
        venue=self.venue,
        stock=stock,
    )
    url = urljoin(self.base_url, url_fragment)
    return self.session.get(url).json()
python
[ "def", "orderbook_for_stock", "(", "self", ",", "stock", ")", ":", "url_fragment", "=", "'venues/{venue}/stocks/{stock}'", ".", "format", "(", "venue", "=", "self", ".", "venue", ",", "stock", "=", "stock", ",", ")", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "url_fragment", ")", "return", "self", ".", "session", ".", "get", "(", "url", ")", ".", "json", "(", ")" ]
Get the orderbook for a particular stock. https://starfighter.readme.io/docs/get-orderbook-for-stock
[ "Get", "the", "orderbook", "for", "a", "particular", "stock", "." ]
df908f5919d6f861601cd00c906a049d04253d47
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L48-L58
train
striglia/stockfighter
stockfighter/stockfighter.py
Stockfighter.place_new_order
def place_new_order(self, stock, price, qty, direction, order_type):
    """Place an order for a stock.

    https://starfighter.readme.io/docs/place-new-order
    """
    url_fragment = 'venues/{venue}/stocks/{stock}/orders'.format(
        venue=self.venue,
        stock=stock,
    )
    data = {
        "stock": stock,
        "price": price,
        "venue": self.venue,
        "account": self.account,
        "qty": qty,
        "direction": direction,
        "orderType": order_type,
    }
    url = urljoin(self.base_url, url_fragment)
    resp = self.session.post(url, json=data)
    return resp.json()
python
[ "def", "place_new_order", "(", "self", ",", "stock", ",", "price", ",", "qty", ",", "direction", ",", "order_type", ")", ":", "url_fragment", "=", "'venues/{venue}/stocks/{stock}/orders'", ".", "format", "(", "venue", "=", "self", ".", "venue", ",", "stock", "=", "stock", ",", ")", "data", "=", "{", "\"stock\"", ":", "stock", ",", "\"price\"", ":", "price", ",", "\"venue\"", ":", "self", ".", "venue", ",", "\"account\"", ":", "self", ".", "account", ",", "\"qty\"", ":", "qty", ",", "\"direction\"", ":", "direction", ",", "\"orderType\"", ":", "order_type", ",", "}", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "url_fragment", ")", "resp", "=", "self", ".", "session", ".", "post", "(", "url", ",", "json", "=", "data", ")", "return", "resp", ".", "json", "(", ")" ]
Place an order for a stock. https://starfighter.readme.io/docs/place-new-order
[ "Place", "an", "order", "for", "a", "stock", "." ]
df908f5919d6f861601cd00c906a049d04253d47
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L60-L80
train
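Continuing the hypothetical client above, a limit-order sketch; prices are in the API's integer minor units, and the ``direction``/``orderType`` vocabularies follow the linked docs:

order = api.place_new_order(
    stock='FOOBAR',     # hypothetical ticker symbol
    price=10000,        # e.g. $100.00 expressed in cents
    qty=100,
    direction='buy',
    order_type='limit',
)
print(order.get('ok'), order.get('id'))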
striglia/stockfighter
stockfighter/stockfighter.py
Stockfighter.status_for_order
def status_for_order(self, order_id, stock):
    """Status For An Existing Order

    https://starfighter.readme.io/docs/status-for-an-existing-order
    """
    url_fragment = 'venues/{venue}/stocks/{stock}/orders/{order_id}'.format(
        venue=self.venue,
        stock=stock,
        order_id=order_id,
    )
    url = urljoin(self.base_url, url_fragment)
    return self.session.get(url).json()
python
[ "def", "status_for_order", "(", "self", ",", "order_id", ",", "stock", ")", ":", "url_fragment", "=", "'venues/{venue}/stocks/{stock}/orders/{order_id}'", ".", "format", "(", "venue", "=", "self", ".", "venue", ",", "stock", "=", "stock", ",", "order_id", "=", "order_id", ",", ")", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "url_fragment", ")", "return", "self", ".", "session", ".", "get", "(", "url", ")", ".", "json", "(", ")" ]
Status For An Existing Order https://starfighter.readme.io/docs/status-for-an-existing-order
[ "Status", "For", "An", "Existing", "Order" ]
df908f5919d6f861601cd00c906a049d04253d47
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L94-L105
train
striglia/stockfighter
stockfighter/stockfighter.py
Stockfighter.cancel_order
def cancel_order(self, order_id, stock):
    """Cancel An Order

    https://starfighter.readme.io/docs/cancel-an-order
    """
    url_fragment = 'venues/{venue}/stocks/{stock}/orders/{order_id}'.format(
        venue=self.venue,
        stock=stock,
        order_id=order_id,
    )
    url = urljoin(self.base_url, url_fragment)
    return self.session.delete(url).json()
python
[ "def", "cancel_order", "(", "self", ",", "order_id", ",", "stock", ")", ":", "url_fragment", "=", "'venues/{venue}/stocks/{stock}/orders/{order_id}'", ".", "format", "(", "venue", "=", "self", ".", "venue", ",", "stock", "=", "stock", ",", "order_id", "=", "order_id", ",", ")", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "url_fragment", ")", "return", "self", ".", "session", ".", "delete", "(", "url", ")", ".", "json", "(", ")" ]
Cancel An Order https://starfighter.readme.io/docs/cancel-an-order
[ "Cancel", "An", "Order" ]
df908f5919d6f861601cd00c906a049d04253d47
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L107-L118
train
striglia/stockfighter
stockfighter/stockfighter.py
Stockfighter.status_for_all_orders
def status_for_all_orders(self):
    """Status for all orders

    https://starfighter.readme.io/docs/status-for-all-orders
    """
    url_fragment = 'venues/{venue}/accounts/{account}/orders'.format(
        venue=self.venue,
        account=self.account,
    )
    url = urljoin(self.base_url, url_fragment)
    return self.session.get(url).json()
python
[ "def", "status_for_all_orders", "(", "self", ")", ":", "url_fragment", "=", "'venues/{venue}/accounts/{account}/orders'", ".", "format", "(", "venue", "=", "self", ".", "venue", ",", "account", "=", "self", ".", "account", ",", ")", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "url_fragment", ")", "return", "self", ".", "session", ".", "get", "(", "url", ")", ".", "json", "(", ")" ]
Status for all orders https://starfighter.readme.io/docs/status-for-all-orders
[ "Status", "for", "all", "orders" ]
df908f5919d6f861601cd00c906a049d04253d47
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L120-L130
train
striglia/stockfighter
stockfighter/stockfighter.py
Stockfighter.status_for_all_orders_in_a_stock
def status_for_all_orders_in_a_stock(self, stock):
    """Status for all orders in a stock

    https://starfighter.readme.io/docs/status-for-all-orders-in-a-stock
    """
    url_fragment = 'venues/{venue}/accounts/{account}/stocks/{stock}/orders'.format(
        stock=stock,
        venue=self.venue,
        account=self.account,
    )
    url = urljoin(self.base_url, url_fragment)
    return self.session.get(url).json()
python
[ "def", "status_for_all_orders_in_a_stock", "(", "self", ",", "stock", ")", ":", "url_fragment", "=", "'venues/{venue}/accounts/{account}/stocks/{stock}/orders'", ".", "format", "(", "stock", "=", "stock", ",", "venue", "=", "self", ".", "venue", ",", "account", "=", "self", ".", "account", ",", ")", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "url_fragment", ")", "return", "self", ".", "session", ".", "get", "(", "url", ")", ".", "json", "(", ")" ]
Status for all orders in a stock https://starfighter.readme.io/docs/status-for-all-orders-in-a-stock
[ "Status", "for", "all", "orders", "in", "a", "stock" ]
df908f5919d6f861601cd00c906a049d04253d47
https://github.com/striglia/stockfighter/blob/df908f5919d6f861601cd00c906a049d04253d47/stockfighter/stockfighter.py#L132-L143
train
tweekmonster/moult
moult/frameworks/django.py
scan_django_settings
def scan_django_settings(values, imports):
    '''Recursively scans Django settings for values that appear to be
    imported modules.
    '''
    if isinstance(values, (str, bytes)):
        if utils.is_import_str(values):
            imports.add(values)
    elif isinstance(values, dict):
        for k, v in values.items():
            scan_django_settings(k, imports)
            scan_django_settings(v, imports)
    elif hasattr(values, '__file__') and getattr(values, '__file__'):
        imp, _ = utils.import_path_from_file(getattr(values, '__file__'))
        imports.add(imp)
    elif hasattr(values, '__iter__'):
        for item in values:
            scan_django_settings(item, imports)
python
[ "def", "scan_django_settings", "(", "values", ",", "imports", ")", ":", "if", "isinstance", "(", "values", ",", "(", "str", ",", "bytes", ")", ")", ":", "if", "utils", ".", "is_import_str", "(", "values", ")", ":", "imports", ".", "add", "(", "values", ")", "elif", "isinstance", "(", "values", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "values", ".", "items", "(", ")", ":", "scan_django_settings", "(", "k", ",", "imports", ")", "scan_django_settings", "(", "v", ",", "imports", ")", "elif", "hasattr", "(", "values", ",", "'__file__'", ")", "and", "getattr", "(", "values", ",", "'__file__'", ")", ":", "imp", ",", "_", "=", "utils", ".", "import_path_from_file", "(", "getattr", "(", "values", ",", "'__file__'", ")", ")", "imports", ".", "add", "(", "imp", ")", "elif", "hasattr", "(", "values", ",", "'__iter__'", ")", ":", "for", "item", "in", "values", ":", "scan_django_settings", "(", "item", ",", "imports", ")" ]
Recursively scans Django settings for values that appear to be imported modules.
[ "Recursively", "scans", "Django", "settings", "for", "values", "that", "appear", "to", "be", "imported", "modules", "." ]
38d3a3b9002336219897ebe263ca1d8dcadbecf5
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/frameworks/django.py#L21-L37
train
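A small illustration of how results accumulate in the caller-supplied set; whether a string qualifies is delegated to ``utils.is_import_str``, so the exact output here is indicative only:

imports = set()
scan_django_settings({
    'DEBUG': True,
    'MIDDLEWARE_CLASSES': ['django.middleware.common.CommonMiddleware'],
}, imports)
print(imports)  # likely {'django.middleware.common.CommonMiddleware'}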
tweekmonster/moult
moult/frameworks/django.py
handle_django_settings
def handle_django_settings(filename):
    '''Attempts to load a Django project and get package dependencies from
    settings.

    Tested using Django 1.4 and 1.8. Not sure if some nuances are missed in
    the other versions.
    '''
    old_sys_path = sys.path[:]
    dirpath = os.path.dirname(filename)
    project = os.path.basename(dirpath)
    cwd = os.getcwd()
    project_path = os.path.normpath(os.path.join(dirpath, '..'))

    if project_path not in sys.path:
        sys.path.insert(0, project_path)
    os.chdir(project_path)

    project_settings = '{}.settings'.format(project)
    os.environ['DJANGO_SETTINGS_MODULE'] = project_settings

    try:
        import django
        # Sanity
        django.setup = lambda: False
    except ImportError:
        log.error('Found Django settings, but Django is not installed.')
        return

    log.warn('Loading Django Settings (Using {}): {}'
             .format(django.get_version(), filename))

    from django.conf import LazySettings

    installed_apps = set()
    settings_imports = set()

    try:
        settings = LazySettings()
        settings._setup()
        for k, v in vars(settings._wrapped).items():
            if k not in _excluded_settings and re.match(r'^[A-Z_]+$', k):
                # log.debug('Scanning Django setting: %s', k)
                scan_django_settings(v, settings_imports)

        # Manually scan INSTALLED_APPS since the broad scan won't include
        # strings without a period in them.
        for app in getattr(settings, 'INSTALLED_APPS', []):
            if hasattr(app, '__file__') and getattr(app, '__file__'):
                imp, _ = utils.import_path_from_file(getattr(app, '__file__'))
                installed_apps.add(imp)
            else:
                installed_apps.add(app)
    except Exception as e:
        log.error('Could not load Django settings: %s', e)
        log.debug('', exc_info=True)
        return

    if not installed_apps or not settings_imports:
        log.error('Got empty settings values from Django settings.')

    try:
        from django.apps.registry import apps, Apps, AppRegistryNotReady
        # Django doesn't like it when the initial instance of `apps` is reused,
        # but it has to be populated before other instances can be created.
        if not apps.apps_ready:
            apps.populate(installed_apps)
        else:
            apps = Apps(installed_apps)

        start = time.time()
        while True:
            try:
                for app in apps.get_app_configs():
                    installed_apps.add(app.name)
            except AppRegistryNotReady:
                if time.time() - start > 10:
                    raise Exception('Bail out of waiting for Django')
                log.debug('Waiting for apps to load...')
                continue
            break
    except Exception as e:
        log.debug('Could not use AppConfig: {}'.format(e))

    # Restore before sub scans can occur
    sys.path[:] = old_sys_path
    os.chdir(cwd)

    for item in settings_imports:
        need_scan = item.startswith(_filescan_modules)
        yield ('django', item, project_path if need_scan else None)

    for app in installed_apps:
        need_scan = app.startswith(project)
        yield ('django', app, project_path if need_scan else None)
python
[ "def", "handle_django_settings", "(", "filename", ")", ":", "old_sys_path", "=", "sys", ".", "path", "[", ":", "]", "dirpath", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "project", "=", "os", ".", "path", ".", "basename", "(", "dirpath", ")", "cwd", "=", "os", ".", "getcwd", "(", ")", "project_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "dirpath", ",", "'..'", ")", ")", "if", "project_path", "not", "in", "sys", ".", "path", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "project_path", ")", "os", ".", "chdir", "(", "project_path", ")", "project_settings", "=", "'{}.settings'", ".", "format", "(", "project", ")", "os", ".", "environ", "[", "'DJANGO_SETTINGS_MODULE'", "]", "=", "project_settings", "try", ":", "import", "django", "# Sanity", "django", ".", "setup", "=", "lambda", ":", "False", "except", "ImportError", ":", "log", ".", "error", "(", "'Found Django settings, but Django is not installed.'", ")", "return", "log", ".", "warn", "(", "'Loading Django Settings (Using {}): {}'", ".", "format", "(", "django", ".", "get_version", "(", ")", ",", "filename", ")", ")", "from", "django", ".", "conf", "import", "LazySettings", "installed_apps", "=", "set", "(", ")", "settings_imports", "=", "set", "(", ")", "try", ":", "settings", "=", "LazySettings", "(", ")", "settings", ".", "_setup", "(", ")", "for", "k", ",", "v", "in", "vars", "(", "settings", ".", "_wrapped", ")", ".", "items", "(", ")", ":", "if", "k", "not", "in", "_excluded_settings", "and", "re", ".", "match", "(", "r'^[A-Z_]+$'", ",", "k", ")", ":", "# log.debug('Scanning Django setting: %s', k)", "scan_django_settings", "(", "v", ",", "settings_imports", ")", "# Manually scan INSTALLED_APPS since the broad scan won't include", "# strings without a period in it .", "for", "app", "in", "getattr", "(", "settings", ",", "'INSTALLED_APPS'", ",", "[", "]", ")", ":", "if", "hasattr", "(", "app", ",", "'__file__'", ")", "and", "getattr", "(", "app", ",", "'__file__'", ")", ":", "imp", ",", "_", "=", "utils", ".", "import_path_from_file", "(", "getattr", "(", "app", ",", "'__file__'", ")", ")", "installed_apps", ".", "add", "(", "imp", ")", "else", ":", "installed_apps", ".", "add", "(", "app", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "'Could not load Django settings: %s'", ",", "e", ")", "log", ".", "debug", "(", "''", ",", "exc_info", "=", "True", ")", "return", "if", "not", "installed_apps", "or", "not", "settings_imports", ":", "log", ".", "error", "(", "'Got empty settings values from Django settings.'", ")", "try", ":", "from", "django", ".", "apps", ".", "registry", "import", "apps", ",", "Apps", ",", "AppRegistryNotReady", "# Django doesn't like it when the initial instance of `apps` is reused,", "# but it has to be populated before other instances can be created.", "if", "not", "apps", ".", "apps_ready", ":", "apps", ".", "populate", "(", "installed_apps", ")", "else", ":", "apps", "=", "Apps", "(", "installed_apps", ")", "start", "=", "time", ".", "time", "(", ")", "while", "True", ":", "try", ":", "for", "app", "in", "apps", ".", "get_app_configs", "(", ")", ":", "installed_apps", ".", "add", "(", "app", ".", "name", ")", "except", "AppRegistryNotReady", ":", "if", "time", ".", "time", "(", ")", "-", "start", ">", "10", ":", "raise", "Exception", "(", "'Bail out of waiting for Django'", ")", "log", ".", "debug", "(", "'Waiting for apps to load...'", ")", "continue", "break", "except", "Exception", "as", "e", ":", "log", ".", 
"debug", "(", "'Could not use AppConfig: {}'", ".", "format", "(", "e", ")", ")", "# Restore before sub scans can occur", "sys", ".", "path", "[", ":", "]", "=", "old_sys_path", "os", ".", "chdir", "(", "cwd", ")", "for", "item", "in", "settings_imports", ":", "need_scan", "=", "item", ".", "startswith", "(", "_filescan_modules", ")", "yield", "(", "'django'", ",", "item", ",", "project_path", "if", "need_scan", "else", "None", ")", "for", "app", "in", "installed_apps", ":", "need_scan", "=", "app", ".", "startswith", "(", "project", ")", "yield", "(", "'django'", ",", "app", ",", "project_path", "if", "need_scan", "else", "None", ")" ]
Attempts to load a Django project and get package dependencies from settings. Tested using Django 1.4 and 1.8. Not sure if some nuances are missed in the other versions.
[ "Attempts", "to", "load", "a", "Django", "project", "and", "get", "package", "dependencies", "from", "settings", "." ]
38d3a3b9002336219897ebe263ca1d8dcadbecf5
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/frameworks/django.py#L40-L132
train
Dullage/starlingbank
starlingbank/__init__.py
_url
def _url(endpoint: str, sandbox: bool = False) -> str:
    """Build a URL from the API's base URLs."""
    if sandbox is True:
        url = BASE_URL_SANDBOX
    else:
        url = BASE_URL
    return "{0}{1}".format(url, endpoint)
python
[ "def", "_url", "(", "endpoint", ":", "str", ",", "sandbox", ":", "bool", "=", "False", ")", "->", "str", ":", "if", "sandbox", "is", "True", ":", "url", "=", "BASE_URL_SANDBOX", "else", ":", "url", "=", "BASE_URL", "return", "\"{0}{1}\"", ".", "format", "(", "url", ",", "endpoint", ")" ]
Build a URL from the API's base URLs.
[ "Build", "a", "URL", "from", "the", "API", "s", "base", "URLs", "." ]
9495456980d5d6d85c4e999a17dc69481067af09
https://github.com/Dullage/starlingbank/blob/9495456980d5d6d85c4e999a17dc69481067af09/starlingbank/__init__.py#L12-L18
train
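The helper is plain string concatenation against the module-level base URLs, so its behaviour is easy to check (the values of BASE_URL and BASE_URL_SANDBOX come from the module):

print(_url("/accounts"))                # BASE_URL + "/accounts"
print(_url("/accounts", sandbox=True))  # BASE_URL_SANDBOX + "/accounts"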
Dullage/starlingbank
starlingbank/__init__.py
SavingsGoal.update
def update(self, goal: Dict = None) -> None:
    """Update a single savings goal's data."""
    if goal is None:
        endpoint = "/account/{0}/savings-goals/{1}".format(
            self._account_uid, self.uid
        )
        response = get(
            _url(endpoint, self._sandbox),
            headers=self._auth_headers
        )
        response.raise_for_status()
        goal = response.json()

    self.uid = goal.get('savingsGoalUid')
    self.name = goal.get('name')

    target = goal.get('target', {})
    self.target_currency = target.get('currency')
    self.target_minor_units = target.get('minorUnits')

    total_saved = goal.get('totalSaved', {})
    self.total_saved_currency = total_saved.get('currency')
    self.total_saved_minor_units = total_saved.get('minorUnits')
python
[ "def", "update", "(", "self", ",", "goal", ":", "Dict", "=", "None", ")", "->", "None", ":", "if", "goal", "is", "None", ":", "endpoint", "=", "\"/account/{0}/savings-goals/{1}\"", ".", "format", "(", "self", ".", "_account_uid", ",", "self", ".", "uid", ")", "response", "=", "get", "(", "_url", "(", "endpoint", ",", "self", ".", "_sandbox", ")", ",", "headers", "=", "self", ".", "_auth_headers", ")", "response", ".", "raise_for_status", "(", ")", "goal", "=", "response", ".", "json", "(", ")", "self", ".", "uid", "=", "goal", ".", "get", "(", "'savingsGoalUid'", ")", "self", ".", "name", "=", "goal", ".", "get", "(", "'name'", ")", "target", "=", "goal", ".", "get", "(", "'target'", ",", "{", "}", ")", "self", ".", "target_currency", "=", "target", ".", "get", "(", "'currency'", ")", "self", ".", "target_minor_units", "=", "target", ".", "get", "(", "'minorUnits'", ")", "total_saved", "=", "goal", ".", "get", "(", "'totalSaved'", ",", "{", "}", ")", "self", ".", "total_saved_currency", "=", "total_saved", ".", "get", "(", "'currency'", ")", "self", ".", "total_saved_minor_units", "=", "total_saved", ".", "get", "(", "'minorUnits'", ")" ]
Update a single savings goal's data.
[ "Update", "a", "single", "savings", "goals", "data", "." ]
9495456980d5d6d85c4e999a17dc69481067af09
https://github.com/Dullage/starlingbank/blob/9495456980d5d6d85c4e999a17dc69481067af09/starlingbank/__init__.py#L37-L61
train
Dullage/starlingbank
starlingbank/__init__.py
SavingsGoal.deposit
def deposit(self, deposit_minor_units: int) -> None:
    """Add funds to a savings goal."""
    endpoint = "/account/{0}/savings-goals/{1}/add-money/{2}".format(
        self._account_uid, self.uid, uuid4()
    )
    body = {
        "amount": {
            "currency": self.total_saved_currency,
            "minorUnits": deposit_minor_units
        }
    }
    response = put(
        _url(endpoint, self._sandbox),
        headers=self._auth_headers,
        data=json_dumps(body)
    )
    response.raise_for_status()
    self.update()
python
[ "def", "deposit", "(", "self", ",", "deposit_minor_units", ":", "int", ")", "->", "None", ":", "endpoint", "=", "\"/account/{0}/savings-goals/{1}/add-money/{2}\"", ".", "format", "(", "self", ".", "_account_uid", ",", "self", ".", "uid", ",", "uuid4", "(", ")", ")", "body", "=", "{", "\"amount\"", ":", "{", "\"currency\"", ":", "self", ".", "total_saved_currency", ",", "\"minorUnits\"", ":", "deposit_minor_units", "}", "}", "response", "=", "put", "(", "_url", "(", "endpoint", ",", "self", ".", "_sandbox", ")", ",", "headers", "=", "self", ".", "_auth_headers", ",", "data", "=", "json_dumps", "(", "body", ")", ")", "response", ".", "raise_for_status", "(", ")", "self", ".", "update", "(", ")" ]
Add funds to a savings goal.
[ "Add", "funds", "to", "a", "savings", "goal", "." ]
9495456980d5d6d85c4e999a17dc69481067af09
https://github.com/Dullage/starlingbank/blob/9495456980d5d6d85c4e999a17dc69481067af09/starlingbank/__init__.py#L63-L85
train
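An end-to-end sketch of topping up a goal; the ``StarlingAccount`` constructor shown is hypothetical, but ``savings_goals`` and ``update_savings_goal_data`` appear later in this module:

account = StarlingAccount('personal-access-token', sandbox=True)  # hypothetical signature
account.update_savings_goal_data()
for goal in account.savings_goals.values():
    if goal.name == 'Holiday':
        goal.deposit(5000)  # minor units: 50.00 in the goal's currency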
Dullage/starlingbank
starlingbank/__init__.py
SavingsGoal.get_image
def get_image(self, filename: str = None) -> None:
    """Download the photo associated with a Savings Goal."""
    if filename is None:
        filename = "{0}.png".format(self.name)

    endpoint = "/account/{0}/savings-goals/{1}/photo".format(
        self._account_uid, self.uid
    )
    response = get(
        _url(endpoint, self._sandbox),
        headers=self._auth_headers
    )
    response.raise_for_status()

    base64_image = response.json()['base64EncodedPhoto']
    with open(filename, 'wb') as file:
        file.write(b64decode(base64_image))
python
[ "def", "get_image", "(", "self", ",", "filename", ":", "str", "=", "None", ")", "->", "None", ":", "if", "filename", "is", "None", ":", "filename", "=", "\"{0}.png\"", ".", "format", "(", "self", ".", "name", ")", "endpoint", "=", "\"/account/{0}/savings-goals/{1}/photo\"", ".", "format", "(", "self", ".", "_account_uid", ",", "self", ".", "uid", ")", "response", "=", "get", "(", "_url", "(", "endpoint", ",", "self", ".", "_sandbox", ")", ",", "headers", "=", "self", ".", "_auth_headers", ")", "response", ".", "raise_for_status", "(", ")", "base64_image", "=", "response", ".", "json", "(", ")", "[", "'base64EncodedPhoto'", "]", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "file", ":", "file", ".", "write", "(", "b64decode", "(", "base64_image", ")", ")" ]
Download the photo associated with a Savings Goal.
[ "Download", "the", "photo", "associated", "with", "a", "Savings", "Goal", "." ]
9495456980d5d6d85c4e999a17dc69481067af09
https://github.com/Dullage/starlingbank/blob/9495456980d5d6d85c4e999a17dc69481067af09/starlingbank/__init__.py#L111-L129
train
Dullage/starlingbank
starlingbank/__init__.py
StarlingAccount.update_account_data
def update_account_data(self) -> None:
    """Get basic information for the account."""
    response = get(
        _url(
            "/accounts/{0}/identifiers".format(self._account_uid),
            self._sandbox
        ),
        headers=self._auth_headers
    )
    response.raise_for_status()
    response = response.json()

    self.account_identifier = response.get('accountIdentifier')
    self.bank_identifier = response.get('bankIdentifier')
    self.iban = response.get('iban')
    self.bic = response.get('bic')
python
[ "def", "update_account_data", "(", "self", ")", "->", "None", ":", "response", "=", "get", "(", "_url", "(", "\"/accounts/{0}/identifiers\"", ".", "format", "(", "self", ".", "_account_uid", ")", ",", "self", ".", "_sandbox", ")", ",", "headers", "=", "self", ".", "_auth_headers", ")", "response", ".", "raise_for_status", "(", ")", "response", "=", "response", ".", "json", "(", ")", "self", ".", "account_identifier", "=", "response", ".", "get", "(", "'accountIdentifier'", ")", "self", ".", "bank_identifier", "=", "response", ".", "get", "(", "'bankIdentifier'", ")", "self", ".", "iban", "=", "response", ".", "get", "(", "'iban'", ")", "self", ".", "bic", "=", "response", ".", "get", "(", "'bic'", ")" ]
Get basic information for the account.
[ "Get", "basic", "information", "for", "the", "account", "." ]
9495456980d5d6d85c4e999a17dc69481067af09
https://github.com/Dullage/starlingbank/blob/9495456980d5d6d85c4e999a17dc69481067af09/starlingbank/__init__.py#L135-L151
train
Dullage/starlingbank
starlingbank/__init__.py
StarlingAccount.update_balance_data
def update_balance_data(self) -> None:
    """Get the latest balance information for the account."""
    response = get(
        _url(
            "/accounts/{0}/balance".format(self._account_uid),
            self._sandbox
        ),
        headers=self._auth_headers
    )
    response.raise_for_status()
    response = response.json()

    self.cleared_balance = response['clearedBalance']['minorUnits']
    self.effective_balance = response['effectiveBalance']['minorUnits']
    self.pending_transactions \
        = response['pendingTransactions']['minorUnits']
    self.available_to_spend = response['availableToSpend']['minorUnits']
    self.accepted_overdraft = response['acceptedOverdraft']['minorUnits']
python
[ "def", "update_balance_data", "(", "self", ")", "->", "None", ":", "response", "=", "get", "(", "_url", "(", "\"/accounts/{0}/balance\"", ".", "format", "(", "self", ".", "_account_uid", ")", ",", "self", ".", "_sandbox", ")", ",", "headers", "=", "self", ".", "_auth_headers", ")", "response", ".", "raise_for_status", "(", ")", "response", "=", "response", ".", "json", "(", ")", "self", ".", "cleared_balance", "=", "response", "[", "'clearedBalance'", "]", "[", "'minorUnits'", "]", "self", ".", "effective_balance", "=", "response", "[", "'effectiveBalance'", "]", "[", "'minorUnits'", "]", "self", ".", "pending_transactions", "=", "response", "[", "'pendingTransactions'", "]", "[", "'minorUnits'", "]", "self", ".", "available_to_spend", "=", "response", "[", "'availableToSpend'", "]", "[", "'minorUnits'", "]", "self", ".", "accepted_overdraft", "=", "response", "[", "'acceptedOverdraft'", "]", "[", "'minorUnits'", "]" ]
Get the latest balance information for the account.
[ "Get", "the", "latest", "balance", "information", "for", "the", "account", "." ]
9495456980d5d6d85c4e999a17dc69481067af09
https://github.com/Dullage/starlingbank/blob/9495456980d5d6d85c4e999a17dc69481067af09/starlingbank/__init__.py#L153-L170
train
Dullage/starlingbank
starlingbank/__init__.py
StarlingAccount.update_savings_goal_data
def update_savings_goal_data(self) -> None:
    """Get the latest savings goal information for the account."""
    response = get(
        _url(
            "/account/{0}/savings-goals".format(self._account_uid),
            self._sandbox
        ),
        headers=self._auth_headers
    )
    response.raise_for_status()
    response = response.json()

    response_savings_goals = response.get('savingsGoalList', {})

    returned_uids = []

    # New / Update
    for goal in response_savings_goals:
        uid = goal.get('savingsGoalUid')
        returned_uids.append(uid)

        # Initialise a new SavingsGoal object if new
        if uid not in self.savings_goals:
            self.savings_goals[uid] = SavingsGoal(
                self._auth_headers, self._sandbox, self._account_uid
            )

        self.savings_goals[uid].update(goal)

    # Forget about savings goals if the UID isn't returned by Starling
    for uid in list(self.savings_goals):
        if uid not in returned_uids:
            self.savings_goals.pop(uid)
python
[ "def", "update_savings_goal_data", "(", "self", ")", "->", "None", ":", "response", "=", "get", "(", "_url", "(", "\"/account/{0}/savings-goals\"", ".", "format", "(", "self", ".", "_account_uid", ")", ",", "self", ".", "_sandbox", ")", ",", "headers", "=", "self", ".", "_auth_headers", ")", "response", ".", "raise_for_status", "(", ")", "response", "=", "response", ".", "json", "(", ")", "response_savings_goals", "=", "response", ".", "get", "(", "'savingsGoalList'", ",", "{", "}", ")", "returned_uids", "=", "[", "]", "# New / Update", "for", "goal", "in", "response_savings_goals", ":", "uid", "=", "goal", ".", "get", "(", "'savingsGoalUid'", ")", "returned_uids", ".", "append", "(", "uid", ")", "# Intiialise new _SavingsGoal object if new", "if", "uid", "not", "in", "self", ".", "savings_goals", ":", "self", ".", "savings_goals", "[", "uid", "]", "=", "SavingsGoal", "(", "self", ".", "_auth_headers", ",", "self", ".", "_sandbox", ",", "self", ".", "_account_uid", ")", "self", ".", "savings_goals", "[", "uid", "]", ".", "update", "(", "goal", ")", "# Forget about savings goals if the UID isn't returned by Starling", "for", "uid", "in", "list", "(", "self", ".", "savings_goals", ")", ":", "if", "uid", "not", "in", "returned_uids", ":", "self", ".", "savings_goals", ".", "pop", "(", "uid", ")" ]
Get the latest savings goal information for the account.
[ "Get", "the", "latest", "savings", "goal", "information", "for", "the", "account", "." ]
9495456980d5d6d85c4e999a17dc69481067af09
https://github.com/Dullage/starlingbank/blob/9495456980d5d6d85c4e999a17dc69481067af09/starlingbank/__init__.py#L172-L206
train
mintchaos/django_inlines
django_inlines/templatetags/inlines.py
process_inlines
def process_inlines(parser, token):
    """
    Searches through the provided content and applies inlines wherever
    they are found.

    Syntax::

        {% process_inlines entry.body [in template_dir] [as varname] %}

    Examples::

        {% process_inlines entry.body %}

        {% process_inlines entry.body as body %}

        {% process_inlines entry.body in 'inlines/sidebar' %}

        {% process_inlines entry.body in 'inlines/sidebar' as body %}
    """
    args = token.split_contents()

    if len(args) not in (2, 4, 6):
        raise template.TemplateSyntaxError("%r tag requires either 1, 3 or 5 arguments." % args[0])

    var_name = args[1]

    ALLOWED_ARGS = ['as', 'in']
    kwargs = {'template_directory': None}
    if len(args) > 2:
        tuples = zip(*[args[2:][i::2] for i in range(2)])
        for k, v in tuples:
            if k not in ALLOWED_ARGS:
                raise template.TemplateSyntaxError("%r tag option arguments must be one of %s." % (args[0], ', '.join(ALLOWED_ARGS)))
            if k == 'in':
                kwargs['template_directory'] = v
            if k == 'as':
                kwargs['asvar'] = v

    return InlinesNode(var_name, **kwargs)
python
[ "def", "process_inlines", "(", "parser", ",", "token", ")", ":", "args", "=", "token", ".", "split_contents", "(", ")", "if", "not", "len", "(", "args", ")", "in", "(", "2", ",", "4", ",", "6", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"%r tag requires either 1, 3 or 5 arguments.\"", "%", "args", "[", "0", "]", ")", "var_name", "=", "args", "[", "1", "]", "ALLOWED_ARGS", "=", "[", "'as'", ",", "'in'", "]", "kwargs", "=", "{", "'template_directory'", ":", "None", "}", "if", "len", "(", "args", ")", ">", "2", ":", "tuples", "=", "zip", "(", "*", "[", "args", "[", "2", ":", "]", "[", "i", ":", ":", "2", "]", "for", "i", "in", "range", "(", "2", ")", "]", ")", "for", "k", ",", "v", "in", "tuples", ":", "if", "not", "k", "in", "ALLOWED_ARGS", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"%r tag options arguments must be one of %s.\"", "%", "(", "args", "[", "0", "]", ",", "', '", ".", "join", "(", "ALLOWED_ARGS", ")", ")", ")", "if", "k", "==", "'in'", ":", "kwargs", "[", "'template_directory'", "]", "=", "v", "if", "k", "==", "'as'", ":", "kwargs", "[", "'asvar'", "]", "=", "v", "return", "InlinesNode", "(", "var_name", ",", "*", "*", "kwargs", ")" ]
Searches through the provided content and applies inlines wherever they are found. Syntax:: {% process_inlines entry.body [in template_dir] [as varname] %} Examples:: {% process_inlines entry.body %} {% process_inlines entry.body as body %} {% process_inlines entry.body in 'inlines/sidebar' %} {% process_inlines entry.body in 'inlines/sidebar' as body %}
[ "Searches", "through", "the", "provided", "content", "and", "applies", "inlines", "where", "ever", "they", "are", "found", "." ]
1912e508d04884713a6c44a068c21fbd217d478a
https://github.com/mintchaos/django_inlines/blob/1912e508d04884713a6c44a068c21fbd217d478a/django_inlines/templatetags/inlines.py#L40-L80
train
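The option parsing in process_inlines above leans on a slicing idiom that is easy to misread. A minimal sketch (plain Python, no Django required) of how zip(*[seq[i::2] for i in range(2)]) turns the flat token list into (keyword, value) pairs:

# Tokens as split_contents() would return them for:
#   {% process_inlines entry.body in 'inlines/sidebar' as body %}
args = ['process_inlines', 'entry.body', 'in', "'inlines/sidebar'", 'as', 'body']

# Even-indexed tail items are keywords, odd-indexed ones their values.
pairs = list(zip(*[args[2:][i::2] for i in range(2)]))
print(pairs)  # [('in', "'inlines/sidebar'"), ('as', 'body')]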
klichukb/django-migrate-sql
migrate_sql/graph.py
build_current_graph
def build_current_graph(): """ Read current state of SQL items from the current project state. Returns: (SQLStateGraph) Current project state graph. """ graph = SQLStateGraph() for app_name, config in apps.app_configs.items(): try: module = import_module( '.'.join((config.module.__name__, SQL_CONFIG_MODULE))) sql_items = module.sql_items except (ImportError, AttributeError): continue for sql_item in sql_items: graph.add_node((app_name, sql_item.name), sql_item) for dep in sql_item.dependencies: graph.add_lazy_dependency((app_name, sql_item.name), dep) graph.build_graph() return graph
python
def build_current_graph(): """ Read current state of SQL items from the current project state. Returns: (SQLStateGraph) Current project state graph. """ graph = SQLStateGraph() for app_name, config in apps.app_configs.items(): try: module = import_module( '.'.join((config.module.__name__, SQL_CONFIG_MODULE))) sql_items = module.sql_items except (ImportError, AttributeError): continue for sql_item in sql_items: graph.add_node((app_name, sql_item.name), sql_item) for dep in sql_item.dependencies: graph.add_lazy_dependency((app_name, sql_item.name), dep) graph.build_graph() return graph
[ "def", "build_current_graph", "(", ")", ":", "graph", "=", "SQLStateGraph", "(", ")", "for", "app_name", ",", "config", "in", "apps", ".", "app_configs", ".", "items", "(", ")", ":", "try", ":", "module", "=", "import_module", "(", "'.'", ".", "join", "(", "(", "config", ".", "module", ".", "__name__", ",", "SQL_CONFIG_MODULE", ")", ")", ")", "sql_items", "=", "module", ".", "sql_items", "except", "(", "ImportError", ",", "AttributeError", ")", ":", "continue", "for", "sql_item", "in", "sql_items", ":", "graph", ".", "add_node", "(", "(", "app_name", ",", "sql_item", ".", "name", ")", ",", "sql_item", ")", "for", "dep", "in", "sql_item", ".", "dependencies", ":", "graph", ".", "add_lazy_dependency", "(", "(", "app_name", ",", "sql_item", ".", "name", ")", ",", "dep", ")", "graph", ".", "build_graph", "(", ")", "return", "graph" ]
Read current state of SQL items from the current project state. Returns: (SQLStateGraph) Current project state graph.
[ "Read", "current", "state", "of", "SQL", "items", "from", "the", "current", "project", "state", "." ]
be48ff2c9283404e3d951128c459c3496d1ba25d
https://github.com/klichukb/django-migrate-sql/blob/be48ff2c9283404e3d951128c459c3496d1ba25d/migrate_sql/graph.py#L100-L122
train
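For context, build_current_graph above expects each app to ship a module named by SQL_CONFIG_MODULE that exposes a sql_items iterable whose items carry name and dependencies attributes. A hypothetical sketch of such a module; the SQLItem class here is illustrative, inferred only from the attributes the loader reads, not confirmed by this record:

# Hypothetical <app>/sql_config.py; only `sql_items`, `.name` and
# `.dependencies` are implied by the loader above -- the rest is assumed.
class SQLItem(object):
    def __init__(self, name, sql, reverse_sql=None, dependencies=()):
        self.name = name
        self.sql = sql
        self.reverse_sql = reverse_sql
        self.dependencies = list(dependencies)  # (app_label, item_name) pairs

sql_items = [
    SQLItem('rating_type',
            "CREATE TYPE rating AS (value int);",
            reverse_sql="DROP TYPE rating;"),
    SQLItem('top_books',
            "CREATE VIEW top_books AS SELECT 1 AS id;",
            reverse_sql="DROP VIEW top_books;",
            dependencies=[('myapp', 'rating_type')]),
]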
klichukb/django-migrate-sql
migrate_sql/graph.py
SQLStateGraph.build_graph
def build_graph(self): """ Read lazy dependency list and build graph. """ for child, parents in self.dependencies.items(): if child not in self.nodes: raise NodeNotFoundError( "App %s SQL item dependencies reference nonexistent child node %r" % ( child[0], child), child ) for parent in parents: if parent not in self.nodes: raise NodeNotFoundError( "App %s SQL item dependencies reference nonexistent parent node %r" % ( child[0], parent), parent ) self.node_map[child].add_parent(self.node_map[parent]) self.node_map[parent].add_child(self.node_map[child]) for node in self.nodes: self.ensure_not_cyclic(node, lambda x: (parent.key for parent in self.node_map[x].parents))
python
def build_graph(self): """ Read lazy dependency list and build graph. """ for child, parents in self.dependencies.items(): if child not in self.nodes: raise NodeNotFoundError( "App %s SQL item dependencies reference nonexistent child node %r" % ( child[0], child), child ) for parent in parents: if parent not in self.nodes: raise NodeNotFoundError( "App %s SQL item dependencies reference nonexistent parent node %r" % ( child[0], parent), parent ) self.node_map[child].add_parent(self.node_map[parent]) self.node_map[parent].add_child(self.node_map[child]) for node in self.nodes: self.ensure_not_cyclic(node, lambda x: (parent.key for parent in self.node_map[x].parents))
[ "def", "build_graph", "(", "self", ")", ":", "for", "child", ",", "parents", "in", "self", ".", "dependencies", ".", "items", "(", ")", ":", "if", "child", "not", "in", "self", ".", "nodes", ":", "raise", "NodeNotFoundError", "(", "\"App %s SQL item dependencies reference nonexistent child node %r\"", "%", "(", "child", "[", "0", "]", ",", "child", ")", ",", "child", ")", "for", "parent", "in", "parents", ":", "if", "parent", "not", "in", "self", ".", "nodes", ":", "raise", "NodeNotFoundError", "(", "\"App %s SQL item dependencies reference nonexistent parent node %r\"", "%", "(", "child", "[", "0", "]", ",", "parent", ")", ",", "parent", ")", "self", ".", "node_map", "[", "child", "]", ".", "add_parent", "(", "self", ".", "node_map", "[", "parent", "]", ")", "self", ".", "node_map", "[", "parent", "]", ".", "add_child", "(", "self", ".", "node_map", "[", "child", "]", ")", "for", "node", "in", "self", ".", "nodes", ":", "self", ".", "ensure_not_cyclic", "(", "node", ",", "lambda", "x", ":", "(", "parent", ".", "key", "for", "parent", "in", "self", ".", "node_map", "[", "x", "]", ".", "parents", ")", ")" ]
Read lazy dependency list and build graph.
[ "Read", "lazy", "dependency", "list", "and", "build", "graph", "." ]
be48ff2c9283404e3d951128c459c3496d1ba25d
https://github.com/klichukb/django-migrate-sql/blob/be48ff2c9283404e3d951128c459c3496d1ba25d/migrate_sql/graph.py#L54-L77
train
ngmarchant/oasis
oasis/passive.py
PassiveSampler.sample
def sample(self, n_to_sample, **kwargs): """Sample a sequence of items from the pool Parameters ---------- n_to_sample : int number of items to sample """ n_to_sample = verify_positive(int(n_to_sample)) n_remaining = self._max_iter - self.t_ if n_remaining == 0: if (not self.replace) and (self._n_items == self._max_iter): raise Exception("All items have already been sampled") else: raise Exception("No more space available to continue sampling. " "Consider re-initialising with a larger value " "of max_iter.") if n_to_sample > n_remaining: warnings.warn("Space only remains for {} more iteration(s). " "Setting n_to_sample = {}.".format(n_remaining, \ n_remaining)) n_to_sample = n_remaining for _ in range(n_to_sample): self._iterate(**kwargs)
python
def sample(self, n_to_sample, **kwargs): """Sample a sequence of items from the pool Parameters ---------- n_to_sample : int number of items to sample """ n_to_sample = verify_positive(int(n_to_sample)) n_remaining = self._max_iter - self.t_ if n_remaining == 0: if (not self.replace) and (self._n_items == self._max_iter): raise Exception("All items have already been sampled") else: raise Exception("No more space available to continue sampling. " "Consider re-initialising with a larger value " "of max_iter.") if n_to_sample > n_remaining: warnings.warn("Space only remains for {} more iteration(s). " "Setting n_to_sample = {}.".format(n_remaining, \ n_remaining)) n_to_sample = n_remaining for _ in range(n_to_sample): self._iterate(**kwargs)
[ "def", "sample", "(", "self", ",", "n_to_sample", ",", "*", "*", "kwargs", ")", ":", "n_to_sample", "=", "verify_positive", "(", "int", "(", "n_to_sample", ")", ")", "n_remaining", "=", "self", ".", "_max_iter", "-", "self", ".", "t_", "if", "n_remaining", "==", "0", ":", "if", "(", "not", "self", ".", "replace", ")", "and", "(", "self", ".", "_n_items", "==", "self", ".", "_max_iter", ")", ":", "raise", "Exception", "(", "\"All items have already been sampled\"", ")", "else", ":", "raise", "Exception", "(", "\"No more space available to continue sampling. \"", "\"Consider re-initialising with a larger value \"", "\"of max_iter.\"", ")", "if", "n_to_sample", ">", "n_remaining", ":", "warnings", ".", "warn", "(", "\"Space only remains for {} more iteration(s). \"", "\"Setting n_to_sample = {}.\"", ".", "format", "(", "n_remaining", ",", "n_remaining", ")", ")", "n_to_sample", "=", "n_remaining", "for", "_", "in", "range", "(", "n_to_sample", ")", ":", "self", ".", "_iterate", "(", "*", "*", "kwargs", ")" ]
Sample a sequence of items from the pool Parameters ---------- n_to_sample : int number of items to sample
[ "Sample", "a", "sequence", "of", "items", "from", "the", "pool" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/passive.py#L174-L201
train
ngmarchant/oasis
oasis/passive.py
PassiveSampler.sample_distinct
def sample_distinct(self, n_to_sample, **kwargs): """Sample a sequence of items from the pool until a minimum number of distinct items are queried Parameters ---------- n_to_sample : int number of distinct items to sample. If sampling with replacement, this number is not necessarily the same as the number of iterations. """ # Record how many distinct items have not yet been sampled n_notsampled = np.sum(np.isnan(self.cached_labels_)) if n_notsampled == 0: raise Exception("All distinct items have already been sampled.") if n_to_sample > n_notsampled: warnings.warn("Only {} distinct item(s) have not yet been sampled." " Setting n_to_sample = {}.".format(n_notsampled, \ n_notsampled)) n_to_sample = n_notsampled n_sampled = 0 # number of distinct items sampled this round while n_sampled < n_to_sample: self.sample(1,**kwargs) n_sampled += self._queried_oracle[self.t_ - 1]*1
python
def sample_distinct(self, n_to_sample, **kwargs): """Sample a sequence of items from the pool until a minimum number of distinct items are queried Parameters ---------- n_to_sample : int number of distinct items to sample. If sampling with replacement, this number is not necessarily the same as the number of iterations. """ # Record how many distinct items have not yet been sampled n_notsampled = np.sum(np.isnan(self.cached_labels_)) if n_notsampled == 0: raise Exception("All distinct items have already been sampled.") if n_to_sample > n_notsampled: warnings.warn("Only {} distinct item(s) have not yet been sampled." " Setting n_to_sample = {}.".format(n_notsampled, \ n_notsampled)) n_to_sample = n_notsampled n_sampled = 0 # number of distinct items sampled this round while n_sampled < n_to_sample: self.sample(1,**kwargs) n_sampled += self._queried_oracle[self.t_ - 1]*1
[ "def", "sample_distinct", "(", "self", ",", "n_to_sample", ",", "*", "*", "kwargs", ")", ":", "# Record how many distinct items have not yet been sampled", "n_notsampled", "=", "np", ".", "sum", "(", "np", ".", "isnan", "(", "self", ".", "cached_labels_", ")", ")", "if", "n_notsampled", "==", "0", ":", "raise", "Exception", "(", "\"All distinct items have already been sampled.\"", ")", "if", "n_to_sample", ">", "n_notsampled", ":", "warnings", ".", "warn", "(", "\"Only {} distinct item(s) have not yet been sampled.\"", "\" Setting n_to_sample = {}.\"", ".", "format", "(", "n_notsampled", ",", "n_notsampled", ")", ")", "n_to_sample", "=", "n_notsampled", "n_sampled", "=", "0", "# number of distinct items sampled this round", "while", "n_sampled", "<", "n_to_sample", ":", "self", ".", "sample", "(", "1", ",", "*", "*", "kwargs", ")", "n_sampled", "+=", "self", ".", "_queried_oracle", "[", "self", ".", "t_", "-", "1", "]", "*", "1" ]
Sample a sequence of items from the pool until a minimum number of distinct items are queried Parameters ---------- n_to_sample : int number of distinct items to sample. If sampling with replacement, this number is not necessarily the same as the number of iterations.
[ "Sample", "a", "sequence", "of", "items", "from", "the", "pool", "until", "a", "minimum", "number", "of", "distinct", "items", "are", "queried" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/passive.py#L203-L229
train
ngmarchant/oasis
oasis/passive.py
PassiveSampler._sample_item
def _sample_item(self, **kwargs): """Sample an item from the pool""" if self.replace: # Can sample from any of the items loc = np.random.choice(self._n_items) else: # Can only sample from items that have not been seen # Find ids that haven't been seen yet not_seen_ids = np.where(np.isnan(self.cached_labels_))[0] loc = np.random.choice(not_seen_ids) return loc, 1, {}
python
def _sample_item(self, **kwargs): """Sample an item from the pool""" if self.replace: # Can sample from any of the items loc = np.random.choice(self._n_items) else: # Can only sample from items that have not been seen # Find ids that haven't been seen yet not_seen_ids = np.where(np.isnan(self.cached_labels_))[0] loc = np.random.choice(not_seen_ids) return loc, 1, {}
[ "def", "_sample_item", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "replace", ":", "# Can sample from any of the items", "loc", "=", "np", ".", "random", ".", "choice", "(", "self", ".", "_n_items", ")", "else", ":", "# Can only sample from items that have not been seen", "# Find ids that haven't been seen yet", "not_seen_ids", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "self", ".", "cached_labels_", ")", ")", "[", "0", "]", "loc", "=", "np", ".", "random", ".", "choice", "(", "not_seen_ids", ")", "return", "loc", ",", "1", ",", "{", "}" ]
Sample an item from the pool
[ "Sample", "an", "item", "from", "the", "pool" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/passive.py#L231-L241
train
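The no-replacement branch above works because the label cache uses NaN as the "unseen" marker; a standalone illustration:

import numpy as np

# Items 1 and 3 have never been sampled (their cached label is NaN).
cached_labels = np.array([0.0, np.nan, 1.0, np.nan])
not_seen_ids = np.where(np.isnan(cached_labels))[0]
print(not_seen_ids)                   # [1 3]
loc = np.random.choice(not_seen_ids)  # uniform draw over unseen items only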
ngmarchant/oasis
oasis/passive.py
PassiveSampler._query_label
def _query_label(self, loc): """Query the label for the item with index `loc`. Preferentially queries the label from the cache, but if not yet cached, queries the oracle. Returns ------- int the true label "0" or "1". """ # Try to get label from cache ell = self.cached_labels_[loc] if np.isnan(ell): # Label has not been cached. Need to query oracle oracle_arg = self.identifiers[loc] ell = self.oracle(oracle_arg) if ell not in [0, 1]: raise Exception("Oracle provided an invalid label.") #TODO Gracefully handle errors from oracle? self._queried_oracle[self.t_] = True self.cached_labels_[loc] = ell return ell
python
def _query_label(self, loc): """Query the label for the item with index `loc`. Preferentially queries the label from the cache, but if not yet cached, queries the oracle. Returns ------- int the true label "0" or "1". """ # Try to get label from cache ell = self.cached_labels_[loc] if np.isnan(ell): # Label has not been cached. Need to query oracle oracle_arg = self.identifiers[loc] ell = self.oracle(oracle_arg) if ell not in [0, 1]: raise Exception("Oracle provided an invalid label.") #TODO Gracefully handle errors from oracle? self._queried_oracle[self.t_] = True self.cached_labels_[loc] = ell return ell
[ "def", "_query_label", "(", "self", ",", "loc", ")", ":", "# Try to get label from cache", "ell", "=", "self", ".", "cached_labels_", "[", "loc", "]", "if", "np", ".", "isnan", "(", "ell", ")", ":", "# Label has not been cached. Need to query oracle", "oracle_arg", "=", "self", ".", "identifiers", "[", "loc", "]", "ell", "=", "self", ".", "oracle", "(", "oracle_arg", ")", "if", "ell", "not", "in", "[", "0", ",", "1", "]", ":", "raise", "Exception", "(", "\"Oracle provided an invalid label.\"", ")", "#TODO Gracefully handle errors from oracle?", "self", ".", "_queried_oracle", "[", "self", ".", "t_", "]", "=", "True", "self", ".", "cached_labels_", "[", "loc", "]", "=", "ell", "return", "ell" ]
Query the label for the item with index `loc`. Preferentially queries the label from the cache, but if not yet cached, queries the oracle. Returns ------- int the true label "0" or "1".
[ "Query", "the", "label", "for", "the", "item", "with", "index", "loc", ".", "Preferentially", "queries", "the", "label", "from", "the", "cache", "but", "if", "not", "yet", "cached", "queries", "the", "oracle", "." ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/passive.py#L243-L266
train
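The oracle contract implied by the validation above is just a callable from an identifier to a 0/1 label. A dict-backed stand-in (hypothetical identifiers and labels) for illustration:

# Hypothetical ground truth standing in for a human annotator.
true_labels = {'item-1': 0, 'item-2': 1, 'item-3': 1}

def oracle(identifier):
    # Must return 0 or 1; anything else makes _query_label raise.
    return true_labels[identifier]

print(oracle('item-2'))  # 1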
ngmarchant/oasis
oasis/passive.py
PassiveSampler._F_measure
def _F_measure(self, alpha, TP, FP, FN, return_num_den=False): """Calculate the weighted F-measure""" num = np.float64(TP) den = np.float64(alpha * (TP + FP) + (1 - alpha) * (TP + FN)) with np.errstate(divide='ignore', invalid='ignore'): F_measure = num/den #F_measure = num/den if return_num_den: return F_measure, num, den else: return F_measure
python
def _F_measure(self, alpha, TP, FP, FN, return_num_den=False): """Calculate the weighted F-measure""" num = np.float64(TP) den = np.float64(alpha * (TP + FP) + (1 - alpha) * (TP + FN)) with np.errstate(divide='ignore', invalid='ignore'): F_measure = num/den #F_measure = num/den if return_num_den: return F_measure, num, den else: return F_measure
[ "def", "_F_measure", "(", "self", ",", "alpha", ",", "TP", ",", "FP", ",", "FN", ",", "return_num_den", "=", "False", ")", ":", "num", "=", "np", ".", "float64", "(", "TP", ")", "den", "=", "np", ".", "float64", "(", "alpha", "*", "(", "TP", "+", "FP", ")", "+", "(", "1", "-", "alpha", ")", "*", "(", "TP", "+", "FN", ")", ")", "with", "np", ".", "errstate", "(", "divide", "=", "'ignore'", ",", "invalid", "=", "'ignore'", ")", ":", "F_measure", "=", "num", "/", "den", "#F_measure = num/den", "if", "return_num_den", ":", "return", "F_measure", ",", "num", ",", "den", "else", ":", "return", "F_measure" ]
Calculate the weighted F-measure
[ "Calculate", "the", "weighted", "F", "-", "measure" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/passive.py#L268-L279
train
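A quick numeric check of the weighted F-measure above: with alpha = 0.5 the expression reduces to the standard F1 score, TP / (TP + (FP + FN) / 2):

import numpy as np

def f_measure(alpha, TP, FP, FN):
    # Same arithmetic as _F_measure above, minus the num/den plumbing.
    num = np.float64(TP)
    den = np.float64(alpha * (TP + FP) + (1 - alpha) * (TP + FN))
    with np.errstate(divide='ignore', invalid='ignore'):
        return num / den

print(f_measure(0.5, TP=8, FP=2, FN=2))  # 0.8, i.e. 8 / (8 + (2 + 2) / 2)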
openearth/mmi-python
mmi/tracker_client.py
MMITracker.key_occurrence
def key_occurrence(self, key, update=True): """ Return a dict containing the value of the provided key and its uuid as value. """ if update: self.update() result = {} for k, v in self.database.items(): if key in v: result[str(v[key])] = k return result
python
def key_occurrence(self, key, update=True): """ Return a dict containing the value of the provided key and its uuid as value. """ if update: self.update() result = {} for k, v in self.database.items(): if key in v: result[str(v[key])] = k return result
[ "def", "key_occurrence", "(", "self", ",", "key", ",", "update", "=", "True", ")", ":", "if", "update", ":", "self", ".", "update", "(", ")", "result", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "database", ".", "items", "(", ")", ":", "if", "key", "in", "v", ":", "result", "[", "str", "(", "v", "[", "key", "]", ")", "]", "=", "k", "return", "result" ]
Return a dict containing the value of the provided key and its uuid as value.
[ "Return", "a", "dict", "containing", "the", "value", "of", "the", "provided", "key", "and", "its", "uuid", "as", "value", "." ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/tracker_client.py#L17-L28
train
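A toy illustration of the inversion key_occurrence performs: each record's value for the key becomes a (stringified) dict key mapping back to the uuid that owns it:

# Two hypothetical registry entries keyed by uuid.
database = {'uuid-1': {'name': 'flow'}, 'uuid-2': {'name': 'wave'}}

result = {str(v['name']): k for k, v in database.items() if 'name' in v}
print(result)  # {'flow': 'uuid-1', 'wave': 'uuid-2'}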
openearth/mmi-python
mmi/tracker_client.py
MMITracker.zmq_address
def zmq_address(self, key): """ Return a ZeroMQ address to the module with the provided key. """ zmq_address = "tcp://" + self.database[key]['node'] + ":" + str(self.database[key]['ports']['REQ']) return zmq_address
python
def zmq_address(self, key): """ Return a ZeroMQ address to the module with the provided key. """ zmq_address = "tcp://" + self.database[key]['node'] + ":" + str(self.database[key]['ports']['REQ']) return zmq_address
[ "def", "zmq_address", "(", "self", ",", "key", ")", ":", "zmq_address", "=", "\"tcp://\"", "+", "self", ".", "database", "[", "key", "]", "[", "'node'", "]", "+", "\":\"", "+", "str", "(", "self", ".", "database", "[", "key", "]", "[", "'ports'", "]", "[", "'REQ'", "]", ")", "return", "zmq_address" ]
Return a ZeroMQ address to the module with the provided key.
[ "Return", "a", "ZeroMQ", "address", "to", "the", "module", "with", "the", "provided", "key", "." ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/tracker_client.py#L30-L35
train
mkoura/dump2polarion
dump2polarion/results/ostriztools.py
_get_json
def _get_json(location): """Reads JSON data from file or URL.""" location = os.path.expanduser(location) try: if os.path.isfile(location): with io.open(location, encoding="utf-8") as json_data: return json.load(json_data, object_pairs_hook=OrderedDict).get("tests") elif "http" in location: json_data = requests.get(location) if not json_data: raise Dump2PolarionException("Failed to download") return json.loads(json_data.text, object_pairs_hook=OrderedDict).get("tests") else: raise Dump2PolarionException("Invalid location") except Exception as err: raise Dump2PolarionException("Failed to parse JSON from {}: {}".format(location, err))
python
def _get_json(location): """Reads JSON data from file or URL.""" location = os.path.expanduser(location) try: if os.path.isfile(location): with io.open(location, encoding="utf-8") as json_data: return json.load(json_data, object_pairs_hook=OrderedDict).get("tests") elif "http" in location: json_data = requests.get(location) if not json_data: raise Dump2PolarionException("Failed to download") return json.loads(json_data.text, object_pairs_hook=OrderedDict).get("tests") else: raise Dump2PolarionException("Invalid location") except Exception as err: raise Dump2PolarionException("Failed to parse JSON from {}: {}".format(location, err))
[ "def", "_get_json", "(", "location", ")", ":", "location", "=", "os", ".", "path", ".", "expanduser", "(", "location", ")", "try", ":", "if", "os", ".", "path", ".", "isfile", "(", "location", ")", ":", "with", "io", ".", "open", "(", "location", ",", "encoding", "=", "\"utf-8\"", ")", "as", "json_data", ":", "return", "json", ".", "load", "(", "json_data", ",", "object_pairs_hook", "=", "OrderedDict", ")", ".", "get", "(", "\"tests\"", ")", "elif", "\"http\"", "in", "location", ":", "json_data", "=", "requests", ".", "get", "(", "location", ")", "if", "not", "json_data", ":", "raise", "Dump2PolarionException", "(", "\"Failed to download\"", ")", "return", "json", ".", "loads", "(", "json_data", ".", "text", ",", "object_pairs_hook", "=", "OrderedDict", ")", ".", "get", "(", "\"tests\"", ")", "else", ":", "raise", "Dump2PolarionException", "(", "\"Invalid location\"", ")", "except", "Exception", "as", "err", ":", "raise", "Dump2PolarionException", "(", "\"Failed to parse JSON from {}: {}\"", ".", "format", "(", "location", ",", "err", ")", ")" ]
Reads JSON data from file or URL.
[ "Reads", "JSON", "data", "from", "file", "or", "URL", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/ostriztools.py#L29-L44
train
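One subtlety in the URL branch above: the `if not json_data:` test works because requests.Response is falsy for 4xx/5xx status codes (bool(response) returns response.ok). An explicit equivalent of that guard:

import requests

response = requests.get("https://example.com/data.json")  # hypothetical URL
if not response.ok:  # same condition the truthiness test expresses above
    raise RuntimeError("Failed to download")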
mkoura/dump2polarion
dump2polarion/results/ostriztools.py
_calculate_duration
def _calculate_duration(start_time, finish_time): """Calculates how long it took to execute the testcase.""" if not (start_time and finish_time): return 0 start = datetime.datetime.fromtimestamp(start_time) finish = datetime.datetime.fromtimestamp(finish_time) duration = finish - start return duration.total_seconds()
python
def _calculate_duration(start_time, finish_time): """Calculates how long it took to execute the testcase.""" if not (start_time and finish_time): return 0 start = datetime.datetime.fromtimestamp(start_time) finish = datetime.datetime.fromtimestamp(finish_time) duration = finish - start return duration.total_seconds()
[ "def", "_calculate_duration", "(", "start_time", ",", "finish_time", ")", ":", "if", "not", "(", "start_time", "and", "finish_time", ")", ":", "return", "0", "start", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "start_time", ")", "finish", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "finish_time", ")", "duration", "=", "finish", "-", "start", "decimals", "=", "float", "(", "(", "\"0.\"", "+", "str", "(", "duration", ".", "microseconds", ")", ")", ")", "return", "duration", ".", "seconds", "+", "decimals" ]
Calculates how long it took to execute the testcase.
[ "Calculates", "how", "long", "it", "took", "to", "execute", "the", "testcase", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/ostriztools.py#L67-L76
train
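A self-contained check of the duration helper with a fractional elapsed time (hypothetical epoch timestamps); string concatenation of duration.microseconds would mis-scale sub-0.1-second remainders, whereas total_seconds() keeps them exact:

import datetime

def calculate_duration(start_time, finish_time):
    # Standalone copy of the helper above for a quick check.
    if not (start_time and finish_time):
        return 0
    start = datetime.datetime.fromtimestamp(start_time)
    finish = datetime.datetime.fromtimestamp(finish_time)
    return (finish - start).total_seconds()

print(calculate_duration(100.0, 102.005))  # 2.005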
mkoura/dump2polarion
dump2polarion/results/ostriztools.py
_filter_parameters
def _filter_parameters(parameters): """Filters the ignored parameters out.""" if not parameters: return None return OrderedDict( (param, value) for param, value in six.iteritems(parameters) if param not in IGNORED_PARAMS )
python
def _filter_parameters(parameters): """Filters the ignored parameters out.""" if not parameters: return None return OrderedDict( (param, value) for param, value in six.iteritems(parameters) if param not in IGNORED_PARAMS )
[ "def", "_filter_parameters", "(", "parameters", ")", ":", "if", "not", "parameters", ":", "return", "None", "return", "OrderedDict", "(", "(", "param", ",", "value", ")", "for", "param", ",", "value", "in", "six", ".", "iteritems", "(", "parameters", ")", "if", "param", "not", "in", "IGNORED_PARAMS", ")" ]
Filters the ignored parameters out.
[ "Filters", "the", "ignored", "parameters", "out", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/ostriztools.py#L88-L94
train
mkoura/dump2polarion
dump2polarion/results/ostriztools.py
_append_record
def _append_record(test_data, results, test_path): """Adds data of single testcase results to results database.""" statuses = test_data.get("statuses") jenkins_data = test_data.get("jenkins") or {} data = [ ("title", test_data.get("test_name") or _get_testname(test_path)), ("verdict", statuses.get("overall")), ("source", test_data.get("source")), ("job_name", jenkins_data.get("job_name")), ("run", jenkins_data.get("build_number")), ("params", _filter_parameters(test_data.get("params"))), ( "time", _calculate_duration(test_data.get("start_time"), test_data.get("finish_time")) or 0, ), ] test_id = test_data.get("polarion") if test_id: if isinstance(test_id, list): test_id = test_id[0] data.append(("test_id", test_id)) results.append(OrderedDict(data))
python
def _append_record(test_data, results, test_path): """Adds data of single testcase results to results database.""" statuses = test_data.get("statuses") jenkins_data = test_data.get("jenkins") or {} data = [ ("title", test_data.get("test_name") or _get_testname(test_path)), ("verdict", statuses.get("overall")), ("source", test_data.get("source")), ("job_name", jenkins_data.get("job_name")), ("run", jenkins_data.get("build_number")), ("params", _filter_parameters(test_data.get("params"))), ( "time", _calculate_duration(test_data.get("start_time"), test_data.get("finish_time")) or 0, ), ] test_id = test_data.get("polarion") if test_id: if isinstance(test_id, list): test_id = test_id[0] data.append(("test_id", test_id)) results.append(OrderedDict(data))
[ "def", "_append_record", "(", "test_data", ",", "results", ",", "test_path", ")", ":", "statuses", "=", "test_data", ".", "get", "(", "\"statuses\"", ")", "jenkins_data", "=", "test_data", ".", "get", "(", "\"jenkins\"", ")", "or", "{", "}", "data", "=", "[", "(", "\"title\"", ",", "test_data", ".", "get", "(", "\"test_name\"", ")", "or", "_get_testname", "(", "test_path", ")", ")", ",", "(", "\"verdict\"", ",", "statuses", ".", "get", "(", "\"overall\"", ")", ")", ",", "(", "\"source\"", ",", "test_data", ".", "get", "(", "\"source\"", ")", ")", ",", "(", "\"job_name\"", ",", "jenkins_data", ".", "get", "(", "\"job_name\"", ")", ")", ",", "(", "\"run\"", ",", "jenkins_data", ".", "get", "(", "\"build_number\"", ")", ")", ",", "(", "\"params\"", ",", "_filter_parameters", "(", "test_data", ".", "get", "(", "\"params\"", ")", ")", ")", ",", "(", "\"time\"", ",", "_calculate_duration", "(", "test_data", ".", "get", "(", "\"start_time\"", ")", ",", "test_data", ".", "get", "(", "\"finish_time\"", ")", ")", "or", "0", ",", ")", ",", "]", "test_id", "=", "test_data", ".", "get", "(", "\"polarion\"", ")", "if", "test_id", ":", "if", "isinstance", "(", "test_id", ",", "list", ")", ":", "test_id", "=", "test_id", "[", "0", "]", "data", ".", "append", "(", "(", "\"test_id\"", ",", "test_id", ")", ")", "results", ".", "append", "(", "OrderedDict", "(", "data", ")", ")" ]
Adds data of single testcase results to results database.
[ "Adds", "data", "of", "single", "testcase", "results", "to", "results", "database", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/ostriztools.py#L97-L120
train
mkoura/dump2polarion
dump2polarion/results/ostriztools.py
_parse_ostriz
def _parse_ostriz(ostriz_data): """Reads the content of the input JSON and returns testcases results.""" if not ostriz_data: raise NothingToDoException("No data to import") results = [] found_build = None last_finish_time = [0] for test_path, test_data in six.iteritems(ostriz_data): curr_build = test_data.get("build") if not curr_build: continue # set `found_build` from first record where it's present if not found_build: found_build = curr_build # make sure we are collecting data for the same build if found_build != curr_build: continue if not test_data.get("statuses"): continue _append_record(test_data, results, test_path) _comp_finish_time(test_data, last_finish_time) if last_finish_time[0]: logger.info("Last result finished at %s", last_finish_time[0]) testrun_id = _get_testrun_id(found_build) return xunit_exporter.ImportedData(results=results, testrun=testrun_id)
python
def _parse_ostriz(ostriz_data): """Reads the content of the input JSON and returns testcases results.""" if not ostriz_data: raise NothingToDoException("No data to import") results = [] found_build = None last_finish_time = [0] for test_path, test_data in six.iteritems(ostriz_data): curr_build = test_data.get("build") if not curr_build: continue # set `found_build` from first record where it's present if not found_build: found_build = curr_build # make sure we are collecting data for the same build if found_build != curr_build: continue if not test_data.get("statuses"): continue _append_record(test_data, results, test_path) _comp_finish_time(test_data, last_finish_time) if last_finish_time[0]: logger.info("Last result finished at %s", last_finish_time[0]) testrun_id = _get_testrun_id(found_build) return xunit_exporter.ImportedData(results=results, testrun=testrun_id)
[ "def", "_parse_ostriz", "(", "ostriz_data", ")", ":", "if", "not", "ostriz_data", ":", "raise", "NothingToDoException", "(", "\"No data to import\"", ")", "results", "=", "[", "]", "found_build", "=", "None", "last_finish_time", "=", "[", "0", "]", "for", "test_path", ",", "test_data", "in", "six", ".", "iteritems", "(", "ostriz_data", ")", ":", "curr_build", "=", "test_data", ".", "get", "(", "\"build\"", ")", "if", "not", "curr_build", ":", "continue", "# set `found_build` from first record where it's present", "if", "not", "found_build", ":", "found_build", "=", "curr_build", "# make sure we are collecting data for the same build", "if", "found_build", "!=", "curr_build", ":", "continue", "if", "not", "test_data", ".", "get", "(", "\"statuses\"", ")", ":", "continue", "_append_record", "(", "test_data", ",", "results", ",", "test_path", ")", "_comp_finish_time", "(", "test_data", ",", "last_finish_time", ")", "if", "last_finish_time", "[", "0", "]", ":", "logger", ".", "info", "(", "\"Last result finished at %s\"", ",", "last_finish_time", "[", "0", "]", ")", "testrun_id", "=", "_get_testrun_id", "(", "found_build", ")", "return", "xunit_exporter", ".", "ImportedData", "(", "results", "=", "results", ",", "testrun", "=", "testrun_id", ")" ]
Reads the content of the input JSON and returns testcases results.
[ "Reads", "the", "content", "of", "the", "input", "JSON", "and", "returns", "testcases", "results", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/ostriztools.py#L129-L160
train
openearth/mmi-python
mmi/__init__.py
send_array
def send_array( socket, A=None, metadata=None, flags=0, copy=False, track=False, compress=None, chunksize=50 * 1000 * 1000 ): """send a numpy array with metadata over zmq message is mostly multipart: metadata | array part 1 | array part 2, etc only metadata: metadata the chunksize roughly determines the size of the parts being sent if the chunksize is too big, you get an error like: zmq.error.Again: Resource temporarily unavailable """ # create a metadata dictionary for the message md = {} # always add a timestamp md['timestamp'] = datetime.datetime.now().isoformat() # copy extra metadata if metadata: md.update(metadata) # if we don't have an array if A is None: # send only json md['parts'] = 0 socket.send_json(md, flags) # and we're done return # support single values (empty shape) if isinstance(A, float) or isinstance(A, int): A = np.asarray(A) # add array metadata md['dtype'] = str(A.dtype) md['shape'] = A.shape # determine number of parts md['parts'] = int(np.prod(A.shape) // chunksize + 1) try: # If an array has a fill value assume it's an array with missings # store the fill_Value in the metadata and fill the array before sending. # asscalar should work for scalar, 0d array or nd array of size 1 md['fill_value'] = np.asscalar(A.fill_value) A = A.filled() except AttributeError: # no masked array, nothing to do pass # send json, followed by array (in x parts) socket.send_json(md, flags | zmq.SNDMORE) # although the check is not strictly necessary, we try to maintain fast # pointer transfer when there is only 1 part if md['parts'] == 1: msg = memoryview(np.ascontiguousarray(A)) socket.send(msg, flags, copy=copy, track=track) else: # split array at first dimension and send parts for i, a in enumerate(np.array_split(A, md['parts'])): # Make a copy if required and pass along the memoryview msg = memoryview(np.ascontiguousarray(a)) flags_ = flags if i != md['parts'] - 1: flags_ |= zmq.SNDMORE socket.send(msg, flags_, copy=copy, track=track) return
python
def send_array( socket, A=None, metadata=None, flags=0, copy=False, track=False, compress=None, chunksize=50 * 1000 * 1000 ): """send a numpy array with metadata over zmq message is mostly multipart: metadata | array part 1 | array part 2, etc only metadata: metadata the chunksize roughly determines the size of the parts being sent if the chunksize is too big, you get an error like: zmq.error.Again: Resource temporarily unavailable """ # create a metadata dictionary for the message md = {} # always add a timestamp md['timestamp'] = datetime.datetime.now().isoformat() # copy extra metadata if metadata: md.update(metadata) # if we don't have an array if A is None: # send only json md['parts'] = 0 socket.send_json(md, flags) # and we're done return # support single values (empty shape) if isinstance(A, float) or isinstance(A, int): A = np.asarray(A) # add array metadata md['dtype'] = str(A.dtype) md['shape'] = A.shape # determine number of parts md['parts'] = int(np.prod(A.shape) // chunksize + 1) try: # If an array has a fill value assume it's an array with missings # store the fill_Value in the metadata and fill the array before sending. # asscalar should work for scalar, 0d array or nd array of size 1 md['fill_value'] = np.asscalar(A.fill_value) A = A.filled() except AttributeError: # no masked array, nothing to do pass # send json, followed by array (in x parts) socket.send_json(md, flags | zmq.SNDMORE) # although the check is not strictly necessary, we try to maintain fast # pointer transfer when there is only 1 part if md['parts'] == 1: msg = memoryview(np.ascontiguousarray(A)) socket.send(msg, flags, copy=copy, track=track) else: # split array at first dimension and send parts for i, a in enumerate(np.array_split(A, md['parts'])): # Make a copy if required and pass along the memoryview msg = memoryview(np.ascontiguousarray(a)) flags_ = flags if i != md['parts'] - 1: flags_ |= zmq.SNDMORE socket.send(msg, flags_, copy=copy, track=track) return
[ "def", "send_array", "(", "socket", ",", "A", "=", "None", ",", "metadata", "=", "None", ",", "flags", "=", "0", ",", "copy", "=", "False", ",", "track", "=", "False", ",", "compress", "=", "None", ",", "chunksize", "=", "50", "*", "1000", "*", "1000", ")", ":", "# create a metadata dictionary for the message", "md", "=", "{", "}", "# always add a timestamp", "md", "[", "'timestamp'", "]", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "# copy extra metadata", "if", "metadata", ":", "md", ".", "update", "(", "metadata", ")", "# if we don't have an array", "if", "A", "is", "None", ":", "# send only json", "md", "[", "'parts'", "]", "=", "0", "socket", ".", "send_json", "(", "md", ",", "flags", ")", "# and we're done", "return", "# support single values (empty shape)", "if", "isinstance", "(", "A", ",", "float", ")", "or", "isinstance", "(", "A", ",", "int", ")", ":", "A", "=", "np", ".", "asarray", "(", "A", ")", "# add array metadata", "md", "[", "'dtype'", "]", "=", "str", "(", "A", ".", "dtype", ")", "md", "[", "'shape'", "]", "=", "A", ".", "shape", "# determine number of parts", "md", "[", "'parts'", "]", "=", "int", "(", "np", ".", "prod", "(", "A", ".", "shape", ")", "//", "chunksize", "+", "1", ")", "try", ":", "# If an array has a fill value assume it's an array with missings", "# store the fill_Value in the metadata and fill the array before sending.", "# asscalar should work for scalar, 0d array or nd array of size 1", "md", "[", "'fill_value'", "]", "=", "np", ".", "asscalar", "(", "A", ".", "fill_value", ")", "A", "=", "A", ".", "filled", "(", ")", "except", "AttributeError", ":", "# no masked array, nothing to do", "pass", "# send json, followed by array (in x parts)", "socket", ".", "send_json", "(", "md", ",", "flags", "|", "zmq", ".", "SNDMORE", ")", "# although the check is not strictly necessary, we try to maintain fast", "# pointer transfer when there is only 1 part", "if", "md", "[", "'parts'", "]", "==", "1", ":", "msg", "=", "memoryview", "(", "np", ".", "ascontiguousarray", "(", "A", ")", ")", "socket", ".", "send", "(", "msg", ",", "flags", ",", "copy", "=", "copy", ",", "track", "=", "track", ")", "else", ":", "# split array at first dimension and send parts", "for", "i", ",", "a", "in", "enumerate", "(", "np", ".", "array_split", "(", "A", ",", "md", "[", "'parts'", "]", ")", ")", ":", "# Make a copy if required and pass along the memoryview", "msg", "=", "memoryview", "(", "np", ".", "ascontiguousarray", "(", "a", ")", ")", "flags_", "=", "flags", "if", "i", "!=", "md", "[", "'parts'", "]", "-", "1", ":", "flags_", "|=", "zmq", ".", "SNDMORE", "socket", ".", "send", "(", "msg", ",", "flags_", ",", "copy", "=", "copy", ",", "track", "=", "track", ")", "return" ]
send a numpy array with metadata over zmq message is mostly multipart: metadata | array part 1 | array part 2, etc only metadata: metadata the chunksize roughly determines the size of the parts being sent if the chunksize is too big, you get an error like: zmq.error.Again: Resource temporarily unavailable
[ "send", "a", "numpy", "array", "with", "metadata", "over", "zmq" ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/__init__.py#L27-L98
train
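The masked-array branch in send_array round-trips missing data by value: the mask is flattened into fill_value metadata before sending and re-applied with masked_equal on receipt. A standalone sketch of that encode/decode step (note np.asscalar, used above, is deprecated in modern NumPy; plain float() does the same job here):

import numpy as np

# A masked array with one missing value and an explicit fill value.
A = np.ma.masked_invalid(np.array([1.0, np.nan, 3.0]))
A.fill_value = -999.0

fill_value = float(A.fill_value)  # what send_array stores in the metadata
filled = A.filled()               # [1., -999., 3.] -- plain ndarray, safe to ship

restored = np.ma.masked_equal(filled, fill_value)  # receiver side
print(restored)  # [1.0 -- 3.0]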
openearth/mmi-python
mmi/__init__.py
recv_array
def recv_array( socket, flags=0, copy=False, track=False, poll=None, poll_timeout=10000 ): """recv a metadata and an optional numpy array from a zmq socket Optionally provide poll object to use recv_array with timeout poll_timeout is in millis """ if poll is None: md = socket.recv_json(flags=flags) else: # one-try "Lazy Pirate" method: http://zguide.zeromq.org/php:chapter4 socks = dict(poll.poll(poll_timeout)) if socks.get(socket) == zmq.POLLIN: reply = socket.recv_json(flags=flags) # note that reply can be an empty array md = reply else: raise NoResponseException( "Recv_array got no response within timeout (1)") if md['parts'] == 0: # No array expected A = None elif md['parts'] == 1: # although the check is not strictly necessary, we try to maintain fast # pointer transfer when there is only 1 part if poll is None: msg = socket.recv(flags=flags, copy=copy, track=track) else: # one-try "Lazy Pirate" method: http://zguide.zeromq.org/php:chapter4 socks = dict(poll.poll(poll_timeout)) if socks.get(socket) == zmq.POLLIN: reply = socket.recv(flags=flags, copy=copy, track=track) # note that reply can be an empty array msg = reply else: raise NoResponseException( "Recv_array got no response within timeout (2)") buf = buffer(msg) A = np.frombuffer(buf, dtype=md['dtype']) A = A.reshape(md['shape']) if 'fill_value' in md: A = np.ma.masked_equal(A, md['fill_value']) else: # multi part array A = np.zeros(np.prod(md['shape']), dtype=md['dtype']) arr_position = 0 for i in range(md['parts']): if poll is None: msg = socket.recv(flags=flags, copy=copy, track=track) else: # one-try "Lazy Pirate" method: http://zguide.zeromq.org/php:chapter4 socks = dict(poll.poll(poll_timeout)) if socks.get(socket) == zmq.POLLIN: reply = socket.recv(flags=flags, copy=copy, track=track) if not reply: raise EmptyResponseException( "Recv_array got an empty response (2)") msg = reply else: raise NoResponseException( "Recv_array got no response within timeout (2)") buf = buffer(msg) a = np.frombuffer(buf, dtype=md['dtype']) A[arr_position:arr_position + a.shape[0]] = a[:] arr_position += a.shape[0] A = A.reshape(md['shape']) if 'fill_value' in md: A = np.ma.masked_equal(A, md['fill_value']) return A, md
python
def recv_array( socket, flags=0, copy=False, track=False, poll=None, poll_timeout=10000 ): """recv a metadata and an optional numpy array from a zmq socket Optionally provide poll object to use recv_array with timeout poll_timeout is in millis """ if poll is None: md = socket.recv_json(flags=flags) else: # one-try "Lazy Pirate" method: http://zguide.zeromq.org/php:chapter4 socks = dict(poll.poll(poll_timeout)) if socks.get(socket) == zmq.POLLIN: reply = socket.recv_json(flags=flags) # note that reply can be an empty array md = reply else: raise NoResponseException( "Recv_array got no response within timeout (1)") if md['parts'] == 0: # No array expected A = None elif md['parts'] == 1: # although the check is not strictly necessary, we try to maintain fast # pointer transfer when there is only 1 part if poll is None: msg = socket.recv(flags=flags, copy=copy, track=track) else: # one-try "Lazy Pirate" method: http://zguide.zeromq.org/php:chapter4 socks = dict(poll.poll(poll_timeout)) if socks.get(socket) == zmq.POLLIN: reply = socket.recv(flags=flags, copy=copy, track=track) # note that reply can be an empty array msg = reply else: raise NoResponseException( "Recv_array got no response within timeout (2)") buf = buffer(msg) A = np.frombuffer(buf, dtype=md['dtype']) A = A.reshape(md['shape']) if 'fill_value' in md: A = np.ma.masked_equal(A, md['fill_value']) else: # multi part array A = np.zeros(np.prod(md['shape']), dtype=md['dtype']) arr_position = 0 for i in range(md['parts']): if poll is None: msg = socket.recv(flags=flags, copy=copy, track=track) else: # one-try "Lazy Pirate" method: http://zguide.zeromq.org/php:chapter4 socks = dict(poll.poll(poll_timeout)) if socks.get(socket) == zmq.POLLIN: reply = socket.recv(flags=flags, copy=copy, track=track) if not reply: raise EmptyResponseException( "Recv_array got an empty response (2)") msg = reply else: raise NoResponseException( "Recv_array got no response within timeout (2)") buf = buffer(msg) a = np.frombuffer(buf, dtype=md['dtype']) A[arr_position:arr_position + a.shape[0]] = a[:] arr_position += a.shape[0] A = A.reshape(md['shape']) if 'fill_value' in md: A = np.ma.masked_equal(A, md['fill_value']) return A, md
[ "def", "recv_array", "(", "socket", ",", "flags", "=", "0", ",", "copy", "=", "False", ",", "track", "=", "False", ",", "poll", "=", "None", ",", "poll_timeout", "=", "10000", ")", ":", "if", "poll", "is", "None", ":", "md", "=", "socket", ".", "recv_json", "(", "flags", "=", "flags", ")", "else", ":", "# one-try \"Lazy Pirate\" method: http://zguide.zeromq.org/php:chapter4", "socks", "=", "dict", "(", "poll", ".", "poll", "(", "poll_timeout", ")", ")", "if", "socks", ".", "get", "(", "socket", ")", "==", "zmq", ".", "POLLIN", ":", "reply", "=", "socket", ".", "recv_json", "(", "flags", "=", "flags", ")", "# note that reply can be an empty array", "md", "=", "reply", "else", ":", "raise", "NoResponseException", "(", "\"Recv_array got no response within timeout (1)\"", ")", "if", "md", "[", "'parts'", "]", "==", "0", ":", "# No array expected", "A", "=", "None", "elif", "md", "[", "'parts'", "]", "==", "1", ":", "# although the check is not strictly necessary, we try to maintain fast", "# pointer transfer when there is only 1 part", "if", "poll", "is", "None", ":", "msg", "=", "socket", ".", "recv", "(", "flags", "=", "flags", ",", "copy", "=", "copy", ",", "track", "=", "track", ")", "else", ":", "# one-try \"Lazy Pirate\" method: http://zguide.zeromq.org/php:chapter4", "socks", "=", "dict", "(", "poll", ".", "poll", "(", "poll_timeout", ")", ")", "if", "socks", ".", "get", "(", "socket", ")", "==", "zmq", ".", "POLLIN", ":", "reply", "=", "socket", ".", "recv", "(", "flags", "=", "flags", ",", "copy", "=", "copy", ",", "track", "=", "track", ")", "# note that reply can be an empty array", "msg", "=", "reply", "else", ":", "raise", "NoResponseException", "(", "\"Recv_array got no response within timeout (2)\"", ")", "buf", "=", "buffer", "(", "msg", ")", "A", "=", "np", ".", "frombuffer", "(", "buf", ",", "dtype", "=", "md", "[", "'dtype'", "]", ")", "A", "=", "A", ".", "reshape", "(", "md", "[", "'shape'", "]", ")", "if", "'fill_value'", "in", "md", ":", "A", "=", "np", ".", "ma", ".", "masked_equal", "(", "A", ",", "md", "[", "'fill_value'", "]", ")", "else", ":", "# multi part array", "A", "=", "np", ".", "zeros", "(", "np", ".", "prod", "(", "md", "[", "'shape'", "]", ")", ",", "dtype", "=", "md", "[", "'dtype'", "]", ")", "arr_position", "=", "0", "for", "i", "in", "range", "(", "md", "[", "'parts'", "]", ")", ":", "if", "poll", "is", "None", ":", "msg", "=", "socket", ".", "recv", "(", "flags", "=", "flags", ",", "copy", "=", "copy", ",", "track", "=", "track", ")", "else", ":", "# one-try \"Lazy Pirate\" method: http://zguide.zeromq.org/php:chapter4", "socks", "=", "dict", "(", "poll", ".", "poll", "(", "poll_timeout", ")", ")", "if", "socks", ".", "get", "(", "socket", ")", "==", "zmq", ".", "POLLIN", ":", "reply", "=", "socket", ".", "recv", "(", "flags", "=", "flags", ",", "copy", "=", "copy", ",", "track", "=", "track", ")", "if", "not", "reply", ":", "raise", "EmptyResponseException", "(", "\"Recv_array got an empty response (2)\"", ")", "msg", "=", "reply", "else", ":", "raise", "NoResponseException", "(", "\"Recv_array got no response within timeout (2)\"", ")", "buf", "=", "buffer", "(", "msg", ")", "a", "=", "np", ".", "frombuffer", "(", "buf", ",", "dtype", "=", "md", "[", "'dtype'", "]", ")", "A", "[", "arr_position", ":", "arr_position", "+", "a", ".", "shape", "[", "0", "]", "]", "=", "a", "[", ":", "]", "arr_position", "+=", "a", ".", "shape", "[", "0", "]", "A", "=", "A", ".", "reshape", "(", "md", "[", "'shape'", "]", ")", "if", "'fill_value'", "in", "md", ":", 
"A", "=", "np", ".", "ma", ".", "masked_equal", "(", "A", ",", "md", "[", "'fill_value'", "]", ")", "return", "A", ",", "md" ]
recv a metadata and an optional numpy array from a zmq socket Optionally provide poll object to use recv_array with timeout poll_timeout is in millis
[ "recv", "a", "metadata", "and", "an", "optional", "numpy", "array", "from", "a", "zmq", "socket" ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/__init__.py#L101-L182
train
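A minimal round-trip of the pair above over an in-process socket. This sketch assumes the module is importable as mmi; note that recv_array calls the Python 2-only buffer() builtin, so it presumes a Python 2 interpreter (or swapping buffer for memoryview):

import numpy as np
import zmq
import mmi

ctx = zmq.Context()
tx, rx = ctx.socket(zmq.PAIR), ctx.socket(zmq.PAIR)
tx.bind("inproc://demo")
rx.connect("inproc://demo")

# One JSON metadata frame plus one binary frame (array fits in a single part).
mmi.send_array(tx, np.arange(6).reshape(2, 3), metadata={"name": "depth"})
A, md = mmi.recv_array(rx)
print(md["name"], md["shape"], A.sum())  # depth [2, 3] 15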
klichukb/django-migrate-sql
migrate_sql/autodetector.py
is_sql_equal
def is_sql_equal(sqls1, sqls2): """ Find out equality of two SQL items. See https://docs.djangoproject.com/en/1.8/ref/migration-operations/#runsql. Args: sqls1, sqls2: SQL items, have the same format as supported by Django's RunSQL operation. Returns: (bool) `True` if equal, otherwise `False`. """ is_seq1 = isinstance(sqls1, (list, tuple)) is_seq2 = isinstance(sqls2, (list, tuple)) if not is_seq1: sqls1 = (sqls1,) if not is_seq2: sqls2 = (sqls2,) if len(sqls1) != len(sqls2): return False for sql1, sql2 in zip(sqls1, sqls2): sql1, params1 = _sql_params(sql1) sql2, params2 = _sql_params(sql2) if sql1 != sql2 or params1 != params2: return False return True
python
def is_sql_equal(sqls1, sqls2): """ Find out equality of two SQL items. See https://docs.djangoproject.com/en/1.8/ref/migration-operations/#runsql. Args: sqls1, sqls2: SQL items, have the same format as supported by Django's RunSQL operation. Returns: (bool) `True` if equal, otherwise `False`. """ is_seq1 = isinstance(sqls1, (list, tuple)) is_seq2 = isinstance(sqls2, (list, tuple)) if not is_seq1: sqls1 = (sqls1,) if not is_seq2: sqls2 = (sqls2,) if len(sqls1) != len(sqls2): return False for sql1, sql2 in zip(sqls1, sqls2): sql1, params1 = _sql_params(sql1) sql2, params2 = _sql_params(sql2) if sql1 != sql2 or params1 != params2: return False return True
[ "def", "is_sql_equal", "(", "sqls1", ",", "sqls2", ")", ":", "is_seq1", "=", "isinstance", "(", "sqls1", ",", "(", "list", ",", "tuple", ")", ")", "is_seq2", "=", "isinstance", "(", "sqls2", ",", "(", "list", ",", "tuple", ")", ")", "if", "not", "is_seq1", ":", "sqls1", "=", "(", "sqls1", ",", ")", "if", "not", "is_seq2", ":", "sqls2", "=", "(", "sqls2", ",", ")", "if", "len", "(", "sqls1", ")", "!=", "len", "(", "sqls2", ")", ":", "return", "False", "for", "sql1", ",", "sql2", "in", "zip", "(", "sqls1", ",", "sqls2", ")", ":", "sql1", ",", "params1", "=", "_sql_params", "(", "sql1", ")", "sql2", ",", "params2", "=", "_sql_params", "(", "sql2", ")", "if", "sql1", "!=", "sql2", "or", "params1", "!=", "params2", ":", "return", "False", "return", "True" ]
Find out equality of two SQL items. See https://docs.djangoproject.com/en/1.8/ref/migration-operations/#runsql. Args: sqls1, sqls2: SQL items, have the same format as supported by Django's RunSQL operation. Returns: (bool) `True` if equal, otherwise `False`.
[ "Find", "out", "equality", "of", "two", "SQL", "items", "." ]
be48ff2c9283404e3d951128c459c3496d1ba25d
https://github.com/klichukb/django-migrate-sql/blob/be48ff2c9283404e3d951128c459c3496d1ba25d/migrate_sql/autodetector.py#L33-L59
train
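A behavioural sketch of the comparison above, assuming Django and django-migrate-sql are installed and that _sql_params follows Django's RunSQL convention of splitting a (sql, params) pair into its two halves: bare strings compare equal to one-element lists, and parameters participate in equality:

from migrate_sql.autodetector import is_sql_equal

assert is_sql_equal("SELECT 1", ["SELECT 1"])                    # scalar vs one-element list
assert not is_sql_equal(("SELECT %s", [1]), ("SELECT %s", [2]))  # same SQL, different params
assert not is_sql_equal("SELECT 1", ["SELECT 1", "SELECT 2"])    # different lengths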
klichukb/django-migrate-sql
migrate_sql/autodetector.py
MigrationAutodetector.add_sql_operation
def add_sql_operation(self, app_label, sql_name, operation, dependencies): """ Add SQL operation and register it to be used as dependency for further sequential operations. """ deps = [(dp[0], SQL_BLOB, dp[1], self._sql_operations.get(dp)) for dp in dependencies] self.add_operation(app_label, operation, dependencies=deps) self._sql_operations[(app_label, sql_name)] = operation
python
def add_sql_operation(self, app_label, sql_name, operation, dependencies): """ Add SQL operation and register it to be used as dependency for further sequential operations. """ deps = [(dp[0], SQL_BLOB, dp[1], self._sql_operations.get(dp)) for dp in dependencies] self.add_operation(app_label, operation, dependencies=deps) self._sql_operations[(app_label, sql_name)] = operation
[ "def", "add_sql_operation", "(", "self", ",", "app_label", ",", "sql_name", ",", "operation", ",", "dependencies", ")", ":", "deps", "=", "[", "(", "dp", "[", "0", "]", ",", "SQL_BLOB", ",", "dp", "[", "1", "]", ",", "self", ".", "_sql_operations", ".", "get", "(", "dp", ")", ")", "for", "dp", "in", "dependencies", "]", "self", ".", "add_operation", "(", "app_label", ",", "operation", ",", "dependencies", "=", "deps", ")", "self", ".", "_sql_operations", "[", "(", "app_label", ",", "sql_name", ")", "]", "=", "operation" ]
Add SQL operation and register it to be used as dependency for further sequential operations.
[ "Add", "SQL", "operation", "and", "register", "it", "to", "be", "used", "as", "dependency", "for", "further", "sequential", "operations", "." ]
be48ff2c9283404e3d951128c459c3496d1ba25d
https://github.com/klichukb/django-migrate-sql/blob/be48ff2c9283404e3d951128c459c3496d1ba25d/migrate_sql/autodetector.py#L110-L118
train
klichukb/django-migrate-sql
migrate_sql/autodetector.py
MigrationAutodetector._generate_reversed_sql
def _generate_reversed_sql(self, keys, changed_keys): """ Generate reversed operations for changes, that require full rollback and creation. """ for key in keys: if key not in changed_keys: continue app_label, sql_name = key old_item = self.from_sql_graph.nodes[key] new_item = self.to_sql_graph.nodes[key] if not old_item.reverse_sql or old_item.reverse_sql == RunSQL.noop or new_item.replace: continue # migrate backwards operation = ReverseAlterSQL(sql_name, old_item.reverse_sql, reverse_sql=old_item.sql) sql_deps = [n.key for n in self.from_sql_graph.node_map[key].children] sql_deps.append(key) self.add_sql_operation(app_label, sql_name, operation, sql_deps)
python
def _generate_reversed_sql(self, keys, changed_keys): """ Generate reversed operations for changes, that require full rollback and creation. """ for key in keys: if key not in changed_keys: continue app_label, sql_name = key old_item = self.from_sql_graph.nodes[key] new_item = self.to_sql_graph.nodes[key] if not old_item.reverse_sql or old_item.reverse_sql == RunSQL.noop or new_item.replace: continue # migrate backwards operation = ReverseAlterSQL(sql_name, old_item.reverse_sql, reverse_sql=old_item.sql) sql_deps = [n.key for n in self.from_sql_graph.node_map[key].children] sql_deps.append(key) self.add_sql_operation(app_label, sql_name, operation, sql_deps)
[ "def", "_generate_reversed_sql", "(", "self", ",", "keys", ",", "changed_keys", ")", ":", "for", "key", "in", "keys", ":", "if", "key", "not", "in", "changed_keys", ":", "continue", "app_label", ",", "sql_name", "=", "key", "old_item", "=", "self", ".", "from_sql_graph", ".", "nodes", "[", "key", "]", "new_item", "=", "self", ".", "to_sql_graph", ".", "nodes", "[", "key", "]", "if", "not", "old_item", ".", "reverse_sql", "or", "old_item", ".", "reverse_sql", "==", "RunSQL", ".", "noop", "or", "new_item", ".", "replace", ":", "continue", "# migrate backwards", "operation", "=", "ReverseAlterSQL", "(", "sql_name", ",", "old_item", ".", "reverse_sql", ",", "reverse_sql", "=", "old_item", ".", "sql", ")", "sql_deps", "=", "[", "n", ".", "key", "for", "n", "in", "self", ".", "from_sql_graph", ".", "node_map", "[", "key", "]", ".", "children", "]", "sql_deps", ".", "append", "(", "key", ")", "self", ".", "add_sql_operation", "(", "app_label", ",", "sql_name", ",", "operation", ",", "sql_deps", ")" ]
Generate reversed operations for changes, that require full rollback and creation.
[ "Generate", "reversed", "operations", "for", "changes", "that", "require", "full", "rollback", "and", "creation", "." ]
be48ff2c9283404e3d951128c459c3496d1ba25d
https://github.com/klichukb/django-migrate-sql/blob/be48ff2c9283404e3d951128c459c3496d1ba25d/migrate_sql/autodetector.py#L120-L137
train
klichukb/django-migrate-sql
migrate_sql/autodetector.py
MigrationAutodetector._generate_delete_sql
def _generate_delete_sql(self, delete_keys): """ Generate forward delete operations for SQL items. """ for key in delete_keys: app_label, sql_name = key old_node = self.from_sql_graph.nodes[key] operation = DeleteSQL(sql_name, old_node.reverse_sql, reverse_sql=old_node.sql) sql_deps = [n.key for n in self.from_sql_graph.node_map[key].children] sql_deps.append(key) self.add_sql_operation(app_label, sql_name, operation, sql_deps)
python
def _generate_delete_sql(self, delete_keys): """ Generate forward delete operations for SQL items. """ for key in delete_keys: app_label, sql_name = key old_node = self.from_sql_graph.nodes[key] operation = DeleteSQL(sql_name, old_node.reverse_sql, reverse_sql=old_node.sql) sql_deps = [n.key for n in self.from_sql_graph.node_map[key].children] sql_deps.append(key) self.add_sql_operation(app_label, sql_name, operation, sql_deps)
[ "def", "_generate_delete_sql", "(", "self", ",", "delete_keys", ")", ":", "for", "key", "in", "delete_keys", ":", "app_label", ",", "sql_name", "=", "key", "old_node", "=", "self", ".", "from_sql_graph", ".", "nodes", "[", "key", "]", "operation", "=", "DeleteSQL", "(", "sql_name", ",", "old_node", ".", "reverse_sql", ",", "reverse_sql", "=", "old_node", ".", "sql", ")", "sql_deps", "=", "[", "n", ".", "key", "for", "n", "in", "self", ".", "from_sql_graph", ".", "node_map", "[", "key", "]", ".", "children", "]", "sql_deps", ".", "append", "(", "key", ")", "self", ".", "add_sql_operation", "(", "app_label", ",", "sql_name", ",", "operation", ",", "sql_deps", ")" ]
Generate forward delete operations for SQL items.
[ "Generate", "forward", "delete", "operations", "for", "SQL", "items", "." ]
be48ff2c9283404e3d951128c459c3496d1ba25d
https://github.com/klichukb/django-migrate-sql/blob/be48ff2c9283404e3d951128c459c3496d1ba25d/migrate_sql/autodetector.py#L186-L196
train
klichukb/django-migrate-sql
migrate_sql/autodetector.py
MigrationAutodetector.generate_sql_changes
def generate_sql_changes(self): """ Starting point of this tool, which identifies changes and generates respective operations. """ from_keys = set(self.from_sql_graph.nodes.keys()) to_keys = set(self.to_sql_graph.nodes.keys()) new_keys = to_keys - from_keys delete_keys = from_keys - to_keys changed_keys = set() dep_changed_keys = [] for key in from_keys & to_keys: old_node = self.from_sql_graph.nodes[key] new_node = self.to_sql_graph.nodes[key] # identify SQL changes -- these will alter database. if not is_sql_equal(old_node.sql, new_node.sql): changed_keys.add(key) # identify dependencies change old_deps = self.from_sql_graph.dependencies[key] new_deps = self.to_sql_graph.dependencies[key] removed_deps = old_deps - new_deps added_deps = new_deps - old_deps if removed_deps or added_deps: dep_changed_keys.append((key, removed_deps, added_deps)) # we do basic sort here and inject dependency keys here. # operations built using these keys will properly set operation dependencies which will # enforce django to build/keep a correct order of operations (stable_topological_sort). keys = self.assemble_changes(new_keys, changed_keys, self.to_sql_graph) delete_keys = self.assemble_changes(delete_keys, set(), self.from_sql_graph) self._sql_operations = {} self._generate_reversed_sql(keys, changed_keys) self._generate_sql(keys, changed_keys) self._generate_delete_sql(delete_keys) self._generate_altered_sql_dependencies(dep_changed_keys)
python
def generate_sql_changes(self): """ Starting point of this tool, which identifies changes and generates respective operations. """ from_keys = set(self.from_sql_graph.nodes.keys()) to_keys = set(self.to_sql_graph.nodes.keys()) new_keys = to_keys - from_keys delete_keys = from_keys - to_keys changed_keys = set() dep_changed_keys = [] for key in from_keys & to_keys: old_node = self.from_sql_graph.nodes[key] new_node = self.to_sql_graph.nodes[key] # identify SQL changes -- these will alter database. if not is_sql_equal(old_node.sql, new_node.sql): changed_keys.add(key) # identify dependencies change old_deps = self.from_sql_graph.dependencies[key] new_deps = self.to_sql_graph.dependencies[key] removed_deps = old_deps - new_deps added_deps = new_deps - old_deps if removed_deps or added_deps: dep_changed_keys.append((key, removed_deps, added_deps)) # we do basic sort here and inject dependency keys here. # operations built using these keys will properly set operation dependencies which will # enforce django to build/keep a correct order of operations (stable_topological_sort). keys = self.assemble_changes(new_keys, changed_keys, self.to_sql_graph) delete_keys = self.assemble_changes(delete_keys, set(), self.from_sql_graph) self._sql_operations = {} self._generate_reversed_sql(keys, changed_keys) self._generate_sql(keys, changed_keys) self._generate_delete_sql(delete_keys) self._generate_altered_sql_dependencies(dep_changed_keys)
[ "def", "generate_sql_changes", "(", "self", ")", ":", "from_keys", "=", "set", "(", "self", ".", "from_sql_graph", ".", "nodes", ".", "keys", "(", ")", ")", "to_keys", "=", "set", "(", "self", ".", "to_sql_graph", ".", "nodes", ".", "keys", "(", ")", ")", "new_keys", "=", "to_keys", "-", "from_keys", "delete_keys", "=", "from_keys", "-", "to_keys", "changed_keys", "=", "set", "(", ")", "dep_changed_keys", "=", "[", "]", "for", "key", "in", "from_keys", "&", "to_keys", ":", "old_node", "=", "self", ".", "from_sql_graph", ".", "nodes", "[", "key", "]", "new_node", "=", "self", ".", "to_sql_graph", ".", "nodes", "[", "key", "]", "# identify SQL changes -- these will alter database.", "if", "not", "is_sql_equal", "(", "old_node", ".", "sql", ",", "new_node", ".", "sql", ")", ":", "changed_keys", ".", "add", "(", "key", ")", "# identify dependencies change", "old_deps", "=", "self", ".", "from_sql_graph", ".", "dependencies", "[", "key", "]", "new_deps", "=", "self", ".", "to_sql_graph", ".", "dependencies", "[", "key", "]", "removed_deps", "=", "old_deps", "-", "new_deps", "added_deps", "=", "new_deps", "-", "old_deps", "if", "removed_deps", "or", "added_deps", ":", "dep_changed_keys", ".", "append", "(", "(", "key", ",", "removed_deps", ",", "added_deps", ")", ")", "# we do basic sort here and inject dependency keys here.", "# operations built using these keys will properly set operation dependencies which will", "# enforce django to build/keep a correct order of operations (stable_topological_sort).", "keys", "=", "self", ".", "assemble_changes", "(", "new_keys", ",", "changed_keys", ",", "self", ".", "to_sql_graph", ")", "delete_keys", "=", "self", ".", "assemble_changes", "(", "delete_keys", ",", "set", "(", ")", ",", "self", ".", "from_sql_graph", ")", "self", ".", "_sql_operations", "=", "{", "}", "self", ".", "_generate_reversed_sql", "(", "keys", ",", "changed_keys", ")", "self", ".", "_generate_sql", "(", "keys", ",", "changed_keys", ")", "self", ".", "_generate_delete_sql", "(", "delete_keys", ")", "self", ".", "_generate_altered_sql_dependencies", "(", "dep_changed_keys", ")" ]
Starting point of this tool, which identifies changes and generates respective operations.
[ "Starting", "point", "of", "this", "tool", "which", "identifies", "changes", "and", "generates", "respective", "operations", "." ]
be48ff2c9283404e3d951128c459c3496d1ba25d
https://github.com/klichukb/django-migrate-sql/blob/be48ff2c9283404e3d951128c459c3496d1ba25d/migrate_sql/autodetector.py#L198-L236
train
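The change detection in generate_sql_changes reduces to set algebra over node keys plus an equality check on the intersection. A minimal sketch of that step, with plain dicts and string comparison standing in for the real graph objects and is_sql_equal (both stand-ins are assumptions made only for the sake of a runnable example):

# Hypothetical stand-ins: the real graphs map (app_label, sql_name) keys to SQL items.
from_nodes = {("app", "view_a"): "SELECT 1", ("app", "view_b"): "SELECT 2"}
to_nodes = {("app", "view_b"): "SELECT 2 -- changed", ("app", "view_c"): "SELECT 3"}

from_keys, to_keys = set(from_nodes), set(to_nodes)
new_keys = to_keys - from_keys       # only in the target state -> create
delete_keys = from_keys - to_keys    # only in the current state -> drop
changed_keys = {k for k in from_keys & to_keys
                if from_nodes[k] != to_nodes[k]}  # plain != stands in for is_sql_equal

print(new_keys)      # {('app', 'view_c')}
print(delete_keys)   # {('app', 'view_a')}
print(changed_keys)  # {('app', 'view_b')}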
klichukb/django-migrate-sql
migrate_sql/autodetector.py
MigrationAutodetector.check_dependency
def check_dependency(self, operation, dependency): """ Enhances default behavior of method by checking dependency for matching operation. """ if isinstance(dependency[1], SQLBlob): # NOTE: we follow the sort order created by `assemble_changes` so we build a fixed chain # of operations. thus we should match exact operation here. return dependency[3] == operation return super(MigrationAutodetector, self).check_dependency(operation, dependency)
python
def check_dependency(self, operation, dependency): """ Enhances default behavior of method by checking dependency for matching operation. """ if isinstance(dependency[1], SQLBlob): # NOTE: we follow the sort order created by `assemble_changes` so we build a fixed chain # of operations. thus we should match exact operation here. return dependency[3] == operation return super(MigrationAutodetector, self).check_dependency(operation, dependency)
[ "def", "check_dependency", "(", "self", ",", "operation", ",", "dependency", ")", ":", "if", "isinstance", "(", "dependency", "[", "1", "]", ",", "SQLBlob", ")", ":", "# NOTE: we follow the sort order created by `assemble_changes` so we build a fixed chain", "# of operations. thus we should match exact operation here.", "return", "dependency", "[", "3", "]", "==", "operation", "return", "super", "(", "MigrationAutodetector", ",", "self", ")", ".", "check_dependency", "(", "operation", ",", "dependency", ")" ]
Enhances default behavior of method by checking dependency for matching operation.
[ "Enhances", "default", "behavior", "of", "method", "by", "checking", "dependency", "for", "matching", "operation", "." ]
be48ff2c9283404e3d951128c459c3496d1ba25d
https://github.com/klichukb/django-migrate-sql/blob/be48ff2c9283404e3d951128c459c3496d1ba25d/migrate_sql/autodetector.py#L238-L246
train
skioo/django-customer-billing
billing/actions/credit_cards.py
reactivate
def reactivate(credit_card_id: str) -> None: """ Reactivates a credit card. """ logger.info('reactivating-credit-card', credit_card_id=credit_card_id) with transaction.atomic(): cc = CreditCard.objects.get(pk=credit_card_id) cc.reactivate() cc.save()
python
def reactivate(credit_card_id: str) -> None: """ Reactivates a credit card. """ logger.info('reactivating-credit-card', credit_card_id=credit_card_id) with transaction.atomic(): cc = CreditCard.objects.get(pk=credit_card_id) cc.reactivate() cc.save()
[ "def", "reactivate", "(", "credit_card_id", ":", "str", ")", "->", "None", ":", "logger", ".", "info", "(", "'reactivating-credit-card'", ",", "credit_card_id", "=", "credit_card_id", ")", "with", "transaction", ".", "atomic", "(", ")", ":", "cc", "=", "CreditCard", ".", "objects", ".", "get", "(", "pk", "=", "credit_card_id", ")", "cc", ".", "reactivate", "(", ")", "cc", ".", "save", "(", ")" ]
Reactivates a credit card.
[ "Reactivates", "a", "credit", "card", "." ]
6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d
https://github.com/skioo/django-customer-billing/blob/6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d/billing/actions/credit_cards.py#L20-L28
train
mgoral/subconvert
src/subconvert/parsing/Offset.py
TimeSync.sync
def sync(self, syncPointList): """Synchronise subtitles using a given list of SyncPoints.""" if len(syncPointList) == 0: return subsCopy = self._subs.clone() syncPointList.sort() SubAssert(syncPointList[0].subNo >= 0) SubAssert(syncPointList[0].subNo < subsCopy.size()) SubAssert(syncPointList[-1].subNo < subsCopy.size()) # Always start from the first subtitle. firstSyncPoint = self._getLowestSyncPoint(syncPointList, subsCopy) if firstSyncPoint != syncPointList[0]: syncPointList.insert(0, firstSyncPoint) for i, syncPoint in enumerate(syncPointList): # Algorithm: # 1. Calculate time deltas between sync points and between subs: # DE_OLD = subTime[secondSyncSubNo] - subTime[firstSyncSubNo] # DE_NEW = secondSyncTime - firstSyncTime # 2. Calculate proportional sub position within DE_OLD: # d = (subTime - subTime[firstSubNo]) / DE_OLD # 3. "d" is constant within deltas, so we can now calculate newSubTime: # newSubTime = DE_NEW * d + firstSyncTime firstSyncPoint = syncPointList[i] secondSyncPoint = self._getSyncPointOrEnd(i + 1, syncPointList, subsCopy) log.debug(_("Syncing times for sync points:")) log.debug(" %s" % firstSyncPoint) log.debug(" %s" % secondSyncPoint) # A case for the last one syncPoint if firstSyncPoint == secondSyncPoint: continue secondSubNo = secondSyncPoint.subNo firstSubNo = firstSyncPoint.subNo firstOldSub = subsCopy[firstSubNo] secondOldSub = subsCopy[secondSubNo] oldStartDelta, oldEndDelta = self._getDeltas(firstOldSub, secondOldSub) newStartDelta, newEndDelta = self._getDeltas(firstSyncPoint, secondSyncPoint) for subNo in range(firstSubNo, secondSubNo + 1): sub = subsCopy[subNo] newStartTime = self._calculateTime(sub.start, firstOldSub.start, firstSyncPoint.start, oldStartDelta, newStartDelta) newEndTime = self._calculateTime(sub.end, firstOldSub.end, firstSyncPoint.end, oldEndDelta, newEndDelta) self._subs.changeSubStart(subNo, newStartTime) self._subs.changeSubEnd(subNo, newEndTime)
python
def sync(self, syncPointList): """Synchronise subtitles using a given list of SyncPoints.""" if len(syncPointList) == 0: return subsCopy = self._subs.clone() syncPointList.sort() SubAssert(syncPointList[0].subNo >= 0) SubAssert(syncPointList[0].subNo < subsCopy.size()) SubAssert(syncPointList[-1].subNo < subsCopy.size()) # Always start from the first subtitle. firstSyncPoint = self._getLowestSyncPoint(syncPointList, subsCopy) if firstSyncPoint != syncPointList[0]: syncPointList.insert(0, firstSyncPoint) for i, syncPoint in enumerate(syncPointList): # Algorithm: # 1. Calculate time deltas between sync points and between subs: # DE_OLD = subTime[secondSyncSubNo] - subTime[firstSyncSubNo] # DE_NEW = secondSyncTime - firstSyncTime # 2. Calculate proportional sub position within DE_OLD: # d = (subTime - subTime[firstSubNo]) / DE_OLD # 3. "d" is constant within deltas, so we can now calculate newSubTime: # newSubTime = DE_NEW * d + firstSyncTime firstSyncPoint = syncPointList[i] secondSyncPoint = self._getSyncPointOrEnd(i + 1, syncPointList, subsCopy) log.debug(_("Syncing times for sync points:")) log.debug(" %s" % firstSyncPoint) log.debug(" %s" % secondSyncPoint) # A case for the last one syncPoint if firstSyncPoint == secondSyncPoint: continue secondSubNo = secondSyncPoint.subNo firstSubNo = firstSyncPoint.subNo firstOldSub = subsCopy[firstSubNo] secondOldSub = subsCopy[secondSubNo] oldStartDelta, oldEndDelta = self._getDeltas(firstOldSub, secondOldSub) newStartDelta, newEndDelta = self._getDeltas(firstSyncPoint, secondSyncPoint) for subNo in range(firstSubNo, secondSubNo + 1): sub = subsCopy[subNo] newStartTime = self._calculateTime(sub.start, firstOldSub.start, firstSyncPoint.start, oldStartDelta, newStartDelta) newEndTime = self._calculateTime(sub.end, firstOldSub.end, firstSyncPoint.end, oldEndDelta, newEndDelta) self._subs.changeSubStart(subNo, newStartTime) self._subs.changeSubEnd(subNo, newEndTime)
[ "def", "sync", "(", "self", ",", "syncPointList", ")", ":", "if", "len", "(", "syncPointList", ")", "==", "0", ":", "return", "subsCopy", "=", "self", ".", "_subs", ".", "clone", "(", ")", "syncPointList", ".", "sort", "(", ")", "SubAssert", "(", "syncPointList", "[", "0", "]", ".", "subNo", ">=", "0", ")", "SubAssert", "(", "syncPointList", "[", "0", "]", ".", "subNo", "<", "subsCopy", ".", "size", "(", ")", ")", "SubAssert", "(", "syncPointList", "[", "-", "1", "]", ".", "subNo", "<", "subsCopy", ".", "size", "(", ")", ")", "# Always start from the first subtitle.", "firstSyncPoint", "=", "self", ".", "_getLowestSyncPoint", "(", "syncPointList", ",", "subsCopy", ")", "if", "firstSyncPoint", "!=", "syncPointList", "[", "0", "]", ":", "syncPointList", ".", "insert", "(", "0", ",", "firstSyncPoint", ")", "for", "i", ",", "syncPoint", "in", "enumerate", "(", "syncPointList", ")", ":", "# Algorithm:", "# 1. Calculate time deltas between sync points and between subs:", "# DE_OLD = subTime[secondSyncSubNo] - subTime[firstSyncSubNo]", "# DE_NEW = secondSyncTime - firstSyncTime", "# 2. Calculate proportional sub position within DE_OLD:", "# d = (subTime - subTime[firstSubNo]) / DE_OLD", "# 3. \"d\" is constant within deltas, so we can now calculate newSubTime:", "# newSubTime = DE_NEW * d + firstSyncTime", "firstSyncPoint", "=", "syncPointList", "[", "i", "]", "secondSyncPoint", "=", "self", ".", "_getSyncPointOrEnd", "(", "i", "+", "1", ",", "syncPointList", ",", "subsCopy", ")", "log", ".", "debug", "(", "_", "(", "\"Syncing times for sync points:\"", ")", ")", "log", ".", "debug", "(", "\" %s\"", "%", "firstSyncPoint", ")", "log", ".", "debug", "(", "\" %s\"", "%", "secondSyncPoint", ")", "# A case for the last one syncPoint", "if", "firstSyncPoint", "==", "secondSyncPoint", ":", "continue", "secondSubNo", "=", "secondSyncPoint", ".", "subNo", "firstSubNo", "=", "firstSyncPoint", ".", "subNo", "firstOldSub", "=", "subsCopy", "[", "firstSubNo", "]", "secondOldSub", "=", "subsCopy", "[", "secondSubNo", "]", "oldStartDelta", ",", "oldEndDelta", "=", "self", ".", "_getDeltas", "(", "firstOldSub", ",", "secondOldSub", ")", "newStartDelta", ",", "newEndDelta", "=", "self", ".", "_getDeltas", "(", "firstSyncPoint", ",", "secondSyncPoint", ")", "for", "subNo", "in", "range", "(", "firstSubNo", ",", "secondSubNo", "+", "1", ")", ":", "sub", "=", "subsCopy", "[", "subNo", "]", "newStartTime", "=", "self", ".", "_calculateTime", "(", "sub", ".", "start", ",", "firstOldSub", ".", "start", ",", "firstSyncPoint", ".", "start", ",", "oldStartDelta", ",", "newStartDelta", ")", "newEndTime", "=", "self", ".", "_calculateTime", "(", "sub", ".", "end", ",", "firstOldSub", ".", "end", ",", "firstSyncPoint", ".", "end", ",", "oldEndDelta", ",", "newEndDelta", ")", "self", ".", "_subs", ".", "changeSubStart", "(", "subNo", ",", "newStartTime", ")", "self", ".", "_subs", ".", "changeSubEnd", "(", "subNo", ",", "newEndTime", ")" ]
Synchronise subtitles using a given list of SyncPoints.
[ "Synchronise", "subtitles", "using", "a", "given", "list", "of", "SyncPoints", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/parsing/Offset.py#L60-L118
train
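The per-subtitle retiming in sync is plain linear interpolation between neighbouring sync points, exactly as the in-code comment spells out: d = (subTime - subTime[firstSubNo]) / DE_OLD, then newSubTime = DE_NEW * d + firstSyncTime. A small numeric check of that formula, with float seconds standing in for the FrameTime objects (an assumption made so the sketch runs on its own):

def retime(sub_time, first_old, first_new, old_delta, new_delta):
    # d is the subtitle's proportional position inside the old interval;
    # the same proportion is then applied to the new interval.
    d = (sub_time - first_old) / old_delta
    return new_delta * d + first_new

# Old timeline: sync points at 10s and 20s; new timeline: 12s and 24s.
for t in (10.0, 15.0, 20.0):
    print(t, "->", retime(t, 10.0, 12.0, old_delta=10.0, new_delta=12.0))
# 10.0 -> 12.0, 15.0 -> 18.0, 20.0 -> 24.0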
mgoral/subconvert
src/subconvert/parsing/Offset.py
TimeSync._getDeltas
def _getDeltas(self, firstSub, secondSub): """Arguments must have "start" and "end" properties which are FrameTimes.""" startDelta = max(firstSub.start, secondSub.start) - min(firstSub.start, secondSub.start) endDelta = max(firstSub.end, secondSub.end) - min(firstSub.end, secondSub.end) return (startDelta, endDelta)
python
def _getDeltas(self, firstSub, secondSub): """Arguments must have "start" and "end" properties which are FrameTimes.""" startDelta = max(firstSub.start, secondSub.start) - min(firstSub.start, secondSub.start) endDelta = max(firstSub.end, secondSub.end) - min(firstSub.end, secondSub.end) return (startDelta, endDelta)
[ "def", "_getDeltas", "(", "self", ",", "firstSub", ",", "secondSub", ")", ":", "startDelta", "=", "max", "(", "firstSub", ".", "start", ",", "secondSub", ".", "start", ")", "-", "min", "(", "firstSub", ".", "start", ",", "secondSub", ".", "start", ")", "endDelta", "=", "max", "(", "firstSub", ".", "end", ",", "secondSub", ".", "end", ")", "-", "min", "(", "firstSub", ".", "end", ",", "secondSub", ".", "end", ")", "return", "(", "startDelta", ",", "endDelta", ")" ]
Arguments must have "start" and "end" properties which are FrameTimes.
[ "Arguments", "must", "have", "start", "and", "end", "properties", "which", "are", "FrameTimes", "." ]
59701e5e69ef1ca26ce7d1d766c936664aa2cb32
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/parsing/Offset.py#L120-L124
train
ludeeus/GHLocalApi
ghlocalapi/scan.py
NetworkScan.scan_for_units
async def scan_for_units(self, iprange): """Scan local network for GH units.""" units = [] for ip_address in ipaddress.IPv4Network(iprange): sock = socket.socket() sock.settimeout(0.02) host = str(ip_address) try: scan_result = sock.connect((host, PORT)) except socket.error: scan_result = 1 _LOGGER.debug('Checking port connectivity on %s:%s', host, (str(PORT))) if scan_result is None: ghlocalapi = DeviceInfo(self._loop, self._session, host) await ghlocalapi.get_device_info() data = ghlocalapi.device_info if data is not None: cap = data['device_info']['capabilities'] units.append({ 'host': host, 'name': data['name'], 'model': data['device_info']['model_name'], 'assistant_supported': cap.get('assistant_supported', False) }) sock.close() return units
python
async def scan_for_units(self, iprange): """Scan local network for GH units.""" units = [] for ip_address in ipaddress.IPv4Network(iprange): sock = socket.socket() sock.settimeout(0.02) host = str(ip_address) try: scan_result = sock.connect((host, PORT)) except socket.error: scan_result = 1 _LOGGER.debug('Checking port connectivity on %s:%s', host, (str(PORT))) if scan_result is None: ghlocalapi = DeviceInfo(self._loop, self._session, host) await ghlocalapi.get_device_info() data = ghlocalapi.device_info if data is not None: cap = data['device_info']['capabilities'] units.append({ 'host': host, 'name': data['name'], 'model': data['device_info']['model_name'], 'assistant_supported': cap.get('assistant_supported', False) }) sock.close() return units
[ "async", "def", "scan_for_units", "(", "self", ",", "iprange", ")", ":", "units", "=", "[", "]", "for", "ip_address", "in", "ipaddress", ".", "IPv4Network", "(", "iprange", ")", ":", "sock", "=", "socket", ".", "socket", "(", ")", "sock", ".", "settimeout", "(", "0.02", ")", "host", "=", "str", "(", "ip_address", ")", "try", ":", "scan_result", "=", "sock", ".", "connect", "(", "(", "host", ",", "PORT", ")", ")", "except", "socket", ".", "error", ":", "scan_result", "=", "1", "_LOGGER", ".", "debug", "(", "'Checking port connectivity on %s:%s'", ",", "host", ",", "(", "str", "(", "PORT", ")", ")", ")", "if", "scan_result", "is", "None", ":", "ghlocalapi", "=", "DeviceInfo", "(", "self", ".", "_loop", ",", "self", ".", "_session", ",", "host", ")", "await", "ghlocalapi", ".", "get_device_info", "(", ")", "data", "=", "ghlocalapi", ".", "device_info", "if", "data", "is", "not", "None", ":", "cap", "=", "data", "[", "'device_info'", "]", "[", "'capabilities'", "]", "units", ".", "append", "(", "{", "'host'", ":", "host", ",", "'name'", ":", "data", "[", "'name'", "]", ",", "'model'", ":", "data", "[", "'device_info'", "]", "[", "'model_name'", "]", ",", "'assistant_supported'", ":", "cap", ".", "get", "(", "'assistant_supported'", ",", "False", ")", "}", ")", "sock", ".", "close", "(", ")", "return", "units" ]
Scan local network for GH units.
[ "Scan", "local", "network", "for", "GH", "units", "." ]
93abdee299c4a4b65aa9dd03c77ec34e174e3c56
https://github.com/ludeeus/GHLocalApi/blob/93abdee299c4a4b65aa9dd03c77ec34e174e3c56/ghlocalapi/scan.py#L24-L51
train
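scan_for_units relies on socket.connect returning None on success and raising socket.error on failure, which is why it tests scan_result is None. A common alternative is connect_ex, which returns an error code instead of raising; a sketch of the same port probe under that approach (the port value here is illustrative only, since the module's PORT constant is defined elsewhere):

import socket

def port_open(host, port, timeout=0.02):
    # connect_ex returns 0 on success instead of raising, so no try/except is needed.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(timeout)
        return sock.connect_ex((host, port)) == 0

print(port_open("127.0.0.1", 8008))  # illustrative port, not necessarily the module's PORT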
ludeeus/GHLocalApi
examples/bluetooth_scan_unit.py
bluetooth_scan
async def bluetooth_scan(): """Get nearby bluetooth devices.""" async with aiohttp.ClientSession() as session: ghlocalapi = Bluetooth(LOOP, session, IPADDRESS) await ghlocalapi.scan_for_devices() # Start device scan await ghlocalapi.get_scan_result() # Returns the result print("Device info:", ghlocalapi.devices)
python
async def bluetooth_scan(): """Get nearby bluetooth devices.""" async with aiohttp.ClientSession() as session: ghlocalapi = Bluetooth(LOOP, session, IPADDRESS) await ghlocalapi.scan_for_devices() # Start device scan await ghlocalapi.get_scan_result() # Returns the result print("Device info:", ghlocalapi.devices)
[ "async", "def", "bluetooth_scan", "(", ")", ":", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "ghlocalapi", "=", "Bluetooth", "(", "LOOP", ",", "session", ",", "IPADDRESS", ")", "await", "ghlocalapi", ".", "scan_for_devices", "(", ")", "# Start device scan", "await", "ghlocalapi", ".", "get_scan_result", "(", ")", "# Returns the result", "print", "(", "\"Device info:\"", ",", "ghlocalapi", ".", "devices", ")" ]
Get nearby bluetooth devices.
[ "Get", "nearby", "bluetooth", "devices", "." ]
93abdee299c4a4b65aa9dd03c77ec34e174e3c56
https://github.com/ludeeus/GHLocalApi/blob/93abdee299c4a4b65aa9dd03c77ec34e174e3c56/examples/bluetooth_scan_unit.py#L9-L16
train
romanorac/discomll
discomll/classification/naivebayes.py
reduce_fit
def reduce_fit(interface, state, label, inp): """ Function separates aggregation of continuous and discrete features. For continuous features it aggregates partially calculated means and variances and returns them. For discrete features it aggregates pairs and returns them. Pairs with label occurrences are used to calculate prior probabilities """ from disco.util import kvgroup # function for grouping values by key import numpy as np out = interface.output(0) # all outputted pairs have the same output label # model of naive Bayes stores label names, sum of all label occurrences and pairs # (feature index, feature values) for discrete features which are needed to optimize predict phase. fit_model = {"y_labels": [], "y_sum": 0, "iv": set()} combiner = {} # combiner maintains correct order of means and variances. means, variances = [], [] k_prev = "" for key, value in kvgroup(inp): # input pairs are sorted and grouped by key k_split = key.split(state["delimiter"]) # pair is split if len(k_split) == 3: # discrete features # store pair (feature index, feature value) fit_model["iv"].add(tuple(k_split[1:])) # aggregate and output occurrences of a pair out.add(tuple(k_split), sum(value)) elif len(k_split) == 2: # continuous features # if label is different than previous. # This enables calculation of all variances and means for every feature for current label. if k_split[0] != k_prev and k_prev != "": mean, var = zip(*[combiner[key] for key in sorted(combiner.keys())]) means.append(mean) variances.append(var) # number of elements, partial mean, partial variance. n_a = mean_a = var_a = 0 # code aggregates partially calculated means and variances for n_b, mean_b, var_b in value: n_ab = n_a + n_b var_a = ((n_a * var_a + n_b * var_b) / float(n_ab)) + ( n_a * n_b * ((mean_b - mean_a) / float(n_ab)) ** 2) mean_a = (n_a * mean_a + n_b * mean_b) / float(n_ab) n_a = n_ab # maintains correct order of statistics for every feature combiner[int(k_split[1])] = (mean_a, var_a + 1e-9) k_prev = k_split[0] else: # aggregates label occurrences fit_model[key] = np.sum(value) fit_model["y_sum"] += fit_model[key] # sum of all label occurrences fit_model["y_labels"].append(key) # if statistics for continuous features were not output in last iteration if len(means) > 0: mean, var = zip(*[combiner[key] for key in sorted(combiner.keys())]) out.add("mean", np.array(means + [mean], dtype=np.float32)) variances = np.array(variances + [var], dtype=np.float32) out.add("var", variances) out.add("var_log", np.log(np.pi * variances)) # calculation of prior probabilities prior = [fit_model[y_label] / float(fit_model["y_sum"]) for y_label in fit_model["y_labels"]] out.add("prior", np.array(prior, dtype=np.float32)) out.add("prior_log", np.log(prior)) out.add("iv", list(fit_model["iv"])) out.add("y_labels", fit_model["y_labels"])
python
def reduce_fit(interface, state, label, inp): """ Function separates aggregation of continuous and discrete features. For continuous features it aggregates partially calculated means and variances and returns them. For discrete features it aggregates pairs and returns them. Pairs with label occurrences are used to calculate prior probabilities """ from disco.util import kvgroup # function for grouping values by key import numpy as np out = interface.output(0) # all outputted pairs have the same output label # model of naive Bayes stores label names, sum of all label occurrences and pairs # (feature index, feature values) for discrete features which are needed to optimize predict phase. fit_model = {"y_labels": [], "y_sum": 0, "iv": set()} combiner = {} # combiner maintains correct order of means and variances. means, variances = [], [] k_prev = "" for key, value in kvgroup(inp): # input pairs are sorted and grouped by key k_split = key.split(state["delimiter"]) # pair is split if len(k_split) == 3: # discrete features # store pair (feature index, feature value) fit_model["iv"].add(tuple(k_split[1:])) # aggregate and output occurrences of a pair out.add(tuple(k_split), sum(value)) elif len(k_split) == 2: # continuous features # if label is different than previous. # This enables calculation of all variances and means for every feature for current label. if k_split[0] != k_prev and k_prev != "": mean, var = zip(*[combiner[key] for key in sorted(combiner.keys())]) means.append(mean) variances.append(var) # number of elements, partial mean, partial variance. n_a = mean_a = var_a = 0 # code aggregates partially calculated means and variances for n_b, mean_b, var_b in value: n_ab = n_a + n_b var_a = ((n_a * var_a + n_b * var_b) / float(n_ab)) + ( n_a * n_b * ((mean_b - mean_a) / float(n_ab)) ** 2) mean_a = (n_a * mean_a + n_b * mean_b) / float(n_ab) n_a = n_ab # maintains correct order of statistics for every feature combiner[int(k_split[1])] = (mean_a, var_a + 1e-9) k_prev = k_split[0] else: # aggregates label occurrences fit_model[key] = np.sum(value) fit_model["y_sum"] += fit_model[key] # sum of all label occurrences fit_model["y_labels"].append(key) # if statistics for continuous features were not output in last iteration if len(means) > 0: mean, var = zip(*[combiner[key] for key in sorted(combiner.keys())]) out.add("mean", np.array(means + [mean], dtype=np.float32)) variances = np.array(variances + [var], dtype=np.float32) out.add("var", variances) out.add("var_log", np.log(np.pi * variances)) # calculation of prior probabilities prior = [fit_model[y_label] / float(fit_model["y_sum"]) for y_label in fit_model["y_labels"]] out.add("prior", np.array(prior, dtype=np.float32)) out.add("prior_log", np.log(prior)) out.add("iv", list(fit_model["iv"])) out.add("y_labels", fit_model["y_labels"])
[ "def", "reduce_fit", "(", "interface", ",", "state", ",", "label", ",", "inp", ")", ":", "from", "disco", ".", "util", "import", "kvgroup", "# function for grouping values by key", "import", "numpy", "as", "np", "out", "=", "interface", ".", "output", "(", "0", ")", "# all outputted pairs have the same output label", "# model of naive Bayes stores label names, sum of all label occurrences and pairs", "# (feature index, feature values) for discrete features which are needed to optimize predict phase.", "fit_model", "=", "{", "\"y_labels\"", ":", "[", "]", ",", "\"y_sum\"", ":", "0", ",", "\"iv\"", ":", "set", "(", ")", "}", "combiner", "=", "{", "}", "# combiner maintains correct order of means and variances.", "means", ",", "variances", "=", "[", "]", ",", "[", "]", "k_prev", "=", "\"\"", "for", "key", ",", "value", "in", "kvgroup", "(", "inp", ")", ":", "# input pairs are sorted and grouped by key", "k_split", "=", "key", ".", "split", "(", "state", "[", "\"delimiter\"", "]", ")", "# pair is split", "if", "len", "(", "k_split", ")", "==", "3", ":", "# discrete features", "# store pair (feature index, feature value)", "fit_model", "[", "\"iv\"", "]", ".", "add", "(", "tuple", "(", "k_split", "[", "1", ":", "]", ")", ")", "# aggregate and output occurrences of a pair", "out", ".", "add", "(", "tuple", "(", "k_split", ")", ",", "sum", "(", "value", ")", ")", "elif", "len", "(", "k_split", ")", "==", "2", ":", "# continuous features", "# if label is different than previous.", "# This enables calculation of all variances and means for every feature for current label.", "if", "k_split", "[", "0", "]", "!=", "k_prev", "and", "k_prev", "!=", "\"\"", ":", "mean", ",", "var", "=", "zip", "(", "*", "[", "combiner", "[", "key", "]", "for", "key", "in", "sorted", "(", "combiner", ".", "keys", "(", ")", ")", "]", ")", "means", ".", "append", "(", "mean", ")", "variances", ".", "append", "(", "var", ")", "# number of elements, partial mean, partial variance.", "n_a", "=", "mean_a", "=", "var_a", "=", "0", "# code aggregates partially calculated means and variances", "for", "n_b", ",", "mean_b", ",", "var_b", "in", "value", ":", "n_ab", "=", "n_a", "+", "n_b", "var_a", "=", "(", "(", "n_a", "*", "var_a", "+", "n_b", "*", "var_b", ")", "/", "float", "(", "n_ab", ")", ")", "+", "(", "n_a", "*", "n_b", "*", "(", "(", "mean_b", "-", "mean_a", ")", "/", "float", "(", "n_ab", ")", ")", "**", "2", ")", "mean_a", "=", "(", "n_a", "*", "mean_a", "+", "n_b", "*", "mean_b", ")", "/", "float", "(", "n_ab", ")", "n_a", "=", "n_ab", "# maintains correct order of statistics for every feature", "combiner", "[", "int", "(", "k_split", "[", "1", "]", ")", "]", "=", "(", "mean_a", ",", "var_a", "+", "1e-9", ")", "k_prev", "=", "k_split", "[", "0", "]", "else", ":", "# aggregates label occurrences", "fit_model", "[", "key", "]", "=", "np", ".", "sum", "(", "value", ")", "fit_model", "[", "\"y_sum\"", "]", "+=", "fit_model", "[", "key", "]", "# sum of all label occurrences", "fit_model", "[", "\"y_labels\"", "]", ".", "append", "(", "key", ")", "# if statistics for continuous features were not output in last iteration", "if", "len", "(", "means", ")", ">", "0", ":", "mean", ",", "var", "=", "zip", "(", "*", "[", "combiner", "[", "key", "]", "for", "key", "in", "sorted", "(", "combiner", ".", "keys", "(", ")", ")", "]", ")", "out", ".", "add", "(", "\"mean\"", ",", "np", ".", "array", "(", "means", "+", "[", "mean", "]", ",", "dtype", "=", "np", ".", "float32", ")", ")", "variances", "=", "np", ".", "array", "(", 
"variances", "+", "[", "var", "]", ",", "dtype", "=", "np", ".", "float32", ")", "out", ".", "add", "(", "\"var\"", ",", "variances", ")", "out", ".", "add", "(", "\"var_log\"", ",", "np", ".", "log", "(", "np", ".", "pi", "*", "variances", ")", ")", "# calculation of prior probabilities", "prior", "=", "[", "fit_model", "[", "y_label", "]", "/", "float", "(", "fit_model", "[", "\"y_sum\"", "]", ")", "for", "y_label", "in", "fit_model", "[", "\"y_labels\"", "]", "]", "out", ".", "add", "(", "\"prior\"", ",", "np", ".", "array", "(", "prior", ",", "dtype", "=", "np", ".", "float32", ")", ")", "out", ".", "add", "(", "\"prior_log\"", ",", "np", ".", "log", "(", "prior", ")", ")", "out", ".", "add", "(", "\"iv\"", ",", "list", "(", "fit_model", "[", "\"iv\"", "]", ")", ")", "out", ".", "add", "(", "\"y_labels\"", ",", "fit_model", "[", "\"y_labels\"", "]", ")" ]
Function separates aggregation of continuous and discrete features. For continuous features it aggregates partially calculated means and variances and returns them. For discrete features it aggregates pairs and returns them. Pairs with label occurrences are used to calculate prior probabilities
[ "Function", "separates", "aggregation", "of", "continuous", "and", "discrete", "features", ".", "For", "continuous", "features", "it", "aggregates", "partially", "calculated", "means", "and", "variances", "and", "returns", "them", ".", "For", "discrete", "features", "it", "aggregates", "pairs", "and", "returns", "them", ".", "Pairs", "with", "label", "occurrences", "are", "used", "to", "calculate", "prior", "probabilities" ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/classification/naivebayes.py#L60-L128
train
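The inner loop of reduce_fit merges per-partition (count, mean, variance) triples using the standard pairwise-combination identities (often attributed to Chan et al.). A self-contained check that the same update rule reproduces the exact population statistics of the concatenated data:

import numpy as np

def combine(n_a, mean_a, var_a, n_b, mean_b, var_b):
    # Same update as in reduce_fit: weighted average of the two variances
    # plus a cross term accounting for the distance between the partial means.
    n_ab = n_a + n_b
    var = (n_a * var_a + n_b * var_b) / n_ab + n_a * n_b * ((mean_b - mean_a) / n_ab) ** 2
    mean = (n_a * mean_a + n_b * mean_b) / n_ab
    return n_ab, mean, var

a = np.array([1.0, 2.0, 4.0])
b = np.array([3.0, 8.0])
n, m, v = combine(len(a), a.mean(), a.var(), len(b), b.mean(), b.var())
full = np.concatenate([a, b])
assert np.isclose(m, full.mean()) and np.isclose(v, full.var())
print(n, m, v)  # 5 3.6 5.84 -- matches the statistics of the concatenated array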
romanorac/discomll
discomll/classification/naivebayes.py
map_predict
def map_predict(interface, state, label, inp): """ Function makes predictions of samples with a given model. It calculates probabilities with multinomial and Gaussian distributions. """ import numpy as np out = interface.output(0) continuous = [j for i, j in enumerate(state["X_indices"]) if state["X_meta"][i] == "c"] # indices of continuous features discrete = [j for i, j in enumerate(state["X_indices"]) if state["X_meta"][i] == "d"] # indices of discrete features cont = True if len(continuous) > 0 else False # enables calculation of Gaussian probabilities disc = True if len(discrete) > 0 else False # enables calculation of multinomial probabilities. for row in inp: row = row.strip().split(state["delimiter"]) if len(row) > 1: # if row is not empty # set id of a sample x_id = "" if state["id_index"] == -1 else row[state["id_index"]] # initialize prior probability for all labels probs = state["fit_model"]["prior_log"] if cont: # continuous features x = np.array([(0 if row[j] in state["missing_vals"] else float(row[j])) for j in continuous]) # sets selected features of the sample # Gaussian distribution probs = probs - 0.5 * np.sum( np.true_divide((x - state["fit_model"]["mean"]) ** 2, state["fit_model"]["var"]) + state["fit_model"]["var_log"], axis=1) if disc: # discrete features # multinomial distribution probs = probs + np.sum( [(0 if row[i] in state["missing_vals"] else state["fit_model"].get((str(i), row[i]), np.zeros(1))) for i in discrete], axis=0) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = np.log(np.sum(np.exp(probs))) probs = np.exp(np.array(probs) - log_prob_x) # Predicted label is the one with highest probability y_predicted = max(zip(probs, state["fit_model"]["y_labels"]))[1] out.add(x_id, (y_predicted, probs.tolist()))
python
def map_predict(interface, state, label, inp): """ Function makes predictions of samples with a given model. It calculates probabilities with multinomial and Gaussian distributions. """ import numpy as np out = interface.output(0) continuous = [j for i, j in enumerate(state["X_indices"]) if state["X_meta"][i] == "c"] # indices of continuous features discrete = [j for i, j in enumerate(state["X_indices"]) if state["X_meta"][i] == "d"] # indices of discrete features cont = True if len(continuous) > 0 else False # enables calculation of Gaussian probabilities disc = True if len(discrete) > 0 else False # enables calculation of multinomial probabilities. for row in inp: row = row.strip().split(state["delimiter"]) if len(row) > 1: # if row is not empty # set id of a sample x_id = "" if state["id_index"] == -1 else row[state["id_index"]] # initialize prior probability for all labels probs = state["fit_model"]["prior_log"] if cont: # continuous features x = np.array([(0 if row[j] in state["missing_vals"] else float(row[j])) for j in continuous]) # sets selected features of the sample # Gaussian distribution probs = probs - 0.5 * np.sum( np.true_divide((x - state["fit_model"]["mean"]) ** 2, state["fit_model"]["var"]) + state["fit_model"]["var_log"], axis=1) if disc: # discrete features # multinomial distribution probs = probs + np.sum( [(0 if row[i] in state["missing_vals"] else state["fit_model"].get((str(i), row[i]), np.zeros(1))) for i in discrete], axis=0) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = np.log(np.sum(np.exp(probs))) probs = np.exp(np.array(probs) - log_prob_x) # Predicted label is the one with highest probability y_predicted = max(zip(probs, state["fit_model"]["y_labels"]))[1] out.add(x_id, (y_predicted, probs.tolist()))
[ "def", "map_predict", "(", "interface", ",", "state", ",", "label", ",", "inp", ")", ":", "import", "numpy", "as", "np", "out", "=", "interface", ".", "output", "(", "0", ")", "continuous", "=", "[", "j", "for", "i", ",", "j", "in", "enumerate", "(", "state", "[", "\"X_indices\"", "]", ")", "if", "state", "[", "\"X_meta\"", "]", "[", "i", "]", "==", "\"c\"", "]", "# indices of continuous features", "discrete", "=", "[", "j", "for", "i", ",", "j", "in", "enumerate", "(", "state", "[", "\"X_indices\"", "]", ")", "if", "state", "[", "\"X_meta\"", "]", "[", "i", "]", "==", "\"d\"", "]", "# indices of discrete features", "cont", "=", "True", "if", "len", "(", "continuous", ")", ">", "0", "else", "False", "# enables calculation of Gaussian probabilities", "disc", "=", "True", "if", "len", "(", "discrete", ")", ">", "0", "else", "False", "# enables calculation of multinomial probabilities.", "for", "row", "in", "inp", ":", "row", "=", "row", ".", "strip", "(", ")", ".", "split", "(", "state", "[", "\"delimiter\"", "]", ")", "if", "len", "(", "row", ")", ">", "1", ":", "# if row is empty", "# set id of a sample", "x_id", "=", "\"\"", "if", "state", "[", "\"id_index\"", "]", "==", "-", "1", "else", "row", "[", "state", "[", "\"id_index\"", "]", "]", "# initialize prior probability for all labels", "probs", "=", "state", "[", "\"fit_model\"", "]", "[", "\"prior_log\"", "]", "if", "cont", ":", "# continuous features", "x", "=", "np", ".", "array", "(", "[", "(", "0", "if", "row", "[", "j", "]", "in", "state", "[", "\"missing_vals\"", "]", "else", "float", "(", "row", "[", "j", "]", ")", ")", "for", "j", "in", "continuous", "]", ")", "# sets selected features of the sample", "# Gaussian distribution", "probs", "=", "probs", "-", "0.5", "*", "np", ".", "sum", "(", "np", ".", "true_divide", "(", "(", "x", "-", "state", "[", "\"fit_model\"", "]", "[", "\"mean\"", "]", ")", "**", "2", ",", "state", "[", "\"fit_model\"", "]", "[", "\"var\"", "]", ")", "+", "state", "[", "\"fit_model\"", "]", "[", "\"var_log\"", "]", ",", "axis", "=", "1", ")", "if", "disc", ":", "# discrete features", "# multinomial distribution", "probs", "=", "probs", "+", "np", ".", "sum", "(", "[", "(", "0", "if", "row", "[", "i", "]", "in", "state", "[", "\"missing_vals\"", "]", "else", "state", "[", "\"fit_model\"", "]", ".", "get", "(", "(", "str", "(", "i", ")", ",", "row", "[", "i", "]", ")", ",", "np", ".", "zeros", "(", "1", ")", ")", ")", "for", "i", "in", "discrete", "]", ",", "axis", "=", "0", ")", "# normalize by P(x) = P(f_1, ..., f_n)", "log_prob_x", "=", "np", ".", "log", "(", "np", ".", "sum", "(", "np", ".", "exp", "(", "probs", ")", ")", ")", "probs", "=", "np", ".", "exp", "(", "np", ".", "array", "(", "probs", ")", "-", "log_prob_x", ")", "# Predicted label is the one with highest probability", "y_predicted", "=", "max", "(", "zip", "(", "probs", ",", "state", "[", "\"fit_model\"", "]", "[", "\"y_labels\"", "]", ")", ")", "[", "1", "]", "out", ".", "add", "(", "x_id", ",", "(", "y_predicted", ",", "probs", ".", "tolist", "(", ")", ")", ")" ]
Function makes predictions of samples with a given model. It calculates probabilities with multinomial and Gaussian distributions.
[ "Function", "makes", "a", "predictions", "of", "samples", "with", "given", "model", ".", "It", "calculates", "probabilities", "with", "multinomial", "and", "Gaussian", "distribution", "." ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/classification/naivebayes.py#L131-L173
train
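The normalization step in map_predict exponentiates log-probabilities after subtracting log P(x). A widely used refinement, shown below, is to shift by the maximum log-probability first so the intermediate exponentials cannot overflow or underflow; this is a related, numerically safer variant, not what the code above does:

import numpy as np

def normalize_log_probs(log_probs):
    # Shift by the max before exponentiating: exp() then sees values <= 0,
    # which keeps the sum finite even for large-magnitude log-probabilities.
    shift = np.max(log_probs)
    log_prob_x = np.log(np.sum(np.exp(log_probs - shift))) + shift
    return np.exp(log_probs - log_prob_x)

probs = normalize_log_probs(np.array([-1000.0, -1001.0, -1003.0]))
print(probs, probs.sum())  # finite values that sum to 1.0; the naive form would yield nan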
romanorac/discomll
discomll/classification/naivebayes.py
predict
def predict(dataset, fitmodel_url, m=1, save_results=True, show=False): """ Function starts a job that makes predictions on input data with a given model Parameters ---------- input - dataset object with input urls and other parameters fitmodel_url - model created in fit phase m - m estimate is used with discrete features save_results - save results to ddfs show - show info about job execution Returns ------- Urls of predictions on ddfs """ from disco.worker.pipeline.worker import Worker, Stage from disco.core import Job, result_iterator import numpy as np try: m = float(m) except ValueError: raise Exception("Parameter m should be numerical.") if "naivebayes_fitmodel" in fitmodel_url: # fit model is loaded from ddfs fit_model = dict((k, v) for k, v in result_iterator(fitmodel_url["naivebayes_fitmodel"])) if len(fit_model["y_labels"]) < 2: print "There is only one class in training data." return [] else: raise Exception("Incorrect fit model.") if dataset.params["X_meta"].count("d") > 0: # if there are discrete features in the model # code calculates logarithms to optimize predict phase as opposed to calculation by every mapper. np.seterr(divide='ignore') for iv in fit_model["iv"]: dist = [fit_model.pop((y,) + iv, 0) for y in fit_model["y_labels"]] fit_model[iv] = np.nan_to_num( np.log(np.true_divide(np.array(dist) + m * fit_model["prior"], np.sum(dist) + m))) - fit_model[ "prior_log"] del (fit_model["iv"]) # define a job and set save of results to ddfs job = Job(worker=Worker(save_results=save_results)) # job parallelizes execution of mappers job.pipeline = [ ("split", Stage("map", input_chain=dataset.params["input_chain"], init=simple_init, process=map_predict))] job.params = dataset.params # job parameters (dataset object) job.params["fit_model"] = fit_model # define name of a job and input data urls job.run(name="naivebayes_predict", input=dataset.params["data_tag"]) results = job.wait(show=show) return results
python
def predict(dataset, fitmodel_url, m=1, save_results=True, show=False): """ Function starts a job that makes predictions on input data with a given model Parameters ---------- input - dataset object with input urls and other parameters fitmodel_url - model created in fit phase m - m estimate is used with discrete features save_results - save results to ddfs show - show info about job execution Returns ------- Urls of predictions on ddfs """ from disco.worker.pipeline.worker import Worker, Stage from disco.core import Job, result_iterator import numpy as np try: m = float(m) except ValueError: raise Exception("Parameter m should be numerical.") if "naivebayes_fitmodel" in fitmodel_url: # fit model is loaded from ddfs fit_model = dict((k, v) for k, v in result_iterator(fitmodel_url["naivebayes_fitmodel"])) if len(fit_model["y_labels"]) < 2: print "There is only one class in training data." return [] else: raise Exception("Incorrect fit model.") if dataset.params["X_meta"].count("d") > 0: # if there are discrete features in the model # code calculates logarithms to optimize predict phase as opposed to calculation by every mapper. np.seterr(divide='ignore') for iv in fit_model["iv"]: dist = [fit_model.pop((y,) + iv, 0) for y in fit_model["y_labels"]] fit_model[iv] = np.nan_to_num( np.log(np.true_divide(np.array(dist) + m * fit_model["prior"], np.sum(dist) + m))) - fit_model[ "prior_log"] del (fit_model["iv"]) # define a job and set save of results to ddfs job = Job(worker=Worker(save_results=save_results)) # job parallelizes execution of mappers job.pipeline = [ ("split", Stage("map", input_chain=dataset.params["input_chain"], init=simple_init, process=map_predict))] job.params = dataset.params # job parameters (dataset object) job.params["fit_model"] = fit_model # define name of a job and input data urls job.run(name="naivebayes_predict", input=dataset.params["data_tag"]) results = job.wait(show=show) return results
[ "def", "predict", "(", "dataset", ",", "fitmodel_url", ",", "m", "=", "1", ",", "save_results", "=", "True", ",", "show", "=", "False", ")", ":", "from", "disco", ".", "worker", ".", "pipeline", ".", "worker", "import", "Worker", ",", "Stage", "from", "disco", ".", "core", "import", "Job", ",", "result_iterator", "import", "numpy", "as", "np", "try", ":", "m", "=", "float", "(", "m", ")", "except", "ValueError", ":", "raise", "Exception", "(", "\"Parameter m should be numerical.\"", ")", "if", "\"naivebayes_fitmodel\"", "in", "fitmodel_url", ":", "# fit model is loaded from ddfs", "fit_model", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "result_iterator", "(", "fitmodel_url", "[", "\"naivebayes_fitmodel\"", "]", ")", ")", "if", "len", "(", "fit_model", "[", "\"y_labels\"", "]", ")", "<", "2", ":", "print", "\"There is only one class in training data.\"", "return", "[", "]", "else", ":", "raise", "Exception", "(", "\"Incorrect fit model.\"", ")", "if", "dataset", ".", "params", "[", "\"X_meta\"", "]", ".", "count", "(", "\"d\"", ")", ">", "0", ":", "# if there are discrete features in the model", "# code calculates logarithms to optimize predict phase as opposed to calculation by every mapped.", "np", ".", "seterr", "(", "divide", "=", "'ignore'", ")", "for", "iv", "in", "fit_model", "[", "\"iv\"", "]", ":", "dist", "=", "[", "fit_model", ".", "pop", "(", "(", "y", ",", ")", "+", "iv", ",", "0", ")", "for", "y", "in", "fit_model", "[", "\"y_labels\"", "]", "]", "fit_model", "[", "iv", "]", "=", "np", ".", "nan_to_num", "(", "np", ".", "log", "(", "np", ".", "true_divide", "(", "np", ".", "array", "(", "dist", ")", "+", "m", "*", "fit_model", "[", "\"prior\"", "]", ",", "np", ".", "sum", "(", "dist", ")", "+", "m", ")", ")", ")", "-", "fit_model", "[", "\"prior_log\"", "]", "del", "(", "fit_model", "[", "\"iv\"", "]", ")", "# define a job and set save of results to ddfs", "job", "=", "Job", "(", "worker", "=", "Worker", "(", "save_results", "=", "save_results", ")", ")", "# job parallelizes execution of mappers", "job", ".", "pipeline", "=", "[", "(", "\"split\"", ",", "Stage", "(", "\"map\"", ",", "input_chain", "=", "dataset", ".", "params", "[", "\"input_chain\"", "]", ",", "init", "=", "simple_init", ",", "process", "=", "map_predict", ")", ")", "]", "job", ".", "params", "=", "dataset", ".", "params", "# job parameters (dataset object)", "job", ".", "params", "[", "\"fit_model\"", "]", "=", "fit_model", "# define name of a job and input data urls", "job", ".", "run", "(", "name", "=", "\"naivebayes_predict\"", ",", "input", "=", "dataset", ".", "params", "[", "\"data_tag\"", "]", ")", "results", "=", "job", ".", "wait", "(", "show", "=", "show", ")", "return", "results" ]
Function starts a job that makes predictions on input data with a given model Parameters ---------- input - dataset object with input urls and other parameters fitmodel_url - model created in fit phase m - m estimate is used with discrete features save_results - save results to ddfs show - show info about job execution Returns ------- Urls of predictions on ddfs
[ "Function", "starts", "a", "job", "that", "makes", "predictions", "to", "input", "data", "with", "a", "given", "model" ]
a4703daffb2ba3c9f614bc3dbe45ae55884aea00
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/classification/naivebayes.py#L208-L264
train
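The loop over fit_model["iv"] in predict applies m-estimate smoothing before taking logs: each conditional probability becomes (count + m * prior) / (total + m), which keeps a feature value unseen for some class from zeroing out that class entirely. A tiny numeric illustration; the counts and priors here are made-up values chosen only for the example:

import numpy as np

prior = np.array([0.5, 0.5])   # P(y) for two classes
dist = np.array([8.0, 0.0])    # occurrences of one (feature, value) pair per class
m = 1.0

smoothed = (dist + m * prior) / (dist.sum() + m)
print(smoothed)                # [0.9444... 0.0555...] -- no hard zero for class 2
log_likelihood = np.log(smoothed) - np.log(prior)  # mirrors the "- prior_log" term
print(log_likelihood)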
pneff/wsgiservice
wsgiservice/resource.py
Resource.data
def data(self): """Returns the request data as a dictionary. Merges the path parameters, GET parameters and POST parameters (form-encoded or JSON dictionary). If a key is present in multiple of these, the first one defined is used. """ if self._data: return self._data retval = {} data = self.get_request_data() for subdata in data: for key, value in subdata.iteritems(): if not key in retval: retval[key] = value self._data = retval return retval
python
def data(self): """Returns the request data as a dictionary. Merges the path parameters, GET parameters and POST parameters (form-encoded or JSON dictionary). If a key is present in multiple of these, the first one defined is used. """ if self._data: return self._data retval = {} data = self.get_request_data() for subdata in data: for key, value in subdata.iteritems(): if not key in retval: retval[key] = value self._data = retval return retval
[ "def", "data", "(", "self", ")", ":", "if", "self", ".", "_data", ":", "return", "self", ".", "_data", "retval", "=", "{", "}", "data", "=", "self", ".", "get_request_data", "(", ")", "for", "subdata", "in", "data", ":", "for", "key", ",", "value", "in", "subdata", ".", "iteritems", "(", ")", ":", "if", "not", "key", "in", "retval", ":", "retval", "[", "key", "]", "=", "value", "self", ".", "_data", "=", "retval", "return", "retval" ]
Returns the request data as a dictionary. Merges the path parameters, GET parameters and POST parameters (form-encoded or JSON dictionary). If a key is present in multiple of these, the first one defined is used.
[ "Returns", "the", "request", "data", "as", "a", "dictionary", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L157-L175
train
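The merge order in Resource.data gives precedence to whichever source defines a key first. The same first-one-wins behaviour in isolation, written as a standalone sketch rather than the library's actual method:

def merge_first_wins(sources):
    merged = {}
    for source in sources:              # earlier sources take precedence
        for key, value in source.items():
            if key not in merged:
                merged[key] = value
    return merged

path, query, body = {"id": "7"}, {"id": "9", "q": "x"}, {"body": "y"}
print(merge_first_wins([path, query, body]))  # {'id': '7', 'q': 'x', 'body': 'y'}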
pneff/wsgiservice
wsgiservice/resource.py
Resource.get_resource
def get_resource(self, resource, **kwargs): """Returns a new instance of the resource class passed in as resource. This is a helper to make future-compatibility easier when new arguments get added to the constructor. :param resource: Resource class to instantiate. Gets called with the named arguments as required for the constructor. :type resource: :class:`Resource` :param kwargs: Additional named arguments to pass to the constructor function. :type kwargs: dict """ return resource(request=self.request, response=self.response, path_params=self.path_params, application=self.application, **kwargs)
python
def get_resource(self, resource, **kwargs): """Returns a new instance of the resource class passed in as resource. This is a helper to make future-compatibility easier when new arguments get added to the constructor. :param resource: Resource class to instantiate. Gets called with the named arguments as required for the constructor. :type resource: :class:`Resource` :param kwargs: Additional named arguments to pass to the constructor function. :type kwargs: dict """ return resource(request=self.request, response=self.response, path_params=self.path_params, application=self.application, **kwargs)
[ "def", "get_resource", "(", "self", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "return", "resource", "(", "request", "=", "self", ".", "request", ",", "response", "=", "self", ".", "response", ",", "path_params", "=", "self", ".", "path_params", ",", "application", "=", "self", ".", "application", ",", "*", "*", "kwargs", ")" ]
Returns a new instance of the resource class passed in as resource. This is a helper to make future-compatibility easier when new arguments get added to the constructor. :param resource: Resource class to instantiate. Gets called with the named arguments as required for the constructor. :type resource: :class:`Resource` :param kwargs: Additional named arguments to pass to the constructor function. :type kwargs: dict
[ "Returns", "a", "new", "instance", "of", "the", "resource", "class", "passed", "in", "as", "resource", ".", "This", "is", "a", "helper", "to", "make", "future", "-", "compatibility", "easier", "when", "new", "arguments", "get", "added", "to", "the", "constructor", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L177-L191
train
pneff/wsgiservice
wsgiservice/resource.py
Resource.assert_conditions
def assert_conditions(self): """Handles various HTTP conditions and raises HTTP exceptions to abort the request. - Content-MD5 request header must match the MD5 hash of the full input (:func:`assert_condition_md5`). - If-Match and If-None-Match etags are checked against the ETag of this resource (:func:`assert_condition_etag`). - If-Modified-Since and If-Unmodified-Since are checked against the modification date of this resource (:func:`assert_condition_last_modified`). .. todo:: Return a 501 exception when any Content-* headers have been set in the request. (See :rfc:`2616`, section 9.6) """ self.assert_condition_md5() etag = self.clean_etag(self.call_method('get_etag')) self.response.last_modified = self.call_method('get_last_modified') self.assert_condition_etag() self.assert_condition_last_modified()
python
def assert_conditions(self): """Handles various HTTP conditions and raises HTTP exceptions to abort the request. - Content-MD5 request header must match the MD5 hash of the full input (:func:`assert_condition_md5`). - If-Match and If-None-Match etags are checked against the ETag of this resource (:func:`assert_condition_etag`). - If-Modified-Since and If-Unmodified-Since are checked against the modification date of this resource (:func:`assert_condition_last_modified`). .. todo:: Return a 501 exception when any Content-* headers have been set in the request. (See :rfc:`2616`, section 9.6) """ self.assert_condition_md5() etag = self.clean_etag(self.call_method('get_etag')) self.response.last_modified = self.call_method('get_last_modified') self.assert_condition_etag() self.assert_condition_last_modified()
[ "def", "assert_conditions", "(", "self", ")", ":", "self", ".", "assert_condition_md5", "(", ")", "etag", "=", "self", ".", "clean_etag", "(", "self", ".", "call_method", "(", "'get_etag'", ")", ")", "self", ".", "response", ".", "last_modified", "=", "self", ".", "call_method", "(", "'get_last_modified'", ")", "self", ".", "assert_condition_etag", "(", ")", "self", ".", "assert_condition_last_modified", "(", ")" ]
Handles various HTTP conditions and raises HTTP exceptions to abort the request. - Content-MD5 request header must match the MD5 hash of the full input (:func:`assert_condition_md5`). - If-Match and If-None-Match etags are checked against the ETag of this resource (:func:`assert_condition_etag`). - If-Modified-Since and If-Unmodified-Since are checked against the modification date of this resource (:func:`assert_condition_last_modified`). .. todo:: Return a 501 exception when any Content-* headers have been set in the request. (See :rfc:`2616`, section 9.6)
[ "Handles", "various", "HTTP", "conditions", "and", "raises", "HTTP", "exceptions", "to", "abort", "the", "request", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L258-L277
train
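The precondition checks in assert_conditions implement HTTP conditional-request semantics: If-Match/If-None-Match are compared against the resource's ETag, If-Modified-Since/If-Unmodified-Since against its modification date. A stripped-down sketch of just the If-None-Match branch, independent of the framework (the function name and return convention are illustrative, not wsgiservice's API):

def check_if_none_match(request_headers, current_etag):
    # For GET, a 304 is appropriate when the client's cached ETag still matches.
    client_etags = request_headers.get("If-None-Match", "")
    tags = {t.strip() for t in client_etags.split(",") if t.strip()}
    if "*" in tags or current_etag in tags:
        return 304
    return 200

print(check_if_none_match({"If-None-Match": '"abc", "def"'}, '"abc"'))  # 304
print(check_if_none_match({}, '"abc"'))                                 # 200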
pneff/wsgiservice
wsgiservice/resource.py
Resource.assert_condition_md5
def assert_condition_md5(self): """If the ``Content-MD5`` request header is present in the request it's verified against the MD5 hash of the request body. If they don't match, a 400 HTTP response is returned. :raises: :class:`webob.exceptions.ResponseException` of status 400 if the MD5 hash does not match the body. """ if 'Content-MD5' in self.request.headers: body_md5 = hashlib.md5(self.request.body).hexdigest() if body_md5 != self.request.headers['Content-MD5']: raise_400(self, msg='Invalid Content-MD5 request header.')
python
def assert_condition_md5(self): """If the ``Content-MD5`` request header is present in the request it's verified against the MD5 hash of the request body. If they don't match, a 400 HTTP response is returned. :raises: :class:`webob.exceptions.ResponseException` of status 400 if the MD5 hash does not match the body. """ if 'Content-MD5' in self.request.headers: body_md5 = hashlib.md5(self.request.body).hexdigest() if body_md5 != self.request.headers['Content-MD5']: raise_400(self, msg='Invalid Content-MD5 request header.')
[ "def", "assert_condition_md5", "(", "self", ")", ":", "if", "'Content-MD5'", "in", "self", ".", "request", ".", "headers", ":", "body_md5", "=", "hashlib", ".", "md5", "(", "self", ".", "request", ".", "body", ")", ".", "hexdigest", "(", ")", "if", "body_md5", "!=", "self", ".", "request", ".", "headers", "[", "'Content-MD5'", "]", ":", "raise_400", "(", "self", ",", "msg", "=", "'Invalid Content-MD5 request header.'", ")" ]
If the ``Content-MD5`` request header is present in the request it's verified against the MD5 hash of the request body. If they don't match, a 400 HTTP response is returned. :raises: :class:`webob.exceptions.ResponseException` of status 400 if the MD5 hash does not match the body.
[ "If", "the", "Content", "-", "MD5", "request", "header", "is", "present", "in", "the", "request", "it", "s", "verified", "against", "the", "MD5", "hash", "of", "the", "request", "body", ".", "If", "they", "don", "t", "match", "a", "400", "HTTP", "response", "is", "returned", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L279-L290
train
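A self-contained rerun of the check above: hash the raw body and compare it to the header value. wsgiservice compares hex digests, although RFC 1864 actually defines Content-MD5 as a base64-encoded digest; the sketch follows the code as written.

import hashlib

body = b'{"name": "example"}'
headers = {'Content-MD5': hashlib.md5(body).hexdigest()}

# The same comparison as assert_condition_md5, minus the webob plumbing.
assert hashlib.md5(body).hexdigest() == headers['Content-MD5']
print('Content-MD5 matches')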
pneff/wsgiservice
wsgiservice/resource.py
Resource.get_allowed_methods
def get_allowed_methods(self): """Returns a comma-separated list of method names that are allowed on this instance. Useful to set the ``Allowed`` response header. """ return ", ".join([method for method in dir(self) if method.upper() == method and callable(getattr(self, method))])
python
def get_allowed_methods(self): """Returns a comma-separated list of method names that are allowed on this instance. Useful to set the ``Allowed`` response header. """ return ", ".join([method for method in dir(self) if method.upper() == method and callable(getattr(self, method))])
[ "def", "get_allowed_methods", "(", "self", ")", ":", "return", "\", \"", ".", "join", "(", "[", "method", "for", "method", "in", "dir", "(", "self", ")", "if", "method", ".", "upper", "(", ")", "==", "method", "and", "callable", "(", "getattr", "(", "self", ",", "method", ")", ")", "]", ")" ]
Returns a comma-separated list of method names that are allowed on this instance. Useful to set the ``Allowed`` response header.
[ "Returns", "a", "comma", "-", "separated", "list", "of", "method", "names", "that", "are", "allowed", "on", "this", "instance", ".", "Useful", "to", "set", "the", "Allowed", "response", "header", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L373-L379
train
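The discovery rule above is purely name-based: any attribute whose name is entirely upper-case and callable is reported as an HTTP method. A toy class (not part of wsgiservice) makes the behaviour visible:

class Toy(object):
    def GET(self):
        pass

    def POST(self):
        pass

    def helper(self):  # contains lower-case letters, so not reported
        pass

toy = Toy()
allowed = ", ".join(m for m in dir(toy)
                    if m.upper() == m and callable(getattr(toy, m)))
print(allowed)  # GET, POST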
pneff/wsgiservice
wsgiservice/resource.py
Resource.convert_param
def convert_param(self, method, param, value): """Converts the parameter using the 'convert' function of the validation rules. Same parameters as the `validate_param` method, so it might have just been added there. But lumping together the two functionalities would make overwriting harder. :param method: A function to get the validation information from (done using :func:`_get_validation`). :type method: Python function :param param: Name of the parameter to validate the value for. :type param: str :param value: Value passed in for the given parameter. :type value: Any valid Python value :raises: :class:`wsgiservice.exceptions.ValidationException` if the value is invalid for the given method and parameter. """ rules = self._get_validation(method, param) if not rules or not rules.get('convert'): return value try: return rules['convert'](value) except ValueError: raise ValidationException( "{0} value {1} does not validate.".format(param, value))
python
def convert_param(self, method, param, value): """Converts the parameter using the 'convert' function of the validation rules. Same parameters as the `validate_param` method, so it might have just been added there. But lumping together the two functionalities would make overwriting harder. :param method: A function to get the validation information from (done using :func:`_get_validation`). :type method: Python function :param param: Name of the parameter to validate the value for. :type param: str :param value: Value passed in for the given parameter. :type value: Any valid Python value :raises: :class:`wsgiservice.exceptions.ValidationException` if the value is invalid for the given method and parameter. """ rules = self._get_validation(method, param) if not rules or not rules.get('convert'): return value try: return rules['convert'](value) except ValueError: raise ValidationException( "{0} value {1} does not validate.".format(param, value))
[ "def", "convert_param", "(", "self", ",", "method", ",", "param", ",", "value", ")", ":", "rules", "=", "self", ".", "_get_validation", "(", "method", ",", "param", ")", "if", "not", "rules", "or", "not", "rules", ".", "get", "(", "'convert'", ")", ":", "return", "value", "try", ":", "return", "rules", "[", "'convert'", "]", "(", "value", ")", "except", "ValueError", ":", "raise", "ValidationException", "(", "\"{0} value {1} does not validate.\"", ".", "format", "(", "param", ",", "value", ")", ")" ]
Converts the parameter using the 'convert' function of the validation rules. Same parameters as the `validate_param` method, so it might have just been added there. But lumping together the two functionalities would make overwriting harder. :param method: A function to get the validation information from (done using :func:`_get_validation`). :type method: Python function :param param: Name of the parameter to validate the value for. :type param: str :param value: Value passed in for the given parameter. :type value: Any valid Python value :raises: :class:`wsgiservice.exceptions.ValidationException` if the value is invalid for the given method and parameter.
[ "Converts", "the", "parameter", "using", "the", "convert", "function", "of", "the", "validation", "rules", ".", "Same", "parameters", "as", "the", "validate_param", "method", "so", "it", "might", "have", "just", "been", "added", "there", ".", "But", "lumping", "together", "the", "two", "functionalities", "would", "make", "overwriting", "harder", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L442-L466
train
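Seen in isolation, a 'convert' rule is just a callable applied to the raw value, with ValueError translated into a validation error. The rules dict below is hand-built for the sketch; in wsgiservice such rules are attached by its validate decorator and looked up through _get_validation:

rules = {'convert': int}

def convert(value, rules):
    if not rules or not rules.get('convert'):
        return value  # no conversion configured: pass through unchanged
    try:
        return rules['convert'](value)
    except ValueError:
        raise ValueError('value {0!r} does not validate.'.format(value))

print(convert('42', rules))  # 42 (an int)
print(convert('42', {}))     # '42' (unchanged string)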
pneff/wsgiservice
wsgiservice/resource.py
Resource._get_validation
def _get_validation(self, method, param): """Return the correct validations dictionary for this parameter. First checks the method itself and then its class. If no validation is defined for this parameter, None is returned. :param method: A function to get the validation information from. :type method: Python function :param param: Name of the parameter to get validation information for. :type param: str """ if hasattr(method, '_validations') and param in method._validations: return method._validations[param] elif (hasattr(method.im_class, '_validations') and param in method.im_class._validations): return method.im_class._validations[param] else: return None
python
def _get_validation(self, method, param): """Return the correct validations dictionary for this parameter. First checks the method itself and then its class. If no validation is defined for this parameter, None is returned. :param method: A function to get the validation information from. :type method: Python function :param param: Name of the parameter to get validation information for. :type param: str """ if hasattr(method, '_validations') and param in method._validations: return method._validations[param] elif (hasattr(method.im_class, '_validations') and param in method.im_class._validations): return method.im_class._validations[param] else: return None
[ "def", "_get_validation", "(", "self", ",", "method", ",", "param", ")", ":", "if", "hasattr", "(", "method", ",", "'_validations'", ")", "and", "param", "in", "method", ".", "_validations", ":", "return", "method", ".", "_validations", "[", "param", "]", "elif", "(", "hasattr", "(", "method", ".", "im_class", ",", "'_validations'", ")", "and", "param", "in", "method", ".", "im_class", ".", "_validations", ")", ":", "return", "method", ".", "im_class", ".", "_validations", "[", "param", "]", "else", ":", "return", "None" ]
Return the correct validations dictionary for this parameter. First checks the method itself and then its class. If no validation is defined for this parameter, None is returned. :param method: A function to get the validation information from. :type method: Python function :param param: Name of the parameter to get validation information for. :type param: str
[ "Return", "the", "correct", "validations", "dictionary", "for", "this", "parameter", ".", "First", "checks", "the", "method", "itself", "and", "then", "its", "class", ".", "If", "no", "validation", "is", "defined", "for", "this", "parameter", "None", "is", "returned", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L468-L484
train
pneff/wsgiservice
wsgiservice/resource.py
Resource.convert_response
def convert_response(self): """Finish filling the instance's response object so it's ready to be served to the client. This includes converting the body_raw property to the content type requested by the user if necessary. """ if hasattr(self.response, 'body_raw'): if self.response.body_raw is not None: to_type = re.sub('[^a-zA-Z_]', '_', self.type) to_type_method = 'to_' + to_type if hasattr(self, to_type_method): self.response.body = getattr(self, to_type_method)( self.response.body_raw) del self.response.body_raw
python
def convert_response(self): """Finish filling the instance's response object so it's ready to be served to the client. This includes converting the body_raw property to the content type requested by the user if necessary. """ if hasattr(self.response, 'body_raw'): if self.response.body_raw is not None: to_type = re.sub('[^a-zA-Z_]', '_', self.type) to_type_method = 'to_' + to_type if hasattr(self, to_type_method): self.response.body = getattr(self, to_type_method)( self.response.body_raw) del self.response.body_raw
[ "def", "convert_response", "(", "self", ")", ":", "if", "hasattr", "(", "self", ".", "response", ",", "'body_raw'", ")", ":", "if", "self", ".", "response", ".", "body_raw", "is", "not", "None", ":", "to_type", "=", "re", ".", "sub", "(", "'[^a-zA-Z_]'", ",", "'_'", ",", "self", ".", "type", ")", "to_type_method", "=", "'to_'", "+", "to_type", "if", "hasattr", "(", "self", ",", "to_type_method", ")", ":", "self", ".", "response", ".", "body", "=", "getattr", "(", "self", ",", "to_type_method", ")", "(", "self", ".", "response", ".", "body_raw", ")", "del", "self", ".", "response", ".", "body_raw" ]
Finish filling the instance's response object so it's ready to be served to the client. This includes converting the body_raw property to the content type requested by the user if necessary.
[ "Finish", "filling", "the", "instance", "s", "response", "object", "so", "it", "s", "ready", "to", "be", "served", "to", "the", "client", ".", "This", "includes", "converting", "the", "body_raw", "property", "to", "the", "content", "type", "requested", "by", "the", "user", "if", "necessary", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L486-L498
train
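The dispatch in convert_response is string-driven: the negotiated MIME type is normalised to a Python identifier and prefixed with to_, and the matching method, if any, renders the body. A cut-down illustration with a made-up Renderer class standing in for a Resource:

import json
import re

class Renderer(object):
    def to_application_json(self, raw):
        return json.dumps(raw)

mime = 'application/json'
method_name = 'to_' + re.sub('[^a-zA-Z_]', '_', mime)  # to_application_json
renderer = Renderer()
if hasattr(renderer, method_name):
    print(getattr(renderer, method_name)({'ok': True}))  # {"ok": true}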
pneff/wsgiservice
wsgiservice/resource.py
Resource.handle_exception
def handle_exception(self, e, status=500): """Handle the given exception. Log it, set the response code and output the exception message as an error message. :param e: Exception which is being handled. :type e: :class:`Exception` :param status: Status code to set. :type status: int """ logger.exception( "An exception occurred while handling the request: %s", e) self.response.body_raw = {'error': str(e)} self.response.status = status
python
def handle_exception(self, e, status=500): """Handle the given exception. Log it, set the response code and output the exception message as an error message. :param e: Exception which is being handled. :type e: :class:`Exception` :param status: Status code to set. :type status: int """ logger.exception( "An exception occurred while handling the request: %s", e) self.response.body_raw = {'error': str(e)} self.response.status = status
[ "def", "handle_exception", "(", "self", ",", "e", ",", "status", "=", "500", ")", ":", "logger", ".", "exception", "(", "\"An exception occurred while handling the request: %s\"", ",", "e", ")", "self", ".", "response", ".", "body_raw", "=", "{", "'error'", ":", "str", "(", "e", ")", "}", "self", ".", "response", ".", "status", "=", "status" ]
Handle the given exception. Log it, set the response code and output the exception message as an error message. :param e: Exception which is being handled. :type e: :class:`Exception` :param status: Status code to set. :type status: int
[ "Handle", "the", "given", "exception", ".", "Log", "it", "set", "the", "response", "code", "and", "output", "the", "exception", "message", "as", "an", "error", "message", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L525-L537
train
pneff/wsgiservice
wsgiservice/resource.py
Resource.handle_exception_404
def handle_exception_404(self, e): """Handle the given exception. Log it, set the response code to 404 and output a generic 'Not Found' error message. :param e: Exception which is being handled. :type e: :class:`Exception` """ logger.debug("A 404 Not Found exception occurred while handling " "the request.") self.response.body_raw = {'error': 'Not Found'} self.response.status = 404
python
def handle_exception_404(self, e): """Handle the given exception. Log it, set the response code to 404 and output a generic 'Not Found' error message. :param e: Exception which is being handled. :type e: :class:`Exception` """ logger.debug("A 404 Not Found exception occurred while handling " "the request.") self.response.body_raw = {'error': 'Not Found'} self.response.status = 404
[ "def", "handle_exception_404", "(", "self", ",", "e", ")", ":", "logger", ".", "debug", "(", "\"A 404 Not Found exception occurred while handling \"", "\"the request.\"", ")", "self", ".", "response", ".", "body_raw", "=", "{", "'error'", ":", "'Not Found'", "}", "self", ".", "response", ".", "status", "=", "404" ]
Handle the given exception. Log it, set the response code to 404 and output a generic 'Not Found' error message. :param e: Exception which is being handled. :type e: :class:`Exception`
[ "Handle", "the", "given", "exception", ".", "Log", "it", "set", "the", "response", "code", "to", "404", "and", "output", "a", "generic", "Not", "Found", "error", "message", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L539-L549
train
pneff/wsgiservice
wsgiservice/resource.py
Resource.set_response_content_md5
def set_response_content_md5(self): """Set the Content-MD5 response header. Calculated from the response body by creating the MD5 hash from it. """ self.response.content_md5 = hashlib.md5(self.response.body).hexdigest()
python
def set_response_content_md5(self): """Set the Content-MD5 response header. Calculated from the response body by creating the MD5 hash from it. """ self.response.content_md5 = hashlib.md5(self.response.body).hexdigest()
[ "def", "set_response_content_md5", "(", "self", ")", ":", "self", ".", "response", ".", "content_md5", "=", "hashlib", ".", "md5", "(", "self", ".", "response", ".", "body", ")", ".", "hexdigest", "(", ")" ]
Set the Content-MD5 response header. Calculated from the response body by creating the MD5 hash from it.
[ "Set", "the", "Content", "-", "MD5", "response", "header", ".", "Calculated", "from", "the", "response", "body", "by", "creating", "the", "MD5", "hash", "from", "it", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L569-L573
train
pneff/wsgiservice
wsgiservice/resource.py
Resource.get_request_data
def get_request_data(self): """ Read the input values. Returns a list of dictionaries. These will be used to automatically pass them into the method. Additionally a combined dictionary is written to `self.data`. In the case of JSON input, that element in this list will be the parsed JSON value. That may not be a dictionary. """ request_data = [self.path_params, self.request.GET] if self.request.headers.get('Content-Type') == 'application/json' \ and self.request.body: try: post = json.loads(self.request.body) except ValueError: raise_400(self, msg='Invalid JSON content data') if isinstance(post, dict): request_data.append(post) else: request_data.append(self.request.POST) return request_data
python
def get_request_data(self): """ Read the input values. Returns a list of dictionaries. These will be used to automatically pass them into the method. Additionally a combined dictionary is written to `self.data`. In the case of JSON input, that element in this list will be the parsed JSON value. That may not be a dictionary. """ request_data = [self.path_params, self.request.GET] if self.request.headers.get('Content-Type') == 'application/json' \ and self.request.body: try: post = json.loads(self.request.body) except ValueError: raise_400(self, msg='Invalid JSON content data') if isinstance(post, dict): request_data.append(post) else: request_data.append(self.request.POST) return request_data
[ "def", "get_request_data", "(", "self", ")", ":", "request_data", "=", "[", "self", ".", "path_params", ",", "self", ".", "request", ".", "GET", "]", "if", "self", ".", "request", ".", "headers", ".", "get", "(", "'Content-Type'", ")", "==", "'application/json'", "and", "self", ".", "request", ".", "body", ":", "try", ":", "post", "=", "json", ".", "loads", "(", "self", ".", "request", ".", "body", ")", "except", "ValueError", ":", "raise_400", "(", "self", ",", "msg", "=", "'Invalid JSON content data'", ")", "if", "isinstance", "(", "post", ",", "dict", ")", ":", "request_data", ".", "append", "(", "post", ")", "else", ":", "request_data", ".", "append", "(", "self", ".", "request", ".", "POST", ")", "return", "request_data" ]
Read the input values. Returns a list of dictionaries. These will be used to automatically pass them into the method. Additionally a combined dictionary is written to `self.data`. In the case of JSON input, that element in this list will be the parsed JSON value. That may not be a dictionary.
[ "Read", "the", "input", "values", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L575-L600
train
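Because the record flattens the source, the exact branch structure of the JSON handling is ambiguous here. One plausible reading, as a standalone function with an invented signature: parse the body only when the Content-Type says JSON, turn a parse failure into a client error, and fall back to the form-encoded POST data otherwise.

import json

def collect_request_data(headers, body, query, post_form):
    data = [query]
    if headers.get('Content-Type') == 'application/json' and body:
        try:
            data.append(json.loads(body))  # may be a dict, list, scalar, ...
        except ValueError:
            raise ValueError('Invalid JSON content data')  # raise_400 upstream
    else:
        data.append(post_form)
    return data

print(collect_request_data({'Content-Type': 'application/json'},
                           '{"a": 1}', {'q': '2'}, {}))  # [{'q': '2'}, {'a': 1}]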
pneff/wsgiservice
wsgiservice/resource.py
Resource._merge_defaults
def _merge_defaults(self, data, method_params, defaults): """Helper method for adding default values to the data dictionary. The `defaults` are the default values inspected from the method that will be called. For any values that are not present in the incoming data, the default value is added. """ if defaults: optional_args = method_params[-len(defaults):] for key, value in zip(optional_args, defaults): if key not in data: data[key] = value return data
python
def _merge_defaults(self, data, method_params, defaults): """Helper method for adding default values to the data dictionary. The `defaults` are the default values inspected from the method that will be called. For any values that are not present in the incoming data, the default value is added. """ if defaults: optional_args = method_params[-len(defaults):] for key, value in zip(optional_args, defaults): if key not in data: data[key] = value return data
[ "def", "_merge_defaults", "(", "self", ",", "data", ",", "method_params", ",", "defaults", ")", ":", "if", "defaults", ":", "optional_args", "=", "method_params", "[", "-", "len", "(", "defaults", ")", ":", "]", "for", "key", ",", "value", "in", "zip", "(", "optional_args", ",", "defaults", ")", ":", "if", "not", "key", "in", "data", ":", "data", "[", "key", "]", "=", "value", "return", "data" ]
Helper method for adding default values to the data dictionary. The `defaults` are the default values inspected from the method that will be called. For any values that are not present in the incoming data, the default value is added.
[ "Helper", "method", "for", "adding", "default", "values", "to", "the", "data", "dictionary", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L602-L614
train
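The zip trick above relies on Python's rule that default values always belong to the trailing parameters, so the last len(defaults) names in method_params are exactly the optional ones. A Python 3 sketch showing where such (params, defaults) pairs typically come from:

import inspect

def handler(id, page=1, size=20):
    pass

spec = inspect.getfullargspec(handler)
params, defaults = spec.args, spec.defaults or ()
data = {'id': '7', 'size': 50}
for key, value in zip(params[-len(defaults):], defaults):
    data.setdefault(key, value)  # same effect as "if key not in data"
print(data)  # {'id': '7', 'size': 50, 'page': 1}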
mouradmourafiq/pandas2sklearn
pandas_sklearn/__init__.py
DataSetTransformer._get_columns
def _get_columns(self, X, cols): """ Get a subset of columns from the given table X. X a Pandas dataframe; the table to select columns from cols a string or list of strings representing the columns to select Returns a numpy array with the data from the selected columns """ if isinstance(X, DataSet): X = X[cols] return_vector = False if isinstance(cols, basestring): return_vector = True cols = [cols] if isinstance(X, list): X = [x[cols] for x in X] X = pd.DataFrame(X) if return_vector: t = X[cols[0]] else: t = X.as_matrix(cols) return t
python
def _get_columns(self, X, cols): """ Get a subset of columns from the given table X. X a Pandas dataframe; the table to select columns from cols a string or list of strings representing the columns to select Returns a numpy array with the data from the selected columns """ if isinstance(X, DataSet): X = X[cols] return_vector = False if isinstance(cols, basestring): return_vector = True cols = [cols] if isinstance(X, list): X = [x[cols] for x in X] X = pd.DataFrame(X) if return_vector: t = X[cols[0]] else: t = X.as_matrix(cols) return t
[ "def", "_get_columns", "(", "self", ",", "X", ",", "cols", ")", ":", "if", "isinstance", "(", "X", ",", "DataSet", ")", ":", "X", "=", "X", "[", "cols", "]", "return_vector", "=", "False", "if", "isinstance", "(", "cols", ",", "basestring", ")", ":", "return_vector", "=", "True", "cols", "=", "[", "cols", "]", "if", "isinstance", "(", "X", ",", "list", ")", ":", "X", "=", "[", "x", "[", "cols", "]", "for", "x", "in", "X", "]", "X", "=", "pd", ".", "DataFrame", "(", "X", ")", "if", "return_vector", ":", "t", "=", "X", "[", "cols", "[", "0", "]", "]", "else", ":", "t", "=", "X", ".", "as_matrix", "(", "cols", ")", "return", "t" ]
Get a subset of columns from the given table X. X a Pandas dataframe; the table to select columns from cols a string or list of strings representing the columns to select Returns a numpy array with the data from the selected columns
[ "Get", "a", "subset", "of", "columns", "from", "the", "given", "table", "X", ".", "X", "a", "Pandas", "dataframe", ";", "the", "table", "to", "select", "columns", "from", "cols", "a", "string", "or", "list", "of", "strings", "representing", "the", "columns", "to", "select", "Returns", "a", "numpy", "array", "with", "the", "data", "from", "the", "selected", "columns" ]
dbaf5180a893f4612852c1c217551b161fd519d4
https://github.com/mouradmourafiq/pandas2sklearn/blob/dbaf5180a893f4612852c1c217551b161fd519d4/pandas_sklearn/__init__.py#L181-L206
train
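The selection rule above in plain pandas: a single column name yields a Series (a vector), a list of names yields a matrix. DataFrame.as_matrix(), used in the record, was removed in pandas 1.0; to_numpy() is the modern replacement and is what this sketch uses:

import pandas as pd

X = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
vector = X['a']                        # Series, as for a single string
matrix = X[['a', 'b']].to_numpy()      # 2-D numpy array, as for a list
print(vector.tolist(), matrix.shape)   # [1, 2] (2, 2)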
mkoura/dump2polarion
dump2polarion/exporters/requirements_exporter.py
RequirementExport._requirement_element
def _requirement_element(self, parent_element, req_data): """Adds requirement XML element.""" req_data = self._transform_result(req_data) if not req_data: return title = req_data.get("title") if not title: logger.warning("Skipping requirement, title is missing") return req_id = req_data.get("id") if not self._check_lookup_prop(req_id): logger.warning( "Skipping requirement `%s`, data missing for selected lookup method", title ) return attrs, custom_fields = self._classify_data(req_data) attrs, custom_fields = self._fill_defaults(attrs, custom_fields) # For testing purposes, the order of fields in resulting XML # needs to be always the same. attrs = OrderedDict(sorted(attrs.items())) custom_fields = OrderedDict(sorted(custom_fields.items())) requirement = etree.SubElement(parent_element, "requirement", attrs) title_el = etree.SubElement(requirement, "title") title_el.text = title description = req_data.get("description") if description: description_el = etree.SubElement(requirement, "description") description_el.text = description self._fill_custom_fields(requirement, custom_fields)
python
def _requirement_element(self, parent_element, req_data): """Adds requirement XML element.""" req_data = self._transform_result(req_data) if not req_data: return title = req_data.get("title") if not title: logger.warning("Skipping requirement, title is missing") return req_id = req_data.get("id") if not self._check_lookup_prop(req_id): logger.warning( "Skipping requirement `%s`, data missing for selected lookup method", title ) return attrs, custom_fields = self._classify_data(req_data) attrs, custom_fields = self._fill_defaults(attrs, custom_fields) # For testing purposes, the order of fields in resulting XML # needs to be always the same. attrs = OrderedDict(sorted(attrs.items())) custom_fields = OrderedDict(sorted(custom_fields.items())) requirement = etree.SubElement(parent_element, "requirement", attrs) title_el = etree.SubElement(requirement, "title") title_el.text = title description = req_data.get("description") if description: description_el = etree.SubElement(requirement, "description") description_el.text = description self._fill_custom_fields(requirement, custom_fields)
[ "def", "_requirement_element", "(", "self", ",", "parent_element", ",", "req_data", ")", ":", "req_data", "=", "self", ".", "_transform_result", "(", "req_data", ")", "if", "not", "req_data", ":", "return", "title", "=", "req_data", ".", "get", "(", "\"title\"", ")", "if", "not", "title", ":", "logger", ".", "warning", "(", "\"Skipping requirement, title is missing\"", ")", "return", "req_id", "=", "req_data", ".", "get", "(", "\"id\"", ")", "if", "not", "self", ".", "_check_lookup_prop", "(", "req_id", ")", ":", "logger", ".", "warning", "(", "\"Skipping requirement `%s`, data missing for selected lookup method\"", ",", "title", ")", "return", "attrs", ",", "custom_fields", "=", "self", ".", "_classify_data", "(", "req_data", ")", "attrs", ",", "custom_fields", "=", "self", ".", "_fill_defaults", "(", "attrs", ",", "custom_fields", ")", "# For testing purposes, the order of fields in resulting XML", "# needs to be always the same.", "attrs", "=", "OrderedDict", "(", "sorted", "(", "attrs", ".", "items", "(", ")", ")", ")", "custom_fields", "=", "OrderedDict", "(", "sorted", "(", "custom_fields", ".", "items", "(", ")", ")", ")", "requirement", "=", "etree", ".", "SubElement", "(", "parent_element", ",", "\"requirement\"", ",", "attrs", ")", "title_el", "=", "etree", ".", "SubElement", "(", "requirement", ",", "\"title\"", ")", "title_el", ".", "text", "=", "title", "description", "=", "req_data", ".", "get", "(", "\"description\"", ")", "if", "description", ":", "description_el", "=", "etree", ".", "SubElement", "(", "requirement", ",", "\"description\"", ")", "description_el", ".", "text", "=", "description", "self", ".", "_fill_custom_fields", "(", "requirement", ",", "custom_fields", ")" ]
Adds requirement XML element.
[ "Adds", "requirement", "XML", "element", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/requirements_exporter.py#L159-L195
train
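The element-building pattern above, shrunk to its core. The etree in the record is presumably lxml.etree; this sketch uses the standard-library ElementTree, whose Element/SubElement calls look the same for this usage, and the attribute values are invented:

from collections import OrderedDict
import xml.etree.ElementTree as etree

req_data = {'id': 'REQ-1', 'severity': 'should_have'}  # illustrative only
attrs = OrderedDict(sorted(req_data.items()))  # stable field order, as above

requirement = etree.Element('requirement', attrs)
etree.SubElement(requirement, 'title').text = 'Example requirement'
print(etree.tostring(requirement).decode())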
mkoura/dump2polarion
dump2polarion/exporters/requirements_exporter.py
RequirementExport.export
def export(self): """Returns requirements XML.""" top = self._top_element() properties = self._properties_element(top) self._fill_requirements(top) self._fill_lookup_prop(properties) return utils.prettify_xml(top)
python
def export(self): """Returns requirements XML.""" top = self._top_element() properties = self._properties_element(top) self._fill_requirements(top) self._fill_lookup_prop(properties) return utils.prettify_xml(top)
[ "def", "export", "(", "self", ")", ":", "top", "=", "self", ".", "_top_element", "(", ")", "properties", "=", "self", ".", "_properties_element", "(", "top", ")", "self", ".", "_fill_requirements", "(", "top", ")", "self", ".", "_fill_lookup_prop", "(", "properties", ")", "return", "utils", ".", "prettify_xml", "(", "top", ")" ]
Returns requirements XML.
[ "Returns", "requirements", "XML", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/requirements_exporter.py#L203-L209
train
mkoura/dump2polarion
dump2polarion/exporters/requirements_exporter.py
RequirementExport.write_xml
def write_xml(xml, output_file=None): """Outputs the XML content into a file.""" gen_filename = "requirements-{:%Y%m%d%H%M%S}.xml".format(datetime.datetime.now()) utils.write_xml(xml, output_loc=output_file, filename=gen_filename)
python
def write_xml(xml, output_file=None): """Outputs the XML content into a file.""" gen_filename = "requirements-{:%Y%m%d%H%M%S}.xml".format(datetime.datetime.now()) utils.write_xml(xml, output_loc=output_file, filename=gen_filename)
[ "def", "write_xml", "(", "xml", ",", "output_file", "=", "None", ")", ":", "gen_filename", "=", "\"requirements-{:%Y%m%d%H%M%S}.xml\"", ".", "format", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ")", "utils", ".", "write_xml", "(", "xml", ",", "output_loc", "=", "output_file", ",", "filename", "=", "gen_filename", ")" ]
Outputs the XML content into a file.
[ "Outputs", "the", "XML", "content", "into", "a", "file", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/requirements_exporter.py#L212-L215
train
SpringerPE/python-cfconfigurator
cfconfigurator/uaa.py
UAA._client
def _client(self, id, secret): """Performs client login with the provided credentials""" url = self.api_url + self.auth_token_url auth_string = '%s:%s' % (id, secret) authorization = base64.b64encode(auth_string.encode()).decode() headers = { 'Authorization': "Basic " + authorization, 'Content-Type': "application/x-www-form-urlencoded" } params = { 'grant_type': 'client_credentials', 'response_type': 'token' } return self.session.post(url, params=params, headers=headers)
python
def _client(self, id, secret): """Performs client login with the provided credentials""" url = self.api_url + self.auth_token_url auth_string = '%s:%s' % (id, secret) authorization = base64.b64encode(auth_string.encode()).decode() headers = { 'Authorization': "Basic " + authorization, 'Content-Type': "application/x-www-form-urlencoded" } params = { 'grant_type': 'client_credentials', 'response_type': 'token' } return self.session.post(url, params=params, headers=headers)
[ "def", "_client", "(", "self", ",", "id", ",", "secret", ")", ":", "url", "=", "self", ".", "api_url", "+", "self", ".", "auth_token_url", "auth_string", "=", "'%s:%s'", "%", "(", "id", ",", "secret", ")", "authorization", "=", "base64", ".", "b64encode", "(", "auth_string", ".", "encode", "(", ")", ")", ".", "decode", "(", ")", "headers", "=", "{", "'Authorization'", ":", "\"Basic \"", "+", "authorization", ",", "'Content-Type'", ":", "\"application/x-www-form-urlencoded\"", "}", "params", "=", "{", "'grant_type'", ":", "'client_credentials'", ",", "'response_type'", ":", "'token'", "}", "return", "self", ".", "session", ".", "post", "(", "url", ",", "params", "=", "params", ",", "headers", "=", "headers", ")" ]
Performs client login with the provided credentials
[ "Performs", "client", "login", "with", "the", "provided", "credentials" ]
198b4e00cd9e362abee726c0242c1d5f986eb073
https://github.com/SpringerPE/python-cfconfigurator/blob/198b4e00cd9e362abee726c0242c1d5f986eb073/cfconfigurator/uaa.py#L73-L86
train
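The Authorization header above is standard HTTP Basic auth over the client credentials. Assembled step by step with dummy credentials, the network call omitted:

import base64

client_id, client_secret = 'my-client', 's3cret'  # illustrative values
auth_string = '%s:%s' % (client_id, client_secret)
authorization = base64.b64encode(auth_string.encode()).decode()
headers = {
    'Authorization': 'Basic ' + authorization,
    'Content-Type': 'application/x-www-form-urlencoded',
}
print(headers['Authorization'])  # Basic bXktY2xpZW50OnMzY3JldA==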
oemof/oemof.db
oemof/db/feedin_pg.py
Feedin.aggregate_cap_val
def aggregate_cap_val(self, conn, **kwargs): ''' Returns the normalised feedin profile and installed capacity for a given region. Parameters ---------- region : Region instance region.geom : shapely.geometry object Geo-spatial data with information for location/region-shape. The geometry can be a polygon/multi-polygon or a point. Returns ------- feedin_df : pandas dataframe Dataframe containing the normalised feedin profile of the given region. Index of the dataframe is the hour of the year; columns are 'pv_pwr' and 'wind_pwr'. cap : pandas series Series containing the installed capacity (in W) of PV and wind turbines of the given region. ''' region = kwargs['region'] [pv_df, wind_df, cap] = self.get_timeseries( conn, geometry=region.geom, **kwargs) if kwargs.get('store', False): self.store_full_df(pv_df, wind_df, **kwargs) # Summarize the results to one column for pv and one for wind cap = cap.sum() df = pd.concat([pv_df.sum(axis=1) / cap['pv_pwr'], wind_df.sum(axis=1) / cap['wind_pwr']], axis=1) feedin_df = df.rename(columns={0: 'pv_pwr', 1: 'wind_pwr'}) return feedin_df, cap
python
def aggregate_cap_val(self, conn, **kwargs): ''' Returns the normalised feedin profile and installed capacity for a given region. Parameters ---------- region : Region instance region.geom : shapely.geometry object Geo-spatial data with information for location/region-shape. The geometry can be a polygon/multi-polygon or a point. Returns ------- feedin_df : pandas dataframe Dataframe containing the normalised feedin profile of the given region. Index of the dataframe is the hour of the year; columns are 'pv_pwr' and 'wind_pwr'. cap : pandas series Series containing the installed capacity (in W) of PV and wind turbines of the given region. ''' region = kwargs['region'] [pv_df, wind_df, cap] = self.get_timeseries( conn, geometry=region.geom, **kwargs) if kwargs.get('store', False): self.store_full_df(pv_df, wind_df, **kwargs) # Summarize the results to one column for pv and one for wind cap = cap.sum() df = pd.concat([pv_df.sum(axis=1) / cap['pv_pwr'], wind_df.sum(axis=1) / cap['wind_pwr']], axis=1) feedin_df = df.rename(columns={0: 'pv_pwr', 1: 'wind_pwr'}) return feedin_df, cap
[ "def", "aggregate_cap_val", "(", "self", ",", "conn", ",", "*", "*", "kwargs", ")", ":", "region", "=", "kwargs", "[", "'region'", "]", "[", "pv_df", ",", "wind_df", ",", "cap", "]", "=", "self", ".", "get_timeseries", "(", "conn", ",", "geometry", "=", "region", ".", "geom", ",", "*", "*", "kwargs", ")", "if", "kwargs", ".", "get", "(", "'store'", ",", "False", ")", ":", "self", ".", "store_full_df", "(", "pv_df", ",", "wind_df", ",", "*", "*", "kwargs", ")", "# Summerize the results to one column for pv and one for wind", "cap", "=", "cap", ".", "sum", "(", ")", "df", "=", "pd", ".", "concat", "(", "[", "pv_df", ".", "sum", "(", "axis", "=", "1", ")", "/", "cap", "[", "'pv_pwr'", "]", ",", "wind_df", ".", "sum", "(", "axis", "=", "1", ")", "/", "cap", "[", "'wind_pwr'", "]", "]", ",", "axis", "=", "1", ")", "feedin_df", "=", "df", ".", "rename", "(", "columns", "=", "{", "0", ":", "'pv_pwr'", ",", "1", ":", "'wind_pwr'", "}", ")", "return", "feedin_df", ",", "cap" ]
Returns the normalised feedin profile and installed capacity for a given region. Parameters ---------- region : Region instance region.geom : shapely.geometry object Geo-spatial data with information for location/region-shape. The geometry can be a polygon/multi-polygon or a point. Returns ------- feedin_df : pandas dataframe Dataframe containing the normalised feedin profile of the given region. Index of the dataframe is the hour of the year; columns are 'pv_pwr' and 'wind_pwr'. cap : pandas series Series containing the installed capacity (in W) of PV and wind turbines of the given region.
[ "Returns", "the", "normalised", "feedin", "profile", "and", "installed", "capacity", "for", "a", "given", "region", "." ]
d51ac50187f03a875bd7ce5991ed4772e8b77b93
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/feedin_pg.py#L21-L58
train
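The normalisation step at the end of aggregate_cap_val, isolated: per-plant columns are summed row-wise, then divided by the total installed capacity, giving a per-unit profile. All numbers below are made up:

import pandas as pd

pv_df = pd.DataFrame({'plant_a': [10.0, 20.0], 'plant_b': [30.0, 40.0]})
cap = pd.Series({'pv_pwr': 100.0})  # total installed PV capacity in W

feedin = (pv_df.sum(axis=1) / cap['pv_pwr']).rename('pv_pwr')
print(feedin.tolist())  # [0.4, 0.6]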
davidfokkema/artist
artist/multi_plot.py
MultiPlot.save_assets
def save_assets(self, dest_path): """Save plot assets alongside dest_path. Some plots may have assets, like bitmap files, which need to be saved alongside the rendered plot file. :param dest_path: path of the main output file. """ for idx, subplot in enumerate(self.subplots): subplot.save_assets(dest_path, suffix='_%d' % idx)
python
def save_assets(self, dest_path): """Save plot assets alongside dest_path. Some plots may have assets, like bitmap files, which need to be saved alongside the rendered plot file. :param dest_path: path of the main output file. """ for idx, subplot in enumerate(self.subplots): subplot.save_assets(dest_path, suffix='_%d' % idx)
[ "def", "save_assets", "(", "self", ",", "dest_path", ")", ":", "for", "idx", ",", "subplot", "in", "enumerate", "(", "self", ".", "subplots", ")", ":", "subplot", ".", "save_assets", "(", "dest_path", ",", "suffix", "=", "'_%d'", "%", "idx", ")" ]
Save plot assets alongside dest_path. Some plots may have assets, like bitmap files, which need to be saved alongside the rendered plot file. :param dest_path: path of the main output file.
[ "Save", "plot", "assets", "alongside", "dest_path", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L65-L75
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_empty
def set_empty(self, row, column): """Keep one of the subplots completely empty. :param row,column: specify the subplot. """ subplot = self.get_subplot_at(row, column) subplot.set_empty()
python
def set_empty(self, row, column): """Keep one of the subplots completely empty. :param row,column: specify the subplot. """ subplot = self.get_subplot_at(row, column) subplot.set_empty()
[ "def", "set_empty", "(", "self", ",", "row", ",", "column", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_empty", "(", ")" ]
Keep one of the subplots completely empty. :param row,column: specify the subplot.
[ "Keep", "one", "of", "the", "subplots", "completely", "empty", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L77-L84
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_empty_for_all
def set_empty_for_all(self, row_column_list): """Keep all specified subplots completely empty. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None """ for row, column in row_column_list: self.set_empty(row, column)
python
def set_empty_for_all(self, row_column_list): """Keep all specified subplots completely empty. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None """ for row, column in row_column_list: self.set_empty(row, column)
[ "def", "set_empty_for_all", "(", "self", ",", "row_column_list", ")", ":", "for", "row", ",", "column", "in", "row_column_list", ":", "self", ".", "set_empty", "(", "row", ",", "column", ")" ]
Keep all specified subplots completely empty. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None
[ "Keep", "all", "specified", "subplots", "completely", "empty", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L86-L95
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_title
def set_title(self, row, column, text): """Set a title text. :param row,column: specify the subplot. :param text: title text. """ subplot = self.get_subplot_at(row, column) subplot.set_title(text)
python
def set_title(self, row, column, text): """Set a title text. :param row,column: specify the subplot. :param text: title text. """ subplot = self.get_subplot_at(row, column) subplot.set_title(text)
[ "def", "set_title", "(", "self", ",", "row", ",", "column", ",", "text", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_title", "(", "text", ")" ]
Set a title text. :param row,column: specify the subplot. :param text: title text.
[ "Set", "a", "title", "text", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L97-L105
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_label
def set_label(self, row, column, text, location='upper right', style=None): """Set a label for the subplot. :param row,column: specify the subplot. :param text: the label text. :param location: the location of the label inside the plot. May be one of 'center', 'upper right', 'lower right', 'upper left', 'lower left'. :param style: any TikZ style to style the text. """ subplot = self.get_subplot_at(row, column) subplot.set_label(text, location, style)
python
def set_label(self, row, column, text, location='upper right', style=None): """Set a label for the subplot. :param row,column: specify the subplot. :param text: the label text. :param location: the location of the label inside the plot. May be one of 'center', 'upper right', 'lower right', 'upper left', 'lower left'. :param style: any TikZ style to style the text. """ subplot = self.get_subplot_at(row, column) subplot.set_label(text, location, style)
[ "def", "set_label", "(", "self", ",", "row", ",", "column", ",", "text", ",", "location", "=", "'upper right'", ",", "style", "=", "None", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_label", "(", "text", ",", "location", ",", "style", ")" ]
Set a label for the subplot. :param row,column: specify the subplot. :param text: the label text. :param location: the location of the label inside the plot. May be one of 'center', 'upper right', 'lower right', 'upper left', 'lower left'. :param style: any TikZ style to style the text.
[ "Set", "a", "label", "for", "the", "subplot", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L107-L120
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.show_xticklabels
def show_xticklabels(self, row, column): """Show the x-axis tick labels for a subplot. :param row,column: specify the subplot. """ subplot = self.get_subplot_at(row, column) subplot.show_xticklabels()
python
def show_xticklabels(self, row, column): """Show the x-axis tick labels for a subplot. :param row,column: specify the subplot. """ subplot = self.get_subplot_at(row, column) subplot.show_xticklabels()
[ "def", "show_xticklabels", "(", "self", ",", "row", ",", "column", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "show_xticklabels", "(", ")" ]
Show the x-axis tick labels for a subplot. :param row,column: specify the subplot.
[ "Show", "the", "x", "-", "axis", "tick", "labels", "for", "a", "subplot", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L122-L129
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.show_xticklabels_for_all
def show_xticklabels_for_all(self, row_column_list=None): """Show the x-axis tick labels for all specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None """ if row_column_list is None: for subplot in self.subplots: subplot.show_xticklabels() else: for row, column in row_column_list: self.show_xticklabels(row, column)
python
def show_xticklabels_for_all(self, row_column_list=None): """Show the x-axis tick labels for all specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None """ if row_column_list is None: for subplot in self.subplots: subplot.show_xticklabels() else: for row, column in row_column_list: self.show_xticklabels(row, column)
[ "def", "show_xticklabels_for_all", "(", "self", ",", "row_column_list", "=", "None", ")", ":", "if", "row_column_list", "is", "None", ":", "for", "subplot", "in", "self", ".", "subplots", ":", "subplot", ".", "show_xticklabels", "(", ")", "else", ":", "for", "row", ",", "column", "in", "row_column_list", ":", "self", ".", "show_xticklabels", "(", "row", ",", "column", ")" ]
Show the x-axis tick labels for all specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None
[ "Show", "the", "x", "-", "axis", "tick", "labels", "for", "all", "specified", "subplots", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L131-L144
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.show_yticklabels
def show_yticklabels(self, row, column): """Show the y-axis tick labels for a subplot. :param row,column: specify the subplot. """ subplot = self.get_subplot_at(row, column) subplot.show_yticklabels()
python
def show_yticklabels(self, row, column): """Show the y-axis tick labels for a subplot. :param row,column: specify the subplot. """ subplot = self.get_subplot_at(row, column) subplot.show_yticklabels()
[ "def", "show_yticklabels", "(", "self", ",", "row", ",", "column", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "show_yticklabels", "(", ")" ]
Show the y-axis tick labels for a subplot. :param row,column: specify the subplot.
[ "Show", "the", "y", "-", "axis", "tick", "labels", "for", "a", "subplot", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L146-L153
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.show_yticklabels_for_all
def show_yticklabels_for_all(self, row_column_list=None): """Show the y-axis tick labels for all specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None """ if row_column_list is None: for subplot in self.subplots: subplot.show_yticklabels() else: for row, column in row_column_list: self.show_yticklabels(row, column)
python
def show_yticklabels_for_all(self, row_column_list=None): """Show the y-axis tick labels for all specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None """ if row_column_list is None: for subplot in self.subplots: subplot.show_yticklabels() else: for row, column in row_column_list: self.show_yticklabels(row, column)
[ "def", "show_yticklabels_for_all", "(", "self", ",", "row_column_list", "=", "None", ")", ":", "if", "row_column_list", "is", "None", ":", "for", "subplot", "in", "self", ".", "subplots", ":", "subplot", ".", "show_yticklabels", "(", ")", "else", ":", "for", "row", ",", "column", "in", "row_column_list", ":", "self", ".", "show_yticklabels", "(", "row", ",", "column", ")" ]
Show the y-axis tick labels for all specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None
[ "Show", "the", "y", "-", "axis", "tick", "labels", "for", "all", "specified", "subplots", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L155-L168
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_xlimits
def set_xlimits(self, row, column, min=None, max=None): """Set x-axis limits of a subplot. :param row,column: specify the subplot. :param min: minimal axis value :param max: maximum axis value """ subplot = self.get_subplot_at(row, column) subplot.set_xlimits(min, max)
python
def set_xlimits(self, row, column, min=None, max=None): """Set x-axis limits of a subplot. :param row,column: specify the subplot. :param min: minimal axis value :param max: maximum axis value """ subplot = self.get_subplot_at(row, column) subplot.set_xlimits(min, max)
[ "def", "set_xlimits", "(", "self", ",", "row", ",", "column", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_xlimits", "(", "min", ",", "max", ")" ]
Set x-axis limits of a subplot. :param row,column: specify the subplot. :param min: minimal axis value :param max: maximum axis value
[ "Set", "x", "-", "axis", "limits", "of", "a", "subplot", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L200-L209
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_xlimits_for_all
def set_xlimits_for_all(self, row_column_list=None, min=None, max=None): """Set x-axis limits of specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None :param min: minimal axis value :param max: maximum axis value """ if row_column_list is None: self.limits['xmin'] = min self.limits['xmax'] = max else: for row, column in row_column_list: self.set_xlimits(row, column, min, max)
python
def set_xlimits_for_all(self, row_column_list=None, min=None, max=None): """Set x-axis limits of specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None :param min: minimal axis value :param max: maximum axis value """ if row_column_list is None: self.limits['xmin'] = min self.limits['xmax'] = max else: for row, column in row_column_list: self.set_xlimits(row, column, min, max)
[ "def", "set_xlimits_for_all", "(", "self", ",", "row_column_list", "=", "None", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "if", "row_column_list", "is", "None", ":", "self", ".", "limits", "[", "'xmin'", "]", "=", "min", "self", ".", "limits", "[", "'xmax'", "]", "=", "max", "else", ":", "for", "row", ",", "column", "in", "row_column_list", ":", "self", ".", "set_xlimits", "(", "row", ",", "column", ",", "min", ",", "max", ")" ]
Set x-axis limits of specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None :param min: minimal axis value :param max: maximum axis value
[ "Set", "x", "-", "axis", "limits", "of", "specified", "subplots", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L211-L226
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_ylimits
def set_ylimits(self, row, column, min=None, max=None): """Set y-axis limits of a subplot. :param row,column: specify the subplot. :param min: minimal axis value :param max: maximum axis value """ subplot = self.get_subplot_at(row, column) subplot.set_ylimits(min, max)
python
def set_ylimits(self, row, column, min=None, max=None): """Set y-axis limits of a subplot. :param row,column: specify the subplot. :param min: minimal axis value :param max: maximum axis value """ subplot = self.get_subplot_at(row, column) subplot.set_ylimits(min, max)
[ "def", "set_ylimits", "(", "self", ",", "row", ",", "column", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_ylimits", "(", "min", ",", "max", ")" ]
Set y-axis limits of a subplot. :param row,column: specify the subplot. :param min: minimal axis value :param max: maximum axis value
[ "Set", "y", "-", "axis", "limits", "of", "a", "subplot", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L228-L237
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_ylimits_for_all
def set_ylimits_for_all(self, row_column_list=None, min=None, max=None): """Set y-axis limits of specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None :param min: minimal axis value :param max: maximum axis value """ if row_column_list is None: self.limits['ymin'] = min self.limits['ymax'] = max else: for row, column in row_column_list: self.set_ylimits(row, column, min, max)
python
def set_ylimits_for_all(self, row_column_list=None, min=None, max=None): """Set y-axis limits of specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None :param min: minimal axis value :param max: maximum axis value """ if row_column_list is None: self.limits['ymin'] = min self.limits['ymax'] = max else: for row, column in row_column_list: self.set_ylimits(row, column, min, max)
[ "def", "set_ylimits_for_all", "(", "self", ",", "row_column_list", "=", "None", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "if", "row_column_list", "is", "None", ":", "self", ".", "limits", "[", "'ymin'", "]", "=", "min", "self", ".", "limits", "[", "'ymax'", "]", "=", "max", "else", ":", "for", "row", ",", "column", "in", "row_column_list", ":", "self", ".", "set_ylimits", "(", "row", ",", "column", ",", "min", ",", "max", ")" ]
Set y-axis limits of specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None :param min: minimal axis value :param max: maximum axis value
[ "Set", "y", "-", "axis", "limits", "of", "specified", "subplots", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L239-L254
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_slimits
def set_slimits(self, row, column, min, max): """Set limits for the point sizes. :param row,column: specify the subplot. :param min: point size for the lowest value. :param max: point size for the highest value. """ subplot = self.get_subplot_at(row, column) subplot.set_slimits(min, max)
python
def set_slimits(self, row, column, min, max): """Set limits for the point sizes. :param row,column: specify the subplot. :param min: point size for the lowest value. :param max: point size for the highest value. """ subplot = self.get_subplot_at(row, column) subplot.set_slimits(min, max)
[ "def", "set_slimits", "(", "self", ",", "row", ",", "column", ",", "min", ",", "max", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_slimits", "(", "min", ",", "max", ")" ]
Set limits for the point sizes. :param row,column: specify the subplot. :param min: point size for the lowest value. :param max: point size for the highest value.
[ "Set", "limits", "for", "the", "point", "sizes", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L285-L293
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_ytick_labels
def set_ytick_labels(self, row, column, labels): """Manually specify the y-axis tick labels. :param row,column: specify the subplot. :param labels: list of tick labels. """ subplot = self.get_subplot_at(row, column) subplot.set_ytick_labels(labels)
python
def set_ytick_labels(self, row, column, labels): """Manually specify the y-axis tick labels. :param row,column: specify the subplot. :param labels: list of tick labels. """ subplot = self.get_subplot_at(row, column) subplot.set_ytick_labels(labels)
[ "def", "set_ytick_labels", "(", "self", ",", "row", ",", "column", ",", "labels", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_ytick_labels", "(", "labels", ")" ]
Manually specify the y-axis tick labels. :param row,column: specify the subplot. :param labels: list of tick labels.
[ "Manually", "specify", "the", "y", "-", "axis", "tick", "labels", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L456-L464
train
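A sketch of supplying custom tick labels; whether matching tick positions must be set separately through a companion set_yticks-style method is an assumption not confirmed by this record.

# Replace the y-axis tick labels of subplot (1, 0) with custom text.
plot.set_ytick_labels(1, 0, ['low', 'medium', 'high'])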
davidfokkema/artist
artist/multi_plot.py
MultiPlot.get_subplot_at
def get_subplot_at(self, row, column):
        """Return the subplot at row, column position.

        :param row,column: specify the subplot.

        """
        idx = row * self.columns + column
        return self.subplots[idx]
python
def get_subplot_at(self, row, column):
        """Return the subplot at row, column position.

        :param row,column: specify the subplot.

        """
        idx = row * self.columns + column
        return self.subplots[idx]
[ "def", "get_subplot_at", "(", "self", ",", "row", ",", "column", ")", ":", "idx", "=", "row", "*", "self", ".", "columns", "+", "column", "return", "self", ".", "subplots", "[", "idx", "]" ]
Return the subplot at row, column position.

:param row,column: specify the subplot.
[ "Return", "the", "subplot", "at", "row", "column", "position", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L482-L489
train
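The subplots list is stored row-major, so the index arithmetic in the body works out as follows for a hypothetical 2x3 grid:

# For rows=2, columns=3 the flat index of (row=1, column=2) is
#   idx = 1 * 3 + 2 = 5, i.e. the last of the six subplots.
subplot = plot.get_subplot_at(1, 2)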
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_subplot_xlabel
def set_subplot_xlabel(self, row, column, text):
        """Set a label for the x-axis of a subplot.

        :param row,column: specify the subplot.
        :param text: text of the label.

        """
        subplot = self.get_subplot_at(row, column)
        subplot.set_xlabel(text)
python
def set_subplot_xlabel(self, row, column, text):
        """Set a label for the x-axis of a subplot.

        :param row,column: specify the subplot.
        :param text: text of the label.

        """
        subplot = self.get_subplot_at(row, column)
        subplot.set_xlabel(text)
[ "def", "set_subplot_xlabel", "(", "self", ",", "row", ",", "column", ",", "text", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_xlabel", "(", "text", ")" ]
Set a label for the x-axis of a subplot.

:param row,column: specify the subplot.
:param text: text of the label.
[ "Set", "a", "label", "for", "the", "x", "-", "axis", "of", "a", "subplot", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L539-L547
train
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_subplot_ylabel
def set_subplot_ylabel(self, row, column, text):
        """Set a label for the y-axis of a subplot.

        :param row,column: specify the subplot.
        :param text: text of the label.

        """
        subplot = self.get_subplot_at(row, column)
        subplot.set_ylabel(text)
python
def set_subplot_ylabel(self, row, column, text):
        """Set a label for the y-axis of a subplot.

        :param row,column: specify the subplot.
        :param text: text of the label.

        """
        subplot = self.get_subplot_at(row, column)
        subplot.set_ylabel(text)
[ "def", "set_subplot_ylabel", "(", "self", ",", "row", ",", "column", ",", "text", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_ylabel", "(", "text", ")" ]
Set a label for the y-axis of a subplot.

:param row,column: specify the subplot.
:param text: text of the label.
[ "Set", "a", "label", "for", "the", "y", "-", "axis", "of", "a", "subplot", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L549-L557
train
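A sketch combining this record with the preceding set_subplot_xlabel one to label a single subplot's axes. The siunitx-style LaTeX markup is an assumption based on artist producing LaTeX output.

# Label only the lower-left subplot, e.g. when the outer subplots
# share their axes and need no labels of their own.
plot.set_subplot_xlabel(1, 0, r'Time [\si{\second}]')
plot.set_subplot_ylabel(1, 0, r'Amplitude [\si{\volt}]')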
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_scalebar_for_all
def set_scalebar_for_all(self, row_column_list=None, location='lower right'):
        """Show marker area scale for subplots.

        :param row_column_list: a list containing (row, column) tuples to
            specify the subplots, or None to indicate *all* subplots.
        :param location: the location of the label inside the plot. May be
            one of 'center', 'upper right', 'lower right', 'upper left',
            'lower left'.

        """
        if row_column_list is None:
            for subplot in self.subplots:
                subplot.set_scalebar(location)
        else:
            for row, column in row_column_list:
                subplot = self.get_subplot_at(row, column)
                subplot.set_scalebar(location)
python
def set_scalebar_for_all(self, row_column_list=None, location='lower right'):
        """Show marker area scale for subplots.

        :param row_column_list: a list containing (row, column) tuples to
            specify the subplots, or None to indicate *all* subplots.
        :param location: the location of the label inside the plot. May be
            one of 'center', 'upper right', 'lower right', 'upper left',
            'lower left'.

        """
        if row_column_list is None:
            for subplot in self.subplots:
                subplot.set_scalebar(location)
        else:
            for row, column in row_column_list:
                subplot = self.get_subplot_at(row, column)
                subplot.set_scalebar(location)
[ "def", "set_scalebar_for_all", "(", "self", ",", "row_column_list", "=", "None", ",", "location", "=", "'lower right'", ")", ":", "if", "row_column_list", "is", "None", ":", "for", "subplot", "in", "self", ".", "subplots", ":", "subplot", ".", "set_scalebar", "(", "location", ")", "else", ":", "for", "row", ",", "column", "in", "row_column_list", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_scalebar", "(", "location", ")" ]
Show marker area scale for subplots.

:param row_column_list: a list containing (row, column) tuples to
    specify the subplots, or None to indicate *all* subplots.
:param location: the location of the label inside the plot. May be one
    of 'center', 'upper right', 'lower right', 'upper left',
    'lower left'.
[ "Show", "marker", "area", "scale", "for", "subplots", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L559-L576
train
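A short sketch of both call styles, using the documented default location and one override:

# Show the marker-area scale in every subplot ...
plot.set_scalebar_for_all()
# ... or only in subplot (0, 0), placed in the upper left corner.
plot.set_scalebar_for_all([(0, 0)], location='upper left')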
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_colorbar
def set_colorbar(self, label='', horizontal=False):
        """Show the colorbar; it will be attached to the last plot.

        Not for the histogram2d, only for the scatter_table. Global
        mlimits should be set for this to properly reflect the colormap
        of each subplot.

        :param label: axis label for the colorbar.
        :param horizontal: boolean, if True the colorbar will be
            horizontal.

        """
        if self.limits['mmin'] is None or self.limits['mmax'] is None:
            warnings.warn('Set (only) global point meta limits to ensure the '
                          'colorbar is correct for all subplots.')
        self.colorbar = {'label': label, 'horizontal': horizontal}
python
def set_colorbar(self, label='', horizontal=False):
        """Show the colorbar; it will be attached to the last plot.

        Not for the histogram2d, only for the scatter_table. Global
        mlimits should be set for this to properly reflect the colormap
        of each subplot.

        :param label: axis label for the colorbar.
        :param horizontal: boolean, if True the colorbar will be
            horizontal.

        """
        if self.limits['mmin'] is None or self.limits['mmax'] is None:
            warnings.warn('Set (only) global point meta limits to ensure the '
                          'colorbar is correct for all subplots.')
        self.colorbar = {'label': label, 'horizontal': horizontal}
[ "def", "set_colorbar", "(", "self", ",", "label", "=", "''", ",", "horizontal", "=", "False", ")", ":", "if", "self", ".", "limits", "[", "'mmin'", "]", "is", "None", "or", "self", ".", "limits", "[", "'mmax'", "]", "is", "None", ":", "warnings", ".", "warn", "(", "'Set (only) global point meta limits to ensure the '", "'colorbar is correct for all subplots.'", ")", "self", ".", "colorbar", "=", "{", "'label'", ":", "label", ",", "'horizontal'", ":", "horizontal", "}" ]
Show the colorbar; it will be attached to the last plot.

Not for the histogram2d, only for the scatter_table. Global mlimits
should be set for this to properly reflect the colormap of each
subplot.

:param label: axis label for the colorbar.
:param horizontal: boolean, if True the colorbar will be horizontal.
[ "Show", "the", "colorbar", "it", "will", "be", "attached", "to", "the", "last", "plot", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L578-L593
train
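Per the warning in the body, global point-meta limits should be set first. A set_mlimits_for_all method is assumed here by analogy with set_ylimits_for_all and the 'mmin'/'mmax' keys in self.limits; it is not confirmed by this record.

# Assumed companion method; gives all subplots one colormap range
# so the single attached colorbar is correct everywhere.
plot.set_mlimits_for_all(min=0, max=1)
plot.set_colorbar(label='Density', horizontal=True)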
davidfokkema/artist
artist/multi_plot.py
MultiPlot.set_axis_options
def set_axis_options(self, row, column, text):
        """Set additional options as plain text."""
        subplot = self.get_subplot_at(row, column)
        subplot.set_axis_options(text)
python
def set_axis_options(self, row, column, text):
        """Set additional options as plain text."""
        subplot = self.get_subplot_at(row, column)
        subplot.set_axis_options(text)
[ "def", "set_axis_options", "(", "self", ",", "row", ",", "column", ",", "text", ")", ":", "subplot", "=", "self", ".", "get_subplot_at", "(", "row", ",", "column", ")", "subplot", ".", "set_axis_options", "(", "text", ")" ]
Set additional options as plain text.
[ "Set", "additionnal", "options", "as", "plain", "text", "." ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L604-L608
train
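Since the options are passed through as plain text, any valid axis option string should work. That artist targets pgfplots is an assumption based on its LaTeX output; 'grid=major' is a standard pgfplots option used for illustration.

# Pass a raw axis-option string through to subplot (0, 0).
plot.set_axis_options(0, 0, 'grid=major')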
openearth/mmi-python
mmi/mmi_client.py
MMIClient.initialize
def initialize(self, configfile=None):
        """
        Initialize the module
        """
        method = "initialize"
        A = None
        metadata = {method: configfile}
        send_array(self.socket, A, metadata)
        A, metadata = recv_array(
            self.socket,
            poll=self.poll,
            poll_timeout=self.poll_timeout,
            flags=self.zmq_flags)
python
def initialize(self, configfile=None):
        """
        Initialize the module
        """
        method = "initialize"
        A = None
        metadata = {method: configfile}
        send_array(self.socket, A, metadata)
        A, metadata = recv_array(
            self.socket,
            poll=self.poll,
            poll_timeout=self.poll_timeout,
            flags=self.zmq_flags)
[ "def", "initialize", "(", "self", ",", "configfile", "=", "None", ")", ":", "method", "=", "\"initialize\"", "A", "=", "None", "metadata", "=", "{", "method", ":", "configfile", "}", "send_array", "(", "self", ".", "socket", ",", "A", ",", "metadata", ")", "A", ",", "metadata", "=", "recv_array", "(", "self", ".", "socket", ",", "poll", "=", "self", ".", "poll", ",", "poll_timeout", "=", "self", ".", "poll_timeout", ",", "flags", "=", "self", ".", "zmq_flags", ")" ]
Initialize the module
[ "Initialize", "the", "module" ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/mmi_client.py#L56-L69
train
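A hedged sketch of starting a session. The MMIClient constructor argument (a ZMQ endpoint string) and the config file name are assumptions based on the ZMQ socket the methods use; they are not confirmed by this record.

from mmi.mmi_client import MMIClient

client = MMIClient('tcp://localhost:5600')  # assumed endpoint argument
client.initialize('model-config.ini')       # hypothetical config file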
openearth/mmi-python
mmi/mmi_client.py
MMIClient.finalize
def finalize(self):
        """
        Finalize the module
        """
        method = "finalize"
        A = None
        metadata = {method: -1}
        send_array(self.socket, A, metadata)
        A, metadata = recv_array(
            self.socket,
            poll=self.poll,
            poll_timeout=self.poll_timeout,
            flags=self.zmq_flags)
python
def finalize(self):
        """
        Finalize the module
        """
        method = "finalize"
        A = None
        metadata = {method: -1}
        send_array(self.socket, A, metadata)
        A, metadata = recv_array(
            self.socket,
            poll=self.poll,
            poll_timeout=self.poll_timeout,
            flags=self.zmq_flags)
[ "def", "finalize", "(", "self", ")", ":", "method", "=", "\"finalize\"", "A", "=", "None", "metadata", "=", "{", "method", ":", "-", "1", "}", "send_array", "(", "self", ".", "socket", ",", "A", ",", "metadata", ")", "A", ",", "metadata", "=", "recv_array", "(", "self", ".", "socket", ",", "poll", "=", "self", ".", "poll", ",", "poll_timeout", "=", "self", ".", "poll_timeout", ",", "flags", "=", "self", ".", "zmq_flags", ")" ]
Finalize the module
[ "Finalize", "the", "module" ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/mmi_client.py#L71-L84
train
openearth/mmi-python
mmi/mmi_client.py
MMIClient.set_current_time
def set_current_time(self, t):
        """
        Set current time of simulation
        """
        method = "set_current_time"
        A = None
        metadata = {method: t}
        send_array(self.socket, A, metadata)
        A, metadata = recv_array(
            self.socket,
            poll=self.poll,
            poll_timeout=self.poll_timeout,
            flags=self.zmq_flags)
python
def set_current_time(self, t):
        """
        Set current time of simulation
        """
        method = "set_current_time"
        A = None
        metadata = {method: t}
        send_array(self.socket, A, metadata)
        A, metadata = recv_array(
            self.socket,
            poll=self.poll,
            poll_timeout=self.poll_timeout,
            flags=self.zmq_flags)
[ "def", "set_current_time", "(", "self", ",", "t", ")", ":", "method", "=", "\"set_current_time\"", "A", "=", "None", "metadata", "=", "{", "method", ":", "t", "}", "send_array", "(", "self", ".", "socket", ",", "A", ",", "metadata", ")", "A", ",", "metadata", "=", "recv_array", "(", "self", ".", "socket", ",", "poll", "=", "self", ".", "poll", ",", "poll_timeout", "=", "self", ".", "poll_timeout", ",", "flags", "=", "self", ".", "zmq_flags", ")" ]
Set current time of simulation
[ "Set", "current", "time", "of", "simulation" ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/mmi_client.py#L188-L201
train
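A one-line sketch, reusing the hypothetical client from the earlier example:

# Set the model clock to t = 0 seconds; whether the backing model
# actually supports resetting its clock is model-specific.
client.set_current_time(0.0)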
openearth/mmi-python
mmi/mmi_client.py
MMIClient.set_var_slice
def set_var_slice(self, name, start, count, var):
        """
        Set a slice of the variable name, starting at index start and
        spanning count values, to the values of var
        """
        method = "set_var_slice"
        A = var
        metadata = {
            method: name,
            "start": start,
            "count": count
        }
        send_array(self.socket, A, metadata)
        A, metadata = recv_array(
            self.socket,
            poll=self.poll,
            poll_timeout=self.poll_timeout,
            flags=self.zmq_flags)
python
def set_var_slice(self, name, start, count, var):
        """
        Set a slice of the variable name, starting at index start and
        spanning count values, to the values of var
        """
        method = "set_var_slice"
        A = var
        metadata = {
            method: name,
            "start": start,
            "count": count
        }
        send_array(self.socket, A, metadata)
        A, metadata = recv_array(
            self.socket,
            poll=self.poll,
            poll_timeout=self.poll_timeout,
            flags=self.zmq_flags)
[ "def", "set_var_slice", "(", "self", ",", "name", ",", "start", ",", "count", ",", "var", ")", ":", "method", "=", "\"set_var_slice\"", "A", "=", "var", "metadata", "=", "{", "method", ":", "name", ",", "\"start\"", ":", "start", ",", "\"count\"", ":", "count", "}", "send_array", "(", "self", ".", "socket", ",", "A", ",", "metadata", ")", "A", ",", "metadata", "=", "recv_array", "(", "self", ".", "socket", ",", "poll", "=", "self", ".", "poll", ",", "poll_timeout", "=", "self", ".", "poll_timeout", ",", "flags", "=", "self", ".", "zmq_flags", ")" ]
Set a slice of the variable name, starting at index start and spanning count values, to the values of var
[ "Set", "the", "variable", "name", "with", "the", "values", "of", "var" ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/mmi_client.py#L218-L234
train
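A sketch of writing a contiguous slice; the variable name, value, and shape are made up for illustration.

import numpy as np

# Overwrite ten values of a hypothetical 'waterlevel' variable,
# starting at flat index 100.
values = np.full(10, 1.5)
client.set_var_slice('waterlevel', start=100, count=10, var=values)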
openearth/mmi-python
mmi/mmi_client.py
MMIClient.update
def update(self, dt):
        """
        Advance the module with timestep dt
        """
        method = "update"
        A = None
        metadata = {method: dt}
        send_array(self.socket, A, metadata)
        A, metadata = recv_array(
            self.socket,
            poll=self.poll,
            poll_timeout=self.poll_timeout,
            flags=self.zmq_flags)
python
def update(self, dt):
        """
        Advance the module with timestep dt
        """
        method = "update"
        A = None
        metadata = {method: dt}
        send_array(self.socket, A, metadata)
        A, metadata = recv_array(
            self.socket,
            poll=self.poll,
            poll_timeout=self.poll_timeout,
            flags=self.zmq_flags)
[ "def", "update", "(", "self", ",", "dt", ")", ":", "method", "=", "\"update\"", "A", "=", "None", "metadata", "=", "{", "method", ":", "dt", "}", "send_array", "(", "self", ".", "socket", ",", "A", ",", "metadata", ")", "A", ",", "metadata", "=", "recv_array", "(", "self", ".", "socket", ",", "poll", "=", "self", ".", "poll", ",", "poll_timeout", "=", "self", ".", "poll_timeout", ",", "flags", "=", "self", ".", "zmq_flags", ")" ]
Advance the module with timestep dt
[ "Advance", "the", "module", "with", "timestep", "dt" ]
a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/mmi_client.py#L304-L317
train
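Tying the client calls above together into one request/reply session; the config file name, timestep, and loop count are arbitrary.

client.initialize('model-config.ini')  # hypothetical config file
for _ in range(10):
    client.update(60.0)  # advance the model by 60 seconds per call
client.finalize()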