Dataset schema (column name, type, and value-size statistics):

repo: string (lengths 7 to 55)
path: string (lengths 4 to 127)
func_name: string (lengths 1 to 88)
original_string: string (lengths 75 to 19.8k)
language: string (1 distinct value)
code: string (lengths 75 to 19.8k)
code_tokens: list
docstring: string (lengths 3 to 17.3k)
docstring_tokens: list
sha: string (lengths 40 to 40)
url: string (lengths 87 to 242)
partition: string (1 distinct value)
repo: Locu-Unofficial/locu-python
path: locu/api.py
func_name: VenueApiClient.is_open
language: python
code:

def is_open(self, id, time, day):
    """Checks if the venue is open at the given time of day, for a venue id.

    Args:
        id: string venue id
        time: time string in the format "HH:MM:SS", e.g. "12:00:00"
        day: weekday string, e.g. "Monday"

    Returns:
        bool if hours data is available, None otherwise

    Note: the day and time strings can be obtained from a time object,
    if desired, by using time.strftime(), e.g.:
        day = time.strftime('%A', some_time_object)
        time = time.strftime('%H:%M:%S', some_time_object)
    """
    details = self.get_details(id)
    has_data = False
    for obj in details["objects"]:
        hours = obj["open_hours"][day]
        if hours:
            has_data = True
            for interval in hours:
                interval = interval.replace(' ', '').split('-')
                open_time = interval[0]
                close_time = interval[1]
                # lexicographic comparison is valid for zero-padded
                # "HH:MM:SS" strings within a single day
                if open_time < time < close_time:
                    return True
    if has_data:
        return False
    else:
        return None
[ "def", "is_open", "(", "self", ",", "id", ",", "time", ",", "day", ")", ":", "details", "=", "self", ".", "get_details", "(", "id", ")", "has_data", "=", "False", "for", "obj", "in", "details", "[", "\"objects\"", "]", ":", "hours", "=", "obj", "[", "\"open_hours\"", "]", "[", "day", "]", "if", "hours", ":", "has_data", "=", "True", "for", "interval", "in", "hours", ":", "interval", "=", "interval", ".", "replace", "(", "' '", ",", "''", ")", ".", "split", "(", "'-'", ")", "open_time", "=", "interval", "[", "0", "]", "close_time", "=", "interval", "[", "1", "]", "if", "open_time", "<", "time", "<", "close_time", ":", "return", "True", "if", "has_data", ":", "return", "False", "else", ":", "return", "None" ]
Checks if the venue is open at the time of day given a venue id. args: id: string of venue id time: string of the format ex: "12:00:00" day: string of weekday ex: "Monday" returns: Bool if there is hours data available None otherwise Note: can get the string of the day and time from a time object if desired by using time.strftime() ex: day = time.strftime('%A',some_time_object) time = time.strftime('%H:%M:%S',some_time_object)
[ "Checks", "if", "the", "venue", "is", "open", "at", "the", "time", "of", "day", "given", "a", "venue", "id", "." ]
sha: fcdf136b68333ab7055e623591801dd35df3bc45
url: https://github.com/Locu-Unofficial/locu-python/blob/fcdf136b68333ab7055e623591801dd35df3bc45/locu/api.py#L316-L356
partition: train
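A brief usage sketch for this function; the import path, client constructor, and venue id below are assumptions for illustration, not part of the record:

# hypothetical setup; only is_open() itself comes from the code above
import time as systime
from locu.api import VenueApiClient  # assumed import path

client = VenueApiClient('YOUR_API_KEY')  # constructor signature assumed

now = systime.localtime()
day = systime.strftime('%A', now)          # e.g. "Monday"
clock = systime.strftime('%H:%M:%S', now)  # e.g. "12:34:56"

status = client.is_open('hypothetical-venue-id', clock, day)
if status is None:
    print('no hours data available')
else:
    print('open' if status else 'closed')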
repo: Locu-Unofficial/locu-python
path: locu/api.py
func_name: MenuItemApiClient.search
language: python
code:

def search(self, name=None, category=None, description=None, price=None,
           price__gt=None, price__gte=None, price__lt=None, price__lte=None,
           location=(None, None), radius=None, tl_coord=(None, None),
           br_coord=(None, None), country=None, locality=None,
           region=None, postal_code=None, street_address=None,
           website_url=None):
    """Locu Menu Item Search API call wrapper.

    Args:
        Note that none of the arguments are required.

        category : list of category types to filter by, from:
            ['restaurant', 'spa', 'beauty salon', 'gym', 'laundry',
             'hair care', 'other']
            type : [string]
        location : tuple of (latitude, longitude) coordinates
            type : tuple(float, float)
        radius : radius around the given lat/long
            type : float
        tl_coord : tuple of (latitude, longitude) for the bounding box
            top-left coordinates
            type : tuple(float, float)
        br_coord : tuple of (latitude, longitude) for the bounding box
            bottom-right coordinates
            type : tuple(float, float)
        name : name of the venue
            type : string
        country : country where the venue is located
            type : string
        locality : locality, e.g. 'San Francisco'
            type : string
        region : region/state, e.g. 'CA'
            type : string
        postal_code : postal code
            type : string
        street_address : address
            type : string
        website_url : filter by website URL
            type : string
        description : filter by the description of the menu item
            type : string
        price : menu items with a particular price value
            type : float
        price__gt : menu items with a price greater than a particular value
            type : float
        price__gte : greater than or equal
            type : float
        price__lt : less than
            type : float
        price__lte : less than or equal
            type : float

    Returns:
        A dictionary with the data returned by the server.

    Raises:
        HttpException with the error message from the server.
    """
    # note: `category` is accepted here but not forwarded to the query parameters
    params = self._get_params(name=name, description=description, price=price,
                              price__gt=price__gt, price__gte=price__gte,
                              price__lt=price__lt, price__lte=price__lte,
                              location=location, radius=radius, tl_coord=tl_coord,
                              br_coord=br_coord, country=country, locality=locality,
                              region=region, postal_code=postal_code,
                              street_address=street_address, website_url=website_url)
    return self._create_query('search', params)
[ "def", "search", "(", "self", ",", "name", "=", "None", ",", "category", "=", "None", ",", "description", "=", "None", ",", "price", "=", "None", ",", "price__gt", "=", "None", ",", "price__gte", "=", "None", ",", "price__lt", "=", "None", ",", "price__lte", "=", "None", ",", "location", "=", "(", "None", ",", "None", ")", ",", "radius", "=", "None", ",", "tl_coord", "=", "(", "None", ",", "None", ")", ",", "br_coord", "=", "(", "None", ",", "None", ")", ",", "country", "=", "None", ",", "locality", "=", "None", ",", "region", "=", "None", ",", "postal_code", "=", "None", ",", "street_address", "=", "None", ",", "website_url", "=", "None", ")", ":", "params", "=", "self", ".", "_get_params", "(", "name", "=", "name", ",", "description", "=", "description", ",", "price", "=", "price", ",", "price__gt", "=", "price__gt", ",", "price__gte", "=", "price__gte", ",", "price__lt", "=", "price__lt", ",", "price__lte", "=", "price__lte", ",", "location", "=", "location", ",", "radius", "=", "radius", ",", "tl_coord", "=", "tl_coord", ",", "br_coord", "=", "br_coord", ",", "country", "=", "country", ",", "locality", "=", "locality", ",", "region", "=", "region", ",", "postal_code", "=", "postal_code", ",", "street_address", "=", "street_address", ",", "website_url", "=", "website_url", ")", "return", "self", ".", "_create_query", "(", "'search'", ",", "params", ")" ]
Locu Menu Item Search API Call Wrapper Args: *Note that none of the arguments are required category : List of category types that need to be filtered by: ['restaurant', 'spa', 'beauty salon', 'gym', 'laundry', 'hair care', 'other'] type : [string] location : Tuple that consists of (latitude, longtitude) coordinates type : tuple(float, float) radius : Radius around the given lat, long type : float tl_coord : Tuple that consists of (latitude, longtitude) for bounding box top left coordinates type : tuple(float, float) br_coord : Tuple that consists of (latitude, longtitude) for bounding box bottom right coordinates type : tuple(float, float) name : Name of the venue type : string country : Country where venue is located type : string locality : Locality. Ex 'San Francisco' type : string region : Region/state. Ex. 'CA' type : string postal_code : Postal code type : string street_address : Address type : string website_url : Filter by the a website url type : string description : Filter by description of the menu item type : string price : get menu items with a particular price value type : float price__gt : get menu items with a value greater than particular type : float price__gte : greater than or equal type : float price__lt : less than type : float price__lte : less than or equal type : float Returns: A dictionary with a data returned by the server Raises: HttpException with the error message from the server
[ "Locu", "Menu", "Item", "Search", "API", "Call", "Wrapper" ]
sha: fcdf136b68333ab7055e623591801dd35df3bc45
url: https://github.com/Locu-Unofficial/locu-python/blob/fcdf136b68333ab7055e623591801dd35df3bc45/locu/api.py#L366-L428
partition: train
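A short usage sketch; the import path, constructor, and the 'objects' response key are assumptions (the key is borrowed from the is_open record above), not guaranteed by this record:

# hypothetical usage of the menu item search wrapper
from locu.api import MenuItemApiClient  # assumed import path

client = MenuItemApiClient('YOUR_API_KEY')  # constructor signature assumed

# all arguments are optional keyword filters; mix free-text and price filters
results = client.search(name='burrito', locality='San Francisco',
                        region='CA', price__lte=10.0)
for item in results.get('objects', []):
    print(item)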
repo: lsst-sqre/documenteer
path: documenteer/stackdocs/stackcli.py
func_name: main
language: python
code:

def main(ctx, root_project_dir, verbose):
    """stack-docs is a CLI for building LSST Stack documentation, such as
    pipelines.lsst.io.

    This command should be run on the "main" documentation repository, namely
    https://github.com/lsst/pipelines_lsst_io.

    The stack-docs command replaces the usual Makefile and sphinx-build system
    for Sphinx projects. This dedicated tool provides subcommands that are
    engineered specifically for building the ``pipelines_lsst_io`` project.

    The key commands provided by stack-docs are:

    - ``stack-docs build``: compiles the pipelines.lsst.io site from the
      ``pipelines_lsst_io`` repository and linked packages.

    - ``stack-docs clean``: removes build products. Use this command to clear
      the build cache.

    See also: package-docs, a tool for building previews of package
    documentation.

    For more information about stack-docs, see https://documenteer.lsst.io.
    """
    root_project_dir = discover_conf_py_directory(root_project_dir)

    # Subcommands should use the click.pass_obj decorator to get this
    # ctx.obj object as the first argument.
    ctx.obj = {'root_project_dir': root_project_dir, 'verbose': verbose}

    # Set up application logging. This ensures that only documenteer's
    # logger is activated. If necessary, we can add other apps' loggers too.
    if verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logger = logging.getLogger('documenteer')
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(log_level)
[ "def", "main", "(", "ctx", ",", "root_project_dir", ",", "verbose", ")", ":", "root_project_dir", "=", "discover_conf_py_directory", "(", "root_project_dir", ")", "# Subcommands should use the click.pass_obj decorator to get this", "# ctx.obj object as the first argument.", "ctx", ".", "obj", "=", "{", "'root_project_dir'", ":", "root_project_dir", ",", "'verbose'", ":", "verbose", "}", "# Set up application logging. This ensures that only documenteer's", "# logger is activated. If necessary, we can add other app's loggers too.", "if", "verbose", ":", "log_level", "=", "logging", ".", "DEBUG", "else", ":", "log_level", "=", "logging", ".", "INFO", "logger", "=", "logging", ".", "getLogger", "(", "'documenteer'", ")", "logger", ".", "addHandler", "(", "logging", ".", "StreamHandler", "(", ")", ")", "logger", ".", "setLevel", "(", "log_level", ")" ]
stack-docs is a CLI for building LSST Stack documentation, such as pipelines.lsst.io. This command should be run on the "main" documentation repository, namely https://github.com/lsst/pipelines_lsst_io. The stack-docs command replaces the usual Makefile and sphinx-build system for Sphinx projects. This dedicated tool provide subcommands that are engineered specifically for building the ``pipelines_lsst_io`` project. The key commands provided by stack-docs are: - ``stack-docs build``: compile the pipelines.lsst.io site from the ``pipelines_lsst_io`` repository and linked packages. - ``stack-docs clean``: removes build products. Use this command to clear the build cache. See also: package-docs, a tool for building previews of package documentation. For more information about stack-docs, see https://documenteer.lsst.io.
[ "stack", "-", "docs", "is", "a", "CLI", "for", "building", "LSST", "Stack", "documentation", "such", "as", "pipelines", ".", "lsst", ".", "io", "." ]
sha: 75f02901a80042b28d074df1cc1dca32eb8e38c8
url: https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/stackcli.py#L38-L77
partition: train
repo: lsst-sqre/documenteer
path: documenteer/stackdocs/stackcli.py
func_name: help
language: python
code:

def help(ctx, topic, **kw):
    """Show help for any command.
    """
    # The help command implementation is taken from
    # https://www.burgundywall.com/post/having-click-help-subcommand
    if topic is None:
        click.echo(ctx.parent.get_help())
    else:
        click.echo(main.commands[topic].get_help(ctx))
[ "def", "help", "(", "ctx", ",", "topic", ",", "*", "*", "kw", ")", ":", "# The help command implementation is taken from", "# https://www.burgundywall.com/post/having-click-help-subcommand", "if", "topic", "is", "None", ":", "click", ".", "echo", "(", "ctx", ".", "parent", ".", "get_help", "(", ")", ")", "else", ":", "click", ".", "echo", "(", "main", ".", "commands", "[", "topic", "]", ".", "get_help", "(", "ctx", ")", ")" ]
Show help for any command.
[ "Show", "help", "for", "any", "command", "." ]
sha: 75f02901a80042b28d074df1cc1dca32eb8e38c8
url: https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/stackcli.py#L83-L91
partition: train
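A minimal, self-contained sketch of the same help-subcommand pattern in click; the toy group and command names are illustrative, not from documenteer:

import click

@click.group()
@click.pass_context
def cli(ctx):
    """Toy CLI demonstrating the help-subcommand pattern."""
    ctx.obj = {}

@cli.command()
@click.argument('topic', default=None, required=False)
@click.pass_context
def help(ctx, topic, **kw):
    # with no topic, show the parent group's help; otherwise the subcommand's
    if topic is None:
        click.echo(ctx.parent.get_help())
    else:
        click.echo(cli.commands[topic].get_help(ctx))

if __name__ == '__main__':
    cli()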
repo: lsst-sqre/documenteer
path: documenteer/stackdocs/stackcli.py
func_name: clean
language: python
code:

def clean(ctx):
    """Clean Sphinx build products.

    Use this command to clean out build products after a failed build, or in
    preparation for running a build from a clean state.

    This command removes the following directories from the
    ``pipelines_lsst_io`` directory:

    - ``_build`` (the Sphinx build itself)
    - ``modules`` (symlinks to the module doc directories of Stack packages)
    - ``packages`` (symlinks to the package doc directories of Stack packages)
    - ``py-api`` (pages created by automodapi for the Python API reference)
    """
    logger = logging.getLogger(__name__)

    dirnames = ['py-api', '_build', 'modules', 'packages']
    dirnames = [os.path.join(ctx.obj['root_project_dir'], dirname)
                for dirname in dirnames]
    for dirname in dirnames:
        if os.path.isdir(dirname):
            shutil.rmtree(dirname)
            logger.debug('Cleaned up %r', dirname)
        else:
            logger.debug('Did not clean up %r (missing)', dirname)
[ "def", "clean", "(", "ctx", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "dirnames", "=", "[", "'py-api'", ",", "'_build'", ",", "'modules'", ",", "'packages'", "]", "dirnames", "=", "[", "os", ".", "path", ".", "join", "(", "ctx", ".", "obj", "[", "'root_project_dir'", "]", ",", "dirname", ")", "for", "dirname", "in", "dirnames", "]", "for", "dirname", "in", "dirnames", ":", "if", "os", ".", "path", ".", "isdir", "(", "dirname", ")", ":", "shutil", ".", "rmtree", "(", "dirname", ")", "logger", ".", "debug", "(", "'Cleaned up %r'", ",", "dirname", ")", "else", ":", "logger", ".", "debug", "(", "'Did not clean up %r (missing)'", ",", "dirname", ")" ]
Clean Sphinx build products. Use this command to clean out build products after a failed build, or in preparation for running a build from a clean state. This command removes the following directories from the ``pipelines_lsst_io`` directory: - ``_build`` (the Sphinx build itself) - ``modules`` (symlinks to the module doc directories of Stack packages) - ``packages`` (symlinks to the package doc directories of Stack packages) - ``py-api`` (pages created by automodapi for the Python API reference)
[ "Clean", "Sphinx", "build", "products", "." ]
sha: 75f02901a80042b28d074df1cc1dca32eb8e38c8
url: https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/stackcli.py#L134-L158
partition: train
repo: ehansis/ozelot
path: examples/leonardo/leonardo/kvstore/models.py
func_name: Entity.query_with_attributes
language: python
code:

def query_with_attributes(type_to_query, client):
    """Query all entities of a specific type, with their attributes

    Args:
        type_to_query (str): type of entity to query
        client: DB client to perform query with

    Returns:
        pandas.DataFrame: table of entities, with attributes as columns
    """
    session = client.create_session()

    # query all data
    query = session.query(Attribute.name, Attribute.value, Entity.id) \
        .join(Entity) \
        .filter(Entity.type == type_to_query)
    df = client.df_query(query)

    session.close()

    # don't store NaN values
    df = df.dropna(how='any')

    # pivot attribute names to columns, drop column names to one level
    # ('unstack' generated multi-level names)
    df = df.set_index(['id', 'name']).unstack().reset_index()
    # noinspection PyUnresolvedReferences
    df.columns = ['id'] + list(df.columns.get_level_values(1)[1:])

    return df
[ "def", "query_with_attributes", "(", "type_to_query", ",", "client", ")", ":", "session", "=", "client", ".", "create_session", "(", ")", "# query all data", "query", "=", "session", ".", "query", "(", "Attribute", ".", "name", ",", "Attribute", ".", "value", ",", "Entity", ".", "id", ")", ".", "join", "(", "Entity", ")", ".", "filter", "(", "Entity", ".", "type", "==", "type_to_query", ")", "df", "=", "client", ".", "df_query", "(", "query", ")", "session", ".", "close", "(", ")", "# don't store NaN values", "df", "=", "df", ".", "dropna", "(", "how", "=", "'any'", ")", "# pivot attribute names to columns, drop column names to one level", "# ('unstack' generated multi-level names)", "df", "=", "df", ".", "set_index", "(", "[", "'id'", ",", "'name'", "]", ")", ".", "unstack", "(", ")", ".", "reset_index", "(", ")", "# noinspection PyUnresolvedReferences", "df", ".", "columns", "=", "[", "'id'", "]", "+", "list", "(", "df", ".", "columns", ".", "get_level_values", "(", "1", ")", "[", "1", ":", "]", ")", "return", "df" ]
Query all entities of a specific type, with their attributes Args: type_to_query (str): type of entity to query client: DB client to perform query with Returns: pandas.DataFrame: table of entities, with attributes as columns
[ "Query", "all", "entities", "of", "a", "specific", "type", "with", "their", "attributes" ]
sha: 948675e02eb6fca940450f5cb814f53e97159e5b
url: https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/leonardo/leonardo/kvstore/models.py#L18-L50
partition: train
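The unstack-based pivot at the heart of this function also works standalone; a toy sketch with plain pandas (the frame below mimics what the Attribute/Entity query returns, with invented values):

import pandas as pd

# long-format rows: one attribute name/value pair per entity id
df = pd.DataFrame({
    'name':  ['color', 'size', 'color', 'size'],
    'value': ['red', 'L', 'blue', 'M'],
    'id':    [1, 1, 2, 2],
})

# pivot attribute names to columns, then flatten the multi-level names
df = df.set_index(['id', 'name']).unstack().reset_index()
df.columns = ['id'] + list(df.columns.get_level_values(1)[1:])
print(df)
#    id color size
# 0   1   red    L
# 1   2  blue    M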
repo: zsimic/runez
path: src/runez/serialize.py
func_name: Serializable.reset
language: python
code:

def reset(self):
    """
    Reset all fields of this object to class defaults
    """
    for name in self.__dict__:
        if name.startswith("_"):
            continue
        attr = getattr(self, name)
        # replace each value with a fresh instance of its own class;
        # falsy values (None, 0, "") are kept as-is by the `and`
        setattr(self, name, attr and attr.__class__())
[ "def", "reset", "(", "self", ")", ":", "for", "name", "in", "self", ".", "__dict__", ":", "if", "name", ".", "startswith", "(", "\"_\"", ")", ":", "continue", "attr", "=", "getattr", "(", "self", ",", "name", ")", "setattr", "(", "self", ",", "name", ",", "attr", "and", "attr", ".", "__class__", "(", ")", ")" ]
Reset all fields of this object to class defaults
[ "Reset", "all", "fields", "of", "this", "object", "to", "class", "defaults" ]
sha: 14363b719a1aae1528859a501a22d075ce0abfcc
url: https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/serialize.py#L88-L97
partition: train
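A self-contained sketch of the same reset idiom on a toy class (the class below is illustrative, not part of runez):

class Settings:
    def __init__(self):
        self.names = ['a', 'b']
        self.count = 3
        self._internal = 'kept'

    def reset(self):
        # same logic as Serializable.reset above
        for name in self.__dict__:
            if name.startswith('_'):
                continue
            attr = getattr(self, name)
            setattr(self, name, attr and attr.__class__())

s = Settings()
s.reset()
print(s.names, s.count, s._internal)  # [] 0 kept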
repo: ehansis/ozelot
path: examples/eurominder/eurominder/pipeline.py
func_name: geojson_polygon_to_mask
language: python
code:

def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
    """Convert a GeoJSON polygon feature to a numpy array

    Args:
        feature (pygeoj.Feature): polygon feature to draw
        shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
        lat_idx (func): function converting a latitude to the (fractional) row index in the map
        lon_idx (func): function converting a longitude to the (fractional) column index in the map

    Returns:
        np.array: mask, background is zero, foreground is one
    """
    import matplotlib
    # specify the 'agg' renderer; the Mac renderer does not support what we want to do below
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    from matplotlib import patches
    import numpy as np

    # we can only do polygons right now
    if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
        raise ValueError("Cannot handle feature of type " + feature.geometry.type)

    # fictitious dpi - doesn't matter in the end
    dpi = 100

    # -- start documentation include: poly-setup
    # make a new figure with no frame and no axes, with the correct size
    fig = plt.figure(frameon=False, dpi=dpi)
    fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    # noinspection PyTypeChecker
    ax.set_xlim([0, shape[1]])
    # noinspection PyTypeChecker
    ax.set_ylim([0, shape[0]])
    fig.add_axes(ax)
    # -- end documentation include: poly-setup

    # for normal polygons make coordinates iterable
    if feature.geometry.type == 'Polygon':
        coords = [feature.geometry.coordinates]
    else:
        coords = feature.geometry.coordinates

    for poly_coords in coords:
        # the polygon may contain multiple outlines; the first is
        # always the outer one, the others are 'holes'
        for i, outline in enumerate(poly_coords):
            # inside/outside fill value: the figure background is white by
            # default, so draw an inverted polygon and invert again later
            value = 0. if i == 0 else 1.

            # convert lats/lons to row/column indices in the array
            outline = np.array(outline)
            xs = lon_idx(outline[:, 0])
            ys = lat_idx(outline[:, 1])

            # draw the polygon
            poly = patches.Polygon(list(zip(xs, ys)),
                                   facecolor=(value, value, value),
                                   edgecolor='none',
                                   antialiased=True)
            ax.add_patch(poly)

    # -- start documentation include: poly-extract
    # extract the figure to a numpy array
    fig.canvas.draw()
    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')

    # reshape to a proper numpy array, keep one channel only
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
    # -- end documentation include: poly-extract

    # make sure we get the right shape back
    assert data.shape[0] == shape[0]
    assert data.shape[1] == shape[1]

    # convert from uints back to floats and invert to get a black background
    data = 1. - data.astype(float) / 255.  # type: np.array

    # image is flipped vertically w.r.t. the map
    data = data[::-1, :]

    # done, clean up
    plt.close('all')

    return data
[ "def", "geojson_polygon_to_mask", "(", "feature", ",", "shape", ",", "lat_idx", ",", "lon_idx", ")", ":", "import", "matplotlib", "# specify 'agg' renderer, Mac renderer does not support what we want to do below", "matplotlib", ".", "use", "(", "'agg'", ")", "import", "matplotlib", ".", "pyplot", "as", "plt", "from", "matplotlib", "import", "patches", "import", "numpy", "as", "np", "# we can only do polygons right now", "if", "feature", ".", "geometry", ".", "type", "not", "in", "(", "'Polygon'", ",", "'MultiPolygon'", ")", ":", "raise", "ValueError", "(", "\"Cannot handle feature of type \"", "+", "feature", ".", "geometry", ".", "type", ")", "# fictional dpi - don't matter in the end", "dpi", "=", "100", "# -- start documentation include: poly-setup", "# make a new figure with no frame, no axes, with the correct size, black background", "fig", "=", "plt", ".", "figure", "(", "frameon", "=", "False", ",", "dpi", "=", "dpi", ",", ")", "fig", ".", "set_size_inches", "(", "shape", "[", "1", "]", "/", "float", "(", "dpi", ")", ",", "shape", "[", "0", "]", "/", "float", "(", "dpi", ")", ")", "ax", "=", "plt", ".", "Axes", "(", "fig", ",", "[", "0.", ",", "0.", ",", "1.", ",", "1.", "]", ")", "ax", ".", "set_axis_off", "(", ")", "# noinspection PyTypeChecker", "ax", ".", "set_xlim", "(", "[", "0", ",", "shape", "[", "1", "]", "]", ")", "# noinspection PyTypeChecker", "ax", ".", "set_ylim", "(", "[", "0", ",", "shape", "[", "0", "]", "]", ")", "fig", ".", "add_axes", "(", "ax", ")", "# -- end documentation include: poly-setup", "# for normal polygons make coordinates iterable", "if", "feature", ".", "geometry", ".", "type", "==", "'Polygon'", ":", "coords", "=", "[", "feature", ".", "geometry", ".", "coordinates", "]", "else", ":", "coords", "=", "feature", ".", "geometry", ".", "coordinates", "for", "poly_coords", "in", "coords", ":", "# the polygon may contain multiple outlines; the first is", "# always the outer one, the others are 'holes'", "for", "i", ",", "outline", "in", "enumerate", "(", "poly_coords", ")", ":", "# inside/outside fill value: figure background is white by", "# default, draw inverted polygon and invert again later", "value", "=", "0.", "if", "i", "==", "0", "else", "1.", "# convert lats/lons to row/column indices in the array", "outline", "=", "np", ".", "array", "(", "outline", ")", "xs", "=", "lon_idx", "(", "outline", "[", ":", ",", "0", "]", ")", "ys", "=", "lat_idx", "(", "outline", "[", ":", ",", "1", "]", ")", "# draw the polygon", "poly", "=", "patches", ".", "Polygon", "(", "list", "(", "zip", "(", "xs", ",", "ys", ")", ")", ",", "facecolor", "=", "(", "value", ",", "value", ",", "value", ")", ",", "edgecolor", "=", "'none'", ",", "antialiased", "=", "True", ")", "ax", ".", "add_patch", "(", "poly", ")", "# -- start documentation include: poly-extract", "# extract the figure to a numpy array,", "fig", ".", "canvas", ".", "draw", "(", ")", "data", "=", "np", ".", "fromstring", "(", "fig", ".", "canvas", ".", "tostring_rgb", "(", ")", ",", "dtype", "=", "np", ".", "uint8", ",", "sep", "=", "''", ")", "# reshape to a proper numpy array, keep one channel only", "data", "=", "data", ".", "reshape", "(", "fig", ".", "canvas", ".", "get_width_height", "(", ")", "[", ":", ":", "-", "1", "]", "+", "(", "3", ",", ")", ")", "[", ":", ",", ":", ",", "0", "]", "# -- end documentation include: poly-extract", "# make sure we get the right shape back", "assert", "data", ".", "shape", "[", "0", "]", "==", "shape", "[", "0", "]", "assert", "data", ".", "shape", "[", "1", "]", 
"==", "shape", "[", "1", "]", "# convert from uints back to floats and invert to get black background", "data", "=", "1.", "-", "data", ".", "astype", "(", "float", ")", "/", "255.", "# type: np.array", "# image is flipped horizontally w.r.t. map", "data", "=", "data", "[", ":", ":", "-", "1", ",", ":", "]", "# done, clean up", "plt", ".", "close", "(", "'all'", ")", "return", "data" ]
Convert a GeoJSON polygon feature to a numpy array Args: feature (pygeoj.Feature): polygon feature to draw shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in lat_idx (func): function converting a latitude to the (fractional) row index in the map lon_idx (func): function converting a longitude to the (fractional) column index in the map Returns: np.array: mask, background is zero, foreground is one
[ "Convert", "a", "GeoJSON", "polygon", "feature", "to", "a", "numpy", "array" ]
sha: 948675e02eb6fca940450f5cb814f53e97159e5b
url: https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L549-L637
partition: train
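The render-to-array trick in this function works without GeoJSON input; a minimal sketch rasterizing one triangle (this assumes a matplotlib version where the Agg canvas still offers tostring_rgb; newer releases replace it with buffer_rgba):

import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np

shape = (50, 80)  # rows, columns of the target mask
dpi = 100

fig = plt.figure(frameon=False, dpi=dpi)
fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
ax.set_xlim([0, shape[1]])
ax.set_ylim([0, shape[0]])
fig.add_axes(ax)

# draw a black triangle on the default white background
ax.add_patch(patches.Polygon([(10, 10), (70, 10), (40, 40)],
                             facecolor='black', edgecolor='none'))

fig.canvas.draw()
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
mask = 1. - data.astype(float) / 255.  # triangle pixels become ~1
print(mask.shape, round(mask.max(), 2))  # (50, 80) 1.0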
repo: ehansis/ozelot
path: examples/eurominder/eurominder/pipeline.py
func_name: NUTS2InputFile.load
language: python
code:

def load(self):
    """Load data, from default location

    Returns:
        pandas.DataFrame: columns 'key' (NUTS2 code), 'name'
    """
    # read file, keep all values as strings
    df = pd.read_csv(self.input_file, sep=',', quotechar='"',
                     encoding='utf-8', dtype=object)

    # we are only interested in the NUTS code and description; also rename them
    df = df[['NUTS-Code', 'Description']]
    df.columns = ['key', 'name']

    # we only want NUTS2 regions (4-digit codes)
    df = df[df['key'].str.len() == 4]

    # drop 'Extra Regio' codes ending in 'ZZ'
    df = df[df['key'].str[2:] != 'ZZ']

    return df
[ "def", "load", "(", "self", ")", ":", "# read file, keep all values as strings", "df", "=", "pd", ".", "read_csv", "(", "self", ".", "input_file", ",", "sep", "=", "','", ",", "quotechar", "=", "'\"'", ",", "encoding", "=", "'utf-8'", ",", "dtype", "=", "object", ")", "# wer are only interested in the NUTS code and description, rename them also", "df", "=", "df", "[", "[", "'NUTS-Code'", ",", "'Description'", "]", "]", "df", ".", "columns", "=", "[", "'key'", ",", "'name'", "]", "# we only want NUTS2 regions (4-digit codes)", "df", "=", "df", "[", "df", "[", "'key'", "]", ".", "str", ".", "len", "(", ")", "==", "4", "]", "# drop 'Extra Regio' codes ending in 'ZZ'", "df", "=", "df", "[", "df", "[", "'key'", "]", ".", "str", "[", "2", ":", "]", "!=", "'ZZ'", "]", "return", "df" ]
Load data, from default location Returns: pandas.DataFrame: columns 'key' (NUTS2 code), 'name'
[ "Load", "data", "from", "default", "location" ]
sha: 948675e02eb6fca940450f5cb814f53e97159e5b
url: https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L50-L73
partition: train
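The two key filters are easy to check on a toy frame; a quick sketch with made-up codes:

import pandas as pd

df = pd.DataFrame({'key': ['DE', 'DE11', 'DEZZ', 'FR10'],
                   'name': ['Germany', 'Stuttgart', 'Extra Regio', 'Ile de France']})
df = df[df['key'].str.len() == 4]   # keep NUTS2 (4-character) codes only
df = df[df['key'].str[2:] != 'ZZ']  # drop 'Extra Regio' pseudo-regions
print(df['key'].tolist())  # ['DE11', 'FR10']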
repo: ehansis/ozelot
path: examples/eurominder/eurominder/pipeline.py
func_name: EuroStatsInputFile.input_file
language: python
code:

def input_file(self):
    """Returns the input file name, with a default relative path
    """
    return path.join(path.dirname(__file__), 'data', 'tgs{:s}.tsv'.format(self.number))
[ "def", "input_file", "(", "self", ")", ":", "return", "path", ".", "join", "(", "path", ".", "dirname", "(", "__file__", ")", ",", "'data'", ",", "'tgs{:s}.tsv'", ".", "format", "(", "self", ".", "number", ")", ")" ]
Returns the input file name, with a default relative path
[ "Returns", "the", "input", "file", "name", "with", "a", "default", "relative", "path" ]
sha: 948675e02eb6fca940450f5cb814f53e97159e5b
url: https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L133-L136
partition: train
repo: ehansis/ozelot
path: examples/eurominder/eurominder/pipeline.py
func_name: EuroStatsInputFile.load
language: python
code:

def load(self, key_filter=None, header_preproc=None):
    """Load data table from tsv file, from default location

    Args:
        key_filter (str): additional filter for key column - regex matching
            key values to include; None for no filter
        header_preproc (func): function to apply to column headers to extract
            year numbers (as strings)

    Returns:
        pd.DataFrame: data
    """
    # read file, keep all values as strings
    df = pd.read_csv(self.input_file, sep='\t', dtype=object)

    if key_filter is not None:
        # filter on key column (first column)
        df = df[df[df.columns[0]].str.match(key_filter)]

    # first column contains metadata, with the NUTS2 region key as last (comma-separated) value
    meta_col = df.columns[0]
    df[meta_col] = df[meta_col].str.split(',').str[-1]

    # convert columns to numbers, skip first column (containing metadata)
    for col_name in df.columns[1:]:
        # some values have lower-case characters indicating footnotes, strip them
        stripped = df[col_name].str.replace(r'[a-z]', '', regex=True)
        # convert to numbers, turning any remaining empty values
        # (indicated by ':' in the input table) into NaN
        df[col_name] = pd.to_numeric(stripped, errors='coerce')

    # preprocess headers
    if header_preproc is not None:
        df.columns = list(df.columns[:1]) + [header_preproc(c) for c in df.columns[1:]]

    # rename columns, convert years to integers
    # noinspection PyTypeChecker
    df.columns = ['key'] + [int(y) for y in df.columns[1:]]

    return df
[ "def", "load", "(", "self", ",", "key_filter", "=", "None", ",", "header_preproc", "=", "None", ")", ":", "# read file, keep all values as strings", "df", "=", "pd", ".", "read_csv", "(", "self", ".", "input_file", ",", "sep", "=", "'\\t'", ",", "dtype", "=", "object", ")", "if", "key_filter", "is", "not", "None", ":", "# filter on key column (first column)", "df", "=", "df", "[", "df", "[", "df", ".", "columns", "[", "0", "]", "]", ".", "str", ".", "match", "(", "key_filter", ")", "]", "# first column contains metadata, with NUTS2 region key as last (comma-separated) value", "meta_col", "=", "df", ".", "columns", "[", "0", "]", "df", "[", "meta_col", "]", "=", "df", "[", "meta_col", "]", ".", "str", ".", "split", "(", "','", ")", ".", "str", "[", "-", "1", "]", "# convert columns to numbers, skip first column (containing metadata)", "for", "col_name", "in", "df", ".", "columns", "[", "1", ":", "]", ":", "# some values have lower-case characters indicating footnotes, strip them", "stripped", "=", "df", "[", "col_name", "]", ".", "str", ".", "replace", "(", "r'[a-z]'", ",", "''", ")", "# convert to numbers, convert any remaining empty values (indicated by ':' in the input table) to NaN", "df", "[", "col_name", "]", "=", "pd", ".", "to_numeric", "(", "stripped", ",", "errors", "=", "'coerce'", ")", "# preprocess headers", "if", "header_preproc", "is", "not", "None", ":", "df", ".", "columns", "=", "list", "(", "df", ".", "columns", "[", ":", "1", "]", ")", "+", "[", "header_preproc", "(", "c", ")", "for", "c", "in", "df", ".", "columns", "[", "1", ":", "]", "]", "# rename columns, convert years to integers", "# noinspection PyTypeChecker", "df", ".", "columns", "=", "[", "'key'", "]", "+", "[", "int", "(", "y", ")", "for", "y", "in", "df", ".", "columns", "[", "1", ":", "]", "]", "return", "df" ]
Load data table from tsv file, from default location Args: key_filter (str): additional filter for key column - regex matching key values to include; None for no filter header_preproc (func): function to apply to column headers to extract year numbers (as strings) Returns: pd.DataFrame: data
[ "Load", "data", "table", "from", "tsv", "file", "from", "default", "location" ]
sha: 948675e02eb6fca940450f5cb814f53e97159e5b
url: https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L138-L179
partition: train
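The footnote-stripping step is worth seeing in isolation; a sketch on a toy Eurostat-style column (values invented):

import pandas as pd

# footnote letters trail some numbers; ':' marks missing values
raw = pd.Series(['12.3 b', '45.6', ':', '7.8 e'], dtype=object)

stripped = raw.str.replace(r'[a-z]', '', regex=True)
values = pd.to_numeric(stripped, errors='coerce')
print(values.tolist())  # [12.3, 45.6, nan, 7.8]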
repo: ehansis/ozelot
path: examples/eurominder/eurominder/pipeline.py
func_name: ClimateDataInputFile.load
language: python
code:

def load(self):
    """Load the climate data as a map

    Returns:
        dict: {data: masked 3D numpy array containing climate data per month (first axis),
               lat_idx: function converting a latitude to the (fractional) row index in the map,
               lon_idx: function converting a longitude to the (fractional) column index in the map}
    """
    from scipy.io import netcdf_file
    from scipy import interpolate
    import numpy as np

    # load file
    f = netcdf_file(self.input_file)

    # extract data, make explicit copies of the data
    out = dict()
    lats = f.variables['lat'][:].copy()
    lons = f.variables['lon'][:].copy()

    # lons start at 0, which is bad for working with data in Europe because the
    # map border runs right through; roll the array by half its width to get
    # Europe into the map center
    out['data'] = np.roll(f.variables[self.variable_name][:, :, :].copy(),
                          shift=len(lons) // 2, axis=2)
    lons = np.roll(lons, shift=len(lons) // 2)

    # avoid wraparound problems around zero by setting the lon range to
    # -180...180; this is also the format used in the GeoJSON NUTS2 polygons
    lons[lons > 180] -= 360

    # the data contains very negative values (~ -9e36) as an 'invalid data'
    # flag; convert this to a masked array
    out['data'] = np.ma.array(out['data'])
    out['data'][out['data'] < -1.e6] = np.ma.masked

    # -- start documentation include: climate-input-interp
    # build interpolators to convert lats/lons to row/column indices
    out['lat_idx'] = interpolate.interp1d(x=lats, y=np.arange(len(lats)))
    out['lon_idx'] = interpolate.interp1d(x=lons, y=np.arange(len(lons)))
    # -- end documentation include: climate-input-interp

    # clean up
    f.close()

    return out
[ "def", "load", "(", "self", ")", ":", "from", "scipy", ".", "io", "import", "netcdf_file", "from", "scipy", "import", "interpolate", "import", "numpy", "as", "np", "# load file", "f", "=", "netcdf_file", "(", "self", ".", "input_file", ")", "# extract data, make explicity copies of data", "out", "=", "dict", "(", ")", "lats", "=", "f", ".", "variables", "[", "'lat'", "]", "[", ":", "]", ".", "copy", "(", ")", "lons", "=", "f", ".", "variables", "[", "'lon'", "]", "[", ":", "]", ".", "copy", "(", ")", "# lons start at 0, this is bad for working with data in Europe because the map border runs right through;", "# roll array by half its width to get Europe into the map center", "out", "[", "'data'", "]", "=", "np", ".", "roll", "(", "f", ".", "variables", "[", "self", ".", "variable_name", "]", "[", ":", ",", ":", ",", ":", "]", ".", "copy", "(", ")", ",", "shift", "=", "len", "(", "lons", ")", "//", "2", ",", "axis", "=", "2", ")", "lons", "=", "np", ".", "roll", "(", "lons", ",", "shift", "=", "len", "(", "lons", ")", "//", "2", ")", "# avoid wraparound problems around zero by setting lon range to -180...180, this is", "# also the format used in the GeoJSON NUTS2 polygons", "lons", "[", "lons", ">", "180", "]", "-=", "360", "# data contains some very negative value (~ -9e36) as 'invalid data' flag, convert this to a masked array", "out", "[", "'data'", "]", "=", "np", ".", "ma", ".", "array", "(", "out", "[", "'data'", "]", ")", "out", "[", "'data'", "]", "[", "out", "[", "'data'", "]", "<", "-", "1.e6", "]", "=", "np", ".", "ma", ".", "masked", "# -- start documentation include: climate-input-interp", "# build interpolators to convert lats/lons to row/column indices", "out", "[", "'lat_idx'", "]", "=", "interpolate", ".", "interp1d", "(", "x", "=", "lats", ",", "y", "=", "np", ".", "arange", "(", "len", "(", "lats", ")", ")", ")", "out", "[", "'lon_idx'", "]", "=", "interpolate", ".", "interp1d", "(", "x", "=", "lons", ",", "y", "=", "np", ".", "arange", "(", "len", "(", "lons", ")", ")", ")", "# -- end documentation include: climate-input-interp", "# clean up", "f", ".", "close", "(", ")", "return", "out" ]
Load the climate data as a map Returns: dict: {data: masked 3D numpy array containing climate data per month (first axis), lat_idx: function converting a latitude to the (fractional) row index in the map, lon_idx: function converting a longitude to the (fractional) column index in the map}
[ "Load", "the", "climate", "data", "as", "a", "map" ]
sha: 948675e02eb6fca940450f5cb814f53e97159e5b
url: https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L659-L701
partition: train
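The coordinate-to-index interpolators generalize to any regular grid; a sketch on an invented 2.5-degree global grid:

import numpy as np
from scipy import interpolate

lats = np.arange(-90, 90.1, 2.5)  # 73 latitudes
lons = np.arange(-180, 180, 2.5)  # 144 longitudes

lat_idx = interpolate.interp1d(x=lats, y=np.arange(len(lats)))
lon_idx = interpolate.interp1d(x=lons, y=np.arange(len(lons)))

# fractional array indices for Berlin (52.52 N, 13.405 E)
print(float(lat_idx(52.52)), float(lon_idx(13.405)))  # ~57.01 ~77.36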
repo: ehansis/ozelot
path: examples/eurominder/eurominder/pipeline.py
func_name: LoadClimateData.clear
language: python
code:

def clear(self):
    """Clear output of one climate variable
    """
    # mark this task as incomplete
    self.mark_incomplete()

    # Delete the indicator metadata; this also deletes values by cascading.
    for suffix in list(CLIMATE_SEASON_SUFFIXES.values()):
        try:
            # noinspection PyUnresolvedReferences
            indicator = self.session.query(models.ClimateIndicator) \
                .filter(models.ClimateIndicator.description == self.description + suffix) \
                .one()
            self.session.delete(indicator)
        except NoResultFound:
            # Data didn't exist yet, no problem
            pass

    self.close_session()
[ "def", "clear", "(", "self", ")", ":", "# mark this task as incomplete", "self", ".", "mark_incomplete", "(", ")", "# Delete the indicator metadata, this also deletes values by cascading.", "for", "suffix", "in", "list", "(", "CLIMATE_SEASON_SUFFIXES", ".", "values", "(", ")", ")", ":", "try", ":", "# noinspection PyUnresolvedReferences", "indicator", "=", "self", ".", "session", ".", "query", "(", "models", ".", "ClimateIndicator", ")", ".", "filter", "(", "models", ".", "ClimateIndicator", ".", "description", "==", "self", ".", "description", "+", "suffix", ")", ".", "one", "(", ")", "self", ".", "session", ".", "delete", "(", "indicator", ")", "except", "NoResultFound", ":", "# Data didn't exist yet, no problem", "pass", "self", ".", "close_session", "(", ")" ]
Clear output of one climate variable
[ "Clear", "output", "of", "one", "climate", "variable" ]
sha: 948675e02eb6fca940450f5cb814f53e97159e5b
url: https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L731-L749
partition: train
repo: ehansis/ozelot
path: examples/eurominder/eurominder/pipeline.py
func_name: LoadClimateData.run
language: python
code:

def run(self):
    """Load climate data and convert to indicator objects
    """
    import numpy as np

    # get all NUTS region IDs, for linking values to region objects
    query = self.session.query(models.NUTS2Region.key, models.NUTS2Region.id)
    region_ids = self.client.df_query(query).set_index('key')['id'].to_dict()

    # load climate data and NUTS2 polygons
    data = next(self.requires()).load()
    nuts = NUTS2GeoJSONInputFile().load()

    # generated indicator IDs, keyed by season
    indicator_ids = dict()
    # climate data by season
    t_data = dict()

    # create new indicator objects for summer and winter, create averaged climate data
    for season, suffix in CLIMATE_SEASON_SUFFIXES.items():
        # noinspection PyUnresolvedReferences
        indicator = models.ClimateIndicator(description=self.description + suffix)
        self.session.add(indicator)
        # commit, to get indicator ID filled
        self.session.commit()
        indicator_ids[season] = indicator.id

        # select winter or summer data by month index, average over time range
        if season == 'summer':
            t_data[season] = np.ma.average(data['data'][3:9, :, :], axis=0)
        else:
            # noinspection PyTypeChecker
            t_data[season] = np.ma.average(0.5 * (data['data'][0:3, :, :]
                                                  + data['data'][9:12, :, :]), axis=0)

    # container for output objects, for bulk saving
    objects = []

    # start value for manual object id generation
    current_value_id = models.ClimateValue.get_max_id(self.session)

    # for each region, get a mask, average climate variable over the mask and store
    # the indicator value; loop over features first, then over seasons, because
    # mask generation is expensive
    for feature in nuts:
        # draw region mask (doesn't matter for which season we take the map shape)
        mask = geojson_polygon_to_mask(feature=feature,
                                       shape=t_data['summer'].shape,
                                       lat_idx=data['lat_idx'],
                                       lon_idx=data['lon_idx'])

        # create indicator values for summer and winter
        for season in list(CLIMATE_SEASON_SUFFIXES.keys()):
            # weighted average from region mask
            value = np.ma.average(t_data[season], weights=mask)

            # region ID must be cast to int (DBs don't like numpy dtypes from pandas)
            region_id = region_ids.get(feature.properties['NUTS_ID'], None)
            if region_id is not None:
                region_id = int(region_id)

            # append an indicator value, manually generate object IDs for bulk saving
            current_value_id += 1
            objects.append(models.ClimateValue(id=current_value_id,
                                               value=value,
                                               region_id=region_id,
                                               indicator_id=indicator_ids[season]))

            # # print some debugging output
            # print self.variable_name + ' ' + season, feature.properties['NUTS_ID'], value

        # # generate some plots for debugging
        # from matplotlib import pyplot as plt
        # plt.subplot(211)
        # plt.imshow(0.02 * t_data + mask * t_data, interpolation='none')
        # plt.subplot(212)
        # plt.imshow(t_data, interpolation='none')
        # plt.savefig('/tmp/' + feature.properties['NUTS_ID'] + '.png')

    # bulk-save all objects
    self.session.bulk_save_objects(objects)
    self.session.commit()

    self.done()
[ "def", "run", "(", "self", ")", ":", "import", "numpy", "as", "np", "# get all NUTS region IDs, for linking values to region objects", "query", "=", "self", ".", "session", ".", "query", "(", "models", ".", "NUTS2Region", ".", "key", ",", "models", ".", "NUTS2Region", ".", "id", ")", "region_ids", "=", "self", ".", "client", ".", "df_query", "(", "query", ")", ".", "set_index", "(", "'key'", ")", "[", "'id'", "]", ".", "to_dict", "(", ")", "# load climate data and NUTS2 polygons", "data", "=", "next", "(", "self", ".", "requires", "(", ")", ")", ".", "load", "(", ")", "nuts", "=", "NUTS2GeoJSONInputFile", "(", ")", ".", "load", "(", ")", "# generated indicator IDs, keyed by season", "indicator_ids", "=", "dict", "(", ")", "# climate data by season", "t_data", "=", "dict", "(", ")", "# create new indicator objects for summer and winter, create averaged climate data", "for", "season", ",", "suffix", "in", "CLIMATE_SEASON_SUFFIXES", ".", "items", "(", ")", ":", "# noinspection PyUnresolvedReferences", "indicator", "=", "models", ".", "ClimateIndicator", "(", "description", "=", "self", ".", "description", "+", "suffix", ")", "self", ".", "session", ".", "add", "(", "indicator", ")", "# commit, to get indicator ID filled", "self", ".", "session", ".", "commit", "(", ")", "indicator_ids", "[", "season", "]", "=", "indicator", ".", "id", "# select winter or summer data by month index, average over time range", "if", "season", "==", "'summer'", ":", "t_data", "[", "season", "]", "=", "np", ".", "ma", ".", "average", "(", "data", "[", "'data'", "]", "[", "3", ":", "9", ",", ":", ",", ":", "]", ",", "axis", "=", "0", ")", "else", ":", "# noinspection PyTypeChecker", "t_data", "[", "season", "]", "=", "np", ".", "ma", ".", "average", "(", "0.5", "*", "(", "data", "[", "'data'", "]", "[", "0", ":", "3", ",", ":", ",", ":", "]", "+", "data", "[", "'data'", "]", "[", "9", ":", "12", ",", ":", ",", ":", "]", ")", ",", "axis", "=", "0", ")", "# container for output objects, for bulk saving", "objects", "=", "[", "]", "# start value for manual object id generation", "current_value_id", "=", "models", ".", "ClimateValue", ".", "get_max_id", "(", "self", ".", "session", ")", "# for each region, get a mask, average climate variable over the mask and store the indicator value;", "# loop over features first, then over seasons, because mask generation is expensive", "for", "feature", "in", "nuts", ":", "# draw region mask (doesn't matter for which season we take the map shape)", "mask", "=", "geojson_polygon_to_mask", "(", "feature", "=", "feature", ",", "shape", "=", "t_data", "[", "'summer'", "]", ".", "shape", ",", "lat_idx", "=", "data", "[", "'lat_idx'", "]", ",", "lon_idx", "=", "data", "[", "'lon_idx'", "]", ")", "# create indicator values for summer and winter", "for", "season", "in", "list", "(", "CLIMATE_SEASON_SUFFIXES", ".", "keys", "(", ")", ")", ":", "# weighted average from region mask", "value", "=", "np", ".", "ma", ".", "average", "(", "t_data", "[", "season", "]", ",", "weights", "=", "mask", ")", "# region ID must be cast to int (DBs don't like numpy dtypes from pandas)", "region_id", "=", "region_ids", ".", "get", "(", "feature", ".", "properties", "[", "'NUTS_ID'", "]", ",", "None", ")", "if", "region_id", "is", "not", "None", ":", "region_id", "=", "int", "(", "region_id", ")", "# append an indicator value, manually generate object IDs for bulk saving", "current_value_id", "+=", "1", "objects", ".", "append", "(", "models", ".", "ClimateValue", "(", "id", "=", "current_value_id", ",", 
"value", "=", "value", ",", "region_id", "=", "region_id", ",", "indicator_id", "=", "indicator_ids", "[", "season", "]", ")", ")", "# # print some debugging output", "# print self.variable_name + ' ' + season, feature.properties['NUTS_ID'], value", "# # generate some plots for debugging", "# from matplotlib import pyplot as plt", "# plt.subplot(211)", "# plt.imshow(0.02 * t_data + mask * t_data, interpolation='none')", "# plt.subplot(212)", "# plt.imshow(t_data, interpolation='none')", "# plt.savefig('/tmp/' + feature.properties['NUTS_ID'] + '.png')", "# bulk-save all objects", "self", ".", "session", ".", "bulk_save_objects", "(", "objects", ")", "self", ".", "session", ".", "commit", "(", ")", "self", ".", "done", "(", ")" ]
Load climate data and convert to indicator objects
[ "Load", "climate", "data", "and", "convert", "to", "indicator", "objects" ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/eurominder/eurominder/pipeline.py#L751-L836
train
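A minimal sketch of the season-averaging step in run() above, using synthetic data in place of the pipeline's climate input; the 4x5 grid and random values are illustrative, but the month slicing (3:9 for summer, 0:3 plus 9:12 for winter) and the masked weighted average mirror the record's code.

import numpy as np

# 12 monthly fields on a small grid; masked_invalid stands in for missing cells
data = np.ma.masked_invalid(np.random.rand(12, 4, 5))

# Summer: average of April-September; winter: mean of the Jan-Mar and Oct-Dec halves
summer = np.ma.average(data[3:9, :, :], axis=0)
winter = np.ma.average(0.5 * (data[0:3, :, :] + data[9:12, :, :]), axis=0)

# Per-region value: weighted average over a 0/1 polygon mask, one per NUTS2 feature
mask = np.zeros((4, 5))
mask[1:3, 1:4] = 1
value = np.ma.average(summer, weights=mask)
print(round(float(value), 3))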
untwisted/untwisted
untwisted/iostd.py
lose
def lose(spin): """ It is used to close TCP connection and unregister the Spin instance from untwisted reactor. Diagram: lose -> (int:err | socket.error:err) -> CLOSE_ERR """ try: spin.close() except Exception as excpt: err = excpt.args[0] spin.drive(CLOSE_ERR, err) finally: spin.destroy() spin.drive(LOST)
python
def lose(spin): """ It is used to close TCP connection and unregister the Spin instance from untwisted reactor. Diagram: lose -> (int:err | socket.error:err) -> CLOSE_ERR """ try: spin.close() except Exception as excpt: err = excpt.args[0] spin.drive(CLOSE_ERR, err) finally: spin.destroy() spin.drive(LOST)
[ "def", "lose", "(", "spin", ")", ":", "try", ":", "spin", ".", "close", "(", ")", "except", "Exception", "as", "excpt", ":", "err", "=", "excpt", ".", "args", "[", "0", "]", "spin", ".", "drive", "(", "CLOSE_ERR", ",", "err", ")", "finally", ":", "spin", ".", "destroy", "(", ")", "spin", ".", "drive", "(", "LOST", ")" ]
It is used to close a TCP connection and unregister the Spin instance from the untwisted reactor. Diagram: lose -> (int:err | socket.error:err) -> CLOSE_ERR
[ "It", "is", "used", "to", "close", "a", "TCP", "connection", "and", "unregister", "the", "Spin", "instance", "from", "the", "untwisted", "reactor", "." ]
8a8d9c8a8d0f3452d5de67cd760297bb5759f637
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/iostd.py#L10-L27
train
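A hedged sketch of how lose() is typically wired into event handlers. add_map appears elsewhere in this module's records; the assumption here is that LOST and CLOSE_ERR can be imported from untwisted.iostd alongside lose, which the code references but does not show being defined.

from untwisted.iostd import lose, LOST, CLOSE_ERR  # import location assumed

def on_lost(spin):
    print('connection closed and spin unregistered')

def on_close_err(spin, err):
    print('close() raised, errno:', err)

# Given a connected Spin instance created elsewhere:
# spin.add_map(LOST, on_lost)
# spin.add_map(CLOSE_ERR, on_close_err)
# lose(spin)   # fires CLOSE_ERR only on failure; LOST is driven in the finally block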
untwisted/untwisted
untwisted/iostd.py
create_server
def create_server(addr, port, backlog): """ Set up a TCP server and installs the basic handles Stdin, Stdout in the clients. Example: def send_data(server, client): # No need to install Stdin or Stdout. client.dump('foo bar!') server = create_server('0.0.0.0', 1024, 50) xmap(server, on_accept, send_data) """ server = Spin() server.bind((addr, port)) server.listen(backlog) Server(server) server.add_map(ACCEPT, lambda server, spin: install_basic_handles(spin)) return server
python
def create_server(addr, port, backlog): """ Set up a TCP server and installs the basic handles Stdin, Stdout in the clients. Example: def send_data(server, client): # No need to install Stdin or Stdout. client.dump('foo bar!') server = create_server('0.0.0.0', 1024, 50) xmap(server, on_accept, send_data) """ server = Spin() server.bind((addr, port)) server.listen(backlog) Server(server) server.add_map(ACCEPT, lambda server, spin: install_basic_handles(spin)) return server
[ "def", "create_server", "(", "addr", ",", "port", ",", "backlog", ")", ":", "server", "=", "Spin", "(", ")", "server", ".", "bind", "(", "(", "addr", ",", "port", ")", ")", "server", ".", "listen", "(", "backlog", ")", "Server", "(", "server", ")", "server", ".", "add_map", "(", "ACCEPT", ",", "lambda", "server", ",", "spin", ":", "install_basic_handles", "(", "spin", ")", ")", "return", "server" ]
Set up a TCP server and install the basic handles Stdin, Stdout in the clients. Example: def send_data(server, client): # No need to install Stdin or Stdout. client.dump('foo bar!') server = create_server('0.0.0.0', 1024, 50) xmap(server, on_accept, send_data)
[ "Set", "up", "a", "TCP", "server", "and", "install", "the", "basic", "handles", "Stdin", "Stdout", "in", "the", "clients", "." ]
8a8d9c8a8d0f3452d5de67cd760297bb5759f637
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/iostd.py#L37-L57
train
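The docstring example relies on a free function xmap and an undefined on_accept event; a sketch using the add_map/ACCEPT calls that the function body itself uses may be closer to current usage. Starting the reactor loop is omitted, and importing ACCEPT from this module is an assumption.

from untwisted.iostd import create_server, ACCEPT  # ACCEPT import location assumed

def send_data(server, client):
    # Stdin/Stdout handles are already installed on each accepted client
    client.dump('foo bar!')

server = create_server('0.0.0.0', 1024, 50)
server.add_map(ACCEPT, send_data)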
untwisted/untwisted
untwisted/iostd.py
create_client
def create_client(addr, port): """ Set up a TCP client and installs the basic handles Stdin, Stdout. def send_data(client): client.dump('GET / HTTP/1.1\r\n') xmap(client, LOAD, iostd.put) client = create_client('www.google.com.br', 80) xmap(client, CONNECT, send_data) """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # First attempt to connect otherwise it leaves # an unconnected spin instance in the reactor. sock.connect_ex((addr, port)) spin = Spin(sock) Client(spin) spin.add_map(CONNECT, install_basic_handles) spin.add_map(CONNECT_ERR, lambda con, err: lose(con)) return spin
python
def create_client(addr, port): """ Set up a TCP client and installs the basic handles Stdin, Stdout. def send_data(client): client.dump('GET / HTTP/1.1\r\n') xmap(client, LOAD, iostd.put) client = create_client('www.google.com.br', 80) xmap(client, CONNECT, send_data) """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # First attempt to connect otherwise it leaves # an unconnected spin instance in the reactor. sock.connect_ex((addr, port)) spin = Spin(sock) Client(spin) spin.add_map(CONNECT, install_basic_handles) spin.add_map(CONNECT_ERR, lambda con, err: lose(con)) return spin
[ "def", "create_client", "(", "addr", ",", "port", ")", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "# First attempt to connect otherwise it leaves", "# an unconnected spin instance in the reactor.", "sock", ".", "connect_ex", "(", "(", "addr", ",", "port", ")", ")", "spin", "=", "Spin", "(", "sock", ")", "Client", "(", "spin", ")", "spin", ".", "add_map", "(", "CONNECT", ",", "install_basic_handles", ")", "spin", ".", "add_map", "(", "CONNECT_ERR", ",", "lambda", "con", ",", "err", ":", "lose", "(", "con", ")", ")", "return", "spin" ]
Set up a TCP client and install the basic handles Stdin, Stdout. def send_data(client): client.dump('GET / HTTP/1.1\r\n') xmap(client, LOAD, iostd.put) client = create_client('www.google.com.br', 80) xmap(client, CONNECT, send_data)
[ "Set", "up", "a", "TCP", "client", "and", "install", "the", "basic", "handles", "Stdin", "Stdout", "." ]
8a8d9c8a8d0f3452d5de67cd760297bb5759f637
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/iostd.py#L67-L88
train
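A matching client-side sketch, combining the docstring example with the CONNECT handler that the function body installs; the import locations for CONNECT, LOAD, and put are assumptions (the docstring itself refers to iostd.put).

from untwisted.iostd import create_client, CONNECT, LOAD, put  # imports assumed

def send_request(client):
    client.dump('GET / HTTP/1.1\r\n')
    client.add_map(LOAD, put)  # put prints received data, per the docstring example

client = create_client('www.google.com.br', 80)
client.add_map(CONNECT, send_request)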
kataev/flake8-rst
flake8_rst/cli.py
main
def main(argv=None): # type: (Union[NoneType, List[str]]) -> NoneType """Execute the main bit of the application. This handles the creation of an instance of :class:`Application`, runs it, and then exits the application. :param list argv: The arguments to be passed to the application for parsing. """ app = application.Application() app.run(argv) app.exit()
python
def main(argv=None): # type: (Union[NoneType, List[str]]) -> NoneType """Execute the main bit of the application. This handles the creation of an instance of :class:`Application`, runs it, and then exits the application. :param list argv: The arguments to be passed to the application for parsing. """ app = application.Application() app.run(argv) app.exit()
[ "def", "main", "(", "argv", "=", "None", ")", ":", "# type: (Union[NoneType, List[str]]) -> NoneType", "app", "=", "application", ".", "Application", "(", ")", "app", ".", "run", "(", "argv", ")", "app", ".", "exit", "(", ")" ]
Execute the main bit of the application. This handles the creation of an instance of :class:`Application`, runs it, and then exits the application. :param list argv: The arguments to be passed to the application for parsing.
[ "Execute", "the", "main", "bit", "of", "the", "application", "." ]
ca6d41c7a309b9e8cd4fa6f428b82db96b6a986f
https://github.com/kataev/flake8-rst/blob/ca6d41c7a309b9e8cd4fa6f428b82db96b6a986f/flake8_rst/cli.py#L5-L17
train
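Since main() simply constructs an Application, runs it on argv, and exits, it can be invoked programmatically as well as from the console script; passing None defers to sys.argv. The specific flags below are an assumption, on the premise that flake8-rst forwards flake8's options.

from flake8_rst.cli import main

# Lint a single reST file; the call ends via app.exit(), so expect SystemExit
main(['README.rst', '--select=E501'])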
mojaie/chorus
chorus/indigo.py
fingerprint_similarity
def fingerprint_similarity(mol1, mol2): """Calculate Indigo fingerprint similarity """ idmol1 = to_real_mol(mol1) idmol2 = to_real_mol(mol2) fp1 = idmol1.fingerprint("sim") fp2 = idmol2.fingerprint("sim") return round(idg.similarity(fp1, fp2, "tanimoto"), 2)
python
def fingerprint_similarity(mol1, mol2): """Calculate Indigo fingerprint similarity """ idmol1 = to_real_mol(mol1) idmol2 = to_real_mol(mol2) fp1 = idmol1.fingerprint("sim") fp2 = idmol2.fingerprint("sim") return round(idg.similarity(fp1, fp2, "tanimoto"), 2)
[ "def", "fingerprint_similarity", "(", "mol1", ",", "mol2", ")", ":", "idmol1", "=", "to_real_mol", "(", "mol1", ")", "idmol2", "=", "to_real_mol", "(", "mol2", ")", "fp1", "=", "idmol1", ".", "fingerprint", "(", "\"sim\"", ")", "fp2", "=", "idmol2", ".", "fingerprint", "(", "\"sim\"", ")", "return", "round", "(", "idg", ".", "similarity", "(", "fp1", ",", "fp2", ",", "\"tanimoto\"", ")", ",", "2", ")" ]
Calculate Indigo fingerprint similarity
[ "Calculate", "Indigo", "fingerprint", "similarity" ]
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/indigo.py#L64-L71
train
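For intuition about the score this returns, here is the Tanimoto coefficient over fingerprint bit positions in plain Python, with the same round-to-two-places convention; this illustrates the metric only, not the chorus or Indigo API.

def tanimoto(bits1, bits2):
    # |A & B| / |A | B| over the set bits of two fingerprints
    union = len(bits1 | bits2)
    return round(len(bits1 & bits2) / union, 2) if union else 0.0

print(tanimoto({1, 4, 9, 16}, {1, 4, 25}))  # 0.4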
wesleybeckner/salty
salty/core.py
devmodel_to_array
def devmodel_to_array(model_name, train_fraction=1): """ a standardized method of turning a dev_model object into training and testing arrays Parameters ---------- model_name: dev_model the dev_model object to be interrogated train_fraction: int the fraction to be reserved for training Returns ---------- X_train: array the input training array X_test: array the input testing array Y_train: array the output training array Y_test: array the output testing array """ model_outputs = -6 + model_name.Data_summary.shape[0] devmodel = model_name rawdf = devmodel.Data rawdf = rawdf.sample(frac=1) datadf = rawdf.select_dtypes(include=[np.number]) data = np.array(datadf) n = data.shape[0] d = data.shape[1] d -= model_outputs n_train = int(n * train_fraction) # set fraction for training n_test = n - n_train X_train = np.zeros((n_train, d)) # prepare train/test arrays X_test = np.zeros((n_test, d)) Y_train = np.zeros((n_train, model_outputs)) Y_test = np.zeros((n_test, model_outputs)) X_train[:] = data[:n_train, :-model_outputs] Y_train[:] = (data[:n_train, -model_outputs:].astype(float)) X_test[:] = data[n_train:, :-model_outputs] Y_test[:] = (data[n_train:, -model_outputs:].astype(float)) return X_train, Y_train, X_test, Y_test
python
def devmodel_to_array(model_name, train_fraction=1): """ a standardized method of turning a dev_model object into training and testing arrays Parameters ---------- model_name: dev_model the dev_model object to be interrogated train_fraction: int the fraction to be reserved for training Returns ---------- X_train: array the input training array X_test: array the input testing array Y_train: array the output training array Y_test: array the output testing array """ model_outputs = -6 + model_name.Data_summary.shape[0] devmodel = model_name rawdf = devmodel.Data rawdf = rawdf.sample(frac=1) datadf = rawdf.select_dtypes(include=[np.number]) data = np.array(datadf) n = data.shape[0] d = data.shape[1] d -= model_outputs n_train = int(n * train_fraction) # set fraction for training n_test = n - n_train X_train = np.zeros((n_train, d)) # prepare train/test arrays X_test = np.zeros((n_test, d)) Y_train = np.zeros((n_train, model_outputs)) Y_test = np.zeros((n_test, model_outputs)) X_train[:] = data[:n_train, :-model_outputs] Y_train[:] = (data[:n_train, -model_outputs:].astype(float)) X_test[:] = data[n_train:, :-model_outputs] Y_test[:] = (data[n_train:, -model_outputs:].astype(float)) return X_train, Y_train, X_test, Y_test
[ "def", "devmodel_to_array", "(", "model_name", ",", "train_fraction", "=", "1", ")", ":", "model_outputs", "=", "-", "6", "+", "model_name", ".", "Data_summary", ".", "shape", "[", "0", "]", "devmodel", "=", "model_name", "rawdf", "=", "devmodel", ".", "Data", "rawdf", "=", "rawdf", ".", "sample", "(", "frac", "=", "1", ")", "datadf", "=", "rawdf", ".", "select_dtypes", "(", "include", "=", "[", "np", ".", "number", "]", ")", "data", "=", "np", ".", "array", "(", "datadf", ")", "n", "=", "data", ".", "shape", "[", "0", "]", "d", "=", "data", ".", "shape", "[", "1", "]", "d", "-=", "model_outputs", "n_train", "=", "int", "(", "n", "*", "train_fraction", ")", "# set fraction for training", "n_test", "=", "n", "-", "n_train", "X_train", "=", "np", ".", "zeros", "(", "(", "n_train", ",", "d", ")", ")", "# prepare train/test arrays", "X_test", "=", "np", ".", "zeros", "(", "(", "n_test", ",", "d", ")", ")", "Y_train", "=", "np", ".", "zeros", "(", "(", "n_train", ",", "model_outputs", ")", ")", "Y_test", "=", "np", ".", "zeros", "(", "(", "n_test", ",", "model_outputs", ")", ")", "X_train", "[", ":", "]", "=", "data", "[", ":", "n_train", ",", ":", "-", "model_outputs", "]", "Y_train", "[", ":", "]", "=", "(", "data", "[", ":", "n_train", ",", "-", "model_outputs", ":", "]", ".", "astype", "(", "float", ")", ")", "X_test", "[", ":", "]", "=", "data", "[", "n_train", ":", ",", ":", "-", "model_outputs", "]", "Y_test", "[", ":", "]", "=", "(", "data", "[", "n_train", ":", ",", "-", "model_outputs", ":", "]", ".", "astype", "(", "float", ")", ")", "return", "X_train", ",", "Y_train", ",", "X_test", ",", "Y_test" ]
a standardized method of turning a dev_model object into training and testing arrays Parameters ---------- model_name: dev_model the dev_model object to be interrogated train_fraction: int the fraction to be reserved for training Returns ---------- X_train: array the input training array X_test: array the input testing array Y_train: array the output training array Y_test: array the output testing array
[ "a", "standardized", "method", "of", "turning", "a", "dev_model", "object", "into", "training", "and", "testing", "arrays" ]
ef17a97aea3e4f81fcd0359ce85b3438c0e6499b
https://github.com/wesleybeckner/salty/blob/ef17a97aea3e4f81fcd0359ce85b3438c0e6499b/salty/core.py#L162-L207
train
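The core of the function is plain slice arithmetic once the shuffled frame becomes a numpy array. A self-contained sketch with made-up shapes (in the real function, model_outputs is derived from Data_summary rather than hard-coded):

import numpy as np

data = np.random.rand(10, 5)      # 10 rows: 3 feature columns + 2 output columns
model_outputs = 2
n_train = int(len(data) * 0.8)    # train_fraction = 0.8

X_train = data[:n_train, :-model_outputs]
Y_train = data[:n_train, -model_outputs:]
X_test = data[n_train:, :-model_outputs]
Y_test = data[n_train:, -model_outputs:]
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)
# (8, 3) (8, 2) (2, 3) (2, 2)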
potash/drain
drain/exploration.py
dapply
def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs): """ Apply function to each step object in the index Args: fn: function to apply. If a list then each function is applied pairwise: whether to apply the function to pairs of steps symmetric, diagonal, block: passed to apply_pairwise when pairwise=True kwargs: a keyword arguments to pass to each function. Arguments with list value are grid searched using util.dict_product. Returns: a StepFrame or StepSeries """ search_keys = [k for k, v in kwargs.items() if isinstance(v, list) and len(v) > 1] functions = util.make_list(fn) search = list(product(functions, util.dict_product(kwargs))) results = [] for fn, kw in search: if not pairwise: r = self.index.to_series().apply(lambda step: fn(step, **kw)) else: r = apply_pairwise(self, fn, symmetric=symmetric, diagonal=diagonal, block=block, **kw) name = [] if len(functions) == 1 else [fn.__name__] name += util.dict_subset(kw, search_keys).values() if isinstance(r, pd.DataFrame): columns = pd.MultiIndex.from_tuples( [tuple(name + util.make_list(c)) for c in r.columns]) r.columns = columns else: r.name = tuple(name) results.append(r) if len(results) > 1: result = pd.concat(results, axis=1) # get subset of parameters that were searched over column_names = [] if len(functions) == 1 else [None] column_names += search_keys column_names += [None]*(len(result.columns.names)-len(column_names)) result.columns.names = column_names return StepFrame(result) else: result = results[0] if isinstance(result, pd.DataFrame): return StepFrame(result) else: result.name = functions[0].__name__ return StepSeries(result)
python
def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs): """ Apply function to each step object in the index Args: fn: function to apply. If a list then each function is applied pairwise: whether to apply the function to pairs of steps symmetric, diagonal, block: passed to apply_pairwise when pairwise=True kwargs: a keyword arguments to pass to each function. Arguments with list value are grid searched using util.dict_product. Returns: a StepFrame or StepSeries """ search_keys = [k for k, v in kwargs.items() if isinstance(v, list) and len(v) > 1] functions = util.make_list(fn) search = list(product(functions, util.dict_product(kwargs))) results = [] for fn, kw in search: if not pairwise: r = self.index.to_series().apply(lambda step: fn(step, **kw)) else: r = apply_pairwise(self, fn, symmetric=symmetric, diagonal=diagonal, block=block, **kw) name = [] if len(functions) == 1 else [fn.__name__] name += util.dict_subset(kw, search_keys).values() if isinstance(r, pd.DataFrame): columns = pd.MultiIndex.from_tuples( [tuple(name + util.make_list(c)) for c in r.columns]) r.columns = columns else: r.name = tuple(name) results.append(r) if len(results) > 1: result = pd.concat(results, axis=1) # get subset of parameters that were searched over column_names = [] if len(functions) == 1 else [None] column_names += search_keys column_names += [None]*(len(result.columns.names)-len(column_names)) result.columns.names = column_names return StepFrame(result) else: result = results[0] if isinstance(result, pd.DataFrame): return StepFrame(result) else: result.name = functions[0].__name__ return StepSeries(result)
[ "def", "dapply", "(", "self", ",", "fn", ",", "pairwise", "=", "False", ",", "symmetric", "=", "True", ",", "diagonal", "=", "False", ",", "block", "=", "None", ",", "*", "*", "kwargs", ")", ":", "search_keys", "=", "[", "k", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "isinstance", "(", "v", ",", "list", ")", "and", "len", "(", "v", ")", ">", "1", "]", "functions", "=", "util", ".", "make_list", "(", "fn", ")", "search", "=", "list", "(", "product", "(", "functions", ",", "util", ".", "dict_product", "(", "kwargs", ")", ")", ")", "results", "=", "[", "]", "for", "fn", ",", "kw", "in", "search", ":", "if", "not", "pairwise", ":", "r", "=", "self", ".", "index", ".", "to_series", "(", ")", ".", "apply", "(", "lambda", "step", ":", "fn", "(", "step", ",", "*", "*", "kw", ")", ")", "else", ":", "r", "=", "apply_pairwise", "(", "self", ",", "fn", ",", "symmetric", "=", "symmetric", ",", "diagonal", "=", "diagonal", ",", "block", "=", "block", ",", "*", "*", "kw", ")", "name", "=", "[", "]", "if", "len", "(", "functions", ")", "==", "1", "else", "[", "fn", ".", "__name__", "]", "name", "+=", "util", ".", "dict_subset", "(", "kw", ",", "search_keys", ")", ".", "values", "(", ")", "if", "isinstance", "(", "r", ",", "pd", ".", "DataFrame", ")", ":", "columns", "=", "pd", ".", "MultiIndex", ".", "from_tuples", "(", "[", "tuple", "(", "name", "+", "util", ".", "make_list", "(", "c", ")", ")", "for", "c", "in", "r", ".", "columns", "]", ")", "r", ".", "columns", "=", "columns", "else", ":", "r", ".", "name", "=", "tuple", "(", "name", ")", "results", ".", "append", "(", "r", ")", "if", "len", "(", "results", ")", ">", "1", ":", "result", "=", "pd", ".", "concat", "(", "results", ",", "axis", "=", "1", ")", "# get subset of parameters that were searched over", "column_names", "=", "[", "]", "if", "len", "(", "functions", ")", "==", "1", "else", "[", "None", "]", "column_names", "+=", "search_keys", "column_names", "+=", "[", "None", "]", "*", "(", "len", "(", "result", ".", "columns", ".", "names", ")", "-", "len", "(", "column_names", ")", ")", "result", ".", "columns", ".", "names", "=", "column_names", "return", "StepFrame", "(", "result", ")", "else", ":", "result", "=", "results", "[", "0", "]", "if", "isinstance", "(", "result", ",", "pd", ".", "DataFrame", ")", ":", "return", "StepFrame", "(", "result", ")", "else", ":", "result", ".", "name", "=", "functions", "[", "0", "]", ".", "__name__", "return", "StepSeries", "(", "result", ")" ]
Apply function to each step object in the index Args: fn: function to apply. If a list then each function is applied pairwise: whether to apply the function to pairs of steps symmetric, diagonal, block: passed to apply_pairwise when pairwise=True kwargs: a keyword arguments to pass to each function. Arguments with list value are grid searched using util.dict_product. Returns: a StepFrame or StepSeries
[ "Apply", "function", "to", "each", "step", "object", "in", "the", "index" ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/exploration.py#L125-L177
train
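The search expansion at the top of dapply pairs every function with every combination of list-valued keyword arguments. A sketch of that expansion with a stand-in for util.dict_product, whose exact behavior is assumed here:

from itertools import product

def dict_product(d):
    # Assumed behavior: expand list values into a grid of keyword dicts
    keys = list(d)
    values = [v if isinstance(v, list) else [v] for v in d.values()]
    return [dict(zip(keys, combo)) for combo in product(*values)]

functions = [min, max]
search = list(product(functions, dict_product({'a': [1, 2], 'b': 3})))
for fn, kw in search:
    print(fn.__name__, kw)
# min {'a': 1, 'b': 3} ... four pairs in total, one per (function, grid point)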
maljovec/topopy
topopy/ContourTree.py
ContourTree._identifyBranches
def _identifyBranches(self): """ A helper function for determining all of the branches in the tree. This should be called after the tree has been fully constructed and its nodes and edges are populated. """ if self.debug: sys.stdout.write("Identifying branches: ") start = time.clock() seen = set() self.branches = set() # Find all of the branching nodes in the tree, degree > 1 # That is, they appear in more than one edge for e1, e2 in self.edges: if e1 not in seen: seen.add(e1) else: self.branches.add(e1) if e2 not in seen: seen.add(e2) else: self.branches.add(e2) if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start))
python
def _identifyBranches(self): """ A helper function for determining all of the branches in the tree. This should be called after the tree has been fully constructed and its nodes and edges are populated. """ if self.debug: sys.stdout.write("Identifying branches: ") start = time.clock() seen = set() self.branches = set() # Find all of the branching nodes in the tree, degree > 1 # That is, they appear in more than one edge for e1, e2 in self.edges: if e1 not in seen: seen.add(e1) else: self.branches.add(e1) if e2 not in seen: seen.add(e2) else: self.branches.add(e2) if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start))
[ "def", "_identifyBranches", "(", "self", ")", ":", "if", "self", ".", "debug", ":", "sys", ".", "stdout", ".", "write", "(", "\"Identifying branches: \"", ")", "start", "=", "time", ".", "clock", "(", ")", "seen", "=", "set", "(", ")", "self", ".", "branches", "=", "set", "(", ")", "# Find all of the branching nodes in the tree, degree > 1", "# That is, they appear in more than one edge", "for", "e1", ",", "e2", "in", "self", ".", "edges", ":", "if", "e1", "not", "in", "seen", ":", "seen", ".", "add", "(", "e1", ")", "else", ":", "self", ".", "branches", ".", "add", "(", "e1", ")", "if", "e2", "not", "in", "seen", ":", "seen", ".", "add", "(", "e2", ")", "else", ":", "self", ".", "branches", ".", "add", "(", "e2", ")", "if", "self", ".", "debug", ":", "end", "=", "time", ".", "clock", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"%f s\\n\"", "%", "(", "end", "-", "start", ")", ")" ]
A helper function for determining all of the branches in the tree. This should be called after the tree has been fully constructed and its nodes and edges are populated.
[ "A", "helper", "function", "for", "determining", "all", "of", "the", "branches", "in", "the", "tree", ".", "This", "should", "be", "called", "after", "the", "tree", "has", "been", "fully", "constructed", "and", "its", "nodes", "and", "edges", "are", "populated", "." ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L144-L172
train
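The branch detection reduces to one pass over the edge list with a seen set: any endpoint encountered a second time has degree greater than one. A standalone sketch on a toy edge list:

edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)]

seen, branches = set(), set()
for e1, e2 in edges:
    for v in (e1, e2):
        if v in seen:
            branches.add(v)  # endpoint seen before -> branching node
        else:
            seen.add(v)
print(branches)  # {2, 3}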
maljovec/topopy
topopy/ContourTree.py
ContourTree._identifySuperGraph
def _identifySuperGraph(self): """ A helper function for determining the condensed representation of the tree. That is, one that does not hold all of the internal nodes of the graph. The results will be stored in ContourTree.superNodes and ContourTree.superArcs. These two can be used to potentially speed up queries by limiting the searching on the graph to only nodes on these super arcs. """ if self.debug: sys.stdout.write("Condensing Graph: ") start = time.clock() G = nx.DiGraph() G.add_edges_from(self.edges) if self.short_circuit: self.superNodes = G.nodes() self.superArcs = G.edges() # There should be a way to populate this from the data we # have... return self.augmentedEdges = {} N = len(self.Y) processed = np.zeros(N) for node in range(N): # We can short circuit this here, since some of the nodes # will be handled within the while loops below. if processed[node]: continue # Loop through each internal node (see if below for # determining what is internal), trace up and down to a # node's first non-internal node in either direction # removing all of the internal nodes and pushing them into a # list. This list (removedNodes) will be put into a # dictionary keyed on the endpoints of the final super arc. if G.in_degree(node) == 1 and G.out_degree(node) == 1: # The sorted list of nodes that will be condensed by # this super arc removedNodes = [] # Trace down to a non-internal node lower_link = list(G.in_edges(node))[0][0] while ( G.in_degree(lower_link) == 1 and G.out_degree(lower_link) == 1 ): new_lower_link = list(G.in_edges(lower_link))[0][0] G.add_edge(new_lower_link, node) G.remove_node(lower_link) removedNodes.append(lower_link) lower_link = new_lower_link removedNodes.reverse() removedNodes.append(node) # Trace up to a non-internal node upper_link = list(G.out_edges(node))[0][1] while ( G.in_degree(upper_link) == 1 and G.out_degree(upper_link) == 1 ): new_upper_link = list(G.out_edges(upper_link))[0][1] G.add_edge(node, new_upper_link) G.remove_node(upper_link) removedNodes.append(upper_link) upper_link = new_upper_link G.add_edge(lower_link, upper_link) G.remove_node(node) self.augmentedEdges[(lower_link, upper_link)] = removedNodes # This is to help speed up the process by skipping nodes # we have already condensed, and to prevent us from not # being able to find nodes that have already been # removed. processed[removedNodes] = 1 self.superNodes = G.nodes() self.superArcs = G.edges() if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start))
python
def _identifySuperGraph(self): """ A helper function for determining the condensed representation of the tree. That is, one that does not hold all of the internal nodes of the graph. The results will be stored in ContourTree.superNodes and ContourTree.superArcs. These two can be used to potentially speed up queries by limiting the searching on the graph to only nodes on these super arcs. """ if self.debug: sys.stdout.write("Condensing Graph: ") start = time.clock() G = nx.DiGraph() G.add_edges_from(self.edges) if self.short_circuit: self.superNodes = G.nodes() self.superArcs = G.edges() # There should be a way to populate this from the data we # have... return self.augmentedEdges = {} N = len(self.Y) processed = np.zeros(N) for node in range(N): # We can short circuit this here, since some of the nodes # will be handled within the while loops below. if processed[node]: continue # Loop through each internal node (see if below for # determining what is internal), trace up and down to a # node's first non-internal node in either direction # removing all of the internal nodes and pushing them into a # list. This list (removedNodes) will be put into a # dictionary keyed on the endpoints of the final super arc. if G.in_degree(node) == 1 and G.out_degree(node) == 1: # The sorted list of nodes that will be condensed by # this super arc removedNodes = [] # Trace down to a non-internal node lower_link = list(G.in_edges(node))[0][0] while ( G.in_degree(lower_link) == 1 and G.out_degree(lower_link) == 1 ): new_lower_link = list(G.in_edges(lower_link))[0][0] G.add_edge(new_lower_link, node) G.remove_node(lower_link) removedNodes.append(lower_link) lower_link = new_lower_link removedNodes.reverse() removedNodes.append(node) # Trace up to a non-internal node upper_link = list(G.out_edges(node))[0][1] while ( G.in_degree(upper_link) == 1 and G.out_degree(upper_link) == 1 ): new_upper_link = list(G.out_edges(upper_link))[0][1] G.add_edge(node, new_upper_link) G.remove_node(upper_link) removedNodes.append(upper_link) upper_link = new_upper_link G.add_edge(lower_link, upper_link) G.remove_node(node) self.augmentedEdges[(lower_link, upper_link)] = removedNodes # This is to help speed up the process by skipping nodes # we have already condensed, and to prevent us from not # being able to find nodes that have already been # removed. processed[removedNodes] = 1 self.superNodes = G.nodes() self.superArcs = G.edges() if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start))
[ "def", "_identifySuperGraph", "(", "self", ")", ":", "if", "self", ".", "debug", ":", "sys", ".", "stdout", ".", "write", "(", "\"Condensing Graph: \"", ")", "start", "=", "time", ".", "clock", "(", ")", "G", "=", "nx", ".", "DiGraph", "(", ")", "G", ".", "add_edges_from", "(", "self", ".", "edges", ")", "if", "self", ".", "short_circuit", ":", "self", ".", "superNodes", "=", "G", ".", "nodes", "(", ")", "self", ".", "superArcs", "=", "G", ".", "edges", "(", ")", "# There should be a way to populate this from the data we", "# have...", "return", "self", ".", "augmentedEdges", "=", "{", "}", "N", "=", "len", "(", "self", ".", "Y", ")", "processed", "=", "np", ".", "zeros", "(", "N", ")", "for", "node", "in", "range", "(", "N", ")", ":", "# We can short circuit this here, since some of the nodes", "# will be handled within the while loops below.", "if", "processed", "[", "node", "]", ":", "continue", "# Loop through each internal node (see if below for", "# determining what is internal), trace up and down to a", "# node's first non-internal node in either direction", "# removing all of the internal nodes and pushing them into a", "# list. This list (removedNodes) will be put into a", "# dictionary keyed on the endpoints of the final super arc.", "if", "G", ".", "in_degree", "(", "node", ")", "==", "1", "and", "G", ".", "out_degree", "(", "node", ")", "==", "1", ":", "# The sorted list of nodes that will be condensed by", "# this super arc", "removedNodes", "=", "[", "]", "# Trace down to a non-internal node", "lower_link", "=", "list", "(", "G", ".", "in_edges", "(", "node", ")", ")", "[", "0", "]", "[", "0", "]", "while", "(", "G", ".", "in_degree", "(", "lower_link", ")", "==", "1", "and", "G", ".", "out_degree", "(", "lower_link", ")", "==", "1", ")", ":", "new_lower_link", "=", "list", "(", "G", ".", "in_edges", "(", "lower_link", ")", ")", "[", "0", "]", "[", "0", "]", "G", ".", "add_edge", "(", "new_lower_link", ",", "node", ")", "G", ".", "remove_node", "(", "lower_link", ")", "removedNodes", ".", "append", "(", "lower_link", ")", "lower_link", "=", "new_lower_link", "removedNodes", ".", "reverse", "(", ")", "removedNodes", ".", "append", "(", "node", ")", "# Trace up to a non-internal node", "upper_link", "=", "list", "(", "G", ".", "out_edges", "(", "node", ")", ")", "[", "0", "]", "[", "1", "]", "while", "(", "G", ".", "in_degree", "(", "upper_link", ")", "==", "1", "and", "G", ".", "out_degree", "(", "upper_link", ")", "==", "1", ")", ":", "new_upper_link", "=", "list", "(", "G", ".", "out_edges", "(", "upper_link", ")", ")", "[", "0", "]", "[", "1", "]", "G", ".", "add_edge", "(", "node", ",", "new_upper_link", ")", "G", ".", "remove_node", "(", "upper_link", ")", "removedNodes", ".", "append", "(", "upper_link", ")", "upper_link", "=", "new_upper_link", "G", ".", "add_edge", "(", "lower_link", ",", "upper_link", ")", "G", ".", "remove_node", "(", "node", ")", "self", ".", "augmentedEdges", "[", "(", "lower_link", ",", "upper_link", ")", "]", "=", "removedNodes", "# This is to help speed up the process by skipping nodes", "# we have already condensed, and to prevent us from not", "# being able to find nodes that have already been", "# removed.", "processed", "[", "removedNodes", "]", "=", "1", "self", ".", "superNodes", "=", "G", ".", "nodes", "(", ")", "self", ".", "superArcs", "=", "G", ".", "edges", "(", ")", "if", "self", ".", "debug", ":", "end", "=", "time", ".", "clock", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"%f s\\n\"", "%", "(", "end", "-", "start", ")", 
")" ]
A helper function for determining the condensed representation of the tree. That is, one that does not hold all of the internal nodes of the graph. The results will be stored in ContourTree.superNodes and ContourTree.superArcs. These two can be used to potentially speed up queries by limiting the searching on the graph to only nodes on these super arcs.
[ "A", "helper", "function", "for", "determining", "the", "condensed", "representation", "of", "the", "tree", ".", "That", "is", "one", "that", "does", "not", "hold", "all", "of", "the", "internal", "nodes", "of", "the", "graph", ".", "The", "results", "will", "be", "stored", "in", "ContourTree", ".", "superNodes", "and", "ContourTree", ".", "superArcs", ".", "These", "two", "can", "be", "used", "to", "potentially", "speed", "up", "queries", "by", "limiting", "the", "searching", "on", "the", "graph", "to", "only", "nodes", "on", "these", "super", "arcs", "." ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L174-L263
train
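The condensation step suppresses every node with in-degree and out-degree one, remembering the removed nodes per super arc. A simplified networkx sketch of a single suppression (the real method also walks whole chains up and down before gluing):

import networkx as nx

G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (2, 4)])  # node 1 is internal

augmented_edges = {}
for node in list(G.nodes()):
    if G.in_degree(node) == 1 and G.out_degree(node) == 1:
        lower = next(iter(G.in_edges(node)))[0]
        upper = next(iter(G.out_edges(node)))[1]
        G.add_edge(lower, upper)
        G.remove_node(node)
        augmented_edges[(lower, upper)] = [node]

print(sorted(G.edges()), augmented_edges)
# [(0, 2), (2, 3), (2, 4)] {(0, 2): [1]}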
maljovec/topopy
topopy/ContourTree.py
ContourTree.get_seeds
def get_seeds(self, threshold): """ Returns a list of seed points for isosurface extraction given a threshold value @ In, threshold, float, the isovalue for which we want to identify seed points for isosurface extraction """ seeds = [] for e1, e2 in self.superArcs: # Because we did some extra work in _process_tree, we can # safely assume e1 is lower than e2 if self.Y[e1] <= threshold <= self.Y[e2]: if (e1, e2) in self.augmentedEdges: # These should be sorted edgeList = self.augmentedEdges[(e1, e2)] elif (e2, e1) in self.augmentedEdges: e1, e2 = e2, e1 # These should be reverse sorted edgeList = list(reversed(self.augmentedEdges[(e1, e2)])) else: continue startNode = e1 for endNode in edgeList + [e2]: if self.Y[endNode] >= threshold: # Stop when you find the first point above the # threshold break startNode = endNode seeds.append(startNode) seeds.append(endNode) return seeds
python
def get_seeds(self, threshold): """ Returns a list of seed points for isosurface extraction given a threshold value @ In, threshold, float, the isovalue for which we want to identify seed points for isosurface extraction """ seeds = [] for e1, e2 in self.superArcs: # Because we did some extra work in _process_tree, we can # safely assume e1 is lower than e2 if self.Y[e1] <= threshold <= self.Y[e2]: if (e1, e2) in self.augmentedEdges: # These should be sorted edgeList = self.augmentedEdges[(e1, e2)] elif (e2, e1) in self.augmentedEdges: e1, e2 = e2, e1 # These should be reverse sorted edgeList = list(reversed(self.augmentedEdges[(e1, e2)])) else: continue startNode = e1 for endNode in edgeList + [e2]: if self.Y[endNode] >= threshold: # Stop when you find the first point above the # threshold break startNode = endNode seeds.append(startNode) seeds.append(endNode) return seeds
[ "def", "get_seeds", "(", "self", ",", "threshold", ")", ":", "seeds", "=", "[", "]", "for", "e1", ",", "e2", "in", "self", ".", "superArcs", ":", "# Because we did some extra work in _process_tree, we can", "# safely assume e1 is lower than e2", "if", "self", ".", "Y", "[", "e1", "]", "<=", "threshold", "<=", "self", ".", "Y", "[", "e2", "]", ":", "if", "(", "e1", ",", "e2", ")", "in", "self", ".", "augmentedEdges", ":", "# These should be sorted", "edgeList", "=", "self", ".", "augmentedEdges", "[", "(", "e1", ",", "e2", ")", "]", "elif", "(", "e2", ",", "e1", ")", "in", "self", ".", "augmentedEdges", ":", "e1", ",", "e2", "=", "e2", ",", "e1", "# These should be reverse sorted", "edgeList", "=", "list", "(", "reversed", "(", "self", ".", "augmentedEdges", "[", "(", "e1", ",", "e2", ")", "]", ")", ")", "else", ":", "continue", "startNode", "=", "e1", "for", "endNode", "in", "edgeList", "+", "[", "e2", "]", ":", "if", "self", ".", "Y", "[", "endNode", "]", ">=", "threshold", ":", "# Stop when you find the first point above the", "# threshold", "break", "startNode", "=", "endNode", "seeds", ".", "append", "(", "startNode", ")", "seeds", ".", "append", "(", "endNode", ")", "return", "seeds" ]
Returns a list of seed points for isosurface extraction given a threshold value @ In, threshold, float, the isovalue for which we want to identify seed points for isosurface extraction
[ "Returns", "a", "list", "of", "seed", "points", "for", "isosurface", "extraction", "given", "a", "threshold", "value" ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L265-L296
train
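Seed selection walks the augmented nodes along a crossing super arc and keeps the last node below the threshold together with the first node at or above it. A standalone sketch of that scan:

Y = {0: 0.1, 5: 0.4, 6: 0.7, 1: 0.9}  # node -> scalar value
e1, e2 = 0, 1                          # super arc endpoints, e1 below e2
edge_list = [5, 6]                     # augmented interior nodes, sorted by value
threshold = 0.5

start_node = e1
for end_node in edge_list + [e2]:
    if Y[end_node] >= threshold:
        break                          # first node at or above the isovalue
    start_node = end_node
print(start_node, end_node)  # 5 6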
maljovec/topopy
topopy/ContourTree.py
ContourTree._construct_nx_tree
def _construct_nx_tree(self, thisTree, thatTree=None): """ A function for creating networkx instances that can be used more efficiently for graph manipulation than the MergeTree class. @ In, thisTree, a MergeTree instance for which we will construct a networkx graph @ In, thatTree, a MergeTree instance optionally used to speed up the processing by bypassing the fully augmented search and only focusing on the partially augmented split and join trees @ Out, nxTree, a networkx.Graph instance matching the details of the input tree. """ if self.debug: sys.stdout.write("Networkx Tree construction: ") start = time.clock() nxTree = nx.DiGraph() nxTree.add_edges_from(thisTree.edges) nodesOfThatTree = [] if thatTree is not None: nodesOfThatTree = thatTree.nodes.keys() # Fully or partially augment the join tree for (superNode, _), nodes in thisTree.augmentedEdges.items(): superNodeEdge = list(nxTree.out_edges(superNode)) if len(superNodeEdge) > 1: warnings.warn( "The supernode {} should have only a single " "emanating edge. Merge tree is invalidly " "structured".format(superNode) ) endNode = superNodeEdge[0][1] startNode = superNode nxTree.remove_edge(startNode, endNode) for node in nodes: if thatTree is None or node in nodesOfThatTree: nxTree.add_edge(startNode, node) startNode = node # Make sure this is not the root node trying to connect to # itself if startNode != endNode: nxTree.add_edge(startNode, endNode) if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start)) return nxTree
python
def _construct_nx_tree(self, thisTree, thatTree=None): """ A function for creating networkx instances that can be used more efficiently for graph manipulation than the MergeTree class. @ In, thisTree, a MergeTree instance for which we will construct a networkx graph @ In, thatTree, a MergeTree instance optionally used to speed up the processing by bypassing the fully augmented search and only focusing on the partially augmented split and join trees @ Out, nxTree, a networkx.Graph instance matching the details of the input tree. """ if self.debug: sys.stdout.write("Networkx Tree construction: ") start = time.clock() nxTree = nx.DiGraph() nxTree.add_edges_from(thisTree.edges) nodesOfThatTree = [] if thatTree is not None: nodesOfThatTree = thatTree.nodes.keys() # Fully or partially augment the join tree for (superNode, _), nodes in thisTree.augmentedEdges.items(): superNodeEdge = list(nxTree.out_edges(superNode)) if len(superNodeEdge) > 1: warnings.warn( "The supernode {} should have only a single " "emanating edge. Merge tree is invalidly " "structured".format(superNode) ) endNode = superNodeEdge[0][1] startNode = superNode nxTree.remove_edge(startNode, endNode) for node in nodes: if thatTree is None or node in nodesOfThatTree: nxTree.add_edge(startNode, node) startNode = node # Make sure this is not the root node trying to connect to # itself if startNode != endNode: nxTree.add_edge(startNode, endNode) if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start)) return nxTree
[ "def", "_construct_nx_tree", "(", "self", ",", "thisTree", ",", "thatTree", "=", "None", ")", ":", "if", "self", ".", "debug", ":", "sys", ".", "stdout", ".", "write", "(", "\"Networkx Tree construction: \"", ")", "start", "=", "time", ".", "clock", "(", ")", "nxTree", "=", "nx", ".", "DiGraph", "(", ")", "nxTree", ".", "add_edges_from", "(", "thisTree", ".", "edges", ")", "nodesOfThatTree", "=", "[", "]", "if", "thatTree", "is", "not", "None", ":", "nodesOfThatTree", "=", "thatTree", ".", "nodes", ".", "keys", "(", ")", "# Fully or partially augment the join tree", "for", "(", "superNode", ",", "_", ")", ",", "nodes", "in", "thisTree", ".", "augmentedEdges", ".", "items", "(", ")", ":", "superNodeEdge", "=", "list", "(", "nxTree", ".", "out_edges", "(", "superNode", ")", ")", "if", "len", "(", "superNodeEdge", ")", ">", "1", ":", "warnings", ".", "warn", "(", "\"The supernode {} should have only a single \"", "\"emanating edge. Merge tree is invalidly \"", "\"structured\"", ".", "format", "(", "superNode", ")", ")", "endNode", "=", "superNodeEdge", "[", "0", "]", "[", "1", "]", "startNode", "=", "superNode", "nxTree", ".", "remove_edge", "(", "startNode", ",", "endNode", ")", "for", "node", "in", "nodes", ":", "if", "thatTree", "is", "None", "or", "node", "in", "nodesOfThatTree", ":", "nxTree", ".", "add_edge", "(", "startNode", ",", "node", ")", "startNode", "=", "node", "# Make sure this is not the root node trying to connect to", "# itself", "if", "startNode", "!=", "endNode", ":", "nxTree", ".", "add_edge", "(", "startNode", ",", "endNode", ")", "if", "self", ".", "debug", ":", "end", "=", "time", ".", "clock", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"%f s\\n\"", "%", "(", "end", "-", "start", ")", ")", "return", "nxTree" ]
A function for creating networkx instances that can be used more efficiently for graph manipulation than the MergeTree class. @ In, thisTree, a MergeTree instance for which we will construct a networkx graph @ In, thatTree, a MergeTree instance optionally used to speed up the processing by bypassing the fully augmented search and only focusing on the partially augmented split and join trees @ Out, nxTree, a networkx.Graph instance matching the details of the input tree.
[ "A", "function", "for", "creating", "networkx", "instances", "that", "can", "be", "used", "more", "efficiently", "for", "graph", "manipulation", "than", "the", "MergeTree", "class", "." ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L298-L348
train
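Re-augmenting an edge means splicing the saved interior nodes back between a super node and its successor. A toy networkx sketch of that splice, without the thatTree filtering:

import networkx as nx

tree = nx.DiGraph([(0, 3)])
augmented_edges = {(0, 3): [1, 2]}  # interior nodes recorded for the arc 0 -> 3

for (super_node, _), nodes in augmented_edges.items():
    end_node = next(iter(tree.out_edges(super_node)))[1]
    tree.remove_edge(super_node, end_node)
    start = super_node
    for node in nodes:
        tree.add_edge(start, node)
        start = node
    if start != end_node:  # guard against the root reconnecting to itself
        tree.add_edge(start, end_node)

print(sorted(tree.edges()))  # [(0, 1), (1, 2), (2, 3)]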
maljovec/topopy
topopy/ContourTree.py
ContourTree._process_tree
def _process_tree(self, thisTree, thatTree): """ A function that will process either a split or join tree with reference to the other tree and store it as part of this CT instance. @ In, thisTree, a networkx.Graph instance representing a merge tree for which we will process all of its leaf nodes into this CT object @ In, thatTree, a networkx.Graph instance representing the opposing merge tree which will need to be updated as nodes from thisTree are processed @ Out, None """ if self.debug: sys.stdout.write("Processing Tree: ") start = time.clock() # Get all of the leaf nodes that are not branches in the other # tree if len(thisTree.nodes()) > 1: leaves = set( [ v for v in thisTree.nodes() if thisTree.in_degree(v) == 0 and thatTree.in_degree(v) < 2 ] ) else: leaves = set() while len(leaves) > 0: v = leaves.pop() # if self.debug: # sys.stdout.write('\tProcessing {} -> {}\n' # .format(v, thisTree.edges(v)[0][1])) # Take the leaf and edge out of the input tree and place it # on the CT edges = list(thisTree.out_edges(v)) if len(edges) != 1: warnings.warn( "The node {} should have a single emanating " "edge.\n".format(v) ) e1 = edges[0][0] e2 = edges[0][1] # This may be a bit beside the point, but if we want all of # our edges pointing 'up,' we can verify that the edges we # add have the lower vertex pointing to the upper vertex. # This is useful only for nicely plotting with some graph # tools (graphviz/networkx), and I guess for consistency # sake. if self.Y[e1] < self.Y[e2]: self.edges.append((e1, e2)) else: self.edges.append((e2, e1)) # Removing the node will remove its constituent edges from # thisTree thisTree.remove_node(v) # This is the root node of the other tree if thatTree.out_degree(v) == 0: thatTree.remove_node(v) # if self.debug: # sys.stdout.write('\t\tRemoving root {} from other tree\n' # .format(v)) # This is a "regular" node in the other tree, suppress it # there, but be sure to glue the upper and lower portions # together else: # The other ends of the node being removed are added to # "that" tree if len(thatTree.in_edges(v)) > 0: startNode = list(thatTree.in_edges(v))[0][0] else: # This means we are at the root of the other tree, # we can safely remove this node without connecting # its predecessor with its descendant startNode = None if len(thatTree.out_edges(v)) > 0: endNode = list(thatTree.out_edges(v))[0][1] else: # This means we are at a leaf of the other tree, # we can safely remove this node without connecting # its predecessor with its descendant endNode = None if startNode is not None and endNode is not None: thatTree.add_edge(startNode, endNode) thatTree.remove_node(v) # if self.debug: # sys.stdout.write('\t\tSuppressing {} in other tree and ' # 'gluing {} to {}\n' # .format(v, startNode, endNode)) if len(thisTree.nodes()) > 1: leaves = set( [ v for v in thisTree.nodes() if thisTree.in_degree(v) == 0 and thatTree.in_degree(v) < 2 ] ) else: leaves = set() # if self.debug: # myMessage = '\t\tValid leaves: ' # sep = '' # for leaf in leaves: # myMessage += sep + str(leaf) # sep = ',' # sys.stdout.write(myMessage+'\n') if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start))
python
def _process_tree(self, thisTree, thatTree): """ A function that will process either a split or join tree with reference to the other tree and store it as part of this CT instance. @ In, thisTree, a networkx.Graph instance representing a merge tree for which we will process all of its leaf nodes into this CT object @ In, thatTree, a networkx.Graph instance representing the opposing merge tree which will need to be updated as nodes from thisTree are processed @ Out, None """ if self.debug: sys.stdout.write("Processing Tree: ") start = time.clock() # Get all of the leaf nodes that are not branches in the other # tree if len(thisTree.nodes()) > 1: leaves = set( [ v for v in thisTree.nodes() if thisTree.in_degree(v) == 0 and thatTree.in_degree(v) < 2 ] ) else: leaves = set() while len(leaves) > 0: v = leaves.pop() # if self.debug: # sys.stdout.write('\tProcessing {} -> {}\n' # .format(v, thisTree.edges(v)[0][1])) # Take the leaf and edge out of the input tree and place it # on the CT edges = list(thisTree.out_edges(v)) if len(edges) != 1: warnings.warn( "The node {} should have a single emanating " "edge.\n".format(v) ) e1 = edges[0][0] e2 = edges[0][1] # This may be a bit beside the point, but if we want all of # our edges pointing 'up,' we can verify that the edges we # add have the lower vertex pointing to the upper vertex. # This is useful only for nicely plotting with some graph # tools (graphviz/networkx), and I guess for consistency # sake. if self.Y[e1] < self.Y[e2]: self.edges.append((e1, e2)) else: self.edges.append((e2, e1)) # Removing the node will remove its constituent edges from # thisTree thisTree.remove_node(v) # This is the root node of the other tree if thatTree.out_degree(v) == 0: thatTree.remove_node(v) # if self.debug: # sys.stdout.write('\t\tRemoving root {} from other tree\n' # .format(v)) # This is a "regular" node in the other tree, suppress it # there, but be sure to glue the upper and lower portions # together else: # The other ends of the node being removed are added to # "that" tree if len(thatTree.in_edges(v)) > 0: startNode = list(thatTree.in_edges(v))[0][0] else: # This means we are at the root of the other tree, # we can safely remove this node without connecting # its predecessor with its descendant startNode = None if len(thatTree.out_edges(v)) > 0: endNode = list(thatTree.out_edges(v))[0][1] else: # This means we are at a leaf of the other tree, # we can safely remove this node without connecting # its predecessor with its descendant endNode = None if startNode is not None and endNode is not None: thatTree.add_edge(startNode, endNode) thatTree.remove_node(v) # if self.debug: # sys.stdout.write('\t\tSuppressing {} in other tree and ' # 'gluing {} to {}\n' # .format(v, startNode, endNode)) if len(thisTree.nodes()) > 1: leaves = set( [ v for v in thisTree.nodes() if thisTree.in_degree(v) == 0 and thatTree.in_degree(v) < 2 ] ) else: leaves = set() # if self.debug: # myMessage = '\t\tValid leaves: ' # sep = '' # for leaf in leaves: # myMessage += sep + str(leaf) # sep = ',' # sys.stdout.write(myMessage+'\n') if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start))
[ "def", "_process_tree", "(", "self", ",", "thisTree", ",", "thatTree", ")", ":", "if", "self", ".", "debug", ":", "sys", ".", "stdout", ".", "write", "(", "\"Processing Tree: \"", ")", "start", "=", "time", ".", "clock", "(", ")", "# Get all of the leaf nodes that are not branches in the other", "# tree", "if", "len", "(", "thisTree", ".", "nodes", "(", ")", ")", ">", "1", ":", "leaves", "=", "set", "(", "[", "v", "for", "v", "in", "thisTree", ".", "nodes", "(", ")", "if", "thisTree", ".", "in_degree", "(", "v", ")", "==", "0", "and", "thatTree", ".", "in_degree", "(", "v", ")", "<", "2", "]", ")", "else", ":", "leaves", "=", "set", "(", ")", "while", "len", "(", "leaves", ")", ">", "0", ":", "v", "=", "leaves", ".", "pop", "(", ")", "# if self.debug:", "# sys.stdout.write('\\tProcessing {} -> {}\\n'", "# .format(v, thisTree.edges(v)[0][1]))", "# Take the leaf and edge out of the input tree and place it", "# on the CT", "edges", "=", "list", "(", "thisTree", ".", "out_edges", "(", "v", ")", ")", "if", "len", "(", "edges", ")", "!=", "1", ":", "warnings", ".", "warn", "(", "\"The node {} should have a single emanating \"", "\"edge.\\n\"", ".", "format", "(", "v", ")", ")", "e1", "=", "edges", "[", "0", "]", "[", "0", "]", "e2", "=", "edges", "[", "0", "]", "[", "1", "]", "# This may be a bit beside the point, but if we want all of", "# our edges pointing 'up,' we can verify that the edges we", "# add have the lower vertex pointing to the upper vertex.", "# This is useful only for nicely plotting with some graph", "# tools (graphviz/networkx), and I guess for consistency", "# sake.", "if", "self", ".", "Y", "[", "e1", "]", "<", "self", ".", "Y", "[", "e2", "]", ":", "self", ".", "edges", ".", "append", "(", "(", "e1", ",", "e2", ")", ")", "else", ":", "self", ".", "edges", ".", "append", "(", "(", "e2", ",", "e1", ")", ")", "# Removing the node will remove its constituent edges from", "# thisTree", "thisTree", ".", "remove_node", "(", "v", ")", "# This is the root node of the other tree", "if", "thatTree", ".", "out_degree", "(", "v", ")", "==", "0", ":", "thatTree", ".", "remove_node", "(", "v", ")", "# if self.debug:", "# sys.stdout.write('\\t\\tRemoving root {} from other tree\\n'", "# .format(v))", "# This is a \"regular\" node in the other tree, suppress it", "# there, but be sure to glue the upper and lower portions", "# together", "else", ":", "# The other ends of the node being removed are added to", "# \"that\" tree", "if", "len", "(", "thatTree", ".", "in_edges", "(", "v", ")", ")", ">", "0", ":", "startNode", "=", "list", "(", "thatTree", ".", "in_edges", "(", "v", ")", ")", "[", "0", "]", "[", "0", "]", "else", ":", "# This means we are at the root of the other tree,", "# we can safely remove this node without connecting", "# its predecessor with its descendant", "startNode", "=", "None", "if", "len", "(", "thatTree", ".", "out_edges", "(", "v", ")", ")", ">", "0", ":", "endNode", "=", "list", "(", "thatTree", ".", "out_edges", "(", "v", ")", ")", "[", "0", "]", "[", "1", "]", "else", ":", "# This means we are at a leaf of the other tree,", "# we can safely remove this node without connecting", "# its predecessor with its descendant", "endNode", "=", "None", "if", "startNode", "is", "not", "None", "and", "endNode", "is", "not", "None", ":", "thatTree", ".", "add_edge", "(", "startNode", ",", "endNode", ")", "thatTree", ".", "remove_node", "(", "v", ")", "# if self.debug:", "# sys.stdout.write('\\t\\tSuppressing {} in other tree and '", "# 'gluing {} to {}\\n'", "# .format(v, 
startNode, endNode))", "if", "len", "(", "thisTree", ".", "nodes", "(", ")", ")", ">", "1", ":", "leaves", "=", "set", "(", "[", "v", "for", "v", "in", "thisTree", ".", "nodes", "(", ")", "if", "thisTree", ".", "in_degree", "(", "v", ")", "==", "0", "and", "thatTree", ".", "in_degree", "(", "v", ")", "<", "2", "]", ")", "else", ":", "leaves", "=", "set", "(", ")", "# if self.debug:", "# myMessage = '\\t\\tValid leaves: '", "# sep = ''", "# for leaf in leaves:", "# myMessage += sep + str(leaf)", "# sep = ','", "# sys.stdout.write(myMessage+'\\n')", "if", "self", ".", "debug", ":", "end", "=", "time", ".", "clock", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"%f s\\n\"", "%", "(", "end", "-", "start", ")", ")" ]
A function that will process either a split or join tree with reference
to the other tree and store it as part of this CT instance.

@ In, thisTree, a networkx.Graph instance representing a merge tree for
which we will process all of its leaf nodes into this CT object
@ In, thatTree, a networkx.Graph instance representing the opposing
merge tree which will need to be updated as nodes from thisTree are
processed
@ Out, None
[ "A", "function", "that", "will", "process", "either", "a", "split", "or", "join", "tree", "with", "reference", "to", "the", "other", "tree", "and", "store", "it", "as", "part", "of", "this", "CT", "instance", "." ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L350-L472
train
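A minimal sketch of the leaf-selection rule used by `_process_tree` above, assuming two networkx digraphs whose edges point from child toward the root; the graph names and values here are illustrative, not taken from topopy:

import networkx as nx

join = nx.DiGraph()   # hypothetical join tree
join.add_edges_from([(0, 2), (1, 2), (2, 3)])
split = nx.DiGraph()  # hypothetical split tree
split.add_edges_from([(3, 2), (2, 1), (2, 0)])

# A node is processable when it is a leaf of this tree (in_degree == 0)
# and not a branch point of the other tree (in_degree < 2), exactly the
# predicate computed at the top of the loop above.
leaves = set(v for v in join.nodes()
             if join.in_degree(v) == 0 and split.in_degree(v) < 2)
print(leaves)  # {0, 1}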
lsst-sqre/documenteer
documenteer/sphinxconfig/utils.py
read_git_branch
def read_git_branch():
    """Obtain the current branch name from the Git repository.

    If on Travis CI, use the ``TRAVIS_BRANCH`` environment variable.
    """
    if os.getenv('TRAVIS'):
        return os.getenv('TRAVIS_BRANCH')
    else:
        try:
            repo = git.repo.base.Repo(search_parent_directories=True)
            return repo.active_branch.name
        except Exception:
            return ''
python
def read_git_branch():
    """Obtain the current branch name from the Git repository.

    If on Travis CI, use the ``TRAVIS_BRANCH`` environment variable.
    """
    if os.getenv('TRAVIS'):
        return os.getenv('TRAVIS_BRANCH')
    else:
        try:
            repo = git.repo.base.Repo(search_parent_directories=True)
            return repo.active_branch.name
        except Exception:
            return ''
[ "def", "read_git_branch", "(", ")", ":", "if", "os", ".", "getenv", "(", "'TRAVIS'", ")", ":", "return", "os", ".", "getenv", "(", "'TRAVIS_BRANCH'", ")", "else", ":", "try", ":", "repo", "=", "git", ".", "repo", ".", "base", ".", "Repo", "(", "search_parent_directories", "=", "True", ")", "return", "repo", ".", "active_branch", ".", "name", "except", "Exception", ":", "return", "''" ]
Obtain the current branch name from the Git repository. If on Travis CI, use the ``TRAVIS_BRANCH`` environment variable.
[ "Obtain", "the", "current", "branch", "name", "from", "the", "Git", "repository", ".", "If", "on", "Travis", "CI", "use", "the", "TRAVIS_BRANCH", "environment", "variable", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L17-L28
train
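For context, a short usage sketch; the branch name shown is hypothetical:

from documenteer.sphinxconfig.utils import read_git_branch

# Inside a Git checkout (or on Travis CI with TRAVIS_BRANCH set), this
# returns the branch name, or '' if no repository can be found.
print(read_git_branch())  # e.g. 'tickets/DM-12345'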
lsst-sqre/documenteer
documenteer/sphinxconfig/utils.py
read_git_commit_timestamp
def read_git_commit_timestamp(repo_path=None):
    """Obtain the timestamp from the current head commit of a Git repository.

    Parameters
    ----------
    repo_path : `str`, optional
        Path to the Git repository. Leave as `None` to use the current
        working directory.

    Returns
    -------
    commit_timestamp : `datetime.datetime`
        The datetime of the head commit.
    """
    repo = git.repo.base.Repo(path=repo_path,
                              search_parent_directories=True)
    head_commit = repo.head.commit
    return head_commit.committed_datetime
python
def read_git_commit_timestamp(repo_path=None):
    """Obtain the timestamp from the current head commit of a Git repository.

    Parameters
    ----------
    repo_path : `str`, optional
        Path to the Git repository. Leave as `None` to use the current
        working directory.

    Returns
    -------
    commit_timestamp : `datetime.datetime`
        The datetime of the head commit.
    """
    repo = git.repo.base.Repo(path=repo_path,
                              search_parent_directories=True)
    head_commit = repo.head.commit
    return head_commit.committed_datetime
[ "def", "read_git_commit_timestamp", "(", "repo_path", "=", "None", ")", ":", "repo", "=", "git", ".", "repo", ".", "base", ".", "Repo", "(", "path", "=", "repo_path", ",", "search_parent_directories", "=", "True", ")", "head_commit", "=", "repo", ".", "head", ".", "commit", "return", "head_commit", ".", "committed_datetime" ]
Obtain the timestamp from the current head commit of a Git repository.

Parameters
----------
repo_path : `str`, optional
    Path to the Git repository. Leave as `None` to use the current
    working directory.

Returns
-------
commit_timestamp : `datetime.datetime`
    The datetime of the head commit.
[ "Obtain", "the", "timestamp", "from", "the", "current", "head", "commit", "of", "a", "Git", "repository", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L31-L47
train
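A quick usage sketch, assuming the call is made from inside a Git checkout:

from documenteer.sphinxconfig.utils import read_git_commit_timestamp

# Datetime of the head commit of the enclosing repository.
print(read_git_commit_timestamp().isoformat())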
lsst-sqre/documenteer
documenteer/sphinxconfig/utils.py
read_git_commit_timestamp_for_file
def read_git_commit_timestamp_for_file(filepath, repo_path=None):
    """Obtain the timestamp for the most recent commit to a given file in
    a Git repository.

    Parameters
    ----------
    filepath : `str`
        Repository-relative path for a file.
    repo_path : `str`, optional
        Path to the Git repository. Leave as `None` to use the current
        working directory.

    Returns
    -------
    commit_timestamp : `datetime.datetime`
        The datetime of the most recent commit to the given file.

    Raises
    ------
    IOError
        Raised if the ``filepath`` does not exist in the Git repository.
    """
    repo = git.repo.base.Repo(path=repo_path,
                              search_parent_directories=True)
    head_commit = repo.head.commit
    # most recent commit datetime of the given file
    for commit in head_commit.iter_parents(filepath):
        return commit.committed_datetime
    # Only get here if git could not find the file path in the history
    raise IOError('File {} not found'.format(filepath))
python
def read_git_commit_timestamp_for_file(filepath, repo_path=None):
    """Obtain the timestamp for the most recent commit to a given file in
    a Git repository.

    Parameters
    ----------
    filepath : `str`
        Repository-relative path for a file.
    repo_path : `str`, optional
        Path to the Git repository. Leave as `None` to use the current
        working directory.

    Returns
    -------
    commit_timestamp : `datetime.datetime`
        The datetime of the most recent commit to the given file.

    Raises
    ------
    IOError
        Raised if the ``filepath`` does not exist in the Git repository.
    """
    repo = git.repo.base.Repo(path=repo_path,
                              search_parent_directories=True)
    head_commit = repo.head.commit
    # most recent commit datetime of the given file
    for commit in head_commit.iter_parents(filepath):
        return commit.committed_datetime
    # Only get here if git could not find the file path in the history
    raise IOError('File {} not found'.format(filepath))
[ "def", "read_git_commit_timestamp_for_file", "(", "filepath", ",", "repo_path", "=", "None", ")", ":", "repo", "=", "git", ".", "repo", ".", "base", ".", "Repo", "(", "path", "=", "repo_path", ",", "search_parent_directories", "=", "True", ")", "head_commit", "=", "repo", ".", "head", ".", "commit", "# most recent commit datetime of the given file", "for", "commit", "in", "head_commit", ".", "iter_parents", "(", "filepath", ")", ":", "return", "commit", ".", "committed_datetime", "# Only get here if git could not find the file path in the history", "raise", "IOError", "(", "'File {} not found'", ".", "format", "(", "filepath", ")", ")" ]
Obtain the timestamp for the most recent commit to a given file in
a Git repository.

Parameters
----------
filepath : `str`
    Repository-relative path for a file.
repo_path : `str`, optional
    Path to the Git repository. Leave as `None` to use the current
    working directory.

Returns
-------
commit_timestamp : `datetime.datetime`
    The datetime of the most recent commit to the given file.

Raises
------
IOError
    Raised if the ``filepath`` does not exist in the Git repository.
[ "Obtain", "the", "timestamp", "for", "the", "most", "recent", "commit", "to", "a", "given", "file", "in", "a", "Git", "repository", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L50-L80
train
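A hedged usage sketch; the file path is hypothetical:

from documenteer.sphinxconfig.utils import read_git_commit_timestamp_for_file

try:
    ts = read_git_commit_timestamp_for_file('index.rst')
    print(ts.isoformat())
except IOError:
    # Raised when the path has no commits in the repository's history.
    print('index.rst is not tracked by this repository')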
lsst-sqre/documenteer
documenteer/sphinxconfig/utils.py
get_filepaths_with_extension
def get_filepaths_with_extension(extname, root_dir='.'):
    """Get relative filepaths of files in a directory, and sub-directories,
    with the given extension.

    Parameters
    ----------
    extname : `str`
        Extension name (e.g. 'txt', 'rst'). Extension comparison is
        case-insensitive.
    root_dir : `str`, optional
        Root directory. Current working directory by default.

    Returns
    -------
    filepaths : `list` of `str`
        File paths, relative to ``root_dir``, with the given extension.
    """
    # needed for comparison with os.path.splitext
    if not extname.startswith('.'):
        extname = '.' + extname

    # for case-insensitivity
    extname = extname.lower()

    root_dir = os.path.abspath(root_dir)

    selected_filenames = []
    for dirname, sub_dirnames, filenames in os.walk(root_dir):
        for filename in filenames:
            if os.path.splitext(filename)[-1].lower() == extname:
                full_filename = os.path.join(dirname, filename)
                selected_filenames.append(
                    os.path.relpath(full_filename, start=root_dir))

    return selected_filenames
python
def get_filepaths_with_extension(extname, root_dir='.'):
    """Get relative filepaths of files in a directory, and sub-directories,
    with the given extension.

    Parameters
    ----------
    extname : `str`
        Extension name (e.g. 'txt', 'rst'). Extension comparison is
        case-insensitive.
    root_dir : `str`, optional
        Root directory. Current working directory by default.

    Returns
    -------
    filepaths : `list` of `str`
        File paths, relative to ``root_dir``, with the given extension.
    """
    # needed for comparison with os.path.splitext
    if not extname.startswith('.'):
        extname = '.' + extname

    # for case-insensitivity
    extname = extname.lower()

    root_dir = os.path.abspath(root_dir)

    selected_filenames = []
    for dirname, sub_dirnames, filenames in os.walk(root_dir):
        for filename in filenames:
            if os.path.splitext(filename)[-1].lower() == extname:
                full_filename = os.path.join(dirname, filename)
                selected_filenames.append(
                    os.path.relpath(full_filename, start=root_dir))

    return selected_filenames
[ "def", "get_filepaths_with_extension", "(", "extname", ",", "root_dir", "=", "'.'", ")", ":", "# needed for comparison with os.path.splitext", "if", "not", "extname", ".", "startswith", "(", "'.'", ")", ":", "extname", "=", "'.'", "+", "extname", "# for case-insensitivity", "extname", "=", "extname", ".", "lower", "(", ")", "root_dir", "=", "os", ".", "path", ".", "abspath", "(", "root_dir", ")", "selected_filenames", "=", "[", "]", "for", "dirname", ",", "sub_dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "root_dir", ")", ":", "for", "filename", "in", "filenames", ":", "if", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "-", "1", "]", ".", "lower", "(", ")", "==", "extname", ":", "full_filename", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "filename", ")", "selected_filenames", ".", "append", "(", "os", ".", "path", ".", "relpath", "(", "full_filename", ",", "start", "=", "root_dir", ")", ")", "return", "selected_filenames" ]
Get relative filepaths of files in a directory, and sub-directories,
with the given extension.

Parameters
----------
extname : `str`
    Extension name (e.g. 'txt', 'rst'). Extension comparison is
    case-insensitive.
root_dir : `str`, optional
    Root directory. Current working directory by default.

Returns
-------
filepaths : `list` of `str`
    File paths, relative to ``root_dir``, with the given extension.
[ "Get", "relative", "filepaths", "of", "files", "in", "a", "directory", "and", "sub", "-", "directories", "with", "the", "given", "extension", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L83-L116
train
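Usage sketch; note that the leading dot is optional and matching is case-insensitive:

from documenteer.sphinxconfig.utils import get_filepaths_with_extension

for path in get_filepaths_with_extension('rst', root_dir='.'):
    print(path)  # paths are relative to root_dir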
lsst-sqre/documenteer
documenteer/sphinxconfig/utils.py
get_project_content_commit_date
def get_project_content_commit_date(root_dir='.', exclusions=None):
    """Get the datetime for the most recent commit to a project that
    affected Sphinx content.

    *Content* is considered any file with one of these extensions:

    - ``rst`` (README.rst and LICENSE.rst are excluded)
    - ``ipynb``
    - ``png``
    - ``jpeg``
    - ``jpg``
    - ``svg``
    - ``gif``

    This function allows project infrastructure and configuration files to
    be updated without changing the timestamp.

    Parameters
    ----------
    root_dir : `str`, optional
        Root directory. This is the current working directory by default.
    exclusions : `list` of `str`, optional
        List of file paths or directory paths to ignore.

    Returns
    -------
    commit_date : `datetime.datetime`
        Datetime of the most recent content commit.

    Raises
    ------
    RuntimeError
        Raised if no content files are found.
    """
    logger = logging.getLogger(__name__)

    # Supported 'content' extensions
    extensions = ('rst', 'ipynb', 'png', 'jpeg', 'jpg', 'svg', 'gif')

    content_paths = []
    for extname in extensions:
        content_paths += get_filepaths_with_extension(
            extname, root_dir=root_dir)

    # Known files that should be excluded; lower case for comparison
    exclude = Matcher(exclusions if exclusions
                      else ['readme.rst', 'license.rst'])

    # filter out excluded files
    content_paths = [p for p in content_paths
                     if not (exclude(p)
                             or exclude(p.split(os.path.sep)[0]))]
    logger.debug('Found content paths: {}'.format(', '.join(content_paths)))

    if not content_paths:
        raise RuntimeError('No content files found in {}'.format(root_dir))

    commit_datetimes = []
    for filepath in content_paths:
        try:
            datetime = read_git_commit_timestamp_for_file(
                filepath, repo_path=root_dir)
            commit_datetimes.append(datetime)
        except IOError:
            logger.warning(
                'Could not get commit for {}, skipping'.format(filepath))

    if not commit_datetimes:
        raise RuntimeError('No content commits could be found')

    latest_datetime = max(commit_datetimes)

    return latest_datetime
python
def get_project_content_commit_date(root_dir='.', exclusions=None):
    """Get the datetime for the most recent commit to a project that
    affected Sphinx content.

    *Content* is considered any file with one of these extensions:

    - ``rst`` (README.rst and LICENSE.rst are excluded)
    - ``ipynb``
    - ``png``
    - ``jpeg``
    - ``jpg``
    - ``svg``
    - ``gif``

    This function allows project infrastructure and configuration files to
    be updated without changing the timestamp.

    Parameters
    ----------
    root_dir : `str`, optional
        Root directory. This is the current working directory by default.
    exclusions : `list` of `str`, optional
        List of file paths or directory paths to ignore.

    Returns
    -------
    commit_date : `datetime.datetime`
        Datetime of the most recent content commit.

    Raises
    ------
    RuntimeError
        Raised if no content files are found.
    """
    logger = logging.getLogger(__name__)

    # Supported 'content' extensions
    extensions = ('rst', 'ipynb', 'png', 'jpeg', 'jpg', 'svg', 'gif')

    content_paths = []
    for extname in extensions:
        content_paths += get_filepaths_with_extension(
            extname, root_dir=root_dir)

    # Known files that should be excluded; lower case for comparison
    exclude = Matcher(exclusions if exclusions
                      else ['readme.rst', 'license.rst'])

    # filter out excluded files
    content_paths = [p for p in content_paths
                     if not (exclude(p)
                             or exclude(p.split(os.path.sep)[0]))]
    logger.debug('Found content paths: {}'.format(', '.join(content_paths)))

    if not content_paths:
        raise RuntimeError('No content files found in {}'.format(root_dir))

    commit_datetimes = []
    for filepath in content_paths:
        try:
            datetime = read_git_commit_timestamp_for_file(
                filepath, repo_path=root_dir)
            commit_datetimes.append(datetime)
        except IOError:
            logger.warning(
                'Could not get commit for {}, skipping'.format(filepath))

    if not commit_datetimes:
        raise RuntimeError('No content commits could be found')

    latest_datetime = max(commit_datetimes)

    return latest_datetime
[ "def", "get_project_content_commit_date", "(", "root_dir", "=", "'.'", ",", "exclusions", "=", "None", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# Supported 'content' extensions", "extensions", "=", "(", "'rst'", ",", "'ipynb'", ",", "'png'", ",", "'jpeg'", ",", "'jpg'", ",", "'svg'", ",", "'gif'", ")", "content_paths", "=", "[", "]", "for", "extname", "in", "extensions", ":", "content_paths", "+=", "get_filepaths_with_extension", "(", "extname", ",", "root_dir", "=", "root_dir", ")", "# Known files that should be excluded; lower case for comparison", "exclude", "=", "Matcher", "(", "exclusions", "if", "exclusions", "else", "[", "'readme.rst'", ",", "'license.rst'", "]", ")", "# filter out excluded files", "content_paths", "=", "[", "p", "for", "p", "in", "content_paths", "if", "not", "(", "exclude", "(", "p", ")", "or", "exclude", "(", "p", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "[", "0", "]", ")", ")", "]", "logger", ".", "debug", "(", "'Found content paths: {}'", ".", "format", "(", "', '", ".", "join", "(", "content_paths", ")", ")", ")", "if", "not", "content_paths", ":", "raise", "RuntimeError", "(", "'No content files found in {}'", ".", "format", "(", "root_dir", ")", ")", "commit_datetimes", "=", "[", "]", "for", "filepath", "in", "content_paths", ":", "try", ":", "datetime", "=", "read_git_commit_timestamp_for_file", "(", "filepath", ",", "repo_path", "=", "root_dir", ")", "commit_datetimes", ".", "append", "(", "datetime", ")", "except", "IOError", ":", "logger", ".", "warning", "(", "'Could not get commit for {}, skipping'", ".", "format", "(", "filepath", ")", ")", "if", "not", "commit_datetimes", ":", "raise", "RuntimeError", "(", "'No content commits could be found'", ")", "latest_datetime", "=", "max", "(", "commit_datetimes", ")", "return", "latest_datetime" ]
Get the datetime for the most recent commit to a project that
affected Sphinx content.

*Content* is considered any file with one of these extensions:

- ``rst`` (README.rst and LICENSE.rst are excluded)
- ``ipynb``
- ``png``
- ``jpeg``
- ``jpg``
- ``svg``
- ``gif``

This function allows project infrastructure and configuration files to
be updated without changing the timestamp.

Parameters
----------
root_dir : `str`, optional
    Root directory. This is the current working directory by default.
exclusions : `list` of `str`, optional
    List of file paths or directory paths to ignore.

Returns
-------
commit_date : `datetime.datetime`
    Datetime of the most recent content commit.

Raises
------
RuntimeError
    Raised if no content files are found.
[ "Get", "the", "datetime", "for", "the", "most", "recent", "commit", "to", "a", "project", "that", "affected", "Sphinx", "content", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L119-L192
train
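A sketch of how this might be called; the exclusion entries are hypothetical, and the exact matching semantics depend on the Matcher helper, which is not shown in this excerpt:

from documenteer.sphinxconfig.utils import get_project_content_commit_date

try:
    date = get_project_content_commit_date(
        root_dir='.', exclusions=['changelog.rst', '_build'])
    print(date.isoformat())
except RuntimeError as exc:
    print(exc)  # no content files, or no content commits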
lsst-sqre/documenteer
documenteer/sphinxconfig/utils.py
form_ltd_edition_name
def form_ltd_edition_name(git_ref_name=None):
    """Form the LSST the Docs edition name for this branch, using the same
    logic as LTD Keeper does for transforming branch names into edition
    names.

    Parameters
    ----------
    git_ref_name : `str`
        Name of the Git branch (or Git ref in general, such as a tag).

    Notes
    -----
    The LTD Keeper (github.com/lsst-sqre/ltd-keeper) logic is replicated
    here because Keeper is server-side code while this is client-side
    code, and it is not yet clear that this warrants refactoring into a
    common dependency. See ``keeper.utils.auto_slugify_edition``.
    """
    if git_ref_name is None:
        name = read_git_branch()
    else:
        name = git_ref_name

    # First, try to use the JIRA ticket number
    m = TICKET_BRANCH_PATTERN.match(name)
    if m is not None:
        return m.group(1)

    # Or use a tagged version
    m = TAG_PATTERN.match(name)
    if m is not None:
        return name

    if name == 'master':
        # using this terminology for LTD Dasher
        name = 'Current'

    # Otherwise, reproduce the LTD slug
    name = name.replace('/', '-')
    name = name.replace('_', '-')
    name = name.replace('.', '-')
    return name
python
def form_ltd_edition_name(git_ref_name=None):
    """Form the LSST the Docs edition name for this branch, using the same
    logic as LTD Keeper does for transforming branch names into edition
    names.

    Parameters
    ----------
    git_ref_name : `str`
        Name of the Git branch (or Git ref in general, such as a tag).

    Notes
    -----
    The LTD Keeper (github.com/lsst-sqre/ltd-keeper) logic is replicated
    here because Keeper is server-side code while this is client-side
    code, and it is not yet clear that this warrants refactoring into a
    common dependency. See ``keeper.utils.auto_slugify_edition``.
    """
    if git_ref_name is None:
        name = read_git_branch()
    else:
        name = git_ref_name

    # First, try to use the JIRA ticket number
    m = TICKET_BRANCH_PATTERN.match(name)
    if m is not None:
        return m.group(1)

    # Or use a tagged version
    m = TAG_PATTERN.match(name)
    if m is not None:
        return name

    if name == 'master':
        # using this terminology for LTD Dasher
        name = 'Current'

    # Otherwise, reproduce the LTD slug
    name = name.replace('/', '-')
    name = name.replace('_', '-')
    name = name.replace('.', '-')
    return name
[ "def", "form_ltd_edition_name", "(", "git_ref_name", "=", "None", ")", ":", "if", "git_ref_name", "is", "None", ":", "name", "=", "read_git_branch", "(", ")", "else", ":", "name", "=", "git_ref_name", "# First, try to use the JIRA ticket number", "m", "=", "TICKET_BRANCH_PATTERN", ".", "match", "(", "name", ")", "if", "m", "is", "not", "None", ":", "return", "m", ".", "group", "(", "1", ")", "# Or use a tagged version", "m", "=", "TAG_PATTERN", ".", "match", "(", "name", ")", "if", "m", "is", "not", "None", ":", "return", "name", "if", "name", "==", "'master'", ":", "# using this terminology for LTD Dasher", "name", "=", "'Current'", "# Otherwise, reproduce the LTD slug", "name", "=", "name", ".", "replace", "(", "'/'", ",", "'-'", ")", "name", "=", "name", ".", "replace", "(", "'_'", ",", "'-'", ")", "name", "=", "name", ".", "replace", "(", "'.'", ",", "'-'", ")", "return", "name" ]
Form the LSST the Docs edition name for this branch, using the same
logic as LTD Keeper does for transforming branch names into edition
names.

Parameters
----------
git_ref_name : `str`
    Name of the Git branch (or Git ref in general, such as a tag).

Notes
-----
The LTD Keeper (github.com/lsst-sqre/ltd-keeper) logic is replicated
here because Keeper is server-side code while this is client-side code,
and it is not yet clear that this warrants refactoring into a common
dependency. See ``keeper.utils.auto_slugify_edition``.
[ "Form", "the", "LSST", "the", "Docs", "edition", "name", "for", "this", "branch", "using", "the", "same", "logic", "as", "LTD", "Keeper", "does", "for", "transforming", "branch", "names", "into", "edition", "names", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/utils.py#L195-L235
train
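Illustrative calls; the slug fallback is shown with a branch name assumed to match neither TICKET_BRANCH_PATTERN nor TAG_PATTERN (both are defined elsewhere in this module):

from documenteer.sphinxconfig.utils import form_ltd_edition_name

print(form_ltd_edition_name('master'))            # -> 'Current'
print(form_ltd_edition_name('feature/new_docs'))  # -> 'feature-new-docs'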
fkarb/xltable
xltable/workbook.py
Workbook.itersheets
def itersheets(self):
    """Iterates over the worksheets in the book, and sets the active
    worksheet as the current one before yielding.
    """
    for ws in self.worksheets:
        # Expression with no explicit table specified will use None
        # when calling get_table, which should return the current
        # worksheet/table
        prev_ws = self.active_worksheet
        self.active_worksheet = ws
        try:
            yield ws
        finally:
            self.active_worksheet = prev_ws
python
def itersheets(self):
    """Iterates over the worksheets in the book, and sets the active
    worksheet as the current one before yielding.
    """
    for ws in self.worksheets:
        # Expression with no explicit table specified will use None
        # when calling get_table, which should return the current
        # worksheet/table
        prev_ws = self.active_worksheet
        self.active_worksheet = ws
        try:
            yield ws
        finally:
            self.active_worksheet = prev_ws
[ "def", "itersheets", "(", "self", ")", ":", "for", "ws", "in", "self", ".", "worksheets", ":", "# Expression with no explicit table specified will use None", "# when calling get_table, which should return the current worksheet/table", "prev_ws", "=", "self", ".", "active_worksheet", "self", ".", "active_worksheet", "=", "ws", "try", ":", "yield", "ws", "finally", ":", "self", ".", "active_worksheet", "=", "prev_ws" ]
Iterates over the worksheets in the book, and sets the active worksheet as the current one before yielding.
[ "Iterates", "over", "the", "worksheets", "in", "the", "book", "and", "sets", "the", "active", "worksheet", "as", "the", "current", "one", "before", "yielding", "." ]
7a592642d27ad5ee90d2aa8c26338abaa9d84bea
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/workbook.py#L47-L60
train
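A minimal sketch of the invariant this generator maintains, assuming a Workbook instance with worksheets already appended (the constructor argument is hypothetical):

wb = Workbook('example.xlsx')
for ws in wb.itersheets():
    # While inside the loop body, the yielded sheet is the active one,
    # so expressions without an explicit table resolve against it.
    assert wb.active_worksheet is ws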
fkarb/xltable
xltable/workbook.py
Workbook.to_xlsx
def to_xlsx(self, **kwargs):
    """Write workbook to a .xlsx file using xlsxwriter.
    Return a xlsxwriter.workbook.Workbook.

    :param kwargs: Extra arguments passed to the xlsxwriter.Workbook
                   constructor.
    """
    from xlsxwriter.workbook import Workbook as _Workbook
    self.workbook_obj = _Workbook(**kwargs)
    self.workbook_obj.set_calc_mode(self.calc_mode)
    for worksheet in self.itersheets():
        worksheet.to_xlsx(workbook=self)
    self.workbook_obj.filename = self.filename
    if self.filename:
        self.workbook_obj.close()
    return self.workbook_obj
python
def to_xlsx(self, **kwargs):
    """Write workbook to a .xlsx file using xlsxwriter.
    Return a xlsxwriter.workbook.Workbook.

    :param kwargs: Extra arguments passed to the xlsxwriter.Workbook
                   constructor.
    """
    from xlsxwriter.workbook import Workbook as _Workbook
    self.workbook_obj = _Workbook(**kwargs)
    self.workbook_obj.set_calc_mode(self.calc_mode)
    for worksheet in self.itersheets():
        worksheet.to_xlsx(workbook=self)
    self.workbook_obj.filename = self.filename
    if self.filename:
        self.workbook_obj.close()
    return self.workbook_obj
[ "def", "to_xlsx", "(", "self", ",", "*", "*", "kwargs", ")", ":", "from", "xlsxwriter", ".", "workbook", "import", "Workbook", "as", "_Workbook", "self", ".", "workbook_obj", "=", "_Workbook", "(", "*", "*", "kwargs", ")", "self", ".", "workbook_obj", ".", "set_calc_mode", "(", "self", ".", "calc_mode", ")", "for", "worksheet", "in", "self", ".", "itersheets", "(", ")", ":", "worksheet", ".", "to_xlsx", "(", "workbook", "=", "self", ")", "self", ".", "workbook_obj", ".", "filename", "=", "self", ".", "filename", "if", "self", ".", "filename", ":", "self", ".", "workbook_obj", ".", "close", "(", ")", "return", "self", ".", "workbook_obj" ]
Write workbook to a .xlsx file using xlsxwriter.
Return a xlsxwriter.workbook.Workbook.

:param kwargs: Extra arguments passed to the xlsxwriter.Workbook
               constructor.
[ "Write", "workbook", "to", "a", ".", "xlsx", "file", "using", "xlsxwriter", ".", "Return", "a", "xlsxwriter", ".", "workbook", ".", "Workbook", "." ]
7a592642d27ad5ee90d2aa8c26338abaa9d84bea
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/workbook.py#L62-L80
train
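A hedged usage sketch, assuming Workbook is importable from the package root and that sheets have been appended beforehand:

from xltable import Workbook

wb = Workbook('report.xlsx')
# kwargs are forwarded verbatim to xlsxwriter.Workbook; because a
# filename is set on the workbook, the file is also written and
# closed here.
xlsx_book = wb.to_xlsx()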
fkarb/xltable
xltable/workbook.py
Workbook.get_table
def get_table(self, name):
    """Return a table, worksheet pair for the named table"""
    if name is None:
        assert self.active_table, \
            "Can't get table without name unless an active table is set"
        name = self.active_table.name
        if self.active_worksheet:
            table = self.active_worksheet.get_table(name)
            assert table is self.active_table, \
                "Active table is not from the active sheet"
            return table, self.active_worksheet
        for ws in self.worksheets:
            try:
                table = ws.get_table(name)
                if table is self.active_table:
                    return table, ws
            except KeyError:
                pass
        raise RuntimeError("Active table not found in any sheet")

    # if the tablename explicitly uses the sheetname find the right sheet
    if "!" in name:
        ws_name, table_name = map(lambda x: x.strip("'"),
                                  name.split("!", 1))
        for ws in self.worksheets:
            if ws.name == ws_name:
                table = ws.get_table(table_name)
                return table, ws
        raise KeyError(name)

    # otherwise look in the current table
    if self.active_worksheet:
        table = self.active_worksheet.get_table(name)
        return table, self.active_worksheet

    # or fallback to the first matching name in any table
    for ws in self.worksheets:
        try:
            table = ws.get_table(name)
            return table, ws
        except KeyError:
            pass

    raise KeyError(name)
python
def get_table(self, name):
    """Return a table, worksheet pair for the named table"""
    if name is None:
        assert self.active_table, \
            "Can't get table without name unless an active table is set"
        name = self.active_table.name
        if self.active_worksheet:
            table = self.active_worksheet.get_table(name)
            assert table is self.active_table, \
                "Active table is not from the active sheet"
            return table, self.active_worksheet
        for ws in self.worksheets:
            try:
                table = ws.get_table(name)
                if table is self.active_table:
                    return table, ws
            except KeyError:
                pass
        raise RuntimeError("Active table not found in any sheet")

    # if the tablename explicitly uses the sheetname find the right sheet
    if "!" in name:
        ws_name, table_name = map(lambda x: x.strip("'"),
                                  name.split("!", 1))
        for ws in self.worksheets:
            if ws.name == ws_name:
                table = ws.get_table(table_name)
                return table, ws
        raise KeyError(name)

    # otherwise look in the current table
    if self.active_worksheet:
        table = self.active_worksheet.get_table(name)
        return table, self.active_worksheet

    # or fallback to the first matching name in any table
    for ws in self.worksheets:
        try:
            table = ws.get_table(name)
            return table, ws
        except KeyError:
            pass

    raise KeyError(name)
[ "def", "get_table", "(", "self", ",", "name", ")", ":", "if", "name", "is", "None", ":", "assert", "self", ".", "active_table", ",", "\"Can't get table without name unless an active table is set\"", "name", "=", "self", ".", "active_table", ".", "name", "if", "self", ".", "active_worksheet", ":", "table", "=", "self", ".", "active_worksheet", ".", "get_table", "(", "name", ")", "assert", "table", "is", "self", ".", "active_table", ",", "\"Active table is not from the active sheet\"", "return", "table", ",", "self", ".", "active_worksheet", "for", "ws", "in", "self", ".", "worksheets", ":", "try", ":", "table", "=", "ws", ".", "get_table", "(", "name", ")", "if", "table", "is", "self", ".", "active_table", ":", "return", "table", ",", "ws", "except", "KeyError", ":", "pass", "raise", "RuntimeError", "(", "\"Active table not found in any sheet\"", ")", "# if the tablename explicitly uses the sheetname find the right sheet", "if", "\"!\"", "in", "name", ":", "ws_name", ",", "table_name", "=", "map", "(", "lambda", "x", ":", "x", ".", "strip", "(", "\"'\"", ")", ",", "name", ".", "split", "(", "\"!\"", ",", "1", ")", ")", "for", "ws", "in", "self", ".", "worksheets", ":", "if", "ws", ".", "name", "==", "ws_name", ":", "table", "=", "ws", ".", "get_table", "(", "table_name", ")", "return", "table", ",", "ws", "raise", "KeyError", "(", "name", ")", "# otherwise look in the current table", "if", "self", ".", "active_worksheet", ":", "table", "=", "self", ".", "active_worksheet", ".", "get_table", "(", "name", ")", "return", "table", ",", "self", ".", "active_worksheet", "# or fallback to the first matching name in any table", "for", "ws", "in", "self", ".", "worksheets", ":", "try", ":", "table", "=", "ws", ".", "get_table", "(", "name", ")", "return", "table", ",", "ws", "except", "KeyError", ":", "pass", "raise", "KeyError", "(", "name", ")" ]
Return a table, worksheet pair for the named table
[ "Return", "a", "table", "worksheet", "pair", "for", "the", "named", "table" ]
7a592642d27ad5ee90d2aa8c26338abaa9d84bea
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/workbook.py#L141-L186
train
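Two illustrative lookups, given a Workbook instance wb as in the earlier sketch; the sheet and table names are hypothetical:

# Qualified name: quotes around the sheet name are stripped, and only
# the named sheet is searched.
table, ws = wb.get_table("'Summary'!Totals")

# Bare name: the active worksheet is tried first, then every sheet.
table, ws = wb.get_table('Totals')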
mastro35/flows
flows/Actions/Action.py
Action.send_message
def send_message(self, output):
    """ Send a message to the socket """
    file_system_event = None
    if self.my_action_input:
        file_system_event = self.my_action_input.file_system_event or None

    output_action = ActionInput(file_system_event,
                                output,
                                self.name,
                                "*")

    Global.MESSAGE_DISPATCHER.send_message(output_action)
python
def send_message(self, output):
    """ Send a message to the socket """
    file_system_event = None
    if self.my_action_input:
        file_system_event = self.my_action_input.file_system_event or None

    output_action = ActionInput(file_system_event,
                                output,
                                self.name,
                                "*")

    Global.MESSAGE_DISPATCHER.send_message(output_action)
[ "def", "send_message", "(", "self", ",", "output", ")", ":", "file_system_event", "=", "None", "if", "self", ".", "my_action_input", ":", "file_system_event", "=", "self", ".", "my_action_input", ".", "file_system_event", "or", "None", "output_action", "=", "ActionInput", "(", "file_system_event", ",", "output", ",", "self", ".", "name", ",", "\"*\"", ")", "Global", ".", "MESSAGE_DISPATCHER", ".", "send_message", "(", "output_action", ")" ]
Send a message to the socket
[ "Send", "a", "message", "to", "the", "socket" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/Action.py#L107-L121
train
mastro35/flows
flows/Actions/Action.py
Action.stop
def stop(self):
    ''' Stop the current action '''
    Global.LOGGER.debug(f"action {self.name} stopped")
    self.is_running = False
    self.on_stop()
python
def stop(self):
    ''' Stop the current action '''
    Global.LOGGER.debug(f"action {self.name} stopped")
    self.is_running = False
    self.on_stop()
[ "def", "stop", "(", "self", ")", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"action {self.name} stopped\"", ")", "self", ".", "is_running", "=", "False", "self", ".", "on_stop", "(", ")" ]
Stop the current action
[ "Stop", "the", "current", "action" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/Action.py#L123-L127
train
mastro35/flows
flows/Actions/Action.py
Action.run
def run(self):
    """ Start the action """
    Global.LOGGER.debug(f"action {self.name} is running")

    for tmp_monitored_input in self.monitored_input:
        sender = "*" + tmp_monitored_input + "*"
        Global.LOGGER.debug(f"action {self.name} is monitoring {sender}")

    while self.is_running:
        try:
            time.sleep(Global.CONFIG_MANAGER.sleep_interval)
            self.on_cycle()
        except Exception as exc:
            Global.LOGGER.error(
                f"error while running the action {self.name}: {str(exc)}")
python
def run(self):
    """ Start the action """
    Global.LOGGER.debug(f"action {self.name} is running")

    for tmp_monitored_input in self.monitored_input:
        sender = "*" + tmp_monitored_input + "*"
        Global.LOGGER.debug(f"action {self.name} is monitoring {sender}")

    while self.is_running:
        try:
            time.sleep(Global.CONFIG_MANAGER.sleep_interval)
            self.on_cycle()
        except Exception as exc:
            Global.LOGGER.error(
                f"error while running the action {self.name}: {str(exc)}")
[ "def", "run", "(", "self", ")", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"action {self.name} is running\"", ")", "for", "tmp_monitored_input", "in", "self", ".", "monitored_input", ":", "sender", "=", "\"*\"", "+", "tmp_monitored_input", "+", "\"*\"", "Global", ".", "LOGGER", ".", "debug", "(", "f\"action {self.name} is monitoring {sender}\"", ")", "while", "self", ".", "is_running", ":", "try", ":", "time", ".", "sleep", "(", "Global", ".", "CONFIG_MANAGER", ".", "sleep_interval", ")", "self", ".", "on_cycle", "(", ")", "except", "Exception", "as", "exc", ":", "Global", ".", "LOGGER", ".", "error", "(", "f\"error while running the action {self.name}: {str(exc)}\"", ")" ]
Start the action
[ "Start", "the", "action" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/Action.py#L129-L145
train
mastro35/flows
flows/Actions/Action.py
Action.create_action_for_code
def create_action_for_code(cls, action_code, name, configuration, managed_input):
    """ Factory method to create an instance of an Action from an input code """
    Global.LOGGER.debug(f"creating action {name} for code {action_code}")
    Global.LOGGER.debug(f"configuration length: {len(configuration)}")
    Global.LOGGER.debug(f"input: {managed_input}")

    # get the actions catalog
    my_actions_file = Action.search_actions()

    # load custom actions to find the right one
    for filename in my_actions_file:
        module_name = os.path.basename(os.path.normpath(filename))[:-3]
        # garbage collect all the modules you load if they are not necessary
        context = {}
        Action.load_module(module_name, filename)

        for subclass in Action.__subclasses__():
            if subclass.type == action_code:
                action_class = subclass
                action = action_class(name, configuration, managed_input)
                return action

        subclass = None
        gc.collect()
python
def create_action_for_code(cls, action_code, name, configuration, managed_input):
    """ Factory method to create an instance of an Action from an input code """
    Global.LOGGER.debug(f"creating action {name} for code {action_code}")
    Global.LOGGER.debug(f"configuration length: {len(configuration)}")
    Global.LOGGER.debug(f"input: {managed_input}")

    # get the actions catalog
    my_actions_file = Action.search_actions()

    # load custom actions to find the right one
    for filename in my_actions_file:
        module_name = os.path.basename(os.path.normpath(filename))[:-3]
        # garbage collect all the modules you load if they are not necessary
        context = {}
        Action.load_module(module_name, filename)

        for subclass in Action.__subclasses__():
            if subclass.type == action_code:
                action_class = subclass
                action = action_class(name, configuration, managed_input)
                return action

        subclass = None
        gc.collect()
[ "def", "create_action_for_code", "(", "cls", ",", "action_code", ",", "name", ",", "configuration", ",", "managed_input", ")", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"creating action {name} for code {action_code}\"", ")", "Global", ".", "LOGGER", ".", "debug", "(", "f\"configuration length: {len(configuration)}\"", ")", "Global", ".", "LOGGER", ".", "debug", "(", "f\"input: {managed_input}\"", ")", "# get the actions catalog", "my_actions_file", "=", "Action", ".", "search_actions", "(", ")", "# load custom actions to find the right one", "for", "filename", "in", "my_actions_file", ":", "module_name", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "normpath", "(", "filename", ")", ")", "[", ":", "-", "3", "]", "# garbage collect all the modules you load if they are not necessary", "context", "=", "{", "}", "Action", ".", "load_module", "(", "module_name", ",", "filename", ")", "for", "subclass", "in", "Action", ".", "__subclasses__", "(", ")", ":", "if", "subclass", ".", "type", "==", "action_code", ":", "action_class", "=", "subclass", "action", "=", "action_class", "(", "name", ",", "configuration", ",", "managed_input", ")", "return", "action", "subclass", "=", "None", "gc", ".", "collect", "(", ")" ]
Factory method to create an instance of an Action from an input code
[ "Factory", "method", "to", "create", "an", "instance", "of", "an", "Action", "from", "an", "input", "code" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/Action.py#L214-L238
train
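A hedged sketch of calling the factory; the action code, name, and configuration values are all hypothetical:

action = Action.create_action_for_code(
    'copy',                             # hypothetical Action.type value
    'my_copy_action',                   # instance name
    {'input': 'in', 'output': 'out'},   # configuration
    ['upstream_action'])                # monitored inputs
if action is None:
    # The factory returns None implicitly when no loaded Action
    # subclass declares a matching type.
    print('no Action subclass declares type == "copy"')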
bluekeyes/sphinx-javalink
javalink/loader.py
extract_class
def extract_class(jar, name):
    """Extracts a LinkableClass from a jar.

    Args:
        jar: An open ZipFile instance.
        name: A string containing the binary name of a class.

    Raises:
        KeyError: The class does not exist in the jar.
    """
    with jar.open(name) as entry:
        return LinkableClass(javatools.unpack_class(entry))
python
def extract_class(jar, name):
    """Extracts a LinkableClass from a jar.

    Args:
        jar: An open ZipFile instance.
        name: A string containing the binary name of a class.

    Raises:
        KeyError: The class does not exist in the jar.
    """
    with jar.open(name) as entry:
        return LinkableClass(javatools.unpack_class(entry))
[ "def", "extract_class", "(", "jar", ",", "name", ")", ":", "with", "jar", ".", "open", "(", "name", ")", "as", "entry", ":", "return", "LinkableClass", "(", "javatools", ".", "unpack_class", "(", "entry", ")", ")" ]
Extracts a LinkableClass from a jar.

Args:
    jar: An open ZipFile instance.
    name: A string containing the binary name of a class.

Raises:
    KeyError: The class does not exist in the jar.
[ "Extracts", "a", "LinkableClass", "from", "a", "jar", "." ]
490e37506efa53e95ad88a665e347536e75b6254
https://github.com/bluekeyes/sphinx-javalink/blob/490e37506efa53e95ad88a665e347536e75b6254/javalink/loader.py#L10-L22
train
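Usage sketch; the jar path and entry name are hypothetical, and whether callers pass the '.class' suffix here depends on the rest of this module:

import zipfile

from javalink.loader import extract_class

with zipfile.ZipFile('guava.jar') as jar:
    cls = extract_class(jar, 'com/google/common/base/Optional.class')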
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/pyapisummary.py
TaskApiDirective._format_summary_node
def _format_summary_node(self, task_class):
    """Format a section node containing a summary of a Task class's key
    APIs.
    """
    modulename = task_class.__module__
    classname = task_class.__name__

    nodes = []
    nodes.append(
        self._format_class_nodes(task_class))
    nodes.append(
        self._format_config_nodes(modulename, classname)
    )

    methods = ('run', 'runDataRef')
    for method in methods:
        if hasattr(task_class, method):
            method_obj = getattr(task_class, method)
            nodes.append(
                self._format_method_nodes(method_obj,
                                          modulename,
                                          classname))

    return nodes
python
def _format_summary_node(self, task_class):
    """Format a section node containing a summary of a Task class's key
    APIs.
    """
    modulename = task_class.__module__
    classname = task_class.__name__

    nodes = []
    nodes.append(
        self._format_class_nodes(task_class))
    nodes.append(
        self._format_config_nodes(modulename, classname)
    )

    methods = ('run', 'runDataRef')
    for method in methods:
        if hasattr(task_class, method):
            method_obj = getattr(task_class, method)
            nodes.append(
                self._format_method_nodes(method_obj,
                                          modulename,
                                          classname))

    return nodes
[ "def", "_format_summary_node", "(", "self", ",", "task_class", ")", ":", "modulename", "=", "task_class", ".", "__module__", "classname", "=", "task_class", ".", "__name__", "nodes", "=", "[", "]", "nodes", ".", "append", "(", "self", ".", "_format_class_nodes", "(", "task_class", ")", ")", "nodes", ".", "append", "(", "self", ".", "_format_config_nodes", "(", "modulename", ",", "classname", ")", ")", "methods", "=", "(", "'run'", ",", "'runDataRef'", ")", "for", "method", "in", "methods", ":", "if", "hasattr", "(", "task_class", ",", "method", ")", ":", "method_obj", "=", "getattr", "(", "task_class", ",", "method", ")", "nodes", ".", "append", "(", "self", ".", "_format_method_nodes", "(", "method_obj", ",", "modulename", ",", "classname", ")", ")", "return", "nodes" ]
Format a section node containing a summary of a Task class's key APIs.
[ "Format", "a", "section", "node", "containg", "a", "summary", "of", "a", "Task", "class", "s", "key", "APIs", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L62-L84
train
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/pyapisummary.py
TaskApiDirective._format_class_nodes
def _format_class_nodes(self, task_class):
    """Create a ``desc`` node summarizing the class docstring.
    """
    # Patterned after PyObject.handle_signature in Sphinx.
    # https://github.com/sphinx-doc/sphinx/blob/3e57ea0a5253ac198c1bff16c40abe71951bb586/sphinx/domains/python.py#L246
    modulename = task_class.__module__
    classname = task_class.__name__
    fullname = '.'.join((modulename, classname))

    # The signature term
    signature = Signature(task_class, bound_method=False)
    desc_sig_node = self._format_signature(
        signature, modulename, classname, fullname, 'py:class')

    # The content is the one-sentence summary.
    content_node = desc_content()
    content_node += self._create_doc_summary(task_class, fullname,
                                             'py:class')

    desc_node = desc()
    desc_node['noindex'] = True
    desc_node['domain'] = 'py'
    desc_node['objtype'] = 'class'
    desc_node += desc_sig_node
    desc_node += content_node

    return desc_node
python
def _format_class_nodes(self, task_class):
    """Create a ``desc`` node summarizing the class docstring.
    """
    # Patterned after PyObject.handle_signature in Sphinx.
    # https://github.com/sphinx-doc/sphinx/blob/3e57ea0a5253ac198c1bff16c40abe71951bb586/sphinx/domains/python.py#L246
    modulename = task_class.__module__
    classname = task_class.__name__
    fullname = '.'.join((modulename, classname))

    # The signature term
    signature = Signature(task_class, bound_method=False)
    desc_sig_node = self._format_signature(
        signature, modulename, classname, fullname, 'py:class')

    # The content is the one-sentence summary.
    content_node = desc_content()
    content_node += self._create_doc_summary(task_class, fullname,
                                             'py:class')

    desc_node = desc()
    desc_node['noindex'] = True
    desc_node['domain'] = 'py'
    desc_node['objtype'] = 'class'
    desc_node += desc_sig_node
    desc_node += content_node

    return desc_node
[ "def", "_format_class_nodes", "(", "self", ",", "task_class", ")", ":", "# Patterned after PyObject.handle_signature in Sphinx.", "# https://github.com/sphinx-doc/sphinx/blob/3e57ea0a5253ac198c1bff16c40abe71951bb586/sphinx/domains/python.py#L246", "modulename", "=", "task_class", ".", "__module__", "classname", "=", "task_class", ".", "__name__", "fullname", "=", "'.'", ".", "join", "(", "(", "modulename", ",", "classname", ")", ")", "# The signature term", "signature", "=", "Signature", "(", "task_class", ",", "bound_method", "=", "False", ")", "desc_sig_node", "=", "self", ".", "_format_signature", "(", "signature", ",", "modulename", ",", "classname", ",", "fullname", ",", "'py:class'", ")", "# The content is the one-sentence summary.", "content_node", "=", "desc_content", "(", ")", "content_node", "+=", "self", ".", "_create_doc_summary", "(", "task_class", ",", "fullname", ",", "'py:class'", ")", "desc_node", "=", "desc", "(", ")", "desc_node", "[", "'noindex'", "]", "=", "True", "desc_node", "[", "'domain'", "]", "=", "'py'", "desc_node", "[", "'objtype'", "]", "=", "'class'", "desc_node", "+=", "desc_sig_node", "desc_node", "+=", "content_node", "return", "desc_node" ]
Create a ``desc`` node summarizing the class docstring.
[ "Create", "a", "desc", "node", "summarizing", "the", "class", "docstring", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L86-L112
train
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/pyapisummary.py
TaskApiDirective._format_method_nodes
def _format_method_nodes(self, task_method, modulename, classname):
    """Create a ``desc`` node summarizing a method docstring.
    """
    methodname = task_method.__name__
    fullname = '.'.join((modulename, classname, methodname))

    # The signature term
    signature = Signature(task_method, bound_method=True)
    desc_sig_node = self._format_signature(
        signature, modulename, classname, fullname, 'py:meth')

    # The content is the one-sentence summary.
    content_node = desc_content()
    content_node += self._create_doc_summary(task_method, fullname,
                                             'py:meth')

    desc_node = desc()
    desc_node['noindex'] = True
    desc_node['domain'] = 'py'
    desc_node['objtype'] = 'method'
    desc_node += desc_sig_node
    desc_node += content_node

    return desc_node
python
def _format_method_nodes(self, task_method, modulename, classname):
    """Create a ``desc`` node summarizing a method docstring.
    """
    methodname = task_method.__name__
    fullname = '.'.join((modulename, classname, methodname))

    # The signature term
    signature = Signature(task_method, bound_method=True)
    desc_sig_node = self._format_signature(
        signature, modulename, classname, fullname, 'py:meth')

    # The content is the one-sentence summary.
    content_node = desc_content()
    content_node += self._create_doc_summary(task_method, fullname,
                                             'py:meth')

    desc_node = desc()
    desc_node['noindex'] = True
    desc_node['domain'] = 'py'
    desc_node['objtype'] = 'method'
    desc_node += desc_sig_node
    desc_node += content_node

    return desc_node
[ "def", "_format_method_nodes", "(", "self", ",", "task_method", ",", "modulename", ",", "classname", ")", ":", "methodname", "=", "task_method", ".", "__name__", "fullname", "=", "'.'", ".", "join", "(", "(", "modulename", ",", "classname", ",", "methodname", ")", ")", "# The signature term", "signature", "=", "Signature", "(", "task_method", ",", "bound_method", "=", "True", ")", "desc_sig_node", "=", "self", ".", "_format_signature", "(", "signature", ",", "modulename", ",", "classname", ",", "fullname", ",", "'py:meth'", ")", "# The content is the one-sentence summary.", "content_node", "=", "desc_content", "(", ")", "content_node", "+=", "self", ".", "_create_doc_summary", "(", "task_method", ",", "fullname", ",", "'py:meth'", ")", "desc_node", "=", "desc", "(", ")", "desc_node", "[", "'noindex'", "]", "=", "True", "desc_node", "[", "'domain'", "]", "=", "'py'", "desc_node", "[", "'objtype'", "]", "=", "'method'", "desc_node", "+=", "desc_sig_node", "desc_node", "+=", "content_node", "return", "desc_node" ]
Create a ``desc`` node summarizing a method docstring.
[ "Create", "a", "desc", "node", "summarizing", "a", "method", "docstring", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L114-L136
train
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/pyapisummary.py
TaskApiDirective._create_doc_summary
def _create_doc_summary(self, obj, fullname, refrole):
    """Create a paragraph containing the object's one-sentence docstring
    summary with a link to further documentation.

    The paragraph should be inserted into the ``desc`` node's
    ``desc_content``.
    """
    summary_text = extract_docstring_summary(get_docstring(obj))
    summary_text = summary_text.strip()
    # Strip the last "." because the linked ellipses take its place
    if summary_text.endswith('.'):
        summary_text = summary_text.rstrip('.')
    content_node_p = nodes.paragraph(text=summary_text)
    content_node_p += self._create_api_details_link(fullname, refrole)
    return content_node_p
python
def _create_doc_summary(self, obj, fullname, refrole):
    """Create a paragraph containing the object's one-sentence docstring
    summary with a link to further documentation.

    The paragraph should be inserted into the ``desc`` node's
    ``desc_content``.
    """
    summary_text = extract_docstring_summary(get_docstring(obj))
    summary_text = summary_text.strip()
    # Strip the last "." because the linked ellipses take its place
    if summary_text.endswith('.'):
        summary_text = summary_text.rstrip('.')
    content_node_p = nodes.paragraph(text=summary_text)
    content_node_p += self._create_api_details_link(fullname, refrole)
    return content_node_p
[ "def", "_create_doc_summary", "(", "self", ",", "obj", ",", "fullname", ",", "refrole", ")", ":", "summary_text", "=", "extract_docstring_summary", "(", "get_docstring", "(", "obj", ")", ")", "summary_text", "=", "summary_text", ".", "strip", "(", ")", "# Strip the last \".\" because the linked ellipses take its place", "if", "summary_text", ".", "endswith", "(", "'.'", ")", ":", "summary_text", "=", "summary_text", ".", "rstrip", "(", "'.'", ")", "content_node_p", "=", "nodes", ".", "paragraph", "(", "text", "=", "summary_text", ")", "content_node_p", "+=", "self", ".", "_create_api_details_link", "(", "fullname", ",", "refrole", ")", "return", "content_node_p" ]
Create a paragraph containing the object's one-sentence docstring
summary with a link to further documentation.

The paragraph should be inserted into the ``desc`` node's
``desc_content``.
[ "Create", "a", "paragraph", "containing", "the", "object", "s", "one", "-", "sentence", "docstring", "summary", "with", "a", "link", "to", "further", "documentation", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L175-L189
train
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/pyapisummary.py
TaskApiDirective._create_api_details_link
def _create_api_details_link(self, fullname, refrole):
    """Create a link to the API docs, labelled as "...", that is appended
    to the content paragraph of an API description.

    This affordance indicates that more documentation is available, and
    that by clicking on the ellipsis the user can find that
    documentation.
    """
    ref_text = '... <{}>'.format(fullname)
    xref = PyXRefRole()
    xref_nodes, _ = xref(
        refrole, ref_text, ref_text, self.lineno, self.state.inliner)
    return xref_nodes
python
def _create_api_details_link(self, fullname, refrole):
    """Create a link to the API docs, labelled as "...", that is appended
    to the content paragraph of an API description.

    This affordance indicates that more documentation is available, and
    that by clicking on the ellipsis the user can find that
    documentation.
    """
    ref_text = '... <{}>'.format(fullname)
    xref = PyXRefRole()
    xref_nodes, _ = xref(
        refrole, ref_text, ref_text, self.lineno, self.state.inliner)
    return xref_nodes
[ "def", "_create_api_details_link", "(", "self", ",", "fullname", ",", "refrole", ")", ":", "ref_text", "=", "'... <{}>'", ".", "format", "(", "fullname", ")", "xref", "=", "PyXRefRole", "(", ")", "xref_nodes", ",", "_", "=", "xref", "(", "refrole", ",", "ref_text", ",", "ref_text", ",", "self", ".", "lineno", ",", "self", ".", "state", ".", "inliner", ")", "return", "xref_nodes" ]
Create a link to the API docs, labelled as "...", that is appended to
the content paragraph of an API description.

This affordance indicates that more documentation is available, and
that by clicking on the ellipsis the user can find that documentation.
[ "Appends", "a", "link", "to", "the", "API", "docs", "labelled", "as", "...", "that", "is", "appended", "to", "the", "content", "paragraph", "of", "an", "API", "description", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L191-L204
train
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/pyapisummary.py
TaskApiDirective._format_config_nodes
def _format_config_nodes(self, modulename, classname):
    """Create a ``desc`` node summarizing the config attribute.

    The ``config`` attribute is not statically available from a task
    class. This method manually creates a signature and docstring for
    the config attribute.
    """
    fullname = '{0}.{1}.config'.format(modulename, classname)

    # The signature term
    desc_sig_node = desc_signature()
    desc_sig_node['module'] = modulename
    desc_sig_node['class'] = classname
    desc_sig_node['fullname'] = fullname
    prefix = 'attribute'
    desc_sig_node += desc_annotation(prefix, prefix)
    desc_sig_name_node = desc_addname('config', 'config')
    # Fakes the look of a cross reference.
    desc_sig_name_node['classes'].extend(['xref', 'py'])
    desc_sig_node += desc_sig_name_node

    # The content is the one-sentence summary.
    summary_text = (
        'Access configuration fields and retargetable subtasks.'
    )
    content_node_p = nodes.paragraph(text=summary_text)
    content_node = desc_content()
    content_node += content_node_p

    desc_node = desc()
    desc_node['noindex'] = True
    desc_node['domain'] = 'py'
    desc_node['objtype'] = 'attribute'
    desc_node += desc_sig_node
    desc_node += content_node

    return desc_node
python
def _format_config_nodes(self, modulename, classname):
    """Create a ``desc`` node summarizing the config attribute.

    The ``config`` attribute is not statically available from a task
    class. This method manually creates a signature and docstring for
    the config attribute.
    """
    fullname = '{0}.{1}.config'.format(modulename, classname)

    # The signature term
    desc_sig_node = desc_signature()
    desc_sig_node['module'] = modulename
    desc_sig_node['class'] = classname
    desc_sig_node['fullname'] = fullname
    prefix = 'attribute'
    desc_sig_node += desc_annotation(prefix, prefix)
    desc_sig_name_node = desc_addname('config', 'config')
    # Fakes the look of a cross reference.
    desc_sig_name_node['classes'].extend(['xref', 'py'])
    desc_sig_node += desc_sig_name_node

    # The content is the one-sentence summary.
    summary_text = (
        'Access configuration fields and retargetable subtasks.'
    )
    content_node_p = nodes.paragraph(text=summary_text)
    content_node = desc_content()
    content_node += content_node_p

    desc_node = desc()
    desc_node['noindex'] = True
    desc_node['domain'] = 'py'
    desc_node['objtype'] = 'attribute'
    desc_node += desc_sig_node
    desc_node += content_node

    return desc_node
[ "def", "_format_config_nodes", "(", "self", ",", "modulename", ",", "classname", ")", ":", "fullname", "=", "'{0}.{1}.config'", ".", "format", "(", "modulename", ",", "classname", ")", "# The signature term", "desc_sig_node", "=", "desc_signature", "(", ")", "desc_sig_node", "[", "'module'", "]", "=", "modulename", "desc_sig_node", "[", "'class'", "]", "=", "classname", "desc_sig_node", "[", "'fullname'", "]", "=", "fullname", "prefix", "=", "'attribute'", "desc_sig_node", "+=", "desc_annotation", "(", "prefix", ",", "prefix", ")", "desc_sig_name_node", "=", "desc_addname", "(", "'config'", ",", "'config'", ")", "# Fakes the look of a cross reference.", "desc_sig_name_node", "[", "'classes'", "]", ".", "extend", "(", "[", "'xref'", ",", "'py'", "]", ")", "desc_sig_node", "+=", "desc_sig_name_node", "# The content is the one-sentence summary.", "summary_text", "=", "(", "'Access configuration fields and retargetable subtasks.'", ")", "content_node_p", "=", "nodes", ".", "paragraph", "(", "text", "=", "summary_text", ")", "content_node", "=", "desc_content", "(", ")", "content_node", "+=", "content_node_p", "desc_node", "=", "desc", "(", ")", "desc_node", "[", "'noindex'", "]", "=", "True", "desc_node", "[", "'domain'", "]", "=", "'py'", "desc_node", "[", "'objtype'", "]", "=", "'attribute'", "desc_node", "+=", "desc_sig_node", "desc_node", "+=", "content_node", "return", "desc_node" ]
Create a ``desc`` node summarizing the config attribute.

The ``config`` attribute is not statically available from a task class.
This method manually creates a signature and docstring for the config
attribute.
[ "Create", "a", "desc", "node", "summarizing", "the", "config", "attribute" ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L206-L242
train
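A minimal standalone sketch of the node-building pattern in the record above, assuming a Sphinx environment where ``sphinx.addnodes`` is importable; the module and class names are hypothetical placeholders, not taken from the dataset.

# Sketch: build a py-attribute ``desc`` node outside a directive.
from docutils import nodes
from sphinx.addnodes import (
    desc, desc_addname, desc_annotation, desc_content, desc_signature)

sig = desc_signature()
sig['module'] = 'mypackage.tasks'                 # hypothetical module
sig['class'] = 'MyTask'                           # hypothetical class
sig['fullname'] = 'mypackage.tasks.MyTask.config'
sig += desc_annotation('attribute', 'attribute')
name = desc_addname('config', 'config')
name['classes'].extend(['xref', 'py'])            # fake the xref look
sig += name

content = desc_content()
content += nodes.paragraph(text='Access configuration fields.')

node = desc()
node['noindex'] = True
node['domain'] = 'py'
node['objtype'] = 'attribute'
node += sig
node += content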
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/pyapisummary.py
TaskApiDirective._format_import_example
def _format_import_example(self, task_class): """Generate nodes that show a code sample demonstrating how to import the task class. Parameters ---------- task_class : ``lsst.pipe.base.Task``-type The Task class. Returns ------- nodes : `list` of docutils nodes Docutils nodes showing a class import statement. """ code = 'from {0.__module__} import {0.__name__}'.format(task_class) # This is a bare-bones version of what Sphinx's code-block directive # does. The 'language' attr triggers the pygments treatment. literal_node = nodes.literal_block(code, code) literal_node['language'] = 'py' return [literal_node]
python
def _format_import_example(self, task_class): """Generate nodes that show a code sample demonstrating how to import the task class. Parameters ---------- task_class : ``lsst.pipe.base.Task``-type The Task class. Returns ------- nodes : `list` of docutils nodes Docutils nodes showing a class import statement. """ code = 'from {0.__module__} import {0.__name__}'.format(task_class) # This is a bare-bones version of what Sphinx's code-block directive # does. The 'language' attr triggers the pygments treatment. literal_node = nodes.literal_block(code, code) literal_node['language'] = 'py' return [literal_node]
[ "def", "_format_import_example", "(", "self", ",", "task_class", ")", ":", "code", "=", "'from {0.__module__} import {0.__name__}'", ".", "format", "(", "task_class", ")", "# This is a bare-bones version of what Sphinx's code-block directive", "# does. The 'language' attr triggers the pygments treatment.", "literal_node", "=", "nodes", ".", "literal_block", "(", "code", ",", "code", ")", "literal_node", "[", "'language'", "]", "=", "'py'", "return", "[", "literal_node", "]" ]
Generate nodes that show a code sample demonstrating how to import the task class. Parameters ---------- task_class : ``lsst.pipe.base.Task``-type The Task class. Returns ------- nodes : `list` of docutils nodes Docutils nodes showing a class import statement.
[ "Generate", "nodes", "that", "show", "a", "code", "sample", "demonstrating", "how", "to", "import", "the", "task", "class", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L244-L265
train
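A minimal sketch of the highlighting trick used in the record above: setting the ``language`` attribute on a docutils ``literal_block`` is what triggers the pygments treatment in Sphinx. The import statement shown is a hypothetical example.

from docutils import nodes

code = 'from mypackage.tasks import MyTask'  # hypothetical import line
literal_node = nodes.literal_block(code, code)
literal_node['language'] = 'py'  # enables pygments highlighting in Sphinx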
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/pyapisummary.py
TaskApiDirective._format_api_docs_link_message
def _format_api_docs_link_message(self, task_class): """Format a message referring the reader to the full API docs. Parameters ---------- task_class : ``lsst.pipe.base.Task``-type The Task class. Returns ------- nodes : `list` of docutils nodes Docutils nodes showing a link to the full API docs. """ fullname = '{0.__module__}.{0.__name__}'.format(task_class) p_node = nodes.paragraph() _ = 'See the ' p_node += nodes.Text(_, _) xref = PyXRefRole() xref_nodes, _ = xref( 'py:class', '~' + fullname, '~' + fullname, self.lineno, self.state.inliner) p_node += xref_nodes _ = ' API reference for complete details.' p_node += nodes.Text(_, _) seealso_node = seealso() seealso_node += p_node return [seealso_node]
python
def _format_api_docs_link_message(self, task_class): """Format a message referring the reader to the full API docs. Parameters ---------- task_class : ``lsst.pipe.base.Task``-type The Task class. Returns ------- nodes : `list` of docutils nodes Docutils nodes showing a link to the full API docs. """ fullname = '{0.__module__}.{0.__name__}'.format(task_class) p_node = nodes.paragraph() _ = 'See the ' p_node += nodes.Text(_, _) xref = PyXRefRole() xref_nodes, _ = xref( 'py:class', '~' + fullname, '~' + fullname, self.lineno, self.state.inliner) p_node += xref_nodes _ = ' API reference for complete details.' p_node += nodes.Text(_, _) seealso_node = seealso() seealso_node += p_node return [seealso_node]
[ "def", "_format_api_docs_link_message", "(", "self", ",", "task_class", ")", ":", "fullname", "=", "'{0.__module__}.{0.__name__}'", ".", "format", "(", "task_class", ")", "p_node", "=", "nodes", ".", "paragraph", "(", ")", "_", "=", "'See the '", "p_node", "+=", "nodes", ".", "Text", "(", "_", ",", "_", ")", "xref", "=", "PyXRefRole", "(", ")", "xref_nodes", ",", "_", "=", "xref", "(", "'py:class'", ",", "'~'", "+", "fullname", ",", "'~'", "+", "fullname", ",", "self", ".", "lineno", ",", "self", ".", "state", ".", "inliner", ")", "p_node", "+=", "xref_nodes", "_", "=", "' API reference for complete details.'", "p_node", "+=", "nodes", ".", "Text", "(", "_", ",", "_", ")", "seealso_node", "=", "seealso", "(", ")", "seealso_node", "+=", "p_node", "return", "[", "seealso_node", "]" ]
Format a message referring the reader to the full API docs. Parameters ---------- task_class : ``lsst.pipe.base.Task``-type The Task class. Returns ------- nodes : `list` of docutils nodes Docutils nodes showing a link to the full API docs.
[ "Format", "a", "message", "referring", "the", "reader", "to", "the", "full", "API", "docs", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/pyapisummary.py#L267-L303
train
nathan-hoad/aiomanhole
aiomanhole/__init__.py
InteractiveInterpreter.send_exception
def send_exception(self): """When an exception has occurred, write the traceback to the user.""" self.compiler.reset() exc = traceback.format_exc() self.writer.write(exc.encode('utf8')) yield from self.writer.drain()
python
def send_exception(self): """When an exception has occurred, write the traceback to the user.""" self.compiler.reset() exc = traceback.format_exc() self.writer.write(exc.encode('utf8')) yield from self.writer.drain()
[ "def", "send_exception", "(", "self", ")", ":", "self", ".", "compiler", ".", "reset", "(", ")", "exc", "=", "traceback", ".", "format_exc", "(", ")", "self", ".", "writer", ".", "write", "(", "exc", ".", "encode", "(", "'utf8'", ")", ")", "yield", "from", "self", ".", "writer", ".", "drain", "(", ")" ]
When an exception has occurred, write the traceback to the user.
[ "When", "an", "exception", "has", "occurred", "write", "the", "traceback", "to", "the", "user", "." ]
a13394c79e1878cde67aa2637ae5664df468ed04
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L66-L73
train
nathan-hoad/aiomanhole
aiomanhole/__init__.py
InteractiveInterpreter.handle_one_command
def handle_one_command(self): """Process a single command. May have many lines.""" while True: yield from self.write_prompt() codeobj = yield from self.read_command() if codeobj is not None: yield from self.run_command(codeobj)
python
def handle_one_command(self): """Process a single command. May have many lines.""" while True: yield from self.write_prompt() codeobj = yield from self.read_command() if codeobj is not None: yield from self.run_command(codeobj)
[ "def", "handle_one_command", "(", "self", ")", ":", "while", "True", ":", "yield", "from", "self", ".", "write_prompt", "(", ")", "codeobj", "=", "yield", "from", "self", ".", "read_command", "(", ")", "if", "codeobj", "is", "not", "None", ":", "yield", "from", "self", ".", "run_command", "(", "codeobj", ")" ]
Process a single command. May have many lines.
[ "Process", "a", "single", "command", ".", "May", "have", "many", "lines", "." ]
a13394c79e1878cde67aa2637ae5664df468ed04
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L88-L96
train
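A hedged usage sketch for the interpreter loop above, assuming aiomanhole's documented ``start_manhole`` entry point; the exact keyword names may differ between versions, and the namespace contents are arbitrary.

import asyncio
from aiomanhole import start_manhole  # assumed public entry point

# Expose a Python shell over a local TCP port for debugging.
start_manhole(host='127.0.0.1', port=9999, namespace={'answer': 42})
asyncio.get_event_loop().run_forever()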
nathan-hoad/aiomanhole
aiomanhole/__init__.py
InteractiveInterpreter.run_command
def run_command(self, codeobj): """Execute a compiled code object, and write the output back to the client.""" try: value, stdout = yield from self.attempt_exec(codeobj, self.namespace) except Exception: yield from self.send_exception() return else: yield from self.send_output(value, stdout)
python
def run_command(self, codeobj): """Execute a compiled code object, and write the output back to the client.""" try: value, stdout = yield from self.attempt_exec(codeobj, self.namespace) except Exception: yield from self.send_exception() return else: yield from self.send_output(value, stdout)
[ "def", "run_command", "(", "self", ",", "codeobj", ")", ":", "try", ":", "value", ",", "stdout", "=", "yield", "from", "self", ".", "attempt_exec", "(", "codeobj", ",", "self", ".", "namespace", ")", "except", "Exception", ":", "yield", "from", "self", ".", "send_exception", "(", ")", "return", "else", ":", "yield", "from", "self", ".", "send_output", "(", "value", ",", "stdout", ")" ]
Execute a compiled code object, and write the output back to the client.
[ "Execute", "a", "compiled", "code", "object", "and", "write", "the", "output", "back", "to", "the", "client", "." ]
a13394c79e1878cde67aa2637ae5664df468ed04
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L99-L107
train
nathan-hoad/aiomanhole
aiomanhole/__init__.py
InteractiveInterpreter.read_command
def read_command(self): """Read a command from the user line by line. Returns a code object suitable for execution. """ reader = self.reader line = yield from reader.readline() if line == b'': # lost connection raise ConnectionResetError() try: # skip the newline to make CommandCompiler work as advertised codeobj = self.attempt_compile(line.rstrip(b'\n')) except SyntaxError: yield from self.send_exception() return return codeobj
python
def read_command(self): """Read a command from the user line by line. Returns a code object suitable for execution. """ reader = self.reader line = yield from reader.readline() if line == b'': # lost connection raise ConnectionResetError() try: # skip the newline to make CommandCompiler work as advertised codeobj = self.attempt_compile(line.rstrip(b'\n')) except SyntaxError: yield from self.send_exception() return return codeobj
[ "def", "read_command", "(", "self", ")", ":", "reader", "=", "self", ".", "reader", "line", "=", "yield", "from", "reader", ".", "readline", "(", ")", "if", "line", "==", "b''", ":", "# lost connection", "raise", "ConnectionResetError", "(", ")", "try", ":", "# skip the newline to make CommandCompiler work as advertised", "codeobj", "=", "self", ".", "attempt_compile", "(", "line", ".", "rstrip", "(", "b'\\n'", ")", ")", "except", "SyntaxError", ":", "yield", "from", "self", ".", "send_exception", "(", ")", "return", "return", "codeobj" ]
Read a command from the user line by line. Returns a code object suitable for execution.
[ "Read", "a", "command", "from", "the", "user", "line", "by", "line", "." ]
a13394c79e1878cde67aa2637ae5664df468ed04
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L121-L140
train
nathan-hoad/aiomanhole
aiomanhole/__init__.py
InteractiveInterpreter.send_output
def send_output(self, value, stdout): """Write the output or value of the expression back to user. >>> 5 5 >>> print('cash rules everything around me') cash rules everything around me """ writer = self.writer if value is not None: writer.write('{!r}\n'.format(value).encode('utf8')) if stdout: writer.write(stdout.encode('utf8')) yield from writer.drain()
python
def send_output(self, value, stdout): """Write the output or value of the expression back to user. >>> 5 5 >>> print('cash rules everything around me') cash rules everything around me """ writer = self.writer if value is not None: writer.write('{!r}\n'.format(value).encode('utf8')) if stdout: writer.write(stdout.encode('utf8')) yield from writer.drain()
[ "def", "send_output", "(", "self", ",", "value", ",", "stdout", ")", ":", "writer", "=", "self", ".", "writer", "if", "value", "is", "not", "None", ":", "writer", ".", "write", "(", "'{!r}\\n'", ".", "format", "(", "value", ")", ".", "encode", "(", "'utf8'", ")", ")", "if", "stdout", ":", "writer", ".", "write", "(", "stdout", ".", "encode", "(", "'utf8'", ")", ")", "yield", "from", "writer", ".", "drain", "(", ")" ]
Write the output or value of the expression back to user. >>> 5 5 >>> print('cash rules everything around me') cash rules everything around me
[ "Write", "the", "output", "or", "value", "of", "the", "expression", "back", "to", "user", "." ]
a13394c79e1878cde67aa2637ae5664df468ed04
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L143-L160
train
jslang/responsys
responsys/client.py
InteractClient.call
def call(self, method, *args):
    """ Calls the named service method with the provided arguments """
    try:
        response = getattr(self.client.service, method)(*args)
    except (URLError, SSLError) as e:
        log.exception('Failed to connect to responsys service')
        raise ConnectError("Request to service timed out")
    except WebFault as web_fault:
        fault_name = getattr(web_fault.fault, 'faultstring', None)
        error = str(web_fault.fault.detail)
        if fault_name == 'TableFault':
            raise TableFault(error)
        if fault_name == 'ListFault':
            raise ListFault(error)
        if fault_name == 'API_LIMIT_EXCEEDED':
            raise ApiLimitError(error)
        if fault_name == 'AccountFault':
            raise AccountFault(error)
        raise ServiceError(web_fault.fault, web_fault.document)
    return response
python
def call(self, method, *args):
    """ Calls the named service method with the provided arguments """
    try:
        response = getattr(self.client.service, method)(*args)
    except (URLError, SSLError) as e:
        log.exception('Failed to connect to responsys service')
        raise ConnectError("Request to service timed out")
    except WebFault as web_fault:
        fault_name = getattr(web_fault.fault, 'faultstring', None)
        error = str(web_fault.fault.detail)
        if fault_name == 'TableFault':
            raise TableFault(error)
        if fault_name == 'ListFault':
            raise ListFault(error)
        if fault_name == 'API_LIMIT_EXCEEDED':
            raise ApiLimitError(error)
        if fault_name == 'AccountFault':
            raise AccountFault(error)
        raise ServiceError(web_fault.fault, web_fault.document)
    return response
[ "def", "call", "(", "self", ",", "method", ",", "*", "args", ")", ":", "try", ":", "response", "=", "getattr", "(", "self", ".", "client", ".", "service", ",", "method", ")", "(", "*", "args", ")", "except", "(", "URLError", ",", "SSLError", ")", "as", "e", ":", "log", ".", "exception", "(", "'Failed to connect to responsys service'", ")", "raise", "ConnectError", "(", "\"Request to service timed out\"", ")", "except", "WebFault", "as", "web_fault", ":", "fault_name", "=", "getattr", "(", "web_fault", ".", "fault", ",", "'faultstring'", ",", "None", ")", "error", "=", "str", "(", "web_fault", ".", "fault", ".", "detail", ")", "if", "fault_name", "==", "'TableFault'", ":", "raise", "TableFault", "(", "error", ")", "if", "fault_name", "==", "'ListFault'", ":", "raise", "ListFault", "(", "error", ")", "if", "fault_name", "==", "'API_LIMIT_EXCEEDED'", ":", "raise", "ApiLimitError", "(", "error", ")", "if", "fault_name", "==", "'AccountFault'", ":", "raise", "AccountFault", "(", "error", ")", "raise", "ServiceError", "(", "web_fault", ".", "fault", ",", "web_fault", ".", "document", ")", "return", "response" ]
Calls the named service method with the provided arguments
[ "Calls", "the", "service", "method", "defined", "with", "the", "arguments", "provided" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L110-L131
train
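A short sketch of driving ``call`` with the fault mapping above. The helper name is illustrative; ``client`` is assumed to be a connected InteractClient, and the exceptions noted in the comment are the classes raised in the function body.

def call_with_retry(client, method, *args, retries=2):
    """Retry a SOAP call on transient faults (sketch)."""
    last_exc = None
    for _ in range(retries + 1):
        try:
            return client.call(method, *args)
        except Exception as exc:  # ConnectError / ApiLimitError in practice
            last_exc = exc
    raise last_exc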
jslang/responsys
responsys/client.py
InteractClient.connect
def connect(self):
    """ Connects to the Responsys soap service

    Uses the credentials passed to the client init to log in and set up the
    session id returned. Returns the connection timestamp on success.
    """
    if self.session and self.session.is_expired:
        # Close the session to avoid max concurrent session errors
        self.disconnect(abandon_session=True)

    if not self.session:
        try:
            login_result = self.login(self.username, self.password)
        except AccountFault:
            log.error('Login failed, invalid username or password')
            raise
        else:
            self.session = login_result.session_id

    self.connected = time()
    return self.connected
python
def connect(self):
    """ Connects to the Responsys soap service

    Uses the credentials passed to the client init to log in and set up the
    session id returned. Returns the connection timestamp on success.
    """
    if self.session and self.session.is_expired:
        # Close the session to avoid max concurrent session errors
        self.disconnect(abandon_session=True)

    if not self.session:
        try:
            login_result = self.login(self.username, self.password)
        except AccountFault:
            log.error('Login failed, invalid username or password')
            raise
        else:
            self.session = login_result.session_id

    self.connected = time()
    return self.connected
[ "def", "connect", "(", "self", ")", ":", "if", "self", ".", "session", "and", "self", ".", "session", ".", "is_expired", ":", "# Close the session to avoid max concurrent session errors", "self", ".", "disconnect", "(", "abandon_session", "=", "True", ")", "if", "not", "self", ".", "session", ":", "try", ":", "login_result", "=", "self", ".", "login", "(", "self", ".", "username", ",", "self", ".", "password", ")", "except", "AccountFault", ":", "log", ".", "error", "(", "'Login failed, invalid username or password'", ")", "raise", "else", ":", "self", ".", "session", "=", "login_result", ".", "session_id", "self", ".", "connected", "=", "time", "(", ")", "return", "self", ".", "connected" ]
Connects to the Responsys soap service

Uses the credentials passed to the client init to log in and set up the
session id returned. Returns the connection timestamp on success.
[ "Connects", "to", "the", "Responsys", "soap", "service" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L133-L155
train
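A lifecycle sketch built only from the methods shown in this file: ``connect`` reuses an unexpired session and returns a timestamp, while ``disconnect(abandon_session=True)`` forces a logout. The helper name is illustrative, not part of the library.

def with_session(client, fn, *args):
    """Run ``fn`` inside a connect/disconnect pair (sketch)."""
    client.connect()  # logs in, or reuses an unexpired session
    try:
        return fn(client, *args)
    finally:
        client.disconnect(abandon_session=True)  # force logout, drop session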
jslang/responsys
responsys/client.py
InteractClient.disconnect
def disconnect(self, abandon_session=False):
    """ Disconnects from the Responsys soap service

    Calls the service logout method and destroys the client's session
    information. Returns True once the client state is cleared.
    """
    self.connected = False
    if (self.session and self.session.is_expired) or abandon_session:
        try:
            self.logout()
        except Exception:
            log.warning(
                'Logout call to responsys failed, session may not have been terminated',
                exc_info=True
            )

    del self.session
    return True
python
def disconnect(self, abandon_session=False):
    """ Disconnects from the Responsys soap service

    Calls the service logout method and destroys the client's session
    information. Returns True once the client state is cleared.
    """
    self.connected = False
    if (self.session and self.session.is_expired) or abandon_session:
        try:
            self.logout()
        except Exception:
            log.warning(
                'Logout call to responsys failed, session may not have been terminated',
                exc_info=True
            )

    del self.session
    return True
[ "def", "disconnect", "(", "self", ",", "abandon_session", "=", "False", ")", ":", "self", ".", "connected", "=", "False", "if", "(", "self", ".", "session", "and", "self", ".", "session", ".", "is_expired", ")", "or", "abandon_session", ":", "try", ":", "self", ".", "logout", "(", ")", "except", ":", "log", ".", "warning", "(", "'Logout call to responsys failed, session may have not been terminated'", ",", "exc_info", "=", "True", ")", "del", "self", ".", "session", "return", "True" ]
Disconnects from the Responsys soap service

Calls the service logout method and destroys the client's session
information. Returns True once the client state is cleared.
[ "Disconnects", "from", "the", "Responsys", "soap", "service" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L157-L173
train
jslang/responsys
responsys/client.py
InteractClient.merge_list_members
def merge_list_members(self, list_, record_data, merge_rule): """ Responsys.mergeListMembers call Accepts: InteractObject list_ RecordData record_data ListMergeRule merge_rule Returns a MergeResult """ list_ = list_.get_soap_object(self.client) record_data = record_data.get_soap_object(self.client) merge_rule = merge_rule.get_soap_object(self.client) return MergeResult(self.call('mergeListMembers', list_, record_data, merge_rule))
python
def merge_list_members(self, list_, record_data, merge_rule): """ Responsys.mergeListMembers call Accepts: InteractObject list_ RecordData record_data ListMergeRule merge_rule Returns a MergeResult """ list_ = list_.get_soap_object(self.client) record_data = record_data.get_soap_object(self.client) merge_rule = merge_rule.get_soap_object(self.client) return MergeResult(self.call('mergeListMembers', list_, record_data, merge_rule))
[ "def", "merge_list_members", "(", "self", ",", "list_", ",", "record_data", ",", "merge_rule", ")", ":", "list_", "=", "list_", ".", "get_soap_object", "(", "self", ".", "client", ")", "record_data", "=", "record_data", ".", "get_soap_object", "(", "self", ".", "client", ")", "merge_rule", "=", "merge_rule", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "MergeResult", "(", "self", ".", "call", "(", "'mergeListMembers'", ",", "list_", ",", "record_data", ",", "merge_rule", ")", ")" ]
Responsys.mergeListMembers call Accepts: InteractObject list_ RecordData record_data ListMergeRule merge_rule Returns a MergeResult
[ "Responsys", ".", "mergeListMembers", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L205-L218
train
jslang/responsys
responsys/client.py
InteractClient.merge_list_members_RIID
def merge_list_members_RIID(self, list_, record_data, merge_rule): """ Responsys.mergeListMembersRIID call Accepts: InteractObject list_ RecordData record_data ListMergeRule merge_rule Returns a RecipientResult """ list_ = list_.get_soap_object(self.client) result = self.call('mergeListMembersRIID', list_, record_data, merge_rule) return RecipientResult(result.recipientResult)
python
def merge_list_members_RIID(self, list_, record_data, merge_rule): """ Responsys.mergeListMembersRIID call Accepts: InteractObject list_ RecordData record_data ListMergeRule merge_rule Returns a RecipientResult """ list_ = list_.get_soap_object(self.client) result = self.call('mergeListMembersRIID', list_, record_data, merge_rule) return RecipientResult(result.recipientResult)
[ "def", "merge_list_members_RIID", "(", "self", ",", "list_", ",", "record_data", ",", "merge_rule", ")", ":", "list_", "=", "list_", ".", "get_soap_object", "(", "self", ".", "client", ")", "result", "=", "self", ".", "call", "(", "'mergeListMembersRIID'", ",", "list_", ",", "record_data", ",", "merge_rule", ")", "return", "RecipientResult", "(", "result", ".", "recipientResult", ")" ]
Responsys.mergeListMembersRIID call Accepts: InteractObject list_ RecordData record_data ListMergeRule merge_rule Returns a RecipientResult
[ "Responsys", ".", "mergeListMembersRIID", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L220-L232
train
jslang/responsys
responsys/client.py
InteractClient.delete_list_members
def delete_list_members(self, list_, query_column, ids_to_delete): """ Responsys.deleteListMembers call Accepts: InteractObject list_ string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list ids_to_delete Returns a list of DeleteResult instances """ list_ = list_.get_soap_object(self.client) result = self.call('deleteListMembers', list_, query_column, ids_to_delete) if hasattr(result, '__iter__'): return [DeleteResult(delete_result) for delete_result in result] return [DeleteResult(result)]
python
def delete_list_members(self, list_, query_column, ids_to_delete): """ Responsys.deleteListMembers call Accepts: InteractObject list_ string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list ids_to_delete Returns a list of DeleteResult instances """ list_ = list_.get_soap_object(self.client) result = self.call('deleteListMembers', list_, query_column, ids_to_delete) if hasattr(result, '__iter__'): return [DeleteResult(delete_result) for delete_result in result] return [DeleteResult(result)]
[ "def", "delete_list_members", "(", "self", ",", "list_", ",", "query_column", ",", "ids_to_delete", ")", ":", "list_", "=", "list_", ".", "get_soap_object", "(", "self", ".", "client", ")", "result", "=", "self", ".", "call", "(", "'deleteListMembers'", ",", "list_", ",", "query_column", ",", "ids_to_delete", ")", "if", "hasattr", "(", "result", ",", "'__iter__'", ")", ":", "return", "[", "DeleteResult", "(", "delete_result", ")", "for", "delete_result", "in", "result", "]", "return", "[", "DeleteResult", "(", "result", ")", "]" ]
Responsys.deleteListMembers call Accepts: InteractObject list_ string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list ids_to_delete Returns a list of DeleteResult instances
[ "Responsys", ".", "deleteListMembers", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L234-L249
train
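A usage sketch for the record above. ``list_`` is assumed to be an already-built InteractObject wrapper, whose construction this record does not show; the helper name is illustrative.

def purge_by_email(client, list_, emails):
    """Delete list members by email address (sketch)."""
    # query_column options per the docstring:
    # 'RIID' | 'EMAIL_ADDRESS' | 'CUSTOMER_ID' | 'MOBILE_NUMBER'
    # A list of DeleteResult wrappers comes back even for a single id.
    return client.delete_list_members(list_, 'EMAIL_ADDRESS', emails)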
jslang/responsys
responsys/client.py
InteractClient.retrieve_list_members
def retrieve_list_members(self, list_, query_column, field_list, ids_to_retrieve): """ Responsys.retrieveListMembers call Accepts: InteractObject list_ string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list field_list list ids_to_retrieve Returns a RecordData instance """ list_ = list_.get_soap_object(self.client) result = self.call('retrieveListMembers', list_, query_column, field_list, ids_to_retrieve) return RecordData.from_soap_type(result.recordData)
python
def retrieve_list_members(self, list_, query_column, field_list, ids_to_retrieve): """ Responsys.retrieveListMembers call Accepts: InteractObject list_ string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list field_list list ids_to_retrieve Returns a RecordData instance """ list_ = list_.get_soap_object(self.client) result = self.call('retrieveListMembers', list_, query_column, field_list, ids_to_retrieve) return RecordData.from_soap_type(result.recordData)
[ "def", "retrieve_list_members", "(", "self", ",", "list_", ",", "query_column", ",", "field_list", ",", "ids_to_retrieve", ")", ":", "list_", "=", "list_", ".", "get_soap_object", "(", "self", ".", "client", ")", "result", "=", "self", ".", "call", "(", "'retrieveListMembers'", ",", "list_", ",", "query_column", ",", "field_list", ",", "ids_to_retrieve", ")", "return", "RecordData", ".", "from_soap_type", "(", "result", ".", "recordData", ")" ]
Responsys.retrieveListMembers call Accepts: InteractObject list_ string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list field_list list ids_to_retrieve Returns a RecordData instance
[ "Responsys", ".", "retrieveListMembers", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L251-L265
train
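A companion sketch for retrieval. The field names are hypothetical Responsys column names, and ``list_`` is again an assumed InteractObject wrapper.

def fetch_records(client, list_, riids):
    """Retrieve selected fields for the given RIIDs (sketch)."""
    field_list = ['RIID_', 'EMAIL_ADDRESS_']  # hypothetical field names
    # Returns the RecordData wrapper rebuilt from the SOAP response.
    return client.retrieve_list_members(list_, 'RIID', field_list, riids)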
jslang/responsys
responsys/client.py
InteractClient.create_table
def create_table(self, table, fields): """ Responsys.createTable call Accepts: InteractObject table list fields Returns True on success """ table = table.get_soap_object(self.client) return self.call('createTable', table, fields)
python
def create_table(self, table, fields): """ Responsys.createTable call Accepts: InteractObject table list fields Returns True on success """ table = table.get_soap_object(self.client) return self.call('createTable', table, fields)
[ "def", "create_table", "(", "self", ",", "table", ",", "fields", ")", ":", "table", "=", "table", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "self", ".", "call", "(", "'createTable'", ",", "table", ",", "fields", ")" ]
Responsys.createTable call Accepts: InteractObject table list fields Returns True on success
[ "Responsys", ".", "createTable", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L268-L278
train
jslang/responsys
responsys/client.py
InteractClient.create_table_with_pk
def create_table_with_pk(self, table, fields, primary_keys): """ Responsys.createTableWithPK call Accepts: InteractObject table list fields list primary_keys Returns True on success """ table = table.get_soap_object(self.client) return self.call('createTableWithPK', table, fields, primary_keys)
python
def create_table_with_pk(self, table, fields, primary_keys): """ Responsys.createTableWithPK call Accepts: InteractObject table list fields list primary_keys Returns True on success """ table = table.get_soap_object(self.client) return self.call('createTableWithPK', table, fields, primary_keys)
[ "def", "create_table_with_pk", "(", "self", ",", "table", ",", "fields", ",", "primary_keys", ")", ":", "table", "=", "table", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "self", ".", "call", "(", "'createTableWithPK'", ",", "table", ",", "fields", ",", "primary_keys", ")" ]
Responsys.createTableWithPK call Accepts: InteractObject table list fields list primary_keys Returns True on success
[ "Responsys", ".", "createTableWithPK", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L280-L291
train
jslang/responsys
responsys/client.py
InteractClient.delete_table
def delete_table(self, table): """ Responsys.deleteTable call Accepts: InteractObject table Returns True on success """ table = table.get_soap_object(self.client) return self.call('deleteTable', table)
python
def delete_table(self, table): """ Responsys.deleteTable call Accepts: InteractObject table Returns True on success """ table = table.get_soap_object(self.client) return self.call('deleteTable', table)
[ "def", "delete_table", "(", "self", ",", "table", ")", ":", "table", "=", "table", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "self", ".", "call", "(", "'deleteTable'", ",", "table", ")" ]
Responsys.deleteTable call Accepts: InteractObject table Returns True on success
[ "Responsys", ".", "deleteTable", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L293-L302
train
jslang/responsys
responsys/client.py
InteractClient.delete_profile_extension_members
def delete_profile_extension_members(self, profile_extension, query_column, ids_to_delete):
    """ Responsys.deleteProfileExtensionMembers call

    Accepts:
        InteractObject profile_extension
        string query_column
        list ids_to_delete

    Returns list of DeleteResults
    """
    profile_extension = profile_extension.get_soap_object(self.client)
    result = self.call(
        'deleteProfileExtensionMembers', profile_extension, query_column, ids_to_delete)
    if hasattr(result, '__iter__'):
        return [DeleteResult(delete_result) for delete_result in result]
    return [DeleteResult(result)]
python
def delete_profile_extension_members(self, profile_extension, query_column, ids_to_delete):
    """ Responsys.deleteProfileExtensionMembers call

    Accepts:
        InteractObject profile_extension
        string query_column
        list ids_to_delete

    Returns list of DeleteResults
    """
    profile_extension = profile_extension.get_soap_object(self.client)
    result = self.call(
        'deleteProfileExtensionMembers', profile_extension, query_column, ids_to_delete)
    if hasattr(result, '__iter__'):
        return [DeleteResult(delete_result) for delete_result in result]
    return [DeleteResult(result)]
[ "def", "delete_profile_extension_members", "(", "self", ",", "profile_extension", ",", "query_column", ",", "ids_to_delete", ")", ":", "profile_extension", "=", "profile_extension", ".", "get_soap_object", "(", "self", ".", "client", ")", "result", "=", "self", ".", "call", "(", "'deleteProfileExtensionMembers'", ",", "profile_extension", ",", "query_column", ",", "ids_to_delete", ")", "if", "hasattr", "(", "result", ",", "'__iter__'", ")", ":", "return", "[", "DeleteResult", "(", "delete_result", ")", "for", "delete_result", "in", "result", "]", "return", "[", "DeleteResult", "(", "result", ")", "]" ]
Responsys.deleteProfileExtensionMembers call

Accepts:
    InteractObject profile_extension
    string query_column
    list ids_to_delete

Returns list of DeleteResults
[ "Responsys", ".", "deleteProfileExtensionRecords", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L304-L321
train
jslang/responsys
responsys/client.py
InteractClient.retrieve_profile_extension_records
def retrieve_profile_extension_records(self, profile_extension, field_list, ids_to_retrieve, query_column='RIID'): """ Responsys.retrieveProfileExtensionRecords call Accepts: InteractObject profile_extension list field_list list ids_to_retrieve string query_column default: 'RIID' Returns RecordData """ profile_extension = profile_extension.get_soap_object(self.client) return RecordData.from_soap_type( self.call('retrieveProfileExtensionRecords', profile_extension, query_column, field_list, ids_to_retrieve))
python
def retrieve_profile_extension_records(self, profile_extension, field_list, ids_to_retrieve, query_column='RIID'): """ Responsys.retrieveProfileExtensionRecords call Accepts: InteractObject profile_extension list field_list list ids_to_retrieve string query_column default: 'RIID' Returns RecordData """ profile_extension = profile_extension.get_soap_object(self.client) return RecordData.from_soap_type( self.call('retrieveProfileExtensionRecords', profile_extension, query_column, field_list, ids_to_retrieve))
[ "def", "retrieve_profile_extension_records", "(", "self", ",", "profile_extension", ",", "field_list", ",", "ids_to_retrieve", ",", "query_column", "=", "'RIID'", ")", ":", "profile_extension", "=", "profile_extension", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "RecordData", ".", "from_soap_type", "(", "self", ".", "call", "(", "'retrieveProfileExtensionRecords'", ",", "profile_extension", ",", "query_column", ",", "field_list", ",", "ids_to_retrieve", ")", ")" ]
Responsys.retrieveProfileExtensionRecords call Accepts: InteractObject profile_extension list field_list list ids_to_retrieve string query_column default: 'RIID' Returns RecordData
[ "Responsys", ".", "retrieveProfileExtensionRecords", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L323-L339
train
jslang/responsys
responsys/client.py
InteractClient.truncate_table
def truncate_table(self, table): """ Responsys.truncateTable call Accepts: InteractObject table Returns True on success """ table = table.get_soap_object(self.client) return self.call('truncateTable', table)
python
def truncate_table(self, table): """ Responsys.truncateTable call Accepts: InteractObject table Returns True on success """ table = table.get_soap_object(self.client) return self.call('truncateTable', table)
[ "def", "truncate_table", "(", "self", ",", "table", ")", ":", "table", "=", "table", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "self", ".", "call", "(", "'truncateTable'", ",", "table", ")" ]
Responsys.truncateTable call Accepts: InteractObject table Returns True on success
[ "Responsys", ".", "truncateTable", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L341-L350
train
jslang/responsys
responsys/client.py
InteractClient.delete_table_records
def delete_table_records(self, table, query_column, ids_to_delete): """ Responsys.deleteTableRecords call Accepts: InteractObject table string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list ids_to_delete Returns a list of DeleteResult instances """ table = table.get_soap_object(self.client) result = self.call('deleteTableRecords', table, query_column, ids_to_delete) if hasattr(result, '__iter__'): return [DeleteResult(delete_result) for delete_result in result] return [DeleteResult(result)]
python
def delete_table_records(self, table, query_column, ids_to_delete): """ Responsys.deleteTableRecords call Accepts: InteractObject table string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list ids_to_delete Returns a list of DeleteResult instances """ table = table.get_soap_object(self.client) result = self.call('deleteTableRecords', table, query_column, ids_to_delete) if hasattr(result, '__iter__'): return [DeleteResult(delete_result) for delete_result in result] return [DeleteResult(result)]
[ "def", "delete_table_records", "(", "self", ",", "table", ",", "query_column", ",", "ids_to_delete", ")", ":", "table", "=", "table", ".", "get_soap_object", "(", "self", ".", "client", ")", "result", "=", "self", ".", "call", "(", "'deleteTableRecords'", ",", "table", ",", "query_column", ",", "ids_to_delete", ")", "if", "hasattr", "(", "result", ",", "'__iter__'", ")", ":", "return", "[", "DeleteResult", "(", "delete_result", ")", "for", "delete_result", "in", "result", "]", "return", "[", "DeleteResult", "(", "result", ")", "]" ]
Responsys.deleteTableRecords call Accepts: InteractObject table string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list ids_to_delete Returns a list of DeleteResult instances
[ "Responsys", ".", "deleteTableRecords", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L352-L367
train
jslang/responsys
responsys/client.py
InteractClient.merge_table_records
def merge_table_records(self, table, record_data, match_column_names): """ Responsys.mergeTableRecords call Accepts: InteractObject table RecordData record_data list match_column_names Returns a MergeResult """ table = table.get_soap_object(self.client) record_data = record_data.get_soap_object(self.client) return MergeResult(self.call( 'mergeTableRecords', table, record_data, match_column_names))
python
def merge_table_records(self, table, record_data, match_column_names): """ Responsys.mergeTableRecords call Accepts: InteractObject table RecordData record_data list match_column_names Returns a MergeResult """ table = table.get_soap_object(self.client) record_data = record_data.get_soap_object(self.client) return MergeResult(self.call( 'mergeTableRecords', table, record_data, match_column_names))
[ "def", "merge_table_records", "(", "self", ",", "table", ",", "record_data", ",", "match_column_names", ")", ":", "table", "=", "table", ".", "get_soap_object", "(", "self", ".", "client", ")", "record_data", "=", "record_data", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "MergeResult", "(", "self", ".", "call", "(", "'mergeTableRecords'", ",", "table", ",", "record_data", ",", "match_column_names", ")", ")" ]
Responsys.mergeTableRecords call Accepts: InteractObject table RecordData record_data list match_column_names Returns a MergeResult
[ "Responsys", ".", "mergeTableRecords", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L369-L382
train
jslang/responsys
responsys/client.py
InteractClient.merge_table_records_with_pk
def merge_table_records_with_pk(self, table, record_data, insert_on_no_match, update_on_match): """ Responsys.mergeTableRecordsWithPK call Accepts: InteractObject table RecordData record_data string insert_on_no_match string update_on_match Returns a MergeResult """ table = table.get_soap_object(self.client) record_data = record_data.get_soap_object(self.client) return MergeResult(self.call( 'mergeTableRecordsWithPK', table, record_data, insert_on_no_match, update_on_match))
python
def merge_table_records_with_pk(self, table, record_data, insert_on_no_match, update_on_match): """ Responsys.mergeTableRecordsWithPK call Accepts: InteractObject table RecordData record_data string insert_on_no_match string update_on_match Returns a MergeResult """ table = table.get_soap_object(self.client) record_data = record_data.get_soap_object(self.client) return MergeResult(self.call( 'mergeTableRecordsWithPK', table, record_data, insert_on_no_match, update_on_match))
[ "def", "merge_table_records_with_pk", "(", "self", ",", "table", ",", "record_data", ",", "insert_on_no_match", ",", "update_on_match", ")", ":", "table", "=", "table", ".", "get_soap_object", "(", "self", ".", "client", ")", "record_data", "=", "record_data", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "MergeResult", "(", "self", ".", "call", "(", "'mergeTableRecordsWithPK'", ",", "table", ",", "record_data", ",", "insert_on_no_match", ",", "update_on_match", ")", ")" ]
Responsys.mergeTableRecordsWithPK call Accepts: InteractObject table RecordData record_data string insert_on_no_match string update_on_match Returns a MergeResult
[ "Responsys", ".", "mergeTableRecordsWithPK", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L384-L398
train
jslang/responsys
responsys/client.py
InteractClient.merge_into_profile_extension
def merge_into_profile_extension(self, profile_extension, record_data, match_column, insert_on_no_match, update_on_match): """ Responsys.mergeIntoProfileExtension call Accepts: InteractObject profile_extension RecordData record_data string match_column string insert_on_no_match string update_on_match Returns a RecipientResult """ profile_extension = profile_extension.get_soap_object(self.client) record_data = record_data.get_soap_object(self.client) results = self.call( 'mergeIntoProfileExtension', profile_extension, record_data, match_column, insert_on_no_match, update_on_match) return [RecipientResult(result) for result in results]
python
def merge_into_profile_extension(self, profile_extension, record_data, match_column, insert_on_no_match, update_on_match): """ Responsys.mergeIntoProfileExtension call Accepts: InteractObject profile_extension RecordData record_data string match_column string insert_on_no_match string update_on_match Returns a RecipientResult """ profile_extension = profile_extension.get_soap_object(self.client) record_data = record_data.get_soap_object(self.client) results = self.call( 'mergeIntoProfileExtension', profile_extension, record_data, match_column, insert_on_no_match, update_on_match) return [RecipientResult(result) for result in results]
[ "def", "merge_into_profile_extension", "(", "self", ",", "profile_extension", ",", "record_data", ",", "match_column", ",", "insert_on_no_match", ",", "update_on_match", ")", ":", "profile_extension", "=", "profile_extension", ".", "get_soap_object", "(", "self", ".", "client", ")", "record_data", "=", "record_data", ".", "get_soap_object", "(", "self", ".", "client", ")", "results", "=", "self", ".", "call", "(", "'mergeIntoProfileExtension'", ",", "profile_extension", ",", "record_data", ",", "match_column", ",", "insert_on_no_match", ",", "update_on_match", ")", "return", "[", "RecipientResult", "(", "result", ")", "for", "result", "in", "results", "]" ]
Responsys.mergeIntoProfileExtension call Accepts: InteractObject profile_extension RecordData record_data string match_column string insert_on_no_match string update_on_match Returns a RecipientResult
[ "Responsys", ".", "mergeIntoProfileExtension", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L400-L418
train
jslang/responsys
responsys/client.py
InteractClient.retrieve_table_records
def retrieve_table_records(self, table, query_column, field_list, ids_to_retrieve): """ Responsys.retrieveTableRecords call Accepts: InteractObject table string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list field_list list ids_to_retrieve Returns a RecordData """ table = table.get_soap_object(self.client) return RecordData.from_soap_type(self.call( 'retrieveTableRecords', table, query_column, field_list, ids_to_retrieve))
python
def retrieve_table_records(self, table, query_column, field_list, ids_to_retrieve): """ Responsys.retrieveTableRecords call Accepts: InteractObject table string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list field_list list ids_to_retrieve Returns a RecordData """ table = table.get_soap_object(self.client) return RecordData.from_soap_type(self.call( 'retrieveTableRecords', table, query_column, field_list, ids_to_retrieve))
[ "def", "retrieve_table_records", "(", "self", ",", "table", ",", "query_column", ",", "field_list", ",", "ids_to_retrieve", ")", ":", "table", "=", "table", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "RecordData", ".", "from_soap_type", "(", "self", ".", "call", "(", "'retrieveTableRecords'", ",", "table", ",", "query_column", ",", "field_list", ",", "ids_to_retrieve", ")", ")" ]
Responsys.retrieveTableRecords call Accepts: InteractObject table string query_column possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER' list field_list list ids_to_retrieve Returns a RecordData
[ "Responsys", ".", "retrieveTableRecords", "call" ]
9b355a444c0c75dff41064502c1e2b76dfd5cb93
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L420-L434
train
bluekeyes/sphinx-javalink
javalink/ref.py
normalize_docroot
def normalize_docroot(app, root): """Creates a package-list URL and a link base from a docroot element. Args: app: the global app object root: the docroot element [string or dictionary] """ srcdir = app.env.srcdir default_version = app.config.javalink_default_version if isinstance(root, basestring): (url, base) = _parse_docroot_str(srcdir, root) return {'root': url, 'base': base, 'version': default_version} else: normalized = {} normalized['root'] = _parse_docroot_str(srcdir, root['root'])[0] if 'base' in root: normalized['base'] = _parse_docroot_str(srcdir, root['base'])[1] else: normalized['base'] = _parse_docroot_str(srcdir, root['root'])[1] if 'version' in root: normalized['version'] = root['version'] else: normalized['version'] = default_version return normalized
python
def normalize_docroot(app, root): """Creates a package-list URL and a link base from a docroot element. Args: app: the global app object root: the docroot element [string or dictionary] """ srcdir = app.env.srcdir default_version = app.config.javalink_default_version if isinstance(root, basestring): (url, base) = _parse_docroot_str(srcdir, root) return {'root': url, 'base': base, 'version': default_version} else: normalized = {} normalized['root'] = _parse_docroot_str(srcdir, root['root'])[0] if 'base' in root: normalized['base'] = _parse_docroot_str(srcdir, root['base'])[1] else: normalized['base'] = _parse_docroot_str(srcdir, root['root'])[1] if 'version' in root: normalized['version'] = root['version'] else: normalized['version'] = default_version return normalized
[ "def", "normalize_docroot", "(", "app", ",", "root", ")", ":", "srcdir", "=", "app", ".", "env", ".", "srcdir", "default_version", "=", "app", ".", "config", ".", "javalink_default_version", "if", "isinstance", "(", "root", ",", "basestring", ")", ":", "(", "url", ",", "base", ")", "=", "_parse_docroot_str", "(", "srcdir", ",", "root", ")", "return", "{", "'root'", ":", "url", ",", "'base'", ":", "base", ",", "'version'", ":", "default_version", "}", "else", ":", "normalized", "=", "{", "}", "normalized", "[", "'root'", "]", "=", "_parse_docroot_str", "(", "srcdir", ",", "root", "[", "'root'", "]", ")", "[", "0", "]", "if", "'base'", "in", "root", ":", "normalized", "[", "'base'", "]", "=", "_parse_docroot_str", "(", "srcdir", ",", "root", "[", "'base'", "]", ")", "[", "1", "]", "else", ":", "normalized", "[", "'base'", "]", "=", "_parse_docroot_str", "(", "srcdir", ",", "root", "[", "'root'", "]", ")", "[", "1", "]", "if", "'version'", "in", "root", ":", "normalized", "[", "'version'", "]", "=", "root", "[", "'version'", "]", "else", ":", "normalized", "[", "'version'", "]", "=", "default_version", "return", "normalized" ]
Creates a package-list URL and a link base from a docroot element. Args: app: the global app object root: the docroot element [string or dictionary]
[ "Creates", "a", "package", "-", "list", "URL", "and", "a", "link", "base", "from", "a", "docroot", "element", "." ]
490e37506efa53e95ad88a665e347536e75b6254
https://github.com/bluekeyes/sphinx-javalink/blob/490e37506efa53e95ad88a665e347536e75b6254/javalink/ref.py#L296-L324
train
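Illustrative inputs for ``normalize_docroot``, not taken from the dataset: both accepted shapes normalize to the same three-key mapping. The ``javalink_docroots`` option name is an assumption based on the extension's other config names.

# conf.py (illustrative values)
javalink_docroots = [
    'https://docs.oracle.com/javase/8/docs/api/',            # bare string
    {'root': 'build/apidocs', 'base': 'api', 'version': 8},  # dict form
]
# After normalize_docroot, each entry is a dict with exactly these keys:
# {'root': <package-list URL>, 'base': <link base>, 'version': <version>}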
mojaie/chorus
chorus/descriptor.py
assign_valence
def assign_valence(mol):
    """Assign pi electrons and hydrogens"""
    for u, v, bond in mol.bonds_iter():
        if bond.order == 2:
            mol.atom(u).pi = 1
            mol.atom(v).pi = 1
            if mol.atom(u).symbol == "O" and not mol.atom(u).charge:
                mol.atom(v).carbonyl_C = 1
            if mol.atom(v).symbol == "O" and not mol.atom(v).charge:
                mol.atom(u).carbonyl_C = 1
        elif bond.order == 3:
            mol.atom(u).pi = mol.atom(v).pi = 2
    max_nbr = {"C": 4, "Si": 4, "N": 3, "P": 3, "As": 3,
               "O": 2, "S": 2, "Se": 2, "F": 1, "Cl": 1, "Br": 1, "I": 1}
    for i, nbrs in mol.neighbors_iter():
        atom = mol.atom(i)
        if len(nbrs) == 2 and all(bond.order == 2 for bond in nbrs.values()):
            atom.pi = 2  # sp (allene, ketene)
        if atom.symbol in max_nbr:
            h_cnt = max_nbr[atom.symbol] - len(nbrs) - atom.pi + atom.charge
            if h_cnt > 0:
                mol.atom(i).add_hydrogen(h_cnt)
    mol.descriptors.add("Valence")
python
def assign_valence(mol):
    """Assign pi electrons and hydrogens"""
    for u, v, bond in mol.bonds_iter():
        if bond.order == 2:
            mol.atom(u).pi = 1
            mol.atom(v).pi = 1
            if mol.atom(u).symbol == "O" and not mol.atom(u).charge:
                mol.atom(v).carbonyl_C = 1
            if mol.atom(v).symbol == "O" and not mol.atom(v).charge:
                mol.atom(u).carbonyl_C = 1
        elif bond.order == 3:
            mol.atom(u).pi = mol.atom(v).pi = 2
    max_nbr = {"C": 4, "Si": 4, "N": 3, "P": 3, "As": 3,
               "O": 2, "S": 2, "Se": 2, "F": 1, "Cl": 1, "Br": 1, "I": 1}
    for i, nbrs in mol.neighbors_iter():
        atom = mol.atom(i)
        if len(nbrs) == 2 and all(bond.order == 2 for bond in nbrs.values()):
            atom.pi = 2  # sp (allene, ketene)
        if atom.symbol in max_nbr:
            h_cnt = max_nbr[atom.symbol] - len(nbrs) - atom.pi + atom.charge
            if h_cnt > 0:
                mol.atom(i).add_hydrogen(h_cnt)
    mol.descriptors.add("Valence")
[ "def", "assign_valence", "(", "mol", ")", ":", "for", "u", ",", "v", ",", "bond", "in", "mol", ".", "bonds_iter", "(", ")", ":", "if", "bond", ".", "order", "==", "2", ":", "mol", ".", "atom", "(", "u", ")", ".", "pi", "=", "1", "mol", ".", "atom", "(", "v", ")", ".", "pi", "=", "1", "if", "mol", ".", "atom", "(", "u", ")", ".", "symbol", "==", "\"O\"", "and", "not", "mol", ".", "atom", "(", "u", ")", ".", "charge", ":", "mol", ".", "atom", "(", "v", ")", ".", "carbonyl_C", "=", "1", "if", "mol", ".", "atom", "(", "v", ")", ".", "symbol", "==", "\"O\"", "and", "not", "mol", ".", "atom", "(", "v", ")", ".", "charge", ":", "mol", ".", "atom", "(", "u", ")", ".", "carbonyl_C", "=", "1", "elif", "bond", ".", "order", "==", "3", ":", "mol", ".", "atom", "(", "u", ")", ".", "pi", "=", "mol", ".", "atom", "(", "v", ")", ".", "pi", "=", "2", "max_nbr", "=", "{", "\"C\"", ":", "4", ",", "\"Si\"", ":", "4", ",", "\"N\"", ":", "3", ",", "\"P\"", ":", "3", ",", "\"As\"", ":", "3", ",", "\"O\"", ":", "2", ",", "\"S\"", ":", "2", ",", "\"Se\"", ":", "2", ",", "\"F\"", ":", "1", ",", "\"Cl\"", ":", "1", ",", "\"Br\"", ":", "1", ",", "\"I\"", ":", "1", "}", "for", "i", ",", "nbrs", "in", "mol", ".", "neighbors_iter", "(", ")", ":", "atom", "=", "mol", ".", "atom", "(", "i", ")", "if", "len", "(", "nbrs", ")", "==", "2", "and", "all", "(", "bond", ".", "order", "==", "2", "for", "bond", "in", "nbrs", ".", "values", "(", ")", ")", ":", "atom", ".", "pi", "=", "2", "# sp (allene, ketene)", "if", "atom", ".", "symbol", "in", "max_nbr", ":", "h_cnt", "=", "max_nbr", "[", "atom", ".", "symbol", "]", "-", "len", "(", "nbrs", ")", "-", "atom", ".", "pi", "+", "atom", ".", "charge", "if", "h_cnt", ">", "0", ":", "mol", ".", "atom", "(", "i", ")", ".", "add_hydrogen", "(", "h_cnt", ")", "mol", ".", "descriptors", ".", "add", "(", "\"Valence\"", ")" ]
Assign pi electrons and hydrogens
[ "Assign", "pi", "electron", "and", "hydrogens" ]
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/descriptor.py#L10-L32
train
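The hydrogen count in ``assign_valence`` follows a single arithmetic rule; here is a standalone illustration using the same table and formula (a subset of the elements, for brevity).

max_nbr = {"C": 4, "N": 3, "O": 2}  # subset of the table above

def implicit_h(symbol, degree, pi, charge=0):
    # implicit H = max neighbors - explicit neighbors - pi electrons + charge
    return max(max_nbr[symbol] - degree - pi + charge, 0)

assert implicit_h("C", 2, 1) == 1  # internal sp2 carbon in C=C-C
assert implicit_h("O", 1, 1) == 0  # carbonyl oxygen
assert implicit_h("N", 1, 2) == 0  # nitrile nitrogen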
mojaie/chorus
chorus/descriptor.py
assign_charge
def assign_charge(mol, force_recalc=False):
    """Assign charges under physiological conditions"""
    # TODO: not implemented yet
    mol.require("Aromatic")
    for i, nbrs in mol.neighbors_iter():
        atom = mol.atom(i)
        nbrcnt = len(nbrs)
        if atom.symbol == "N":
            if not atom.pi:
                # non-conjugated amines are cations
                mol.atom(i).charge_phys = 1
            elif nbrcnt == 1 and atom.pi == 2:
                # amidines and guanidines are conjugated cations
                ni = list(nbrs.keys())[0]
                conj = False
                sp2n = None
                for nni, nnb in mol.neighbors(ni).items():
                    if mol.atom(nni).symbol == "N" and nnb.order == 2 \
                            and not mol.atom(nni).aromatic:
                        mol.atom(nni).charge_conj = 1
                        conj = True
                    elif mol.atom(nni).symbol == "N" and nni != i:
                        sp2n = nni
                if conj:
                    mol.atom(i).charge_phys = 1
                    if sp2n is not None:
                        mol.atom(sp2n).charge_conj = 1
        elif atom.symbol == "O" and nbrcnt == 1 and atom.pi == 2:
            # oxoacids are conjugated anions
            ni = list(nbrs.keys())[0]
            conj = False
            if mol.atom(ni).symbol == "N":
                mol.atom(i).n_oxide = True
                mol.atom(ni).n_oxide = True
            for nni, nnb in mol.neighbors(ni).items():
                if mol.atom(nni).symbol in ("O", "S") \
                        and nnb.order == 2 and not mol.atom(ni).n_oxide:
                    mol.atom(nni).charge_conj = -1
                    conj = True
            if conj:
                mol.atom(i).charge_phys = -1
        elif atom.symbol == "S" and nbrcnt == 1:
            # thiophenols are anions
            ni = list(nbrs.keys())[0]
            if mol.atom(ni).aromatic:
                mol.atom(i).charge_phys = -1
    mol.charge_assigned = True
    mol.descriptors.add("Phys_charge")
python
def assign_charge(mol, force_recalc=False):
    """Assign charges under physiological conditions"""
    # TODO: not implemented yet
    mol.require("Aromatic")
    for i, nbrs in mol.neighbors_iter():
        atom = mol.atom(i)
        nbrcnt = len(nbrs)
        if atom.symbol == "N":
            if not atom.pi:
                # non-conjugated amines are cations
                mol.atom(i).charge_phys = 1
            elif nbrcnt == 1 and atom.pi == 2:
                # amidines and guanidines are conjugated cations
                ni = list(nbrs.keys())[0]
                conj = False
                sp2n = None
                for nni, nnb in mol.neighbors(ni).items():
                    if mol.atom(nni).symbol == "N" and nnb.order == 2 \
                            and not mol.atom(nni).aromatic:
                        mol.atom(nni).charge_conj = 1
                        conj = True
                    elif mol.atom(nni).symbol == "N" and nni != i:
                        sp2n = nni
                if conj:
                    mol.atom(i).charge_phys = 1
                    if sp2n is not None:
                        mol.atom(sp2n).charge_conj = 1
        elif atom.symbol == "O" and nbrcnt == 1 and atom.pi == 2:
            # oxoacids are conjugated anions
            ni = list(nbrs.keys())[0]
            conj = False
            if mol.atom(ni).symbol == "N":
                mol.atom(i).n_oxide = True
                mol.atom(ni).n_oxide = True
            for nni, nnb in mol.neighbors(ni).items():
                if mol.atom(nni).symbol in ("O", "S") \
                        and nnb.order == 2 and not mol.atom(ni).n_oxide:
                    mol.atom(nni).charge_conj = -1
                    conj = True
            if conj:
                mol.atom(i).charge_phys = -1
        elif atom.symbol == "S" and nbrcnt == 1:
            # thiophenols are anions
            ni = list(nbrs.keys())[0]
            if mol.atom(ni).aromatic:
                mol.atom(i).charge_phys = -1
    mol.charge_assigned = True
    mol.descriptors.add("Phys_charge")
[ "def", "assign_charge", "(", "mol", ",", "force_recalc", "=", "False", ")", ":", "# TODO: not implemented yet", "mol", ".", "require", "(", "\"Aromatic\"", ")", "for", "i", ",", "nbrs", "in", "mol", ".", "neighbors_iter", "(", ")", ":", "atom", "=", "mol", ".", "atom", "(", "i", ")", "nbrcnt", "=", "len", "(", "nbrs", ")", "if", "atom", ".", "symbol", "==", "\"N\"", ":", "if", "not", "atom", ".", "pi", ":", "# non-conjugated amines are anion", "mol", ".", "atom", "(", "i", ")", ".", "charge_phys", "=", "1", "elif", "nbrcnt", "==", "1", "and", "atom", ".", "pi", "==", "2", ":", "# amidine, guanidine are conjugated cation", "ni", "=", "list", "(", "nbrs", ".", "keys", "(", ")", ")", "[", "0", "]", "conj", "=", "False", "sp2n", "=", "None", "for", "nni", ",", "nnb", "in", "mol", ".", "neighbors", "(", "ni", ")", ".", "items", "(", ")", ":", "if", "mol", ".", "atom", "(", "nni", ")", ".", "symbol", "==", "\"N\"", "and", "nnb", ".", "order", "==", "2", "and", "not", "mol", ".", "atom", "(", "nni", ")", ".", "aromatic", ":", "mol", ".", "atom", "(", "nni", ")", ".", "charge_conj", "=", "1", "conj", "=", "True", "elif", "mol", ".", "atom", "(", "nni", ")", ".", "symbol", "==", "\"N\"", "and", "nni", "!=", "i", ":", "sp2n", "=", "nni", "if", "conj", ":", "mol", ".", "atom", "(", "i", ")", ".", "charge_phys", "=", "1", "if", "sp2n", "is", "not", "None", ":", "mol", ".", "atom", "(", "sp2n", ")", ".", "charge_conj", "=", "1", "elif", "atom", ".", "symbol", "==", "\"O\"", "and", "nbrcnt", "==", "1", "and", "atom", ".", "pi", "==", "2", ":", "# oxoacid are conjugated anion", "ni", "=", "list", "(", "nbrs", ".", "keys", "(", ")", ")", "[", "0", "]", "conj", "=", "False", "if", "mol", ".", "atom", "(", "ni", ")", ".", "symbol", "==", "\"N\"", ":", "mol", ".", "atom", "(", "i", ")", ".", "n_oxide", "=", "True", "mol", ".", "atom", "(", "ni", ")", ".", "n_oxide", "=", "True", "for", "nni", ",", "nnb", "in", "mol", ".", "neighbors", "(", "ni", ")", ".", "items", "(", ")", ":", "if", "mol", ".", "atom", "(", "nni", ")", ".", "symbol", "in", "(", "\"O\"", ",", "\"S\"", ")", "and", "nnb", ".", "order", "==", "2", "and", "not", "mol", ".", "atom", "(", "ni", ")", ".", "n_oxide", ":", "mol", ".", "atom", "(", "nni", ")", ".", "charge_conj", "=", "-", "1", "conj", "=", "True", "if", "conj", ":", "mol", ".", "atom", "(", "i", ")", ".", "charge_phys", "=", "-", "1", "elif", "atom", ".", "symbol", "==", "\"S\"", "and", "nbrcnt", "==", "1", ":", "# thiophenols are anion", "ni", "=", "list", "(", "nbrs", ".", "keys", "(", ")", ")", "[", "0", "]", "if", "mol", ".", "atom", "(", "ni", ")", ".", "aromatic", ":", "mol", ".", "atom", "(", "i", ")", ".", "charge_phys", "=", "-", "1", "mol", ".", "charge_assigned", "=", "True", "mol", ".", "descriptors", ".", "add", "(", "\"Phys_charge\"", ")" ]
Assign charges in physiological condition
[ "Assign", "charges", "in", "physiological", "condition" ]
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/descriptor.py#L99-L146
train
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/taskutils.py
get_type
def get_type(type_name):
    """Get a type given its importable name.

    Parameters
    ----------
    type_name : `str`
        Name of the Python type, such as ``mypackage.MyClass``.

    Returns
    -------
    object
        The object.
    """
    parts = type_name.split('.')
    if len(parts) < 2:
        raise SphinxError(
            'Type must be fully-qualified, '
            'of the form ``module.MyClass``. Got: {}'.format(type_name)
        )
    module_name = ".".join(parts[0:-1])
    name = parts[-1]
    return getattr(import_module(module_name), name)
python
def get_type(type_name):
    """Get a type given its importable name.

    Parameters
    ----------
    type_name : `str`
        Name of the Python type, such as ``mypackage.MyClass``.

    Returns
    -------
    object
        The object.
    """
    parts = type_name.split('.')
    if len(parts) < 2:
        raise SphinxError(
            'Type must be fully-qualified, '
            'of the form ``module.MyClass``. Got: {}'.format(type_name)
        )
    module_name = ".".join(parts[0:-1])
    name = parts[-1]
    return getattr(import_module(module_name), name)
[ "def", "get_type", "(", "type_name", ")", ":", "parts", "=", "type_name", ".", "split", "(", "'.'", ")", "if", "len", "(", "parts", ")", "<", "2", ":", "raise", "SphinxError", "(", "'Type must be fully-qualified, '", "'of the form ``module.MyClass``. Got: {}'", ".", "format", "(", "type_name", ")", ")", "module_name", "=", "\".\"", ".", "join", "(", "parts", "[", "0", ":", "-", "1", "]", ")", "name", "=", "parts", "[", "-", "1", "]", "return", "getattr", "(", "import_module", "(", "module_name", ")", ",", "name", ")" ]
Get a type given its importable name.

Parameters
----------
type_name : `str`
    Name of the Python type, such as ``mypackage.MyClass``.

Returns
-------
object
    The object.
[ "Get", "a", "type", "given", "its", "importable", "name", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L35-L56
train
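A minimal usage sketch for get_type; the resolved class here is just an illustration, assuming documenteer is importable:

from documenteer.sphinxext.lssttasks.taskutils import get_type

# Resolve a fully-qualified name into the actual Python object
cls = get_type('collections.OrderedDict')
assert cls.__name__ == 'OrderedDict'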
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/taskutils.py
get_task_config_fields
def get_task_config_fields(config_class): """Get all configuration Fields from a Config class. Parameters ---------- config_class : ``lsst.pipe.base.Config``-type The configuration class (not an instance) corresponding to a Task. Returns ------- config_fields : `dict` Mapping where keys are the config attribute names and values are subclasses of ``lsst.pex.config.Field``. The mapping is alphabetically ordered by attribute name. """ from lsst.pex.config import Field def is_config_field(obj): return isinstance(obj, Field) return _get_alphabetical_members(config_class, is_config_field)
python
def get_task_config_fields(config_class): """Get all configuration Fields from a Config class. Parameters ---------- config_class : ``lsst.pipe.base.Config``-type The configuration class (not an instance) corresponding to a Task. Returns ------- config_fields : `dict` Mapping where keys are the config attribute names and values are subclasses of ``lsst.pex.config.Field``. The mapping is alphabetically ordered by attribute name. """ from lsst.pex.config import Field def is_config_field(obj): return isinstance(obj, Field) return _get_alphabetical_members(config_class, is_config_field)
[ "def", "get_task_config_fields", "(", "config_class", ")", ":", "from", "lsst", ".", "pex", ".", "config", "import", "Field", "def", "is_config_field", "(", "obj", ")", ":", "return", "isinstance", "(", "obj", ",", "Field", ")", "return", "_get_alphabetical_members", "(", "config_class", ",", "is_config_field", ")" ]
Get all configuration Fields from a Config class. Parameters ---------- config_class : ``lsst.pipe.base.Config``-type The configuration class (not an instance) corresponding to a Task. Returns ------- config_fields : `dict` Mapping where keys are the config attribute names and values are subclasses of ``lsst.pex.config.Field``. The mapping is alphabetically ordered by attribute name.
[ "Get", "all", "configuration", "Fields", "from", "a", "Config", "class", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L59-L79
train
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/taskutils.py
get_subtask_fields
def get_subtask_fields(config_class):
    """Get all configurable subtask fields from a Config class.

    Parameters
    ----------
    config_class : ``lsst.pipe.base.Config``-type
        The configuration class (not an instance) corresponding to a Task.

    Returns
    -------
    subtask_fields : `dict`
        Mapping where keys are the config attribute names and values are
        subclasses of ``lsst.pex.config.ConfigurableField`` or
        ``RegistryField``. The mapping is alphabetically ordered by
        attribute name.
    """
    from lsst.pex.config import ConfigurableField, RegistryField

    def is_subtask_field(obj):
        return isinstance(obj, (ConfigurableField, RegistryField))

    return _get_alphabetical_members(config_class, is_subtask_field)
python
def get_subtask_fields(config_class):
    """Get all configurable subtask fields from a Config class.

    Parameters
    ----------
    config_class : ``lsst.pipe.base.Config``-type
        The configuration class (not an instance) corresponding to a Task.

    Returns
    -------
    subtask_fields : `dict`
        Mapping where keys are the config attribute names and values are
        subclasses of ``lsst.pex.config.ConfigurableField`` or
        ``RegistryField``. The mapping is alphabetically ordered by
        attribute name.
    """
    from lsst.pex.config import ConfigurableField, RegistryField

    def is_subtask_field(obj):
        return isinstance(obj, (ConfigurableField, RegistryField))

    return _get_alphabetical_members(config_class, is_subtask_field)
[ "def", "get_subtask_fields", "(", "config_class", ")", ":", "from", "lsst", ".", "pex", ".", "config", "import", "ConfigurableField", ",", "RegistryField", "def", "is_subtask_field", "(", "obj", ")", ":", "return", "isinstance", "(", "obj", ",", "(", "ConfigurableField", ",", "RegistryField", ")", ")", "return", "_get_alphabetical_members", "(", "config_class", ",", "is_subtask_field", ")" ]
Get all configurable subtask fields from a Config class.

Parameters
----------
config_class : ``lsst.pipe.base.Config``-type
    The configuration class (not an instance) corresponding to a Task.

Returns
-------
subtask_fields : `dict`
    Mapping where keys are the config attribute names and values are
    subclasses of ``lsst.pex.config.ConfigurableField`` or
    ``RegistryField``. The mapping is alphabetically ordered by
    attribute name.
[ "Get", "all", "configurable", "subtask", "fields", "from", "a", "Config", "class", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L82-L103
train
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/taskutils.py
_get_alphabetical_members
def _get_alphabetical_members(obj, predicate): """Get members of an object, sorted alphabetically. Parameters ---------- obj An object. predicate : callable Callable that takes an attribute and returns a bool of whether the attribute should be returned or not. Returns ------- members : `dict` Dictionary of - Keys: attribute name - Values: attribute The dictionary is ordered according to the attribute name. Notes ----- This uses the insertion-order-preserved nature of `dict` in Python 3.6+. See also -------- `inspect.getmembers` """ fields = dict(inspect.getmembers(obj, predicate)) keys = list(fields.keys()) keys.sort() return {k: fields[k] for k in keys}
python
def _get_alphabetical_members(obj, predicate): """Get members of an object, sorted alphabetically. Parameters ---------- obj An object. predicate : callable Callable that takes an attribute and returns a bool of whether the attribute should be returned or not. Returns ------- members : `dict` Dictionary of - Keys: attribute name - Values: attribute The dictionary is ordered according to the attribute name. Notes ----- This uses the insertion-order-preserved nature of `dict` in Python 3.6+. See also -------- `inspect.getmembers` """ fields = dict(inspect.getmembers(obj, predicate)) keys = list(fields.keys()) keys.sort() return {k: fields[k] for k in keys}
[ "def", "_get_alphabetical_members", "(", "obj", ",", "predicate", ")", ":", "fields", "=", "dict", "(", "inspect", ".", "getmembers", "(", "obj", ",", "predicate", ")", ")", "keys", "=", "list", "(", "fields", ".", "keys", "(", ")", ")", "keys", ".", "sort", "(", ")", "return", "{", "k", ":", "fields", "[", "k", "]", "for", "k", "in", "keys", "}" ]
Get members of an object, sorted alphabetically. Parameters ---------- obj An object. predicate : callable Callable that takes an attribute and returns a bool of whether the attribute should be returned or not. Returns ------- members : `dict` Dictionary of - Keys: attribute name - Values: attribute The dictionary is ordered according to the attribute name. Notes ----- This uses the insertion-order-preserved nature of `dict` in Python 3.6+. See also -------- `inspect.getmembers`
[ "Get", "members", "of", "an", "object", "sorted", "alphabetically", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L106-L138
train
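A standard-library sketch of the alphabetical-member pattern used above; the inspected module and predicate are illustrative:

import inspect
import json

# Mirror of _get_alphabetical_members: filter members with a predicate, then sort keys
fields = dict(inspect.getmembers(json, inspect.isfunction))
members = {k: fields[k] for k in sorted(fields)}
print(list(members))  # e.g. ['detect_encoding', 'dump', 'dumps', 'load', 'loads']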
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/taskutils.py
typestring
def typestring(obj): """Make a string for the object's type Parameters ---------- obj : obj Python object. Returns ------- `str` String representation of the object's type. This is the type's importable namespace. Examples -------- >>> import docutils.nodes >>> para = docutils.nodes.paragraph() >>> typestring(para) 'docutils.nodes.paragraph' """ obj_type = type(obj) return '.'.join((obj_type.__module__, obj_type.__name__))
python
def typestring(obj): """Make a string for the object's type Parameters ---------- obj : obj Python object. Returns ------- `str` String representation of the object's type. This is the type's importable namespace. Examples -------- >>> import docutils.nodes >>> para = docutils.nodes.paragraph() >>> typestring(para) 'docutils.nodes.paragraph' """ obj_type = type(obj) return '.'.join((obj_type.__module__, obj_type.__name__))
[ "def", "typestring", "(", "obj", ")", ":", "obj_type", "=", "type", "(", "obj", ")", "return", "'.'", ".", "join", "(", "(", "obj_type", ".", "__module__", ",", "obj_type", ".", "__name__", ")", ")" ]
Make a string for the object's type Parameters ---------- obj : obj Python object. Returns ------- `str` String representation of the object's type. This is the type's importable namespace. Examples -------- >>> import docutils.nodes >>> para = docutils.nodes.paragraph() >>> typestring(para) 'docutils.nodes.paragraph'
[ "Make", "a", "string", "for", "the", "object", "s", "type" ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L141-L163
train
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/taskutils.py
get_docstring
def get_docstring(obj):
    """Extract the docstring from an object as individual lines.

    Parameters
    ----------
    obj : object
        The Python object (class, function or method) to extract docstrings
        from.

    Returns
    -------
    lines : `list` of `str`
        Individual docstring lines with common indentation removed, and
        newline characters stripped.

    Notes
    -----
    If the object does not have a docstring, a docstring with the content
    ``"Undocumented"`` is created.
    """
    docstring = getdoc(obj, allow_inherited=True)
    if docstring is None:
        logger = getLogger(__name__)
        logger.warning("Object %s doesn't have a docstring.", obj)
        docstring = 'Undocumented'
    # ignore is simply the number of initial lines to ignore when determining
    # the docstring's baseline indent level. We really want "1" here.
    return prepare_docstring(docstring, ignore=1)
python
def get_docstring(obj):
    """Extract the docstring from an object as individual lines.

    Parameters
    ----------
    obj : object
        The Python object (class, function or method) to extract docstrings
        from.

    Returns
    -------
    lines : `list` of `str`
        Individual docstring lines with common indentation removed, and
        newline characters stripped.

    Notes
    -----
    If the object does not have a docstring, a docstring with the content
    ``"Undocumented"`` is created.
    """
    docstring = getdoc(obj, allow_inherited=True)
    if docstring is None:
        logger = getLogger(__name__)
        logger.warning("Object %s doesn't have a docstring.", obj)
        docstring = 'Undocumented'
    # ignore is simply the number of initial lines to ignore when determining
    # the docstring's baseline indent level. We really want "1" here.
    return prepare_docstring(docstring, ignore=1)
[ "def", "get_docstring", "(", "obj", ")", ":", "docstring", "=", "getdoc", "(", "obj", ",", "allow_inherited", "=", "True", ")", "if", "docstring", "is", "None", ":", "logger", "=", "getLogger", "(", "__name__", ")", "logger", ".", "warning", "(", "\"Object %s doesn't have a docstring.\"", ",", "obj", ")", "docstring", "=", "'Undocumented'", "# ignore is simply the number of initial lines to ignore when determining", "# the docstring's baseline indent level. We really want \"1\" here.", "return", "prepare_docstring", "(", "docstring", ",", "ignore", "=", "1", ")" ]
Extract the docstring from an object as individual lines.

Parameters
----------
obj : object
    The Python object (class, function or method) to extract docstrings
    from.

Returns
-------
lines : `list` of `str`
    Individual docstring lines with common indentation removed, and
    newline characters stripped.

Notes
-----
If the object does not have a docstring, a docstring with the content
``"Undocumented"`` is created.
[ "Extract", "the", "docstring", "from", "an", "object", "as", "individual", "lines", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L166-L193
train
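A rough standalone approximation of get_docstring using only the standard library (inspect.getdoc stands in for Sphinx's getdoc/prepare_docstring pair; this is a sketch, not the documenteer API):

import inspect

def sample():
    """Summary sentence.

    More detail here.
    """

docstring = inspect.getdoc(sample) or 'Undocumented'
lines = docstring.splitlines()  # dedented lines with newline characters stripped
print(lines[0])  # 'Summary sentence.'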
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/taskutils.py
extract_docstring_summary
def extract_docstring_summary(docstring): """Get the first summary sentence from a docstring. Parameters ---------- docstring : `list` of `str` Output from `get_docstring`. Returns ------- summary : `str` The plain-text summary sentence from the docstring. """ summary_lines = [] for line in docstring: if line == '': break else: summary_lines.append(line) return ' '.join(summary_lines)
python
def extract_docstring_summary(docstring): """Get the first summary sentence from a docstring. Parameters ---------- docstring : `list` of `str` Output from `get_docstring`. Returns ------- summary : `str` The plain-text summary sentence from the docstring. """ summary_lines = [] for line in docstring: if line == '': break else: summary_lines.append(line) return ' '.join(summary_lines)
[ "def", "extract_docstring_summary", "(", "docstring", ")", ":", "summary_lines", "=", "[", "]", "for", "line", "in", "docstring", ":", "if", "line", "==", "''", ":", "break", "else", ":", "summary_lines", ".", "append", "(", "line", ")", "return", "' '", ".", "join", "(", "summary_lines", ")" ]
Get the first summary sentence from a docstring. Parameters ---------- docstring : `list` of `str` Output from `get_docstring`. Returns ------- summary : `str` The plain-text summary sentence from the docstring.
[ "Get", "the", "first", "summary", "sentence", "from", "a", "docstring", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L196-L215
train
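A minimal usage sketch; the input mimics the line list produced by get_docstring:

from documenteer.sphinxext.lssttasks.taskutils import extract_docstring_summary

lines = ['First line of the summary,', 'continued on a second line.', '',
         'Body text after the blank line.']
print(extract_docstring_summary(lines))
# First line of the summary, continued on a second line.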
ehansis/ozelot
examples/superheroes/superheroes/pipeline.py
LoadMovieAppearances.run
def run(self):
    """Run loading of movie appearances.

    The wiki page structure for this part cannot be easily handled by
    simple xpath queries. We need to iterate over the respective portion
    of the page and parse appearances.
    """
    # make all requests via a cache instance
    request_cache = cache.get_request_cache()

    # DB session to operate in
    session = client.get_client().create_session()

    # clear completion flag for this task
    self.mark_incomplete()

    # list of universes seen in character appearances
    universes = []

    # don't auto-flush the session for queries, this causes issues with the 'id' field of newly
    # created MovieAppearance instances
    with session.no_autoflush:
        # get all movies
        movies = session.query(models.Movie).all()

        # iterate over all movies and build appearance objects
        for movie in movies:

            # retrieve movie article, keep main article content only, parse
            article = request_cache.get("http://marvel.wikia.com" + movie.url,
                                        xpath="//article[@id='WikiaMainContent']",
                                        rate_limit=0.5)
            doc = html.fromstring(article)

            # find heading for appearances, this is a span inside an h2; go to the h2
            node = doc.xpath("//span[@id='Appearances']")[0]
            node = node.getparent()

            # Appearance type is given by <p><b>... some text ...</b></p> tags. Sometimes the first
            # group of appearances carries no such label, assume it's the featured characters.
            appearance_type = "Featured Characters"

            # walk along the tree; character lists are in <ul>s, labels in <p>s;
            # the next h2 ends the character listing
            node = node.getnext()
            while node is not None and node.tag != 'h2':

                if node.tag == 'ul' and ('characters' in appearance_type.lower()
                                         or 'villains' in appearance_type.lower()):
                    # starts a new list of stuff; only enter here if the previous label was for characters;
                    # use iter() to iterate over all 'li' items (also those of nested lists)
                    for li in node.iter('li'):

                        # inside the list element, find all 'a's; iterate over child nodes, don't use iter(),
                        # since we don't want to find 'a's of sub-elements in a nested list here
                        for a in li:
                            if a.tag != 'a':
                                continue

                            # there are 'a's in the list that wrap images, don't use these; also don't use
                            # links that lead to somewhere else than the wiki
                            if "image" in a.get("class", "") or not a.get("href").startswith("/wiki/"):
                                continue

                            match = re.search(r'\(.*?\)', a.get('href'))
                            if match:
                                universes.append(match.group()[1:-1])

                            # accept the first matching href, build a new appearance object, then skip to next li
                            try:
                                character = session.query(models.Character) \
                                    .filter(models.Character.url == a.get("href")) \
                                    .one()

                                # -- start documentation include: many-to-many-generation
                                appearance = models.MovieAppearance(movie_id=movie.id,
                                                                    character_id=character.id,
                                                                    appearance_type=appearance_type)
                                session.add(appearance)
                                # -- end documentation include: many-to-many-generation

                            except NoResultFound:
                                # none found, ignore
                                pass

                            # break looping over 'a's once we have found one, go to next 'li'
                            break

                elif node.tag == 'p':
                    # new character class (or label for locations, items, ...)
                    appearance_type = " ".join(node.itertext()).strip().strip(':').strip()

                node = node.getnext()

    print("\nNumber of character appearances per universe: ")
    print(pd.Series(data=universes).value_counts())

    # done, save all data, finalize task
    session.commit()
    session.close()
    self.mark_complete()
python
def run(self):
    """Run loading of movie appearances.

    The wiki page structure for this part cannot be easily handled by
    simple xpath queries. We need to iterate over the respective portion
    of the page and parse appearances.
    """
    # make all requests via a cache instance
    request_cache = cache.get_request_cache()

    # DB session to operate in
    session = client.get_client().create_session()

    # clear completion flag for this task
    self.mark_incomplete()

    # list of universes seen in character appearances
    universes = []

    # don't auto-flush the session for queries, this causes issues with the 'id' field of newly
    # created MovieAppearance instances
    with session.no_autoflush:
        # get all movies
        movies = session.query(models.Movie).all()

        # iterate over all movies and build appearance objects
        for movie in movies:

            # retrieve movie article, keep main article content only, parse
            article = request_cache.get("http://marvel.wikia.com" + movie.url,
                                        xpath="//article[@id='WikiaMainContent']",
                                        rate_limit=0.5)
            doc = html.fromstring(article)

            # find heading for appearances, this is a span inside an h2; go to the h2
            node = doc.xpath("//span[@id='Appearances']")[0]
            node = node.getparent()

            # Appearance type is given by <p><b>... some text ...</b></p> tags. Sometimes the first
            # group of appearances carries no such label, assume it's the featured characters.
            appearance_type = "Featured Characters"

            # walk along the tree; character lists are in <ul>s, labels in <p>s;
            # the next h2 ends the character listing
            node = node.getnext()
            while node is not None and node.tag != 'h2':

                if node.tag == 'ul' and ('characters' in appearance_type.lower()
                                         or 'villains' in appearance_type.lower()):
                    # starts a new list of stuff; only enter here if the previous label was for characters;
                    # use iter() to iterate over all 'li' items (also those of nested lists)
                    for li in node.iter('li'):

                        # inside the list element, find all 'a's; iterate over child nodes, don't use iter(),
                        # since we don't want to find 'a's of sub-elements in a nested list here
                        for a in li:
                            if a.tag != 'a':
                                continue

                            # there are 'a's in the list that wrap images, don't use these; also don't use
                            # links that lead to somewhere else than the wiki
                            if "image" in a.get("class", "") or not a.get("href").startswith("/wiki/"):
                                continue

                            match = re.search(r'\(.*?\)', a.get('href'))
                            if match:
                                universes.append(match.group()[1:-1])

                            # accept the first matching href, build a new appearance object, then skip to next li
                            try:
                                character = session.query(models.Character) \
                                    .filter(models.Character.url == a.get("href")) \
                                    .one()

                                # -- start documentation include: many-to-many-generation
                                appearance = models.MovieAppearance(movie_id=movie.id,
                                                                    character_id=character.id,
                                                                    appearance_type=appearance_type)
                                session.add(appearance)
                                # -- end documentation include: many-to-many-generation

                            except NoResultFound:
                                # none found, ignore
                                pass

                            # break looping over 'a's once we have found one, go to next 'li'
                            break

                elif node.tag == 'p':
                    # new character class (or label for locations, items, ...)
                    appearance_type = " ".join(node.itertext()).strip().strip(':').strip()

                node = node.getnext()

    print("\nNumber of character appearances per universe: ")
    print(pd.Series(data=universes).value_counts())

    # done, save all data, finalize task
    session.commit()
    session.close()
    self.mark_complete()
[ "def", "run", "(", "self", ")", ":", "# make all requests via a cache instance", "request_cache", "=", "cache", ".", "get_request_cache", "(", ")", "# DB session to operate in", "session", "=", "client", ".", "get_client", "(", ")", ".", "create_session", "(", ")", "# clear completion flag for this task", "self", ".", "mark_incomplete", "(", ")", "# list of universes seen in character appearances", "universes", "=", "[", "]", "# don't auto-flush the session for queries, this causes issues with the 'id' field of newly", "# created MovieAppearance instances", "with", "session", ".", "no_autoflush", ":", "# get all movies", "movies", "=", "session", ".", "query", "(", "models", ".", "Movie", ")", ".", "all", "(", ")", "# iterate over all movies and build appearance objects", "for", "movie", "in", "movies", ":", "# retrieve movie article, keep main article content only, parse", "article", "=", "request_cache", ".", "get", "(", "\"http://marvel.wikia.com\"", "+", "movie", ".", "url", ",", "xpath", "=", "\"//article[@id='WikiaMainContent']\"", ",", "rate_limit", "=", "0.5", ")", "doc", "=", "html", ".", "fromstring", "(", "article", ")", "# find heading for appearances, this is a span inside an h2; go to the h2", "node", "=", "doc", ".", "xpath", "(", "\"//span[@id='Appearances']\"", ")", "[", "0", "]", "node", "=", "node", ".", "getparent", "(", ")", "# Appearance type is given by <p><b>... some text ...</b></p> tags. Sometimes the first", "# group of appearances carries no such label, assume it's the featured characters.", "appearance_type", "=", "\"Featured Characters\"", "# walk along the tree; character lists are in <ul>s, labels in <p>s;", "# the next h2 ends the character listing", "node", "=", "node", ".", "getnext", "(", ")", "while", "node", "is", "not", "None", "and", "node", ".", "tag", "!=", "'h2'", ":", "if", "node", ".", "tag", "==", "'ul'", "and", "(", "'characters'", "in", "appearance_type", ".", "lower", "(", ")", "or", "'villains'", "in", "appearance_type", ".", "lower", "(", ")", ")", ":", "# starts a new list of stuff; only enter here if the previous label was for characters;", "# use iter() to iterate over all 'li' items (also those of nested lists)", "for", "li", "in", "node", ".", "iter", "(", "'li'", ")", ":", "# inside the list element, find all 'a's; iterate over child nodes, don't use iter(),", "# since we want don't want to find 'a's of sub-elements in a nested list here", "for", "a", "in", "li", ":", "if", "a", ".", "tag", "!=", "'a'", ":", "continue", "# there are 'a's in the list that wrap imags, don't use these; also don't use", "# links that lead to somewhere else than the wiki", "if", "\"image\"", "in", "a", ".", "get", "(", "\"class\"", ",", "\"\"", ")", "or", "not", "a", ".", "get", "(", "\"href\"", ")", ".", "startswith", "(", "\"/wiki/\"", ")", ":", "continue", "match", "=", "re", ".", "search", "(", "r'\\(.*?\\)'", ",", "a", ".", "get", "(", "'href'", ")", ")", "if", "match", ":", "universes", ".", "append", "(", "match", ".", "group", "(", ")", "[", "1", ":", "-", "1", "]", ")", "# accept the first matching href, build a new appearance object, then skip to next li", "try", ":", "character", "=", "session", ".", "query", "(", "models", ".", "Character", ")", ".", "filter", "(", "models", ".", "Character", ".", "url", "==", "a", ".", "get", "(", "\"href\"", ")", ")", ".", "one", "(", ")", "# -- start documentation include: many-to-many-generation", "appearance", "=", "models", ".", "MovieAppearance", "(", "movie_id", "=", "movie", ".", "id", ",", 
"character_id", "=", "character", ".", "id", ",", "appearance_type", "=", "appearance_type", ")", "session", ".", "add", "(", "appearance", ")", "# -- end documentation include: many-to-many-generation", "except", "NoResultFound", ":", "# none found, ignore", "pass", "# break looping over 'a's once we have found one, go to next 'li'", "break", "elif", "node", ".", "tag", "==", "'p'", ":", "# new character class (or label for locations, items, ...)", "appearance_type", "=", "\" \"", ".", "join", "(", "node", ".", "itertext", "(", ")", ")", ".", "strip", "(", ")", ".", "strip", "(", "':'", ")", ".", "strip", "(", ")", "node", "=", "node", ".", "getnext", "(", ")", "print", "(", "\"\\nNumber of character appearances per universe: \"", ")", "print", "(", "pd", ".", "Series", "(", "data", "=", "universes", ")", ".", "value_counts", "(", ")", ")", "# done, save all data, finalize task", "session", ".", "commit", "(", ")", "session", ".", "close", "(", ")", "self", ".", "mark_complete", "(", ")" ]
Run loading of movie appearances. The wiki page structure for this part cannot be easily handled by simple xpath queries. We need to iterate over the respective portion of the page and parse appearances.
[ "Run", "loading", "of", "movie", "appearances", "." ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/pipeline.py#L635-L733
train
ehansis/ozelot
examples/superheroes/superheroes/pipeline.py
InflationAdjustMovieBudgets.run
def run(self): """Compute and store inflation-adjusted movie budgets """ self.mark_incomplete() session = client.get_client().create_session() # load CPI data cpi = ConsumerPriceIndexFile().load() # max year we have CPI data for max_cpi_year = cpi['Year'].max() # extract annual average only, index by year cpi = cpi.set_index('Year')['Annual'] # process all movies for movie in session.query(models.Movie).all(): # we can only compute an inflation-adjusted budget if we know the year and budget if movie.year is not None and movie.budget is not None: if movie.year > max_cpi_year: # if movie is too new, don't inflation-adjust movie.budget_inflation_adjusted = movie.budget else: movie.budget_inflation_adjusted = movie.budget * cpi.loc[max_cpi_year] / cpi.loc[movie.year] # done, save all data, finalize task session.commit() session.close() self.mark_complete()
python
def run(self): """Compute and store inflation-adjusted movie budgets """ self.mark_incomplete() session = client.get_client().create_session() # load CPI data cpi = ConsumerPriceIndexFile().load() # max year we have CPI data for max_cpi_year = cpi['Year'].max() # extract annual average only, index by year cpi = cpi.set_index('Year')['Annual'] # process all movies for movie in session.query(models.Movie).all(): # we can only compute an inflation-adjusted budget if we know the year and budget if movie.year is not None and movie.budget is not None: if movie.year > max_cpi_year: # if movie is too new, don't inflation-adjust movie.budget_inflation_adjusted = movie.budget else: movie.budget_inflation_adjusted = movie.budget * cpi.loc[max_cpi_year] / cpi.loc[movie.year] # done, save all data, finalize task session.commit() session.close() self.mark_complete()
[ "def", "run", "(", "self", ")", ":", "self", ".", "mark_incomplete", "(", ")", "session", "=", "client", ".", "get_client", "(", ")", ".", "create_session", "(", ")", "# load CPI data", "cpi", "=", "ConsumerPriceIndexFile", "(", ")", ".", "load", "(", ")", "# max year we have CPI data for", "max_cpi_year", "=", "cpi", "[", "'Year'", "]", ".", "max", "(", ")", "# extract annual average only, index by year", "cpi", "=", "cpi", ".", "set_index", "(", "'Year'", ")", "[", "'Annual'", "]", "# process all movies", "for", "movie", "in", "session", ".", "query", "(", "models", ".", "Movie", ")", ".", "all", "(", ")", ":", "# we can only compute an inflation-adjusted budget if we know the year and budget", "if", "movie", ".", "year", "is", "not", "None", "and", "movie", ".", "budget", "is", "not", "None", ":", "if", "movie", ".", "year", ">", "max_cpi_year", ":", "# if movie is too new, don't inflation-adjust", "movie", ".", "budget_inflation_adjusted", "=", "movie", ".", "budget", "else", ":", "movie", ".", "budget_inflation_adjusted", "=", "movie", ".", "budget", "*", "cpi", ".", "loc", "[", "max_cpi_year", "]", "/", "cpi", ".", "loc", "[", "movie", ".", "year", "]", "# done, save all data, finalize task", "session", ".", "commit", "(", ")", "session", ".", "close", "(", ")", "self", ".", "mark_complete", "(", ")" ]
Compute and store inflation-adjusted movie budgets
[ "Compute", "and", "store", "inflation", "-", "adjusted", "movie", "budgets" ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/pipeline.py#L822-L850
train
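A worked sketch of the adjustment formula above, budget * CPI(latest) / CPI(year), with made-up CPI values:

import pandas as pd

cpi = pd.Series({2000: 172.2, 2010: 218.1, 2015: 237.0})  # hypothetical annual averages
max_cpi_year = cpi.index.max()

budget, year = 100_000_000, 2000
adjusted = budget * cpi.loc[max_cpi_year] / cpi.loc[year]
print(round(adjusted))  # 137630662, i.e. the 2000 budget expressed in 2015 dollars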
potash/drain
drain/metrics.py
_argsort
def _argsort(y_score, k=None):
    """
    Returns the indexes of the top k scores in descending order
    or of all scores if k is None
    """
    ranks = y_score.argsort()

    argsort = ranks[::-1]
    if k is not None:
        argsort = argsort[0:k]

    return argsort
python
def _argsort(y_score, k=None):
    """
    Returns the indexes of the top k scores in descending order
    or of all scores if k is None
    """
    ranks = y_score.argsort()

    argsort = ranks[::-1]
    if k is not None:
        argsort = argsort[0:k]

    return argsort
[ "def", "_argsort", "(", "y_score", ",", "k", "=", "None", ")", ":", "ranks", "=", "y_score", ".", "argsort", "(", ")", "argsort", "=", "ranks", "[", ":", ":", "-", "1", "]", "if", "k", "is", "not", "None", ":", "argsort", "=", "argsort", "[", "0", ":", "k", "]", "return", "argsort" ]
Returns the indexes of the top k scores in descending order or of all scores if k is None
[ "Returns", "the", "indexes", "in", "descending", "order", "of", "the", "top", "k", "score", "or", "all", "scores", "if", "k", "is", "None" ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L16-L26
train
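A quick numpy illustration of the descending-order trick in _argsort; the scores are arbitrary:

import numpy as np

y_score = np.array([0.1, 0.9, 0.5, 0.7])
ranks = y_score.argsort()[::-1]  # indexes sorted by descending score
print(ranks)      # [1 3 2 0]
print(ranks[:2])  # top k=2: [1 3]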
potash/drain
drain/metrics.py
count
def count(y_true, y_score=None, countna=False): """ Counts the number of examples. If countna is False then only count labeled examples, i.e. those with y_true not NaN """ if not countna: return (~np.isnan(to_float(y_true))).sum() else: return len(y_true)
python
def count(y_true, y_score=None, countna=False): """ Counts the number of examples. If countna is False then only count labeled examples, i.e. those with y_true not NaN """ if not countna: return (~np.isnan(to_float(y_true))).sum() else: return len(y_true)
[ "def", "count", "(", "y_true", ",", "y_score", "=", "None", ",", "countna", "=", "False", ")", ":", "if", "not", "countna", ":", "return", "(", "~", "np", ".", "isnan", "(", "to_float", "(", "y_true", ")", ")", ")", ".", "sum", "(", ")", "else", ":", "return", "len", "(", "y_true", ")" ]
Counts the number of examples. If countna is False then only count labeled examples, i.e. those with y_true not NaN
[ "Counts", "the", "number", "of", "examples", ".", "If", "countna", "is", "False", "then", "only", "count", "labeled", "examples", "i", ".", "e", ".", "those", "with", "y_true", "not", "NaN" ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L40-L48
train
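The NaN handling in count, sketched directly with numpy (to_float from drain is assumed to coerce the labels to a float array):

import numpy as np

y_true = np.array([1.0, 0.0, np.nan, 1.0])
print((~np.isnan(y_true)).sum())  # 3 labeled examples (countna=False)
print(len(y_true))                # 4 examples including the unlabeled one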
potash/drain
drain/metrics.py
count_series
def count_series(y_true, y_score, countna=False): """ Returns series whose i-th entry is the number of examples in the top i """ y_true, y_score = to_float(y_true, y_score) top = _argsort(y_score) if not countna: a = (~np.isnan(y_true[top])).cumsum() else: a = range(1, len(y_true)+1) return pd.Series(a, index=range(1, len(a)+1))
python
def count_series(y_true, y_score, countna=False): """ Returns series whose i-th entry is the number of examples in the top i """ y_true, y_score = to_float(y_true, y_score) top = _argsort(y_score) if not countna: a = (~np.isnan(y_true[top])).cumsum() else: a = range(1, len(y_true)+1) return pd.Series(a, index=range(1, len(a)+1))
[ "def", "count_series", "(", "y_true", ",", "y_score", ",", "countna", "=", "False", ")", ":", "y_true", ",", "y_score", "=", "to_float", "(", "y_true", ",", "y_score", ")", "top", "=", "_argsort", "(", "y_score", ")", "if", "not", "countna", ":", "a", "=", "(", "~", "np", ".", "isnan", "(", "y_true", "[", "top", "]", ")", ")", ".", "cumsum", "(", ")", "else", ":", "a", "=", "range", "(", "1", ",", "len", "(", "y_true", ")", "+", "1", ")", "return", "pd", ".", "Series", "(", "a", ",", "index", "=", "range", "(", "1", ",", "len", "(", "a", ")", "+", "1", ")", ")" ]
Returns series whose i-th entry is the number of examples in the top i
[ "Returns", "series", "whose", "i", "-", "th", "entry", "is", "the", "number", "of", "examples", "in", "the", "top", "i" ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L51-L63
train
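A sketch of count_series semantics: an unlabeled (NaN) example occupies a rank but does not increment the cumulative count:

import numpy as np
import pandas as pd

y_true = np.array([1.0, np.nan, 0.0])
y_score = np.array([0.9, 0.8, 0.1])

top = y_score.argsort()[::-1]          # [0 1 2]
a = (~np.isnan(y_true[top])).cumsum()  # [1 1 2]
print(pd.Series(a, index=range(1, len(a) + 1)))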
potash/drain
drain/metrics.py
baseline
def baseline(y_true, y_score=None): """ Number of positive labels divided by number of labels, or zero if there are no labels """ if len(y_true) > 0: return np.nansum(y_true)/count(y_true, countna=False) else: return 0.0
python
def baseline(y_true, y_score=None): """ Number of positive labels divided by number of labels, or zero if there are no labels """ if len(y_true) > 0: return np.nansum(y_true)/count(y_true, countna=False) else: return 0.0
[ "def", "baseline", "(", "y_true", ",", "y_score", "=", "None", ")", ":", "if", "len", "(", "y_true", ")", ">", "0", ":", "return", "np", ".", "nansum", "(", "y_true", ")", "/", "count", "(", "y_true", ",", "countna", "=", "False", ")", "else", ":", "return", "0.0" ]
Number of positive labels divided by number of labels, or zero if there are no labels
[ "Number", "of", "positive", "labels", "divided", "by", "number", "of", "labels", "or", "zero", "if", "there", "are", "no", "labels" ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L66-L74
train
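baseline is the positive rate among labeled examples; a quick numpy check with one unlabeled entry:

import numpy as np

y_true = np.array([1.0, 0.0, np.nan, 1.0])
print(np.nansum(y_true) / (~np.isnan(y_true)).sum())  # 2/3 ~= 0.667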
potash/drain
drain/metrics.py
roc_auc
def roc_auc(y_true, y_score):
    """
    Returns area under the ROC curve
    """
    notnull = ~np.isnan(y_true)
    fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true[notnull], y_score[notnull])
    return sklearn.metrics.auc(fpr, tpr)
python
def roc_auc(y_true, y_score):
    """
    Returns area under the ROC curve
    """
    notnull = ~np.isnan(y_true)
    fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true[notnull], y_score[notnull])
    return sklearn.metrics.auc(fpr, tpr)
[ "def", "roc_auc", "(", "y_true", ",", "y_score", ")", ":", "notnull", "=", "~", "np", ".", "isnan", "(", "y_true", ")", "fpr", ",", "tpr", ",", "thresholds", "=", "sklearn", ".", "metrics", ".", "roc_curve", "(", "y_true", "[", "notnull", "]", ",", "y_score", "[", "notnull", "]", ")", "return", "sklearn", ".", "metrics", ".", "auc", "(", "fpr", ",", "tpr", ")" ]
Returns area under the ROC curve
[ "Returns", "are", "under", "the", "ROC", "curve" ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L77-L83
train
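A usage sketch of the masking step in roc_auc, assuming scikit-learn is installed; the toy labels are perfectly separated, so the AUC is 1.0:

import numpy as np
import sklearn.metrics

y_true = np.array([1.0, 0.0, np.nan, 1.0, 0.0])
y_score = np.array([0.9, 0.2, 0.5, 0.8, 0.3])

notnull = ~np.isnan(y_true)  # drop the unlabeled example before scoring
fpr, tpr, _ = sklearn.metrics.roc_curve(y_true[notnull], y_score[notnull])
print(sklearn.metrics.auc(fpr, tpr))  # 1.0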
potash/drain
drain/metrics.py
recall_series
def recall_series(y_true, y_score, k=None, value=True): """ Returns series of length k whose i-th entry is the recall in the top i """ y_true, y_score = to_float(y_true, y_score) top = _argsort(y_score, k) if not value: y_true = 1-y_true a = np.nan_to_num(y_true[top]).cumsum() return pd.Series(a, index=np.arange(1, len(a)+1))
python
def recall_series(y_true, y_score, k=None, value=True): """ Returns series of length k whose i-th entry is the recall in the top i """ y_true, y_score = to_float(y_true, y_score) top = _argsort(y_score, k) if not value: y_true = 1-y_true a = np.nan_to_num(y_true[top]).cumsum() return pd.Series(a, index=np.arange(1, len(a)+1))
[ "def", "recall_series", "(", "y_true", ",", "y_score", ",", "k", "=", "None", ",", "value", "=", "True", ")", ":", "y_true", ",", "y_score", "=", "to_float", "(", "y_true", ",", "y_score", ")", "top", "=", "_argsort", "(", "y_score", ",", "k", ")", "if", "not", "value", ":", "y_true", "=", "1", "-", "y_true", "a", "=", "np", ".", "nan_to_num", "(", "y_true", "[", "top", "]", ")", ".", "cumsum", "(", ")", "return", "pd", ".", "Series", "(", "a", ",", "index", "=", "np", ".", "arange", "(", "1", ",", "len", "(", "a", ")", "+", "1", ")", ")" ]
Returns series of length k whose i-th entry is the recall in the top i
[ "Returns", "series", "of", "length", "k", "whose", "i", "-", "th", "entry", "is", "the", "recall", "in", "the", "top", "i" ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/metrics.py#L143-L154
train
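A sketch of recall_series: positives are accumulated down the ranking, with NaN labels contributing zero:

import numpy as np
import pandas as pd

y_true = np.array([1.0, 0.0, 1.0, np.nan])
y_score = np.array([0.9, 0.8, 0.7, 0.6])

top = y_score.argsort()[::-1][:3]        # top k=3 indexes: [0 1 2]
a = np.nan_to_num(y_true[top]).cumsum()  # [1. 1. 2.]
print(pd.Series(a, index=np.arange(1, len(a) + 1)))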
hbldh/imdirect
imdirect/_autorotate.py
autorotate
def autorotate(image, orientation=None): """Rotate and return an image according to its Exif information. ROTATION_NEEDED = { 1: 0, 2: 0 (Mirrored), 3: 180, 4: 180 (Mirrored), 5: -90 (Mirrored), 6: -90, 7: 90 (Mirrored), 8: 90, } Args: image (PIL.Image.Image): PIL image to rotate orientation (): Optional orientation value in [1, 8] Returns: A :py:class:`~PIL.Image.Image` image. """ orientation_value = orientation if orientation else \ image._getexif().get(EXIF_KEYS.get('Orientation')) if orientation_value is None: raise ImDirectException("No orientation available in Exif " "tag or given explicitly.") if orientation_value in (1, 2): i = image elif orientation_value in (3, 4): i = image.transpose(Image.ROTATE_180) elif orientation_value in (5, 6): i = image.transpose(Image.ROTATE_270) elif orientation_value in (7, 8): i = image.transpose(Image.ROTATE_90) else: i = image if orientation_value in (2, 4, 5, 7): i = i.transpose(Image.FLIP_LEFT_RIGHT) return i
python
def autorotate(image, orientation=None): """Rotate and return an image according to its Exif information. ROTATION_NEEDED = { 1: 0, 2: 0 (Mirrored), 3: 180, 4: 180 (Mirrored), 5: -90 (Mirrored), 6: -90, 7: 90 (Mirrored), 8: 90, } Args: image (PIL.Image.Image): PIL image to rotate orientation (): Optional orientation value in [1, 8] Returns: A :py:class:`~PIL.Image.Image` image. """ orientation_value = orientation if orientation else \ image._getexif().get(EXIF_KEYS.get('Orientation')) if orientation_value is None: raise ImDirectException("No orientation available in Exif " "tag or given explicitly.") if orientation_value in (1, 2): i = image elif orientation_value in (3, 4): i = image.transpose(Image.ROTATE_180) elif orientation_value in (5, 6): i = image.transpose(Image.ROTATE_270) elif orientation_value in (7, 8): i = image.transpose(Image.ROTATE_90) else: i = image if orientation_value in (2, 4, 5, 7): i = i.transpose(Image.FLIP_LEFT_RIGHT) return i
[ "def", "autorotate", "(", "image", ",", "orientation", "=", "None", ")", ":", "orientation_value", "=", "orientation", "if", "orientation", "else", "image", ".", "_getexif", "(", ")", ".", "get", "(", "EXIF_KEYS", ".", "get", "(", "'Orientation'", ")", ")", "if", "orientation_value", "is", "None", ":", "raise", "ImDirectException", "(", "\"No orientation available in Exif \"", "\"tag or given explicitly.\"", ")", "if", "orientation_value", "in", "(", "1", ",", "2", ")", ":", "i", "=", "image", "elif", "orientation_value", "in", "(", "3", ",", "4", ")", ":", "i", "=", "image", ".", "transpose", "(", "Image", ".", "ROTATE_180", ")", "elif", "orientation_value", "in", "(", "5", ",", "6", ")", ":", "i", "=", "image", ".", "transpose", "(", "Image", ".", "ROTATE_270", ")", "elif", "orientation_value", "in", "(", "7", ",", "8", ")", ":", "i", "=", "image", ".", "transpose", "(", "Image", ".", "ROTATE_90", ")", "else", ":", "i", "=", "image", "if", "orientation_value", "in", "(", "2", ",", "4", ",", "5", ",", "7", ")", ":", "i", "=", "i", ".", "transpose", "(", "Image", ".", "FLIP_LEFT_RIGHT", ")", "return", "i" ]
Rotate and return an image according to its Exif information. ROTATION_NEEDED = { 1: 0, 2: 0 (Mirrored), 3: 180, 4: 180 (Mirrored), 5: -90 (Mirrored), 6: -90, 7: 90 (Mirrored), 8: 90, } Args: image (PIL.Image.Image): PIL image to rotate orientation (): Optional orientation value in [1, 8] Returns: A :py:class:`~PIL.Image.Image` image.
[ "Rotate", "and", "return", "an", "image", "according", "to", "its", "Exif", "information", "." ]
12275b9f9faa6952cde2f2fe612bbefab215251a
https://github.com/hbldh/imdirect/blob/12275b9f9faa6952cde2f2fe612bbefab215251a/imdirect/_autorotate.py#L47-L89
train
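A usage sketch, assuming imdirect and Pillow are installed, autorotate is exported at the package top level, and photo.jpg is a JPEG whose Exif orientation tag is set (the filename is illustrative):

from PIL import Image
from imdirect import autorotate

img = Image.open('photo.jpg')  # hypothetical input file
rotated = autorotate(img)      # orientation read from the Exif tag
rotated.save('photo_upright.jpg')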
hbldh/imdirect
imdirect/_autorotate.py
imdirect_open
def imdirect_open(fp): """Opens, identifies the given image file, and rotates it if it is a JPEG. Note that this method does NOT employ the lazy loading methodology that the PIL Images otherwise use. This is done to avoid having to save new Args: fp: A filename (string), pathlib.Path object or a file-like object. Returns: The image as an :py:class:`~PIL.Image.Image` object. Raises: IOError: If the file cannot be found, or the image cannot be opened and identified. """ img = pil_open(fp, 'r') if img.format == 'JPEG': # Read Exif tag on image. if isinstance(fp, string_types): exif = piexif.load(text_type_to_use(fp)) else: fp.seek(0) exif = piexif.load(fp.read()) # If orientation field is missing or equal to 1, nothing needs to be done. orientation_value = exif.get('0th', {}).get(piexif.ImageIFD.Orientation) if orientation_value is None or orientation_value == 1: return img # Otherwise, rotate the image and update the exif accordingly. img_rot = autorotate(img) exif = update_exif_for_rotated_image(exif) # Now, lets restore the output image to # PIL.JpegImagePlugin.JpegImageFile class with the correct, # updated Exif information. # Save image as JPEG to get a correct byte representation of # the image and then read it back. with io.BytesIO() as bio: img_rot.save(bio, format='jpeg', exif=piexif.dump(exif)) bio.seek(0) img_rot_new = pil_open(bio, 'r') # Since we use a BytesIO we need to avoid the lazy # loading of the PIL image. Therefore, we explicitly # load the data here. img_rot_new.load() img = img_rot_new return img
python
def imdirect_open(fp): """Opens, identifies the given image file, and rotates it if it is a JPEG. Note that this method does NOT employ the lazy loading methodology that the PIL Images otherwise use. This is done to avoid having to save new Args: fp: A filename (string), pathlib.Path object or a file-like object. Returns: The image as an :py:class:`~PIL.Image.Image` object. Raises: IOError: If the file cannot be found, or the image cannot be opened and identified. """ img = pil_open(fp, 'r') if img.format == 'JPEG': # Read Exif tag on image. if isinstance(fp, string_types): exif = piexif.load(text_type_to_use(fp)) else: fp.seek(0) exif = piexif.load(fp.read()) # If orientation field is missing or equal to 1, nothing needs to be done. orientation_value = exif.get('0th', {}).get(piexif.ImageIFD.Orientation) if orientation_value is None or orientation_value == 1: return img # Otherwise, rotate the image and update the exif accordingly. img_rot = autorotate(img) exif = update_exif_for_rotated_image(exif) # Now, lets restore the output image to # PIL.JpegImagePlugin.JpegImageFile class with the correct, # updated Exif information. # Save image as JPEG to get a correct byte representation of # the image and then read it back. with io.BytesIO() as bio: img_rot.save(bio, format='jpeg', exif=piexif.dump(exif)) bio.seek(0) img_rot_new = pil_open(bio, 'r') # Since we use a BytesIO we need to avoid the lazy # loading of the PIL image. Therefore, we explicitly # load the data here. img_rot_new.load() img = img_rot_new return img
[ "def", "imdirect_open", "(", "fp", ")", ":", "img", "=", "pil_open", "(", "fp", ",", "'r'", ")", "if", "img", ".", "format", "==", "'JPEG'", ":", "# Read Exif tag on image.", "if", "isinstance", "(", "fp", ",", "string_types", ")", ":", "exif", "=", "piexif", ".", "load", "(", "text_type_to_use", "(", "fp", ")", ")", "else", ":", "fp", ".", "seek", "(", "0", ")", "exif", "=", "piexif", ".", "load", "(", "fp", ".", "read", "(", ")", ")", "# If orientation field is missing or equal to 1, nothing needs to be done.", "orientation_value", "=", "exif", ".", "get", "(", "'0th'", ",", "{", "}", ")", ".", "get", "(", "piexif", ".", "ImageIFD", ".", "Orientation", ")", "if", "orientation_value", "is", "None", "or", "orientation_value", "==", "1", ":", "return", "img", "# Otherwise, rotate the image and update the exif accordingly.", "img_rot", "=", "autorotate", "(", "img", ")", "exif", "=", "update_exif_for_rotated_image", "(", "exif", ")", "# Now, lets restore the output image to", "# PIL.JpegImagePlugin.JpegImageFile class with the correct,", "# updated Exif information.", "# Save image as JPEG to get a correct byte representation of", "# the image and then read it back.", "with", "io", ".", "BytesIO", "(", ")", "as", "bio", ":", "img_rot", ".", "save", "(", "bio", ",", "format", "=", "'jpeg'", ",", "exif", "=", "piexif", ".", "dump", "(", "exif", ")", ")", "bio", ".", "seek", "(", "0", ")", "img_rot_new", "=", "pil_open", "(", "bio", ",", "'r'", ")", "# Since we use a BytesIO we need to avoid the lazy", "# loading of the PIL image. Therefore, we explicitly", "# load the data here.", "img_rot_new", ".", "load", "(", ")", "img", "=", "img_rot_new", "return", "img" ]
Opens, identifies the given image file, and rotates it if it is a JPEG. Note that this method does NOT employ the lazy loading methodology that the PIL Images otherwise use. This is done to avoid having to save new Args: fp: A filename (string), pathlib.Path object or a file-like object. Returns: The image as an :py:class:`~PIL.Image.Image` object. Raises: IOError: If the file cannot be found, or the image cannot be opened and identified.
[ "Opens", "identifies", "the", "given", "image", "file", "and", "rotates", "it", "if", "it", "is", "a", "JPEG", "." ]
12275b9f9faa6952cde2f2fe612bbefab215251a
https://github.com/hbldh/imdirect/blob/12275b9f9faa6952cde2f2fe612bbefab215251a/imdirect/_autorotate.py#L168-L218
train
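imdirect_open is meant as a drop-in replacement for PIL's open; a sketch with a hypothetical filename, assuming the function is exported at package level:

from imdirect import imdirect_open

img = imdirect_open('photo.jpg')  # JPEGs come back already rotated, with Exif updated
print(img.size)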
hbldh/imdirect
imdirect/_autorotate.py
monkey_patch
def monkey_patch(enabled=True): """Monkey patching PIL.Image.open method Args: enabled (bool): If the monkey patch should be activated or deactivated. """ if enabled: Image.open = imdirect_open else: Image.open = pil_open
python
def monkey_patch(enabled=True): """Monkey patching PIL.Image.open method Args: enabled (bool): If the monkey patch should be activated or deactivated. """ if enabled: Image.open = imdirect_open else: Image.open = pil_open
[ "def", "monkey_patch", "(", "enabled", "=", "True", ")", ":", "if", "enabled", ":", "Image", ".", "open", "=", "imdirect_open", "else", ":", "Image", ".", "open", "=", "pil_open" ]
Monkey patching PIL.Image.open method Args: enabled (bool): If the monkey patch should be activated or deactivated.
[ "Monkey", "patching", "PIL", ".", "Image", ".", "open", "method" ]
12275b9f9faa6952cde2f2fe612bbefab215251a
https://github.com/hbldh/imdirect/blob/12275b9f9faa6952cde2f2fe612bbefab215251a/imdirect/_autorotate.py#L221-L232
train
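The intended use of the monkey patch; the filename is illustrative:

import imdirect
from PIL import Image

imdirect.monkey_patch()        # PIL.Image.open now auto-rotates JPEGs
img = Image.open('photo.jpg')  # returned upright

imdirect.monkey_patch(enabled=False)  # restore the original PIL open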
hbldh/imdirect
imdirect/_autorotate.py
save_with_exif_info
def save_with_exif_info(img, *args, **kwargs): """Saves an image using PIL, preserving the exif information. Args: img (PIL.Image.Image): *args: The arguments for the `save` method of the Image class. **kwargs: The keywords for the `save` method of the Image class. """ if 'exif' in kwargs: exif = kwargs.pop('exif') else: exif = img.info.get('exif') img.save(*args, exif=exif, **kwargs)
python
def save_with_exif_info(img, *args, **kwargs): """Saves an image using PIL, preserving the exif information. Args: img (PIL.Image.Image): *args: The arguments for the `save` method of the Image class. **kwargs: The keywords for the `save` method of the Image class. """ if 'exif' in kwargs: exif = kwargs.pop('exif') else: exif = img.info.get('exif') img.save(*args, exif=exif, **kwargs)
[ "def", "save_with_exif_info", "(", "img", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'exif'", "in", "kwargs", ":", "exif", "=", "kwargs", ".", "pop", "(", "'exif'", ")", "else", ":", "exif", "=", "img", ".", "info", ".", "get", "(", "'exif'", ")", "img", ".", "save", "(", "*", "args", ",", "exif", "=", "exif", ",", "*", "*", "kwargs", ")" ]
Saves an image using PIL, preserving the exif information. Args: img (PIL.Image.Image): *args: The arguments for the `save` method of the Image class. **kwargs: The keywords for the `save` method of the Image class.
[ "Saves", "an", "image", "using", "PIL", "preserving", "the", "exif", "information", "." ]
12275b9f9faa6952cde2f2fe612bbefab215251a
https://github.com/hbldh/imdirect/blob/12275b9f9faa6952cde2f2fe612bbefab215251a/imdirect/_autorotate.py#L235-L248
train
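A sketch of saving while preserving Exif, assuming save_with_exif_info is exported at package level; filenames are illustrative:

import imdirect
from PIL import Image

img = Image.open('photo.jpg')                  # JPEG carrying Exif data
imdirect.save_with_exif_info(img, 'copy.jpg')  # exif taken from img.info when not passed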
redhat-cip/python-dciclient
dciclient/v1/api/base.py
create
def create(context, resource, **kwargs): """Create a resource""" data = utils.sanitize_kwargs(**kwargs) uri = '%s/%s' % (context.dci_cs_api, resource) r = context.session.post(uri, timeout=HTTP_TIMEOUT, json=data) return r
python
def create(context, resource, **kwargs): """Create a resource""" data = utils.sanitize_kwargs(**kwargs) uri = '%s/%s' % (context.dci_cs_api, resource) r = context.session.post(uri, timeout=HTTP_TIMEOUT, json=data) return r
[ "def", "create", "(", "context", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "data", "=", "utils", ".", "sanitize_kwargs", "(", "*", "*", "kwargs", ")", "uri", "=", "'%s/%s'", "%", "(", "context", ".", "dci_cs_api", ",", "resource", ")", "r", "=", "context", ".", "session", ".", "post", "(", "uri", ",", "timeout", "=", "HTTP_TIMEOUT", ",", "json", "=", "data", ")", "return", "r" ]
Create a resource
[ "Create", "a", "resource" ]
a4aa5899062802bbe4c30a075d8447f8d222d214
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L22-L28
train
redhat-cip/python-dciclient
dciclient/v1/api/base.py
get
def get(context, resource, **kwargs):
    """Get a specific resource"""
    uri = '%s/%s/%s' % (context.dci_cs_api, resource, kwargs.pop('id'))
    r = context.session.get(uri, timeout=HTTP_TIMEOUT, params=kwargs)
    return r
python
def get(context, resource, **kwargs):
    """Get a specific resource"""
    uri = '%s/%s/%s' % (context.dci_cs_api, resource, kwargs.pop('id'))
    r = context.session.get(uri, timeout=HTTP_TIMEOUT, params=kwargs)
    return r
[ "def", "get", "(", "context", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "'%s/%s/%s'", "%", "(", "context", ".", "dci_cs_api", ",", "resource", ",", "kwargs", ".", "pop", "(", "'id'", ")", ")", "r", "=", "context", ".", "session", ".", "get", "(", "uri", ",", "timeout", "=", "HTTP_TIMEOUT", ",", "params", "=", "kwargs", ")", "return", "r" ]
Get a specific resource
[ "List", "a", "specific", "resource" ]
a4aa5899062802bbe4c30a075d8447f8d222d214
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L69-L73
train
redhat-cip/python-dciclient
dciclient/v1/api/base.py
get_data
def get_data(context, resource, **kwargs): """Retrieve data field from a resource""" url_suffix = '' if 'keys' in kwargs and kwargs['keys']: url_suffix = '/?keys=%s' % ','.join(kwargs.pop('keys')) uri = '%s/%s/%s/data%s' % (context.dci_cs_api, resource, kwargs.pop('id'), url_suffix) r = context.session.get(uri, timeout=HTTP_TIMEOUT, params=kwargs) return r
python
def get_data(context, resource, **kwargs): """Retrieve data field from a resource""" url_suffix = '' if 'keys' in kwargs and kwargs['keys']: url_suffix = '/?keys=%s' % ','.join(kwargs.pop('keys')) uri = '%s/%s/%s/data%s' % (context.dci_cs_api, resource, kwargs.pop('id'), url_suffix) r = context.session.get(uri, timeout=HTTP_TIMEOUT, params=kwargs) return r
[ "def", "get_data", "(", "context", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "url_suffix", "=", "''", "if", "'keys'", "in", "kwargs", "and", "kwargs", "[", "'keys'", "]", ":", "url_suffix", "=", "'/?keys=%s'", "%", "','", ".", "join", "(", "kwargs", ".", "pop", "(", "'keys'", ")", ")", "uri", "=", "'%s/%s/%s/data%s'", "%", "(", "context", ".", "dci_cs_api", ",", "resource", ",", "kwargs", ".", "pop", "(", "'id'", ")", ",", "url_suffix", ")", "r", "=", "context", ".", "session", ".", "get", "(", "uri", ",", "timeout", "=", "HTTP_TIMEOUT", ",", "params", "=", "kwargs", ")", "return", "r" ]
Retrieve data field from a resource
[ "Retrieve", "data", "field", "from", "a", "resource" ]
a4aa5899062802bbe4c30a075d8447f8d222d214
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L76-L87
train
redhat-cip/python-dciclient
dciclient/v1/api/base.py
update
def update(context, resource, **kwargs): """Update a specific resource""" etag = kwargs.pop('etag') id = kwargs.pop('id') data = utils.sanitize_kwargs(**kwargs) uri = '%s/%s/%s' % (context.dci_cs_api, resource, id) r = context.session.put(uri, timeout=HTTP_TIMEOUT, headers={'If-match': etag}, json=data) return r
python
def update(context, resource, **kwargs): """Update a specific resource""" etag = kwargs.pop('etag') id = kwargs.pop('id') data = utils.sanitize_kwargs(**kwargs) uri = '%s/%s/%s' % (context.dci_cs_api, resource, id) r = context.session.put(uri, timeout=HTTP_TIMEOUT, headers={'If-match': etag}, json=data) return r
[ "def", "update", "(", "context", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "etag", "=", "kwargs", ".", "pop", "(", "'etag'", ")", "id", "=", "kwargs", ".", "pop", "(", "'id'", ")", "data", "=", "utils", ".", "sanitize_kwargs", "(", "*", "*", "kwargs", ")", "uri", "=", "'%s/%s/%s'", "%", "(", "context", ".", "dci_cs_api", ",", "resource", ",", "id", ")", "r", "=", "context", ".", "session", ".", "put", "(", "uri", ",", "timeout", "=", "HTTP_TIMEOUT", ",", "headers", "=", "{", "'If-match'", ":", "etag", "}", ",", "json", "=", "data", ")", "return", "r" ]
Update a specific resource
[ "Update", "a", "specific", "resource" ]
a4aa5899062802bbe4c30a075d8447f8d222d214
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L90-L99
train
redhat-cip/python-dciclient
dciclient/v1/api/base.py
delete
def delete(context, resource, id, **kwargs):
    """Delete a specific resource"""
    etag = kwargs.pop('etag', None)
    subresource = kwargs.pop('subresource', None)
    subresource_id = kwargs.pop('subresource_id', None)

    uri = '%s/%s/%s' % (context.dci_cs_api, resource, id)

    if subresource:
        uri = '%s/%s/%s' % (uri, subresource, subresource_id)

    r = context.session.delete(uri, timeout=HTTP_TIMEOUT,
                               headers={'If-match': etag})
    return r
python
def delete(context, resource, id, **kwargs):
    """Delete a specific resource"""
    etag = kwargs.pop('etag', None)
    subresource = kwargs.pop('subresource', None)
    subresource_id = kwargs.pop('subresource_id', None)

    uri = '%s/%s/%s' % (context.dci_cs_api, resource, id)

    if subresource:
        uri = '%s/%s/%s' % (uri, subresource, subresource_id)

    r = context.session.delete(uri, timeout=HTTP_TIMEOUT,
                               headers={'If-match': etag})
    return r
[ "def", "delete", "(", "context", ",", "resource", ",", "id", ",", "*", "*", "kwargs", ")", ":", "etag", "=", "kwargs", ".", "pop", "(", "'etag'", ",", "None", ")", "id", "=", "id", "subresource", "=", "kwargs", ".", "pop", "(", "'subresource'", ",", "None", ")", "subresource_id", "=", "kwargs", ".", "pop", "(", "'subresource_id'", ",", "None", ")", "uri", "=", "'%s/%s/%s'", "%", "(", "context", ".", "dci_cs_api", ",", "resource", ",", "id", ")", "if", "subresource", ":", "uri", "=", "'%s/%s/%s'", "%", "(", "uri", ",", "subresource", ",", "subresource_id", ")", "r", "=", "context", ".", "session", ".", "delete", "(", "uri", ",", "timeout", "=", "HTTP_TIMEOUT", ",", "headers", "=", "{", "'If-match'", ":", "etag", "}", ")", "return", "r" ]
Delete a specific resource
[ "Delete", "a", "specific", "resource" ]
a4aa5899062802bbe4c30a075d8447f8d222d214
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L102-L116
train
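Two hedged calls for delete, again with the FakeContext stand-in; all ids, etags, and the tests subresource are illustrative only, not taken from this record.

# DELETE <dci_cs_api>/jobs/<id> guarded by the If-match etag.
r = delete(ctx, 'jobs', 'some-job-id', etag='current-etag')

# With subresource/subresource_id the URI becomes
# <dci_cs_api>/remotecis/<id>/tests/<subresource_id>.
r = delete(ctx, 'remotecis', 'some-remoteci-id',
           subresource='tests', subresource_id='some-test-id')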
redhat-cip/python-dciclient
dciclient/v1/api/base.py
purge
def purge(context, resource, **kwargs):
    """Purge resource type."""
    uri = '%s/%s/purge' % (context.dci_cs_api, resource)
    if 'force' in kwargs and kwargs['force']:
        r = context.session.post(uri, timeout=HTTP_TIMEOUT)
    else:
        r = context.session.get(uri, timeout=HTTP_TIMEOUT)
    return r
python
def purge(context, resource, **kwargs):
    """Purge resource type."""
    uri = '%s/%s/purge' % (context.dci_cs_api, resource)
    if 'force' in kwargs and kwargs['force']:
        r = context.session.post(uri, timeout=HTTP_TIMEOUT)
    else:
        r = context.session.get(uri, timeout=HTTP_TIMEOUT)
    return r
[ "def", "purge", "(", "context", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "'%s/%s/purge'", "%", "(", "context", ".", "dci_cs_api", ",", "resource", ")", "if", "'force'", "in", "kwargs", "and", "kwargs", "[", "'force'", "]", ":", "r", "=", "context", ".", "session", ".", "post", "(", "uri", ",", "timeout", "=", "HTTP_TIMEOUT", ")", "else", ":", "r", "=", "context", ".", "session", ".", "get", "(", "uri", ",", "timeout", "=", "HTTP_TIMEOUT", ")", "return", "r" ]
Purge resource type.
[ "Purge", "resource", "type", "." ]
a4aa5899062802bbe4c30a075d8447f8d222d214
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/base.py#L119-L126
train
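A sketch for purge with the same stand-in context. Reading the code, the helper only POSTs when force is truthy; without it the call is a GET against the same /purge endpoint, presumably a preview of what would be removed (an inference, not confirmed by this record).

preview = purge(ctx, 'jobs')             # GET  <dci_cs_api>/jobs/purge
result = purge(ctx, 'jobs', force=True)  # POST <dci_cs_api>/jobs/purge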
lsst-sqre/documenteer
documenteer/sphinxext/utils.py
parse_rst_content
def parse_rst_content(content, state):
    """Parse rST-formatted string content into docutils nodes

    Parameters
    ----------
    content : `str`
        ReStructuredText-formatted content
    state : ``docutils.statemachine.State``
        Usually the directive's ``state`` attribute.

    Returns
    -------
    instance from ``docutils.nodes``
        Docutils node representing the ``content``.
    """
    # http://www.sphinx-doc.org/en/master/extdev/markupapi.html
    # #parsing-directive-content-as-rest
    container_node = nodes.section()
    container_node.document = state.document

    viewlist = ViewList()
    for i, line in enumerate(content.splitlines()):
        viewlist.append(line, source='', offset=i)

    with switch_source_input(state, viewlist):
        state.nested_parse(viewlist, 0, container_node)

    return container_node.children
python
def parse_rst_content(content, state):
    """Parse rST-formatted string content into docutils nodes

    Parameters
    ----------
    content : `str`
        ReStructuredText-formatted content
    state : ``docutils.statemachine.State``
        Usually the directive's ``state`` attribute.

    Returns
    -------
    instance from ``docutils.nodes``
        Docutils node representing the ``content``.
    """
    # http://www.sphinx-doc.org/en/master/extdev/markupapi.html
    # #parsing-directive-content-as-rest
    container_node = nodes.section()
    container_node.document = state.document

    viewlist = ViewList()
    for i, line in enumerate(content.splitlines()):
        viewlist.append(line, source='', offset=i)

    with switch_source_input(state, viewlist):
        state.nested_parse(viewlist, 0, container_node)

    return container_node.children
[ "def", "parse_rst_content", "(", "content", ",", "state", ")", ":", "# http://www.sphinx-doc.org/en/master/extdev/markupapi.html", "# #parsing-directive-content-as-rest", "container_node", "=", "nodes", ".", "section", "(", ")", "container_node", ".", "document", "=", "state", ".", "document", "viewlist", "=", "ViewList", "(", ")", "for", "i", ",", "line", "in", "enumerate", "(", "content", ".", "splitlines", "(", ")", ")", ":", "viewlist", ".", "append", "(", "line", ",", "source", "=", "''", ",", "offset", "=", "i", ")", "with", "switch_source_input", "(", "state", ",", "viewlist", ")", ":", "state", ".", "nested_parse", "(", "viewlist", ",", "0", ",", "container_node", ")", "return", "container_node", ".", "children" ]
Parse rST-formatted string content into docutils nodes

    Parameters
    ----------
    content : `str`
        ReStructuredText-formatted content
    state : ``docutils.statemachine.State``
        Usually the directive's ``state`` attribute.

    Returns
    -------
    instance from ``docutils.nodes``
        Docutils node representing the ``content``.
[ "Parse", "rST", "-", "formatted", "string", "content", "into", "docutils", "nodes" ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L14-L41
train
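A hypothetical Sphinx directive showing where parse_rst_content fits; the directive class and its behavior are assumptions for illustration, not part of documenteer.

from docutils.parsers.rst import Directive

class ExampleDirective(Directive):
    has_content = True

    def run(self):
        # self.content holds the directive body as lines;
        # parse_rst_content returns a list of docutils nodes
        # ready to hand back to Sphinx.
        text = '\n'.join(self.content)
        return parse_rst_content(text, self.state)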