Each entry below is one record of a code-search dataset (Python functions paired with their docstrings). The source schema, with the value ranges reported per field:

repo: string, 7–55 chars
path: string, 4–127 chars
func_name: string, 1–88 chars
original_string: string, 75–19.8k chars (the function source, duplicated verbatim in code)
language: string, 1 distinct value (python)
code: string, 75–19.8k chars
code_tokens: list (tokenized form of code)
docstring: string, 3–17.3k chars
docstring_tokens: list (tokenized form of docstring)
sha: string, 40 chars (commit hash)
url: string, 87–242 chars
partition: string, 1 distinct value (train)

The records below show repo, path, func_name, language, code, docstring, sha, url and partition.
repo: smnorris/bcdata
path: bcdata/wfs.py
func_name: check_cache
language: python
code:

def check_cache(path):
    """Return true if the cache file holding list of all datasets
    does not exist or is older than 30 days
    """
    if not os.path.exists(path):
        return True
    else:
        # check the age
        mod_date = datetime.fromtimestamp(os.path.getmtime(path))
        if mod_date < (datetime.now() - timedelta(days=30)):
            return True
        else:
            return False
docstring: Return true if the cache file holding list of all datasets does not exist or is older than 30 days
sha: de6b5bbc28d85e36613b51461911ee0a72a146c5
url: https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L36-L48
partition: train
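A brief usage sketch (assuming the bcdata package is importable; the cache path mirrors the default that list_tables, below, uses):

from pathlib import Path
from bcdata.wfs import check_cache

cache_file = str(Path.home() / ".bcdata")  # default cache location assumed from list_tables
if check_cache(cache_file):
    print("cache missing or older than 30 days - regenerate it")
else:
    print("cache is fresh enough")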
repo: smnorris/bcdata
path: bcdata/wfs.py
func_name: bcdc_package_show
language: python
code:

def bcdc_package_show(package):
    """Query DataBC Catalogue API about given package
    """
    params = {"id": package}
    r = requests.get(bcdata.BCDC_API_URL + "package_show", params=params)
    if r.status_code != 200:
        raise ValueError("{d} is not present in DataBC API list".format(d=package))
    return r.json()["result"]
docstring: Query DataBC Catalogue API about given package
sha: de6b5bbc28d85e36613b51461911ee0a72a146c5
url: https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L51-L58
partition: train
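A hedged usage sketch; the package id here is illustrative and not verified against the live DataBC catalogue:

from bcdata.wfs import bcdc_package_show

try:
    result = bcdc_package_show("bc-airports")  # hypothetical package id
    print(result.get("title"))  # package_show results are CKAN package dicts
except ValueError as err:
    print(err)  # raised on any non-200 response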
repo: smnorris/bcdata
path: bcdata/wfs.py
func_name: list_tables
language: python
code:

def list_tables(refresh=False, cache_file=None):
    """Return a list of all datasets available via WFS
    """
    # default cache listing all objects available is
    # ~/.bcdata
    if not cache_file:
        cache_file = os.path.join(str(Path.home()), ".bcdata")
    # regenerate the cache if:
    # - the cache file doesn't exist
    # - we force a refresh
    # - the cache is older than 1 month
    if refresh or check_cache(cache_file):
        wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
        bcdata_objects = [i.strip("pub:") for i in list(wfs.contents)]
        with open(cache_file, "w") as outfile:
            json.dump(sorted(bcdata_objects), outfile)
    else:
        with open(cache_file, "r") as infile:
            bcdata_objects = json.load(infile)
    return bcdata_objects
docstring: Return a list of all datasets available via WFS
sha: de6b5bbc28d85e36613b51461911ee0a72a146c5
url: https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L70-L91
partition: train
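A minimal usage sketch, assuming bcdata is installed and its OWS endpoint is reachable:

from bcdata.wfs import list_tables

tables = list_tables()              # served from ~/.bcdata when the cache is fresh
tables = list_tables(refresh=True)  # forces a new WFS GetCapabilities request
print(len(tables), "layers available via WFS")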
repo: smnorris/bcdata
path: bcdata/wfs.py
func_name: make_request
language: python
code:

def make_request(parameters):
    """Submit a getfeature request to DataBC WFS and return features
    """
    r = requests.get(bcdata.WFS_URL, params=parameters)
    return r.json()["features"]
docstring: Submit a getfeature request to DataBC WFS and return features
sha: de6b5bbc28d85e36613b51461911ee0a72a146c5
url: https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L113-L117
partition: train
repo: smnorris/bcdata
path: bcdata/wfs.py
func_name: define_request
language: python
code:

def define_request(
    dataset, query=None, crs="epsg:4326", bounds=None, sortby=None, pagesize=10000
):
    """Define the getfeature request parameters required to download a dataset

    References:
    - http://www.opengeospatial.org/standards/wfs
    - http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
    - http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
    """
    # validate the table name and find out how many features it holds
    table = validate_name(dataset)
    n = bcdata.get_count(table, query=query)
    # DataBC WFS getcapabilities says that it supports paging,
    # and the spec says that responses should include 'next URI'
    # (section 7.7.4.4.1)....
    # But I do not see any next uri in the responses. Instead of following
    # the paged urls, for datasets with >10k records, just generate urls
    # based on number of features in the dataset.
    chunks = math.ceil(n / pagesize)
    # if making several requests, we need to sort by something
    if chunks > 1 and not sortby:
        sortby = get_sortkey(table)
    # build the request parameters for each chunk
    param_dicts = []
    for i in range(chunks):
        request = {
            "service": "WFS",
            "version": "2.0.0",
            "request": "GetFeature",
            "typeName": table,
            "outputFormat": "json",
            "SRSNAME": crs,
        }
        if sortby:
            request["sortby"] = sortby
        if query:
            request["CQL_FILTER"] = query
        if bounds:
            request["bbox"] = ",".join([str(b) for b in bounds])
        if chunks > 1:
            request["startIndex"] = i * pagesize
            request["count"] = pagesize
        param_dicts.append(request)
    return param_dicts
docstring: Define the getfeature request parameters required to download a dataset
    References:
    - http://www.opengeospatial.org/standards/wfs
    - http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
    - http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
sha: de6b5bbc28d85e36613b51461911ee0a72a146c5
url: https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L120-L167
partition: train
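To make the paging arithmetic concrete: with the default pagesize of 10000, a hypothetical layer of 25,000 features yields three parameter dicts whose offsets step by startIndex (a self-contained sketch of the chunking math, not a live request):

import math

n, pagesize = 25000, 10000
chunks = math.ceil(n / pagesize)  # -> 3 requests
for i in range(chunks):
    print({"startIndex": i * pagesize, "count": pagesize})
# {'startIndex': 0, 'count': 10000}
# {'startIndex': 10000, 'count': 10000}
# {'startIndex': 20000, 'count': 10000}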
repo: smnorris/bcdata
path: bcdata/wfs.py
func_name: get_data
language: python
code:

def get_data(
    dataset,
    query=None,
    crs="epsg:4326",
    bounds=None,
    sortby=None,
    pagesize=10000,
    max_workers=5,
):
    """Get GeoJSON featurecollection from DataBC WFS
    """
    param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        results = executor.map(make_request, param_dicts)
        outjson = dict(type="FeatureCollection", features=[])
        for result in results:
            outjson["features"] += result
    return outjson
docstring: Get GeoJSON featurecollection from DataBC WFS
sha: de6b5bbc28d85e36613b51461911ee0a72a146c5
url: https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L170-L189
partition: train
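A usage sketch; the layer name and CQL filter are illustrative values, not validated against the service:

from bcdata.wfs import get_data

fc = get_data(
    "WHSE_IMAGERY_AND_BASE_MAPS.GSR_AIRPORTS_SVW",  # illustrative layer name
    query="LOCALITY = 'Victoria'",                  # illustrative CQL filter
)
print(fc["type"], len(fc["features"]))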
repo: smnorris/bcdata
path: bcdata/wfs.py
func_name: get_features
language: python
code:

def get_features(
    dataset,
    query=None,
    crs="epsg:4326",
    bounds=None,
    sortby=None,
    pagesize=10000,
    max_workers=5,
):
    """Yield features from DataBC WFS
    """
    param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for result in executor.map(make_request, param_dicts):
            for feature in result:
                yield feature
docstring: Yield features from DataBC WFS
sha: de6b5bbc28d85e36613b51461911ee0a72a146c5
url: https://github.com/smnorris/bcdata/blob/de6b5bbc28d85e36613b51461911ee0a72a146c5/bcdata/wfs.py#L192-L208
partition: train
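Because this variant is a generator, features can be consumed as each paged request completes rather than after the whole download; a hedged sketch with an illustrative layer name:

from itertools import islice
from bcdata.wfs import get_features

# peek at the first three features without fetching everything
for feature in islice(get_features("WHSE_IMAGERY_AND_BASE_MAPS.GSR_AIRPORTS_SVW"), 3):
    print(feature["id"])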
repo: pneff/wsgiservice
path: wsgiservice/routing.py
func_name: Router._get_sorted
language: python
code:

def _get_sorted(self, resources):
    """Order the resources by priority - the most specific paths come first.

    :param resources: List of :class:`wsgiservice.resource.Resource`
        classes to be served by this application.
    """
    tmp = []
    for resource in resources:
        path = resource._path
        # Each slash counts as 10 priority, each variable takes one away
        priority = path.count('/') * 10 - path.count('{')
        tmp.append((priority, resource))
    return [resource for prio, resource in reversed(sorted(tmp))]
docstring: Order the resources by priority - the most specific paths come first.
    :param resources: List of :class:`wsgiservice.resource.Resource` classes to be served by this application.
sha: 03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
url: https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/routing.py#L24-L37
partition: train
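The scoring is easiest to see on concrete paths: every '/' adds 10 to the priority, every '{' variable subtracts 1, and the highest scores are served first. A self-contained check of that arithmetic:

for path in ('/users/admin', '/users/{id}', '/users'):
    priority = path.count('/') * 10 - path.count('{')
    print(path, '->', priority)
# /users/admin -> 20
# /users/{id} -> 19
# /users -> 10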
repo: mkoura/dump2polarion
path: dump2polarion/exporters/transform_projects.py
func_name: set_cfme_caselevel
language: python
code:

def set_cfme_caselevel(testcase, caselevels):
    """Converts tier to caselevel."""
    tier = testcase.get("caselevel")
    if tier is None:
        return

    try:
        caselevel = caselevels[int(tier)]
    except IndexError:
        # invalid value
        caselevel = "component"
    except ValueError:
        # there's already string value
        return

    testcase["caselevel"] = caselevel
docstring: Converts tier to caselevel.
sha: f4bd24e9d5070e282aad15f1e8bb514c0525cd37
url: https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/transform_projects.py#L22-L37
partition: train
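A worked example with an assumed caselevels ordering, showing the three paths through the function: a valid index, an out-of-range tier falling back to "component", and a non-numeric value left untouched:

from dump2polarion.exporters.transform_projects import set_cfme_caselevel

caselevels = ["component", "integration", "system"]  # assumed ordering

tc = {"caselevel": "1"}
set_cfme_caselevel(tc, caselevels)
print(tc)  # {'caselevel': 'integration'}

tc = {"caselevel": "9"}
set_cfme_caselevel(tc, caselevels)
print(tc)  # {'caselevel': 'component'} via the IndexError branch

tc = {"caselevel": "system"}
set_cfme_caselevel(tc, caselevels)
print(tc)  # unchanged via the ValueError branch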
repo: mkoura/dump2polarion
path: dump2polarion/exporters/transform_projects.py
func_name: get_requirements_transform_cfme
language: python
code:

def get_requirements_transform_cfme(config):
    """Return requirement transformation function for CFME."""

    def requirement_transform(requirement):
        """Requirements transform for CFME."""
        requirement = copy.deepcopy(requirement)

        if "id" in requirement:
            del requirement["id"]

        return requirement

    return requirement_transform
docstring: Return requirement transformation function for CFME.
sha: f4bd24e9d5070e282aad15f1e8bb514c0525cd37
url: https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/transform_projects.py#L110-L122
partition: train
repo: mkoura/dump2polarion
path: dump2polarion/exporters/transform_projects.py
func_name: get_requirements_transform_cloudtp
language: python
code:

def get_requirements_transform_cloudtp(config):
    """Return requirement transformation function for CLOUDTP."""

    def requirement_transform(requirement):
        """Requirements transform for CLOUDTP."""
        requirement = copy.deepcopy(requirement)

        if "id" in requirement:
            del requirement["id"]
        # TODO: testing purposes, remove once ready
        if not requirement.get("assignee-id"):
            requirement["assignee-id"] = "mkourim"
        if not requirement.get("approver-ids"):
            requirement["approver-ids"] = "mkourim:approved"

        return requirement

    return requirement_transform
docstring: Return requirement transformation function for CLOUDTP.
sha: f4bd24e9d5070e282aad15f1e8bb514c0525cd37
url: https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/exporters/transform_projects.py#L179-L196
partition: train
repo: oz123/blogit
path: blogit/blogit.py
func_name: render_archive
language: python
code:

def render_archive(entries):
    """Creates the archive page"""
    context = GLOBAL_TEMPLATE_CONTEXT.copy()
    context['entries'] = entries
    _render(context, 'archive_index.html',
            os.path.join(CONFIG['output_to'], 'archive/index.html')),
docstring: Creates the archive page
sha: 15b94969fa43aaf8dc677a8184b144ae8c0f7700
url: https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L349-L354
partition: train
repo: oz123/blogit
path: blogit/blogit.py
func_name: find_new_posts_and_pages
language: python
code:

def find_new_posts_and_pages(db):
    """Walk content dir, put each post and page in the database"""
    Q = Query()
    for root, dirs, files in os.walk(CONFIG['content_root']):
        for filename in sorted([f for f in files
                                if f.endswith(('md', 'markdown'))]):
            fullpath = os.path.join(root, filename)
            _p = fullpath.split(CONFIG['content_root'])[-1].lstrip('/')
            new_mtime = int(os.path.getmtime(fullpath))
            e, item = None, None
            for collection in ['posts', 'pages']:
                item = db[collection].get(Q.filename == _p)
                if item:
                    if new_mtime > item['mtime']:
                        db[collection].update({'mtime': new_mtime},
                                              doc_ids=[item.doc_id])
                        e = Entry(fullpath, doc_id=item.doc_id)
                    break
            if not item:
                e = Entry(fullpath)
            if e:
                yield e, e.id
docstring: Walk content dir, put each post and page in the database
sha: 15b94969fa43aaf8dc677a8184b144ae8c0f7700
url: https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L357-L381
partition: train
repo: oz123/blogit
path: blogit/blogit.py
func_name: _get_last_entries
language: python
code:

def _get_last_entries(db, qty):
    """get all entries and the last qty entries"""
    doc_ids = [post.doc_id for post in db.posts.all()]
    doc_ids = sorted(doc_ids, reverse=True)
    # bug: here we shoud only render doc_ids[:qty]
    # but we can't use mtimes for sorting. We'll need to add ptime for the
    # database (publish time)
    entries = [Entry(os.path.join(CONFIG['content_root'],
                                  db.posts.get(doc_id=doc_id)['filename']),
                     doc_id)
               for doc_id in doc_ids]
    # return _sort_entries(entries)[:qty]
    entries.sort(key=operator.attrgetter('date'), reverse=True)
    return entries[:qty], entries
docstring: get all entries and the last qty entries
sha: 15b94969fa43aaf8dc677a8184b144ae8c0f7700
url: https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L384-L396
partition: train
repo: oz123/blogit
path: blogit/blogit.py
func_name: update_index
language: python
code:

def update_index(entries):
    """find the last 10 entries in the database and create the main page.
    Each entry in has an doc_id, so we only get the last 10 doc_ids.

    This method also updates the ATOM feed.
    """
    context = GLOBAL_TEMPLATE_CONTEXT.copy()
    context['entries'] = entries
    context['last_build'] = datetime.datetime.now().strftime(
        "%Y-%m-%dT%H:%M:%SZ")
    list(map(lambda x: _render(context, x[0],
                               os.path.join(CONFIG['output_to'], x[1])),
             (('entry_index.html', 'index.html'), ('atom.xml', 'atom.xml'))))
docstring: find the last 10 entries in the database and create the main page. Each entry in has an doc_id, so we only get the last 10 doc_ids. This method also updates the ATOM feed.
sha: 15b94969fa43aaf8dc677a8184b144ae8c0f7700
url: https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L399-L413
partition: train
repo: oz123/blogit
path: blogit/blogit.py
func_name: build
language: python
code:

def build(config):
    """Incremental build of the website"""
    logger.info("\nRendering website now...\n")
    logger.info("entries:")
    tags = dict()
    entries = list()
    for post, post_id in find_new_posts_and_pages(DB):
        # this method will also parse the post's tags and
        # update the db collection containing the tags.
        if post.render():
            if post.header['kind'] in ['writing', 'link']:
                for tag in post.tags:
                    tag.posts = [post_id]
                    tags[tag.name] = tag
            entries.append(post)
            logger.info("%s" % post.path)
    for name, to in tags.items():
        logger.info("updating tag %s" % name)
        to.render()
    # This is expensive, we should insert only the recent entries
    # to the index using BeautifulSoup
    # update index
    logger.info("Updating index")
    last_entries, all_entries = _get_last_entries(DB, config['INDEX_SIZE'])
    last_entries = list(_filter_none_public(last_entries))
    update_index(last_entries)
    # update archive
    logger.info("Updating archive")
    # This is expensive, we should insert only the recent entries
    # to the archive using BeautifulSoup
    entries = [Entry.entry_from_db(
        os.path.join(CONFIG['content_root'], e.get('filename')), e.doc_id)
        for e in DB.posts.all()]
    all_entries = list(_filter_none_public(all_entries))
    all_entries.sort(key=operator.attrgetter('date'), reverse=True)
    render_archive(all_entries[config['ARCHIVE_SIZE']:])
docstring: Incremental build of the website
sha: 15b94969fa43aaf8dc677a8184b144ae8c0f7700
url: https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L423-L464
partition: train
repo: oz123/blogit
path: blogit/blogit.py
func_name: preview
language: python
code:

def preview():  # pragma: no coverage
    """launch an HTTP to preview the website"""
    Handler = http.server.SimpleHTTPRequestHandler
    socketserver.TCPServer.allow_reuse_address = True
    port = CONFIG['http_port']
    httpd = socketserver.TCPServer(("", port), Handler)
    os.chdir(CONFIG['output_to'])
    try:
        logger.info("and ready to test at "
                    "http://127.0.0.1:%d" % CONFIG['http_port'])
        logger.info("Hit Ctrl+C to exit")
        httpd.serve_forever()
    except KeyboardInterrupt:
        httpd.shutdown()
docstring: launch an HTTP to preview the website
sha: 15b94969fa43aaf8dc677a8184b144ae8c0f7700
url: https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L467-L480
partition: train
repo: oz123/blogit
path: blogit/blogit.py
func_name: Tag.entries
language: python
code:

def entries(self):
    """return the actual lists of entries tagged with"""
    Tags = Query()
    tag = self.table.get(Tags.name == self.name)
    posts = tag['post_ids']
    for id in posts:
        post = self.db.posts.get(doc_id=id)
        if not post:  # pragma: no coverage
            raise ValueError("No post found for doc_id %s" % id)
        yield Entry(os.path.join(CONFIG['content_root'],
                                 post['filename']), id)
docstring: return the actual lists of entries tagged with
sha: 15b94969fa43aaf8dc677a8184b144ae8c0f7700
url: https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L142-L152
partition: train
repo: oz123/blogit
path: blogit/blogit.py
func_name: Tag.render
language: python
code:

def render(self):
    """Render html page and atom feed"""
    context = GLOBAL_TEMPLATE_CONTEXT.copy()
    context['tag'] = self
    entries = list(self.entries)
    entries.sort(key=operator.attrgetter('date'), reverse=True)
    context['entries'] = entries
    # render html page
    render_to = os.path.join(CONFIG['output_to'], 'tags', self.slug)
    if not os.path.exists(render_to):  # pragma: no coverage
        os.makedirs(render_to)
    _render(context, 'tag_index.html', os.path.join(render_to, 'index.html'))  # noqa
    # render atom.xml
    context['entries'] = context['entries'][:10]
    context['last_build'] = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")  # noqa
    _render(context, 'atom.xml', os.path.join(render_to, 'atom.xml'))
    return True
docstring: Render html page and atom feed
sha: 15b94969fa43aaf8dc677a8184b144ae8c0f7700
url: https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L154-L173
partition: train
repo: oz123/blogit
path: blogit/blogit.py
func_name: Entry.tags
language: python
code:

def tags(self):
    """this property is always called after prepare"""
    if 'tags' in self.header:
        tags = [Tag(t) for t in self.header['tags']]
        list(map(lambda t: setattr(t, 'posts', [self.id]), tags))
        return tags
    else:
        return []
docstring: this property is always called after prepare
sha: 15b94969fa43aaf8dc677a8184b144ae8c0f7700
url: https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L276-L283
partition: train
repo: oz123/blogit
path: blogit/blogit.py
func_name: Entry.prepare
language: python
code:

def prepare(self):
    self.body_html = markdown(
        codecs.open(self.abspath, 'r').read(),
        extras=['fenced-code-blocks', 'hilite', 'tables', 'metadata'])
    self.header = self.body_html.metadata

    """a blog post without tags causes an error ..."""
    if 'tags' in self.header:  # pages can lack tags
        self.header['tags'] = [t.strip().lower() for t in
                               self.header['tags'].split(',')]
    else:
        self.header['tags'] = ("",)

    self.date = self.header.get('published', datetime.datetime.now())
    if isinstance(self.date, str):
        self.date = datetime.datetime.strptime(self.date, "%Y-%m-%d")

    for k, v in self.header.items():
        try:
            setattr(self, k, v)
        except AttributeError:
            pass

    if self.id:
        return

    rec = {'filename': self.path,
           'mtime': int(os.path.getmtime(self.abspath))}

    if self.header['kind'] == 'writing':
        _id = Entry.db.posts.insert(rec)
    elif self.header['kind'] == 'page':
        _id = Entry.db.pages.insert(rec)

    self.id = _id
docstring: a blog post without tags causes an error ...
sha: 15b94969fa43aaf8dc677a8184b144ae8c0f7700
url: https://github.com/oz123/blogit/blob/15b94969fa43aaf8dc677a8184b144ae8c0f7700/blogit/blogit.py#L285-L323
partition: train
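Since prepare() reads the header through markdown2's 'metadata' extra, a post file needs a fenced metadata block at the top. A sketch of input this parser accepts (field names taken from the code above, values illustrative; runnable with markdown2 installed):

from markdown2 import markdown

sample = """\
---
title: An illustrative post
kind: writing
published: 2015-01-31
tags: python, blogging
---

Body of the post starts here.
"""
html = markdown(sample, extras=['metadata'])
print(html.metadata)  # dict of the front-matter fields, all values as strings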
repo: openearth/mmi-python
path: mmi/cli.py
func_name: tracker
language: python
code:

def tracker():
    """start a tracker to register running models"""
    application = mmi.tracker.app()
    application.listen(22222)
    logger.info('serving at port 22222')
    tornado.ioloop.IOLoop.instance().start()
docstring: start a tracker to register running models
sha: a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
url: https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/cli.py#L26-L31
partition: train
repo: openearth/mmi-python
path: mmi/cli.py
func_name: runner
language: python
code:

def runner(
        engine,
        configfile,
        output_vars,
        interval,
        pause,
        mpi,
        tracker,
        port,
        bmi_class
):
    """
    run a BMI compatible model
    """
    # keep track of info
    # update mpi information or use rank 0
    runner = mmi.runner.Runner(
        engine=engine,
        configfile=configfile,
        output_vars=output_vars,
        interval=interval,
        pause=pause,
        mpi=mpi,
        tracker=tracker,
        port=port,
        bmi_class=bmi_class
    )
    runner.run()
docstring: run a BMI compatible model
sha: a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d
url: https://github.com/openearth/mmi-python/blob/a2f4ac96b1e7f2fa903f668b3e05c4e86ad42e8d/mmi/cli.py#L77-L104
partition: train
repo: ckcollab/polished
path: polished/backends/mixins/polisher.py
func_name: PolisherMixin.do_extra_polishing
language: python
code:

def do_extra_polishing(self):
    '''
    Goes over each EXTRA_POLISH_FUNCTION to see if it applies to this page,
    if so, calls it
    '''
    for f in self.EXTRA_POLISH_FUNCTIONS:
        if not hasattr(f, 'polish_commit_indexes'):
            if hasattr(f, 'polish_urls') and self.URL in f.polish_urls:
                f()

        if not hasattr(f, 'polish_urls'):
            if hasattr(f, 'polish_commit_indexes') and self.CURRENT_COMMIT_INDEX in f.polish_commit_indexes:
                f()

        if hasattr(f, 'polish_commit_indexes') and hasattr(f, 'polish_urls'):
            if self.URL in f.polish_urls and self.CURRENT_COMMIT_INDEX in f.polish_commit_indexes:
                f()
docstring: Goes over each EXTRA_POLISH_FUNCTION to see if it applies to this page, if so, calls it
sha: 5a00b2fbe569bc957d1647c0849fd344db29b644
url: https://github.com/ckcollab/polished/blob/5a00b2fbe569bc957d1647c0849fd344db29b644/polished/backends/mixins/polisher.py#L30-L45
partition: train
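The dispatch keys off two optional attributes on each registered function; a sketch of how one might be tagged (the attribute names come from the code above, everything else is hypothetical):

def hide_share_widget():
    print("polishing: hiding the share widget")

# run only on the home page, and only for the first two commits
hide_share_widget.polish_urls = ['/']
hide_share_widget.polish_commit_indexes = [0, 1]

# a backend would then expose it via EXTRA_POLISH_FUNCTIONS, e.g.
# EXTRA_POLISH_FUNCTIONS = [hide_share_widget]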
repo: skioo/django-customer-billing
path: billing/models.py
func_name: Invoice.total_charges
language: python
code:

def total_charges(self):
    """
    Represents the 'goods' acquired in the invoice.
    """
    selected_charges = Charge.objects \
        .filter(invoice=self) \
        .charges() \
        .exclude(product_code=CARRIED_FORWARD)
    return total_amount(selected_charges)
docstring: Represents the 'goods' acquired in the invoice.
sha: 6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d
url: https://github.com/skioo/django-customer-billing/blob/6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d/billing/models.py#L123-L131
partition: train
repo: skioo/django-customer-billing
path: billing/models.py
func_name: Invoice.due
language: python
code:

def due(self):
    """
    The amount due for this invoice.
    Takes into account all entities in the invoice.
    Can be < 0 if the invoice was overpaid.
    """
    invoice_charges = Charge.objects.filter(invoice=self)
    invoice_transactions = Transaction.successful.filter(invoice=self)
    return total_amount(invoice_charges) - total_amount(invoice_transactions)
docstring: The amount due for this invoice. Takes into account all entities in the invoice. Can be < 0 if the invoice was overpaid.
sha: 6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d
url: https://github.com/skioo/django-customer-billing/blob/6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d/billing/models.py#L133-L140
partition: train
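In plain numbers: an invoice with 100.00 in charges and one successful 60.00 payment has 40.00 due; a 120.00 payment instead drives the result negative, the overpaid case the docstring mentions. A sketch with bare floats (the real code sums per-currency amounts over the queried rows):

charges, paid = 100.00, 60.00
print(charges - paid)   # 40.0  -> still owed

charges, paid = 100.00, 120.00
print(charges - paid)   # -20.0 -> overpaid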
repo: darvid/biome
path: setup.py
func_name: setup
language: python
code:

def setup():
    """Package setup entrypoint."""
    install_requirements = ["attrdict"]
    if sys.version_info[:2] < (3, 4):
        install_requirements.append("pathlib")
    setup_requirements = ['six', 'setuptools>=17.1', 'setuptools_scm']
    needs_sphinx = {
        'build_sphinx',
        'docs',
        'upload_docs',
    }.intersection(sys.argv)
    if needs_sphinx:
        setup_requirements.append('sphinx')
    setuptools.setup(
        author="David Gidwani",
        author_email="[email protected]",
        classifiers=[
            "Development Status :: 4 - Beta",
            "Intended Audience :: Developers",
            "License :: OSI Approved :: BSD License",
            "Operating System :: OS Independent",
            "Programming Language :: Python",
            "Programming Language :: Python :: 2",
            "Programming Language :: Python :: 2.7",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.3",
            "Programming Language :: Python :: 3.4",
            "Topic :: Software Development",
            "Topic :: Software Development :: Libraries :: Python Modules",
        ],
        description="Painless access to namespaced environment variables",
        download_url="https://github.com/darvid/biome/tarball/0.1",
        install_requires=install_requirements,
        keywords="conf config configuration environment",
        license="BSD",
        long_description=readme(),
        name="biome",
        package_dir={'': 'src'},
        packages=setuptools.find_packages('./src'),
        setup_requires=setup_requirements,
        tests_require=["pytest"],
        url="https://github.com/darvid/biome",
        use_scm_version=True,
    )
def setup(): """Package setup entrypoint.""" install_requirements = ["attrdict"] if sys.version_info[:2] < (3, 4): install_requirements.append("pathlib") setup_requirements = ['six', 'setuptools>=17.1', 'setuptools_scm'] needs_sphinx = { 'build_sphinx', 'docs', 'upload_docs', }.intersection(sys.argv) if needs_sphinx: setup_requirements.append('sphinx') setuptools.setup( author="David Gidwani", author_email="[email protected]", classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Topic :: Software Development", "Topic :: Software Development :: Libraries :: Python Modules", ], description="Painless access to namespaced environment variables", download_url="https://github.com/darvid/biome/tarball/0.1", install_requires=install_requirements, keywords="conf config configuration environment", license="BSD", long_description=readme(), name="biome", package_dir={'': 'src'}, packages=setuptools.find_packages('./src'), setup_requires=setup_requirements, tests_require=["pytest"], url="https://github.com/darvid/biome", use_scm_version=True, )
[ "def", "setup", "(", ")", ":", "install_requirements", "=", "[", "\"attrdict\"", "]", "if", "sys", ".", "version_info", "[", ":", "2", "]", "<", "(", "3", ",", "4", ")", ":", "install_requirements", ".", "append", "(", "\"pathlib\"", ")", "setup_requirements", "=", "[", "'six'", ",", "'setuptools>=17.1'", ",", "'setuptools_scm'", "]", "needs_sphinx", "=", "{", "'build_sphinx'", ",", "'docs'", ",", "'upload_docs'", ",", "}", ".", "intersection", "(", "sys", ".", "argv", ")", "if", "needs_sphinx", ":", "setup_requirements", ".", "append", "(", "'sphinx'", ")", "setuptools", ".", "setup", "(", "author", "=", "\"David Gidwani\"", ",", "author_email", "=", "\"[email protected]\"", ",", "classifiers", "=", "[", "\"Development Status :: 4 - Beta\"", ",", "\"Intended Audience :: Developers\"", ",", "\"License :: OSI Approved :: BSD License\"", ",", "\"Operating System :: OS Independent\"", ",", "\"Programming Language :: Python\"", ",", "\"Programming Language :: Python :: 2\"", ",", "\"Programming Language :: Python :: 2.7\"", ",", "\"Programming Language :: Python :: 3\"", ",", "\"Programming Language :: Python :: 3.3\"", ",", "\"Programming Language :: Python :: 3.4\"", ",", "\"Topic :: Software Development\"", ",", "\"Topic :: Software Development :: Libraries :: Python Modules\"", ",", "]", ",", "description", "=", "\"Painless access to namespaced environment variables\"", ",", "download_url", "=", "\"https://github.com/darvid/biome/tarball/0.1\"", ",", "install_requires", "=", "install_requirements", ",", "keywords", "=", "\"conf config configuration environment\"", ",", "license", "=", "\"BSD\"", ",", "long_description", "=", "readme", "(", ")", ",", "name", "=", "\"biome\"", ",", "package_dir", "=", "{", "''", ":", "'src'", "}", ",", "packages", "=", "setuptools", ".", "find_packages", "(", "'./src'", ")", ",", "setup_requires", "=", "setup_requirements", ",", "tests_require", "=", "[", "\"pytest\"", "]", ",", "url", "=", "\"https://github.com/darvid/biome\"", ",", "use_scm_version", "=", "True", ",", ")" ]
Package setup entrypoint.
[ "Package", "setup", "entrypoint", "." ]
e1f1945165df9def31af42e5e13b623e1de97f01
https://github.com/darvid/biome/blob/e1f1945165df9def31af42e5e13b623e1de97f01/setup.py#L25-L68
train
rehandalal/flask-funnel
flask_funnel/extensions.py
preprocessor
def preprocessor(accepts, exports, flag=None): """Decorator to add a new preprocessor""" def decorator(f): preprocessors.append((accepts, exports, flag, f)) return f return decorator
python
def preprocessor(accepts, exports, flag=None): """Decorator to add a new preprocessor""" def decorator(f): preprocessors.append((accepts, exports, flag, f)) return f return decorator
[ "def", "preprocessor", "(", "accepts", ",", "exports", ",", "flag", "=", "None", ")", ":", "def", "decorator", "(", "f", ")", ":", "preprocessors", ".", "append", "(", "(", "accepts", ",", "exports", ",", "flag", ",", "f", ")", ")", "return", "f", "return", "decorator" ]
Decorator to add a new preprocessor
[ "Decorator", "to", "add", "a", "new", "preprocessor" ]
b635cf52d1c9133c748aab7465edd7caef48e433
https://github.com/rehandalal/flask-funnel/blob/b635cf52d1c9133c748aab7465edd7caef48e433/flask_funnel/extensions.py#L13-L18
train
rehandalal/flask-funnel
flask_funnel/extensions.py
postprocessor
def postprocessor(accepts, flag=None): """Decorator to add a new postprocessor""" def decorator(f): postprocessors.append((accepts, flag, f)) return f return decorator
python
def postprocessor(accepts, flag=None): """Decorator to add a new postprocessor""" def decorator(f): postprocessors.append((accepts, flag, f)) return f return decorator
[ "def", "postprocessor", "(", "accepts", ",", "flag", "=", "None", ")", ":", "def", "decorator", "(", "f", ")", ":", "postprocessors", ".", "append", "(", "(", "accepts", ",", "flag", ",", "f", ")", ")", "return", "f", "return", "decorator" ]
Decorator to add a new postprocessor
[ "Decorator", "to", "add", "a", "new", "postprocessor" ]
b635cf52d1c9133c748aab7465edd7caef48e433
https://github.com/rehandalal/flask-funnel/blob/b635cf52d1c9133c748aab7465edd7caef48e433/flask_funnel/extensions.py#L21-L26
train
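The preprocessor and postprocessor decorators above implement a small plugin registry: decorated functions self-register into module-level lists at import time, and a build step later iterates those lists. A self-contained sketch of the same pattern (the registry and extension names mirror flask_funnel, but the build step is illustrative):

preprocessors = []

def preprocessor(accepts, exports, flag=None):
    def decorator(f):
        preprocessors.append((accepts, exports, flag, f))
        return f
    return decorator

@preprocessor(accepts=".coffee", exports=".js")
def compile_coffee(input, output):
    print("would compile %s -> %s" % (input, output))

# A build step can now dispatch on the registered metadata:
for accepts, exports, flag, func in preprocessors:
    if accepts == ".coffee":
        func("app.coffee", "app.js")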
rehandalal/flask-funnel
flask_funnel/extensions.py
coffee
def coffee(input, output, **kw): """Process CoffeeScript files""" subprocess.call([current_app.config.get('COFFEE_BIN'), '-c', '-o', output, input])
python
def coffee(input, output, **kw): """Process CoffeeScript files""" subprocess.call([current_app.config.get('COFFEE_BIN'), '-c', '-o', output, input])
[ "def", "coffee", "(", "input", ",", "output", ",", "*", "*", "kw", ")", ":", "subprocess", ".", "call", "(", "[", "current_app", ".", "config", ".", "get", "(", "'COFFEE_BIN'", ")", ",", "'-c'", ",", "'-o'", ",", "output", ",", "input", "]", ")" ]
Process CoffeeScript files
[ "Process", "CoffeeScript", "files" ]
b635cf52d1c9133c748aab7465edd7caef48e433
https://github.com/rehandalal/flask-funnel/blob/b635cf52d1c9133c748aab7465edd7caef48e433/flask_funnel/extensions.py#L30-L33
train
skioo/django-customer-billing
billing/actions/charges.py
cancel_charge
def cancel_charge(charge_id: str) -> None: """ Cancels an existing charge. If the charge was already cancelled then an Exception is raised. If it is not in an invoice then the charge is deleted, otherwise a Credit object is created to reverse the Charge. :param charge_id: The id of the charge to cancel. """ logger.info('cancelling-charge', charge_id=charge_id) with transaction.atomic(): charge = Charge.all_charges.get(pk=charge_id) if charge.deleted: raise ChargeAlreadyCancelledError("Cannot cancel deleted charge.") if Charge.all_charges.filter(reverses=charge_id).exists(): raise ChargeAlreadyCancelledError("Cannot cancel reversed charge.") if charge.invoice is None: charge.deleted = True charge.save() else: add_charge( account_id=charge.account_id, reverses_id=charge_id, amount=-charge.amount, product_code=REVERSAL_PRODUCT_CODE)
python
def cancel_charge(charge_id: str) -> None: """ Cancels an existing charge. If the charge was already cancelled then an Exception is raised. If it is not in an invoice then the charge is deleted, otherwise a Credit object is created to reverse the Charge. :param charge_id: The id of the charge to cancel. """ logger.info('cancelling-charge', charge_id=charge_id) with transaction.atomic(): charge = Charge.all_charges.get(pk=charge_id) if charge.deleted: raise ChargeAlreadyCancelledError("Cannot cancel deleted charge.") if Charge.all_charges.filter(reverses=charge_id).exists(): raise ChargeAlreadyCancelledError("Cannot cancel reversed charge.") if charge.invoice is None: charge.deleted = True charge.save() else: add_charge( account_id=charge.account_id, reverses_id=charge_id, amount=-charge.amount, product_code=REVERSAL_PRODUCT_CODE)
[ "def", "cancel_charge", "(", "charge_id", ":", "str", ")", "->", "None", ":", "logger", ".", "info", "(", "'cancelling-charge'", ",", "charge_id", "=", "charge_id", ")", "with", "transaction", ".", "atomic", "(", ")", ":", "charge", "=", "Charge", ".", "all_charges", ".", "get", "(", "pk", "=", "charge_id", ")", "if", "charge", ".", "deleted", ":", "raise", "ChargeAlreadyCancelledError", "(", "\"Cannot cancel deleted charge.\"", ")", "if", "Charge", ".", "all_charges", ".", "filter", "(", "reverses", "=", "charge_id", ")", ".", "exists", "(", ")", ":", "raise", "ChargeAlreadyCancelledError", "(", "\"Cannot cancel reversed charge.\"", ")", "if", "charge", ".", "invoice", "is", "None", ":", "charge", ".", "deleted", "=", "True", "charge", ".", "save", "(", ")", "else", ":", "add_charge", "(", "account_id", "=", "charge", ".", "account_id", ",", "reverses_id", "=", "charge_id", ",", "amount", "=", "-", "charge", ".", "amount", ",", "product_code", "=", "REVERSAL_PRODUCT_CODE", ")" ]
Cancels an existing charge. If the charge was already cancelled then an Exception is raised. If it is not in an invoice then the charge is deleted, otherwise a Credit object is created to reverse the Charge. :param charge_id: The id of the charge to cancel.
[ "Cancels", "an", "existing", "charge", "." ]
6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d
https://github.com/skioo/django-customer-billing/blob/6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d/billing/actions/charges.py#L16-L46
train
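Callers of cancel_charge would typically guard against the already-cancelled cases that raise ChargeAlreadyCancelledError. A runnable sketch of that control flow, with local stubs standing in for the billing package and a made-up charge id:

class ChargeAlreadyCancelledError(Exception):
    """Stand-in for the billing package's exception."""

def cancel_charge(charge_id):
    # Stub standing in for billing.actions.charges.cancel_charge above.
    raise ChargeAlreadyCancelledError("Cannot cancel deleted charge.")

try:
    cancel_charge("charge-1234")  # hypothetical id
except ChargeAlreadyCancelledError as err:
    print("skipped:", err)  # deleted or already-reversed charges stay as-is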
pneff/wsgiservice
wsgiservice/application.py
Application._log_request
def _log_request(self, request): """Log the most important parts of this request. :param request: Object representing the current request. :type request: :class:`webob.Request` """ msg = [] for d in self.LOG_DATA: val = getattr(request, d) if val: msg.append(d + ': ' + repr(val)) for d in self.LOG_HEADERS: if d in request.headers and request.headers[d]: msg.append(d + ': ' + repr(request.headers[d])) logger.info("Request information: %s", ', '.join(msg))
python
def _log_request(self, request): """Log the most important parts of this request. :param request: Object representing the current request. :type request: :class:`webob.Request` """ msg = [] for d in self.LOG_DATA: val = getattr(request, d) if val: msg.append(d + ': ' + repr(val)) for d in self.LOG_HEADERS: if d in request.headers and request.headers[d]: msg.append(d + ': ' + repr(request.headers[d])) logger.info("Request information: %s", ', '.join(msg))
[ "def", "_log_request", "(", "self", ",", "request", ")", ":", "msg", "=", "[", "]", "for", "d", "in", "self", ".", "LOG_DATA", ":", "val", "=", "getattr", "(", "request", ",", "d", ")", "if", "val", ":", "msg", ".", "append", "(", "d", "+", "': '", "+", "repr", "(", "val", ")", ")", "for", "d", "in", "self", ".", "LOG_HEADERS", ":", "if", "d", "in", "request", ".", "headers", "and", "request", ".", "headers", "[", "d", "]", ":", "msg", ".", "append", "(", "d", "+", "': '", "+", "repr", "(", "request", ".", "headers", "[", "d", "]", ")", ")", "logger", ".", "info", "(", "\"Request information: %s\"", ",", "', '", ".", "join", "(", "msg", ")", ")" ]
Log the most important parts of this request. :param request: Object representing the current request. :type request: :class:`webob.Request`
[ "Log", "the", "most", "important", "parts", "of", "this", "request", "." ]
03c064ac2e8c53a1aac9c7b99970f23cf79e20f4
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/application.py#L66-L80
train
MikaSoftware/py-mortgagekit
mortgagekit/calculator.py
MortgageCalculator.get_mortgage_payment_per_payment_frequency
def get_mortgage_payment_per_payment_frequency(self):
    """
    Function will return the amount paid per payment based on the frequency.
    """
    # Calculate the interest rate per the payment parameters:
    r = self.get_interest_rate_per_payment_frequency()

    # Calculate the total number of payments given the parameters:
    n = self.get_total_number_of_payments_per_frequency()

    # Variables used as number holders.
    p = self._loan_amount
    mortgage = None
    top = None
    bottom = None

    top = r + 1
    top = math.pow(top, n)
    top = r * top

    bottom = r + 1
    bottom = math.pow(bottom, n)
    bottom = bottom - 1

    if bottom == 0:
        return Money(amount=0.00, currency=self._currency)

    mortgage = (top / bottom)
    mortgage = mortgage * p

    return mortgage
python
def get_mortgage_payment_per_payment_frequency(self):
    """
    Function will return the amount paid per payment based on the frequency.
    """
    # Calculate the interest rate per the payment parameters:
    r = self.get_interest_rate_per_payment_frequency()

    # Calculate the total number of payments given the parameters:
    n = self.get_total_number_of_payments_per_frequency()

    # Variables used as number holders.
    p = self._loan_amount
    mortgage = None
    top = None
    bottom = None

    top = r + 1
    top = math.pow(top, n)
    top = r * top

    bottom = r + 1
    bottom = math.pow(bottom, n)
    bottom = bottom - 1

    if bottom == 0:
        return Money(amount=0.00, currency=self._currency)

    mortgage = (top / bottom)
    mortgage = mortgage * p

    return mortgage
[ "def", "get_mortgage_payment_per_payment_frequency", "(", "self", ")", ":", "# Calculate the interest rate per the payment parameters:", "r", "=", "self", ".", "get_interest_rate_per_payment_frequency", "(", ")", "# Calculate the total number of payments given the parameters:", "n", "=", "self", ".", "get_total_number_of_payments_per_frequency", "(", ")", "# Variables used as number holders.", "p", "=", "self", ".", "_loan_amount", "mortgage", "=", "None", "top", "=", "None", "bottom", "=", "None", "top", "=", "r", "+", "1", "top", "=", "math", ".", "pow", "(", "top", ",", "n", ")", "top", "=", "r", "*", "top", "bottom", "=", "r", "+", "1", "bottom", "=", "math", ".", "pow", "(", "bottom", ",", "n", ")", "bottom", "=", "bottom", "-", "1", "if", "bottom", "==", "0", ":", "return", "Money", "(", "amount", "=", "0.00", ",", "currency", "=", "self", ".", "_currency", ")", "mortgage", "=", "(", "top", "/", "bottom", ")", "mortgage", "=", "mortgage", "*", "p", "return", "mortgage" ]
Function will return the amount paid per payment based on the frequency.
[ "Function", "will", "return", "the", "amount", "paid", "per", "payment", "based", "on", "the", "frequency", "." ]
27697d2c12afdd56308f1012e25bb231c1e24ecf
https://github.com/MikaSoftware/py-mortgagekit/blob/27697d2c12afdd56308f1012e25bb231c1e24ecf/mortgagekit/calculator.py#L103-L133
train
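The arithmetic above is the standard amortization formula M = P * r * (1 + r)^n / ((1 + r)^n - 1). A self-contained numeric check with made-up loan terms (no Money or currency handling):

import math

p = 250000.0   # loan amount
r = 0.05 / 12  # interest rate per monthly payment
n = 25 * 12    # total number of monthly payments

growth = math.pow(1 + r, n)
payment = p * (r * growth) / (growth - 1)
print(round(payment, 2))  # about 1461.48 per month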
SpringerPE/python-cfconfigurator
cfconfigurator/cf.py
CF.info
def info(self): """Gets info endpoint. Used to perform login auth.""" url = self.api_url + self.info_url resp = self.session.get(url) if resp.status_code != 200: error = {'description': "Info HTTP response not valid"} raise CFException(error, resp.status_code) try: info = resp.json() except ValueError as e: error = {'description': "Info HTTP response not valid, %s" % str(e)} raise CFException(error, resp.status_code) return info
python
def info(self): """Gets info endpoint. Used to perform login auth.""" url = self.api_url + self.info_url resp = self.session.get(url) if resp.status_code != 200: error = {'description': "Info HTTP response not valid"} raise CFException(error, resp.status_code) try: info = resp.json() except ValueError as e: error = {'description': "Info HTTP response not valid, %s" % str(e)} raise CFException(error, resp.status_code) return info
[ "def", "info", "(", "self", ")", ":", "url", "=", "self", ".", "api_url", "+", "self", ".", "info_url", "resp", "=", "self", ".", "session", ".", "get", "(", "url", ")", "if", "resp", ".", "status_code", "!=", "200", ":", "error", "=", "{", "'description'", ":", "\"Info HTTP response not valid\"", "}", "raise", "CFException", "(", "error", ",", "resp", ".", "status_code", ")", "try", ":", "info", "=", "resp", ".", "json", "(", ")", "except", "ValueError", "as", "e", ":", "error", "=", "{", "'description'", ":", "\"Info HTTP response not valid, %s\"", "%", "str", "(", "e", ")", "}", "raise", "CFException", "(", "error", ",", "resp", ".", "status_code", ")", "return", "info" ]
Gets info endpoint. Used to perform login auth.
[ "Gets", "info", "endpoint", ".", "Used", "to", "perform", "login", "auth", "." ]
198b4e00cd9e362abee726c0242c1d5f986eb073
https://github.com/SpringerPE/python-cfconfigurator/blob/198b4e00cd9e362abee726c0242c1d5f986eb073/cfconfigurator/cf.py#L90-L102
train
SpringerPE/python-cfconfigurator
cfconfigurator/cf.py
CF.clean_blobstore_cache
def clean_blobstore_cache(self): """Deletes all of the existing buildpack caches in the blobstore""" url = self.api_url + self.blobstores_builpack_cache_url resp, rcode = self.request('DELETE', url) if rcode != 202: raise CFException(resp, rcode) return resp
python
def clean_blobstore_cache(self): """Deletes all of the existing buildpack caches in the blobstore""" url = self.api_url + self.blobstores_builpack_cache_url resp, rcode = self.request('DELETE', url) if rcode != 202: raise CFException(resp, rcode) return resp
[ "def", "clean_blobstore_cache", "(", "self", ")", ":", "url", "=", "self", ".", "api_url", "+", "self", ".", "blobstores_builpack_cache_url", "resp", ",", "rcode", "=", "self", ".", "request", "(", "'DELETE'", ",", "url", ")", "if", "rcode", "!=", "202", ":", "raise", "CFException", "(", "resp", ",", "rcode", ")", "return", "resp" ]
Deletes all of the existing buildpack caches in the blobstore
[ "Deletes", "all", "of", "the", "existing", "buildpack", "caches", "in", "the", "blobstore" ]
198b4e00cd9e362abee726c0242c1d5f986eb073
https://github.com/SpringerPE/python-cfconfigurator/blob/198b4e00cd9e362abee726c0242c1d5f986eb073/cfconfigurator/cf.py#L192-L198
train
marcosfelt/cheminventory_python
cheminventory/api.py
ChemInventory.search
def search(self, query, locations: list=None):
    '''Search using the CAS number, barcode or chemical name
    '''
    cas_number = re.search(r"\b[1-9]{1}[0-9]{1,5}-\d{2}-\d\b", str(query))
    if cas_number:
        query = cas_number[0]
        search_type = 'cas'
    else:
        try:
            query = int(query)
            search_type = 'barcode'
        except ValueError:
            query = f"%{query}%"
            search_type = 'name'
    if not locations:
        locations = self.get_locations(filter_to_my_group=True)
        locations = [loc.inventory_id for loc in locations]
    data = {
        'groupid': self.groupid,
        'searchtype': search_type,
        'searchterm': query,
        'limitlocations': locations + [1]
    }
    r = self._post('search-search', referer_path='search', data=data)
    #return a list of container objects
    if r['searchresults']['containers']:
        containers = []
        for container in r['searchresults']['containers']:
            loc = Location(name=container.get('location'))
            ct = Container(
                inventory_id = container.get('id'),
                compound_id = container.get('sid'),
                name=container.get('containername'),
                location=loc,
                size=container.get('size'),
                smiles=container.get('smiles'),
                cas=container.get('cas'),
                comments=container.get('comments'),
                barcode=container.get('barcode'),
                supplier=container.get('supplier'),
                date_acquired=container.get('dateacquired'),
                owner=container.get('owner'))
            containers.append(ct)
        return containers
    else:
        return []
python
def search(self, query, locations: list=None):
    '''Search using the CAS number, barcode or chemical name
    '''
    cas_number = re.search(r"\b[1-9]{1}[0-9]{1,5}-\d{2}-\d\b", str(query))
    if cas_number:
        query = cas_number[0]
        search_type = 'cas'
    else:
        try:
            query = int(query)
            search_type = 'barcode'
        except ValueError:
            query = f"%{query}%"
            search_type = 'name'
    if not locations:
        locations = self.get_locations(filter_to_my_group=True)
        locations = [loc.inventory_id for loc in locations]
    data = {
        'groupid': self.groupid,
        'searchtype': search_type,
        'searchterm': query,
        'limitlocations': locations + [1]
    }
    r = self._post('search-search', referer_path='search', data=data)
    #return a list of container objects
    if r['searchresults']['containers']:
        containers = []
        for container in r['searchresults']['containers']:
            loc = Location(name=container.get('location'))
            ct = Container(
                inventory_id = container.get('id'),
                compound_id = container.get('sid'),
                name=container.get('containername'),
                location=loc,
                size=container.get('size'),
                smiles=container.get('smiles'),
                cas=container.get('cas'),
                comments=container.get('comments'),
                barcode=container.get('barcode'),
                supplier=container.get('supplier'),
                date_acquired=container.get('dateacquired'),
                owner=container.get('owner'))
            containers.append(ct)
        return containers
    else:
        return []
[ "def", "search", "(", "self", ",", "query", ",", "locations", ":", "list", "=", "None", ")", ":", "cas_number", "=", "re", ".", "search", "(", "r\"\\b[1-9]{1}[0-9]{1,5}-\\d{2}-\\d\\b\"", ",", "str", "(", "query", ")", ")", "if", "cas_number", ":", "query", "=", "cas_number", "[", "0", "]", "search_type", "=", "'cas'", "else", ":", "try", ":", "query", "=", "int", "(", "query", ")", "search_type", "=", "'barcode'", "except", "ValueError", ":", "query", "=", "f\"%{query}%\"", "search_type", "=", "'name'", "if", "not", "locations", ":", "locations", "=", "self", ".", "get_locations", "(", "filter_to_my_group", "=", "True", ")", "locations", "=", "[", "loc", ".", "inventory_id", "for", "loc", "in", "locations", "]", "data", "=", "{", "'groupid'", ":", "self", ".", "groupid", ",", "'searchtype'", ":", "search_type", ",", "'searchterm'", ":", "query", ",", "'limitlocations'", ":", "locations", "+", "[", "1", "]", "}", "r", "=", "self", ".", "_post", "(", "'search-search'", ",", "referer_path", "=", "'search'", ",", "data", "=", "data", ")", "#return a list of container objects", "if", "r", "[", "'searchresults'", "]", "[", "'containers'", "]", ":", "containers", "=", "[", "]", "for", "container", "in", "r", "[", "'searchresults'", "]", "[", "'containers'", "]", ":", "loc", "=", "Location", "(", "name", "=", "container", ".", "get", "(", "'location'", ")", ")", "ct", "=", "Container", "(", "inventory_id", "=", "container", ".", "get", "(", "'id'", ")", ",", "compound_id", "=", "container", ".", "get", "(", "'sid'", ")", ",", "name", "=", "container", ".", "get", "(", "'containername'", ")", ",", "location", "=", "loc", ",", "size", "=", "container", ".", "get", "(", "'size'", ")", ",", "smiles", "=", "container", ".", "get", "(", "'smiles'", ")", ",", "cas", "=", "container", ".", "get", "(", "'cas'", ")", ",", "comments", "=", "container", ".", "get", "(", "'comments'", ")", ",", "barcode", "=", "container", ".", "get", "(", "'barcode'", ")", ",", "supplier", "=", "container", ".", "get", "(", "'supplier'", ")", ",", "date_acquired", "=", "container", ".", "get", "(", "'dateacquired'", ")", ",", "owner", "=", "container", ".", "get", "(", "'owner'", ")", ")", "containers", ".", "append", "(", "ct", ")", "return", "containers", "else", ":", "return", "[", "]" ]
Search using the CAS number, barcode or chemical name
[ "Search", "using", "the", "CAS", "number", "barcode", "or", "chemical", "name" ]
fa7d67a3741ba7095b30377ac52842997a649012
https://github.com/marcosfelt/cheminventory_python/blob/fa7d67a3741ba7095b30377ac52842997a649012/cheminventory/api.py#L44-L90
train
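Given an authenticated client (construction details omitted), a hedged sketch of driving search(); the queries are illustrative, and every Container attribute used here is set in the listing above:

# 'inventory' is assumed to be an authenticated ChemInventory instance.
for query in ["64-17-5", "acetone", 100042]:  # CAS, name and barcode searches
    for container in inventory.search(query):
        print(container.name, container.location.name, container.barcode)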
marcosfelt/cheminventory_python
cheminventory/api.py
ChemInventory.get_groups
def get_groups(self): '''Retrieve groups listed in ChemInventory''' resp = self._post('general-retrievelocations', 'locations') final_resp = [] if resp['groupinfo']: for group in resp['groupinfo']: final_resp.append(Group( name=group.get('name'), inventory_id=group.get('id') )) return final_resp
python
def get_groups(self): '''Retrieve groups listed in ChemInventory''' resp = self._post('general-retrievelocations', 'locations') final_resp = [] if resp['groupinfo']: for group in resp['groupinfo']: final_resp.append(Group( name=group.get('name'), inventory_id=group.get('id') )) return final_resp
[ "def", "get_groups", "(", "self", ")", ":", "resp", "=", "self", ".", "_post", "(", "'general-retrievelocations'", ",", "'locations'", ")", "final_resp", "=", "[", "]", "if", "resp", "[", "'groupinfo'", "]", ":", "for", "group", "in", "resp", "[", "'groupinfo'", "]", ":", "final_resp", ".", "append", "(", "Group", "(", "name", "=", "group", ".", "get", "(", "'name'", ")", ",", "inventory_id", "=", "group", ".", "get", "(", "'id'", ")", ")", ")", "return", "final_resp" ]
Retrieve groups listed in ChemInventory
[ "Retrieve", "groups", "listed", "in", "ChemInventory" ]
fa7d67a3741ba7095b30377ac52842997a649012
https://github.com/marcosfelt/cheminventory_python/blob/fa7d67a3741ba7095b30377ac52842997a649012/cheminventory/api.py#L120-L130
train
marcosfelt/cheminventory_python
cheminventory/api.py
ChemInventory.get_locations
def get_locations(self, filter_to_my_group=False): """Retrieve Locations listed in ChemInventory""" resp = self._post('general-retrievelocations', 'locations') groups = {} if resp['groupinfo']: for group in resp['groupinfo']: groups[group['id']] = Group( name=group.get('name'), inventory_id=group.get('id') ) final_resp = [] if resp['data']: if filter_to_my_group: resp['data'] = {self.groupid: resp['data'][self.groupid]} for groupid, sublocation in resp['data'].items(): if type(sublocation) is dict: sublocation = [loc for _, loc in sublocation.items()] sublocation = flatten_list(sublocation) if type(sublocation) is list: sublocation = flatten_list(sublocation) for location in sublocation: group = groups[groupid] final_resp.append(Location( name=location.get('name'), inventory_id=location.get('id'), parent=location.get('parent'), group=group, barcode=location.get('barcode') )) return final_resp
python
def get_locations(self, filter_to_my_group=False): """Retrieve Locations listed in ChemInventory""" resp = self._post('general-retrievelocations', 'locations') groups = {} if resp['groupinfo']: for group in resp['groupinfo']: groups[group['id']] = Group( name=group.get('name'), inventory_id=group.get('id') ) final_resp = [] if resp['data']: if filter_to_my_group: resp['data'] = {self.groupid: resp['data'][self.groupid]} for groupid, sublocation in resp['data'].items(): if type(sublocation) is dict: sublocation = [loc for _, loc in sublocation.items()] sublocation = flatten_list(sublocation) if type(sublocation) is list: sublocation = flatten_list(sublocation) for location in sublocation: group = groups[groupid] final_resp.append(Location( name=location.get('name'), inventory_id=location.get('id'), parent=location.get('parent'), group=group, barcode=location.get('barcode') )) return final_resp
[ "def", "get_locations", "(", "self", ",", "filter_to_my_group", "=", "False", ")", ":", "resp", "=", "self", ".", "_post", "(", "'general-retrievelocations'", ",", "'locations'", ")", "groups", "=", "{", "}", "if", "resp", "[", "'groupinfo'", "]", ":", "for", "group", "in", "resp", "[", "'groupinfo'", "]", ":", "groups", "[", "group", "[", "'id'", "]", "]", "=", "Group", "(", "name", "=", "group", ".", "get", "(", "'name'", ")", ",", "inventory_id", "=", "group", ".", "get", "(", "'id'", ")", ")", "final_resp", "=", "[", "]", "if", "resp", "[", "'data'", "]", ":", "if", "filter_to_my_group", ":", "resp", "[", "'data'", "]", "=", "{", "self", ".", "groupid", ":", "resp", "[", "'data'", "]", "[", "self", ".", "groupid", "]", "}", "for", "groupid", ",", "sublocation", "in", "resp", "[", "'data'", "]", ".", "items", "(", ")", ":", "if", "type", "(", "sublocation", ")", "is", "dict", ":", "sublocation", "=", "[", "loc", "for", "_", ",", "loc", "in", "sublocation", ".", "items", "(", ")", "]", "sublocation", "=", "flatten_list", "(", "sublocation", ")", "if", "type", "(", "sublocation", ")", "is", "list", ":", "sublocation", "=", "flatten_list", "(", "sublocation", ")", "for", "location", "in", "sublocation", ":", "group", "=", "groups", "[", "groupid", "]", "final_resp", ".", "append", "(", "Location", "(", "name", "=", "location", ".", "get", "(", "'name'", ")", ",", "inventory_id", "=", "location", ".", "get", "(", "'id'", ")", ",", "parent", "=", "location", ".", "get", "(", "'parent'", ")", ",", "group", "=", "group", ",", "barcode", "=", "location", ".", "get", "(", "'barcode'", ")", ")", ")", "return", "final_resp" ]
Retrieve Locations listed in ChemInventory
[ "Retrieve", "Locations", "listed", "in", "ChemInventory" ]
fa7d67a3741ba7095b30377ac52842997a649012
https://github.com/marcosfelt/cheminventory_python/blob/fa7d67a3741ba7095b30377ac52842997a649012/cheminventory/api.py#L132-L161
train
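get_locations() has to cope with the locations payload arriving either as a dict keyed by id or as (possibly nested) lists, which is why it leans on flatten_list. The real helper lives elsewhere in the package, so the following is an assumed equivalent:

def flatten_list(items):
    # Assumed behaviour: recursively flatten nested lists into one flat list.
    flat = []
    for item in items:
        if isinstance(item, list):
            flat.extend(flatten_list(item))
        else:
            flat.append(item)
    return flat

print(flatten_list([[{"id": 1}], [[{"id": 2}], {"id": 3}]]))
# [{'id': 1}, {'id': 2}, {'id': 3}]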
marcosfelt/cheminventory_python
cheminventory/api.py
ChemInventory.get_containers
def get_containers(self, include_only=[]):
    """Download all the containers owned by a group

    Arguments
    ---------
    include_only: List containing `Group` or `Location` objects
        Search only over a list of groups or locations
    """
    locations = self.get_locations()
    if len(locations) == 0:
        raise ValueError("No locations for containers exist in Cheminventory")
    final_locations = []
    if include_only:
        for location in locations:
            check = location in include_only or location.group in include_only
            if check:
                final_locations.append(location)
        if len(final_locations)==0:
            raise ValueError(f"Location(s) or group(s) {include_only} is/are not in the database.")
    else:
        final_locations = locations
    containers = []
    for location in final_locations:
        containers += self._get_location_containers(location.inventory_id)
    return containers
python
def get_containers(self, include_only=[]):
    """Download all the containers owned by a group

    Arguments
    ---------
    include_only: List containing `Group` or `Location` objects
        Search only over a list of groups or locations
    """
    locations = self.get_locations()
    if len(locations) == 0:
        raise ValueError("No locations for containers exist in Cheminventory")
    final_locations = []
    if include_only:
        for location in locations:
            check = location in include_only or location.group in include_only
            if check:
                final_locations.append(location)
        if len(final_locations)==0:
            raise ValueError(f"Location(s) or group(s) {include_only} is/are not in the database.")
    else:
        final_locations = locations
    containers = []
    for location in final_locations:
        containers += self._get_location_containers(location.inventory_id)
    return containers
[ "def", "get_containers", "(", "self", ",", "include_only", "=", "[", "]", ")", ":", "locations", "=", "self", ".", "get_locations", "(", ")", "if", "len", "(", "locations", ")", "==", "0", ":", "raise", "ValueError", "(", "\"No locations for containers exist in Cheminventory\"", ")", "final_locations", "=", "[", "]", "if", "include_only", ":", "for", "location", "in", "locations", ":", "check", "=", "location", "in", "include_only", "or", "location", ".", "group", "in", "include_only", "if", "check", ":", "final_locations", ".", "append", "(", "location", ")", "if", "len", "(", "final_locations", ")", "==", "0", ":", "raise", "ValueError", "(", "f\"Location(s) or group(s) {include_only} is/are not in the database.\"", ")", "else", ":", "final_locations", "=", "locations", "containers", "=", "[", "]", "for", "location", "in", "final_locations", ":", "containers", "+=", "self", ".", "_get_location_containers", "(", "location", ".", "inventory_id", ")", "return", "containers" ]
Download all the containers owned by a group Arguments --------- include_only: List containing `Group` or `Location` objects Search only over a list of groups or locations
[ "Download", "all", "the", "containers", "owned", "by", "a", "group" ]
fa7d67a3741ba7095b30377ac52842997a649012
https://github.com/marcosfelt/cheminventory_python/blob/fa7d67a3741ba7095b30377ac52842997a649012/cheminventory/api.py#L163-L189
train
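A hedged usage sketch for get_containers(); include_only is matched by membership against both the Location itself and its Group, so passing a single Group restricts the per-location downloads to that group. The group name is hypothetical:

# 'inventory' is assumed to be an authenticated ChemInventory instance.
groups = inventory.get_groups()
mine = [g for g in groups if g.name == "Smith Lab"]  # hypothetical group name

containers = inventory.get_containers(include_only=mine)
print(len(containers))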
mkoura/dump2polarion
dump2polarion/utils.py
get_unicode_str
def get_unicode_str(obj): """Makes sure obj is a unicode string.""" if isinstance(obj, six.text_type): return obj if isinstance(obj, six.binary_type): return obj.decode("utf-8", errors="ignore") return six.text_type(obj)
python
def get_unicode_str(obj): """Makes sure obj is a unicode string.""" if isinstance(obj, six.text_type): return obj if isinstance(obj, six.binary_type): return obj.decode("utf-8", errors="ignore") return six.text_type(obj)
[ "def", "get_unicode_str", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "six", ".", "text_type", ")", ":", "return", "obj", "if", "isinstance", "(", "obj", ",", "six", ".", "binary_type", ")", ":", "return", "obj", ".", "decode", "(", "\"utf-8\"", ",", "errors", "=", "\"ignore\"", ")", "return", "six", ".", "text_type", "(", "obj", ")" ]
Makes sure obj is a unicode string.
[ "Makes", "sure", "obj", "is", "a", "unicode", "string", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/utils.py#L30-L36
train
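get_unicode_str() is the usual six-based text coercion; a quick self-contained check of its three branches, with the function body copied from the listing above:

import six

def get_unicode_str(obj):
    if isinstance(obj, six.text_type):
        return obj
    if isinstance(obj, six.binary_type):
        return obj.decode("utf-8", errors="ignore")
    return six.text_type(obj)

assert get_unicode_str(u"ok") == u"ok"                # text passes through
assert get_unicode_str(b"caf\xc3\xa9") == u"caf\xe9"  # bytes decoded as UTF-8
assert get_unicode_str(42) == u"42"                   # everything else via text_type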
mkoura/dump2polarion
dump2polarion/utils.py
init_log
def init_log(log_level): """Initializes logging.""" log_level = log_level or "INFO" logging.basicConfig( format="%(name)s:%(levelname)s:%(message)s", level=getattr(logging, log_level.upper(), logging.INFO), )
python
def init_log(log_level): """Initializes logging.""" log_level = log_level or "INFO" logging.basicConfig( format="%(name)s:%(levelname)s:%(message)s", level=getattr(logging, log_level.upper(), logging.INFO), )
[ "def", "init_log", "(", "log_level", ")", ":", "log_level", "=", "log_level", "or", "\"INFO\"", "logging", ".", "basicConfig", "(", "format", "=", "\"%(name)s:%(levelname)s:%(message)s\"", ",", "level", "=", "getattr", "(", "logging", ",", "log_level", ".", "upper", "(", ")", ",", "logging", ".", "INFO", ")", ",", ")" ]
Initializes logging.
[ "Initializes", "logging", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/utils.py#L39-L45
train
mkoura/dump2polarion
dump2polarion/utils.py
get_xml_root
def get_xml_root(xml_file): """Returns XML root.""" try: xml_root = etree.parse(os.path.expanduser(xml_file), NO_BLANKS_PARSER).getroot() # pylint: disable=broad-except except Exception as err: raise Dump2PolarionException("Failed to parse XML file '{}': {}".format(xml_file, err)) return xml_root
python
def get_xml_root(xml_file): """Returns XML root.""" try: xml_root = etree.parse(os.path.expanduser(xml_file), NO_BLANKS_PARSER).getroot() # pylint: disable=broad-except except Exception as err: raise Dump2PolarionException("Failed to parse XML file '{}': {}".format(xml_file, err)) return xml_root
[ "def", "get_xml_root", "(", "xml_file", ")", ":", "try", ":", "xml_root", "=", "etree", ".", "parse", "(", "os", ".", "path", ".", "expanduser", "(", "xml_file", ")", ",", "NO_BLANKS_PARSER", ")", ".", "getroot", "(", ")", "# pylint: disable=broad-except", "except", "Exception", "as", "err", ":", "raise", "Dump2PolarionException", "(", "\"Failed to parse XML file '{}': {}\"", ".", "format", "(", "xml_file", ",", "err", ")", ")", "return", "xml_root" ]
Returns XML root.
[ "Returns", "XML", "root", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/utils.py#L104-L111
train
mkoura/dump2polarion
dump2polarion/utils.py
get_xml_root_from_str
def get_xml_root_from_str(xml_str):
    """Returns XML root from string."""
    try:
        xml_root = etree.fromstring(xml_str.encode("utf-8"), NO_BLANKS_PARSER)
    # pylint: disable=broad-except
    except Exception as err:
        raise Dump2PolarionException("Failed to parse XML string: {}".format(err))
    return xml_root
python
def get_xml_root_from_str(xml_str):
    """Returns XML root from string."""
    try:
        xml_root = etree.fromstring(xml_str.encode("utf-8"), NO_BLANKS_PARSER)
    # pylint: disable=broad-except
    except Exception as err:
        raise Dump2PolarionException("Failed to parse XML string: {}".format(err))
    return xml_root
[ "def", "get_xml_root_from_str", "(", "xml_str", ")", ":", "try", ":", "xml_root", "=", "etree", ".", "fromstring", "(", "xml_str", ".", "encode", "(", "\"utf-8\"", ")", ",", "NO_BLANKS_PARSER", ")", "# pylint: disable=broad-except", "except", "Exception", "as", "err", ":", "raise", "Dump2PolarionException", "(", "\"Failed to parse XML string: {}\"", ".", "format", "(", "err", ")", ")", "return", "xml_root" ]
Returns XML root from string.
[ "Returns", "XML", "root", "from", "string", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/utils.py#L114-L121
train
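Both XML helpers above funnel any parser failure into Dump2PolarionException. The same guard pattern in isolation, using lxml with a blanks-stripping parser like the module's NO_BLANKS_PARSER (the exception type below is a stand-in):

from lxml import etree

NO_BLANKS_PARSER = etree.XMLParser(remove_blank_text=True)  # mirrors the module constant

def parse_xml_str(xml_str):
    try:
        return etree.fromstring(xml_str.encode("utf-8"), NO_BLANKS_PARSER)
    except Exception as err:  # lxml raises XMLSyntaxError; caught broadly, as upstream
        raise ValueError("Failed to parse XML string: {}".format(err))

root = parse_xml_str('<testsuite name="demo"><testcase/></testsuite>')
print(root.tag, len(root))  # testsuite 1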
mkoura/dump2polarion
dump2polarion/utils.py
prettify_xml
def prettify_xml(xml_root): """Returns pretty-printed string representation of element tree.""" xml_string = etree.tostring(xml_root, encoding="utf-8", xml_declaration=True, pretty_print=True) return get_unicode_str(xml_string)
python
def prettify_xml(xml_root): """Returns pretty-printed string representation of element tree.""" xml_string = etree.tostring(xml_root, encoding="utf-8", xml_declaration=True, pretty_print=True) return get_unicode_str(xml_string)
[ "def", "prettify_xml", "(", "xml_root", ")", ":", "xml_string", "=", "etree", ".", "tostring", "(", "xml_root", ",", "encoding", "=", "\"utf-8\"", ",", "xml_declaration", "=", "True", ",", "pretty_print", "=", "True", ")", "return", "get_unicode_str", "(", "xml_string", ")" ]
Returns pretty-printed string representation of element tree.
[ "Returns", "pretty", "-", "printed", "string", "representation", "of", "element", "tree", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/utils.py#L129-L132
train
mkoura/dump2polarion
dump2polarion/utils.py
get_session
def get_session(credentials, config):
    """Gets requests session."""
    session = requests.Session()
    session.verify = False

    auth_url = config.get("auth_url")
    if auth_url:
        cookie = session.post(
            auth_url,
            data={
                "j_username": credentials[0],
                "j_password": credentials[1],
                "submit": "Log In",
                "rememberme": "true",
            },
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        if not cookie:
            raise Dump2PolarionException("Cookie was not retrieved from {}.".format(auth_url))
    else:
        # TODO: can be removed once basic auth is discontinued on prod
        session.auth = credentials

    return session
python
def get_session(credentials, config):
    """Gets requests session."""
    session = requests.Session()
    session.verify = False

    auth_url = config.get("auth_url")
    if auth_url:
        cookie = session.post(
            auth_url,
            data={
                "j_username": credentials[0],
                "j_password": credentials[1],
                "submit": "Log In",
                "rememberme": "true",
            },
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        if not cookie:
            raise Dump2PolarionException("Cookie was not retrieved from {}.".format(auth_url))
    else:
        # TODO: can be removed once basic auth is discontinued on prod
        session.auth = credentials

    return session
[ "def", "get_session", "(", "credentials", ",", "config", ")", ":", "session", "=", "requests", ".", "Session", "(", ")", "session", ".", "verify", "=", "False", "auth_url", "=", "config", ".", "get", "(", "\"auth_url\"", ")", "if", "auth_url", ":", "cookie", "=", "session", ".", "post", "(", "auth_url", ",", "data", "=", "{", "\"j_username\"", ":", "credentials", "[", "0", "]", ",", "\"j_password\"", ":", "credentials", "[", "1", "]", ",", "\"submit\"", ":", "\"Log In\"", ",", "\"rememberme\"", ":", "\"true\"", ",", "}", ",", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/x-www-form-urlencoded\"", "}", ",", ")", "if", "not", "cookie", ":", "raise", "Dump2PolarionException", "(", "\"Cookie was not retrieved from {}.\"", ".", "format", "(", "auth_url", ")", ")", "else", ":", "# TODO: can be removed once basic auth is discontinued on prod", "session", ".", "auth", "=", "credentials", "return", "session" ]
Gets requests session.
[ "Gets", "requests", "session", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/utils.py#L135-L158
train
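get_session() relies on two requests behaviours worth calling out: the Session keeps the auth cookie from the POST for later calls, and the "if not cookie" test works because a Response is falsy for 4xx/5xx statuses. A standalone sketch of the cookie branch; the URL and credentials are placeholders:

import requests

credentials = ("user", "password")  # placeholder credentials
auth_url = "https://polarion.example.com/polarion/j_security_check"  # hypothetical URL

session = requests.Session()
session.verify = False  # mirrors the helper; only acceptable for internal test instances
resp = session.post(
    auth_url,
    data={"j_username": credentials[0], "j_password": credentials[1],
          "submit": "Log In", "rememberme": "true"},
    headers={"Content-Type": "application/x-www-form-urlencoded"},
)
if not resp:  # Response is falsy for error statuses, the check the helper relies on
    raise RuntimeError("Cookie was not retrieved from {}".format(auth_url))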
mkoura/dump2polarion
dump2polarion/utils.py
find_vcs_root
def find_vcs_root(path, dirs=(".git",)): """Searches up from a given path to find the project root.""" prev, path = None, os.path.abspath(path) while prev != path: if any(os.path.exists(os.path.join(path, d)) for d in dirs): return path prev, path = path, os.path.abspath(os.path.join(path, os.pardir)) return None
python
def find_vcs_root(path, dirs=(".git",)): """Searches up from a given path to find the project root.""" prev, path = None, os.path.abspath(path) while prev != path: if any(os.path.exists(os.path.join(path, d)) for d in dirs): return path prev, path = path, os.path.abspath(os.path.join(path, os.pardir)) return None
[ "def", "find_vcs_root", "(", "path", ",", "dirs", "=", "(", "\".git\"", ",", ")", ")", ":", "prev", ",", "path", "=", "None", ",", "os", ".", "path", ".", "abspath", "(", "path", ")", "while", "prev", "!=", "path", ":", "if", "any", "(", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "d", ")", ")", "for", "d", "in", "dirs", ")", ":", "return", "path", "prev", ",", "path", "=", "path", ",", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "path", ",", "os", ".", "pardir", ")", ")", "return", "None" ]
Searches up from a given path to find the project root.
[ "Searches", "up", "from", "a", "given", "path", "to", "find", "the", "project", "root", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/utils.py#L161-L168
train
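find_vcs_root() terminates because os.pardir stops changing the path once it reaches the filesystem root. Usage, with the function copied verbatim from the listing above:

import os

def find_vcs_root(path, dirs=(".git",)):
    prev, path = None, os.path.abspath(path)
    while prev != path:
        if any(os.path.exists(os.path.join(path, d)) for d in dirs):
            return path
        prev, path = path, os.path.abspath(os.path.join(path, os.pardir))
    return None

# From anywhere inside a checkout this prints the repo root; outside one, None.
print(find_vcs_root(os.getcwd()))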
klen/zeta-library
zetalibrary/packer.py
Packer.pack
def pack(self): " Pack and save file " pack_name = self.args.prefix + op.basename(self.path) pack_path = op.join(self.args.output or self.basedir, pack_name) self.out("Packing: %s" % self.path) self.out("Output: %s" % pack_path) if self.args.format: ext = self.get_ext(self.path) self.parsers[ext] = self.args.format out = "".join(self.merge(self.parse(self.path))) try: open(pack_path, 'w').write(out) self.out("Linked file saved as: '%s'." % pack_path) except IOError, ex: raise ZetaError(ex)
python
def pack(self): " Pack and save file " pack_name = self.args.prefix + op.basename(self.path) pack_path = op.join(self.args.output or self.basedir, pack_name) self.out("Packing: %s" % self.path) self.out("Output: %s" % pack_path) if self.args.format: ext = self.get_ext(self.path) self.parsers[ext] = self.args.format out = "".join(self.merge(self.parse(self.path))) try: open(pack_path, 'w').write(out) self.out("Linked file saved as: '%s'." % pack_path) except IOError, ex: raise ZetaError(ex)
[ "def", "pack", "(", "self", ")", ":", "pack_name", "=", "self", ".", "args", ".", "prefix", "+", "op", ".", "basename", "(", "self", ".", "path", ")", "pack_path", "=", "op", ".", "join", "(", "self", ".", "args", ".", "output", "or", "self", ".", "basedir", ",", "pack_name", ")", "self", ".", "out", "(", "\"Packing: %s\"", "%", "self", ".", "path", ")", "self", ".", "out", "(", "\"Output: %s\"", "%", "pack_path", ")", "if", "self", ".", "args", ".", "format", ":", "ext", "=", "self", ".", "get_ext", "(", "self", ".", "path", ")", "self", ".", "parsers", "[", "ext", "]", "=", "self", ".", "args", ".", "format", "out", "=", "\"\"", ".", "join", "(", "self", ".", "merge", "(", "self", ".", "parse", "(", "self", ".", "path", ")", ")", ")", "try", ":", "open", "(", "pack_path", ",", "'w'", ")", ".", "write", "(", "out", ")", "self", ".", "out", "(", "\"Linked file saved as: '%s'.\"", "%", "pack_path", ")", "except", "IOError", ",", "ex", ":", "raise", "ZetaError", "(", "ex", ")" ]
Pack and save file
[ "Pack", "and", "save", "file" ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/packer.py#L22-L39
train
klen/zeta-library
zetalibrary/packer.py
Packer.parse_path
def parse_path(self, path, curdir):
    " Normalize path. "
    if path.startswith('http://'):
        return path

    elif path.startswith('zeta://'):
        zpath = op.join(LIBDIR, path[len('zeta://'):])
        if self.args.directory and not op.exists(zpath):
            return op.join(self.args.directory, path[len('zeta://'):])
        return zpath

    return op.abspath(op.normpath(op.join(curdir, path)))
python
def parse_path(self, path, curdir):
    " Normalize path. "
    if path.startswith('http://'):
        return path

    elif path.startswith('zeta://'):
        zpath = op.join(LIBDIR, path[len('zeta://'):])
        if self.args.directory and not op.exists(zpath):
            return op.join(self.args.directory, path[len('zeta://'):])
        return zpath

    return op.abspath(op.normpath(op.join(curdir, path)))
[ "def", "parse_path", "(", "self", ",", "path", ",", "curdir", ")", ":", "if", "path", ".", "startswith", "(", "'http://'", ")", ":", "return", "path", "elif", "path", ".", "startswith", "(", "'zeta://'", ")", ":", "zpath", "=", "op", ".", "join", "(", "LIBDIR", ",", "path", "[", "len", "(", "'zeta://'", ")", ":", "]", ")", "if", "self", ".", "args", ".", "directory", "and", "not", "op", ".", "exists", "(", "zpath", ")", ":", "return", "op", ".", "join", "(", "self", ".", "args", ".", "directory", ",", "path", "[", "len", "(", "'zeta://'", ")", ":", "]", ")", "return", "zpath", "return", "op", ".", "abspath", "(", "op", ".", "normpath", "(", "op", ".", "join", "(", "curdir", ",", "path", ")", ")", ")" ]
Normalize path.
[ "Normalize", "path", "." ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/packer.py#L92-L103
train
klen/zeta-library
zetalibrary/packer.py
Packer.out
def out(msg, error=False): " Send message to shell " pipe = stdout if error: pipe = stderr msg = color_msg(msg, "warning") pipe.write("%s\n" % msg)
python
def out(msg, error=False): " Send message to shell " pipe = stdout if error: pipe = stderr msg = color_msg(msg, "warning") pipe.write("%s\n" % msg)
[ "def", "out", "(", "msg", ",", "error", "=", "False", ")", ":", "pipe", "=", "stdout", "if", "error", ":", "pipe", "=", "stderr", "msg", "=", "color_msg", "(", "msg", ",", "\"warning\"", ")", "pipe", ".", "write", "(", "\"%s\\n\"", "%", "msg", ")" ]
Send message to shell
[ "Send", "message", "to", "shell" ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/packer.py#L106-L113
train
klen/zeta-library
zetalibrary/scss/__init__.py
_image_url
def _image_url(image, dst_color=None, src_color=None): """ Generates a path to an asset found relative to the project's images directory. """ if src_color and dst_color: if not Image: raise Exception("Images manipulation require PIL") file = StringValue(image).value path = None if callable(STATIC_ROOT): try: _file, _storage = list(STATIC_ROOT(file))[0] d_obj = _storage.modified_time(_file) filetime = int(time.mktime(d_obj.timetuple())) if dst_color: path = _storage.open(_file) except: filetime = 'NA' else: _path = os.path.join(STATIC_ROOT, file) if os.path.exists(_path): filetime = int(os.path.getmtime(_path)) if dst_color: path = open(_path, 'rb') else: filetime = 'NA' BASE_URL = STATIC_URL if path: src_color = tuple(int(round(c)) for c in ColorValue( src_color).value[:3]) if src_color else (0, 0, 0) dst_color = [int(round(c)) for c in ColorValue(dst_color).value[:3]] file_name, file_ext = os.path.splitext( os.path.normpath(file).replace('\\', '_').replace('/', '_')) key = (filetime, src_color, dst_color) key = file_name + '-' + base64.urlsafe_b64encode( hashlib.md5(repr(key)).digest()).rstrip('=').replace('-', '_') asset_file = key + file_ext asset_path = os.path.join(ASSETS_ROOT, asset_file) if os.path.exists(asset_path): file = asset_file BASE_URL = ASSETS_URL filetime = int(os.path.getmtime(asset_path)) else: image = Image.open(path) image = image.convert("RGBA") pixdata = image.load() for y in xrange(image.size[1]): for x in xrange(image.size[0]): if pixdata[x, y][:3] == src_color: new_color = tuple(dst_color + [pixdata[x, y][3]]) pixdata[x, y] = new_color try: image.save(asset_path) file = asset_file BASE_URL = ASSETS_URL except IOError: log.exception("Error while saving image") url = 'url("%s%s?_=%s")' % (BASE_URL, file, filetime) return StringValue(url)
python
def _image_url(image, dst_color=None, src_color=None): """ Generates a path to an asset found relative to the project's images directory. """ if src_color and dst_color: if not Image: raise Exception("Images manipulation require PIL") file = StringValue(image).value path = None if callable(STATIC_ROOT): try: _file, _storage = list(STATIC_ROOT(file))[0] d_obj = _storage.modified_time(_file) filetime = int(time.mktime(d_obj.timetuple())) if dst_color: path = _storage.open(_file) except: filetime = 'NA' else: _path = os.path.join(STATIC_ROOT, file) if os.path.exists(_path): filetime = int(os.path.getmtime(_path)) if dst_color: path = open(_path, 'rb') else: filetime = 'NA' BASE_URL = STATIC_URL if path: src_color = tuple(int(round(c)) for c in ColorValue( src_color).value[:3]) if src_color else (0, 0, 0) dst_color = [int(round(c)) for c in ColorValue(dst_color).value[:3]] file_name, file_ext = os.path.splitext( os.path.normpath(file).replace('\\', '_').replace('/', '_')) key = (filetime, src_color, dst_color) key = file_name + '-' + base64.urlsafe_b64encode( hashlib.md5(repr(key)).digest()).rstrip('=').replace('-', '_') asset_file = key + file_ext asset_path = os.path.join(ASSETS_ROOT, asset_file) if os.path.exists(asset_path): file = asset_file BASE_URL = ASSETS_URL filetime = int(os.path.getmtime(asset_path)) else: image = Image.open(path) image = image.convert("RGBA") pixdata = image.load() for y in xrange(image.size[1]): for x in xrange(image.size[0]): if pixdata[x, y][:3] == src_color: new_color = tuple(dst_color + [pixdata[x, y][3]]) pixdata[x, y] = new_color try: image.save(asset_path) file = asset_file BASE_URL = ASSETS_URL except IOError: log.exception("Error while saving image") url = 'url("%s%s?_=%s")' % (BASE_URL, file, filetime) return StringValue(url)
[ "def", "_image_url", "(", "image", ",", "dst_color", "=", "None", ",", "src_color", "=", "None", ")", ":", "if", "src_color", "and", "dst_color", ":", "if", "not", "Image", ":", "raise", "Exception", "(", "\"Images manipulation require PIL\"", ")", "file", "=", "StringValue", "(", "image", ")", ".", "value", "path", "=", "None", "if", "callable", "(", "STATIC_ROOT", ")", ":", "try", ":", "_file", ",", "_storage", "=", "list", "(", "STATIC_ROOT", "(", "file", ")", ")", "[", "0", "]", "d_obj", "=", "_storage", ".", "modified_time", "(", "_file", ")", "filetime", "=", "int", "(", "time", ".", "mktime", "(", "d_obj", ".", "timetuple", "(", ")", ")", ")", "if", "dst_color", ":", "path", "=", "_storage", ".", "open", "(", "_file", ")", "except", ":", "filetime", "=", "'NA'", "else", ":", "_path", "=", "os", ".", "path", ".", "join", "(", "STATIC_ROOT", ",", "file", ")", "if", "os", ".", "path", ".", "exists", "(", "_path", ")", ":", "filetime", "=", "int", "(", "os", ".", "path", ".", "getmtime", "(", "_path", ")", ")", "if", "dst_color", ":", "path", "=", "open", "(", "_path", ",", "'rb'", ")", "else", ":", "filetime", "=", "'NA'", "BASE_URL", "=", "STATIC_URL", "if", "path", ":", "src_color", "=", "tuple", "(", "int", "(", "round", "(", "c", ")", ")", "for", "c", "in", "ColorValue", "(", "src_color", ")", ".", "value", "[", ":", "3", "]", ")", "if", "src_color", "else", "(", "0", ",", "0", ",", "0", ")", "dst_color", "=", "[", "int", "(", "round", "(", "c", ")", ")", "for", "c", "in", "ColorValue", "(", "dst_color", ")", ".", "value", "[", ":", "3", "]", "]", "file_name", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "normpath", "(", "file", ")", ".", "replace", "(", "'\\\\'", ",", "'_'", ")", ".", "replace", "(", "'/'", ",", "'_'", ")", ")", "key", "=", "(", "filetime", ",", "src_color", ",", "dst_color", ")", "key", "=", "file_name", "+", "'-'", "+", "base64", ".", "urlsafe_b64encode", "(", "hashlib", ".", "md5", "(", "repr", "(", "key", ")", ")", ".", "digest", "(", ")", ")", ".", "rstrip", "(", "'='", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", "asset_file", "=", "key", "+", "file_ext", "asset_path", "=", "os", ".", "path", ".", "join", "(", "ASSETS_ROOT", ",", "asset_file", ")", "if", "os", ".", "path", ".", "exists", "(", "asset_path", ")", ":", "file", "=", "asset_file", "BASE_URL", "=", "ASSETS_URL", "filetime", "=", "int", "(", "os", ".", "path", ".", "getmtime", "(", "asset_path", ")", ")", "else", ":", "image", "=", "Image", ".", "open", "(", "path", ")", "image", "=", "image", ".", "convert", "(", "\"RGBA\"", ")", "pixdata", "=", "image", ".", "load", "(", ")", "for", "y", "in", "xrange", "(", "image", ".", "size", "[", "1", "]", ")", ":", "for", "x", "in", "xrange", "(", "image", ".", "size", "[", "0", "]", ")", ":", "if", "pixdata", "[", "x", ",", "y", "]", "[", ":", "3", "]", "==", "src_color", ":", "new_color", "=", "tuple", "(", "dst_color", "+", "[", "pixdata", "[", "x", ",", "y", "]", "[", "3", "]", "]", ")", "pixdata", "[", "x", ",", "y", "]", "=", "new_color", "try", ":", "image", ".", "save", "(", "asset_path", ")", "file", "=", "asset_file", "BASE_URL", "=", "ASSETS_URL", "except", "IOError", ":", "log", ".", "exception", "(", "\"Error while saving image\"", ")", "url", "=", "'url(\"%s%s?_=%s\")'", "%", "(", "BASE_URL", ",", "file", ",", "filetime", ")", "return", "StringValue", "(", "url", ")" ]
Generates a path to an asset found relative to the project's images directory.
[ "Generates", "a", "path", "to", "an", "asset", "found", "relative", "to", "the", "project", "s", "images", "directory", "." ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L3003-L3064
train
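The asset-cache filename in _image_url is derived by hashing (filetime, src_color, dst_color) and splicing the digest into the sanitized file name. A standalone Python 3 sketch of that key derivation (the original is Python 2, so the repr is encoded here before hashing):

import base64
import hashlib
import os

def asset_cache_name(file, filetime, src_color, dst_color):
    file_name, file_ext = os.path.splitext(
        os.path.normpath(file).replace("\\", "_").replace("/", "_"))
    key = repr((filetime, src_color, dst_color)).encode("utf-8")
    digest = base64.urlsafe_b64encode(hashlib.md5(key).digest())
    key_str = digest.decode("ascii").rstrip("=").replace("-", "_")
    return file_name + "-" + key_str + file_ext

print(asset_cache_name("img/logo.png", 1577836800, (0, 0, 0), [255, 0, 0]))
# img_logo-<22-character digest>.png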
klen/zeta-library
zetalibrary/scss/__init__.py
_image_width
def _image_width(image): """ Returns the width of the image found at the path supplied by `image` relative to your project's images directory. """ if not Image: raise Exception("Images manipulation require PIL") file = StringValue(image).value path = None try: width = sprite_images[file][0] except KeyError: width = 0 if callable(STATIC_ROOT): try: _file, _storage = list(STATIC_ROOT(file))[0] path = _storage.open(_file) except: pass else: _path = os.path.join(STATIC_ROOT, file) if os.path.exists(_path): path = open(_path, 'rb') if path: image = Image.open(path) size = image.size width = size[0] sprite_images[file] = size return NumberValue(width, 'px')
python
def _image_width(image): """ Returns the width of the image found at the path supplied by `image` relative to your project's images directory. """ if not Image: raise Exception("Images manipulation require PIL") file = StringValue(image).value path = None try: width = sprite_images[file][0] except KeyError: width = 0 if callable(STATIC_ROOT): try: _file, _storage = list(STATIC_ROOT(file))[0] path = _storage.open(_file) except: pass else: _path = os.path.join(STATIC_ROOT, file) if os.path.exists(_path): path = open(_path, 'rb') if path: image = Image.open(path) size = image.size width = size[0] sprite_images[file] = size return NumberValue(width, 'px')
[ "def", "_image_width", "(", "image", ")", ":", "if", "not", "Image", ":", "raise", "Exception", "(", "\"Images manipulation require PIL\"", ")", "file", "=", "StringValue", "(", "image", ")", ".", "value", "path", "=", "None", "try", ":", "width", "=", "sprite_images", "[", "file", "]", "[", "0", "]", "except", "KeyError", ":", "width", "=", "0", "if", "callable", "(", "STATIC_ROOT", ")", ":", "try", ":", "_file", ",", "_storage", "=", "list", "(", "STATIC_ROOT", "(", "file", ")", ")", "[", "0", "]", "path", "=", "_storage", ".", "open", "(", "_file", ")", "except", ":", "pass", "else", ":", "_path", "=", "os", ".", "path", ".", "join", "(", "STATIC_ROOT", ",", "file", ")", "if", "os", ".", "path", ".", "exists", "(", "_path", ")", ":", "path", "=", "open", "(", "_path", ",", "'rb'", ")", "if", "path", ":", "image", "=", "Image", ".", "open", "(", "path", ")", "size", "=", "image", ".", "size", "width", "=", "size", "[", "0", "]", "sprite_images", "[", "file", "]", "=", "size", "return", "NumberValue", "(", "width", ",", "'px'", ")" ]
Returns the width of the image found at the path supplied by `image` relative to your project's images directory.
[ "Returns", "the", "width", "of", "the", "image", "found", "at", "the", "path", "supplied", "by", "image", "relative", "to", "your", "project", "s", "images", "directory", "." ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L3067-L3095
train
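A hedged usage sketch (not from the source): it assumes STATIC_ROOT is a plain directory containing 'logo.png'; the return value is the module's NumberValue in px units.

# The measured (width, height) is memoized in sprite_images, so a second
# call for the same file skips reopening the image. Falls back to 0 when
# the file cannot be found.
print(_image_width('logo.png'))  # e.g. 120px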
klen/zeta-library
zetalibrary/scss/__init__.py
_nth
def _nth(lst, n=1): """ Return the Nth item in the list """ n = StringValue(n).value lst = ListValue(lst).value try: n = int(float(n)) - 1 n = n % len(lst) except: if n.lower() == 'first': n = 0 elif n.lower() == 'last': n = -1 try: ret = lst[n] except KeyError: lst = [v for k, v in sorted(lst.items()) if isinstance(k, int)] try: ret = lst[n] except: ret = '' return ret.__class__(ret)
python
def _nth(lst, n=1): """ Return the Nth item in the list """ n = StringValue(n).value lst = ListValue(lst).value try: n = int(float(n)) - 1 n = n % len(lst) except: if n.lower() == 'first': n = 0 elif n.lower() == 'last': n = -1 try: ret = lst[n] except KeyError: lst = [v for k, v in sorted(lst.items()) if isinstance(k, int)] try: ret = lst[n] except: ret = '' return ret.__class__(ret)
[ "def", "_nth", "(", "lst", ",", "n", "=", "1", ")", ":", "n", "=", "StringValue", "(", "n", ")", ".", "value", "lst", "=", "ListValue", "(", "lst", ")", ".", "value", "try", ":", "n", "=", "int", "(", "float", "(", "n", ")", ")", "-", "1", "n", "=", "n", "%", "len", "(", "lst", ")", "except", ":", "if", "n", ".", "lower", "(", ")", "==", "'first'", ":", "n", "=", "0", "elif", "n", ".", "lower", "(", ")", "==", "'last'", ":", "n", "=", "-", "1", "try", ":", "ret", "=", "lst", "[", "n", "]", "except", "KeyError", ":", "lst", "=", "[", "v", "for", "k", ",", "v", "in", "sorted", "(", "lst", ".", "items", "(", ")", ")", "if", "isinstance", "(", "k", ",", "int", ")", "]", "try", ":", "ret", "=", "lst", "[", "n", "]", "except", ":", "ret", "=", "''", "return", "ret", ".", "__class__", "(", "ret", ")" ]
Return the Nth item in the list
[ "Return", "the", "Nth", "item", "in", "the", "string" ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L3246-L3268
train
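The 1-based indexing, wrap-around, and 'first'/'last' keywords can be illustrated with a plain-Python equivalent; this is a sketch of the semantics, not the library code (it skips the StringValue/ListValue wrapping).

def nth(lst, n=1):
    # Plain-Python sketch of the Sass nth() semantics above:
    # 1-based index with wrap-around, plus 'first'/'last' keywords.
    try:
        i = (int(float(n)) - 1) % len(lst)
    except (TypeError, ValueError):
        i = 0 if str(n).lower() == 'first' else -1
    return lst[i]

assert nth(['a', 'b', 'c'], 2) == 'b'
assert nth(['a', 'b', 'c'], 'last') == 'c'
assert nth(['a', 'b', 'c'], 4) == 'a'  # wraps around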
klen/zeta-library
zetalibrary/scss/__init__.py
Scss.normalize_selectors
def normalize_selectors(self, _selectors, extra_selectors=None, extra_parents=None): """ Normalizes or extends selectors in a string. An optional extra parameter that can be a list of extra selectors to be added to the final normalized selectors string. """ # Fix tabs and spaces in selectors _selectors = _spaces_re.sub(' ', _selectors) if isinstance(extra_selectors, basestring): extra_selectors = extra_selectors.split(',') if isinstance(extra_parents, basestring): extra_parents = extra_parents.split('&') parents = set() if ' extends ' in _selectors: selectors = set() for key in _selectors.split(','): child, _, parent = key.partition(' extends ') child = child.strip() parent = parent.strip() selectors.add(child) parents.update( s.strip() for s in parent.split('&') if s.strip()) else: selectors = set( s.strip() for s in _selectors.split(',') if s.strip()) if extra_selectors: selectors.update(s.strip() for s in extra_selectors if s.strip()) selectors.discard('') if not selectors: return '' if extra_parents: parents.update(s.strip() for s in extra_parents if s.strip()) parents.discard('') if parents: return ','.join(sorted(selectors)) + ' extends ' + '&'.join(sorted(parents)) return ','.join(sorted(selectors))
python
def normalize_selectors(self, _selectors, extra_selectors=None, extra_parents=None): """ Normalizes or extends selectors in a string. An optional extra parameter that can be a list of extra selectors to be added to the final normalized selectors string. """ # Fix tabs and spaces in selectors _selectors = _spaces_re.sub(' ', _selectors) if isinstance(extra_selectors, basestring): extra_selectors = extra_selectors.split(',') if isinstance(extra_parents, basestring): extra_parents = extra_parents.split('&') parents = set() if ' extends ' in _selectors: selectors = set() for key in _selectors.split(','): child, _, parent = key.partition(' extends ') child = child.strip() parent = parent.strip() selectors.add(child) parents.update( s.strip() for s in parent.split('&') if s.strip()) else: selectors = set( s.strip() for s in _selectors.split(',') if s.strip()) if extra_selectors: selectors.update(s.strip() for s in extra_selectors if s.strip()) selectors.discard('') if not selectors: return '' if extra_parents: parents.update(s.strip() for s in extra_parents if s.strip()) parents.discard('') if parents: return ','.join(sorted(selectors)) + ' extends ' + '&'.join(sorted(parents)) return ','.join(sorted(selectors))
[ "def", "normalize_selectors", "(", "self", ",", "_selectors", ",", "extra_selectors", "=", "None", ",", "extra_parents", "=", "None", ")", ":", "# Fixe tabs and spaces in selectors", "_selectors", "=", "_spaces_re", ".", "sub", "(", "' '", ",", "_selectors", ")", "if", "isinstance", "(", "extra_selectors", ",", "basestring", ")", ":", "extra_selectors", "=", "extra_selectors", ".", "split", "(", "','", ")", "if", "isinstance", "(", "extra_parents", ",", "basestring", ")", ":", "extra_parents", "=", "extra_parents", ".", "split", "(", "'&'", ")", "parents", "=", "set", "(", ")", "if", "' extends '", "in", "_selectors", ":", "selectors", "=", "set", "(", ")", "for", "key", "in", "_selectors", ".", "split", "(", "','", ")", ":", "child", ",", "_", ",", "parent", "=", "key", ".", "partition", "(", "' extends '", ")", "child", "=", "child", ".", "strip", "(", ")", "parent", "=", "parent", ".", "strip", "(", ")", "selectors", ".", "add", "(", "child", ")", "parents", ".", "update", "(", "s", ".", "strip", "(", ")", "for", "s", "in", "parent", ".", "split", "(", "'&'", ")", "if", "s", ".", "strip", "(", ")", ")", "else", ":", "selectors", "=", "set", "(", "s", ".", "strip", "(", ")", "for", "s", "in", "_selectors", ".", "split", "(", "','", ")", "if", "s", ".", "strip", "(", ")", ")", "if", "extra_selectors", ":", "selectors", ".", "update", "(", "s", ".", "strip", "(", ")", "for", "s", "in", "extra_selectors", "if", "s", ".", "strip", "(", ")", ")", "selectors", ".", "discard", "(", "''", ")", "if", "not", "selectors", ":", "return", "''", "if", "extra_parents", ":", "parents", ".", "update", "(", "s", ".", "strip", "(", ")", "for", "s", "in", "extra_parents", "if", "s", ".", "strip", "(", ")", ")", "parents", ".", "discard", "(", "''", ")", "if", "parents", ":", "return", "','", ".", "join", "(", "sorted", "(", "selectors", ")", ")", "+", "' extends '", "+", "'&'", ".", "join", "(", "sorted", "(", "parents", ")", ")", "return", "','", ".", "join", "(", "sorted", "(", "selectors", ")", ")" ]
Normalizes or extends selectors in a string. An optional extra parameter that can be a list of extra selectors to be added to the final normalized selectors string.
[ "Normalizes", "or", "extends", "selectors", "in", "a", "string", ".", "An", "optional", "extra", "parameter", "that", "can", "be", "a", "list", "of", "extra", "selectors", "to", "be", "added", "to", "the", "final", "normalized", "selectors", "string", "." ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L684-L722
train
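The normalized form is easiest to see in isolation; the following standalone sketch (not the library method, and without the _spaces_re cleanup or the extra_* arguments) reproduces the joining and sorting rules.

def normalize(selectors):
    # Standalone sketch: children joined with ',', parents with '&',
    # both sorted, with an ' extends ' separator when parents exist.
    children, parents = set(), set()
    for key in selectors.split(','):
        child, _, parent = key.partition(' extends ')
        if child.strip():
            children.add(child.strip())
        parents.update(p.strip() for p in parent.split('&') if p.strip())
    out = ','.join(sorted(children))
    return out + ' extends ' + '&'.join(sorted(parents)) if parents else out

assert normalize('.a , .b extends .base') == '.a,.b extends .base'
assert normalize('.x extends .p1 & .p2') == '.x extends .p1&.p2'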
klen/zeta-library
zetalibrary/scss/__init__.py
Scss._get_properties
def _get_properties(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr): """ Implements properties and variables extraction """ prop, value = (_prop_split_re.split(c_property, 1) + [None])[:2] try: is_var = (c_property[len(prop)] == '=') except IndexError: is_var = False prop = prop.strip() prop = self.do_glob_math( prop, rule[CONTEXT], rule[OPTIONS], rule, True) if prop: if value: value = value.strip() value = self.calculate( value, rule[CONTEXT], rule[OPTIONS], rule) _prop = (scope or '') + prop if is_var or prop.startswith('$') and value is not None: if isinstance(value, basestring): if '!default' in value: if _prop in rule[CONTEXT]: value = None else: value = value.replace( '!default', '').replace('  ', ' ').strip() elif isinstance(value, ListValue): value = ListValue(value) for k, v in value.value.items(): if v == '!default': if _prop in rule[CONTEXT]: value = None else: del value.value[k] value = value.first( ) if len(value) == 1 else value break if value is not None: rule[CONTEXT][_prop] = value else: _prop = self.apply_vars( _prop, rule[CONTEXT], rule[OPTIONS], rule, True) rule[PROPERTIES].append((c_lineno, _prop, to_str(value) if value is not None else None))
python
def _get_properties(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr): """ Implements properties and variables extraction """ prop, value = (_prop_split_re.split(c_property, 1) + [None])[:2] try: is_var = (c_property[len(prop)] == '=') except IndexError: is_var = False prop = prop.strip() prop = self.do_glob_math( prop, rule[CONTEXT], rule[OPTIONS], rule, True) if prop: if value: value = value.strip() value = self.calculate( value, rule[CONTEXT], rule[OPTIONS], rule) _prop = (scope or '') + prop if is_var or prop.startswith('$') and value is not None: if isinstance(value, basestring): if '!default' in value: if _prop in rule[CONTEXT]: value = None else: value = value.replace( '!default', '').replace('  ', ' ').strip() elif isinstance(value, ListValue): value = ListValue(value) for k, v in value.value.items(): if v == '!default': if _prop in rule[CONTEXT]: value = None else: del value.value[k] value = value.first( ) if len(value) == 1 else value break if value is not None: rule[CONTEXT][_prop] = value else: _prop = self.apply_vars( _prop, rule[CONTEXT], rule[OPTIONS], rule, True) rule[PROPERTIES].append((c_lineno, _prop, to_str(value) if value is not None else None))
[ "def", "_get_properties", "(", "self", ",", "rule", ",", "p_selectors", ",", "p_parents", ",", "p_children", ",", "scope", ",", "media", ",", "c_lineno", ",", "c_property", ",", "c_codestr", ")", ":", "prop", ",", "value", "=", "(", "_prop_split_re", ".", "split", "(", "c_property", ",", "1", ")", "+", "[", "None", "]", ")", "[", ":", "2", "]", "try", ":", "is_var", "=", "(", "c_property", "[", "len", "(", "prop", ")", "]", "==", "'='", ")", "except", "IndexError", ":", "is_var", "=", "False", "prop", "=", "prop", ".", "strip", "(", ")", "prop", "=", "self", ".", "do_glob_math", "(", "prop", ",", "rule", "[", "CONTEXT", "]", ",", "rule", "[", "OPTIONS", "]", ",", "rule", ",", "True", ")", "if", "prop", ":", "if", "value", ":", "value", "=", "value", ".", "strip", "(", ")", "value", "=", "self", ".", "calculate", "(", "value", ",", "rule", "[", "CONTEXT", "]", ",", "rule", "[", "OPTIONS", "]", ",", "rule", ")", "_prop", "=", "(", "scope", "or", "''", ")", "+", "prop", "if", "is_var", "or", "prop", ".", "startswith", "(", "'$'", ")", "and", "value", "is", "not", "None", ":", "if", "isinstance", "(", "value", ",", "basestring", ")", ":", "if", "'!default'", "in", "value", ":", "if", "_prop", "in", "rule", "[", "CONTEXT", "]", ":", "value", "=", "None", "else", ":", "value", "=", "value", ".", "replace", "(", "'!default'", ",", "''", ")", ".", "replace", "(", "' '", ",", "' '", ")", ".", "strip", "(", ")", "elif", "isinstance", "(", "value", ",", "ListValue", ")", ":", "value", "=", "ListValue", "(", "value", ")", "for", "k", ",", "v", "in", "value", ".", "value", ".", "items", "(", ")", ":", "if", "v", "==", "'!default'", ":", "if", "_prop", "in", "rule", "[", "CONTEXT", "]", ":", "value", "=", "None", "else", ":", "del", "value", ".", "value", "[", "k", "]", "value", "=", "value", ".", "first", "(", ")", "if", "len", "(", "value", ")", "==", "1", "else", "value", "break", "if", "value", "is", "not", "None", ":", "rule", "[", "CONTEXT", "]", "[", "_prop", "]", "=", "value", "else", ":", "_prop", "=", "self", ".", "apply_vars", "(", "_prop", ",", "rule", "[", "CONTEXT", "]", ",", "rule", "[", "OPTIONS", "]", ",", "rule", ",", "True", ")", "rule", "[", "PROPERTIES", "]", ".", "append", "(", "(", "c_lineno", ",", "_prop", ",", "to_str", "(", "value", ")", "if", "value", "is", "not", "None", "else", "None", ")", ")" ]
Implements properties and variables extraction
[ "Implements", "properties", "and", "variables", "extraction" ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L1407-L1450
train
klen/zeta-library
zetalibrary/scss/__init__.py
Scss.link_with_parents
def link_with_parents(self, parent, c_selectors, c_rules): """ Link with a parent for the current child rule. If parents found, returns a list of parent rules to the child """ parent_found = None for p_selectors, p_rules in self.parts.items(): _p_selectors, _, _ = p_selectors.partition(' extends ') _p_selectors = _p_selectors.split(',') new_selectors = set() found = False # Finds all the parent selectors and parent selectors with another # bind selectors behind. For example, if `.specialClass extends # .baseClass`, # and there is a `.baseClass` selector, the extension should create # `.specialClass` for that rule, but if there's also a `.baseClass # a` # it also should create `.specialClass a` for p_selector in _p_selectors: if parent in p_selector: # get the new child selector to add (same as the parent # selector but with the child name) # since selectors can be together, separated with # or . # (i.e. something.parent) check that too: for c_selector in c_selectors.split(','): # Get whatever is different between the two selectors: _c_selector, _parent = c_selector, parent lcp = self.longest_common_prefix(_c_selector, _parent) if lcp: _c_selector = _c_selector[lcp:] _parent = _parent[lcp:] lcs = self.longest_common_suffix(_c_selector, _parent) if lcs: _c_selector = _c_selector[:-lcs] _parent = _parent[:-lcs] if _c_selector and _parent: # Get the new selectors: prev_symbol = '(?<![#.:])' if _parent[ 0] in ('#', '.', ':') else r'(?<![-\w#.:])' post_symbol = r'(?![-\w])' new_parent = re.sub(prev_symbol + _parent + post_symbol, _c_selector, p_selector) if p_selector != new_parent: new_selectors.add(new_parent) found = True if found: # add parent: parent_found = parent_found or [] parent_found.extend(p_rules) if new_selectors: new_selectors = self.normalize_selectors( p_selectors, new_selectors) # rename node: if new_selectors != p_selectors: del self.parts[p_selectors] self.parts.setdefault(new_selectors, []) self.parts[new_selectors].extend(p_rules) deps = set() # save child dependencies: for c_rule in c_rules or []: c_rule[SELECTORS] = c_selectors # re-set the SELECTORS for the rules deps.add(c_rule[POSITION]) for p_rule in p_rules: p_rule[SELECTORS] = new_selectors # re-set the SELECTORS for the rules p_rule[DEPS].update( deps) # position is the "index" of the object return parent_found
python
def link_with_parents(self, parent, c_selectors, c_rules): """ Link with a parent for the current child rule. If parents found, returns a list of parent rules to the child """ parent_found = None for p_selectors, p_rules in self.parts.items(): _p_selectors, _, _ = p_selectors.partition(' extends ') _p_selectors = _p_selectors.split(',') new_selectors = set() found = False # Finds all the parent selectors and parent selectors with another # bind selectors behind. For example, if `.specialClass extends # .baseClass`, # and there is a `.baseClass` selector, the extension should create # `.specialClass` for that rule, but if there's also a `.baseClass # a` # it also should create `.specialClass a` for p_selector in _p_selectors: if parent in p_selector: # get the new child selector to add (same as the parent # selector but with the child name) # since selectors can be together, separated with # or . # (i.e. something.parent) check that too: for c_selector in c_selectors.split(','): # Get whatever is different between the two selectors: _c_selector, _parent = c_selector, parent lcp = self.longest_common_prefix(_c_selector, _parent) if lcp: _c_selector = _c_selector[lcp:] _parent = _parent[lcp:] lcs = self.longest_common_suffix(_c_selector, _parent) if lcs: _c_selector = _c_selector[:-lcs] _parent = _parent[:-lcs] if _c_selector and _parent: # Get the new selectors: prev_symbol = '(?<![#.:])' if _parent[ 0] in ('#', '.', ':') else r'(?<![-\w#.:])' post_symbol = r'(?![-\w])' new_parent = re.sub(prev_symbol + _parent + post_symbol, _c_selector, p_selector) if p_selector != new_parent: new_selectors.add(new_parent) found = True if found: # add parent: parent_found = parent_found or [] parent_found.extend(p_rules) if new_selectors: new_selectors = self.normalize_selectors( p_selectors, new_selectors) # rename node: if new_selectors != p_selectors: del self.parts[p_selectors] self.parts.setdefault(new_selectors, []) self.parts[new_selectors].extend(p_rules) deps = set() # save child dependencies: for c_rule in c_rules or []: c_rule[SELECTORS] = c_selectors # re-set the SELECTORS for the rules deps.add(c_rule[POSITION]) for p_rule in p_rules: p_rule[SELECTORS] = new_selectors # re-set the SELECTORS for the rules p_rule[DEPS].update( deps) # position is the "index" of the object return parent_found
[ "def", "link_with_parents", "(", "self", ",", "parent", ",", "c_selectors", ",", "c_rules", ")", ":", "parent_found", "=", "None", "for", "p_selectors", ",", "p_rules", "in", "self", ".", "parts", ".", "items", "(", ")", ":", "_p_selectors", ",", "_", ",", "_", "=", "p_selectors", ".", "partition", "(", "' extends '", ")", "_p_selectors", "=", "_p_selectors", ".", "split", "(", "','", ")", "new_selectors", "=", "set", "(", ")", "found", "=", "False", "# Finds all the parent selectors and parent selectors with another", "# bind selectors behind. For example, if `.specialClass extends", "# .baseClass`,", "# and there is a `.baseClass` selector, the extension should create", "# `.specialClass` for that rule, but if there's also a `.baseClass", "# a`", "# it also should create `.specialClass a`", "for", "p_selector", "in", "_p_selectors", ":", "if", "parent", "in", "p_selector", ":", "# get the new child selector to add (same as the parent", "# selector but with the child name)", "# since selectors can be together, separated with # or .", "# (i.e. something.parent) check that too:", "for", "c_selector", "in", "c_selectors", ".", "split", "(", "','", ")", ":", "# Get whatever is different between the two selectors:", "_c_selector", ",", "_parent", "=", "c_selector", ",", "parent", "lcp", "=", "self", ".", "longest_common_prefix", "(", "_c_selector", ",", "_parent", ")", "if", "lcp", ":", "_c_selector", "=", "_c_selector", "[", "lcp", ":", "]", "_parent", "=", "_parent", "[", "lcp", ":", "]", "lcs", "=", "self", ".", "longest_common_suffix", "(", "_c_selector", ",", "_parent", ")", "if", "lcs", ":", "_c_selector", "=", "_c_selector", "[", ":", "-", "lcs", "]", "_parent", "=", "_parent", "[", ":", "-", "lcs", "]", "if", "_c_selector", "and", "_parent", ":", "# Get the new selectors:", "prev_symbol", "=", "'(?<![#.:])'", "if", "_parent", "[", "0", "]", "in", "(", "'#'", ",", "'.'", ",", "':'", ")", "else", "r'(?<![-\\w#.:])'", "post_symbol", "=", "r'(?![-\\w])'", "new_parent", "=", "re", ".", "sub", "(", "prev_symbol", "+", "_parent", "+", "post_symbol", ",", "_c_selector", ",", "p_selector", ")", "if", "p_selector", "!=", "new_parent", ":", "new_selectors", ".", "add", "(", "new_parent", ")", "found", "=", "True", "if", "found", ":", "# add parent:", "parent_found", "=", "parent_found", "or", "[", "]", "parent_found", ".", "extend", "(", "p_rules", ")", "if", "new_selectors", ":", "new_selectors", "=", "self", ".", "normalize_selectors", "(", "p_selectors", ",", "new_selectors", ")", "# rename node:", "if", "new_selectors", "!=", "p_selectors", ":", "del", "self", ".", "parts", "[", "p_selectors", "]", "self", ".", "parts", ".", "setdefault", "(", "new_selectors", ",", "[", "]", ")", "self", ".", "parts", "[", "new_selectors", "]", ".", "extend", "(", "p_rules", ")", "deps", "=", "set", "(", ")", "# save child dependencies:", "for", "c_rule", "in", "c_rules", "or", "[", "]", ":", "c_rule", "[", "SELECTORS", "]", "=", "c_selectors", "# re-set the SELECTORS for the rules", "deps", ".", "add", "(", "c_rule", "[", "POSITION", "]", ")", "for", "p_rule", "in", "p_rules", ":", "p_rule", "[", "SELECTORS", "]", "=", "new_selectors", "# re-set the SELECTORS for the rules", "p_rule", "[", "DEPS", "]", ".", "update", "(", "deps", ")", "# position is the \"index\" of the object", "return", "parent_found" ]
Link with a parent for the current child rule. If parents found, returns a list of parent rules to the child
[ "Link", "with", "a", "parent", "for", "the", "current", "child", "rule", ".", "If", "parents", "found", "returns", "a", "list", "of", "parent", "rules", "to", "the", "child" ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L1494-L1567
train
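The selector rewrite at the heart of this method is a boundary-guarded re.sub; this standalone, runnable sketch reproduces just that substitution with illustrative selector names.

import re

# Standalone sketch of the parent->child substitution used above. Note the
# library passes the selector into the pattern unescaped, so '.' also acts
# as regex "any character"; the lookarounds guard the token boundaries.
parent, child, p_selector = '.baseClass', '.specialClass', '.baseClass a'
prev_symbol = '(?<![#.:])' if parent[0] in ('#', '.', ':') else r'(?<![-\w#.:])'
post_symbol = r'(?![-\w])'
new_selector = re.sub(prev_symbol + parent + post_symbol, child, p_selector)
print(new_selector)  # .specialClass a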
klen/zeta-library
zetalibrary/scss/__init__.py
Scss.parse_extends
def parse_extends(self): """ For each part, create the inheritance parts from the ' extends ' """ # To be able to manage multiple extends, you need to # destroy the actual node and create many nodes that have # mono extend. The first one gets all the css rules for _selectors, rules in self.parts.items(): if ' extends ' in _selectors: selectors, _, parent = _selectors.partition(' extends ') parents = parent.split('&') del self.parts[_selectors] for parent in parents: new_selectors = selectors + ' extends ' + parent self.parts.setdefault(new_selectors, []) self.parts[new_selectors].extend(rules) rules = [] # further rules extending other parents will be empty cnt = 0 parents_left = True while parents_left and cnt < 10: cnt += 1 parents_left = False for _selectors in self.parts.keys(): selectors, _, parent = _selectors.partition(' extends ') if parent: parents_left = True if _selectors not in self.parts: continue # Nodes might have been renamed while linking parents... rules = self.parts[_selectors] del self.parts[_selectors] self.parts.setdefault(selectors, []) self.parts[selectors].extend(rules) parents = self.link_with_parents(parent, selectors, rules) if parents is None: log.warn("Parent rule not found: %s", parent) else: # from the parent, inherit the context and the options: new_context = {} new_options = {} for parent in parents: new_context.update(parent[CONTEXT]) new_options.update(parent[OPTIONS]) for rule in rules: _new_context = new_context.copy() _new_context.update(rule[CONTEXT]) rule[CONTEXT] = _new_context _new_options = new_options.copy() _new_options.update(rule[OPTIONS]) rule[OPTIONS] = _new_options
python
def parse_extends(self): """ For each part, create the inheritance parts from the ' extends ' """ # To be able to manage multiple extends, you need to # destroy the actual node and create many nodes that have # mono extend. The first one gets all the css rules for _selectors, rules in self.parts.items(): if ' extends ' in _selectors: selectors, _, parent = _selectors.partition(' extends ') parents = parent.split('&') del self.parts[_selectors] for parent in parents: new_selectors = selectors + ' extends ' + parent self.parts.setdefault(new_selectors, []) self.parts[new_selectors].extend(rules) rules = [] # further rules extending other parents will be empty cnt = 0 parents_left = True while parents_left and cnt < 10: cnt += 1 parents_left = False for _selectors in self.parts.keys(): selectors, _, parent = _selectors.partition(' extends ') if parent: parents_left = True if _selectors not in self.parts: continue # Nodes might have been renamed while linking parents... rules = self.parts[_selectors] del self.parts[_selectors] self.parts.setdefault(selectors, []) self.parts[selectors].extend(rules) parents = self.link_with_parents(parent, selectors, rules) if parents is None: log.warn("Parent rule not found: %s", parent) else: # from the parent, inherit the context and the options: new_context = {} new_options = {} for parent in parents: new_context.update(parent[CONTEXT]) new_options.update(parent[OPTIONS]) for rule in rules: _new_context = new_context.copy() _new_context.update(rule[CONTEXT]) rule[CONTEXT] = _new_context _new_options = new_options.copy() _new_options.update(rule[OPTIONS]) rule[OPTIONS] = _new_options
[ "def", "parse_extends", "(", "self", ")", ":", "# To be able to manage multiple extends, you need to", "# destroy the actual node and create many nodes that have", "# mono extend. The first one gets all the css rules", "for", "_selectors", ",", "rules", "in", "self", ".", "parts", ".", "items", "(", ")", ":", "if", "' extends '", "in", "_selectors", ":", "selectors", ",", "_", ",", "parent", "=", "_selectors", ".", "partition", "(", "' extends '", ")", "parents", "=", "parent", ".", "split", "(", "'&'", ")", "del", "self", ".", "parts", "[", "_selectors", "]", "for", "parent", "in", "parents", ":", "new_selectors", "=", "selectors", "+", "' extends '", "+", "parent", "self", ".", "parts", ".", "setdefault", "(", "new_selectors", ",", "[", "]", ")", "self", ".", "parts", "[", "new_selectors", "]", ".", "extend", "(", "rules", ")", "rules", "=", "[", "]", "# further rules extending other parents will be empty", "cnt", "=", "0", "parents_left", "=", "True", "while", "parents_left", "and", "cnt", "<", "10", ":", "cnt", "+=", "1", "parents_left", "=", "False", "for", "_selectors", "in", "self", ".", "parts", ".", "keys", "(", ")", ":", "selectors", ",", "_", ",", "parent", "=", "_selectors", ".", "partition", "(", "' extends '", ")", "if", "parent", ":", "parents_left", "=", "True", "if", "_selectors", "not", "in", "self", ".", "parts", ":", "continue", "# Nodes might have been renamed while linking parents...", "rules", "=", "self", ".", "parts", "[", "_selectors", "]", "del", "self", ".", "parts", "[", "_selectors", "]", "self", ".", "parts", ".", "setdefault", "(", "selectors", ",", "[", "]", ")", "self", ".", "parts", "[", "selectors", "]", ".", "extend", "(", "rules", ")", "parents", "=", "self", ".", "link_with_parents", "(", "parent", ",", "selectors", ",", "rules", ")", "if", "parents", "is", "None", ":", "log", ".", "warn", "(", "\"Parent rule not found: %s\"", ",", "parent", ")", "else", ":", "# from the parent, inherit the context and the options:", "new_context", "=", "{", "}", "new_options", "=", "{", "}", "for", "parent", "in", "parents", ":", "new_context", ".", "update", "(", "parent", "[", "CONTEXT", "]", ")", "new_options", ".", "update", "(", "parent", "[", "OPTIONS", "]", ")", "for", "rule", "in", "rules", ":", "_new_context", "=", "new_context", ".", "copy", "(", ")", "_new_context", ".", "update", "(", "rule", "[", "CONTEXT", "]", ")", "rule", "[", "CONTEXT", "]", "=", "_new_context", "_new_options", "=", "new_options", ".", "copy", "(", ")", "_new_options", ".", "update", "(", "rule", "[", "OPTIONS", "]", ")", "rule", "[", "OPTIONS", "]", "=", "_new_options" ]
For each part, create the inheritance parts from the ' extends '
[ "For", "each", "part", "create", "the", "inheritance", "parts", "from", "the", "extends" ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L1570-L1624
train
klen/zeta-library
zetalibrary/scss/__init__.py
Value._wrap
def _wrap(fn): """ Wrapper function to allow calling any function using Value objects as parameters. """ def _func(*args): merged = None _args = [] for arg in args: if merged.__class__ != arg.__class__: if merged is None: merged = arg.__class__(None) else: merged = Value._merge_type(merged, arg)(None) merged.merge(arg) if isinstance(arg, Value): arg = arg.value _args.append(arg) merged.value = fn(*_args) return merged return _func
python
def _wrap(fn): """ Wrapper function to allow calling any function using Value objects as parameters. """ def _func(*args): merged = None _args = [] for arg in args: if merged.__class__ != arg.__class__: if merged is None: merged = arg.__class__(None) else: merged = Value._merge_type(merged, arg)(None) merged.merge(arg) if isinstance(arg, Value): arg = arg.value _args.append(arg) merged.value = fn(*_args) return merged return _func
[ "def", "_wrap", "(", "fn", ")", ":", "def", "_func", "(", "*", "args", ")", ":", "merged", "=", "None", "_args", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "merged", ".", "__class__", "!=", "arg", ".", "__class__", ":", "if", "merged", "is", "None", ":", "merged", "=", "arg", ".", "__class__", "(", "None", ")", "else", ":", "merged", "=", "Value", ".", "_merge_type", "(", "merged", ",", "arg", ")", "(", "None", ")", "merged", ".", "merge", "(", "arg", ")", "if", "isinstance", "(", "arg", ",", "Value", ")", ":", "arg", "=", "arg", ".", "value", "_args", ".", "append", "(", "arg", ")", "merged", ".", "value", "=", "fn", "(", "*", "_args", ")", "return", "merged", "return", "_func" ]
Wrapper function to allow calling any function using Value objects as parameters.
[ "Wrapper", "function", "to", "allow", "calling", "any", "function", "using", "Value", "objects", "as", "parameters", "." ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L3600-L3620
train
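In spirit, the wrapper lets plain numeric functions operate on wrapped values; here is a simplified standalone sketch with a minimal Num stand-in (the library's real Value hierarchy also merges unit and type information, which this omits).

import math

class Num:
    # Minimal stand-in for the library's NumberValue, just to show the idea.
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return 'Num(%r)' % self.value

def wrap(fn):
    # Unwrap .value from each argument, call fn, re-wrap the result.
    def _func(*args):
        raw = [a.value if isinstance(a, Num) else a for a in args]
        return Num(fn(*raw))
    return _func

wrapped_sin = wrap(math.sin)
print(wrapped_sin(Num(0.0)))  # Num(0.0)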
klen/zeta-library
zetalibrary/scss/__init__.py
Scanner.token
def token(self, i, restrict=None): """ Get the i'th token, and if i is one past the end, then scan for another token; restrict is a list of tokens that are allowed, or 0 for any token. """ tokens_len = len(self.tokens) if i == tokens_len: # We are at the end, get the next... tokens_len += self.scan(restrict) if i < tokens_len: if restrict and self.restrictions[i] and restrict > self.restrictions[i]: raise NotImplementedError( "Unimplemented: restriction set changed") return self.tokens[i] raise NoMoreTokens()
python
def token(self, i, restrict=None): """ Get the i'th token, and if i is one past the end, then scan for another token; restrict is a list of tokens that are allowed, or 0 for any token. """ tokens_len = len(self.tokens) if i == tokens_len: # We are at the end, get the next... tokens_len += self.scan(restrict) if i < tokens_len: if restrict and self.restrictions[i] and restrict > self.restrictions[i]: raise NotImplementedError( "Unimplemented: restriction set changed") return self.tokens[i] raise NoMoreTokens()
[ "def", "token", "(", "self", ",", "i", ",", "restrict", "=", "None", ")", ":", "tokens_len", "=", "len", "(", "self", ".", "tokens", ")", "if", "i", "==", "tokens_len", ":", "# We are at the end, ge the next...", "tokens_len", "+=", "self", ".", "scan", "(", "restrict", ")", "if", "i", "<", "tokens_len", ":", "if", "restrict", "and", "self", ".", "restrictions", "[", "i", "]", "and", "restrict", ">", "self", ".", "restrictions", "[", "i", "]", ":", "raise", "NotImplementedError", "(", "\"Unimplemented: restriction set changed\"", ")", "return", "self", ".", "tokens", "[", "i", "]", "raise", "NoMoreTokens", "(", ")" ]
Get the i'th token, and if i is one past the end, then scan for another token; restrict is a list of tokens that are allowed, or 0 for any token.
[ "Get", "the", "i", "th", "token", "and", "if", "i", "is", "one", "past", "the", "end", "then", "scan", "for", "another", "token", ";", "restrict", "is", "a", "list", "of", "tokens", "that", "are", "allowed", "or", "0", "for", "any", "token", "." ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L4563-L4577
train
klen/zeta-library
zetalibrary/scss/__init__.py
Scanner.scan
def scan(self, restrict): """ Should scan another token and add it to the list, self.tokens, and add the restriction to self.restrictions """ # Keep looking for a token, ignoring any in self.ignore while True: # Search the patterns for a match, with earlier # tokens in the list having preference best_pat = None best_pat_len = 0 for p, regexp in self.patterns: # First check to see if we're restricting to this token if restrict and p not in restrict and p not in self.ignore: continue m = regexp.match(self.input, self.pos) if m: # We got a match best_pat = p best_pat_len = len(m.group(0)) break # If we didn't find anything, raise an error if best_pat is None: msg = "Bad Token" if restrict: msg = "Trying to find one of " + ", ".join(restrict) raise SyntaxError(self.pos, msg) # If we found something that isn't to be ignored, return it if best_pat in self.ignore: # This token should be ignored .. self.pos += best_pat_len else: end_pos = self.pos + best_pat_len # Create a token with this data token = ( self.pos, end_pos, best_pat, self.input[self.pos:end_pos] ) self.pos = end_pos # Only add this token if it's not in the list # (to prevent looping) if not self.tokens or token != self.tokens[-1]: self.tokens.append(token) self.restrictions.append(restrict) return 1 break return 0
python
def scan(self, restrict): """ Should scan another token and add it to the list, self.tokens, and add the restriction to self.restrictions """ # Keep looking for a token, ignoring any in self.ignore while True: # Search the patterns for a match, with earlier # tokens in the list having preference best_pat = None best_pat_len = 0 for p, regexp in self.patterns: # First check to see if we're restricting to this token if restrict and p not in restrict and p not in self.ignore: continue m = regexp.match(self.input, self.pos) if m: # We got a match best_pat = p best_pat_len = len(m.group(0)) break # If we didn't find anything, raise an error if best_pat is None: msg = "Bad Token" if restrict: msg = "Trying to find one of " + ", ".join(restrict) raise SyntaxError(self.pos, msg) # If we found something that isn't to be ignored, return it if best_pat in self.ignore: # This token should be ignored .. self.pos += best_pat_len else: end_pos = self.pos + best_pat_len # Create a token with this data token = ( self.pos, end_pos, best_pat, self.input[self.pos:end_pos] ) self.pos = end_pos # Only add this token if it's not in the list # (to prevent looping) if not self.tokens or token != self.tokens[-1]: self.tokens.append(token) self.restrictions.append(restrict) return 1 break return 0
[ "def", "scan", "(", "self", ",", "restrict", ")", ":", "# Keep looking for a token, ignoring any in self.ignore", "while", "True", ":", "# Search the patterns for a match, with earlier", "# tokens in the list having preference", "best_pat", "=", "None", "best_pat_len", "=", "0", "for", "p", ",", "regexp", "in", "self", ".", "patterns", ":", "# First check to see if we're restricting to this token", "if", "restrict", "and", "p", "not", "in", "restrict", "and", "p", "not", "in", "self", ".", "ignore", ":", "continue", "m", "=", "regexp", ".", "match", "(", "self", ".", "input", ",", "self", ".", "pos", ")", "if", "m", ":", "# We got a match", "best_pat", "=", "p", "best_pat_len", "=", "len", "(", "m", ".", "group", "(", "0", ")", ")", "break", "# If we didn't find anything, raise an error", "if", "best_pat", "is", "None", ":", "msg", "=", "\"Bad Token\"", "if", "restrict", ":", "msg", "=", "\"Trying to find one of \"", "+", "\", \"", ".", "join", "(", "restrict", ")", "raise", "SyntaxError", "(", "self", ".", "pos", ",", "msg", ")", "# If we found something that isn't to be ignored, return it", "if", "best_pat", "in", "self", ".", "ignore", ":", "# This token should be ignored ..", "self", ".", "pos", "+=", "best_pat_len", "else", ":", "end_pos", "=", "self", ".", "pos", "+", "best_pat_len", "# Create a token with this data", "token", "=", "(", "self", ".", "pos", ",", "end_pos", ",", "best_pat", ",", "self", ".", "input", "[", "self", ".", "pos", ":", "end_pos", "]", ")", "self", ".", "pos", "=", "end_pos", "# Only add this token if it's not in the list", "# (to prevent looping)", "if", "not", "self", ".", "tokens", "or", "token", "!=", "self", ".", "tokens", "[", "-", "1", "]", ":", "self", ".", "tokens", ".", "append", "(", "token", ")", "self", ".", "restrictions", ".", "append", "(", "restrict", ")", "return", "1", "break", "return", "0" ]
Should scan another token and add it to the list, self.tokens, and add the restriction to self.restrictions
[ "Should", "scan", "another", "token", "and", "add", "it", "to", "the", "list", "self", ".", "tokens", "and", "add", "the", "restriction", "to", "self", ".", "restrictions" ]
b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L4587-L4637
train
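The first-match-wins loop is the essential behavior here; this standalone sketch (illustrative pattern names, not the Scanner class) shows earlier patterns taking precedence and ignored tokens silently advancing the cursor.

import re

# Standalone sketch of the scan loop above: try patterns in order, take the
# first match, skip ignored token types, and fail loudly on no match.
patterns = [('NUM', re.compile(r'\d+')), ('WS', re.compile(r'\s+')),
            ('ID', re.compile(r'[a-z]+'))]
ignore, pos, text, tokens = {'WS'}, 0, '12 abc', []
while pos < len(text):
    for name, regexp in patterns:
        m = regexp.match(text, pos)
        if m:
            if name not in ignore:
                tokens.append((name, m.group(0)))
            pos = m.end()
            break
    else:
        raise SyntaxError('Bad token at position %d' % pos)
print(tokens)  # [('NUM', '12'), ('ID', 'abc')]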
davidfokkema/artist
demo/demo_event_display.py
main
def main(): """Event display for an event of station 503 Date Time Timestamp Nanoseconds 2012-03-29 10:51:36 1333018296 870008589 Number of MIPs 35.0 51.9 35.8 78.9 Arrival time 15.0 17.5 20.0 27.5 """ # Detector positions in ENU relative to the station GPS x = [-6.34, -2.23, -3.6, 3.46] y = [6.34, 2.23, -3.6, 3.46] # Scale mips to fit the graph n = [35.0, 51.9, 35.8, 78.9] # Make times relative to first detection t = [15., 17.5, 20., 27.5] dt = [ti - min(t) for ti in t] plot = Plot() plot.scatter([0], [0], mark='triangle') plot.add_pin_at_xy(0, 0, 'Station 503', use_arrow=False, location='below') plot.scatter_table(x, y, dt, n) plot.set_scalebar(location="lower right") plot.set_colorbar('$\Delta$t [ns]') plot.set_axis_equal() plot.set_mlimits(max=16.) plot.set_slimits(min=10., max=100.) plot.set_xlabel('x [m]') plot.set_ylabel('y [m]') plot.save('event_display') # Add event by Station 508 # Detector positions in ENU relative to the station GPS x508 = [6.12, 0.00, -3.54, 3.54] y508 = [-6.12, -13.23, -3.54, 3.54] # Event GPS timestamp: 1371498167.016412100 # MIPS n508 = [5.6, 16.7, 36.6, 9.0] # Arrival Times t508 = [15., 22.5, 22.5, 30.] dt508 = [ti - min(t508) for ti in t508] plot = MultiPlot(1, 2, width=r'.33\linewidth') plot.set_xlimits_for_all(min=-10, max=15) plot.set_ylimits_for_all(min=-15, max=10) plot.set_mlimits_for_all(min=0., max=16.) plot.set_colorbar('$\Delta$t [ns]', False) plot.set_colormap('blackwhite') plot.set_scalebar_for_all(location="upper right") p = plot.get_subplot_at(0, 0) p.scatter([0], [0], mark='triangle') p.add_pin_at_xy(0, 0, 'Station 503', use_arrow=False, location='below') p.scatter_table(x, y, dt, n) p.set_axis_equal() p = plot.get_subplot_at(0, 1) p.scatter([0], [0], mark='triangle') p.add_pin_at_xy(0, 0, 'Station 508', use_arrow=False, location='below') p.scatter_table(x508, y508, dt508, n508) p.set_axis_equal() plot.show_yticklabels_for_all([(0, 0)]) plot.show_xticklabels_for_all([(0, 0), (0, 1)]) plot.set_xlabel('x [m]') plot.set_ylabel('y [m]') plot.save('multi_event_display')
python
def main(): """Event display for an event of station 503 Date Time Timestamp Nanoseconds 2012-03-29 10:51:36 1333018296 870008589 Number of MIPs 35.0 51.9 35.8 78.9 Arrival time 15.0 17.5 20.0 27.5 """ # Detector positions in ENU relative to the station GPS x = [-6.34, -2.23, -3.6, 3.46] y = [6.34, 2.23, -3.6, 3.46] # Scale mips to fit the graph n = [35.0, 51.9, 35.8, 78.9] # Make times relative to first detection t = [15., 17.5, 20., 27.5] dt = [ti - min(t) for ti in t] plot = Plot() plot.scatter([0], [0], mark='triangle') plot.add_pin_at_xy(0, 0, 'Station 503', use_arrow=False, location='below') plot.scatter_table(x, y, dt, n) plot.set_scalebar(location="lower right") plot.set_colorbar('$\Delta$t [ns]') plot.set_axis_equal() plot.set_mlimits(max=16.) plot.set_slimits(min=10., max=100.) plot.set_xlabel('x [m]') plot.set_ylabel('y [m]') plot.save('event_display') # Add event by Station 508 # Detector positions in ENU relative to the station GPS x508 = [6.12, 0.00, -3.54, 3.54] y508 = [-6.12, -13.23, -3.54, 3.54] # Event GPS timestamp: 1371498167.016412100 # MIPS n508 = [5.6, 16.7, 36.6, 9.0] # Arrival Times t508 = [15., 22.5, 22.5, 30.] dt508 = [ti - min(t508) for ti in t508] plot = MultiPlot(1, 2, width=r'.33\linewidth') plot.set_xlimits_for_all(min=-10, max=15) plot.set_ylimits_for_all(min=-15, max=10) plot.set_mlimits_for_all(min=0., max=16.) plot.set_colorbar('$\Delta$t [ns]', False) plot.set_colormap('blackwhite') plot.set_scalebar_for_all(location="upper right") p = plot.get_subplot_at(0, 0) p.scatter([0], [0], mark='triangle') p.add_pin_at_xy(0, 0, 'Station 503', use_arrow=False, location='below') p.scatter_table(x, y, dt, n) p.set_axis_equal() p = plot.get_subplot_at(0, 1) p.scatter([0], [0], mark='triangle') p.add_pin_at_xy(0, 0, 'Station 508', use_arrow=False, location='below') p.scatter_table(x508, y508, dt508, n508) p.set_axis_equal() plot.show_yticklabels_for_all([(0, 0)]) plot.show_xticklabels_for_all([(0, 0), (0, 1)]) plot.set_xlabel('x [m]') plot.set_ylabel('y [m]') plot.save('multi_event_display')
[ "def", "main", "(", ")", ":", "# Detector positions in ENU relative to the station GPS", "x", "=", "[", "-", "6.34", ",", "-", "2.23", ",", "-", "3.6", ",", "3.46", "]", "y", "=", "[", "6.34", ",", "2.23", ",", "-", "3.6", ",", "3.46", "]", "# Scale mips to fit the graph", "n", "=", "[", "35.0", ",", "51.9", ",", "35.8", ",", "78.9", "]", "# Make times relative to first detection", "t", "=", "[", "15.", ",", "17.5", ",", "20.", ",", "27.5", "]", "dt", "=", "[", "ti", "-", "min", "(", "t", ")", "for", "ti", "in", "t", "]", "plot", "=", "Plot", "(", ")", "plot", ".", "scatter", "(", "[", "0", "]", ",", "[", "0", "]", ",", "mark", "=", "'triangle'", ")", "plot", ".", "add_pin_at_xy", "(", "0", ",", "0", ",", "'Station 503'", ",", "use_arrow", "=", "False", ",", "location", "=", "'below'", ")", "plot", ".", "scatter_table", "(", "x", ",", "y", ",", "dt", ",", "n", ")", "plot", ".", "set_scalebar", "(", "location", "=", "\"lower right\"", ")", "plot", ".", "set_colorbar", "(", "'$\\Delta$t [ns]'", ")", "plot", ".", "set_axis_equal", "(", ")", "plot", ".", "set_mlimits", "(", "max", "=", "16.", ")", "plot", ".", "set_slimits", "(", "min", "=", "10.", ",", "max", "=", "100.", ")", "plot", ".", "set_xlabel", "(", "'x [m]'", ")", "plot", ".", "set_ylabel", "(", "'y [m]'", ")", "plot", ".", "save", "(", "'event_display'", ")", "# Add event by Station 508", "# Detector positions in ENU relative to the station GPS", "x508", "=", "[", "6.12", ",", "0.00", ",", "-", "3.54", ",", "3.54", "]", "y508", "=", "[", "-", "6.12", ",", "-", "13.23", ",", "-", "3.54", ",", "3.54", "]", "# Event GPS timestamp: 1371498167.016412100", "# MIPS", "n508", "=", "[", "5.6", ",", "16.7", ",", "36.6", ",", "9.0", "]", "# Arrival Times", "t508", "=", "[", "15.", ",", "22.5", ",", "22.5", ",", "30.", "]", "dt508", "=", "[", "ti", "-", "min", "(", "t508", ")", "for", "ti", "in", "t508", "]", "plot", "=", "MultiPlot", "(", "1", ",", "2", ",", "width", "=", "r'.33\\linewidth'", ")", "plot", ".", "set_xlimits_for_all", "(", "min", "=", "-", "10", ",", "max", "=", "15", ")", "plot", ".", "set_ylimits_for_all", "(", "min", "=", "-", "15", ",", "max", "=", "10", ")", "plot", ".", "set_mlimits_for_all", "(", "min", "=", "0.", ",", "max", "=", "16.", ")", "plot", ".", "set_colorbar", "(", "'$\\Delta$t [ns]'", ",", "False", ")", "plot", ".", "set_colormap", "(", "'blackwhite'", ")", "plot", ".", "set_scalebar_for_all", "(", "location", "=", "\"upper right\"", ")", "p", "=", "plot", ".", "get_subplot_at", "(", "0", ",", "0", ")", "p", ".", "scatter", "(", "[", "0", "]", ",", "[", "0", "]", ",", "mark", "=", "'triangle'", ")", "p", ".", "add_pin_at_xy", "(", "0", ",", "0", ",", "'Station 503'", ",", "use_arrow", "=", "False", ",", "location", "=", "'below'", ")", "p", ".", "scatter_table", "(", "x", ",", "y", ",", "dt", ",", "n", ")", "p", ".", "set_axis_equal", "(", ")", "p", "=", "plot", ".", "get_subplot_at", "(", "0", ",", "1", ")", "p", ".", "scatter", "(", "[", "0", "]", ",", "[", "0", "]", ",", "mark", "=", "'triangle'", ")", "p", ".", "add_pin_at_xy", "(", "0", ",", "0", ",", "'Station 508'", ",", "use_arrow", "=", "False", ",", "location", "=", "'below'", ")", "p", ".", "scatter_table", "(", "x508", ",", "y508", ",", "dt508", ",", "n508", ")", "p", ".", "set_axis_equal", "(", ")", "plot", ".", "show_yticklabels_for_all", "(", "[", "(", "0", ",", "0", ")", "]", ")", "plot", ".", "show_xticklabels_for_all", "(", "[", "(", "0", ",", "0", ")", ",", "(", "0", ",", "1", ")", "]", ")", "plot", ".", "set_xlabel", "(", "'x [m]'", ")", 
"plot", ".", "set_ylabel", "(", "'y [m]'", ")", "plot", ".", "save", "(", "'multi_event_display'", ")" ]
Event display for an event of station 503 Date Time Timestamp Nanoseconds 2012-03-29 10:51:36 1333018296 870008589 Number of MIPs 35.0 51.9 35.8 78.9 Arrival time 15.0 17.5 20.0 27.5
[ "Event", "display", "for", "an", "event", "of", "station", "503" ]
26ae7987522622710f2910980770c50012fda47d
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/demo/demo_event_display.py#L4-L83
train
alexhayes/django-cereal
django_cereal/pickle.py
_model_unpickle
def _model_unpickle(cls, data): """Unpickle a model by retrieving it from the database.""" auto_field_value = data['pk'] try: obj = cls.objects.get(pk=auto_field_value) except Exception as e: if isinstance(e, OperationalError): # Attempt reconnect, we've probably hit; # OperationalError(2006, 'MySQL server has gone away') logger.debug("Caught OperationalError, closing database connection.", exc_info=e) from django.db import connection connection.close() obj = cls.objects.get(pk=auto_field_value) else: raise return obj
python
def _model_unpickle(cls, data): """Unpickle a model by retrieving it from the database.""" auto_field_value = data['pk'] try: obj = cls.objects.get(pk=auto_field_value) except Exception as e: if isinstance(e, OperationalError): # Attempt reconnect, we've probably hit; # OperationalError(2006, 'MySQL server has gone away') logger.debug("Caught OperationalError, closing database connection.", exc_info=e) from django.db import connection connection.close() obj = cls.objects.get(pk=auto_field_value) else: raise return obj
[ "def", "_model_unpickle", "(", "cls", ",", "data", ")", ":", "auto_field_value", "=", "data", "[", "'pk'", "]", "try", ":", "obj", "=", "cls", ".", "objects", ".", "get", "(", "pk", "=", "auto_field_value", ")", "except", "Exception", "as", "e", ":", "if", "isinstance", "(", "e", ",", "OperationalError", ")", ":", "# Attempt reconnect, we've probably hit;", "# OperationalError(2006, 'MySQL server has gone away')", "logger", ".", "debug", "(", "\"Caught OperationalError, closing database connection.\"", ",", "exc_info", "=", "e", ")", "from", "django", ".", "db", "import", "connection", "connection", ".", "close", "(", ")", "obj", "=", "cls", ".", "objects", ".", "get", "(", "pk", "=", "auto_field_value", ")", "else", ":", "raise", "return", "obj" ]
Unpickle a model by retrieving it from the database.
[ "Unpickle", "a", "model", "by", "retrieving", "it", "from", "the", "database", "." ]
ab5b7f0283c6604c4df658542f7381262e600e5d
https://github.com/alexhayes/django-cereal/blob/ab5b7f0283c6604c4df658542f7381262e600e5d/django_cereal/pickle.py#L27-L42
train
alexhayes/django-cereal
django_cereal/pickle.py
task
def task(func, *args, **kwargs): """ A task decorator that uses the django-cereal pickler as the default serializer. """ # Note we must import here to avoid recursion issue with kombu entry points registration from celery import shared_task if 'serializer' not in kwargs: kwargs['serializer'] = DJANGO_CEREAL_PICKLE return shared_task(func, *args, **kwargs)
python
def task(func, *args, **kwargs): """ A task decorator that uses the django-cereal pickler as the default serializer. """ # Note we must import here to avoid recursion issue with kombu entry points registration from celery import shared_task if 'serializer' not in kwargs: kwargs['serializer'] = DJANGO_CEREAL_PICKLE return shared_task(func, *args, **kwargs)
[ "def", "task", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Note we must import here to avoid recursion issue with kombu entry points registration", "from", "celery", "import", "shared_task", "if", "'serializer'", "not", "in", "kwargs", ":", "kwargs", "[", "'serializer'", "]", "=", "DJANGO_CEREAL_PICKLE", "return", "shared_task", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
A task decorator that uses the django-cereal pickler as the default serializer.
[ "A", "task", "decorator", "that", "uses", "the", "django", "-", "cereal", "pickler", "as", "the", "default", "serializer", "." ]
ab5b7f0283c6604c4df658542f7381262e600e5d
https://github.com/alexhayes/django-cereal/blob/ab5b7f0283c6604c4df658542f7381262e600e5d/django_cereal/pickle.py#L106-L116
train
PythonOptimizers/cygenja
cygenja/helpers/file_helpers.py
find_files
def find_files(directory, pattern, recursively=True): """ Yield a list of files with their base directories, recursively or not. Returns: A list of (base_directory, filename) Args: directory: base directory to start the search. pattern: fnmatch pattern for filenames. recursively: do we recurse or not? """ for root, dirs, files in os.walk(directory): for basename in files: if fnmatch.fnmatch(basename, pattern): yield root, basename if not recursively: break
python
def find_files(directory, pattern, recursively=True): """ Yield a list of files with their base directories, recursively or not. Returns: A list of (base_directory, filename) Args: directory: base directory to start the search. pattern: fnmatch pattern for filenames. recursively: do we recurse or not? """ for root, dirs, files in os.walk(directory): for basename in files: if fnmatch.fnmatch(basename, pattern): yield root, basename if not recursively: break
[ "def", "find_files", "(", "directory", ",", "pattern", ",", "recursively", "=", "True", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "directory", ")", ":", "for", "basename", "in", "files", ":", "if", "fnmatch", ".", "fnmatch", "(", "basename", ",", "pattern", ")", ":", "yield", "root", ",", "basename", "if", "not", "recursively", ":", "break" ]
Yield a list of files with their base directories, recursively or not. Returns: A list of (base_directory, filename) Args: directory: base directory to start the search. pattern: fnmatch pattern for filenames. recursively: do we recurse or not?
[ "Yield", "a", "list", "of", "files", "with", "their", "base", "directories", "recursively", "or", "not", "." ]
a9ef91cdfa8452beeeec4f050f928b830379f91c
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/helpers/file_helpers.py#L6-L25
train
stevelittlefish/easyforms
easyforms/form.py
Field.validate
def validate(self): """Run the form value through the validators, and update the error field if needed""" if self.error: return False for v in self.validators: self.error = v(self.value) if self.error: return False return True
python
def validate(self): """Run the form value through the validators, and update the error field if needed""" if self.error: return False for v in self.validators: self.error = v(self.value) if self.error: return False return True
[ "def", "validate", "(", "self", ")", ":", "if", "self", ".", "error", ":", "return", "False", "for", "v", "in", "self", ".", "validators", ":", "self", ".", "error", "=", "v", "(", "self", ".", "value", ")", "if", "self", ".", "error", ":", "return", "False", "return", "True" ]
Run the form value through the validators, and update the error field if needed
[ "Run", "the", "form", "value", "through", "the", "validators", "and", "update", "the", "error", "field", "if", "needed" ]
f5dd2635b045beec9af970b249909f8429cedc57
https://github.com/stevelittlefish/easyforms/blob/f5dd2635b045beec9af970b249909f8429cedc57/easyforms/form.py#L203-L213
train
stevelittlefish/easyforms
easyforms/form.py
Field.form_group_classes
def form_group_classes(self): """ Full list of classes for the class attribute of the form group. Returned as a string with spaces separating each class, ready for insertion into the class attribute. This will generally look like the following: 'form-group has-error custom-class' """ classes = ['form-group'] if self.style == styles.BOOTSTRAP_4 and self.form_type == formtype.HORIZONTAL: classes.append('row') if self.error and self.style == styles.BOOTSTRAP_3: classes.append('has-error') if self.form_group_css_class: classes.append(self.form_group_css_class) return ' '.join(classes)
python
def form_group_classes(self): """ Full list of classes for the class attribute of the form group. Returned as a string with spaces separating each class, ready for insertion into the class attribute. This will generally look like the following: 'form-group has-error custom-class' """ classes = ['form-group'] if self.style == styles.BOOTSTRAP_4 and self.form_type == formtype.HORIZONTAL: classes.append('row') if self.error and self.style == styles.BOOTSTRAP_3: classes.append('has-error') if self.form_group_css_class: classes.append(self.form_group_css_class) return ' '.join(classes)
[ "def", "form_group_classes", "(", "self", ")", ":", "classes", "=", "[", "'form-group'", "]", "if", "self", ".", "style", "==", "styles", ".", "BOOTSTRAP_4", "and", "self", ".", "form_type", "==", "formtype", ".", "HORIZONTAL", ":", "classes", ".", "append", "(", "'row'", ")", "if", "self", ".", "error", "and", "self", ".", "style", "==", "styles", ".", "BOOTSTRAP_3", ":", "classes", ".", "append", "(", "'has-error'", ")", "if", "self", ".", "form_group_css_class", ":", "classes", ".", "append", "(", "self", ".", "form_group_css_class", ")", "return", "' '", ".", "join", "(", "classes", ")" ]
Full list of classes for the class attribute of the form group. Returned as a string with spaces separating each class, ready for insertion into the class attribute. This will generally look like the following: 'form-group has-error custom-class'
[ "Full", "list", "of", "classes", "for", "the", "class", "attribute", "of", "the", "form", "group", ".", "Returned", "as", "a", "string", "with", "spaces", "separating", "each", "class", "ready", "for", "insertion", "into", "the", "class", "attribute", "." ]
f5dd2635b045beec9af970b249909f8429cedc57
https://github.com/stevelittlefish/easyforms/blob/f5dd2635b045beec9af970b249909f8429cedc57/easyforms/form.py#L367-L384
train
stevelittlefish/easyforms
easyforms/form.py
Field.input_classes
def input_classes(self): """ Full list of classes for the class attribute of the input, returned as a string with spaces separating each class. """ classes = [self.base_input_css_class] if self.css_class: classes.append(self.css_class) if self.style == styles.BOOTSTRAP_4 and self.error: classes.append('is-invalid') return ' '.join(classes)
python
def input_classes(self): """ Full list of classes for the class attribute of the input, returned as a string with spaces separating each class. """ classes = [self.base_input_css_class] if self.css_class: classes.append(self.css_class) if self.style == styles.BOOTSTRAP_4 and self.error: classes.append('is-invalid') return ' '.join(classes)
[ "def", "input_classes", "(", "self", ")", ":", "classes", "=", "[", "self", ".", "base_input_css_class", "]", "if", "self", ".", "css_class", ":", "classes", ".", "append", "(", "self", ".", "css_class", ")", "if", "self", ".", "style", "==", "styles", ".", "BOOTSTRAP_4", "and", "self", ".", "error", ":", "classes", ".", "append", "(", "'is-invalid'", ")", "return", "' '", ".", "join", "(", "classes", ")" ]
Full list of classes for the class attribute of the input, returned as a string with spaces separating each class.
[ "Full", "list", "of", "classes", "for", "the", "class", "attribute", "of", "the", "input", "returned", "as", "a", "string", "with", "spaces", "separating", "each", "class", "." ]
f5dd2635b045beec9af970b249909f8429cedc57
https://github.com/stevelittlefish/easyforms/blob/f5dd2635b045beec9af970b249909f8429cedc57/easyforms/form.py#L387-L399
train
stevelittlefish/easyforms
easyforms/form.py
Form.render
def render(self): """Render the form and all sections to HTML""" return Markup(env.get_template('form.html').render(form=self, render_open_tag=True, render_close_tag=True, render_before=True, render_sections=True, render_after=True, generate_csrf_token=None if self.disable_csrf else _csrf_generation_function))
python
def render(self): """Render the form and all sections to HTML""" return Markup(env.get_template('form.html').render(form=self, render_open_tag=True, render_close_tag=True, render_before=True, render_sections=True, render_after=True, generate_csrf_token=None if self.disable_csrf else _csrf_generation_function))
[ "def", "render", "(", "self", ")", ":", "return", "Markup", "(", "env", ".", "get_template", "(", "'form.html'", ")", ".", "render", "(", "form", "=", "self", ",", "render_open_tag", "=", "True", ",", "render_close_tag", "=", "True", ",", "render_before", "=", "True", ",", "render_sections", "=", "True", ",", "render_after", "=", "True", ",", "generate_csrf_token", "=", "None", "if", "self", ".", "disable_csrf", "else", "_csrf_generation_function", ")", ")" ]
Render the form and all sections to HTML
[ "Render", "the", "form", "and", "all", "sections", "to", "HTML" ]
f5dd2635b045beec9af970b249909f8429cedc57
https://github.com/stevelittlefish/easyforms/blob/f5dd2635b045beec9af970b249909f8429cedc57/easyforms/form.py#L591-L599
train
stevelittlefish/easyforms
easyforms/form.py
Form.render_before_sections
def render_before_sections(self): """Render the form up to the first section. This will open the form tag but not close it.""" return Markup(env.get_template('form.html').render(form=self, render_open_tag=True, render_close_tag=False, render_before=True, render_sections=False, render_after=False, generate_csrf_token=None if self.action else _csrf_generation_function))
python
def render_before_sections(self): """Render the form up to the first section. This will open the form tag but not close it.""" return Markup(env.get_template('form.html').render(form=self, render_open_tag=True, render_close_tag=False, render_before=True, render_sections=False, render_after=False, generate_csrf_token=None if self.action else _csrf_generation_function))
[ "def", "render_before_sections", "(", "self", ")", ":", "return", "Markup", "(", "env", ".", "get_template", "(", "'form.html'", ")", ".", "render", "(", "form", "=", "self", ",", "render_open_tag", "=", "True", ",", "render_close_tag", "=", "False", ",", "render_before", "=", "True", ",", "render_sections", "=", "False", ",", "render_after", "=", "False", ",", "generate_csrf_token", "=", "None", "if", "self", ".", "action", "else", "_csrf_generation_function", ")", ")" ]
Render the form up to the first section. This will open the form tag but not close it.
[ "Render", "the", "form", "up", "to", "the", "first", "section", ".", "This", "will", "open", "the", "form", "tag", "but", "not", "close", "it", "." ]
f5dd2635b045beec9af970b249909f8429cedc57
https://github.com/stevelittlefish/easyforms/blob/f5dd2635b045beec9af970b249909f8429cedc57/easyforms/form.py#L601-L609
train
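render() and render_before_sections() differ only in which parts of the template are emitted, which is what lets callers inject custom markup inside the form tag. A toy illustration of that split; FakeForm and render_after_sections are stand-ins (the latter is not shown in this excerpt and is assumed to exist as the closing counterpart):

```python
# Toy illustration of splitting form rendering into open/sections/close so
# custom markup can be placed inside the <form> element. FakeForm mimics the
# shape of the easyforms API; render_after_sections is an assumed counterpart.
class FakeForm:
    def render_before_sections(self):
        return '<form method="POST">'   # opens the form tag, leaves it open

    def render_sections(self):
        return '<input name="email">'   # the managed fields

    def render_after_sections(self):
        return '</form>'                # closes the form tag

form = FakeForm()
html = (form.render_before_sections()
        + '<p>Custom markup injected between the sections.</p>'
        + form.render_sections()
        + form.render_after_sections())
print(html)
```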
stevelittlefish/easyforms
easyforms/form.py
Form.read_form_data
def read_form_data(self): """Attempt to read the form data from the request""" if self.processed_data: raise exceptions.AlreadyProcessed('The data has already been processed for this form') if self.readonly: return if request.method == self.method: if self.method == 'POST': data = request.form else: data = request.args if self.submitted_hidden_input_name in data: # The form has been submitted self.processed_data = True for field in self.all_fields: # We need to skip readonly fields if field.readonly: pass else: field.extract_value(data) # Validate the field if not field.validate(): log.debug('Validation error in field \'%s\': %s' % (field.name, field.error)) self.has_errors = True
python
def read_form_data(self): """Attempt to read the form data from the request""" if self.processed_data: raise exceptions.AlreadyProcessed('The data has already been processed for this form') if self.readonly: return if request.method == self.method: if self.method == 'POST': data = request.form else: data = request.args if self.submitted_hidden_input_name in data: # The form has been submitted self.processed_data = True for field in self.all_fields: # We need to skip readonly fields if field.readonly: pass else: field.extract_value(data) # Validate the field if not field.validate(): log.debug('Validation error in field \'%s\': %s' % (field.name, field.error)) self.has_errors = True
[ "def", "read_form_data", "(", "self", ")", ":", "if", "self", ".", "processed_data", ":", "raise", "exceptions", ".", "AlreadyProcessed", "(", "'The data has already been processed for this form'", ")", "if", "self", ".", "readonly", ":", "return", "if", "request", ".", "method", "==", "self", ".", "method", ":", "if", "self", ".", "method", "==", "'POST'", ":", "data", "=", "request", ".", "form", "else", ":", "data", "=", "request", ".", "args", "if", "self", ".", "submitted_hidden_input_name", "in", "data", ":", "# The form has been submitted", "self", ".", "processed_data", "=", "True", "for", "field", "in", "self", ".", "all_fields", ":", "# We need to skip readonly fields", "if", "field", ".", "readonly", ":", "pass", "else", ":", "field", ".", "extract_value", "(", "data", ")", "# Validate the field", "if", "not", "field", ".", "validate", "(", ")", ":", "log", ".", "debug", "(", "'Validation error in field \\'%s\\': %s'", "%", "(", "field", ".", "name", ",", "field", ".", "error", ")", ")", "self", ".", "has_errors", "=", "True" ]
Attempt to read the form data from the request
[ "Attempt", "to", "read", "the", "form", "data", "from", "the", "request" ]
f5dd2635b045beec9af970b249909f8429cedc57
https://github.com/stevelittlefish/easyforms/blob/f5dd2635b045beec9af970b249909f8429cedc57/easyforms/form.py#L679-L707
train
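The control flow of read_form_data() reduces to three gates: the request method matches, the hidden submitted-marker input is present, and each non-readonly field is extracted and validated. A condensed standalone sketch against a plain dict; the Field class and marker name are assumptions, not the easyforms API:

```python
# Condensed sketch of the read_form_data() flow against a plain dict instead
# of a Flask request. Field and the marker name are toy stand-ins.
class Field:
    def __init__(self, name, required=True):
        self.name, self.required = name, required
        self.value, self.error = None, None

    def extract_value(self, data):
        self.value = data.get(self.name)

    def validate(self):
        if self.required and not self.value:
            self.error = 'Required'
            return False
        return True

def read_form_data(fields, data, marker='__formsubmitted__'):
    if marker not in data:
        return None                     # form was never submitted
    ok = True
    for field in fields:
        field.extract_value(data)
        if not field.validate():        # mirrors has_errors = True
            ok = False
    return ok

print(read_form_data([Field('email')], {'__formsubmitted__': '1', 'email': ''}))
# -> False (required field missing a value)
```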
stevelittlefish/easyforms
easyforms/form.py
Form.get_if_present
def get_if_present(self, name, default=None): """ Returns the value for a field, but if the field doesn't exist will return default instead """ if not self.processed_data: raise exceptions.FormNotProcessed('The form data has not been processed yet') if name in self.field_dict: return self[name] return default
python
def get_if_present(self, name, default=None): """ Returns the value for a field, but if the field doesn't exist will return default instead """ if not self.processed_data: raise exceptions.FormNotProcessed('The form data has not been processed yet') if name in self.field_dict: return self[name] return default
[ "def", "get_if_present", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "if", "not", "self", ".", "processed_data", ":", "raise", "exceptions", ".", "FormNotProcessed", "(", "'The form data has not been processed yet'", ")", "if", "name", "in", "self", ".", "field_dict", ":", "return", "self", "[", "name", "]", "return", "default" ]
Returns the value for a field, but if the field doesn't exist will return default instead
[ "Returns", "the", "value", "for", "a", "field", "but", "if", "the", "field", "doesn", "t", "exist", "will", "return", "default", "instead" ]
f5dd2635b045beec9af970b249909f8429cedc57
https://github.com/stevelittlefish/easyforms/blob/f5dd2635b045beec9af970b249909f8429cedc57/easyforms/form.py#L724-L734
train
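get_if_present() is essentially dict.get over the form's field dictionary, guarded by the processed flag. A toy mirror of that behaviour:

```python
# Toy mirror of get_if_present(): dict.get semantics plus the
# not-yet-processed guard. The arguments stand in for the form's internal state.
def get_if_present(field_values, processed, name, default=None):
    if not processed:
        raise RuntimeError('The form data has not been processed yet')
    return field_values.get(name, default)

print(get_if_present({'email': 'a@b.example'}, True, 'nickname', default=''))
# -> '' (field absent, default returned instead of raising)
```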
stevelittlefish/easyforms
easyforms/form.py
Form.disable_validation
def disable_validation(self, field_name): """Disable the validation rules for a field""" field = self.field_dict.get(field_name) if not field: raise exceptions.FieldNotFound('Field not found: \'%s\' when trying to disable validation' % field_name) field.validators = []
python
def disable_validation(self, field_name): """Disable the validation rules for a field""" field = self.field_dict.get(field_name) if not field: raise exceptions.FieldNotFound('Field not found: \'%s\' when trying to disable validation' % field_name) field.validators = []
[ "def", "disable_validation", "(", "self", ",", "field_name", ")", ":", "field", "=", "self", ".", "field_dict", ".", "get", "(", "field_name", ")", "if", "not", "field", ":", "raise", "exceptions", ".", "FieldNotFound", "(", "'Field not found: \\'%s\\' when trying to disable validation'", "%", "field_name", ")", "field", ".", "validators", "=", "[", "]" ]
Disable the validation rules for a field
[ "Disable", "the", "validation", "rules", "for", "a", "field" ]
f5dd2635b045beec9af970b249909f8429cedc57
https://github.com/stevelittlefish/easyforms/blob/f5dd2635b045beec9af970b249909f8429cedc57/easyforms/form.py#L763-L770
train
stevelittlefish/easyforms
easyforms/form.py
Form.create_single_button_clone
def create_single_button_clone(self, submit_text='Submit', submit_css_class='btn-primary', read_form_data=True, form_type=None): """ This will create a copy of this form, with all of its inputs replaced with hidden inputs, and with a single submit button. This allows you to easily create a "button" that will submit a post request which is identical to the current state of the form. You could then, if required, change some of the values in the hidden inputs. Note: Submit buttons are not included, and the submit button value will change. """ from .basicfields import BooleanCheckbox, HiddenField, SubmitButton fields = [] for field in self.all_fields: # If it's valid for the field to be missing, and the value of the field is empty, # then don't add it, otherwise create a hidden input if field.allow_missing: if field.value is None or field.value == '': continue elif isinstance(field, BooleanCheckbox) and not field.value: continue # TODO: is this right? elif isinstance(field, SubmitButton): continue # If we get here, we need to add this field to the list fields.append(HiddenField(field.name, field.value)) form = Form(fields, action=self.action, method=self.method, submit_css_class=submit_css_class, submit_text=submit_text, read_form_data=read_form_data, disable_csrf=self.disable_csrf, readonly=False, form_type=form_type if form_type else self.form_type) return form
python
def create_single_button_clone(self, submit_text='Submit', submit_css_class='btn-primary', read_form_data=True, form_type=None): """ This will create a copy of this form, with all of its inputs replaced with hidden inputs, and with a single submit button. This allows you to easily create a "button" that will submit a post request which is identical to the current state of the form. You could then, if required, change some of the values in the hidden inputs. Note: Submit buttons are not included, and the submit button value will change. """ from .basicfields import BooleanCheckbox, HiddenField, SubmitButton fields = [] for field in self.all_fields: # If it's valid for the field to be missing, and the value of the field is empty, # then don't add it, otherwise create a hidden input if field.allow_missing: if field.value is None or field.value == '': continue elif isinstance(field, BooleanCheckbox) and not field.value: continue # TODO: is this right? elif isinstance(field, SubmitButton): continue # If we get here, we need to add this field to the list fields.append(HiddenField(field.name, field.value)) form = Form(fields, action=self.action, method=self.method, submit_css_class=submit_css_class, submit_text=submit_text, read_form_data=read_form_data, disable_csrf=self.disable_csrf, readonly=False, form_type=form_type if form_type else self.form_type) return form
[ "def", "create_single_button_clone", "(", "self", ",", "submit_text", "=", "'Submit'", ",", "submit_css_class", "=", "'btn-primary'", ",", "read_form_data", "=", "True", ",", "form_type", "=", "None", ")", ":", "from", ".", "basicfields", "import", "BooleanCheckbox", ",", "HiddenField", ",", "SubmitButton", "fields", "=", "[", "]", "for", "field", "in", "self", ".", "all_fields", ":", "# If it's valid for the field to be missing, and the value of the field is empty,", "# then don't add it, otherwise create a hidden input", "if", "field", ".", "allow_missing", ":", "if", "field", ".", "value", "is", "None", "or", "field", ".", "value", "==", "''", ":", "continue", "elif", "isinstance", "(", "field", ",", "BooleanCheckbox", ")", "and", "not", "field", ".", "value", ":", "continue", "# TODO: is this right?", "elif", "isinstance", "(", "field", ",", "SubmitButton", ")", ":", "continue", "# If we get here, we need to add this field to the list", "fields", ".", "append", "(", "HiddenField", "(", "field", ".", "name", ",", "field", ".", "value", ")", ")", "form", "=", "Form", "(", "fields", ",", "action", "=", "self", ".", "action", ",", "method", "=", "self", ".", "method", ",", "submit_css_class", "=", "submit_css_class", ",", "submit_text", "=", "submit_text", ",", "read_form_data", "=", "read_form_data", ",", "disable_csrf", "=", "self", ".", "disable_csrf", ",", "readonly", "=", "False", ",", "form_type", "=", "form_type", "if", "form_type", "else", "self", ".", "form_type", ")", "return", "form" ]
This will create a copy of this form, with all of its inputs replaced with hidden inputs, and with a single submit button. This allows you to easily create a "button" that will submit a post request which is identical to the current state of the form. You could then, if required, change some of the values in the hidden inputs. Note: Submit buttons are not included, and the submit button value will change.
[ "This", "will", "create", "a", "copy", "of", "this", "form", "with", "all", "of", "inputs", "replaced", "with", "hidden", "inputs", "and", "with", "a", "single", "submit", "button", ".", "This", "allows", "you", "to", "easily", "create", "a", "button", "that", "will", "submit", "a", "post", "request", "which", "is", "identical", "to", "the", "current", "state", "of", "the", "form", ".", "You", "could", "then", "if", "required", "change", "some", "of", "the", "values", "in", "the", "hidden", "inputs", "." ]
f5dd2635b045beec9af970b249909f8429cedc57
https://github.com/stevelittlefish/easyforms/blob/f5dd2635b045beec9af970b249909f8429cedc57/easyforms/form.py#L805-L838
train
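The clone logic above re-emits the current form state as hidden inputs plus one submit button, skipping empty optional values. A standalone sketch of the same idea, with (name, value, allow_missing) tuples in place of real field objects:

```python
# Standalone sketch of create_single_button_clone(): replay the form state as
# hidden inputs with a single submit button. Tuples replace easyforms fields.
def single_button_clone(fields, submit_text='Submit'):
    parts = []
    for name, value, allow_missing in fields:
        if allow_missing and value in (None, ''):
            continue                    # optional and empty: drop it
        parts.append('<input type="hidden" name="%s" value="%s">' % (name, value))
    parts.append('<button type="submit">%s</button>' % submit_text)
    return '\n'.join(parts)

print(single_button_clone([('page', 2, False), ('q', '', True)], 'Next page'))
```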
ckcollab/polished
polished/decorators.py
polish
def polish(commit_indexes=None, urls=None): ''' Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots. For example, if you have 10 commits in a row where static file links were broken, you could re-write the html in memory as it is interpreted. Keyword arguments: commit_indexes -- A list of indexes to apply the wrapped function to urls -- A list of URLs to apply the wrapped function to ''' def decorator(f): if commit_indexes: f.polish_commit_indexes = commit_indexes if urls: f.polish_urls = urls @wraps(f) def wrappee(*args, **kwargs): return f(*args, **kwargs) return wrappee return decorator
python
def polish(commit_indexes=None, urls=None): ''' Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots. For example, if you have 10 commits in a row where static file links were broken, you could re-write the html in memory as it is interpreted. Keyword arguments: commit_indexes -- A list of indexes to apply the wrapped function to urls -- A list of URLs to apply the wrapped function to ''' def decorator(f): if commit_indexes: f.polish_commit_indexes = commit_indexes if urls: f.polish_urls = urls @wraps(f) def wrappee(*args, **kwargs): return f(*args, **kwargs) return wrappee return decorator
[ "def", "polish", "(", "commit_indexes", "=", "None", ",", "urls", "=", "None", ")", ":", "def", "decorator", "(", "f", ")", ":", "if", "commit_indexes", ":", "f", ".", "polish_commit_indexes", "=", "commit_indexes", "if", "urls", ":", "f", ".", "polish_urls", "=", "urls", "@", "wraps", "(", "f", ")", "def", "wrappee", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrappee", "return", "decorator" ]
Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots. For example, if you have 10 commits in a row where static file links were broken, you could re-write the html in memory as it is interpreted. Keyword arguments: commit_indexes -- A list of indexes to apply the wrapped function to urls -- A list of URLs to apply the wrapped function to
[ "Apply", "certain", "behaviors", "to", "commits", "or", "URLs", "that", "need", "polishing", "before", "they", "are", "ready", "for", "screenshots" ]
5a00b2fbe569bc957d1647c0849fd344db29b644
https://github.com/ckcollab/polished/blob/5a00b2fbe569bc957d1647c0849fd344db29b644/polished/decorators.py#L5-L28
train
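Because the decorator sets its marker attributes on f before @wraps(f) copies f.__dict__ onto the wrapper, the markers survive the wrapping. A hypothetical use (the function body is illustrative; only polish itself is from the source, and it is assumed to be in scope along with functools.wraps as in the source module):

```python
# Hypothetical use of polish() as defined above.
@polish(commit_indexes=[0, 1, 2], urls=['/'])
def fix_static_links(html):
    return html.replace('src="static/', 'src="/static/')

# wraps() copies f.__dict__ onto the wrapper, so the markers are visible:
print(fix_static_links.polish_commit_indexes)  # [0, 1, 2]
print(fix_static_links.polish_urls)            # ['/']
```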
iLampard/x-utils
xutils/date_utils/convert.py
DatetimeConverter.timestamp_to_datetime
def timestamp_to_datetime(cls, time_stamp, localized=True): """ Converts a UTC timestamp to a datetime.datetime.""" ret = datetime.datetime.utcfromtimestamp(time_stamp) if localized: ret = localize(ret, pytz.utc) return ret
python
def timestamp_to_datetime(cls, time_stamp, localized=True): """ Converts a UTC timestamp to a datetime.datetime.""" ret = datetime.datetime.utcfromtimestamp(time_stamp) if localized: ret = localize(ret, pytz.utc) return ret
[ "def", "timestamp_to_datetime", "(", "cls", ",", "time_stamp", ",", "localized", "=", "True", ")", ":", "ret", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "time_stamp", ")", "if", "localized", ":", "ret", "=", "localize", "(", "ret", ",", "pytz", ".", "utc", ")", "return", "ret" ]
Converts a UTC timestamp to a datetime.datetime.
[ "Converts", "a", "UTC", "timestamp", "to", "a", "datetime", ".", "datetime", "." ]
291d92832ee0e0c89bc22e10ecf2f44445e0d300
https://github.com/iLampard/x-utils/blob/291d92832ee0e0c89bc22e10ecf2f44445e0d300/xutils/date_utils/convert.py#L45-L50
train
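A stdlib-only equivalent sketch of timestamp_to_datetime(), using datetime.timezone.utc in place of pytz (fromtimestamp with an explicit tz also avoids the deprecated utcfromtimestamp):

```python
import datetime

def ts_to_dt(time_stamp, localized=True):
    # Interpret the POSIX timestamp as UTC; drop tzinfo for the naive variant.
    ret = datetime.datetime.fromtimestamp(time_stamp, tz=datetime.timezone.utc)
    return ret if localized else ret.replace(tzinfo=None)

print(ts_to_dt(0))         # 1970-01-01 00:00:00+00:00
print(ts_to_dt(0, False))  # 1970-01-01 00:00:00
```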
noahbenson/pimms
pimms/immutable.py
_imm_default_init
def _imm_default_init(self, *args, **kwargs): ''' An immutable's default initialization function is to accept any number of dictionaries followed by any number of keyword args and to turn them all into the parameters of the immutable that is being created. ''' for (k,v) in six.iteritems({k:v for dct in (args + (kwargs,)) for (k,v) in six.iteritems(dct)}): setattr(self, k, v)
python
def _imm_default_init(self, *args, **kwargs): ''' An immutable's default initialization function is to accept any number of dictionaries followed by any number of keyword args and to turn them all into the parameters of the immutable that is being created. ''' for (k,v) in six.iteritems({k:v for dct in (args + (kwargs,)) for (k,v) in six.iteritems(dct)}): setattr(self, k, v)
[ "def", "_imm_default_init", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "{", "k", ":", "v", "for", "dct", "in", "(", "args", "+", "(", "kwargs", ",", ")", ")", "for", "(", "k", ",", "v", ")", "in", "dct", "}", ")", ":", "setattr", "(", "self", ",", "k", ",", "v", ")" ]
An immutable's default initialization function is to accept any number of dictionaries followed by any number of keyword args and to turn them all into the parameters of the immutable that is being created.
[ "An", "immutable", "s", "defalt", "initialization", "function", "is", "to", "accept", "any", "number", "of", "dictionaries", "followed", "by", "any", "number", "of", "keyword", "args", "and", "to", "turn", "them", "all", "into", "the", "parameters", "of", "the", "immutable", "that", "is", "being", "created", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L94-L101
train
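The comprehension in _imm_default_init collapses positional dicts plus kwargs into one mapping, with later arguments winning key collisions (kwargs last of all). The same semantics in isolation, using .items() where the source uses six.iteritems:

```python
# Merge semantics of _imm_default_init's comprehension: later dicts override
# earlier ones, and kwargs override everything.
def merged(*args, **kwargs):
    return {k: v for dct in (args + (kwargs,)) for (k, v) in dct.items()}

print(merged({'a': 1, 'b': 2}, {'b': 3}, c=4))
# -> {'a': 1, 'b': 3, 'c': 4}
```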
noahbenson/pimms
pimms/immutable.py
_imm_init_getattribute
def _imm_init_getattribute(self, name): ''' During the initial transient state, getattribute works on params; as soon as a non-param is requested, all checks are forced and the getattr switches to standard transient form. ''' values = _imm_value_data(self) params = _imm_param_data(self) if name in values: _imm_init_to_trans(self) return getattr(self, name) elif name in params: dd = object.__getattribute__(self, '__dict__') if name in dd: return dd[name] else: raise RuntimeError('Required immutable parameter %s requested before set' % name) else: # if they request a required param before it's set, raise an exception; that's fine return object.__getattribute__(self, name)
python
def _imm_init_getattribute(self, name): ''' During the initial transient state, getattribute works on params; as soon as a non-param is requested, all checks are forced and the getattr switches to standard transient form. ''' values = _imm_value_data(self) params = _imm_param_data(self) if name in values: _imm_init_to_trans(self) return getattr(self, name) elif name in params: dd = object.__getattribute__(self, '__dict__') if name in dd: return dd[name] else: raise RuntimeError('Required immutable parameter %s requested before set' % name) else: # if they request a required param before it's set, raise an exception; that's fine return object.__getattribute__(self, name)
[ "def", "_imm_init_getattribute", "(", "self", ",", "name", ")", ":", "values", "=", "_imm_value_data", "(", "self", ")", "params", "=", "_imm_param_data", "(", "self", ")", "if", "name", "in", "values", ":", "_imm_init_to_trans", "(", "self", ")", "return", "getattr", "(", "self", ",", "name", ")", "elif", "name", "in", "params", ":", "dd", "=", "object", ".", "__getattribute__", "(", "self", ",", "'__dict__'", ")", "if", "name", "in", "dd", ":", "return", "dd", "[", "name", "]", "else", ":", "raise", "RuntimeError", "(", "'Required immutable parameter %s requested before set'", "%", "name", ")", "else", ":", "# if they request a required param before it's set, raise an exception; that's fine", "return", "object", ".", "__getattribute__", "(", "self", ",", "name", ")" ]
During the initial transient state, getattribute works on params; as soon as a non-param is requested, all checks are forced and the getattr switches to standard transient form.
[ "During", "the", "initial", "transient", "state", "getattribute", "works", "on", "params", ";", "as", "soon", "as", "a", "non", "-", "param", "is", "requested", "all", "checks", "are", "forced", "and", "the", "getattr", "switches", "to", "standard", "transient", "form", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L102-L118
train
noahbenson/pimms
pimms/immutable.py
_imm_getattribute
def _imm_getattribute(self, name): ''' An immutable's getattribute calculates lazy values when not yet cached in the object then adds them as attributes. ''' if _imm_is_init(self): return _imm_init_getattribute(self, name) else: dd = object.__getattribute__(self, '__dict__') if name == '__dict__': return dd curval = dd.get(name, dd) if curval is not dd: return dd[name] values = _imm_value_data(self) if name not in values: return object.__getattribute__(self, name) (args, memfn, _) = values[name] value = memfn(*[getattr(self, arg) for arg in args]) dd[name] = value # if this is a const, it may have checks to run if name in _imm_const_data(self): # #TODO # Note that there's a race condition that eventually needs to be handled here: # If dd[name] is set then a check fails, there may have been something that read the # improper value in the meantime try: _imm_check(self, [name]) except: del dd[name] raise # if those pass, then we're fine return value
python
def _imm_getattribute(self, name): ''' An immutable's getattribute calculates lazy values when not yet cached in the object then adds them as attributes. ''' if _imm_is_init(self): return _imm_init_getattribute(self, name) else: dd = object.__getattribute__(self, '__dict__') if name == '__dict__': return dd curval = dd.get(name, dd) if curval is not dd: return dd[name] values = _imm_value_data(self) if name not in values: return object.__getattribute__(self, name) (args, memfn, _) = values[name] value = memfn(*[getattr(self, arg) for arg in args]) dd[name] = value # if this is a const, it may have checks to run if name in _imm_const_data(self): # #TODO # Note that there's a race condition that eventually needs to be handled here: # If dd[name] is set then a check fails, there may have been something that read the # improper value in the meantime try: _imm_check(self, [name]) except: del dd[name] raise # if those pass, then we're fine return value
[ "def", "_imm_getattribute", "(", "self", ",", "name", ")", ":", "if", "_imm_is_init", "(", "self", ")", ":", "return", "_imm_init_getattribute", "(", "self", ",", "name", ")", "else", ":", "dd", "=", "object", ".", "__getattribute__", "(", "self", ",", "'__dict__'", ")", "if", "name", "==", "'__dict__'", ":", "return", "dd", "curval", "=", "dd", ".", "get", "(", "name", ",", "dd", ")", "if", "curval", "is", "not", "dd", ":", "return", "dd", "[", "name", "]", "values", "=", "_imm_value_data", "(", "self", ")", "if", "name", "not", "in", "values", ":", "return", "object", ".", "__getattribute__", "(", "self", ",", "name", ")", "(", "args", ",", "memfn", ",", "_", ")", "=", "values", "[", "name", "]", "value", "=", "memfn", "(", "*", "[", "getattr", "(", "self", ",", "arg", ")", "for", "arg", "in", "args", "]", ")", "dd", "[", "name", "]", "=", "value", "# if this is a const, it may have checks to run", "if", "name", "in", "_imm_const_data", "(", "self", ")", ":", "# #TODO", "# Note that there's a race condition that eventually needs to be handled here:", "# If dd[name] is set then a check fails, there may have been something that read the", "# improper value in the meantime", "try", ":", "_imm_check", "(", "self", ",", "[", "name", "]", ")", "except", ":", "del", "dd", "[", "name", "]", "raise", "# if those pass, then we're fine", "return", "value" ]
An immutable's getattribute calculates lazy values when not yet cached in the object then adds them as attributes.
[ "An", "immutable", "s", "getattribute", "calculates", "lazy", "values", "when", "not", "yet", "cached", "in", "the", "object", "then", "adds", "them", "as", "attributes", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L119-L149
train
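The caching behaviour of _imm_getattribute — compute once, store in __dict__, roll back if a post-computation check fails — can be reproduced with plain __getattr__, since Python only calls __getattr__ on cache misses. A toy with one hard-coded lazy value:

```python
# Toy lazy value with the same cache-then-check shape as _imm_getattribute.
class LazyArea:
    def __init__(self, width, height):
        self.__dict__['width'], self.__dict__['height'] = width, height

    def __getattr__(self, name):          # only runs when name is not cached
        if name != 'area':
            raise AttributeError(name)
        value = self.width * self.height  # memfn(*[getattr(self, a) ...])
        self.__dict__[name] = value       # cache before running the check
        if value < 0:                     # stand-in for a const check
            del self.__dict__[name]       # roll the cache back on failure
            raise RuntimeError('area check failed')
        return value

r = LazyArea(3, 4)
print(r.area)  # computed and cached: 12
print(r.area)  # served from __dict__; __getattr__ is not called again
```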
noahbenson/pimms
pimms/immutable.py
_imm_init_setattr
def _imm_init_setattr(self, name, value): ''' An immutable's initial setattr allows only param's to be set and does not run checks on the new parameters until a full parameter-set has been specified, at which point it runs all checks and switches over to a normal setattr and getattr method. ''' params = _imm_param_data(self) if name in params: tx_fn = params[name][1] value = value if tx_fn is None else tx_fn(value) # Set the value object.__getattribute__(self, '__dict__')[name] = value # No checks are run, as we're in initialization mode... else: raise TypeError( 'Attempt to change non-parameter \'%s\' of initializing immutable' % name)
python
def _imm_init_setattr(self, name, value): ''' An immutable's initial setattr allows only param's to be set and does not run checks on the new parameters until a full parameter-set has been specified, at which point it runs all checks and switches over to a normal setattr and getattr method. ''' params = _imm_param_data(self) if name in params: tx_fn = params[name][1] value = value if tx_fn is None else tx_fn(value) # Set the value object.__getattribute__(self, '__dict__')[name] = value # No checks are run, as we're in initialization mode... else: raise TypeError( 'Attempt to change non-parameter \'%s\' of initializing immutable' % name)
[ "def", "_imm_init_setattr", "(", "self", ",", "name", ",", "value", ")", ":", "params", "=", "_imm_param_data", "(", "self", ")", "if", "name", "in", "params", ":", "tx_fn", "=", "params", "[", "name", "]", "[", "1", "]", "value", "=", "value", "if", "tx_fn", "is", "None", "else", "tx_fn", "(", "value", ")", "# Set the value", "object", ".", "__getattribute__", "(", "self", ",", "'__dict__'", ")", "[", "name", "]", "=", "value", "# No checks are run, as we're in initialization mode...", "else", ":", "raise", "TypeError", "(", "'Attempt to change non-parameter \\'%s\\' of initializing immutable'", "%", "name", ")" ]
An immutable's initial setattr allows only param's to be set and does not run checks on the new parameters until a full parameter-set has been specified, at which point it runs all checks and switches over to a normal setattr and getattr method.
[ "An", "immutable", "s", "initial", "setattr", "allows", "only", "param", "s", "to", "be", "set", "and", "does", "not", "run", "checks", "on", "the", "new", "parameters", "until", "a", "full", "parameter", "-", "set", "has", "been", "specified", "at", "which", "point", "it", "runs", "all", "checks", "and", "switches", "over", "to", "a", "normal", "setattr", "and", "getattr", "method", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L150-L165
train
noahbenson/pimms
pimms/immutable.py
_imm_trans_setattr
def _imm_trans_setattr(self, name, value): ''' An immutable's transient setattr allows params to be set, and runs checks as they are. ''' params = _imm_param_data(self) dd = object.__getattribute__(self, '__dict__') if name in params: (_, tx_fn, arg_lists, check_fns, deps) = params[name] value = value if tx_fn is None else tx_fn(value) old_deps = {} orig_value = dd[name] # clear the dependencies before we run the checks; save them in case the checks fail and we # go back to how things were... for dep in deps: if dep in dd: old_deps[dep] = dd[dep] del dd[dep] try: dd[name] = value for (args, check_fn) in zip(arg_lists, check_fns): if not check_fn(*[getattr(self, arg) for arg in args]): raise RuntimeError( ('Changing value of immutable attribute \'%s\'' + ' caused validation failure: %s') % (name, (args, check_fn))) # if all the checks ran, we don't return the old deps; they are now invalid old_deps = None finally: if old_deps: # in this case, something didn't check-out, so we return the old deps and let the # exception ride; we also return the original value of the edited param for (dep,val) in six.iteritems(old_deps): dd[dep] = val dd[name] = orig_value else: raise TypeError( 'Attempt to change non-parameter member \'%s\' of transient immutable' % name)
python
def _imm_trans_setattr(self, name, value): ''' An immutable's transient setattr allows params to be set, and runs checks as they are. ''' params = _imm_param_data(self) dd = object.__getattribute__(self, '__dict__') if name in params: (_, tx_fn, arg_lists, check_fns, deps) = params[name] value = value if tx_fn is None else tx_fn(value) old_deps = {} orig_value = dd[name] # clear the dependencies before we run the checks; save them in case the checks fail and we # go back to how things were... for dep in deps: if dep in dd: old_deps[dep] = dd[dep] del dd[dep] try: dd[name] = value for (args, check_fn) in zip(arg_lists, check_fns): if not check_fn(*[getattr(self, arg) for arg in args]): raise RuntimeError( ('Changing value of immutable attribute \'%s\'' + ' caused validation failure: %s') % (name, (args, check_fn))) # if all the checks ran, we don't return the old deps; they are now invalid old_deps = None finally: if old_deps: # in this case, something didn't check-out, so we return the old deps and let the # exception ride; we also return the original value of the edited param for (dep,val) in six.iteritems(old_deps): dd[dep] = val dd[name] = orig_value else: raise TypeError( 'Attempt to change non-parameter member \'%s\' of transient immutable' % name)
[ "def", "_imm_trans_setattr", "(", "self", ",", "name", ",", "value", ")", ":", "params", "=", "_imm_param_data", "(", "self", ")", "dd", "=", "object", ".", "__getattribute__", "(", "self", ",", "'__dict__'", ")", "if", "name", "in", "params", ":", "(", "_", ",", "tx_fn", ",", "arg_lists", ",", "check_fns", ",", "deps", ")", "=", "params", "[", "name", "]", "value", "=", "value", "if", "tx_fn", "is", "None", "else", "tx_fn", "(", "value", ")", "old_deps", "=", "{", "}", "orig_value", "=", "dd", "[", "name", "]", "# clear the dependencies before we run the checks; save them in case the checks fail and we", "# go back to how things were...", "for", "dep", "in", "deps", ":", "if", "dep", "in", "dd", ":", "old_deps", "[", "dep", "]", "=", "dd", "[", "dep", "]", "del", "dd", "[", "dep", "]", "try", ":", "dd", "[", "name", "]", "=", "value", "for", "(", "args", ",", "check_fn", ")", "in", "zip", "(", "arg_lists", ",", "check_fns", ")", ":", "if", "not", "check_fn", "(", "*", "[", "getattr", "(", "self", ",", "arg", ")", "for", "arg", "in", "args", "]", ")", ":", "raise", "RuntimeError", "(", "(", "'Changing value of immutable attribute \\'%s\\''", "+", "' caused validation failure: %s'", ")", "%", "(", "name", ",", "(", "args", ",", "check_fn", ")", ")", ")", "# if all the checks ran, we don't return the old deps; they are now invalid", "old_deps", "=", "None", "finally", ":", "if", "old_deps", ":", "# in this case, something didn't check-out, so we return the old deps and let the", "# exception ride; we also return the original value of the edited param", "for", "(", "dep", ",", "val", ")", "in", "six", ".", "iteritems", "(", "old_deps", ")", ":", "dd", "[", "dep", "]", "=", "val", "dd", "[", "name", "]", "=", "orig_value", "else", ":", "raise", "TypeError", "(", "'Attempt to change non-parameter member \\'%s\\' of transient immutable'", "%", "name", ")" ]
An immutable's transient setattr allows params to be set, and runs checks as they are.
[ "An", "immutable", "s", "transient", "setattr", "allows", "params", "to", "be", "set", "and", "runs", "checks", "as", "they", "are", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L166-L201
train
noahbenson/pimms
pimms/immutable.py
_imm_setattr
def _imm_setattr(self, name, value): ''' A persistent immutable's setattr simply does not allow attributes to be set. ''' if _imm_is_persist(self): raise TypeError('Attempt to change parameter \'%s\' of non-transient immutable' % name) elif _imm_is_trans(self): return _imm_trans_setattr(self, name, value) else: return _imm_init_setattr(self, name, value)
python
def _imm_setattr(self, name, value): ''' A persistent immutable's setattr simply does not allow attributes to be set. ''' if _imm_is_persist(self): raise TypeError('Attempt to change parameter \'%s\' of non-transient immutable' % name) elif _imm_is_trans(self): return _imm_trans_setattr(self, name, value) else: return _imm_init_setattr(self, name, value)
[ "def", "_imm_setattr", "(", "self", ",", "name", ",", "value", ")", ":", "if", "_imm_is_persist", "(", "self", ")", ":", "raise", "TypeError", "(", "'Attempt to change parameter \\'%s\\' of non-transient immutable'", "%", "name", ")", "elif", "_imm_is_trans", "(", "self", ")", ":", "return", "_imm_trans_setattr", "(", "self", ",", "name", ",", "value", ")", "else", ":", "return", "_imm_init_setattr", "(", "self", ",", "name", ",", "value", ")" ]
A persistent immutable's setattr simply does not allow attributes to be set.
[ "A", "persistent", "immutable", "s", "setattr", "simply", "does", "not", "allow", "attributes", "to", "be", "set", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L202-L211
train
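Seen together, the three setattr variants form a dispatch on the object's lifecycle state (init, then transient, then persistent). A schematic reduction, with state strings standing in for the _imm_is_init/_imm_is_trans/_imm_is_persist predicates:

```python
# Schematic of the setattr dispatch across the immutable lifecycle states.
def sketch_setattr(state, name):
    if state == 'persistent':
        raise TypeError("Attempt to change parameter '%s' of non-transient immutable" % name)
    if state == 'transient':
        return 'transform, assign, re-run checks, invalidate dependent caches'
    return 'assign raw param; checks deferred until initialization finishes'

print(sketch_setattr('init', 'width'))
print(sketch_setattr('transient', 'width'))
```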
noahbenson/pimms
pimms/immutable.py
_imm_trans_delattr
def _imm_trans_delattr(self, name): ''' A transient immutable's delattr allows the object's value-caches to be invalidated; a var that is deleted returns to its default-value in a transient immutable, otherwise raises an exception. ''' (params, values) = (_imm_param_data(self), _imm_value_data(self)) if name in params: dflt = params[name][0] if dflt is None: raise TypeError( 'Attempt to reset required parameter \'%s\' of immutable' % name) setattr(self, name, dflt[0]) elif name in values: dd = object.__getattribute__(self, '__dict__') if name in dd: del dd[name] if name in _imm_const_data(self): _imm_check(self, [name]) else: raise TypeError('Cannot delete non-value non-param attribute \'%s\' from immutable' % name)
python
def _imm_trans_delattr(self, name): ''' A transient immutable's delattr allows the object's value-caches to be invalidated; a var that is deleted returns to its default-value in a transient immutable, otherwise raises an exception. ''' (params, values) = (_imm_param_data(self), _imm_value_data(self)) if name in params: dflt = params[name][0] if dflt is None: raise TypeError( 'Attempt to reset required parameter \'%s\' of immutable' % name) setattr(self, name, dflt[0]) elif name in values: dd = object.__getattribute__(self, '__dict__') if name in dd: del dd[name] if name in _imm_const_data(self): _imm_check(self, [name]) else: raise TypeError('Cannot delete non-value non-param attribute \'%s\' from immutable' % name)
[ "def", "_imm_trans_delattr", "(", "self", ",", "name", ")", ":", "(", "params", ",", "values", ")", "=", "(", "_imm_param_data", "(", "self", ")", ",", "_imm_value_data", "(", "self", ")", ")", "if", "name", "in", "params", ":", "dflt", "=", "params", "[", "name", "]", "[", "0", "]", "if", "dflt", "is", "None", ":", "raise", "TypeError", "(", "'Attempt to reset required parameter \\'%s\\' of immutable'", "%", "name", ")", "setattr", "(", "self", ",", "name", ",", "dflt", "[", "0", "]", ")", "elif", "name", "in", "values", ":", "dd", "=", "object", ".", "__getattribute__", "(", "self", ",", "'__dict__'", ")", "if", "name", "in", "dd", ":", "del", "dd", "[", "name", "]", "if", "name", "in", "_imm_const_data", "(", "self", ")", ":", "_imm_check", "(", "imm", ",", "[", "name", "]", ")", "else", ":", "raise", "TypeError", "(", "'Cannot delete non-value non-param attribute \\'%s\\' from immutable'", "%", "name", ")" ]
A transient immutable's delattr allows the object's value-caches to be invalidated; a var that is deleted returns to its default-value in a transient immutable, otherwise raises an exception.
[ "A", "transient", "immutable", "s", "delattr", "allows", "the", "object", "s", "value", "-", "caches", "to", "be", "invalidated", ";", "a", "var", "that", "is", "deleted", "returns", "to", "its", "default", "-", "value", "in", "a", "transient", "immutable", "otherwise", "raises", "an", "exception", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L212-L230
train
noahbenson/pimms
pimms/immutable.py
_imm_delattr
def _imm_delattr(self, name): ''' A persistent immutable's delattr allows the object's value-caches to be invalidated, otherwise raises an exception. ''' if _imm_is_persist(self): values = _imm_value_data(self) if name in values: dd = object.__getattribute__(self, '__dict__') if name in dd: del dd[name] if name in _imm_const_data(self): _imm_check(self, [name]) else: raise TypeError('Attempt to reset parameter \'%s\' of non-transient immutable' % name) else: return _imm_trans_delattr(self, name)
python
def _imm_delattr(self, name): ''' A persistent immutable's delattr allows the object's value-caches to be invalidated, otherwise raises an exception. ''' if _imm_is_persist(self): values = _imm_value_data(self) if name in values: dd = object.__getattribute__(self, '__dict__') if name in dd: del dd[name] if name in _imm_const_data(self): _imm_check(self, [name]) else: raise TypeError('Attempt to reset parameter \'%s\' of non-transient immutable' % name) else: return _imm_trans_delattr(self, name)
[ "def", "_imm_delattr", "(", "self", ",", "name", ")", ":", "if", "_imm_is_persist", "(", "self", ")", ":", "values", "=", "_imm_value_data", "(", "self", ")", "if", "name", "in", "values", ":", "dd", "=", "object", ".", "__getattribute__", "(", "self", ",", "'__dict__'", ")", "if", "name", "in", "dd", ":", "del", "dd", "[", "name", "]", "if", "name", "in", "_imm_const_data", "(", "self", ")", ":", "_imm_check", "(", "imm", ",", "[", "name", "]", ")", "else", ":", "raise", "TypeError", "(", "'Attempt to reset parameter \\'%s\\' of non-transient immutable'", "%", "name", ")", "else", ":", "return", "_imm_trans_delattr", "(", "self", ",", "name", ")" ]
A persistent immutable's delattr allows the object's value-caches to be invalidated, otherwise raises an exception.
[ "A", "persistent", "immutable", "s", "delattr", "allows", "the", "object", "s", "value", "-", "caches", "to", "be", "invalidated", "otherwise", "raises", "an", "exception", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L231-L246
train
noahbenson/pimms
pimms/immutable.py
_imm_dir
def _imm_dir(self): ''' An immutable object's dir function should list not only its attributes, but also its un-cached lazy values. ''' dir0 = set(dir(self.__class__)) dir0.update(self.__dict__.keys()) dir0.update(six.iterkeys(_imm_value_data(self))) return sorted(list(dir0))
python
def _imm_dir(self): ''' An immutable object's dir function should list not only its attributes, but also its un-cached lazy values. ''' dir0 = set(dir(self.__class__)) dir0.update(self.__dict__.keys()) dir0.update(six.iterkeys(_imm_value_data(self))) return sorted(list(dir0))
[ "def", "_imm_dir", "(", "self", ")", ":", "dir0", "=", "set", "(", "dir", "(", "self", ".", "__class__", ")", ")", "dir0", ".", "update", "(", "self", ".", "__dict__", ".", "keys", "(", ")", ")", "dir0", ".", "update", "(", "six", ".", "iterkeys", "(", "_imm_value_data", "(", "self", ")", ")", ")", "return", "sorted", "(", "list", "(", "dir0", ")", ")" ]
An immutable object's dir function should list not only its attributes, but also its un-cached lazy values.
[ "An", "immutable", "object", "s", "dir", "function", "should", "list", "not", "only", "its", "attributes", "but", "also", "its", "un", "-", "cached", "lazy", "values", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L247-L255
train
noahbenson/pimms
pimms/immutable.py
_imm_repr
def _imm_repr(self): ''' The default representation function for an immutable object. ''' return (type(self).__name__ + ('(' if _imm_is_persist(self) else '*(') + ', '.join([k + '=' + str(v) for (k,v) in six.iteritems(imm_params(self))]) + ')')
python
def _imm_repr(self): ''' The default representation function for an immutable object. ''' return (type(self).__name__ + ('(' if _imm_is_persist(self) else '*(') + ', '.join([k + '=' + str(v) for (k,v) in six.iteritems(imm_params(self))]) + ')')
[ "def", "_imm_repr", "(", "self", ")", ":", "return", "(", "type", "(", "self", ")", ".", "__name__", "+", "(", "'('", "if", "_imm_is_persist", "(", "self", ")", "else", "'*('", ")", "+", "', '", ".", "join", "(", "[", "k", "+", "'='", "+", "str", "(", "v", ")", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "imm_params", "(", "self", ")", ")", "]", ")", "+", "')'", ")" ]
The default representation function for an immutable object.
[ "The", "default", "representation", "function", "for", "an", "immutable", "object", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L256-L263
train
noahbenson/pimms
pimms/immutable.py
_imm_new
def _imm_new(cls): ''' All immutable new classes use a hack to make sure the post-init cleanup occurs. ''' imm = object.__new__(cls) # Note that right now imm has a normal setattr method; # Give any parameter that has one a default value params = cls._pimms_immutable_data_['params'] for (p,dat) in six.iteritems(params): dat = dat[0] if dat: object.__setattr__(imm, p, dat[0]) # Clear any values; they are not allowed yet _imm_clear(imm) # Note that we are initializing... dd = object.__getattribute__(imm, '__dict__') dd['_pimms_immutable_is_init'] = True # That should do it! return imm
python
def _imm_new(cls): ''' All immutable new classes use a hack to make sure the post-init cleanup occurs. ''' imm = object.__new__(cls) # Note that right now imm has a normal setattr method; # Give any parameter that has one a default value params = cls._pimms_immutable_data_['params'] for (p,dat) in six.iteritems(params): dat = dat[0] if dat: object.__setattr__(imm, p, dat[0]) # Clear any values; they are not allowed yet _imm_clear(imm) # Note that we are initializing... dd = object.__getattribute__(imm, '__dict__') dd['_pimms_immutable_is_init'] = True # That should do it! return imm
[ "def", "_imm_new", "(", "cls", ")", ":", "imm", "=", "object", ".", "__new__", "(", "cls", ")", "# Note that right now imm has a normal setattr method;", "# Give any parameter that has one a default value", "params", "=", "cls", ".", "_pimms_immutable_data_", "[", "'params'", "]", "for", "(", "p", ",", "dat", ")", "in", "six", ".", "iteritems", "(", "params", ")", ":", "dat", "=", "dat", "[", "0", "]", "if", "dat", ":", "object", ".", "__setattr__", "(", "imm", ",", "p", ",", "dat", "[", "0", "]", ")", "# Clear any values; they are not allowed yet", "_imm_clear", "(", "imm", ")", "# Note that we are initializing...", "dd", "=", "object", ".", "__getattribute__", "(", "imm", ",", "'__dict__'", ")", "dd", "[", "'_pimms_immutable_is_init'", "]", "=", "True", "# That should do it!", "return", "imm" ]
All immutable new classes use a hack to make sure the post-init cleanup occurs.
[ "All", "immutable", "new", "classes", "use", "a", "hack", "to", "make", "sure", "the", "post", "-", "init", "cleanup", "occurs", "." ]
9051b86d6b858a7a13511b72c48dc21bc903dab2
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L264-L281
train
tweekmonster/moult
moult/filesystem_scanner.py
_scan_file
def _scan_file(filename, sentinel, source_type='import'): '''Generator that performs the actual scanning of files. Yields a tuple containing import type, import path, and an extra file that should be scanned. Extra file scans should be the file or directory that relates to the import name. ''' filename = os.path.abspath(filename) real_filename = os.path.realpath(filename) if os.path.getsize(filename) <= max_file_size: if real_filename not in sentinel and os.path.isfile(filename): sentinel.add(real_filename) basename = os.path.basename(filename) scope, imports = ast_scan_file(filename) if scope is not None and imports is not None: for imp in imports: yield (source_type, imp.module, None) if 'INSTALLED_APPS' in scope and basename == 'settings.py': log.info('Found Django settings: %s', filename) for item in django.handle_django_settings(filename): yield item else: log.warn('Could not scan imports from: %s', filename) else: log.warn('File size too large: %s', filename)
python
def _scan_file(filename, sentinel, source_type='import'): '''Generator that performs the actual scanning of files. Yields a tuple containing import type, import path, and an extra file that should be scanned. Extra file scans should be the file or directory that relates to the import name. ''' filename = os.path.abspath(filename) real_filename = os.path.realpath(filename) if os.path.getsize(filename) <= max_file_size: if real_filename not in sentinel and os.path.isfile(filename): sentinel.add(real_filename) basename = os.path.basename(filename) scope, imports = ast_scan_file(filename) if scope is not None and imports is not None: for imp in imports: yield (source_type, imp.module, None) if 'INSTALLED_APPS' in scope and basename == 'settings.py': log.info('Found Django settings: %s', filename) for item in django.handle_django_settings(filename): yield item else: log.warn('Could not scan imports from: %s', filename) else: log.warn('File size too large: %s', filename)
[ "def", "_scan_file", "(", "filename", ",", "sentinel", ",", "source_type", "=", "'import'", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "real_filename", "=", "os", ".", "path", ".", "realpath", "(", "filename", ")", "if", "os", ".", "path", ".", "getsize", "(", "filename", ")", "<=", "max_file_size", ":", "if", "real_filename", "not", "in", "sentinel", "and", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "sentinel", ".", "add", "(", "real_filename", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "scope", ",", "imports", "=", "ast_scan_file", "(", "filename", ")", "if", "scope", "is", "not", "None", "and", "imports", "is", "not", "None", ":", "for", "imp", "in", "imports", ":", "yield", "(", "source_type", ",", "imp", ".", "module", ",", "None", ")", "if", "'INSTALLED_APPS'", "in", "scope", "and", "basename", "==", "'settings.py'", ":", "log", ".", "info", "(", "'Found Django settings: %s'", ",", "filename", ")", "for", "item", "in", "django", ".", "handle_django_settings", "(", "filename", ")", ":", "yield", "item", "else", ":", "log", ".", "warn", "(", "'Could not scan imports from: %s'", ",", "filename", ")", "else", ":", "log", ".", "warn", "(", "'File size too large: %s'", ",", "filename", ")" ]
Generator that performs the actual scanning of files. Yields a tuple containing import type, import path, and an extra file that should be scanned. Extra file scans should be the file or directory that relates to the import name.
[ "Generator", "that", "performs", "the", "actual", "scanning", "of", "files", "." ]
38d3a3b9002336219897ebe263ca1d8dcadbecf5
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/filesystem_scanner.py#L20-L48
train
tweekmonster/moult
moult/filesystem_scanner.py
_scan_directory
def _scan_directory(directory, sentinel, depth=0): '''Basically os.listdir with some filtering. ''' directory = os.path.abspath(directory) real_directory = os.path.realpath(directory) if depth < max_directory_depth and real_directory not in sentinel \ and os.path.isdir(directory): sentinel.add(real_directory) for item in os.listdir(directory): if item in ('.', '..'): # I'm not sure if this is even needed any more. continue p = os.path.abspath(os.path.join(directory, item)) if (os.path.isdir(p) and _dir_ignore.search(p)) \ or (os.path.isfile(p) and _ext_ignore.search(p)): continue yield p
python
def _scan_directory(directory, sentinel, depth=0): '''Basically os.listdir with some filtering. ''' directory = os.path.abspath(directory) real_directory = os.path.realpath(directory) if depth < max_directory_depth and real_directory not in sentinel \ and os.path.isdir(directory): sentinel.add(real_directory) for item in os.listdir(directory): if item in ('.', '..'): # I'm not sure if this is even needed any more. continue p = os.path.abspath(os.path.join(directory, item)) if (os.path.isdir(p) and _dir_ignore.search(p)) \ or (os.path.isfile(p) and _ext_ignore.search(p)): continue yield p
[ "def", "_scan_directory", "(", "directory", ",", "sentinel", ",", "depth", "=", "0", ")", ":", "directory", "=", "os", ".", "path", ".", "abspath", "(", "directory", ")", "real_directory", "=", "os", ".", "path", ".", "realpath", "(", "directory", ")", "if", "depth", "<", "max_directory_depth", "and", "real_directory", "not", "in", "sentinel", "and", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "sentinel", ".", "add", "(", "real_directory", ")", "for", "item", "in", "os", ".", "listdir", "(", "directory", ")", ":", "if", "item", "in", "(", "'.'", ",", "'..'", ")", ":", "# I'm not sure if this is even needed any more.", "continue", "p", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "item", ")", ")", "if", "(", "os", ".", "path", ".", "isdir", "(", "p", ")", "and", "_dir_ignore", ".", "search", "(", "p", ")", ")", "or", "(", "os", ".", "path", ".", "isfile", "(", "p", ")", "and", "_ext_ignore", ".", "search", "(", "p", ")", ")", ":", "continue", "yield", "p" ]
Basically os.listdir with some filtering.
[ "Basically", "os", ".", "listdir", "with", "some", "filtering", "." ]
38d3a3b9002336219897ebe263ca1d8dcadbecf5
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/filesystem_scanner.py#L51-L71
train
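A hypothetical driver that ties the two generators together: recurse with _scan_directory and hand each Python file to _scan_file, sharing one sentinel set so each realpath is visited at most once. The invocation is commented out because the generators depend on moult module globals (log, django, ast_scan_file, the size and depth limits):

```python
import os

# Hypothetical driver for the two generators above; shares one sentinel set
# so each realpath (file or directory) is scanned at most once.
def scan_tree(root, depth=0, sentinel=None):
    sentinel = set() if sentinel is None else sentinel
    for path in _scan_directory(root, sentinel, depth):
        if os.path.isdir(path):
            for item in scan_tree(path, depth + 1, sentinel):
                yield item
        elif path.endswith('.py'):
            for item in _scan_file(path, sentinel):
                yield item

# for source_type, module, extra in scan_tree('.'):
#     print(source_type, module)
```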
mkoura/dump2polarion
dump2polarion/configuration.py
_get_project_conf
def _get_project_conf(): """Loads configuration from project config file.""" config_settings = {} project_root = find_vcs_root(".") if project_root is None: return config_settings for conf_dir in PROJECT_CONF_DIRS: conf_dir = conf_dir.lstrip("./") joined_dir = os.path.join(project_root, conf_dir) if conf_dir else project_root joined_glob = os.path.join(joined_dir, PROJECT_CONF) conf_files = glob.glob(joined_glob) # config files found, not trying other directories if conf_files: break else: conf_files = [] for conf_file in conf_files: try: with io.open(conf_file, encoding="utf-8") as input_file: loaded_settings = yaml.safe_load(input_file) except EnvironmentError: logger.warning("Failed to load config from %s", conf_file) else: logger.info("Config loaded from %s", conf_file) config_settings.update(loaded_settings) return config_settings
python
def _get_project_conf(): """Loads configuration from project config file.""" config_settings = {} project_root = find_vcs_root(".") if project_root is None: return config_settings for conf_dir in PROJECT_CONF_DIRS: conf_dir = conf_dir.lstrip("./") joined_dir = os.path.join(project_root, conf_dir) if conf_dir else project_root joined_glob = os.path.join(joined_dir, PROJECT_CONF) conf_files = glob.glob(joined_glob) # config files found, not trying other directories if conf_files: break else: conf_files = [] for conf_file in conf_files: try: with io.open(conf_file, encoding="utf-8") as input_file: loaded_settings = yaml.safe_load(input_file) except EnvironmentError: logger.warning("Failed to load config from %s", conf_file) else: logger.info("Config loaded from %s", conf_file) config_settings.update(loaded_settings) return config_settings
[ "def", "_get_project_conf", "(", ")", ":", "config_settings", "=", "{", "}", "project_root", "=", "find_vcs_root", "(", "\".\"", ")", "if", "project_root", "is", "None", ":", "return", "config_settings", "for", "conf_dir", "in", "PROJECT_CONF_DIRS", ":", "conf_dir", "=", "conf_dir", ".", "lstrip", "(", "\"./\"", ")", "joined_dir", "=", "os", ".", "path", ".", "join", "(", "project_root", ",", "conf_dir", ")", "if", "conf_dir", "else", "project_root", "joined_glob", "=", "os", ".", "path", ".", "join", "(", "joined_dir", ",", "PROJECT_CONF", ")", "conf_files", "=", "glob", ".", "glob", "(", "joined_glob", ")", "# config files found, not trying other directories", "if", "conf_files", ":", "break", "else", ":", "conf_files", "=", "[", "]", "for", "conf_file", "in", "conf_files", ":", "try", ":", "with", "io", ".", "open", "(", "conf_file", ",", "encoding", "=", "\"utf-8\"", ")", "as", "input_file", ":", "loaded_settings", "=", "yaml", ".", "safe_load", "(", "input_file", ")", "except", "EnvironmentError", ":", "logger", ".", "warning", "(", "\"Failed to load config from %s\"", ",", "conf_file", ")", "else", ":", "logger", ".", "info", "(", "\"Config loaded from %s\"", ",", "conf_file", ")", "config_settings", ".", "update", "(", "loaded_settings", ")", "return", "config_settings" ]
Loads configuration from project config file.
[ "Loads", "configuration", "from", "project", "config", "file", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/configuration.py#L131-L160
train
mkoura/dump2polarion
dump2polarion/configuration.py
get_config
def get_config(config_file=None, config_values=None, load_project_conf=True): """Loads config file and returns its content.""" config_values = config_values or {} config_settings = {} default_conf = _get_default_conf() user_conf = _get_user_conf(config_file) if config_file else {} # load project configuration only when user configuration was not specified project_conf = {} if user_conf or not load_project_conf else _get_project_conf() if not (user_conf or project_conf or config_values): if load_project_conf: raise Dump2PolarionException( "Failed to find configuration file for the project " "and no configuration file or values passed." ) raise Dump2PolarionException("No configuration file or values passed.") # merge configuration config_settings.update(default_conf) config_settings.update(user_conf) config_settings.update(project_conf) config_settings.update(config_values) _populate_urls(config_settings) _set_legacy_project_id(config_settings) _set_legacy_custom_fields(config_settings) _check_config(config_settings) return config_settings
python
def get_config(config_file=None, config_values=None, load_project_conf=True): """Loads config file and returns its content.""" config_values = config_values or {} config_settings = {} default_conf = _get_default_conf() user_conf = _get_user_conf(config_file) if config_file else {} # load project configuration only when user configuration was not specified project_conf = {} if user_conf or not load_project_conf else _get_project_conf() if not (user_conf or project_conf or config_values): if load_project_conf: raise Dump2PolarionException( "Failed to find configuration file for the project " "and no configuration file or values passed." ) raise Dump2PolarionException("No configuration file or values passed.") # merge configuration config_settings.update(default_conf) config_settings.update(user_conf) config_settings.update(project_conf) config_settings.update(config_values) _populate_urls(config_settings) _set_legacy_project_id(config_settings) _set_legacy_custom_fields(config_settings) _check_config(config_settings) return config_settings
[ "def", "get_config", "(", "config_file", "=", "None", ",", "config_values", "=", "None", ",", "load_project_conf", "=", "True", ")", ":", "config_values", "=", "config_values", "or", "{", "}", "config_settings", "=", "{", "}", "default_conf", "=", "_get_default_conf", "(", ")", "user_conf", "=", "_get_user_conf", "(", "config_file", ")", "if", "config_file", "else", "{", "}", "# load project configuration only when user configuration was not specified", "project_conf", "=", "{", "}", "if", "user_conf", "or", "not", "load_project_conf", "else", "_get_project_conf", "(", ")", "if", "not", "(", "user_conf", "or", "project_conf", "or", "config_values", ")", ":", "if", "load_project_conf", ":", "raise", "Dump2PolarionException", "(", "\"Failed to find configuration file for the project \"", "\"and no configuration file or values passed.\"", ")", "raise", "Dump2PolarionException", "(", "\"No configuration file or values passed.\"", ")", "# merge configuration", "config_settings", ".", "update", "(", "default_conf", ")", "config_settings", ".", "update", "(", "user_conf", ")", "config_settings", ".", "update", "(", "project_conf", ")", "config_settings", ".", "update", "(", "config_values", ")", "_populate_urls", "(", "config_settings", ")", "_set_legacy_project_id", "(", "config_settings", ")", "_set_legacy_custom_fields", "(", "config_settings", ")", "_check_config", "(", "config_settings", ")", "return", "config_settings" ]
Loads config file and returns its content.
[ "Loads", "config", "file", "and", "returns", "its", "content", "." ]
f4bd24e9d5070e282aad15f1e8bb514c0525cd37
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/configuration.py#L163-L192
train
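A hedged usage sketch for get_config; the configuration key shown is an illustrative placeholder, and _check_config (defined elsewhere in the module, not shown here) may reject settings that lack required keys:

from dump2polarion.configuration import get_config

# supply values directly and skip project-level discovery;
# "polarion-project-id" is a placeholder key for this sketch
config = get_config(
    config_values={"polarion-project-id": "PLACEHOLDER"},
    load_project_conf=False,
)

# with no file, no values and load_project_conf=False, the call raises
# Dump2PolarionException("No configuration file or values passed.")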
ngmarchant/oasis
oasis/input_verification.py
verify_predictions
def verify_predictions(predictions): """Ensures that predictions is stored as a numpy array and checks that all values are either 0 or 1. """ # Check that it contains only zeros and ones predictions = np.array(predictions, copy=False) if not np.array_equal(predictions, predictions.astype(bool)): raise ValueError("predictions contains invalid values. " + "The only permitted values are 0 or 1.") if predictions.ndim == 1: predictions = predictions[:,np.newaxis] return predictions
python
def verify_predictions(predictions): """Ensures that predictions is stored as a numpy array and checks that all values are either 0 or 1. """ # Check that it contains only zeros and ones predictions = np.array(predictions, copy=False) if not np.array_equal(predictions, predictions.astype(bool)): raise ValueError("predictions contains invalid values. " + "The only permitted values are 0 or 1.") if predictions.ndim == 1: predictions = predictions[:,np.newaxis] return predictions
[ "def", "verify_predictions", "(", "predictions", ")", ":", "# Check that it contains only zeros and ones", "predictions", "=", "np", ".", "array", "(", "predictions", ",", "copy", "=", "False", ")", "if", "not", "np", ".", "array_equal", "(", "predictions", ",", "predictions", ".", "astype", "(", "bool", ")", ")", ":", "raise", "ValueError", "(", "\"predictions contains invalid values. \"", "+", "\"The only permitted values are 0 or 1.\"", ")", "if", "predictions", ".", "ndim", "==", "1", ":", "predictions", "=", "predictions", "[", ":", ",", "np", ".", "newaxis", "]", "return", "predictions" ]
Ensures that predictions is stored as a numpy array and checks that all values are either 0 or 1.
[ "Ensures", "that", "predictions", "is", "stored", "as", "a", "numpy", "array", "and", "checks", "that", "all", "values", "are", "either", "0", "or", "1", "." ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/input_verification.py#L10-L21
train
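A quick usage sketch for verify_predictions, assuming the module-level numpy import implied by the tokens:

import numpy as np
from oasis.input_verification import verify_predictions

preds = verify_predictions([0, 1, 1, 0])
print(preds.shape)   # (4, 1) -- a 1-D input is promoted to a column vector

verify_predictions([0, 2, 1])   # raises ValueError: only 0 or 1 are permitted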
ngmarchant/oasis
oasis/input_verification.py
verify_scores
def verify_scores(scores): """Ensures that scores is stored as a numpy array and checks that all values are finite. """ scores = np.array(scores, copy=False) if np.any(~np.isfinite(scores)): raise ValueError("scores contains invalid values. " + "Please check that all values are finite.") if scores.ndim == 1: scores = scores[:,np.newaxis] return scores
python
def verify_scores(scores): """Ensures that scores is stored as a numpy array and checks that all values are finite. """ scores = np.array(scores, copy=False) if np.any(~np.isfinite(scores)): raise ValueError("scores contains invalid values. " + "Please check that all values are finite.") if scores.ndim == 1: scores = scores[:,np.newaxis] return scores
[ "def", "verify_scores", "(", "scores", ")", ":", "scores", "=", "np", ".", "array", "(", "scores", ",", "copy", "=", "False", ")", "if", "np", ".", "any", "(", "~", "np", ".", "isfinite", "(", "scores", ")", ")", ":", "raise", "ValueError", "(", "\"scores contains invalid values. \"", "+", "\"Please check that all values are finite.\"", ")", "if", "scores", ".", "ndim", "==", "1", ":", "scores", "=", "scores", "[", ":", ",", "np", ".", "newaxis", "]", "return", "scores" ]
Ensures that scores is stored as a numpy array and checks that all values are finite.
[ "Ensures", "that", "scores", "is", "stored", "as", "a", "numpy", "array", "and", "checks", "that", "all", "values", "are", "finite", "." ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/input_verification.py#L23-L33
train
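The companion check behaves the same way for scores; a short sketch:

import numpy as np
from oasis.input_verification import verify_scores

scores = verify_scores([0.3, 0.8, 0.5])
print(scores.shape)   # (3, 1)

verify_scores([0.3, np.inf])   # raises ValueError: all values must be finite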
ngmarchant/oasis
oasis/input_verification.py
verify_consistency
def verify_consistency(predictions, scores, proba, opt_class): """Verifies that all arrays have consistent dimensions. Also verifies that the scores are consistent with proba. Returns ------- proba, opt_class """ if predictions.shape != scores.shape: raise ValueError("predictions and scores arrays have inconsistent " + "dimensions.") n_class = scores.shape[1] if scores.ndim > 1 else 1 # If proba not given, default to False for all classifiers if proba is None: proba = np.repeat(False, n_class) # If opt_class is not given, default to True for all classifiers if opt_class is None: opt_class = np.repeat(True, n_class) # Convert to numpy arrays if necessary proba = np.array(proba, dtype=bool, ndmin=1) opt_class = np.array(opt_class, dtype=bool, ndmin=1) if np.sum(opt_class) < 1: raise ValueError("opt_class should contain at least one True value.") if predictions.shape[1] != len(proba): raise ValueError("mismatch in shape of proba and predictions.") if predictions.shape[1] != len(opt_class): raise ValueError("mismatch in shape of opt_class and predictions.") for m in range(n_class): if (np.any(np.logical_or(scores[:,m] < 0, scores[:,m] > 1)) and proba[m]): warnings.warn("scores fall outside the [0,1] interval for " + "classifier {}. Setting proba[m]=False.".format(m)) proba[m] = False return proba, opt_class
python
def verify_consistency(predictions, scores, proba, opt_class): """Verifies that all arrays have consistent dimensions. Also verifies that the scores are consistent with proba. Returns ------- proba, opt_class """ if predictions.shape != scores.shape: raise ValueError("predictions and scores arrays have inconsistent " + "dimensions.") n_class = scores.shape[1] if scores.ndim > 1 else 1 # If proba not given, default to False for all classifiers if proba is None: proba = np.repeat(False, n_class) # If opt_class is not given, default to True for all classifiers if opt_class is None: opt_class = np.repeat(True, n_class) # Convert to numpy arrays if necessary proba = np.array(proba, dtype=bool, ndmin=1) opt_class = np.array(opt_class, dtype=bool, ndmin=1) if np.sum(opt_class) < 1: raise ValueError("opt_class should contain at least one True value.") if predictions.shape[1] != len(proba): raise ValueError("mismatch in shape of proba and predictions.") if predictions.shape[1] != len(opt_class): raise ValueError("mismatch in shape of opt_class and predictions.") for m in range(n_class): if (np.any(np.logical_or(scores[:,m] < 0, scores[:,m] > 1)) and proba[m]): warnings.warn("scores fall outside the [0,1] interval for " + "classifier {}. Setting proba[m]=False.".format(m)) proba[m] = False return proba, opt_class
[ "def", "verify_consistency", "(", "predictions", ",", "scores", ",", "proba", ",", "opt_class", ")", ":", "if", "predictions", ".", "shape", "!=", "scores", ".", "shape", ":", "raise", "ValueError", "(", "\"predictions and scores arrays have inconsistent \"", "+", "\"dimensions.\"", ")", "n_class", "=", "scores", ".", "shape", "[", "1", "]", "if", "scores", ".", "ndim", ">", "1", "else", "1", "# If proba not given, default to False for all classifiers", "if", "proba", "is", "None", ":", "proba", "=", "np", ".", "repeat", "(", "False", ",", "n_class", ")", "# If opt_class is not given, default to True for all classifiers", "if", "opt_class", "is", "None", ":", "opt_class", "=", "np", ".", "repeat", "(", "True", ",", "n_class", ")", "# Convert to numpy arrays if necessary", "proba", "=", "np", ".", "array", "(", "proba", ",", "dtype", "=", "bool", ",", "ndmin", "=", "1", ")", "opt_class", "=", "np", ".", "array", "(", "opt_class", ",", "dtype", "=", "bool", ",", "ndmin", "=", "1", ")", "if", "np", ".", "sum", "(", "opt_class", ")", "<", "1", ":", "raise", "ValueError", "(", "\"opt_class should contain at least one True value.\"", ")", "if", "predictions", ".", "shape", "[", "1", "]", "!=", "len", "(", "proba", ")", ":", "raise", "ValueError", "(", "\"mismatch in shape of proba and predictions.\"", ")", "if", "predictions", ".", "shape", "[", "1", "]", "!=", "len", "(", "opt_class", ")", ":", "raise", "ValueError", "(", "\"mismatch in shape of opt_class and predictions.\"", ")", "for", "m", "in", "range", "(", "n_class", ")", ":", "if", "(", "np", ".", "any", "(", "np", ".", "logical_or", "(", "scores", "[", ":", ",", "m", "]", "<", "0", ",", "scores", "[", ":", ",", "m", "]", ">", "1", ")", ")", "and", "proba", "[", "m", "]", ")", ":", "warnings", ".", "warn", "(", "\"scores fall outside the [0,1] interval for \"", "+", "\"classifier {}. Setting proba[m]=False.\"", ".", "format", "(", "m", ")", ")", "proba", "[", "m", "]", "=", "False", "return", "proba", ",", "opt_class" ]
Verifies that all arrays have consistent dimensions. Also verifies that the scores are consistent with proba. Returns ------- proba, opt_class
[ "Verifies", "that", "all", "arrays", "have", "consistent", "dimensions", ".", "Also", "verifies", "that", "the", "scores", "are", "consistent", "with", "proba", "." ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/input_verification.py#L35-L75
train
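A sketch of the warning path in verify_consistency: scores outside [0, 1] cannot be probabilities, so a True entry in proba is flipped back to False while opt_class takes its default:

from oasis.input_verification import (verify_consistency, verify_predictions,
                                      verify_scores)

predictions = verify_predictions([0, 1, 1])
scores = verify_scores([-1.5, 0.2, 3.0])   # raw margins, outside [0, 1]

proba, opt_class = verify_consistency(predictions, scores, [True], None)
# a warning is emitted; proba is now [False], opt_class defaults to [True]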
ngmarchant/oasis
oasis/input_verification.py
verify_identifiers
def verify_identifiers(identifiers, n_items): """Ensure that identifiers has a compatible length and that its elements are unique""" if identifiers is None: return identifiers identifiers = np.array(identifiers, copy=False) # Check length for consistency if len(identifiers) != n_items: raise ValueError("identifiers has inconsistent dimension.") # Check that identifiers are unique if len(np.unique(identifiers)) != n_items: raise ValueError("identifiers contains duplicate values.") return identifiers
python
def verify_identifiers(identifiers, n_items): """Ensure that identifiers has a compatible length and that its elements are unique""" if identifiers is None: return identifiers identifiers = np.array(identifiers, copy=False) # Check length for consistency if len(identifiers) != n_items: raise ValueError("identifiers has inconsistent dimension.") # Check that identifiers are unique if len(np.unique(identifiers)) != n_items: raise ValueError("identifiers contains duplicate values.") return identifiers
[ "def", "verify_identifiers", "(", "identifiers", ",", "n_items", ")", ":", "if", "identifiers", "is", "None", ":", "return", "identifiers", "identifiers", "=", "np", ".", "array", "(", "identifiers", ",", "copy", "=", "False", ")", "# Check length for consistency", "if", "len", "(", "identifiers", ")", "!=", "n_items", ":", "raise", "ValueError", "(", "\"identifiers has inconsistent dimension.\"", ")", "# Check that identifiers are unique", "if", "len", "(", "np", ".", "unique", "(", "identifiers", ")", ")", "!=", "n_items", ":", "raise", "ValueError", "(", "\"identifiers contains duplicate values.\"", ")", "return", "identifiers" ]
Ensure that identifiers has a compatible length and that its elements are unique
[ "Ensure", "that", "identifiers", "has", "a", "compatible", "length", "and", "that", "its", "elements", "are", "unique" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/input_verification.py#L91-L107
train
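A usage sketch for verify_identifiers:

from oasis.input_verification import verify_identifiers

ids = verify_identifiers(["a", "b", "c"], n_items=3)   # returned as a numpy array
verify_identifiers(None, n_items=3)                    # None passes through untouched
verify_identifiers(["a", "a", "c"], n_items=3)         # raises ValueError: duplicates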
ngmarchant/oasis
oasis/input_verification.py
scores_to_probs
def scores_to_probs(scores, proba, eps=0.01): """Transforms scores to probabilities by applying the logistic function""" if np.any(~proba): # Need to convert some of the scores into probabilities probs = copy.deepcopy(scores) n_class = len(proba) for m in range(n_class): if not proba[m]: #TODO: incorporate threshold (currently assuming zero) # find most extreme absolute score max_extreme_score = max(np.abs(np.min(scores[:,m])),\ np.abs(np.max(scores[:,m]))) k = np.log((1-eps)/eps)/max_extreme_score # scale factor probs[:,m] = expit(k * scores[:,m]) return probs else: return scores
python
def scores_to_probs(scores, proba, eps=0.01): """Transforms scores to probabilities by applying the logistic function""" if np.any(~proba): # Need to convert some of the scores into probabilities probs = copy.deepcopy(scores) n_class = len(proba) for m in range(n_class): if not proba[m]: #TODO: incorporate threshold (currently assuming zero) # find most extreme absolute score max_extreme_score = max(np.abs(np.min(scores[:,m])),\ np.abs(np.max(scores[:,m]))) k = np.log((1-eps)/eps)/max_extreme_score # scale factor probs[:,m] = expit(k * scores[:,m]) return probs else: return scores
[ "def", "scores_to_probs", "(", "scores", ",", "proba", ",", "eps", "=", "0.01", ")", ":", "if", "np", ".", "any", "(", "~", "proba", ")", ":", "# Need to convert some of the scores into probabilities", "probs", "=", "copy", ".", "deepcopy", "(", "scores", ")", "n_class", "=", "len", "(", "proba", ")", "for", "m", "in", "range", "(", "n_class", ")", ":", "if", "not", "proba", "[", "m", "]", ":", "#TODO: incorporate threshold (currently assuming zero)", "# find most extreme absolute score", "max_extreme_score", "=", "max", "(", "np", ".", "abs", "(", "np", ".", "min", "(", "scores", "[", ":", ",", "m", "]", ")", ")", ",", "np", ".", "abs", "(", "np", ".", "max", "(", "scores", "[", ":", ",", "m", "]", ")", ")", ")", "k", "=", "np", ".", "log", "(", "(", "1", "-", "eps", ")", "/", "eps", ")", "/", "max_extreme_score", "# scale factor", "self", ".", "_probs", "[", ":", ",", "m", "]", "=", "expit", "(", "k", "*", "self", ".", "scores", "[", ":", ",", "m", "]", ")", "return", "probs", "else", ":", "return", "scores" ]
Transforms scores to probabilities by applying the logistic function
[ "Transforms", "scores", "to", "probabilities", "by", "applying", "the", "logistic", "function" ]
28a037a8924b85ae97db8a93960a910a219d6a4a
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/input_verification.py#L117-L133
train
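With the stray self. references corrected as above, a small check of the scaling (module-level imports of copy, numpy as np and scipy.special.expit are assumed): the most extreme absolute score maps to eps and 1 - eps:

import numpy as np

scores = np.array([[-2.0], [0.0], [2.0]])   # raw margins around a zero threshold
probs = scores_to_probs(scores, proba=np.array([False]), eps=0.01)
# k = log(0.99/0.01)/2, so probs ≈ [[0.01], [0.5], [0.99]]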
praekeltfoundation/seed-message-sender
message_sender/tasks.py
fire_metric
def fire_metric(metric_name, metric_value): """ Fires a metric using the MetricsApiClient """ metric_value = float(metric_value) metric = {metric_name: metric_value} metric_client.fire_metrics(**metric) return "Fired metric <{}> with value <{}>".format(metric_name, metric_value)
python
def fire_metric(metric_name, metric_value): """ Fires a metric using the MetricsApiClient """ metric_value = float(metric_value) metric = {metric_name: metric_value} metric_client.fire_metrics(**metric) return "Fired metric <{}> with value <{}>".format(metric_name, metric_value)
[ "def", "fire_metric", "(", "metric_name", ",", "metric_value", ")", ":", "metric_value", "=", "float", "(", "metric_value", ")", "metric", "=", "{", "metric_name", ":", "metric_value", "}", "metric_client", ".", "fire_metrics", "(", "*", "*", "metric", ")", "return", "\"Fired metric <{}> with value <{}>\"", ".", "format", "(", "metric_name", ",", "metric_value", ")" ]
Fires a metric using the MetricsApiClient
[ "Fires", "a", "metric", "using", "the", "MetricsApiClient" ]
257b01635171b9dbe1f5f13baa810c971bb2620e
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L118-L124
train
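fire_metric builds a one-entry dict and unpacks it, which is how dotted metric names reach MetricsApiClient.fire_metrics; a stand-in sketch (the metric name is a placeholder):

def fire_metrics(**metrics):   # stand-in for MetricsApiClient.fire_metrics
    for name, value in metrics.items():
        print("fired", name, value)

metric_name, metric_value = "message.sent.sum", 1
# CPython only requires **kwargs keys to be strings, not valid identifiers,
# so dict unpacking lets dotted names through
fire_metrics(**{metric_name: float(metric_value)})   # fired message.sent.sum 1.0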
praekeltfoundation/seed-message-sender
message_sender/tasks.py
SendMessage.fire_failed_msisdn_lookup
def fire_failed_msisdn_lookup(self, to_identity): """ Fires a webhook in the event of a None to_addr. """ payload = {"to_identity": to_identity} hooks = Hook.objects.filter(event="identity.no_address") for hook in hooks: hook.deliver_hook( None, payload_override={"hook": hook.dict(), "data": payload} )
python
def fire_failed_msisdn_lookup(self, to_identity): """ Fires a webhook in the event of a None to_addr. """ payload = {"to_identity": to_identity} hooks = Hook.objects.filter(event="identity.no_address") for hook in hooks: hook.deliver_hook( None, payload_override={"hook": hook.dict(), "data": payload} )
[ "def", "fire_failed_msisdn_lookup", "(", "self", ",", "to_identity", ")", ":", "payload", "=", "{", "\"to_identity\"", ":", "to_identity", "}", "hooks", "=", "Hook", ".", "objects", ".", "filter", "(", "event", "=", "\"identity.no_address\"", ")", "for", "hook", "in", "hooks", ":", "hook", ".", "deliver_hook", "(", "None", ",", "payload_override", "=", "{", "\"hook\"", ":", "hook", ".", "dict", "(", ")", ",", "\"data\"", ":", "payload", "}", ")" ]
Fires a webhook in the event of a None to_addr.
[ "Fires", "a", "webhook", "in", "the", "event", "of", "a", "None", "to_addr", "." ]
257b01635171b9dbe1f5f13baa810c971bb2620e
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L214-L223
train
praekeltfoundation/seed-message-sender
message_sender/tasks.py
ArchiveOutboundMessages.dump_data
def dump_data(self, filename, queryset): """ Serializes the queryset into a newline separated JSON format, and places it into a gzipped file """ with gzip.open(filename, "wb") as f: for outbound in queryset.iterator(): data = OutboundArchiveSerializer(outbound).data data = JSONRenderer().render(data) f.write(data) f.write("\n".encode("utf-8"))
python
def dump_data(self, filename, queryset): """ Serializes the queryset into a newline separated JSON format, and places it into a gzipped file """ with gzip.open(filename, "wb") as f: for outbound in queryset.iterator(): data = OutboundArchiveSerializer(outbound).data data = JSONRenderer().render(data) f.write(data) f.write("\n".encode("utf-8"))
[ "def", "dump_data", "(", "self", ",", "filename", ",", "queryset", ")", ":", "with", "gzip", ".", "open", "(", "filename", ",", "\"wb\"", ")", "as", "f", ":", "for", "outbound", "in", "queryset", ".", "iterator", "(", ")", ":", "data", "=", "OutboundArchiveSerializer", "(", "outbound", ")", ".", "data", "data", "=", "JSONRenderer", "(", ")", ".", "render", "(", "data", ")", "f", ".", "write", "(", "data", ")", "f", ".", "write", "(", "\"\\n\"", ".", "encode", "(", "\"utf-8\"", ")", ")" ]
Serializes the queryset into a newline separated JSON format, and places it into a gzipped file
[ "Serializes", "the", "queryset", "into", "a", "newline", "separated", "JSON", "format", "and", "places", "it", "into", "a", "gzipped", "file" ]
257b01635171b9dbe1f5f13baa810c971bb2620e
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L486-L496
train
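dump_data writes gzipped newline-delimited JSON; the same pattern with the standard library (stdlib json standing in for the DRF serializer and JSONRenderer, plain dicts standing in for the queryset):

import gzip
import json

records = [{"id": 1, "delivered": True}, {"id": 2, "delivered": False}]
with gzip.open("outbounds.json.gz", "wb") as f:
    for record in records:
        f.write(json.dumps(record).encode("utf-8"))
        f.write(b"\n")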
praekeltfoundation/seed-message-sender
message_sender/tasks.py
ArchiveOutboundMessages.create_archived_outbound
def create_archived_outbound(self, date, filename): """ Creates the required ArchivedOutbound entry with the file specified at `filename` """ with open(filename, "rb") as f: f = File(f) ArchivedOutbounds.objects.create(date=date, archive=f)
python
def create_archived_outbound(self, date, filename): """ Creates the required ArchivedOutbound entry with the file specified at `filename` """ with open(filename, "rb") as f: f = File(f) ArchivedOutbounds.objects.create(date=date, archive=f)
[ "def", "create_archived_outbound", "(", "self", ",", "date", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "f", "=", "File", "(", "f", ")", "ArchivedOutbounds", ".", "objects", ".", "create", "(", "date", "=", "date", ",", "archive", "=", "f", ")" ]
Creates the required ArchivedOutbound entry with the file specified at `filename`
[ "Creates", "the", "required", "ArchivedOutbound", "entry", "with", "the", "file", "specified", "at", "filename" ]
257b01635171b9dbe1f5f13baa810c971bb2620e
https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L498-L505
train
djaodjin/djaodjin-deployutils
deployutils/apps/django/mixins.py
AccessiblesMixin.manages
def manages(self, account): """ Returns ``True`` if the ``request.user`` is a manager for ``account``. ``account`` will be converted to a string and compared to an organization slug. """ account_slug = str(account) for organization in self.request.session.get( 'roles', {}).get('manager', []): if account_slug == organization['slug']: return True return False
python
def manages(self, account): """ Returns ``True`` if the ``request.user`` is a manager for ``account``. ``account`` will be converted to a string and compared to an organization slug. """ account_slug = str(account) for organization in self.request.session.get( 'roles', {}).get('manager', []): if account_slug == organization['slug']: return True return False
[ "def", "manages", "(", "self", ",", "account", ")", ":", "account_slug", "=", "str", "(", "account", ")", "for", "organization", "in", "self", ".", "request", ".", "session", ".", "get", "(", "'roles'", ",", "{", "}", ")", ".", "get", "(", "'manager'", ",", "[", "]", ")", ":", "if", "account_slug", "==", "organization", "[", "'slug'", "]", ":", "return", "True", "return", "False" ]
Returns ``True`` if the ``request.user`` is a manager for ``account``. ``account`` will be converted to a string and compared to an organization slug.
[ "Returns", "True", "if", "the", "request", ".", "user", "is", "a", "manager", "for", "account", ".", "account", "will", "be", "converted", "to", "a", "string", "and", "compared", "to", "an", "organization", "slug", "." ]
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/django/mixins.py#L99-L110
train
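The role check reduces to a lookup in the session's roles mapping; a standalone sketch without the mixin or request object (the session contents are illustrative):

session = {"roles": {"manager": [{"slug": "acme"}, {"slug": "globex"}]}}

def manages(session, account):
    account_slug = str(account)
    return any(org["slug"] == account_slug
               for org in session.get("roles", {}).get("manager", []))

print(manages(session, "acme"))     # True
print(manages(session, "initech"))  # False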
djaodjin/djaodjin-deployutils
deployutils/apps/django/mixins.py
BeforeMixin.get_queryset
def get_queryset(self): """ Implements before date filtering on ``date_field`` """ kwargs = {} if self.ends_at: kwargs.update({'%s__lt' % self.date_field: self.ends_at}) return super(BeforeMixin, self).get_queryset().filter(**kwargs)
python
def get_queryset(self): """ Implements before date filtering on ``date_field`` """ kwargs = {} if self.ends_at: kwargs.update({'%s__lt' % self.date_field: self.ends_at}) return super(BeforeMixin, self).get_queryset().filter(**kwargs)
[ "def", "get_queryset", "(", "self", ")", ":", "kwargs", "=", "{", "}", "if", "self", ".", "ends_at", ":", "kwargs", ".", "update", "(", "{", "'%s__lt'", "%", "self", ".", "date_field", ":", "self", ".", "ends_at", "}", ")", "return", "super", "(", "BeforeMixin", ",", "self", ")", ".", "get_queryset", "(", ")", ".", "filter", "(", "*", "*", "kwargs", ")" ]
Implements before date filtering on ``date_field``
[ "Implements", "before", "date", "filtering", "on", "date_field" ]
a0fe3cf3030dbbf09025c69ce75a69b326565dd8
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/django/mixins.py#L201-L208
train
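The mixin builds its filter keywords dynamically; a sketch of the dict construction (the field name is a placeholder; on a real queryset the call becomes queryset.filter(created_at__lt=ends_at)):

from datetime import datetime

date_field, ends_at = "created_at", datetime(2020, 1, 1)
kwargs = {"%s__lt" % date_field: ends_at} if ends_at else {}
print(kwargs)   # {'created_at__lt': datetime.datetime(2020, 1, 1, 0, 0)}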