repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
django-salesforce/django-salesforce | salesforce/utils.py | get_soap_client | def get_soap_client(db_alias, client_class=None):
"""
Create the SOAP client for the current user logged in the db_alias
The default created client is "beatbox.PythonClient", but an
alternative client is possible. (i.e. other subtype of beatbox.XMLClient)
"""
if not beatbox:
raise InterfaceError("To use SOAP API, you'll need to install the Beatbox package.")
if client_class is None:
client_class = beatbox.PythonClient
soap_client = client_class()
# authenticate
connection = connections[db_alias]
# verify the authenticated connection, because Beatbox can not refresh the token
cursor = connection.cursor()
cursor.urls_request()
auth_info = connections[db_alias].sf_session.auth
access_token = auth_info.get_auth()['access_token']
assert access_token[15] == '!'
org_id = access_token[:15]
url = '/services/Soap/u/{version}/{org_id}'.format(version=salesforce.API_VERSION,
org_id=org_id)
soap_client.useSession(access_token, auth_info.instance_url + url)
return soap_client | python | def get_soap_client(db_alias, client_class=None):
"""
Create the SOAP client for the current user logged in the db_alias
The default created client is "beatbox.PythonClient", but an
alternative client is possible. (i.e. other subtype of beatbox.XMLClient)
"""
if not beatbox:
raise InterfaceError("To use SOAP API, you'll need to install the Beatbox package.")
if client_class is None:
client_class = beatbox.PythonClient
soap_client = client_class()
# authenticate
connection = connections[db_alias]
# verify the authenticated connection, because Beatbox can not refresh the token
cursor = connection.cursor()
cursor.urls_request()
auth_info = connections[db_alias].sf_session.auth
access_token = auth_info.get_auth()['access_token']
assert access_token[15] == '!'
org_id = access_token[:15]
url = '/services/Soap/u/{version}/{org_id}'.format(version=salesforce.API_VERSION,
org_id=org_id)
soap_client.useSession(access_token, auth_info.instance_url + url)
return soap_client | [
"def",
"get_soap_client",
"(",
"db_alias",
",",
"client_class",
"=",
"None",
")",
":",
"if",
"not",
"beatbox",
":",
"raise",
"InterfaceError",
"(",
"\"To use SOAP API, you'll need to install the Beatbox package.\"",
")",
"if",
"client_class",
"is",
"None",
":",
"client_class",
"=",
"beatbox",
".",
"PythonClient",
"soap_client",
"=",
"client_class",
"(",
")",
"# authenticate",
"connection",
"=",
"connections",
"[",
"db_alias",
"]",
"# verify the authenticated connection, because Beatbox can not refresh the token",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"urls_request",
"(",
")",
"auth_info",
"=",
"connections",
"[",
"db_alias",
"]",
".",
"sf_session",
".",
"auth",
"access_token",
"=",
"auth_info",
".",
"get_auth",
"(",
")",
"[",
"'access_token'",
"]",
"assert",
"access_token",
"[",
"15",
"]",
"==",
"'!'",
"org_id",
"=",
"access_token",
"[",
":",
"15",
"]",
"url",
"=",
"'/services/Soap/u/{version}/{org_id}'",
".",
"format",
"(",
"version",
"=",
"salesforce",
".",
"API_VERSION",
",",
"org_id",
"=",
"org_id",
")",
"soap_client",
".",
"useSession",
"(",
"access_token",
",",
"auth_info",
".",
"instance_url",
"+",
"url",
")",
"return",
"soap_client"
]
| Create the SOAP client for the current user logged in the db_alias
The default created client is "beatbox.PythonClient", but an
alternative client is possible. (i.e. other subtype of beatbox.XMLClient) | [
"Create",
"the",
"SOAP",
"client",
"for",
"the",
"current",
"user",
"logged",
"in",
"the",
"db_alias"
]
| 6fd5643dba69d49c5881de50875cf90204a8f808 | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/utils.py#L20-L46 | train |
django-salesforce/django-salesforce | salesforce/dbapi/driver.py | signalize_extensions | def signalize_extensions():
"""DB API 2.0 extension are reported by warnings at run-time."""
warnings.warn("DB-API extension cursor.rownumber used", SalesforceWarning)
warnings.warn("DB-API extension connection.<exception> used", SalesforceWarning) # TODO
warnings.warn("DB-API extension cursor.connection used", SalesforceWarning)
# not implemented DB-API extension cursor.scroll(, SalesforceWarning)
warnings.warn("DB-API extension cursor.messages used", SalesforceWarning)
warnings.warn("DB-API extension connection.messages used", SalesforceWarning)
warnings.warn("DB-API extension cursor.next(, SalesforceWarning) used")
warnings.warn("DB-API extension cursor.__iter__(, SalesforceWarning) used")
warnings.warn("DB-API extension cursor.lastrowid used", SalesforceWarning)
warnings.warn("DB-API extension .errorhandler used", SalesforceWarning) | python | def signalize_extensions():
"""DB API 2.0 extension are reported by warnings at run-time."""
warnings.warn("DB-API extension cursor.rownumber used", SalesforceWarning)
warnings.warn("DB-API extension connection.<exception> used", SalesforceWarning) # TODO
warnings.warn("DB-API extension cursor.connection used", SalesforceWarning)
# not implemented DB-API extension cursor.scroll(, SalesforceWarning)
warnings.warn("DB-API extension cursor.messages used", SalesforceWarning)
warnings.warn("DB-API extension connection.messages used", SalesforceWarning)
warnings.warn("DB-API extension cursor.next(, SalesforceWarning) used")
warnings.warn("DB-API extension cursor.__iter__(, SalesforceWarning) used")
warnings.warn("DB-API extension cursor.lastrowid used", SalesforceWarning)
warnings.warn("DB-API extension .errorhandler used", SalesforceWarning) | [
"def",
"signalize_extensions",
"(",
")",
":",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.rownumber used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension connection.<exception> used\"",
",",
"SalesforceWarning",
")",
"# TODO",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.connection used\"",
",",
"SalesforceWarning",
")",
"# not implemented DB-API extension cursor.scroll(, SalesforceWarning)",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.messages used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension connection.messages used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.next(, SalesforceWarning) used\"",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.__iter__(, SalesforceWarning) used\"",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension cursor.lastrowid used\"",
",",
"SalesforceWarning",
")",
"warnings",
".",
"warn",
"(",
"\"DB-API extension .errorhandler used\"",
",",
"SalesforceWarning",
")"
]
| DB API 2.0 extension are reported by warnings at run-time. | [
"DB",
"API",
"2",
".",
"0",
"extension",
"are",
"reported",
"by",
"warnings",
"at",
"run",
"-",
"time",
"."
]
| 6fd5643dba69d49c5881de50875cf90204a8f808 | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L670-L681 | train |
django-salesforce/django-salesforce | salesforce/dbapi/driver.py | arg_to_soql | def arg_to_soql(arg):
"""
Perform necessary SOQL quoting on the arg.
"""
conversion = sql_conversions.get(type(arg))
if conversion:
return conversion(arg)
for type_ in subclass_conversions:
if isinstance(arg, type_):
return sql_conversions[type_](arg)
return sql_conversions[str](arg) | python | def arg_to_soql(arg):
"""
Perform necessary SOQL quoting on the arg.
"""
conversion = sql_conversions.get(type(arg))
if conversion:
return conversion(arg)
for type_ in subclass_conversions:
if isinstance(arg, type_):
return sql_conversions[type_](arg)
return sql_conversions[str](arg) | [
"def",
"arg_to_soql",
"(",
"arg",
")",
":",
"conversion",
"=",
"sql_conversions",
".",
"get",
"(",
"type",
"(",
"arg",
")",
")",
"if",
"conversion",
":",
"return",
"conversion",
"(",
"arg",
")",
"for",
"type_",
"in",
"subclass_conversions",
":",
"if",
"isinstance",
"(",
"arg",
",",
"type_",
")",
":",
"return",
"sql_conversions",
"[",
"type_",
"]",
"(",
"arg",
")",
"return",
"sql_conversions",
"[",
"str",
"]",
"(",
"arg",
")"
]
| Perform necessary SOQL quoting on the arg. | [
"Perform",
"necessary",
"SOQL",
"quoting",
"on",
"the",
"arg",
"."
]
| 6fd5643dba69d49c5881de50875cf90204a8f808 | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L735-L745 | train |
django-salesforce/django-salesforce | salesforce/dbapi/driver.py | arg_to_json | def arg_to_json(arg):
"""
Perform necessary JSON conversion on the arg.
"""
conversion = json_conversions.get(type(arg))
if conversion:
return conversion(arg)
for type_ in subclass_conversions:
if isinstance(arg, type_):
return json_conversions[type_](arg)
return json_conversions[str](arg) | python | def arg_to_json(arg):
"""
Perform necessary JSON conversion on the arg.
"""
conversion = json_conversions.get(type(arg))
if conversion:
return conversion(arg)
for type_ in subclass_conversions:
if isinstance(arg, type_):
return json_conversions[type_](arg)
return json_conversions[str](arg) | [
"def",
"arg_to_json",
"(",
"arg",
")",
":",
"conversion",
"=",
"json_conversions",
".",
"get",
"(",
"type",
"(",
"arg",
")",
")",
"if",
"conversion",
":",
"return",
"conversion",
"(",
"arg",
")",
"for",
"type_",
"in",
"subclass_conversions",
":",
"if",
"isinstance",
"(",
"arg",
",",
"type_",
")",
":",
"return",
"json_conversions",
"[",
"type_",
"]",
"(",
"arg",
")",
"return",
"json_conversions",
"[",
"str",
"]",
"(",
"arg",
")"
]
| Perform necessary JSON conversion on the arg. | [
"Perform",
"necessary",
"JSON",
"conversion",
"on",
"the",
"arg",
"."
]
| 6fd5643dba69d49c5881de50875cf90204a8f808 | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L748-L758 | train |
django-salesforce/django-salesforce | salesforce/dbapi/driver.py | merge_dict | def merge_dict(dict_1, *other, **kw):
"""Merge two or more dict including kw into result dict."""
tmp = dict_1.copy()
for x in other:
tmp.update(x)
tmp.update(kw)
return tmp | python | def merge_dict(dict_1, *other, **kw):
"""Merge two or more dict including kw into result dict."""
tmp = dict_1.copy()
for x in other:
tmp.update(x)
tmp.update(kw)
return tmp | [
"def",
"merge_dict",
"(",
"dict_1",
",",
"*",
"other",
",",
"*",
"*",
"kw",
")",
":",
"tmp",
"=",
"dict_1",
".",
"copy",
"(",
")",
"for",
"x",
"in",
"other",
":",
"tmp",
".",
"update",
"(",
"x",
")",
"tmp",
".",
"update",
"(",
"kw",
")",
"return",
"tmp"
]
| Merge two or more dict including kw into result dict. | [
"Merge",
"two",
"or",
"more",
"dict",
"including",
"kw",
"into",
"result",
"dict",
"."
]
| 6fd5643dba69d49c5881de50875cf90204a8f808 | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L792-L798 | train |
django-salesforce/django-salesforce | salesforce/dbapi/driver.py | RawConnection.make_session | def make_session(self):
"""Authenticate and get the name of assigned SFDC data server"""
with connect_lock:
if self._sf_session is None:
sf_session = requests.Session()
# TODO configurable class Salesforce***Auth
sf_session.auth = SalesforcePasswordAuth(db_alias=self.alias,
settings_dict=self.settings_dict)
sf_instance_url = sf_session.auth.instance_url
sf_requests_adapter = HTTPAdapter(max_retries=get_max_retries())
sf_session.mount(sf_instance_url, sf_requests_adapter)
# Additional headers work, but the same are added automatically by "requests' package.
# sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO
self._sf_session = sf_session | python | def make_session(self):
"""Authenticate and get the name of assigned SFDC data server"""
with connect_lock:
if self._sf_session is None:
sf_session = requests.Session()
# TODO configurable class Salesforce***Auth
sf_session.auth = SalesforcePasswordAuth(db_alias=self.alias,
settings_dict=self.settings_dict)
sf_instance_url = sf_session.auth.instance_url
sf_requests_adapter = HTTPAdapter(max_retries=get_max_retries())
sf_session.mount(sf_instance_url, sf_requests_adapter)
# Additional headers work, but the same are added automatically by "requests' package.
# sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO
self._sf_session = sf_session | [
"def",
"make_session",
"(",
"self",
")",
":",
"with",
"connect_lock",
":",
"if",
"self",
".",
"_sf_session",
"is",
"None",
":",
"sf_session",
"=",
"requests",
".",
"Session",
"(",
")",
"# TODO configurable class Salesforce***Auth",
"sf_session",
".",
"auth",
"=",
"SalesforcePasswordAuth",
"(",
"db_alias",
"=",
"self",
".",
"alias",
",",
"settings_dict",
"=",
"self",
".",
"settings_dict",
")",
"sf_instance_url",
"=",
"sf_session",
".",
"auth",
".",
"instance_url",
"sf_requests_adapter",
"=",
"HTTPAdapter",
"(",
"max_retries",
"=",
"get_max_retries",
"(",
")",
")",
"sf_session",
".",
"mount",
"(",
"sf_instance_url",
",",
"sf_requests_adapter",
")",
"# Additional headers work, but the same are added automatically by \"requests' package.",
"# sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO",
"self",
".",
"_sf_session",
"=",
"sf_session"
]
| Authenticate and get the name of assigned SFDC data server | [
"Authenticate",
"and",
"get",
"the",
"name",
"of",
"assigned",
"SFDC",
"data",
"server"
]
| 6fd5643dba69d49c5881de50875cf90204a8f808 | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L159-L172 | train |
django-salesforce/django-salesforce | salesforce/dbapi/driver.py | RawConnection.rest_api_url | def rest_api_url(self, *url_parts, **kwargs):
"""Join the URL of REST_API
parameters:
upl_parts: strings that are joined to the url by "/".
a REST url like https://na1.salesforce.com/services/data/v44.0/
is usually added, but not if the first string starts with https://
api_ver: API version that should be used instead of connection.api_ver
default. A special api_ver="" can be used to omit api version
(for request to ask for available api versions)
relative: If `relative` is true then the url is without domain
Examples: self.rest_api_url("query?q=select+id+from+Organization")
self.rest_api_url("sobject", "Contact", id, api_ver="45.0")
self.rest_api_url(api_ver="") # versions request
self.rest_api_url("sobject", relative=True)
self.rest_api_url("/services/data/v45.0")
Output:
https://na1.salesforce.com/services/data/v44.0/query?q=select+id+from+Organization
https://na1.salesforce.com/services/data/v45.0/sobject/Contact/003DD00000000XYAAA
https://na1.salesforce.com/services/data
/services/data/v45.0
https://na1.salesforce.com/services/data/44.0
"""
url_parts = list(url_parts)
if url_parts and re.match(r'^(?:https|mock)://', url_parts[0]):
return '/'.join(url_parts)
relative = kwargs.pop('relative', False)
api_ver = kwargs.pop('api_ver', None)
api_ver = api_ver if api_ver is not None else self.api_ver
assert not kwargs
if not relative:
base = [self.sf_session.auth.instance_url]
else:
base = ['']
if url_parts and url_parts[0].startswith('/'):
prefix = []
url_parts[0] = url_parts[0][1:]
else:
prefix = ['services/data']
if api_ver:
prefix += ['v{api_ver}'.format(api_ver=api_ver)]
return '/'.join(base + prefix + url_parts) | python | def rest_api_url(self, *url_parts, **kwargs):
"""Join the URL of REST_API
parameters:
upl_parts: strings that are joined to the url by "/".
a REST url like https://na1.salesforce.com/services/data/v44.0/
is usually added, but not if the first string starts with https://
api_ver: API version that should be used instead of connection.api_ver
default. A special api_ver="" can be used to omit api version
(for request to ask for available api versions)
relative: If `relative` is true then the url is without domain
Examples: self.rest_api_url("query?q=select+id+from+Organization")
self.rest_api_url("sobject", "Contact", id, api_ver="45.0")
self.rest_api_url(api_ver="") # versions request
self.rest_api_url("sobject", relative=True)
self.rest_api_url("/services/data/v45.0")
Output:
https://na1.salesforce.com/services/data/v44.0/query?q=select+id+from+Organization
https://na1.salesforce.com/services/data/v45.0/sobject/Contact/003DD00000000XYAAA
https://na1.salesforce.com/services/data
/services/data/v45.0
https://na1.salesforce.com/services/data/44.0
"""
url_parts = list(url_parts)
if url_parts and re.match(r'^(?:https|mock)://', url_parts[0]):
return '/'.join(url_parts)
relative = kwargs.pop('relative', False)
api_ver = kwargs.pop('api_ver', None)
api_ver = api_ver if api_ver is not None else self.api_ver
assert not kwargs
if not relative:
base = [self.sf_session.auth.instance_url]
else:
base = ['']
if url_parts and url_parts[0].startswith('/'):
prefix = []
url_parts[0] = url_parts[0][1:]
else:
prefix = ['services/data']
if api_ver:
prefix += ['v{api_ver}'.format(api_ver=api_ver)]
return '/'.join(base + prefix + url_parts) | [
"def",
"rest_api_url",
"(",
"self",
",",
"*",
"url_parts",
",",
"*",
"*",
"kwargs",
")",
":",
"url_parts",
"=",
"list",
"(",
"url_parts",
")",
"if",
"url_parts",
"and",
"re",
".",
"match",
"(",
"r'^(?:https|mock)://'",
",",
"url_parts",
"[",
"0",
"]",
")",
":",
"return",
"'/'",
".",
"join",
"(",
"url_parts",
")",
"relative",
"=",
"kwargs",
".",
"pop",
"(",
"'relative'",
",",
"False",
")",
"api_ver",
"=",
"kwargs",
".",
"pop",
"(",
"'api_ver'",
",",
"None",
")",
"api_ver",
"=",
"api_ver",
"if",
"api_ver",
"is",
"not",
"None",
"else",
"self",
".",
"api_ver",
"assert",
"not",
"kwargs",
"if",
"not",
"relative",
":",
"base",
"=",
"[",
"self",
".",
"sf_session",
".",
"auth",
".",
"instance_url",
"]",
"else",
":",
"base",
"=",
"[",
"''",
"]",
"if",
"url_parts",
"and",
"url_parts",
"[",
"0",
"]",
".",
"startswith",
"(",
"'/'",
")",
":",
"prefix",
"=",
"[",
"]",
"url_parts",
"[",
"0",
"]",
"=",
"url_parts",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"else",
":",
"prefix",
"=",
"[",
"'services/data'",
"]",
"if",
"api_ver",
":",
"prefix",
"+=",
"[",
"'v{api_ver}'",
".",
"format",
"(",
"api_ver",
"=",
"api_ver",
")",
"]",
"return",
"'/'",
".",
"join",
"(",
"base",
"+",
"prefix",
"+",
"url_parts",
")"
]
| Join the URL of REST_API
parameters:
upl_parts: strings that are joined to the url by "/".
a REST url like https://na1.salesforce.com/services/data/v44.0/
is usually added, but not if the first string starts with https://
api_ver: API version that should be used instead of connection.api_ver
default. A special api_ver="" can be used to omit api version
(for request to ask for available api versions)
relative: If `relative` is true then the url is without domain
Examples: self.rest_api_url("query?q=select+id+from+Organization")
self.rest_api_url("sobject", "Contact", id, api_ver="45.0")
self.rest_api_url(api_ver="") # versions request
self.rest_api_url("sobject", relative=True)
self.rest_api_url("/services/data/v45.0")
Output:
https://na1.salesforce.com/services/data/v44.0/query?q=select+id+from+Organization
https://na1.salesforce.com/services/data/v45.0/sobject/Contact/003DD00000000XYAAA
https://na1.salesforce.com/services/data
/services/data/v45.0
https://na1.salesforce.com/services/data/44.0 | [
"Join",
"the",
"URL",
"of",
"REST_API"
]
| 6fd5643dba69d49c5881de50875cf90204a8f808 | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L174-L216 | train |
django-salesforce/django-salesforce | salesforce/dbapi/driver.py | RawConnection.raise_errors | def raise_errors(self, response):
"""The innermost part - report errors by exceptions"""
# Errors: 400, 403 permissions or REQUEST_LIMIT_EXCEEDED, 404, 405, 415, 500)
# TODO extract a case ID for Salesforce support from code 500 messages
# TODO disabled 'debug_verbs' temporarily, after writing better default messages
verb = self.debug_verbs # NOQA pylint:disable=unused-variable
method = response.request.method
data = None
is_json = 'json' in response.headers.get('Content-Type', '') and response.text
if is_json:
data = json.loads(response.text)
if not (isinstance(data, list) and data and 'errorCode' in data[0]):
messages = [response.text] if is_json else []
raise OperationalError(
['HTTP error "%d %s":' % (response.status_code, response.reason)]
+ messages, response, ['method+url'])
# Other Errors are reported in the json body
err_msg = data[0]['message']
err_code = data[0]['errorCode']
if response.status_code == 404: # ResourceNotFound
if method == 'DELETE' and err_code in ('ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'):
# It was a delete command and the object is in trash bin or it is
# completely deleted or it could be a valid Id for this sobject type.
# Then we accept it with a warning, similarly to delete by a classic database query:
# DELETE FROM xy WHERE id = 'something_deleted_yet'
warn_sf([err_msg, "Object is deleted before delete or update"], response, ['method+url'])
# TODO add a warning and add it to messages
return None
if err_code in ('NOT_FOUND', # 404 e.g. invalid object type in url path or url query?q=select ...
'METHOD_NOT_ALLOWED', # 405 e.g. patch instead of post
): # both need to report the url
raise SalesforceError([err_msg], response, ['method+url'])
# it is good e.g for these errorCode: ('INVALID_FIELD', 'MALFORMED_QUERY', 'INVALID_FIELD_FOR_INSERT_UPDATE')
raise SalesforceError([err_msg], response) | python | def raise_errors(self, response):
"""The innermost part - report errors by exceptions"""
# Errors: 400, 403 permissions or REQUEST_LIMIT_EXCEEDED, 404, 405, 415, 500)
# TODO extract a case ID for Salesforce support from code 500 messages
# TODO disabled 'debug_verbs' temporarily, after writing better default messages
verb = self.debug_verbs # NOQA pylint:disable=unused-variable
method = response.request.method
data = None
is_json = 'json' in response.headers.get('Content-Type', '') and response.text
if is_json:
data = json.loads(response.text)
if not (isinstance(data, list) and data and 'errorCode' in data[0]):
messages = [response.text] if is_json else []
raise OperationalError(
['HTTP error "%d %s":' % (response.status_code, response.reason)]
+ messages, response, ['method+url'])
# Other Errors are reported in the json body
err_msg = data[0]['message']
err_code = data[0]['errorCode']
if response.status_code == 404: # ResourceNotFound
if method == 'DELETE' and err_code in ('ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'):
# It was a delete command and the object is in trash bin or it is
# completely deleted or it could be a valid Id for this sobject type.
# Then we accept it with a warning, similarly to delete by a classic database query:
# DELETE FROM xy WHERE id = 'something_deleted_yet'
warn_sf([err_msg, "Object is deleted before delete or update"], response, ['method+url'])
# TODO add a warning and add it to messages
return None
if err_code in ('NOT_FOUND', # 404 e.g. invalid object type in url path or url query?q=select ...
'METHOD_NOT_ALLOWED', # 405 e.g. patch instead of post
): # both need to report the url
raise SalesforceError([err_msg], response, ['method+url'])
# it is good e.g for these errorCode: ('INVALID_FIELD', 'MALFORMED_QUERY', 'INVALID_FIELD_FOR_INSERT_UPDATE')
raise SalesforceError([err_msg], response) | [
"def",
"raise_errors",
"(",
"self",
",",
"response",
")",
":",
"# Errors: 400, 403 permissions or REQUEST_LIMIT_EXCEEDED, 404, 405, 415, 500)",
"# TODO extract a case ID for Salesforce support from code 500 messages",
"# TODO disabled 'debug_verbs' temporarily, after writing better default messages",
"verb",
"=",
"self",
".",
"debug_verbs",
"# NOQA pylint:disable=unused-variable",
"method",
"=",
"response",
".",
"request",
".",
"method",
"data",
"=",
"None",
"is_json",
"=",
"'json'",
"in",
"response",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
",",
"''",
")",
"and",
"response",
".",
"text",
"if",
"is_json",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"text",
")",
"if",
"not",
"(",
"isinstance",
"(",
"data",
",",
"list",
")",
"and",
"data",
"and",
"'errorCode'",
"in",
"data",
"[",
"0",
"]",
")",
":",
"messages",
"=",
"[",
"response",
".",
"text",
"]",
"if",
"is_json",
"else",
"[",
"]",
"raise",
"OperationalError",
"(",
"[",
"'HTTP error \"%d %s\":'",
"%",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"reason",
")",
"]",
"+",
"messages",
",",
"response",
",",
"[",
"'method+url'",
"]",
")",
"# Other Errors are reported in the json body",
"err_msg",
"=",
"data",
"[",
"0",
"]",
"[",
"'message'",
"]",
"err_code",
"=",
"data",
"[",
"0",
"]",
"[",
"'errorCode'",
"]",
"if",
"response",
".",
"status_code",
"==",
"404",
":",
"# ResourceNotFound",
"if",
"method",
"==",
"'DELETE'",
"and",
"err_code",
"in",
"(",
"'ENTITY_IS_DELETED'",
",",
"'INVALID_CROSS_REFERENCE_KEY'",
")",
":",
"# It was a delete command and the object is in trash bin or it is",
"# completely deleted or it could be a valid Id for this sobject type.",
"# Then we accept it with a warning, similarly to delete by a classic database query:",
"# DELETE FROM xy WHERE id = 'something_deleted_yet'",
"warn_sf",
"(",
"[",
"err_msg",
",",
"\"Object is deleted before delete or update\"",
"]",
",",
"response",
",",
"[",
"'method+url'",
"]",
")",
"# TODO add a warning and add it to messages",
"return",
"None",
"if",
"err_code",
"in",
"(",
"'NOT_FOUND'",
",",
"# 404 e.g. invalid object type in url path or url query?q=select ...",
"'METHOD_NOT_ALLOWED'",
",",
"# 405 e.g. patch instead of post",
")",
":",
"# both need to report the url",
"raise",
"SalesforceError",
"(",
"[",
"err_msg",
"]",
",",
"response",
",",
"[",
"'method+url'",
"]",
")",
"# it is good e.g for these errorCode: ('INVALID_FIELD', 'MALFORMED_QUERY', 'INVALID_FIELD_FOR_INSERT_UPDATE')",
"raise",
"SalesforceError",
"(",
"[",
"err_msg",
"]",
",",
"response",
")"
]
| The innermost part - report errors by exceptions | [
"The",
"innermost",
"part",
"-",
"report",
"errors",
"by",
"exceptions"
]
| 6fd5643dba69d49c5881de50875cf90204a8f808 | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L287-L322 | train |
django-salesforce/django-salesforce | salesforce/dbapi/driver.py | RawConnection.composite_request | def composite_request(self, data):
"""Call a 'composite' request with subrequests, error handling
A fake object for request/response is created for a subrequest in case
of error, to be possible to use the same error hanler with a clear
message as with an individual request.
"""
post_data = {'compositeRequest': data, 'allOrNone': True}
resp = self.handle_api_exceptions('POST', 'composite', json=post_data)
comp_resp = resp.json()['compositeResponse']
is_ok = all(x['httpStatusCode'] < 400 for x in comp_resp)
if is_ok:
return resp
# construct an equivalent of individual bad request/response
bad_responses = {
i: x for i, x in enumerate(comp_resp)
if not (x['httpStatusCode'] == 400
and x['body'][0]['errorCode'] in ('PROCESSING_HALTED', 'ALL_OR_NONE_OPERATION_ROLLED_BACK'))
}
if len(bad_responses) != 1:
raise InternalError("Too much or too many subrequests with an individual error")
bad_i, bad_response = bad_responses.popitem()
bad_request = data[bad_i]
bad_req = FakeReq(bad_request['method'], bad_request['url'], bad_request.get('body'),
bad_request.get('httpHeaders', {}), context={bad_i: bad_request['referenceId']})
body = [merge_dict(x, referenceId=bad_response['referenceId'])
for x in bad_response['body']]
bad_resp_headers = bad_response['httpHeaders'].copy()
bad_resp_headers.update({'Content-Type': resp.headers['Content-Type']})
bad_resp = FakeResp(bad_response['httpStatusCode'], json.dumps(body), bad_req, bad_resp_headers)
self.raise_errors(bad_resp) | python | def composite_request(self, data):
"""Call a 'composite' request with subrequests, error handling
A fake object for request/response is created for a subrequest in case
of error, to be possible to use the same error hanler with a clear
message as with an individual request.
"""
post_data = {'compositeRequest': data, 'allOrNone': True}
resp = self.handle_api_exceptions('POST', 'composite', json=post_data)
comp_resp = resp.json()['compositeResponse']
is_ok = all(x['httpStatusCode'] < 400 for x in comp_resp)
if is_ok:
return resp
# construct an equivalent of individual bad request/response
bad_responses = {
i: x for i, x in enumerate(comp_resp)
if not (x['httpStatusCode'] == 400
and x['body'][0]['errorCode'] in ('PROCESSING_HALTED', 'ALL_OR_NONE_OPERATION_ROLLED_BACK'))
}
if len(bad_responses) != 1:
raise InternalError("Too much or too many subrequests with an individual error")
bad_i, bad_response = bad_responses.popitem()
bad_request = data[bad_i]
bad_req = FakeReq(bad_request['method'], bad_request['url'], bad_request.get('body'),
bad_request.get('httpHeaders', {}), context={bad_i: bad_request['referenceId']})
body = [merge_dict(x, referenceId=bad_response['referenceId'])
for x in bad_response['body']]
bad_resp_headers = bad_response['httpHeaders'].copy()
bad_resp_headers.update({'Content-Type': resp.headers['Content-Type']})
bad_resp = FakeResp(bad_response['httpStatusCode'], json.dumps(body), bad_req, bad_resp_headers)
self.raise_errors(bad_resp) | [
"def",
"composite_request",
"(",
"self",
",",
"data",
")",
":",
"post_data",
"=",
"{",
"'compositeRequest'",
":",
"data",
",",
"'allOrNone'",
":",
"True",
"}",
"resp",
"=",
"self",
".",
"handle_api_exceptions",
"(",
"'POST'",
",",
"'composite'",
",",
"json",
"=",
"post_data",
")",
"comp_resp",
"=",
"resp",
".",
"json",
"(",
")",
"[",
"'compositeResponse'",
"]",
"is_ok",
"=",
"all",
"(",
"x",
"[",
"'httpStatusCode'",
"]",
"<",
"400",
"for",
"x",
"in",
"comp_resp",
")",
"if",
"is_ok",
":",
"return",
"resp",
"# construct an equivalent of individual bad request/response",
"bad_responses",
"=",
"{",
"i",
":",
"x",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"comp_resp",
")",
"if",
"not",
"(",
"x",
"[",
"'httpStatusCode'",
"]",
"==",
"400",
"and",
"x",
"[",
"'body'",
"]",
"[",
"0",
"]",
"[",
"'errorCode'",
"]",
"in",
"(",
"'PROCESSING_HALTED'",
",",
"'ALL_OR_NONE_OPERATION_ROLLED_BACK'",
")",
")",
"}",
"if",
"len",
"(",
"bad_responses",
")",
"!=",
"1",
":",
"raise",
"InternalError",
"(",
"\"Too much or too many subrequests with an individual error\"",
")",
"bad_i",
",",
"bad_response",
"=",
"bad_responses",
".",
"popitem",
"(",
")",
"bad_request",
"=",
"data",
"[",
"bad_i",
"]",
"bad_req",
"=",
"FakeReq",
"(",
"bad_request",
"[",
"'method'",
"]",
",",
"bad_request",
"[",
"'url'",
"]",
",",
"bad_request",
".",
"get",
"(",
"'body'",
")",
",",
"bad_request",
".",
"get",
"(",
"'httpHeaders'",
",",
"{",
"}",
")",
",",
"context",
"=",
"{",
"bad_i",
":",
"bad_request",
"[",
"'referenceId'",
"]",
"}",
")",
"body",
"=",
"[",
"merge_dict",
"(",
"x",
",",
"referenceId",
"=",
"bad_response",
"[",
"'referenceId'",
"]",
")",
"for",
"x",
"in",
"bad_response",
"[",
"'body'",
"]",
"]",
"bad_resp_headers",
"=",
"bad_response",
"[",
"'httpHeaders'",
"]",
".",
"copy",
"(",
")",
"bad_resp_headers",
".",
"update",
"(",
"{",
"'Content-Type'",
":",
"resp",
".",
"headers",
"[",
"'Content-Type'",
"]",
"}",
")",
"bad_resp",
"=",
"FakeResp",
"(",
"bad_response",
"[",
"'httpStatusCode'",
"]",
",",
"json",
".",
"dumps",
"(",
"body",
")",
",",
"bad_req",
",",
"bad_resp_headers",
")",
"self",
".",
"raise_errors",
"(",
"bad_resp",
")"
]
| Call a 'composite' request with subrequests, error handling
A fake object for request/response is created for a subrequest in case
of error, to be possible to use the same error hanler with a clear
message as with an individual request. | [
"Call",
"a",
"composite",
"request",
"with",
"subrequests",
"error",
"handling"
]
| 6fd5643dba69d49c5881de50875cf90204a8f808 | https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L324-L359 | train |
crs4/pydoop | pydoop/avrolib.py | SeekableDataFileReader.align_after | def align_after(self, offset):
"""
Search for a sync point after offset and align just after that.
"""
f = self.reader
if offset <= 0: # FIXME what is a negative offset??
f.seek(0)
self._block_count = 0
self._read_header() # FIXME we can't extimate how big it is...
return
sm = self.sync_marker
sml = len(sm)
pos = offset
while pos < self.file_length - sml:
f.seek(pos)
data = f.read(self.FORWARD_WINDOW_SIZE)
sync_offset = data.find(sm)
if sync_offset > -1:
f.seek(pos + sync_offset)
self._block_count = 0
return
pos += len(data) | python | def align_after(self, offset):
"""
Search for a sync point after offset and align just after that.
"""
f = self.reader
if offset <= 0: # FIXME what is a negative offset??
f.seek(0)
self._block_count = 0
self._read_header() # FIXME we can't extimate how big it is...
return
sm = self.sync_marker
sml = len(sm)
pos = offset
while pos < self.file_length - sml:
f.seek(pos)
data = f.read(self.FORWARD_WINDOW_SIZE)
sync_offset = data.find(sm)
if sync_offset > -1:
f.seek(pos + sync_offset)
self._block_count = 0
return
pos += len(data) | [
"def",
"align_after",
"(",
"self",
",",
"offset",
")",
":",
"f",
"=",
"self",
".",
"reader",
"if",
"offset",
"<=",
"0",
":",
"# FIXME what is a negative offset??",
"f",
".",
"seek",
"(",
"0",
")",
"self",
".",
"_block_count",
"=",
"0",
"self",
".",
"_read_header",
"(",
")",
"# FIXME we can't extimate how big it is...",
"return",
"sm",
"=",
"self",
".",
"sync_marker",
"sml",
"=",
"len",
"(",
"sm",
")",
"pos",
"=",
"offset",
"while",
"pos",
"<",
"self",
".",
"file_length",
"-",
"sml",
":",
"f",
".",
"seek",
"(",
"pos",
")",
"data",
"=",
"f",
".",
"read",
"(",
"self",
".",
"FORWARD_WINDOW_SIZE",
")",
"sync_offset",
"=",
"data",
".",
"find",
"(",
"sm",
")",
"if",
"sync_offset",
">",
"-",
"1",
":",
"f",
".",
"seek",
"(",
"pos",
"+",
"sync_offset",
")",
"self",
".",
"_block_count",
"=",
"0",
"return",
"pos",
"+=",
"len",
"(",
"data",
")"
]
| Search for a sync point after offset and align just after that. | [
"Search",
"for",
"a",
"sync",
"point",
"after",
"offset",
"and",
"align",
"just",
"after",
"that",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/avrolib.py#L77-L98 | train |
crs4/pydoop | pydoop/avrolib.py | AvroReader.get_progress | def get_progress(self):
"""
Give a rough estimate of the progress done.
"""
pos = self.reader.reader.tell()
return min((pos - self.region_start) /
float(self.region_end - self.region_start),
1.0) | python | def get_progress(self):
"""
Give a rough estimate of the progress done.
"""
pos = self.reader.reader.tell()
return min((pos - self.region_start) /
float(self.region_end - self.region_start),
1.0) | [
"def",
"get_progress",
"(",
"self",
")",
":",
"pos",
"=",
"self",
".",
"reader",
".",
"reader",
".",
"tell",
"(",
")",
"return",
"min",
"(",
"(",
"pos",
"-",
"self",
".",
"region_start",
")",
"/",
"float",
"(",
"self",
".",
"region_end",
"-",
"self",
".",
"region_start",
")",
",",
"1.0",
")"
]
| Give a rough estimate of the progress done. | [
"Give",
"a",
"rough",
"estimate",
"of",
"the",
"progress",
"done",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/avrolib.py#L124-L131 | train |
crs4/pydoop | pydoop/hadoop_utils.py | is_exe | def is_exe(fpath):
"""
Path references an executable file.
"""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK) | python | def is_exe(fpath):
"""
Path references an executable file.
"""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK) | [
"def",
"is_exe",
"(",
"fpath",
")",
":",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"fpath",
")",
"and",
"os",
".",
"access",
"(",
"fpath",
",",
"os",
".",
"X_OK",
")"
]
| Path references an executable file. | [
"Path",
"references",
"an",
"executable",
"file",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadoop_utils.py#L265-L269 | train |
crs4/pydoop | pydoop/hadoop_utils.py | is_readable | def is_readable(fpath):
"""
Path references a readable file.
"""
return os.path.isfile(fpath) and os.access(fpath, os.R_OK) | python | def is_readable(fpath):
"""
Path references a readable file.
"""
return os.path.isfile(fpath) and os.access(fpath, os.R_OK) | [
"def",
"is_readable",
"(",
"fpath",
")",
":",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"fpath",
")",
"and",
"os",
".",
"access",
"(",
"fpath",
",",
"os",
".",
"R_OK",
")"
]
| Path references a readable file. | [
"Path",
"references",
"a",
"readable",
"file",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadoop_utils.py#L272-L276 | train |
crs4/pydoop | pydoop/hadoop_utils.py | PathFinder.is_local | def is_local(self, hadoop_conf=None, hadoop_home=None):
"""\
Is Hadoop configured to run in local mode?
By default, it is. [pseudo-]distributed mode must be
explicitly configured.
"""
conf = self.hadoop_params(hadoop_conf, hadoop_home)
keys = ('mapreduce.framework.name',
'mapreduce.jobtracker.address',
'mapred.job.tracker')
for k in keys:
if conf.get(k, 'local').lower() != 'local':
return False
return True | python | def is_local(self, hadoop_conf=None, hadoop_home=None):
"""\
Is Hadoop configured to run in local mode?
By default, it is. [pseudo-]distributed mode must be
explicitly configured.
"""
conf = self.hadoop_params(hadoop_conf, hadoop_home)
keys = ('mapreduce.framework.name',
'mapreduce.jobtracker.address',
'mapred.job.tracker')
for k in keys:
if conf.get(k, 'local').lower() != 'local':
return False
return True | [
"def",
"is_local",
"(",
"self",
",",
"hadoop_conf",
"=",
"None",
",",
"hadoop_home",
"=",
"None",
")",
":",
"conf",
"=",
"self",
".",
"hadoop_params",
"(",
"hadoop_conf",
",",
"hadoop_home",
")",
"keys",
"=",
"(",
"'mapreduce.framework.name'",
",",
"'mapreduce.jobtracker.address'",
",",
"'mapred.job.tracker'",
")",
"for",
"k",
"in",
"keys",
":",
"if",
"conf",
".",
"get",
"(",
"k",
",",
"'local'",
")",
".",
"lower",
"(",
")",
"!=",
"'local'",
":",
"return",
"False",
"return",
"True"
]
| \
Is Hadoop configured to run in local mode?
By default, it is. [pseudo-]distributed mode must be
explicitly configured. | [
"\\",
"Is",
"Hadoop",
"configured",
"to",
"run",
"in",
"local",
"mode?"
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadoop_utils.py#L562-L576 | train |
crs4/pydoop | pydoop/hdfs/path.py | abspath | def abspath(hdfs_path, user=None, local=False):
"""
Return an absolute path for ``hdfs_path``.
The ``user`` arg is passed to :func:`split`. The ``local`` argument
forces ``hdfs_path`` to be interpreted as an ordinary local path:
.. code-block:: python
>>> import os
>>> os.chdir('/tmp')
>>> import pydoop.hdfs.path as hpath
>>> hpath.abspath('file:/tmp')
'file:/tmp'
>>> hpath.abspath('file:/tmp', local=True)
'file:/tmp/file:/tmp'
Note that this function always return a full URI:
.. code-block:: python
>>> import pydoop.hdfs.path as hpath
>>> hpath.abspath('/tmp')
'hdfs://localhost:9000/tmp'
"""
if local:
return 'file:%s' % os.path.abspath(hdfs_path)
if isfull(hdfs_path):
return hdfs_path
hostname, port, path = split(hdfs_path, user=user)
if hostname:
fs = hdfs_fs.hdfs(hostname, port)
apath = join("hdfs://%s:%s" % (fs.host, fs.port), path)
fs.close()
else:
apath = "file:%s" % os.path.abspath(path)
return apath | python | def abspath(hdfs_path, user=None, local=False):
"""
Return an absolute path for ``hdfs_path``.
The ``user`` arg is passed to :func:`split`. The ``local`` argument
forces ``hdfs_path`` to be interpreted as an ordinary local path:
.. code-block:: python
>>> import os
>>> os.chdir('/tmp')
>>> import pydoop.hdfs.path as hpath
>>> hpath.abspath('file:/tmp')
'file:/tmp'
>>> hpath.abspath('file:/tmp', local=True)
'file:/tmp/file:/tmp'
Note that this function always return a full URI:
.. code-block:: python
>>> import pydoop.hdfs.path as hpath
>>> hpath.abspath('/tmp')
'hdfs://localhost:9000/tmp'
"""
if local:
return 'file:%s' % os.path.abspath(hdfs_path)
if isfull(hdfs_path):
return hdfs_path
hostname, port, path = split(hdfs_path, user=user)
if hostname:
fs = hdfs_fs.hdfs(hostname, port)
apath = join("hdfs://%s:%s" % (fs.host, fs.port), path)
fs.close()
else:
apath = "file:%s" % os.path.abspath(path)
return apath | [
"def",
"abspath",
"(",
"hdfs_path",
",",
"user",
"=",
"None",
",",
"local",
"=",
"False",
")",
":",
"if",
"local",
":",
"return",
"'file:%s'",
"%",
"os",
".",
"path",
".",
"abspath",
"(",
"hdfs_path",
")",
"if",
"isfull",
"(",
"hdfs_path",
")",
":",
"return",
"hdfs_path",
"hostname",
",",
"port",
",",
"path",
"=",
"split",
"(",
"hdfs_path",
",",
"user",
"=",
"user",
")",
"if",
"hostname",
":",
"fs",
"=",
"hdfs_fs",
".",
"hdfs",
"(",
"hostname",
",",
"port",
")",
"apath",
"=",
"join",
"(",
"\"hdfs://%s:%s\"",
"%",
"(",
"fs",
".",
"host",
",",
"fs",
".",
"port",
")",
",",
"path",
")",
"fs",
".",
"close",
"(",
")",
"else",
":",
"apath",
"=",
"\"file:%s\"",
"%",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"return",
"apath"
]
| Return an absolute path for ``hdfs_path``.
The ``user`` arg is passed to :func:`split`. The ``local`` argument
forces ``hdfs_path`` to be interpreted as an ordinary local path:
.. code-block:: python
>>> import os
>>> os.chdir('/tmp')
>>> import pydoop.hdfs.path as hpath
>>> hpath.abspath('file:/tmp')
'file:/tmp'
>>> hpath.abspath('file:/tmp', local=True)
'file:/tmp/file:/tmp'
Note that this function always return a full URI:
.. code-block:: python
>>> import pydoop.hdfs.path as hpath
>>> hpath.abspath('/tmp')
'hdfs://localhost:9000/tmp' | [
"Return",
"an",
"absolute",
"path",
"for",
"hdfs_path",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L242-L278 | train |
crs4/pydoop | pydoop/hdfs/path.py | dirname | def dirname(hdfs_path):
"""
Return the directory component of ``hdfs_path``.
"""
scheme, netloc, path = parse(hdfs_path)
return unparse(scheme, netloc, os.path.dirname(path)) | python | def dirname(hdfs_path):
"""
Return the directory component of ``hdfs_path``.
"""
scheme, netloc, path = parse(hdfs_path)
return unparse(scheme, netloc, os.path.dirname(path)) | [
"def",
"dirname",
"(",
"hdfs_path",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
"=",
"parse",
"(",
"hdfs_path",
")",
"return",
"unparse",
"(",
"scheme",
",",
"netloc",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
")"
]
| Return the directory component of ``hdfs_path``. | [
"Return",
"the",
"directory",
"component",
"of",
"hdfs_path",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L296-L301 | train |
crs4/pydoop | pydoop/hdfs/path.py | expanduser | def expanduser(path):
"""
Replace initial ``~`` or ``~user`` with the user's home directory.
**NOTE:** if the default file system is HDFS, the ``~user`` form is
expanded regardless of the user's existence.
"""
if hdfs_fs.default_is_local():
return os.path.expanduser(path)
m = re.match(r'^~([^/]*)', path)
if m is None:
return path
user = m.groups()[0] or common.DEFAULT_USER
return '/user/%s%s' % (user, path[m.end(1):]) | python | def expanduser(path):
"""
Replace initial ``~`` or ``~user`` with the user's home directory.
**NOTE:** if the default file system is HDFS, the ``~user`` form is
expanded regardless of the user's existence.
"""
if hdfs_fs.default_is_local():
return os.path.expanduser(path)
m = re.match(r'^~([^/]*)', path)
if m is None:
return path
user = m.groups()[0] or common.DEFAULT_USER
return '/user/%s%s' % (user, path[m.end(1):]) | [
"def",
"expanduser",
"(",
"path",
")",
":",
"if",
"hdfs_fs",
".",
"default_is_local",
"(",
")",
":",
"return",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"m",
"=",
"re",
".",
"match",
"(",
"r'^~([^/]*)'",
",",
"path",
")",
"if",
"m",
"is",
"None",
":",
"return",
"path",
"user",
"=",
"m",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"or",
"common",
".",
"DEFAULT_USER",
"return",
"'/user/%s%s'",
"%",
"(",
"user",
",",
"path",
"[",
"m",
".",
"end",
"(",
"1",
")",
":",
"]",
")"
]
| Replace initial ``~`` or ``~user`` with the user's home directory.
**NOTE:** if the default file system is HDFS, the ``~user`` form is
expanded regardless of the user's existence. | [
"Replace",
"initial",
"~",
"or",
"~user",
"with",
"the",
"user",
"s",
"home",
"directory",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L355-L368 | train |
crs4/pydoop | pydoop/hdfs/path.py | normpath | def normpath(path):
"""
Normalize ``path``, collapsing redundant separators and up-level refs.
"""
scheme, netloc, path_ = parse(path)
return unparse(scheme, netloc, os.path.normpath(path_)) | python | def normpath(path):
"""
Normalize ``path``, collapsing redundant separators and up-level refs.
"""
scheme, netloc, path_ = parse(path)
return unparse(scheme, netloc, os.path.normpath(path_)) | [
"def",
"normpath",
"(",
"path",
")",
":",
"scheme",
",",
"netloc",
",",
"path_",
"=",
"parse",
"(",
"path",
")",
"return",
"unparse",
"(",
"scheme",
",",
"netloc",
",",
"os",
".",
"path",
".",
"normpath",
"(",
"path_",
")",
")"
]
| Normalize ``path``, collapsing redundant separators and up-level refs. | [
"Normalize",
"path",
"collapsing",
"redundant",
"separators",
"and",
"up",
"-",
"level",
"refs",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L480-L485 | train |
crs4/pydoop | pydoop/hdfs/path.py | realpath | def realpath(path):
"""
Return ``path`` with symlinks resolved.
Currently this function returns non-local paths unchanged.
"""
scheme, netloc, path_ = parse(path)
if scheme == 'file' or hdfs_fs.default_is_local():
return unparse(scheme, netloc, os.path.realpath(path_))
return path | python | def realpath(path):
"""
Return ``path`` with symlinks resolved.
Currently this function returns non-local paths unchanged.
"""
scheme, netloc, path_ = parse(path)
if scheme == 'file' or hdfs_fs.default_is_local():
return unparse(scheme, netloc, os.path.realpath(path_))
return path | [
"def",
"realpath",
"(",
"path",
")",
":",
"scheme",
",",
"netloc",
",",
"path_",
"=",
"parse",
"(",
"path",
")",
"if",
"scheme",
"==",
"'file'",
"or",
"hdfs_fs",
".",
"default_is_local",
"(",
")",
":",
"return",
"unparse",
"(",
"scheme",
",",
"netloc",
",",
"os",
".",
"path",
".",
"realpath",
"(",
"path_",
")",
")",
"return",
"path"
]
| Return ``path`` with symlinks resolved.
Currently this function returns non-local paths unchanged. | [
"Return",
"path",
"with",
"symlinks",
"resolved",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/path.py#L488-L497 | train |
crs4/pydoop | pydoop/hdfs/fs.py | default_is_local | def default_is_local(hadoop_conf=None, hadoop_home=None):
"""\
Is Hadoop configured to use the local file system?
By default, it is. A DFS must be explicitly configured.
"""
params = pydoop.hadoop_params(hadoop_conf, hadoop_home)
for k in 'fs.defaultFS', 'fs.default.name':
if not params.get(k, 'file:').startswith('file:'):
return False
return True | python | def default_is_local(hadoop_conf=None, hadoop_home=None):
"""\
Is Hadoop configured to use the local file system?
By default, it is. A DFS must be explicitly configured.
"""
params = pydoop.hadoop_params(hadoop_conf, hadoop_home)
for k in 'fs.defaultFS', 'fs.default.name':
if not params.get(k, 'file:').startswith('file:'):
return False
return True | [
"def",
"default_is_local",
"(",
"hadoop_conf",
"=",
"None",
",",
"hadoop_home",
"=",
"None",
")",
":",
"params",
"=",
"pydoop",
".",
"hadoop_params",
"(",
"hadoop_conf",
",",
"hadoop_home",
")",
"for",
"k",
"in",
"'fs.defaultFS'",
",",
"'fs.default.name'",
":",
"if",
"not",
"params",
".",
"get",
"(",
"k",
",",
"'file:'",
")",
".",
"startswith",
"(",
"'file:'",
")",
":",
"return",
"False",
"return",
"True"
]
| \
Is Hadoop configured to use the local file system?
By default, it is. A DFS must be explicitly configured. | [
"\\",
"Is",
"Hadoop",
"configured",
"to",
"use",
"the",
"local",
"file",
"system?"
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L93-L103 | train |
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.open_file | def open_file(self, path,
mode="r",
buff_size=0,
replication=0,
blocksize=0,
encoding=None,
errors=None):
"""
Open an HDFS file.
Supported opening modes are "r", "w", "a". In addition, a
trailing "t" can be added to specify text mode (e.g., "rt" =
open for reading text).
Pass 0 as ``buff_size``, ``replication`` or ``blocksize`` if you want
to use the "configured" values, i.e., the ones set in the Hadoop
configuration files.
:type path: str
:param path: the full path to the file
:type mode: str
:param mode: opening mode
:type buff_size: int
:param buff_size: read/write buffer size in bytes
:type replication: int
:param replication: HDFS block replication
:type blocksize: int
:param blocksize: HDFS block size
:rtpye: :class:`~.file.hdfs_file`
:return: handle to the open file
"""
_complain_ifclosed(self.closed)
if not path:
raise ValueError("Empty path")
m, is_text = common.parse_mode(mode)
if not self.host:
fret = local_file(self, path, m)
if is_text:
cls = io.BufferedReader if m == "r" else io.BufferedWriter
fret = TextIOWrapper(cls(fret), encoding, errors)
return fret
f = self.fs.open_file(path, m, buff_size, replication, blocksize)
cls = FileIO if is_text else hdfs_file
fret = cls(f, self, mode)
return fret | python | def open_file(self, path,
mode="r",
buff_size=0,
replication=0,
blocksize=0,
encoding=None,
errors=None):
"""
Open an HDFS file.
Supported opening modes are "r", "w", "a". In addition, a
trailing "t" can be added to specify text mode (e.g., "rt" =
open for reading text).
Pass 0 as ``buff_size``, ``replication`` or ``blocksize`` if you want
to use the "configured" values, i.e., the ones set in the Hadoop
configuration files.
:type path: str
:param path: the full path to the file
:type mode: str
:param mode: opening mode
:type buff_size: int
:param buff_size: read/write buffer size in bytes
:type replication: int
:param replication: HDFS block replication
:type blocksize: int
:param blocksize: HDFS block size
:rtpye: :class:`~.file.hdfs_file`
:return: handle to the open file
"""
_complain_ifclosed(self.closed)
if not path:
raise ValueError("Empty path")
m, is_text = common.parse_mode(mode)
if not self.host:
fret = local_file(self, path, m)
if is_text:
cls = io.BufferedReader if m == "r" else io.BufferedWriter
fret = TextIOWrapper(cls(fret), encoding, errors)
return fret
f = self.fs.open_file(path, m, buff_size, replication, blocksize)
cls = FileIO if is_text else hdfs_file
fret = cls(f, self, mode)
return fret | [
"def",
"open_file",
"(",
"self",
",",
"path",
",",
"mode",
"=",
"\"r\"",
",",
"buff_size",
"=",
"0",
",",
"replication",
"=",
"0",
",",
"blocksize",
"=",
"0",
",",
"encoding",
"=",
"None",
",",
"errors",
"=",
"None",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"if",
"not",
"path",
":",
"raise",
"ValueError",
"(",
"\"Empty path\"",
")",
"m",
",",
"is_text",
"=",
"common",
".",
"parse_mode",
"(",
"mode",
")",
"if",
"not",
"self",
".",
"host",
":",
"fret",
"=",
"local_file",
"(",
"self",
",",
"path",
",",
"m",
")",
"if",
"is_text",
":",
"cls",
"=",
"io",
".",
"BufferedReader",
"if",
"m",
"==",
"\"r\"",
"else",
"io",
".",
"BufferedWriter",
"fret",
"=",
"TextIOWrapper",
"(",
"cls",
"(",
"fret",
")",
",",
"encoding",
",",
"errors",
")",
"return",
"fret",
"f",
"=",
"self",
".",
"fs",
".",
"open_file",
"(",
"path",
",",
"m",
",",
"buff_size",
",",
"replication",
",",
"blocksize",
")",
"cls",
"=",
"FileIO",
"if",
"is_text",
"else",
"hdfs_file",
"fret",
"=",
"cls",
"(",
"f",
",",
"self",
",",
"mode",
")",
"return",
"fret"
]
| Open an HDFS file.
Supported opening modes are "r", "w", "a". In addition, a
trailing "t" can be added to specify text mode (e.g., "rt" =
open for reading text).
Pass 0 as ``buff_size``, ``replication`` or ``blocksize`` if you want
to use the "configured" values, i.e., the ones set in the Hadoop
configuration files.
:type path: str
:param path: the full path to the file
:type mode: str
:param mode: opening mode
:type buff_size: int
:param buff_size: read/write buffer size in bytes
:type replication: int
:param replication: HDFS block replication
:type blocksize: int
:param blocksize: HDFS block size
:rtpye: :class:`~.file.hdfs_file`
:return: handle to the open file | [
"Open",
"an",
"HDFS",
"file",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L235-L280 | train |
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.capacity | def capacity(self):
"""
Return the raw capacity of the filesystem.
:rtype: int
:return: filesystem capacity
"""
_complain_ifclosed(self.closed)
if not self.__status.host:
raise RuntimeError('Capacity is not defined for a local fs')
return self.fs.get_capacity() | python | def capacity(self):
"""
Return the raw capacity of the filesystem.
:rtype: int
:return: filesystem capacity
"""
_complain_ifclosed(self.closed)
if not self.__status.host:
raise RuntimeError('Capacity is not defined for a local fs')
return self.fs.get_capacity() | [
"def",
"capacity",
"(",
"self",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"if",
"not",
"self",
".",
"__status",
".",
"host",
":",
"raise",
"RuntimeError",
"(",
"'Capacity is not defined for a local fs'",
")",
"return",
"self",
".",
"fs",
".",
"get_capacity",
"(",
")"
]
| Return the raw capacity of the filesystem.
:rtype: int
:return: filesystem capacity | [
"Return",
"the",
"raw",
"capacity",
"of",
"the",
"filesystem",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L282-L292 | train |
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.copy | def copy(self, from_path, to_hdfs, to_path):
"""
Copy file from one filesystem to another.
:type from_path: str
:param from_path: the path of the source file
:type to_hdfs: :class:`hdfs`
:param to_hdfs: destination filesystem
:type to_path: str
:param to_path: the path of the destination file
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
if isinstance(to_hdfs, self.__class__):
to_hdfs = to_hdfs.fs
return self.fs.copy(from_path, to_hdfs, to_path) | python | def copy(self, from_path, to_hdfs, to_path):
"""
Copy file from one filesystem to another.
:type from_path: str
:param from_path: the path of the source file
:type to_hdfs: :class:`hdfs`
:param to_hdfs: destination filesystem
:type to_path: str
:param to_path: the path of the destination file
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
if isinstance(to_hdfs, self.__class__):
to_hdfs = to_hdfs.fs
return self.fs.copy(from_path, to_hdfs, to_path) | [
"def",
"copy",
"(",
"self",
",",
"from_path",
",",
"to_hdfs",
",",
"to_path",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"if",
"isinstance",
"(",
"to_hdfs",
",",
"self",
".",
"__class__",
")",
":",
"to_hdfs",
"=",
"to_hdfs",
".",
"fs",
"return",
"self",
".",
"fs",
".",
"copy",
"(",
"from_path",
",",
"to_hdfs",
",",
"to_path",
")"
]
| Copy file from one filesystem to another.
:type from_path: str
:param from_path: the path of the source file
:type to_hdfs: :class:`hdfs`
:param to_hdfs: destination filesystem
:type to_path: str
:param to_path: the path of the destination file
:raises: :exc:`~exceptions.IOError` | [
"Copy",
"file",
"from",
"one",
"filesystem",
"to",
"another",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L294-L309 | train |
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.delete | def delete(self, path, recursive=True):
"""
Delete ``path``.
:type path: str
:param path: the path of the file or directory
:type recursive: bool
:param recursive: if ``path`` is a directory, delete it recursively
when :obj:`True`
:raises: :exc:`~exceptions.IOError` when ``recursive`` is
:obj:`False` and directory is non-empty
"""
_complain_ifclosed(self.closed)
return self.fs.delete(path, recursive) | python | def delete(self, path, recursive=True):
"""
Delete ``path``.
:type path: str
:param path: the path of the file or directory
:type recursive: bool
:param recursive: if ``path`` is a directory, delete it recursively
when :obj:`True`
:raises: :exc:`~exceptions.IOError` when ``recursive`` is
:obj:`False` and directory is non-empty
"""
_complain_ifclosed(self.closed)
return self.fs.delete(path, recursive) | [
"def",
"delete",
"(",
"self",
",",
"path",
",",
"recursive",
"=",
"True",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"return",
"self",
".",
"fs",
".",
"delete",
"(",
"path",
",",
"recursive",
")"
]
| Delete ``path``.
:type path: str
:param path: the path of the file or directory
:type recursive: bool
:param recursive: if ``path`` is a directory, delete it recursively
when :obj:`True`
:raises: :exc:`~exceptions.IOError` when ``recursive`` is
:obj:`False` and directory is non-empty | [
"Delete",
"path",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L333-L346 | train |
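A hedged sketch of the two delete() modes described in the record above; the paths are placeholders.

    import pydoop.hdfs as hdfs

    fs = hdfs.hdfs(host="default", port=0)
    fs.delete("/user/demo/old_output")                        # directory, removed recursively
    try:
        fs.delete("/user/demo/nonempty_dir", recursive=False)
    except IOError:
        print("non-recursive delete refused: directory not empty")
    fs.close()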
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.exists | def exists(self, path):
"""
Check if a given path exists on the filesystem.
:type path: str
:param path: the path to look for
:rtype: bool
:return: :obj:`True` if ``path`` exists
"""
_complain_ifclosed(self.closed)
return self.fs.exists(path) | python | def exists(self, path):
"""
Check if a given path exists on the filesystem.
:type path: str
:param path: the path to look for
:rtype: bool
:return: :obj:`True` if ``path`` exists
"""
_complain_ifclosed(self.closed)
return self.fs.exists(path) | [
"def",
"exists",
"(",
"self",
",",
"path",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"return",
"self",
".",
"fs",
".",
"exists",
"(",
"path",
")"
]
| Check if a given path exists on the filesystem.
:type path: str
:param path: the path to look for
:rtype: bool
:return: :obj:`True` if ``path`` exists | [
"Check",
"if",
"a",
"given",
"path",
"exists",
"on",
"the",
"filesystem",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L348-L358 | train |
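A hedged sketch guarding an operation behind exists(); the path is hypothetical.

    import pydoop.hdfs as hdfs

    fs = hdfs.hdfs(host="default", port=0)
    path = "/user/demo/data.csv"          # hypothetical path
    if fs.exists(path):
        print(path, "is present")
    else:
        print(path, "is missing")
    fs.close()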
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.get_path_info | def get_path_info(self, path):
"""
Get information about ``path`` as a dict of properties.
The return value, based upon ``fs.FileStatus`` from the Java API,
has the following fields:
* ``block_size``: HDFS block size of ``path``
* ``group``: group associated with ``path``
* ``kind``: ``'file'`` or ``'directory'``
* ``last_access``: last access time of ``path``
* ``last_mod``: last modification time of ``path``
* ``name``: fully qualified path name
* ``owner``: owner of ``path``
* ``permissions``: file system permissions associated with ``path``
* ``replication``: replication factor of ``path``
* ``size``: size in bytes of ``path``
:type path: str
:param path: a path in the filesystem
:rtype: dict
:return: path information
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.get_path_info(path) | python | def get_path_info(self, path):
"""
Get information about ``path`` as a dict of properties.
The return value, based upon ``fs.FileStatus`` from the Java API,
has the following fields:
* ``block_size``: HDFS block size of ``path``
* ``group``: group associated with ``path``
* ``kind``: ``'file'`` or ``'directory'``
* ``last_access``: last access time of ``path``
* ``last_mod``: last modification time of ``path``
* ``name``: fully qualified path name
* ``owner``: owner of ``path``
* ``permissions``: file system permissions associated with ``path``
* ``replication``: replication factor of ``path``
* ``size``: size in bytes of ``path``
:type path: str
:param path: a path in the filesystem
:rtype: dict
:return: path information
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.get_path_info(path) | [
"def",
"get_path_info",
"(",
"self",
",",
"path",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"return",
"self",
".",
"fs",
".",
"get_path_info",
"(",
"path",
")"
]
| Get information about ``path`` as a dict of properties.
The return value, based upon ``fs.FileStatus`` from the Java API,
has the following fields:
* ``block_size``: HDFS block size of ``path``
* ``group``: group associated with ``path``
* ``kind``: ``'file'`` or ``'directory'``
* ``last_access``: last access time of ``path``
* ``last_mod``: last modification time of ``path``
* ``name``: fully qualified path name
* ``owner``: owner of ``path``
* ``permissions``: file system permissions associated with ``path``
* ``replication``: replication factor of ``path``
* ``size``: size in bytes of ``path``
:type path: str
:param path: a path in the filesystem
:rtype: dict
:return: path information
:raises: :exc:`~exceptions.IOError` | [
"Get",
"information",
"about",
"path",
"as",
"a",
"dict",
"of",
"properties",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L378-L403 | train |
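A hedged sketch printing a few of the fields listed in the get_path_info() docstring above; the path is hypothetical.

    import pydoop.hdfs as hdfs

    fs = hdfs.hdfs(host="default", port=0)
    info = fs.get_path_info("/user/demo/data.csv")        # hypothetical path
    print(info["kind"], info["size"], info["replication"], oct(info["permissions"]))
    fs.close()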
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.list_directory | def list_directory(self, path):
r"""
Get list of files and directories for ``path``\ .
:type path: str
:param path: the path of the directory
:rtype: list
:return: list of files and directories in ``path``
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.list_directory(path) | python | def list_directory(self, path):
r"""
Get list of files and directories for ``path``\ .
:type path: str
:param path: the path of the directory
:rtype: list
:return: list of files and directories in ``path``
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.list_directory(path) | [
"def",
"list_directory",
"(",
"self",
",",
"path",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"return",
"self",
".",
"fs",
".",
"list_directory",
"(",
"path",
")"
]
| r"""
Get list of files and directories for ``path``\ .
:type path: str
:param path: the path of the directory
:rtype: list
:return: list of files and directories in ``path``
:raises: :exc:`~exceptions.IOError` | [
"r",
"Get",
"list",
"of",
"files",
"and",
"directories",
"for",
"path",
"\\",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L405-L416 | train |
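A hedged sketch of list_directory(); the directory name is a placeholder, and the entry fields shown are the same ones documented for get_path_info().

    import pydoop.hdfs as hdfs

    fs = hdfs.hdfs(host="default", port=0)
    for entry in fs.list_directory("/user/demo"):          # hypothetical directory
        print(entry["kind"], entry["name"], entry["size"])
    fs.close()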
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.rename | def rename(self, from_path, to_path):
"""
Rename file.
:type from_path: str
:param from_path: the path of the source file
:type to_path: str
:param to_path: the path of the destination file
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.rename(from_path, to_path) | python | def rename(self, from_path, to_path):
"""
Rename file.
:type from_path: str
:param from_path: the path of the source file
:type to_path: str
:param to_path: the path of the destination file
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.rename(from_path, to_path) | [
"def",
"rename",
"(",
"self",
",",
"from_path",
",",
"to_path",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"return",
"self",
".",
"fs",
".",
"rename",
"(",
"from_path",
",",
"to_path",
")"
]
| Rename file.
:type from_path: str
:param from_path: the path of the source file
:type to_path: str
:param to_path: the path of the destination file
:raises: :exc:`~exceptions.IOError` | [
"Rename",
"file",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L435-L446 | train |
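A hedged sketch of rename() within one filesystem; both paths are hypothetical.

    import pydoop.hdfs as hdfs

    fs = hdfs.hdfs(host="default", port=0)
    fs.rename("/user/demo/report.tmp", "/user/demo/report.txt")   # hypothetical paths
    fs.close()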
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.set_replication | def set_replication(self, path, replication):
r"""
Set the replication of ``path`` to ``replication``\ .
:type path: str
:param path: the path of the file
:type replication: int
:param replication: the replication value
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.set_replication(path, replication) | python | def set_replication(self, path, replication):
r"""
Set the replication of ``path`` to ``replication``\ .
:type path: str
:param path: the path of the file
:type replication: int
:param replication: the replication value
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.set_replication(path, replication) | [
"def",
"set_replication",
"(",
"self",
",",
"path",
",",
"replication",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"return",
"self",
".",
"fs",
".",
"set_replication",
"(",
"path",
",",
"replication",
")"
]
| r"""
Set the replication of ``path`` to ``replication``\ .
:type path: str
:param path: the path of the file
:type replication: int
:param replication: the replication value
:raises: :exc:`~exceptions.IOError` | [
"r",
"Set",
"the",
"replication",
"of",
"path",
"to",
"replication",
"\\",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L448-L459 | train |
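A hedged sketch of set_replication(), lowering the factor for a rarely-read file; path and factor are illustrative only.

    import pydoop.hdfs as hdfs

    fs = hdfs.hdfs(host="default", port=0)
    fs.set_replication("/user/demo/archive.dat", 1)       # hypothetical path and factor
    print(fs.get_path_info("/user/demo/archive.dat")["replication"])
    fs.close()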
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.set_working_directory | def set_working_directory(self, path):
r"""
Set the working directory to ``path``\ . All relative paths will
be resolved relative to it.
:type path: str
:param path: the path of the directory
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.set_working_directory(path) | python | def set_working_directory(self, path):
r"""
Set the working directory to ``path``\ . All relative paths will
be resolved relative to it.
:type path: str
:param path: the path of the directory
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.set_working_directory(path) | [
"def",
"set_working_directory",
"(",
"self",
",",
"path",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"return",
"self",
".",
"fs",
".",
"set_working_directory",
"(",
"path",
")"
]
| r"""
Set the working directory to ``path``\ . All relative paths will
be resolved relative to it.
:type path: str
:param path: the path of the directory
:raises: :exc:`~exceptions.IOError` | [
"r",
"Set",
"the",
"working",
"directory",
"to",
"path",
"\\",
".",
"All",
"relative",
"paths",
"will",
"be",
"resolved",
"relative",
"to",
"it",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L461-L471 | train |
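A hedged sketch of set_working_directory(); the directory is hypothetical, and the relative-path resolution shown follows the docstring above.

    import pydoop.hdfs as hdfs

    fs = hdfs.hdfs(host="default", port=0)
    fs.set_working_directory("/user/demo/project")         # hypothetical directory
    print(fs.exists("results"))     # resolved as /user/demo/project/results
    fs.close()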
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.working_directory | def working_directory(self):
"""
Get the current working directory.
:rtype: str
:return: current working directory
"""
_complain_ifclosed(self.closed)
wd = self.fs.get_working_directory()
return wd | python | def working_directory(self):
"""
Get the current working directory.
:rtype: str
:return: current working directory
"""
_complain_ifclosed(self.closed)
wd = self.fs.get_working_directory()
return wd | [
"def",
"working_directory",
"(",
"self",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"wd",
"=",
"self",
".",
"fs",
".",
"get_working_directory",
"(",
")",
"return",
"wd"
]
| Get the current working directory.
:rtype: str
:return: current working directory | [
"Get",
"the",
"current",
"working",
"directory",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L483-L492 | train |
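A hedged sketch reading the current working directory back with working_directory().

    import pydoop.hdfs as hdfs

    fs = hdfs.hdfs(host="default", port=0)
    print("cwd:", fs.working_directory())
    fs.close()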
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.__compute_mode_from_string | def __compute_mode_from_string(self, path, mode_string):
"""
Scan a unix-style mode string and apply it to ``path``.
:type mode_string: str
:param mode_string: see ``man chmod`` for details. ``X``, ``s``
and ``t`` modes are not supported. The string should match the
following regular expression: ``[ugoa]*[-+=]([rwx]*)``.
:rtype: int
:return: a new mode integer resulting from applying ``mode_string``
to ``path``.
:raises: :exc:`~exceptions.ValueError` if ``mode_string`` is invalid.
"""
Char_to_perm_byte = {'r': 4, 'w': 2, 'x': 1}
Fields = (('u', 6), ('g', 3), ('o', 0))
# --
m = re.match(r"\s*([ugoa]*)([-+=])([rwx]*)\s*", mode_string)
if not m:
raise ValueError("Invalid mode string %s" % mode_string)
who = m.group(1)
what_op = m.group(2)
which_perm = m.group(3)
# --
old_mode = self.fs.get_path_info(path)['permissions']
# The mode to be applied by the operation, repeated three
# times in a list, for user, group, and other respectively.
# Initially these are identical, but some may change if we
# have to respect the umask setting.
op_perm = [
reduce(ops.ior, [Char_to_perm_byte[c] for c in which_perm])
] * 3
if 'a' in who:
who = 'ugo'
elif who == '':
who = 'ugo'
# erase the umask bits
inverted_umask = ~self.__get_umask()
for i, field in enumerate(Fields):
op_perm[i] &= (inverted_umask >> field[1]) & 0x7
# for each user, compute the permission bit and set it in the mode
new_mode = 0
for i, tpl in enumerate(Fields):
field, shift = tpl
# shift by the bits specified for the field; keep only the
# 3 lowest bits
old = (old_mode >> shift) & 0x7
if field in who:
if what_op == '-':
new = old & ~op_perm[i]
elif what_op == '=':
new = op_perm[i]
elif what_op == '+':
new = old | op_perm[i]
else:
raise RuntimeError(
"unexpected permission operation %s" % what_op
)
else:
# copy the previous permissions
new = old
new_mode |= new << shift
return new_mode | python | def __compute_mode_from_string(self, path, mode_string):
"""
Scan a unix-style mode string and apply it to ``path``.
:type mode_string: str
:param mode_string: see ``man chmod`` for details. ``X``, ``s``
and ``t`` modes are not supported. The string should match the
following regular expression: ``[ugoa]*[-+=]([rwx]*)``.
:rtype: int
:return: a new mode integer resulting from applying ``mode_string``
to ``path``.
:raises: :exc:`~exceptions.ValueError` if ``mode_string`` is invalid.
"""
Char_to_perm_byte = {'r': 4, 'w': 2, 'x': 1}
Fields = (('u', 6), ('g', 3), ('o', 0))
# --
m = re.match(r"\s*([ugoa]*)([-+=])([rwx]*)\s*", mode_string)
if not m:
raise ValueError("Invalid mode string %s" % mode_string)
who = m.group(1)
what_op = m.group(2)
which_perm = m.group(3)
# --
old_mode = self.fs.get_path_info(path)['permissions']
# The mode to be applied by the operation, repeated three
# times in a list, for user, group, and other respectively.
# Initially these are identical, but some may change if we
# have to respect the umask setting.
op_perm = [
reduce(ops.ior, [Char_to_perm_byte[c] for c in which_perm])
] * 3
if 'a' in who:
who = 'ugo'
elif who == '':
who = 'ugo'
# erase the umask bits
inverted_umask = ~self.__get_umask()
for i, field in enumerate(Fields):
op_perm[i] &= (inverted_umask >> field[1]) & 0x7
# for each user, compute the permission bit and set it in the mode
new_mode = 0
for i, tpl in enumerate(Fields):
field, shift = tpl
# shift by the bits specified for the field; keep only the
# 3 lowest bits
old = (old_mode >> shift) & 0x7
if field in who:
if what_op == '-':
new = old & ~op_perm[i]
elif what_op == '=':
new = op_perm[i]
elif what_op == '+':
new = old | op_perm[i]
else:
raise RuntimeError(
"unexpected permission operation %s" % what_op
)
else:
# copy the previous permissions
new = old
new_mode |= new << shift
return new_mode | [
"def",
"__compute_mode_from_string",
"(",
"self",
",",
"path",
",",
"mode_string",
")",
":",
"Char_to_perm_byte",
"=",
"{",
"'r'",
":",
"4",
",",
"'w'",
":",
"2",
",",
"'x'",
":",
"1",
"}",
"Fields",
"=",
"(",
"(",
"'u'",
",",
"6",
")",
",",
"(",
"'g'",
",",
"3",
")",
",",
"(",
"'o'",
",",
"0",
")",
")",
"# --",
"m",
"=",
"re",
".",
"match",
"(",
"r\"\\s*([ugoa]*)([-+=])([rwx]*)\\s*\"",
",",
"mode_string",
")",
"if",
"not",
"m",
":",
"raise",
"ValueError",
"(",
"\"Invalid mode string %s\"",
"%",
"mode_string",
")",
"who",
"=",
"m",
".",
"group",
"(",
"1",
")",
"what_op",
"=",
"m",
".",
"group",
"(",
"2",
")",
"which_perm",
"=",
"m",
".",
"group",
"(",
"3",
")",
"# --",
"old_mode",
"=",
"self",
".",
"fs",
".",
"get_path_info",
"(",
"path",
")",
"[",
"'permissions'",
"]",
"# The mode to be applied by the operation, repeated three",
"# times in a list, for user, group, and other respectively.",
"# Initially these are identical, but some may change if we",
"# have to respect the umask setting.",
"op_perm",
"=",
"[",
"reduce",
"(",
"ops",
".",
"ior",
",",
"[",
"Char_to_perm_byte",
"[",
"c",
"]",
"for",
"c",
"in",
"which_perm",
"]",
")",
"]",
"*",
"3",
"if",
"'a'",
"in",
"who",
":",
"who",
"=",
"'ugo'",
"elif",
"who",
"==",
"''",
":",
"who",
"=",
"'ugo'",
"# erase the umask bits",
"inverted_umask",
"=",
"~",
"self",
".",
"__get_umask",
"(",
")",
"for",
"i",
",",
"field",
"in",
"enumerate",
"(",
"Fields",
")",
":",
"op_perm",
"[",
"i",
"]",
"&=",
"(",
"inverted_umask",
">>",
"field",
"[",
"1",
"]",
")",
"&",
"0x7",
"# for each user, compute the permission bit and set it in the mode",
"new_mode",
"=",
"0",
"for",
"i",
",",
"tpl",
"in",
"enumerate",
"(",
"Fields",
")",
":",
"field",
",",
"shift",
"=",
"tpl",
"# shift by the bits specified for the field; keep only the",
"# 3 lowest bits",
"old",
"=",
"(",
"old_mode",
">>",
"shift",
")",
"&",
"0x7",
"if",
"field",
"in",
"who",
":",
"if",
"what_op",
"==",
"'-'",
":",
"new",
"=",
"old",
"&",
"~",
"op_perm",
"[",
"i",
"]",
"elif",
"what_op",
"==",
"'='",
":",
"new",
"=",
"op_perm",
"[",
"i",
"]",
"elif",
"what_op",
"==",
"'+'",
":",
"new",
"=",
"old",
"|",
"op_perm",
"[",
"i",
"]",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"unexpected permission operation %s\"",
"%",
"what_op",
")",
"else",
":",
"# copy the previous permissions",
"new",
"=",
"old",
"new_mode",
"|=",
"new",
"<<",
"shift",
"return",
"new_mode"
]
| Scan a unix-style mode string and apply it to ``path``.
:type mode_string: str
:param mode_string: see ``man chmod`` for details. ``X``, ``s``
and ``t`` modes are not supported. The string should match the
following regular expression: ``[ugoa]*[-+=]([rwx]*)``.
:rtype: int
:return: a new mode integer resulting from applying ``mode_string``
to ``path``.
:raises: :exc:`~exceptions.ValueError` if ``mode_string`` is invalid. | [
"Scan",
"a",
"unix",
"-",
"style",
"mode",
"string",
"and",
"apply",
"it",
"to",
"path",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L515-L576 | train |
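Because __compute_mode_from_string() is private, the following is a standalone sketch of the same chmod-style bit arithmetic it describes, not a call into the method; the starting mode and mode string are arbitrary examples.

    # Apply "g+w" to an existing mode of 0o644, mirroring the field/shift logic above.
    old_mode = 0o644
    requested = 0o2                    # the 'w' permission bit
    group_shift = 3                    # group field sits 3 bits above "other"
    group_old = (old_mode >> group_shift) & 0x7
    group_new = group_old | requested  # '+' ORs the requested bits into the old ones
    new_mode = (old_mode & ~(0x7 << group_shift)) | (group_new << group_shift)
    assert new_mode == 0o664           # rw-r--r--  ->  rw-rw-r--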
crs4/pydoop | pydoop/hdfs/fs.py | hdfs.utime | def utime(self, path, mtime, atime):
"""
Change file last access and modification times.
:type path: str
:param path: the path to the file or directory
:type mtime: int
:param mtime: new modification time in seconds
:type atime: int
:param atime: new access time in seconds
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.utime(path, int(mtime), int(atime)) | python | def utime(self, path, mtime, atime):
"""
Change file last access and modification times.
:type path: str
:param path: the path to the file or directory
:type mtime: int
:param mtime: new modification time in seconds
:type atime: int
:param atime: new access time in seconds
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.utime(path, int(mtime), int(atime)) | [
"def",
"utime",
"(",
"self",
",",
"path",
",",
"mtime",
",",
"atime",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"return",
"self",
".",
"fs",
".",
"utime",
"(",
"path",
",",
"int",
"(",
"mtime",
")",
",",
"int",
"(",
"atime",
")",
")"
]
| Change file last access and modification times.
:type path: str
:param path: the path to the file or directory
:type mtime: int
:param mtime: new modification time in seconds
:type atime: int
:param atime: new access time in seconds
:raises: :exc:`~exceptions.IOError` | [
"Change",
"file",
"last",
"access",
"and",
"modification",
"times",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/fs.py#L595-L608 | train |
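A hedged sketch of utime() stamping a file with the current time; the path is hypothetical.

    import time
    import pydoop.hdfs as hdfs

    fs = hdfs.hdfs(host="default", port=0)
    now = int(time.time())
    fs.utime("/user/demo/data.csv", mtime=now, atime=now)   # hypothetical path
    fs.close()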
crs4/pydoop | setup.py | rm_rf | def rm_rf(path, dry_run=False):
"""
Remove a file or directory tree.
Won't throw an exception, even if the removal fails.
"""
log.info("removing %s" % path)
if dry_run:
return
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError:
pass | python | def rm_rf(path, dry_run=False):
"""
Remove a file or directory tree.
Won't throw an exception, even if the removal fails.
"""
log.info("removing %s" % path)
if dry_run:
return
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError:
pass | [
"def",
"rm_rf",
"(",
"path",
",",
"dry_run",
"=",
"False",
")",
":",
"log",
".",
"info",
"(",
"\"removing %s\"",
"%",
"path",
")",
"if",
"dry_run",
":",
"return",
"try",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
"and",
"not",
"os",
".",
"path",
".",
"islink",
"(",
"path",
")",
":",
"shutil",
".",
"rmtree",
"(",
"path",
")",
"else",
":",
"os",
".",
"remove",
"(",
"path",
")",
"except",
"OSError",
":",
"pass"
]
| Remove a file or directory tree.
Won't throw an exception, even if the removal fails. | [
"Remove",
"a",
"file",
"or",
"directory",
"tree",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/setup.py#L93-L108 | train |
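Since rm_rf() lives in pydoop's setup.py rather than an importable module, here is a standalone sketch of the same "remove, but never raise" pattern; the target path is hypothetical.

    import os
    import shutil

    def quiet_remove(path):
        try:
            if os.path.isdir(path) and not os.path.islink(path):
                shutil.rmtree(path)       # directory tree
            else:
                os.remove(path)           # single file or symlink
        except OSError:
            pass                          # ignore failures, as rm_rf() does

    quiet_remove("build/tmp_artifacts")   # hypothetical build leftover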
crs4/pydoop | setup.py | BuildPydoopExt.__finalize_hdfs | def __finalize_hdfs(self, ext):
"""\
Adds a few bits that depend on the specific environment.
Delaying this until the build_ext phase allows non-build commands
(e.g., sdist) to be run without java.
"""
java_home = jvm.get_java_home()
jvm_lib_path, _ = jvm.get_jvm_lib_path_and_name(java_home)
ext.include_dirs = jvm.get_include_dirs() + ext.include_dirs
ext.libraries = jvm.get_libraries()
ext.library_dirs = [os.path.join(java_home, "Libraries"), jvm_lib_path]
ext.define_macros = jvm.get_macros()
ext.extra_link_args = ['-Wl,-rpath,%s' % jvm_lib_path]
if self.__have_better_tls():
ext.define_macros.append(("HAVE_BETTER_TLS", None))
try:
# too many warnings in libhdfs
self.compiler.compiler_so.remove("-Wsign-compare")
except (AttributeError, ValueError):
pass | python | def __finalize_hdfs(self, ext):
"""\
Adds a few bits that depend on the specific environment.
Delaying this until the build_ext phase allows non-build commands
(e.g., sdist) to be run without java.
"""
java_home = jvm.get_java_home()
jvm_lib_path, _ = jvm.get_jvm_lib_path_and_name(java_home)
ext.include_dirs = jvm.get_include_dirs() + ext.include_dirs
ext.libraries = jvm.get_libraries()
ext.library_dirs = [os.path.join(java_home, "Libraries"), jvm_lib_path]
ext.define_macros = jvm.get_macros()
ext.extra_link_args = ['-Wl,-rpath,%s' % jvm_lib_path]
if self.__have_better_tls():
ext.define_macros.append(("HAVE_BETTER_TLS", None))
try:
# too many warnings in libhdfs
self.compiler.compiler_so.remove("-Wsign-compare")
except (AttributeError, ValueError):
pass | [
"def",
"__finalize_hdfs",
"(",
"self",
",",
"ext",
")",
":",
"java_home",
"=",
"jvm",
".",
"get_java_home",
"(",
")",
"jvm_lib_path",
",",
"_",
"=",
"jvm",
".",
"get_jvm_lib_path_and_name",
"(",
"java_home",
")",
"ext",
".",
"include_dirs",
"=",
"jvm",
".",
"get_include_dirs",
"(",
")",
"+",
"ext",
".",
"include_dirs",
"ext",
".",
"libraries",
"=",
"jvm",
".",
"get_libraries",
"(",
")",
"ext",
".",
"library_dirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"java_home",
",",
"\"Libraries\"",
")",
",",
"jvm_lib_path",
"]",
"ext",
".",
"define_macros",
"=",
"jvm",
".",
"get_macros",
"(",
")",
"ext",
".",
"extra_link_args",
"=",
"[",
"'-Wl,-rpath,%s'",
"%",
"jvm_lib_path",
"]",
"if",
"self",
".",
"__have_better_tls",
"(",
")",
":",
"ext",
".",
"define_macros",
".",
"append",
"(",
"(",
"\"HAVE_BETTER_TLS\"",
",",
"None",
")",
")",
"try",
":",
"# too many warnings in libhdfs",
"self",
".",
"compiler",
".",
"compiler_so",
".",
"remove",
"(",
"\"-Wsign-compare\"",
")",
"except",
"(",
"AttributeError",
",",
"ValueError",
")",
":",
"pass"
]
| \
Adds a few bits that depend on the specific environment.
Delaying this until the build_ext phase allows non-build commands
(e.g., sdist) to be run without java. | [
"\\",
"Adds",
"a",
"few",
"bits",
"that",
"depend",
"on",
"the",
"specific",
"environment",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/setup.py#L286-L306 | train |
crs4/pydoop | pydoop/hadut.py | run_tool_cmd | def run_tool_cmd(tool, cmd, args=None, properties=None, hadoop_conf_dir=None,
logger=None, keep_streams=True):
"""
Run a Hadoop command.
If ``keep_streams`` is set to :obj:`True` (the default), the
stdout and stderr of the command will be buffered in memory. If
the command succeeds, the former will be returned; if it fails, a
``RunCmdError`` will be raised with the latter as the message.
This mode is appropriate for short-running commands whose "result"
is represented by their standard output (e.g., ``"dfsadmin",
["-safemode", "get"]``).
If ``keep_streams`` is set to :obj:`False`, the command will write
directly to the stdout and stderr of the calling process, and the
return value will be empty. This mode is appropriate for long
running commands that do not write their "real" output to stdout
(such as pipes).
.. code-block:: python
>>> hadoop_classpath = run_cmd('classpath')
"""
if logger is None:
logger = utils.NullLogger()
_args = [tool]
if hadoop_conf_dir:
_args.extend(["--config", hadoop_conf_dir])
_args.append(cmd)
if properties:
_args.extend(_construct_property_args(properties))
if args:
if isinstance(args, basestring):
args = shlex.split(args)
_merge_csv_args(args)
gargs = _pop_generic_args(args)
for seq in gargs, args:
_args.extend(map(str, seq))
logger.debug('final args: %r', (_args,))
if keep_streams:
p = subprocess.Popen(
_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
error = ""
stderr_iterator = iter(p.stderr.readline, b"")
for line in stderr_iterator:
error += line
logger.info("cmd stderr line: %s", line.strip())
output, _ = p.communicate()
else:
p = subprocess.Popen(_args, stdout=None, stderr=None, bufsize=1)
ret = p.wait()
error = 'command exited with %d status' % ret if ret else ''
output = ''
if p.returncode:
raise RunCmdError(p.returncode, ' '.join(_args), error)
return output | python | def run_tool_cmd(tool, cmd, args=None, properties=None, hadoop_conf_dir=None,
logger=None, keep_streams=True):
"""
Run a Hadoop command.
If ``keep_streams`` is set to :obj:`True` (the default), the
stdout and stderr of the command will be buffered in memory. If
the command succeeds, the former will be returned; if it fails, a
``RunCmdError`` will be raised with the latter as the message.
This mode is appropriate for short-running commands whose "result"
is represented by their standard output (e.g., ``"dfsadmin",
["-safemode", "get"]``).
If ``keep_streams`` is set to :obj:`False`, the command will write
directly to the stdout and stderr of the calling process, and the
return value will be empty. This mode is appropriate for long
running commands that do not write their "real" output to stdout
(such as pipes).
.. code-block:: python
>>> hadoop_classpath = run_cmd('classpath')
"""
if logger is None:
logger = utils.NullLogger()
_args = [tool]
if hadoop_conf_dir:
_args.extend(["--config", hadoop_conf_dir])
_args.append(cmd)
if properties:
_args.extend(_construct_property_args(properties))
if args:
if isinstance(args, basestring):
args = shlex.split(args)
_merge_csv_args(args)
gargs = _pop_generic_args(args)
for seq in gargs, args:
_args.extend(map(str, seq))
logger.debug('final args: %r', (_args,))
if keep_streams:
p = subprocess.Popen(
_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
error = ""
stderr_iterator = iter(p.stderr.readline, b"")
for line in stderr_iterator:
error += line
logger.info("cmd stderr line: %s", line.strip())
output, _ = p.communicate()
else:
p = subprocess.Popen(_args, stdout=None, stderr=None, bufsize=1)
ret = p.wait()
error = 'command exited with %d status' % ret if ret else ''
output = ''
if p.returncode:
raise RunCmdError(p.returncode, ' '.join(_args), error)
return output | [
"def",
"run_tool_cmd",
"(",
"tool",
",",
"cmd",
",",
"args",
"=",
"None",
",",
"properties",
"=",
"None",
",",
"hadoop_conf_dir",
"=",
"None",
",",
"logger",
"=",
"None",
",",
"keep_streams",
"=",
"True",
")",
":",
"if",
"logger",
"is",
"None",
":",
"logger",
"=",
"utils",
".",
"NullLogger",
"(",
")",
"_args",
"=",
"[",
"tool",
"]",
"if",
"hadoop_conf_dir",
":",
"_args",
".",
"extend",
"(",
"[",
"\"--config\"",
",",
"hadoop_conf_dir",
"]",
")",
"_args",
".",
"append",
"(",
"cmd",
")",
"if",
"properties",
":",
"_args",
".",
"extend",
"(",
"_construct_property_args",
"(",
"properties",
")",
")",
"if",
"args",
":",
"if",
"isinstance",
"(",
"args",
",",
"basestring",
")",
":",
"args",
"=",
"shlex",
".",
"split",
"(",
"args",
")",
"_merge_csv_args",
"(",
"args",
")",
"gargs",
"=",
"_pop_generic_args",
"(",
"args",
")",
"for",
"seq",
"in",
"gargs",
",",
"args",
":",
"_args",
".",
"extend",
"(",
"map",
"(",
"str",
",",
"seq",
")",
")",
"logger",
".",
"debug",
"(",
"'final args: %r'",
",",
"(",
"_args",
",",
")",
")",
"if",
"keep_streams",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"_args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"error",
"=",
"\"\"",
"stderr_iterator",
"=",
"iter",
"(",
"p",
".",
"stderr",
".",
"readline",
",",
"b\"\"",
")",
"for",
"line",
"in",
"stderr_iterator",
":",
"error",
"+=",
"line",
"logger",
".",
"info",
"(",
"\"cmd stderr line: %s\"",
",",
"line",
".",
"strip",
"(",
")",
")",
"output",
",",
"_",
"=",
"p",
".",
"communicate",
"(",
")",
"else",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"_args",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"bufsize",
"=",
"1",
")",
"ret",
"=",
"p",
".",
"wait",
"(",
")",
"error",
"=",
"'command exited with %d status'",
"%",
"ret",
"if",
"ret",
"else",
"''",
"output",
"=",
"''",
"if",
"p",
".",
"returncode",
":",
"raise",
"RunCmdError",
"(",
"p",
".",
"returncode",
",",
"' '",
".",
"join",
"(",
"_args",
")",
",",
"error",
")",
"return",
"output"
]
| Run a Hadoop command.
If ``keep_streams`` is set to :obj:`True` (the default), the
stdout and stderr of the command will be buffered in memory. If
the command succeeds, the former will be returned; if it fails, a
``RunCmdError`` will be raised with the latter as the message.
This mode is appropriate for short-running commands whose "result"
is represented by their standard output (e.g., ``"dfsadmin",
["-safemode", "get"]``).
If ``keep_streams`` is set to :obj:`False`, the command will write
directly to the stdout and stderr of the calling process, and the
return value will be empty. This mode is appropriate for long
running commands that do not write their "real" output to stdout
(such as pipes).
.. code-block:: python
>>> hadoop_classpath = run_cmd('classpath') | [
"Run",
"a",
"Hadoop",
"command",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L118-L175 | train |
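A hedged sketch of the two streaming modes described in the run_tool_cmd() docstring; it assumes a working "hadoop" launcher on PATH and a reachable cluster, and the commands shown are illustrative.

    from pydoop import hadut

    # keep_streams=True (default): stdout is buffered and returned
    safemode = hadut.run_tool_cmd("hadoop", "dfsadmin", ["-safemode", "get"])
    print(safemode.strip())

    # keep_streams=False: output goes straight to this process's stdout/stderr
    hadut.run_tool_cmd("hadoop", "version", keep_streams=False)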
crs4/pydoop | pydoop/hadut.py | get_task_trackers | def get_task_trackers(properties=None, hadoop_conf_dir=None, offline=False):
"""
Get the list of task trackers in the Hadoop cluster.
Each element in the returned list is in the ``(host, port)`` format.
All arguments are passed to :func:`run_class`.
If ``offline`` is :obj:`True`, try getting the list of task trackers from
the ``slaves`` file in Hadoop's configuration directory (no attempt is
made to contact the Hadoop daemons). In this case, ports are set to 0.
"""
if offline:
if not hadoop_conf_dir:
hadoop_conf_dir = pydoop.hadoop_conf()
slaves = os.path.join(hadoop_conf_dir, "slaves")
try:
with open(slaves) as f:
task_trackers = [(l.strip(), 0) for l in f]
except IOError:
task_trackers = []
else:
# run JobClient directly (avoids "hadoop job" deprecation)
stdout = run_class(
"org.apache.hadoop.mapred.JobClient", ["-list-active-trackers"],
properties=properties, hadoop_conf_dir=hadoop_conf_dir,
keep_streams=True
)
task_trackers = []
for line in stdout.splitlines():
if not line:
continue
line = line.split(":")
task_trackers.append((line[0].split("_")[1], int(line[-1])))
return task_trackers | python | def get_task_trackers(properties=None, hadoop_conf_dir=None, offline=False):
"""
Get the list of task trackers in the Hadoop cluster.
Each element in the returned list is in the ``(host, port)`` format.
All arguments are passed to :func:`run_class`.
If ``offline`` is :obj:`True`, try getting the list of task trackers from
the ``slaves`` file in Hadoop's configuration directory (no attempt is
made to contact the Hadoop daemons). In this case, ports are set to 0.
"""
if offline:
if not hadoop_conf_dir:
hadoop_conf_dir = pydoop.hadoop_conf()
slaves = os.path.join(hadoop_conf_dir, "slaves")
try:
with open(slaves) as f:
task_trackers = [(l.strip(), 0) for l in f]
except IOError:
task_trackers = []
else:
# run JobClient directly (avoids "hadoop job" deprecation)
stdout = run_class(
"org.apache.hadoop.mapred.JobClient", ["-list-active-trackers"],
properties=properties, hadoop_conf_dir=hadoop_conf_dir,
keep_streams=True
)
task_trackers = []
for line in stdout.splitlines():
if not line:
continue
line = line.split(":")
task_trackers.append((line[0].split("_")[1], int(line[-1])))
return task_trackers | [
"def",
"get_task_trackers",
"(",
"properties",
"=",
"None",
",",
"hadoop_conf_dir",
"=",
"None",
",",
"offline",
"=",
"False",
")",
":",
"if",
"offline",
":",
"if",
"not",
"hadoop_conf_dir",
":",
"hadoop_conf_dir",
"=",
"pydoop",
".",
"hadoop_conf",
"(",
")",
"slaves",
"=",
"os",
".",
"path",
".",
"join",
"(",
"hadoop_conf_dir",
",",
"\"slaves\"",
")",
"try",
":",
"with",
"open",
"(",
"slaves",
")",
"as",
"f",
":",
"task_trackers",
"=",
"[",
"(",
"l",
".",
"strip",
"(",
")",
",",
"0",
")",
"for",
"l",
"in",
"f",
"]",
"except",
"IOError",
":",
"task_trackers",
"=",
"[",
"]",
"else",
":",
"# run JobClient directly (avoids \"hadoop job\" deprecation)",
"stdout",
"=",
"run_class",
"(",
"\"org.apache.hadoop.mapred.JobClient\"",
",",
"[",
"\"-list-active-trackers\"",
"]",
",",
"properties",
"=",
"properties",
",",
"hadoop_conf_dir",
"=",
"hadoop_conf_dir",
",",
"keep_streams",
"=",
"True",
")",
"task_trackers",
"=",
"[",
"]",
"for",
"line",
"in",
"stdout",
".",
"splitlines",
"(",
")",
":",
"if",
"not",
"line",
":",
"continue",
"line",
"=",
"line",
".",
"split",
"(",
"\":\"",
")",
"task_trackers",
".",
"append",
"(",
"(",
"line",
"[",
"0",
"]",
".",
"split",
"(",
"\"_\"",
")",
"[",
"1",
"]",
",",
"int",
"(",
"line",
"[",
"-",
"1",
"]",
")",
")",
")",
"return",
"task_trackers"
]
| Get the list of task trackers in the Hadoop cluster.
Each element in the returned list is in the ``(host, port)`` format.
All arguments are passed to :func:`run_class`.
If ``offline`` is :obj:`True`, try getting the list of task trackers from
the ``slaves`` file in Hadoop's configuration directory (no attempt is
made to contact the Hadoop daemons). In this case, ports are set to 0. | [
"Get",
"the",
"list",
"of",
"task",
"trackers",
"in",
"the",
"Hadoop",
"cluster",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L194-L227 | train |
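A hedged sketch of get_task_trackers() in offline mode, reading the slaves file instead of contacting the daemons.

    from pydoop import hadut

    for host, port in hadut.get_task_trackers(offline=True):
        print(host, port)      # ports are 0 in offline mode, as documented above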
crs4/pydoop | pydoop/hadut.py | get_num_nodes | def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False):
"""
Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`.
"""
return len(get_task_trackers(properties, hadoop_conf_dir, offline)) | python | def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False):
"""
Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`.
"""
return len(get_task_trackers(properties, hadoop_conf_dir, offline)) | [
"def",
"get_num_nodes",
"(",
"properties",
"=",
"None",
",",
"hadoop_conf_dir",
"=",
"None",
",",
"offline",
"=",
"False",
")",
":",
"return",
"len",
"(",
"get_task_trackers",
"(",
"properties",
",",
"hadoop_conf_dir",
",",
"offline",
")",
")"
]
| Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`. | [
"Get",
"the",
"number",
"of",
"task",
"trackers",
"in",
"the",
"Hadoop",
"cluster",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L230-L236 | train |
crs4/pydoop | pydoop/hadut.py | dfs | def dfs(args=None, properties=None, hadoop_conf_dir=None):
"""
Run the Hadoop file system shell.
All arguments are passed to :func:`run_class`.
"""
# run FsShell directly (avoids "hadoop dfs" deprecation)
return run_class(
"org.apache.hadoop.fs.FsShell", args, properties,
hadoop_conf_dir=hadoop_conf_dir, keep_streams=True
) | python | def dfs(args=None, properties=None, hadoop_conf_dir=None):
"""
Run the Hadoop file system shell.
All arguments are passed to :func:`run_class`.
"""
# run FsShell directly (avoids "hadoop dfs" deprecation)
return run_class(
"org.apache.hadoop.fs.FsShell", args, properties,
hadoop_conf_dir=hadoop_conf_dir, keep_streams=True
) | [
"def",
"dfs",
"(",
"args",
"=",
"None",
",",
"properties",
"=",
"None",
",",
"hadoop_conf_dir",
"=",
"None",
")",
":",
"# run FsShell directly (avoids \"hadoop dfs\" deprecation)",
"return",
"run_class",
"(",
"\"org.apache.hadoop.fs.FsShell\"",
",",
"args",
",",
"properties",
",",
"hadoop_conf_dir",
"=",
"hadoop_conf_dir",
",",
"keep_streams",
"=",
"True",
")"
]
| Run the Hadoop file system shell.
All arguments are passed to :func:`run_class`. | [
"Run",
"the",
"Hadoop",
"file",
"system",
"shell",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L239-L249 | train |
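A hedged sketch driving the Hadoop file system shell through dfs(); the listed path is hypothetical.

    from pydoop import hadut

    print(hadut.dfs(["-ls", "/user/demo"]))   # FsShell output returned as a string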
crs4/pydoop | pydoop/hadut.py | run_pipes | def run_pipes(executable, input_path, output_path, more_args=None,
properties=None, force_pydoop_submitter=False,
hadoop_conf_dir=None, logger=None, keep_streams=False):
"""
Run a pipes command.
``more_args`` (after setting input/output path) and ``properties``
are passed to :func:`run_cmd`.
If not specified otherwise, this function sets the properties
``mapreduce.pipes.isjavarecordreader`` and
``mapreduce.pipes.isjavarecordwriter`` to ``"true"``.
This function works around a bug in Hadoop pipes that affects
versions of Hadoop with security when the local file system is
used as the default FS (no HDFS); see
https://issues.apache.org/jira/browse/MAPREDUCE-4000. In those
set-ups, the function uses Pydoop's own pipes submitter
application. You can force the use of Pydoop's submitter by
passing the argument force_pydoop_submitter=True.
"""
if logger is None:
logger = utils.NullLogger()
if not hdfs.path.exists(executable):
raise IOError("executable %s not found" % executable)
if not hdfs.path.exists(input_path) and not (set(input_path) & GLOB_CHARS):
raise IOError("input path %s not found" % input_path)
if properties is None:
properties = {}
properties.setdefault('mapreduce.pipes.isjavarecordreader', 'true')
properties.setdefault('mapreduce.pipes.isjavarecordwriter', 'true')
if force_pydoop_submitter:
use_pydoop_submit = True
else:
use_pydoop_submit = False
ver = pydoop.hadoop_version_info()
if ver.has_security():
if ver.is_cdh_mrv2() and hdfs.default_is_local():
raise RuntimeError("mrv2 on local fs not supported yet")
use_pydoop_submit = hdfs.default_is_local()
args = [
"-program", executable,
"-input", input_path,
"-output", output_path,
]
if more_args is not None:
args.extend(more_args)
if use_pydoop_submit:
submitter = "it.crs4.pydoop.pipes.Submitter"
pydoop_jar = pydoop.jar_path()
args.extend(("-libjars", pydoop_jar))
return run_class(submitter, args, properties,
classpath=pydoop_jar, logger=logger,
keep_streams=keep_streams)
else:
return run_mapred_cmd("pipes", args=args, properties=properties,
hadoop_conf_dir=hadoop_conf_dir, logger=logger,
keep_streams=keep_streams) | python | def run_pipes(executable, input_path, output_path, more_args=None,
properties=None, force_pydoop_submitter=False,
hadoop_conf_dir=None, logger=None, keep_streams=False):
"""
Run a pipes command.
``more_args`` (after setting input/output path) and ``properties``
are passed to :func:`run_cmd`.
If not specified otherwise, this function sets the properties
``mapreduce.pipes.isjavarecordreader`` and
``mapreduce.pipes.isjavarecordwriter`` to ``"true"``.
This function works around a bug in Hadoop pipes that affects
versions of Hadoop with security when the local file system is
used as the default FS (no HDFS); see
https://issues.apache.org/jira/browse/MAPREDUCE-4000. In those
set-ups, the function uses Pydoop's own pipes submitter
application. You can force the use of Pydoop's submitter by
passing the argument force_pydoop_submitter=True.
"""
if logger is None:
logger = utils.NullLogger()
if not hdfs.path.exists(executable):
raise IOError("executable %s not found" % executable)
if not hdfs.path.exists(input_path) and not (set(input_path) & GLOB_CHARS):
raise IOError("input path %s not found" % input_path)
if properties is None:
properties = {}
properties.setdefault('mapreduce.pipes.isjavarecordreader', 'true')
properties.setdefault('mapreduce.pipes.isjavarecordwriter', 'true')
if force_pydoop_submitter:
use_pydoop_submit = True
else:
use_pydoop_submit = False
ver = pydoop.hadoop_version_info()
if ver.has_security():
if ver.is_cdh_mrv2() and hdfs.default_is_local():
raise RuntimeError("mrv2 on local fs not supported yet")
use_pydoop_submit = hdfs.default_is_local()
args = [
"-program", executable,
"-input", input_path,
"-output", output_path,
]
if more_args is not None:
args.extend(more_args)
if use_pydoop_submit:
submitter = "it.crs4.pydoop.pipes.Submitter"
pydoop_jar = pydoop.jar_path()
args.extend(("-libjars", pydoop_jar))
return run_class(submitter, args, properties,
classpath=pydoop_jar, logger=logger,
keep_streams=keep_streams)
else:
return run_mapred_cmd("pipes", args=args, properties=properties,
hadoop_conf_dir=hadoop_conf_dir, logger=logger,
keep_streams=keep_streams) | [
"def",
"run_pipes",
"(",
"executable",
",",
"input_path",
",",
"output_path",
",",
"more_args",
"=",
"None",
",",
"properties",
"=",
"None",
",",
"force_pydoop_submitter",
"=",
"False",
",",
"hadoop_conf_dir",
"=",
"None",
",",
"logger",
"=",
"None",
",",
"keep_streams",
"=",
"False",
")",
":",
"if",
"logger",
"is",
"None",
":",
"logger",
"=",
"utils",
".",
"NullLogger",
"(",
")",
"if",
"not",
"hdfs",
".",
"path",
".",
"exists",
"(",
"executable",
")",
":",
"raise",
"IOError",
"(",
"\"executable %s not found\"",
"%",
"executable",
")",
"if",
"not",
"hdfs",
".",
"path",
".",
"exists",
"(",
"input_path",
")",
"and",
"not",
"(",
"set",
"(",
"input_path",
")",
"&",
"GLOB_CHARS",
")",
":",
"raise",
"IOError",
"(",
"\"input path %s not found\"",
"%",
"input_path",
")",
"if",
"properties",
"is",
"None",
":",
"properties",
"=",
"{",
"}",
"properties",
".",
"setdefault",
"(",
"'mapreduce.pipes.isjavarecordreader'",
",",
"'true'",
")",
"properties",
".",
"setdefault",
"(",
"'mapreduce.pipes.isjavarecordwriter'",
",",
"'true'",
")",
"if",
"force_pydoop_submitter",
":",
"use_pydoop_submit",
"=",
"True",
"else",
":",
"use_pydoop_submit",
"=",
"False",
"ver",
"=",
"pydoop",
".",
"hadoop_version_info",
"(",
")",
"if",
"ver",
".",
"has_security",
"(",
")",
":",
"if",
"ver",
".",
"is_cdh_mrv2",
"(",
")",
"and",
"hdfs",
".",
"default_is_local",
"(",
")",
":",
"raise",
"RuntimeError",
"(",
"\"mrv2 on local fs not supported yet\"",
")",
"use_pydoop_submit",
"=",
"hdfs",
".",
"default_is_local",
"(",
")",
"args",
"=",
"[",
"\"-program\"",
",",
"executable",
",",
"\"-input\"",
",",
"input_path",
",",
"\"-output\"",
",",
"output_path",
",",
"]",
"if",
"more_args",
"is",
"not",
"None",
":",
"args",
".",
"extend",
"(",
"more_args",
")",
"if",
"use_pydoop_submit",
":",
"submitter",
"=",
"\"it.crs4.pydoop.pipes.Submitter\"",
"pydoop_jar",
"=",
"pydoop",
".",
"jar_path",
"(",
")",
"args",
".",
"extend",
"(",
"(",
"\"-libjars\"",
",",
"pydoop_jar",
")",
")",
"return",
"run_class",
"(",
"submitter",
",",
"args",
",",
"properties",
",",
"classpath",
"=",
"pydoop_jar",
",",
"logger",
"=",
"logger",
",",
"keep_streams",
"=",
"keep_streams",
")",
"else",
":",
"return",
"run_mapred_cmd",
"(",
"\"pipes\"",
",",
"args",
"=",
"args",
",",
"properties",
"=",
"properties",
",",
"hadoop_conf_dir",
"=",
"hadoop_conf_dir",
",",
"logger",
"=",
"logger",
",",
"keep_streams",
"=",
"keep_streams",
")"
]
| Run a pipes command.
``more_args`` (after setting input/output path) and ``properties``
are passed to :func:`run_cmd`.
If not specified otherwise, this function sets the properties
``mapreduce.pipes.isjavarecordreader`` and
``mapreduce.pipes.isjavarecordwriter`` to ``"true"``.
This function works around a bug in Hadoop pipes that affects
versions of Hadoop with security when the local file system is
used as the default FS (no HDFS); see
https://issues.apache.org/jira/browse/MAPREDUCE-4000. In those
set-ups, the function uses Pydoop's own pipes submitter
application. You can force the use of Pydoop's submitter by
passing the argument force_pydoop_submitter=True. | [
"Run",
"a",
"pipes",
"command",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L338-L395 | train |
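A hedged sketch of a run_pipes() submission; the executable location, HDFS paths, and the property shown are placeholders, and a configured cluster with the launcher already uploaded is assumed.

    from pydoop import hadut

    hadut.run_pipes(
        "/user/demo/bin/wordcount",            # pipes launcher already on HDFS
        "/user/demo/input",                    # hypothetical input directory
        "/user/demo/output",                   # hypothetical output directory
        properties={"mapreduce.job.reduces": "2"},
    )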
crs4/pydoop | pydoop/hadut.py | collect_output | def collect_output(mr_out_dir, out_file=None):
"""
Return all mapreduce output in ``mr_out_dir``.
Append the output to ``out_file`` if provided. Otherwise, return
the result as a single string (it is the caller's responsibility to
ensure that the amount of data retrieved fits into memory).
"""
if out_file is None:
output = []
for fn in iter_mr_out_files(mr_out_dir):
with hdfs.open(fn, "rt") as f:
output.append(f.read())
return "".join(output)
else:
block_size = 16777216
with open(out_file, 'a') as o:
for fn in iter_mr_out_files(mr_out_dir):
with hdfs.open(fn) as f:
data = f.read(block_size)
while len(data) > 0:
o.write(data)
data = f.read(block_size) | python | def collect_output(mr_out_dir, out_file=None):
"""
Return all mapreduce output in ``mr_out_dir``.
Append the output to ``out_file`` if provided. Otherwise, return
the result as a single string (it is the caller's responsibility to
ensure that the amount of data retrieved fits into memory).
"""
if out_file is None:
output = []
for fn in iter_mr_out_files(mr_out_dir):
with hdfs.open(fn, "rt") as f:
output.append(f.read())
return "".join(output)
else:
block_size = 16777216
with open(out_file, 'a') as o:
for fn in iter_mr_out_files(mr_out_dir):
with hdfs.open(fn) as f:
data = f.read(block_size)
while len(data) > 0:
o.write(data)
data = f.read(block_size) | [
"def",
"collect_output",
"(",
"mr_out_dir",
",",
"out_file",
"=",
"None",
")",
":",
"if",
"out_file",
"is",
"None",
":",
"output",
"=",
"[",
"]",
"for",
"fn",
"in",
"iter_mr_out_files",
"(",
"mr_out_dir",
")",
":",
"with",
"hdfs",
".",
"open",
"(",
"fn",
",",
"\"rt\"",
")",
"as",
"f",
":",
"output",
".",
"append",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"\"\"",
".",
"join",
"(",
"output",
")",
"else",
":",
"block_size",
"=",
"16777216",
"with",
"open",
"(",
"out_file",
",",
"'a'",
")",
"as",
"o",
":",
"for",
"fn",
"in",
"iter_mr_out_files",
"(",
"mr_out_dir",
")",
":",
"with",
"hdfs",
".",
"open",
"(",
"fn",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
"block_size",
")",
"while",
"len",
"(",
"data",
")",
">",
"0",
":",
"o",
".",
"write",
"(",
"data",
")",
"data",
"=",
"f",
".",
"read",
"(",
"block_size",
")"
]
| Return all mapreduce output in ``mr_out_dir``.
Append the output to ``out_file`` if provided. Otherwise, return
the result as a single string (it is the caller's responsibility to
ensure that the amount of data retrieved fits into memory). | [
"Return",
"all",
"mapreduce",
"output",
"in",
"mr_out_dir",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L425-L447 | train |
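A hedged sketch of the two collect_output() modes described above; directory names are hypothetical.

    from pydoop import hadut

    text = hadut.collect_output("/user/demo/output")               # small output, in memory
    hadut.collect_output("/user/demo/big_output", "results.txt")   # large output, appended locally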
crs4/pydoop | pydoop/hadut.py | PipesRunner.set_output | def set_output(self, output):
"""
Set the output path for the job. Optional if the runner has been
instantiated with a prefix.
"""
self.output = output
self.logger.info("assigning output to %s", self.output) | python | def set_output(self, output):
"""
Set the output path for the job. Optional if the runner has been
instantiated with a prefix.
"""
self.output = output
self.logger.info("assigning output to %s", self.output) | [
"def",
"set_output",
"(",
"self",
",",
"output",
")",
":",
"self",
".",
"output",
"=",
"output",
"self",
".",
"logger",
".",
"info",
"(",
"\"assigning output to %s\"",
",",
"self",
".",
"output",
")"
]
| Set the output path for the job. Optional if the runner has been
instantiated with a prefix. | [
"Set",
"the",
"output",
"path",
"for",
"the",
"job",
".",
"Optional",
"if",
"the",
"runner",
"has",
"been",
"instantiated",
"with",
"a",
"prefix",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L504-L510 | train |
crs4/pydoop | pydoop/hadut.py | PipesRunner.set_exe | def set_exe(self, pipes_code):
"""
Dump launcher code to the distributed file system.
"""
if not self.output:
raise RuntimeError("no output directory, can't create launcher")
parent = hdfs.path.dirname(hdfs.path.abspath(self.output.rstrip("/")))
self.exe = hdfs.path.join(parent, utils.make_random_str())
hdfs.dump(pipes_code, self.exe) | python | def set_exe(self, pipes_code):
"""
Dump launcher code to the distributed file system.
"""
if not self.output:
raise RuntimeError("no output directory, can't create launcher")
parent = hdfs.path.dirname(hdfs.path.abspath(self.output.rstrip("/")))
self.exe = hdfs.path.join(parent, utils.make_random_str())
hdfs.dump(pipes_code, self.exe) | [
"def",
"set_exe",
"(",
"self",
",",
"pipes_code",
")",
":",
"if",
"not",
"self",
".",
"output",
":",
"raise",
"RuntimeError",
"(",
"\"no output directory, can't create launcher\"",
")",
"parent",
"=",
"hdfs",
".",
"path",
".",
"dirname",
"(",
"hdfs",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"output",
".",
"rstrip",
"(",
"\"/\"",
")",
")",
")",
"self",
".",
"exe",
"=",
"hdfs",
".",
"path",
".",
"join",
"(",
"parent",
",",
"utils",
".",
"make_random_str",
"(",
")",
")",
"hdfs",
".",
"dump",
"(",
"pipes_code",
",",
"self",
".",
"exe",
")"
]
| Dump launcher code to the distributed file system. | [
"Dump",
"launcher",
"code",
"to",
"the",
"distributed",
"file",
"system",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L512-L520 | train |
crs4/pydoop | pydoop/hdfs/__init__.py | dump | def dump(data, hdfs_path, **kwargs):
"""\
Write ``data`` to ``hdfs_path``.
Keyword arguments are passed to :func:`open`, except for ``mode``,
which is forced to ``"w"`` (or ``"wt"`` for text data).
"""
kwargs["mode"] = "w" if isinstance(data, bintype) else "wt"
with open(hdfs_path, **kwargs) as fo:
i = 0
bufsize = common.BUFSIZE
while i < len(data):
fo.write(data[i: i + bufsize])
i += bufsize
fo.fs.close() | python | def dump(data, hdfs_path, **kwargs):
"""\
Write ``data`` to ``hdfs_path``.
Keyword arguments are passed to :func:`open`, except for ``mode``,
which is forced to ``"w"`` (or ``"wt"`` for text data).
"""
kwargs["mode"] = "w" if isinstance(data, bintype) else "wt"
with open(hdfs_path, **kwargs) as fo:
i = 0
bufsize = common.BUFSIZE
while i < len(data):
fo.write(data[i: i + bufsize])
i += bufsize
fo.fs.close() | [
"def",
"dump",
"(",
"data",
",",
"hdfs_path",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"\"mode\"",
"]",
"=",
"\"w\"",
"if",
"isinstance",
"(",
"data",
",",
"bintype",
")",
"else",
"\"wt\"",
"with",
"open",
"(",
"hdfs_path",
",",
"*",
"*",
"kwargs",
")",
"as",
"fo",
":",
"i",
"=",
"0",
"bufsize",
"=",
"common",
".",
"BUFSIZE",
"while",
"i",
"<",
"len",
"(",
"data",
")",
":",
"fo",
".",
"write",
"(",
"data",
"[",
"i",
":",
"i",
"+",
"bufsize",
"]",
")",
"i",
"+=",
"bufsize",
"fo",
".",
"fs",
".",
"close",
"(",
")"
]
| \
Write ``data`` to ``hdfs_path``.
Keyword arguments are passed to :func:`open`, except for ``mode``,
which is forced to ``"w"`` (or ``"wt"`` for text data). | [
"\\",
"Write",
"data",
"to",
"hdfs_path",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L129-L143 | train |
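A hedged sketch of dump(); the write mode is picked from the data type as documented above, and the paths are hypothetical.

    import pydoop.hdfs as hdfs

    hdfs.dump("hello, hdfs\n", "/user/demo/hello.txt")     # text  -> mode "wt"
    hdfs.dump(b"\x00\x01\x02", "/user/demo/blob.bin")      # bytes -> mode "w"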
crs4/pydoop | pydoop/hdfs/__init__.py | load | def load(hdfs_path, **kwargs):
"""\
Read the content of ``hdfs_path`` and return it.
Keyword arguments are passed to :func:`open`. The `"mode"` kwarg
must be readonly.
"""
m, _ = common.parse_mode(kwargs.get("mode", "r"))
if m != "r":
raise ValueError("opening mode must be readonly")
with open(hdfs_path, **kwargs) as fi:
data = fi.read()
fi.fs.close()
return data | python | def load(hdfs_path, **kwargs):
"""\
Read the content of ``hdfs_path`` and return it.
Keyword arguments are passed to :func:`open`. The `"mode"` kwarg
must be readonly.
"""
m, _ = common.parse_mode(kwargs.get("mode", "r"))
if m != "r":
raise ValueError("opening mode must be readonly")
with open(hdfs_path, **kwargs) as fi:
data = fi.read()
fi.fs.close()
return data | [
"def",
"load",
"(",
"hdfs_path",
",",
"*",
"*",
"kwargs",
")",
":",
"m",
",",
"_",
"=",
"common",
".",
"parse_mode",
"(",
"kwargs",
".",
"get",
"(",
"\"mode\"",
",",
"\"r\"",
")",
")",
"if",
"m",
"!=",
"\"r\"",
":",
"raise",
"ValueError",
"(",
"\"opening mode must be readonly\"",
")",
"with",
"open",
"(",
"hdfs_path",
",",
"*",
"*",
"kwargs",
")",
"as",
"fi",
":",
"data",
"=",
"fi",
".",
"read",
"(",
")",
"fi",
".",
"fs",
".",
"close",
"(",
")",
"return",
"data"
]
| \
Read the content of ``hdfs_path`` and return it.
Keyword arguments are passed to :func:`open`. The `"mode"` kwarg
must be readonly. | [
"\\",
"Read",
"the",
"content",
"of",
"hdfs_path",
"and",
"return",
"it",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L146-L159 | train |
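A hedged sketch of load(), the read-side counterpart of dump(); the paths are hypothetical.

    import pydoop.hdfs as hdfs

    raw = hdfs.load("/user/demo/blob.bin")                  # bytes by default
    text = hdfs.load("/user/demo/hello.txt", mode="rt")     # decoded text
    print(len(raw), text)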
crs4/pydoop | pydoop/hdfs/__init__.py | cp | def cp(src_hdfs_path, dest_hdfs_path, **kwargs):
"""\
Copy the contents of ``src_hdfs_path`` to ``dest_hdfs_path``.
If ``src_hdfs_path`` is a directory, its contents will be copied
recursively. Source file(s) are opened for reading and copies are
opened for writing. Additional keyword arguments, if any, are
handled like in :func:`open`.
"""
src, dest = {}, {}
try:
for d, p in ((src, src_hdfs_path), (dest, dest_hdfs_path)):
d["host"], d["port"], d["path"] = path.split(p)
d["fs"] = hdfs(d["host"], d["port"])
# --- does src exist? ---
try:
src["info"] = src["fs"].get_path_info(src["path"])
except IOError:
raise IOError("no such file or directory: %r" % (src["path"]))
# --- src exists. Does dest exist? ---
try:
dest["info"] = dest["fs"].get_path_info(dest["path"])
except IOError:
if src["info"]["kind"] == "file":
_cp_file(src["fs"], src["path"], dest["fs"], dest["path"],
**kwargs)
return
else:
dest["fs"].create_directory(dest["path"])
dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"]
for item in src["fs"].list_directory(src["path"]):
cp(item["name"], dest_hdfs_path, **kwargs)
return
# --- dest exists. Is it a file? ---
if dest["info"]["kind"] == "file":
raise IOError("%r already exists" % (dest["path"]))
# --- dest is a directory ---
dest["path"] = path.join(dest["path"], path.basename(src["path"]))
if dest["fs"].exists(dest["path"]):
raise IOError("%r already exists" % (dest["path"]))
if src["info"]["kind"] == "file":
_cp_file(src["fs"], src["path"], dest["fs"], dest["path"],
**kwargs)
else:
dest["fs"].create_directory(dest["path"])
dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"]
for item in src["fs"].list_directory(src["path"]):
cp(item["name"], dest_hdfs_path, **kwargs)
finally:
for d in src, dest:
try:
d["fs"].close()
except KeyError:
pass | python | def cp(src_hdfs_path, dest_hdfs_path, **kwargs):
"""\
Copy the contents of ``src_hdfs_path`` to ``dest_hdfs_path``.
If ``src_hdfs_path`` is a directory, its contents will be copied
recursively. Source file(s) are opened for reading and copies are
opened for writing. Additional keyword arguments, if any, are
handled like in :func:`open`.
"""
src, dest = {}, {}
try:
for d, p in ((src, src_hdfs_path), (dest, dest_hdfs_path)):
d["host"], d["port"], d["path"] = path.split(p)
d["fs"] = hdfs(d["host"], d["port"])
# --- does src exist? ---
try:
src["info"] = src["fs"].get_path_info(src["path"])
except IOError:
raise IOError("no such file or directory: %r" % (src["path"]))
# --- src exists. Does dest exist? ---
try:
dest["info"] = dest["fs"].get_path_info(dest["path"])
except IOError:
if src["info"]["kind"] == "file":
_cp_file(src["fs"], src["path"], dest["fs"], dest["path"],
**kwargs)
return
else:
dest["fs"].create_directory(dest["path"])
dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"]
for item in src["fs"].list_directory(src["path"]):
cp(item["name"], dest_hdfs_path, **kwargs)
return
# --- dest exists. Is it a file? ---
if dest["info"]["kind"] == "file":
raise IOError("%r already exists" % (dest["path"]))
# --- dest is a directory ---
dest["path"] = path.join(dest["path"], path.basename(src["path"]))
if dest["fs"].exists(dest["path"]):
raise IOError("%r already exists" % (dest["path"]))
if src["info"]["kind"] == "file":
_cp_file(src["fs"], src["path"], dest["fs"], dest["path"],
**kwargs)
else:
dest["fs"].create_directory(dest["path"])
dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"]
for item in src["fs"].list_directory(src["path"]):
cp(item["name"], dest_hdfs_path, **kwargs)
finally:
for d in src, dest:
try:
d["fs"].close()
except KeyError:
pass | [
"def",
"cp",
"(",
"src_hdfs_path",
",",
"dest_hdfs_path",
",",
"*",
"*",
"kwargs",
")",
":",
"src",
",",
"dest",
"=",
"{",
"}",
",",
"{",
"}",
"try",
":",
"for",
"d",
",",
"p",
"in",
"(",
"(",
"src",
",",
"src_hdfs_path",
")",
",",
"(",
"dest",
",",
"dest_hdfs_path",
")",
")",
":",
"d",
"[",
"\"host\"",
"]",
",",
"d",
"[",
"\"port\"",
"]",
",",
"d",
"[",
"\"path\"",
"]",
"=",
"path",
".",
"split",
"(",
"p",
")",
"d",
"[",
"\"fs\"",
"]",
"=",
"hdfs",
"(",
"d",
"[",
"\"host\"",
"]",
",",
"d",
"[",
"\"port\"",
"]",
")",
"# --- does src exist? ---",
"try",
":",
"src",
"[",
"\"info\"",
"]",
"=",
"src",
"[",
"\"fs\"",
"]",
".",
"get_path_info",
"(",
"src",
"[",
"\"path\"",
"]",
")",
"except",
"IOError",
":",
"raise",
"IOError",
"(",
"\"no such file or directory: %r\"",
"%",
"(",
"src",
"[",
"\"path\"",
"]",
")",
")",
"# --- src exists. Does dest exist? ---",
"try",
":",
"dest",
"[",
"\"info\"",
"]",
"=",
"dest",
"[",
"\"fs\"",
"]",
".",
"get_path_info",
"(",
"dest",
"[",
"\"path\"",
"]",
")",
"except",
"IOError",
":",
"if",
"src",
"[",
"\"info\"",
"]",
"[",
"\"kind\"",
"]",
"==",
"\"file\"",
":",
"_cp_file",
"(",
"src",
"[",
"\"fs\"",
"]",
",",
"src",
"[",
"\"path\"",
"]",
",",
"dest",
"[",
"\"fs\"",
"]",
",",
"dest",
"[",
"\"path\"",
"]",
",",
"*",
"*",
"kwargs",
")",
"return",
"else",
":",
"dest",
"[",
"\"fs\"",
"]",
".",
"create_directory",
"(",
"dest",
"[",
"\"path\"",
"]",
")",
"dest_hdfs_path",
"=",
"dest",
"[",
"\"fs\"",
"]",
".",
"get_path_info",
"(",
"dest",
"[",
"\"path\"",
"]",
")",
"[",
"\"name\"",
"]",
"for",
"item",
"in",
"src",
"[",
"\"fs\"",
"]",
".",
"list_directory",
"(",
"src",
"[",
"\"path\"",
"]",
")",
":",
"cp",
"(",
"item",
"[",
"\"name\"",
"]",
",",
"dest_hdfs_path",
",",
"*",
"*",
"kwargs",
")",
"return",
"# --- dest exists. Is it a file? ---",
"if",
"dest",
"[",
"\"info\"",
"]",
"[",
"\"kind\"",
"]",
"==",
"\"file\"",
":",
"raise",
"IOError",
"(",
"\"%r already exists\"",
"%",
"(",
"dest",
"[",
"\"path\"",
"]",
")",
")",
"# --- dest is a directory ---",
"dest",
"[",
"\"path\"",
"]",
"=",
"path",
".",
"join",
"(",
"dest",
"[",
"\"path\"",
"]",
",",
"path",
".",
"basename",
"(",
"src",
"[",
"\"path\"",
"]",
")",
")",
"if",
"dest",
"[",
"\"fs\"",
"]",
".",
"exists",
"(",
"dest",
"[",
"\"path\"",
"]",
")",
":",
"raise",
"IOError",
"(",
"\"%r already exists\"",
"%",
"(",
"dest",
"[",
"\"path\"",
"]",
")",
")",
"if",
"src",
"[",
"\"info\"",
"]",
"[",
"\"kind\"",
"]",
"==",
"\"file\"",
":",
"_cp_file",
"(",
"src",
"[",
"\"fs\"",
"]",
",",
"src",
"[",
"\"path\"",
"]",
",",
"dest",
"[",
"\"fs\"",
"]",
",",
"dest",
"[",
"\"path\"",
"]",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"dest",
"[",
"\"fs\"",
"]",
".",
"create_directory",
"(",
"dest",
"[",
"\"path\"",
"]",
")",
"dest_hdfs_path",
"=",
"dest",
"[",
"\"fs\"",
"]",
".",
"get_path_info",
"(",
"dest",
"[",
"\"path\"",
"]",
")",
"[",
"\"name\"",
"]",
"for",
"item",
"in",
"src",
"[",
"\"fs\"",
"]",
".",
"list_directory",
"(",
"src",
"[",
"\"path\"",
"]",
")",
":",
"cp",
"(",
"item",
"[",
"\"name\"",
"]",
",",
"dest_hdfs_path",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"for",
"d",
"in",
"src",
",",
"dest",
":",
"try",
":",
"d",
"[",
"\"fs\"",
"]",
".",
"close",
"(",
")",
"except",
"KeyError",
":",
"pass"
]
| \
Copy the contents of ``src_hdfs_path`` to ``dest_hdfs_path``.
If ``src_hdfs_path`` is a directory, its contents will be copied
recursively. Source file(s) are opened for reading and copies are
opened for writing. Additional keyword arguments, if any, are
handled like in :func:`open`. | [
"\\",
"Copy",
"the",
"contents",
"of",
"src_hdfs_path",
"to",
"dest_hdfs_path",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L177-L230 | train |
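A usage sketch for cp, assuming the relative paths below (purely illustrative) resolve against the connecting user's HDFS home:

    import pydoop.hdfs as hdfs

    hdfs.cp("data/sample.txt", "backup/sample.txt")   # single file copy
    hdfs.cp("data/logs", "backup/logs")               # a directory is copied recursively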
crs4/pydoop | pydoop/hdfs/__init__.py | put | def put(src_path, dest_hdfs_path, **kwargs):
"""\
Copy the contents of ``src_path`` to ``dest_hdfs_path``.
``src_path`` is forced to be interpreted as an ordinary local path
(see :func:`~path.abspath`). The source file is opened for reading
and the copy is opened for writing. Additional keyword arguments,
if any, are handled like in :func:`open`.
"""
cp(path.abspath(src_path, local=True), dest_hdfs_path, **kwargs) | python | def put(src_path, dest_hdfs_path, **kwargs):
"""\
Copy the contents of ``src_path`` to ``dest_hdfs_path``.
``src_path`` is forced to be interpreted as an ordinary local path
(see :func:`~path.abspath`). The source file is opened for reading
and the copy is opened for writing. Additional keyword arguments,
if any, are handled like in :func:`open`.
"""
cp(path.abspath(src_path, local=True), dest_hdfs_path, **kwargs) | [
"def",
"put",
"(",
"src_path",
",",
"dest_hdfs_path",
",",
"*",
"*",
"kwargs",
")",
":",
"cp",
"(",
"path",
".",
"abspath",
"(",
"src_path",
",",
"local",
"=",
"True",
")",
",",
"dest_hdfs_path",
",",
"*",
"*",
"kwargs",
")"
]
| \
Copy the contents of ``src_path`` to ``dest_hdfs_path``.
``src_path`` is forced to be interpreted as an ordinary local path
(see :func:`~path.abspath`). The source file is opened for reading
and the copy is opened for writing. Additional keyword arguments,
if any, are handled like in :func:`open`. | [
"\\",
"Copy",
"the",
"contents",
"of",
"src_path",
"to",
"dest_hdfs_path",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L233-L242 | train |
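A usage sketch for put; the paths are illustrative, and the first argument is always treated as a local path:

    import pydoop.hdfs as hdfs

    hdfs.put("/tmp/report.csv", "reports/report.csv")   # local file -> HDFS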
crs4/pydoop | pydoop/hdfs/__init__.py | get | def get(src_hdfs_path, dest_path, **kwargs):
"""\
Copy the contents of ``src_hdfs_path`` to ``dest_path``.
``dest_path`` is forced to be interpreted as an ordinary local
path (see :func:`~path.abspath`). The source file is opened for
reading and the copy is opened for writing. Additional keyword
arguments, if any, are handled like in :func:`open`.
"""
cp(src_hdfs_path, path.abspath(dest_path, local=True), **kwargs) | python | def get(src_hdfs_path, dest_path, **kwargs):
"""\
Copy the contents of ``src_hdfs_path`` to ``dest_path``.
``dest_path`` is forced to be interpreted as an ordinary local
path (see :func:`~path.abspath`). The source file is opened for
reading and the copy is opened for writing. Additional keyword
arguments, if any, are handled like in :func:`open`.
"""
cp(src_hdfs_path, path.abspath(dest_path, local=True), **kwargs) | [
"def",
"get",
"(",
"src_hdfs_path",
",",
"dest_path",
",",
"*",
"*",
"kwargs",
")",
":",
"cp",
"(",
"src_hdfs_path",
",",
"path",
".",
"abspath",
"(",
"dest_path",
",",
"local",
"=",
"True",
")",
",",
"*",
"*",
"kwargs",
")"
]
| \
Copy the contents of ``src_hdfs_path`` to ``dest_path``.
``dest_path`` is forced to be interpreted as an ordinary local
path (see :func:`~path.abspath`). The source file is opened for
reading and the copy is opened for writing. Additional keyword
arguments, if any, are handled like in :func:`open`. | [
"\\",
"Copy",
"the",
"contents",
"of",
"src_hdfs_path",
"to",
"dest_path",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L245-L254 | train |
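The mirror image, get, with the second argument always treated as a local path (again, illustrative names):

    import pydoop.hdfs as hdfs

    hdfs.get("reports/report.csv", "/tmp/report_copy.csv")   # HDFS -> local file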
crs4/pydoop | pydoop/hdfs/__init__.py | mkdir | def mkdir(hdfs_path, user=None):
"""
Create a directory and its parents as needed.
"""
host, port, path_ = path.split(hdfs_path, user)
fs = hdfs(host, port, user)
retval = fs.create_directory(path_)
fs.close()
return retval | python | def mkdir(hdfs_path, user=None):
"""
Create a directory and its parents as needed.
"""
host, port, path_ = path.split(hdfs_path, user)
fs = hdfs(host, port, user)
retval = fs.create_directory(path_)
fs.close()
return retval | [
"def",
"mkdir",
"(",
"hdfs_path",
",",
"user",
"=",
"None",
")",
":",
"host",
",",
"port",
",",
"path_",
"=",
"path",
".",
"split",
"(",
"hdfs_path",
",",
"user",
")",
"fs",
"=",
"hdfs",
"(",
"host",
",",
"port",
",",
"user",
")",
"retval",
"=",
"fs",
".",
"create_directory",
"(",
"path_",
")",
"fs",
".",
"close",
"(",
")",
"return",
"retval"
]
| Create a directory and its parents as needed. | [
"Create",
"a",
"directory",
"and",
"its",
"parents",
"as",
"needed",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L257-L265 | train |
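A usage sketch for mkdir; like mkdir -p, intermediate directories are created as needed:

    import pydoop.hdfs as hdfs

    hdfs.mkdir("projects/2024/raw")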
crs4/pydoop | pydoop/hdfs/__init__.py | lsl | def lsl(hdfs_path, user=None, recursive=False):
"""
Return a list of dictionaries of file properties.
If ``hdfs_path`` is a file, there is only one item corresponding to
the file itself; if it is a directory and ``recursive`` is
:obj:`False`, each list item corresponds to a file or directory
contained by it; if it is a directory and ``recursive`` is
:obj:`True`, the list contains one item for every file or directory
in the tree rooted at ``hdfs_path``.
"""
host, port, path_ = path.split(hdfs_path, user)
fs = hdfs(host, port, user)
if not recursive:
dir_list = fs.list_directory(path_)
else:
treewalk = fs.walk(path_)
top = next(treewalk)
if top['kind'] == 'directory':
dir_list = list(treewalk)
else:
dir_list = [top]
fs.close()
return dir_list | python | def lsl(hdfs_path, user=None, recursive=False):
"""
Return a list of dictionaries of file properties.
If ``hdfs_path`` is a file, there is only one item corresponding to
the file itself; if it is a directory and ``recursive`` is
:obj:`False`, each list item corresponds to a file or directory
contained by it; if it is a directory and ``recursive`` is
:obj:`True`, the list contains one item for every file or directory
in the tree rooted at ``hdfs_path``.
"""
host, port, path_ = path.split(hdfs_path, user)
fs = hdfs(host, port, user)
if not recursive:
dir_list = fs.list_directory(path_)
else:
treewalk = fs.walk(path_)
top = next(treewalk)
if top['kind'] == 'directory':
dir_list = list(treewalk)
else:
dir_list = [top]
fs.close()
return dir_list | [
"def",
"lsl",
"(",
"hdfs_path",
",",
"user",
"=",
"None",
",",
"recursive",
"=",
"False",
")",
":",
"host",
",",
"port",
",",
"path_",
"=",
"path",
".",
"split",
"(",
"hdfs_path",
",",
"user",
")",
"fs",
"=",
"hdfs",
"(",
"host",
",",
"port",
",",
"user",
")",
"if",
"not",
"recursive",
":",
"dir_list",
"=",
"fs",
".",
"list_directory",
"(",
"path_",
")",
"else",
":",
"treewalk",
"=",
"fs",
".",
"walk",
"(",
"path_",
")",
"top",
"=",
"next",
"(",
"treewalk",
")",
"if",
"top",
"[",
"'kind'",
"]",
"==",
"'directory'",
":",
"dir_list",
"=",
"list",
"(",
"treewalk",
")",
"else",
":",
"dir_list",
"=",
"[",
"top",
"]",
"fs",
".",
"close",
"(",
")",
"return",
"dir_list"
]
| Return a list of dictionaries of file properties.
If ``hdfs_path`` is a file, there is only one item corresponding to
the file itself; if it is a directory and ``recursive`` is
:obj:`False`, each list item corresponds to a file or directory
contained by it; if it is a directory and ``recursive`` is
:obj:`True`, the list contains one item for every file or directory
in the tree rooted at ``hdfs_path``. | [
"Return",
"a",
"list",
"of",
"dictionaries",
"of",
"file",
"properties",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L287-L310 | train |
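A usage sketch for lsl; each entry is a dict of properties, including at least 'name' and 'kind' (the 'projects' directory is a hypothetical example):

    import pydoop.hdfs as hdfs

    for info in hdfs.lsl("projects", recursive=True):
        print(info["kind"], info["name"])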
crs4/pydoop | pydoop/hdfs/__init__.py | ls | def ls(hdfs_path, user=None, recursive=False):
"""
Return a list of hdfs paths.
Works in the same way as :func:`lsl`, except for the fact that list
items are hdfs paths instead of dictionaries of properties.
"""
dir_list = lsl(hdfs_path, user, recursive)
return [d["name"] for d in dir_list] | python | def ls(hdfs_path, user=None, recursive=False):
"""
Return a list of hdfs paths.
Works in the same way as :func:`lsl`, except for the fact that list
items are hdfs paths instead of dictionaries of properties.
"""
dir_list = lsl(hdfs_path, user, recursive)
return [d["name"] for d in dir_list] | [
"def",
"ls",
"(",
"hdfs_path",
",",
"user",
"=",
"None",
",",
"recursive",
"=",
"False",
")",
":",
"dir_list",
"=",
"lsl",
"(",
"hdfs_path",
",",
"user",
",",
"recursive",
")",
"return",
"[",
"d",
"[",
"\"name\"",
"]",
"for",
"d",
"in",
"dir_list",
"]"
]
| Return a list of hdfs paths.
Works in the same way as :func:`lsl`, except for the fact that list
items are hdfs paths instead of dictionaries of properties. | [
"Return",
"a",
"list",
"of",
"hdfs",
"paths",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L313-L321 | train |
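The same traversal with ls, which yields plain HDFS paths instead of property dicts:

    import pydoop.hdfs as hdfs

    for p in hdfs.ls("projects", recursive=True):
        print(p)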
crs4/pydoop | pydoop/hdfs/__init__.py | move | def move(src, dest, user=None):
"""
Move or rename src to dest.
"""
src_host, src_port, src_path = path.split(src, user)
dest_host, dest_port, dest_path = path.split(dest, user)
src_fs = hdfs(src_host, src_port, user)
dest_fs = hdfs(dest_host, dest_port, user)
try:
retval = src_fs.move(src_path, dest_fs, dest_path)
return retval
finally:
src_fs.close()
dest_fs.close() | python | def move(src, dest, user=None):
"""
Move or rename src to dest.
"""
src_host, src_port, src_path = path.split(src, user)
dest_host, dest_port, dest_path = path.split(dest, user)
src_fs = hdfs(src_host, src_port, user)
dest_fs = hdfs(dest_host, dest_port, user)
try:
retval = src_fs.move(src_path, dest_fs, dest_path)
return retval
finally:
src_fs.close()
dest_fs.close() | [
"def",
"move",
"(",
"src",
",",
"dest",
",",
"user",
"=",
"None",
")",
":",
"src_host",
",",
"src_port",
",",
"src_path",
"=",
"path",
".",
"split",
"(",
"src",
",",
"user",
")",
"dest_host",
",",
"dest_port",
",",
"dest_path",
"=",
"path",
".",
"split",
"(",
"dest",
",",
"user",
")",
"src_fs",
"=",
"hdfs",
"(",
"src_host",
",",
"src_port",
",",
"user",
")",
"dest_fs",
"=",
"hdfs",
"(",
"dest_host",
",",
"dest_port",
",",
"user",
")",
"try",
":",
"retval",
"=",
"src_fs",
".",
"move",
"(",
"src_path",
",",
"dest_fs",
",",
"dest_path",
")",
"return",
"retval",
"finally",
":",
"src_fs",
".",
"close",
"(",
")",
"dest_fs",
".",
"close",
"(",
")"
]
| Move or rename src to dest. | [
"Move",
"or",
"rename",
"src",
"to",
"dest",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L340-L353 | train |
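A usage sketch for move; the two illustrative paths may even live on different filesystems, since each side gets its own hdfs handle:

    import pydoop.hdfs as hdfs

    hdfs.move("incoming/batch_001.csv", "processed/batch_001.csv")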
crs4/pydoop | pydoop/hdfs/__init__.py | renames | def renames(from_path, to_path, user=None):
"""
Rename ``from_path`` to ``to_path``, creating parents as needed.
"""
to_dir = path.dirname(to_path)
if to_dir:
mkdir(to_dir, user=user)
rename(from_path, to_path, user=user) | python | def renames(from_path, to_path, user=None):
"""
Rename ``from_path`` to ``to_path``, creating parents as needed.
"""
to_dir = path.dirname(to_path)
if to_dir:
mkdir(to_dir, user=user)
rename(from_path, to_path, user=user) | [
"def",
"renames",
"(",
"from_path",
",",
"to_path",
",",
"user",
"=",
"None",
")",
":",
"to_dir",
"=",
"path",
".",
"dirname",
"(",
"to_path",
")",
"if",
"to_dir",
":",
"mkdir",
"(",
"to_dir",
",",
"user",
"=",
"user",
")",
"rename",
"(",
"from_path",
",",
"to_path",
",",
"user",
"=",
"user",
")"
]
| Rename ``from_path`` to ``to_path``, creating parents as needed. | [
"Rename",
"from_path",
"to",
"to_path",
"creating",
"parents",
"as",
"needed",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L381-L388 | train |
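A usage sketch for renames; unlike a plain rename, missing parent directories of the destination are created first:

    import pydoop.hdfs as hdfs

    hdfs.renames("incoming/batch_002.csv", "archive/2024/06/batch_002.csv")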
crs4/pydoop | pydoop/hdfs/file.py | FileIO.readline | def readline(self):
"""
Read and return a line of text.
:rtype: str
:return: the next line of text in the file, including the
newline character
"""
_complain_ifclosed(self.closed)
line = self.f.readline()
if self.__encoding:
return line.decode(self.__encoding, self.__errors)
else:
return line | python | def readline(self):
"""
Read and return a line of text.
:rtype: str
:return: the next line of text in the file, including the
newline character
"""
_complain_ifclosed(self.closed)
line = self.f.readline()
if self.__encoding:
return line.decode(self.__encoding, self.__errors)
else:
return line | [
"def",
"readline",
"(",
"self",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"line",
"=",
"self",
".",
"f",
".",
"readline",
"(",
")",
"if",
"self",
".",
"__encoding",
":",
"return",
"line",
".",
"decode",
"(",
"self",
".",
"__encoding",
",",
"self",
".",
"__errors",
")",
"else",
":",
"return",
"line"
]
| Read and return a line of text.
:rtype: str
:return: the next line of text in the file, including the
newline character | [
"Read",
"and",
"return",
"a",
"line",
"of",
"text",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L107-L120 | train |
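A usage sketch for readline on a file opened through hdfs.open; the path is illustrative and the "rt" (text) mode string is an assumption about the accepted mode flags:

    import pydoop.hdfs as hdfs

    with hdfs.open("logs/app.log", "rt") as f:
        first_line = f.readline()        # includes the trailing newline
        print(first_line.rstrip("\n"))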
crs4/pydoop | pydoop/hdfs/file.py | FileIO.pread | def pread(self, position, length):
r"""
Read ``length`` bytes of data from the file, starting from
``position``\ .
:type position: int
:param position: position from which to read
:type length: int
:param length: the number of bytes to read
:rtype: string
:return: the chunk of data read from the file
"""
_complain_ifclosed(self.closed)
if position > self.size:
raise IOError("position cannot be past EOF")
if length < 0:
length = self.size - position
data = self.f.raw.pread(position, length)
if self.__encoding:
return data.decode(self.__encoding, self.__errors)
else:
return data | python | def pread(self, position, length):
r"""
Read ``length`` bytes of data from the file, starting from
``position``\ .
:type position: int
:param position: position from which to read
:type length: int
:param length: the number of bytes to read
:rtype: string
:return: the chunk of data read from the file
"""
_complain_ifclosed(self.closed)
if position > self.size:
raise IOError("position cannot be past EOF")
if length < 0:
length = self.size - position
data = self.f.raw.pread(position, length)
if self.__encoding:
return data.decode(self.__encoding, self.__errors)
else:
return data | [
"def",
"pread",
"(",
"self",
",",
"position",
",",
"length",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"if",
"position",
">",
"self",
".",
"size",
":",
"raise",
"IOError",
"(",
"\"position cannot be past EOF\"",
")",
"if",
"length",
"<",
"0",
":",
"length",
"=",
"self",
".",
"size",
"-",
"position",
"data",
"=",
"self",
".",
"f",
".",
"raw",
".",
"pread",
"(",
"position",
",",
"length",
")",
"if",
"self",
".",
"__encoding",
":",
"return",
"data",
".",
"decode",
"(",
"self",
".",
"__encoding",
",",
"self",
".",
"__errors",
")",
"else",
":",
"return",
"data"
]
| r"""
Read ``length`` bytes of data from the file, starting from
``position``\ .
:type position: int
:param position: position from which to read
:type length: int
:param length: the number of bytes to read
:rtype: string
:return: the chunk of data read from the file | [
"r",
"Read",
"length",
"bytes",
"of",
"data",
"from",
"the",
"file",
"starting",
"from",
"position",
"\\",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L165-L186 | train |
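A usage sketch for pread; the illustrative file is assumed to be longer than 1024 bytes, since a start position past EOF raises IOError:

    import pydoop.hdfs as hdfs

    with hdfs.open("data/blob.bin") as f:    # default (binary) open mode assumed
        chunk = f.pread(1024, 16)            # 16 bytes starting at offset 1024
        # the regular position used by read()/readline() is left untouched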
crs4/pydoop | pydoop/hdfs/file.py | FileIO.read | def read(self, length=-1):
"""
Read ``length`` bytes from the file. If ``length`` is negative or
omitted, read all data until EOF.
:type length: int
:param length: the number of bytes to read
:rtype: string
:return: the chunk of data read from the file
"""
_complain_ifclosed(self.closed)
# NOTE: libhdfs read stops at block boundaries: it is *essential*
# to ensure that we actually read the required number of bytes.
if length < 0:
length = self.size
chunks = []
while 1:
if length <= 0:
break
c = self.f.read(min(self.buff_size, length))
if c == b"":
break
chunks.append(c)
length -= len(c)
data = b"".join(chunks)
if self.__encoding:
return data.decode(self.__encoding, self.__errors)
else:
return data | python | def read(self, length=-1):
"""
Read ``length`` bytes from the file. If ``length`` is negative or
omitted, read all data until EOF.
:type length: int
:param length: the number of bytes to read
:rtype: string
:return: the chunk of data read from the file
"""
_complain_ifclosed(self.closed)
# NOTE: libhdfs read stops at block boundaries: it is *essential*
# to ensure that we actually read the required number of bytes.
if length < 0:
length = self.size
chunks = []
while 1:
if length <= 0:
break
c = self.f.read(min(self.buff_size, length))
if c == b"":
break
chunks.append(c)
length -= len(c)
data = b"".join(chunks)
if self.__encoding:
return data.decode(self.__encoding, self.__errors)
else:
return data | [
"def",
"read",
"(",
"self",
",",
"length",
"=",
"-",
"1",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"# NOTE: libhdfs read stops at block boundaries: it is *essential*",
"# to ensure that we actually read the required number of bytes.",
"if",
"length",
"<",
"0",
":",
"length",
"=",
"self",
".",
"size",
"chunks",
"=",
"[",
"]",
"while",
"1",
":",
"if",
"length",
"<=",
"0",
":",
"break",
"c",
"=",
"self",
".",
"f",
".",
"read",
"(",
"min",
"(",
"self",
".",
"buff_size",
",",
"length",
")",
")",
"if",
"c",
"==",
"b\"\"",
":",
"break",
"chunks",
".",
"append",
"(",
"c",
")",
"length",
"-=",
"len",
"(",
"c",
")",
"data",
"=",
"b\"\"",
".",
"join",
"(",
"chunks",
")",
"if",
"self",
".",
"__encoding",
":",
"return",
"data",
".",
"decode",
"(",
"self",
".",
"__encoding",
",",
"self",
".",
"__errors",
")",
"else",
":",
"return",
"data"
]
| Read ``length`` bytes from the file. If ``length`` is negative or
omitted, read all data until EOF.
:type length: int
:param length: the number of bytes to read
:rtype: string
:return: the chunk of data read from the file | [
"Read",
"length",
"bytes",
"from",
"the",
"file",
".",
"If",
"length",
"is",
"negative",
"or",
"omitted",
"read",
"all",
"data",
"until",
"EOF",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L188-L216 | train |
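A usage sketch for read; note from the comment in the record that the method keeps reading across HDFS block boundaries until the requested byte count is reached:

    import pydoop.hdfs as hdfs

    with hdfs.open("data/blob.bin") as f:
        header = f.read(8)    # exactly 8 bytes unless EOF comes first
        rest = f.read()       # omitted / negative length means "until EOF"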
crs4/pydoop | pydoop/hdfs/file.py | FileIO.seek | def seek(self, position, whence=os.SEEK_SET):
"""
Seek to ``position`` in file.
:type position: int
:param position: offset in bytes to seek to
:type whence: int
:param whence: defaults to ``os.SEEK_SET`` (absolute); other
values are ``os.SEEK_CUR`` (relative to the current position)
and ``os.SEEK_END`` (relative to the file's end).
"""
_complain_ifclosed(self.closed)
return self.f.seek(position, whence) | python | def seek(self, position, whence=os.SEEK_SET):
"""
Seek to ``position`` in file.
:type position: int
:param position: offset in bytes to seek to
:type whence: int
:param whence: defaults to ``os.SEEK_SET`` (absolute); other
values are ``os.SEEK_CUR`` (relative to the current position)
and ``os.SEEK_END`` (relative to the file's end).
"""
_complain_ifclosed(self.closed)
return self.f.seek(position, whence) | [
"def",
"seek",
"(",
"self",
",",
"position",
",",
"whence",
"=",
"os",
".",
"SEEK_SET",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"return",
"self",
".",
"f",
".",
"seek",
"(",
"position",
",",
"whence",
")"
]
| Seek to ``position`` in file.
:type position: int
:param position: offset in bytes to seek to
:type whence: int
:param whence: defaults to ``os.SEEK_SET`` (absolute); other
values are ``os.SEEK_CUR`` (relative to the current position)
and ``os.SEEK_END`` (relative to the file's end). | [
"Seek",
"to",
"position",
"in",
"file",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L218-L230 | train |
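A usage sketch for seek, exercising the three whence values named in the docstring:

    import os
    import pydoop.hdfs as hdfs

    with hdfs.open("data/blob.bin") as f:
        f.seek(4)                  # absolute offset (os.SEEK_SET is the default)
        f.seek(2, os.SEEK_CUR)     # two bytes forward from the current position
        f.seek(-2, os.SEEK_END)    # two bytes before the end of the file
        tail = f.read(2)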
crs4/pydoop | pydoop/hdfs/file.py | FileIO.write | def write(self, data):
"""
Write ``data`` to the file.
:type data: bytes
:param data: the data to be written to the file
:rtype: int
:return: the number of bytes written
"""
_complain_ifclosed(self.closed)
if self.__encoding:
self.f.write(data.encode(self.__encoding, self.__errors))
return len(data)
else:
return self.f.write(data) | python | def write(self, data):
"""
Write ``data`` to the file.
:type data: bytes
:param data: the data to be written to the file
:rtype: int
:return: the number of bytes written
"""
_complain_ifclosed(self.closed)
if self.__encoding:
self.f.write(data.encode(self.__encoding, self.__errors))
return len(data)
else:
return self.f.write(data) | [
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"_complain_ifclosed",
"(",
"self",
".",
"closed",
")",
"if",
"self",
".",
"__encoding",
":",
"self",
".",
"f",
".",
"write",
"(",
"data",
".",
"encode",
"(",
"self",
".",
"__encoding",
",",
"self",
".",
"__errors",
")",
")",
"return",
"len",
"(",
"data",
")",
"else",
":",
"return",
"self",
".",
"f",
".",
"write",
"(",
"data",
")"
]
| Write ``data`` to the file.
:type data: bytes
:param data: the data to be written to the file
:rtype: int
:return: the number of bytes written | [
"Write",
"data",
"to",
"the",
"file",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/file.py#L242-L256 | train |
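A usage sketch for write; the "wt" mode string (text mode, default utf-8 encoding) is an assumption, and the return value is the length of the data accepted:

    import pydoop.hdfs as hdfs

    with hdfs.open("out/results.txt", "wt") as f:
        n = f.write("alpha,beta\n")
        assert n == len("alpha,beta\n")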
crs4/pydoop | pydoop/app/submit.py | PydoopSubmitter.set_args | def set_args(self, args, unknown_args=None):
"""
Configure job, based on the arguments provided.
"""
if unknown_args is None:
unknown_args = []
self.logger.setLevel(getattr(logging, args.log_level))
parent = hdfs.path.dirname(hdfs.path.abspath(args.output.rstrip("/")))
self.remote_wd = hdfs.path.join(
parent, utils.make_random_str(prefix="pydoop_submit_")
)
self.remote_exe = hdfs.path.join(self.remote_wd, str(uuid.uuid4()))
self.properties[JOB_NAME] = args.job_name or 'pydoop'
self.properties[IS_JAVA_RR] = (
'false' if args.do_not_use_java_record_reader else 'true'
)
self.properties[IS_JAVA_RW] = (
'false' if args.do_not_use_java_record_writer else 'true'
)
self.properties[JOB_REDUCES] = args.num_reducers
if args.job_name:
self.properties[JOB_NAME] = args.job_name
self.properties.update(args.job_conf or {})
self.__set_files_to_cache(args)
self.__set_archives_to_cache(args)
self.requested_env = self._env_arg_to_dict(args.set_env or [])
self.args = args
self.unknown_args = unknown_args | python | def set_args(self, args, unknown_args=None):
"""
Configure job, based on the arguments provided.
"""
if unknown_args is None:
unknown_args = []
self.logger.setLevel(getattr(logging, args.log_level))
parent = hdfs.path.dirname(hdfs.path.abspath(args.output.rstrip("/")))
self.remote_wd = hdfs.path.join(
parent, utils.make_random_str(prefix="pydoop_submit_")
)
self.remote_exe = hdfs.path.join(self.remote_wd, str(uuid.uuid4()))
self.properties[JOB_NAME] = args.job_name or 'pydoop'
self.properties[IS_JAVA_RR] = (
'false' if args.do_not_use_java_record_reader else 'true'
)
self.properties[IS_JAVA_RW] = (
'false' if args.do_not_use_java_record_writer else 'true'
)
self.properties[JOB_REDUCES] = args.num_reducers
if args.job_name:
self.properties[JOB_NAME] = args.job_name
self.properties.update(args.job_conf or {})
self.__set_files_to_cache(args)
self.__set_archives_to_cache(args)
self.requested_env = self._env_arg_to_dict(args.set_env or [])
self.args = args
self.unknown_args = unknown_args | [
"def",
"set_args",
"(",
"self",
",",
"args",
",",
"unknown_args",
"=",
"None",
")",
":",
"if",
"unknown_args",
"is",
"None",
":",
"unknown_args",
"=",
"[",
"]",
"self",
".",
"logger",
".",
"setLevel",
"(",
"getattr",
"(",
"logging",
",",
"args",
".",
"log_level",
")",
")",
"parent",
"=",
"hdfs",
".",
"path",
".",
"dirname",
"(",
"hdfs",
".",
"path",
".",
"abspath",
"(",
"args",
".",
"output",
".",
"rstrip",
"(",
"\"/\"",
")",
")",
")",
"self",
".",
"remote_wd",
"=",
"hdfs",
".",
"path",
".",
"join",
"(",
"parent",
",",
"utils",
".",
"make_random_str",
"(",
"prefix",
"=",
"\"pydoop_submit_\"",
")",
")",
"self",
".",
"remote_exe",
"=",
"hdfs",
".",
"path",
".",
"join",
"(",
"self",
".",
"remote_wd",
",",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
")",
"self",
".",
"properties",
"[",
"JOB_NAME",
"]",
"=",
"args",
".",
"job_name",
"or",
"'pydoop'",
"self",
".",
"properties",
"[",
"IS_JAVA_RR",
"]",
"=",
"(",
"'false'",
"if",
"args",
".",
"do_not_use_java_record_reader",
"else",
"'true'",
")",
"self",
".",
"properties",
"[",
"IS_JAVA_RW",
"]",
"=",
"(",
"'false'",
"if",
"args",
".",
"do_not_use_java_record_writer",
"else",
"'true'",
")",
"self",
".",
"properties",
"[",
"JOB_REDUCES",
"]",
"=",
"args",
".",
"num_reducers",
"if",
"args",
".",
"job_name",
":",
"self",
".",
"properties",
"[",
"JOB_NAME",
"]",
"=",
"args",
".",
"job_name",
"self",
".",
"properties",
".",
"update",
"(",
"args",
".",
"job_conf",
"or",
"{",
"}",
")",
"self",
".",
"__set_files_to_cache",
"(",
"args",
")",
"self",
".",
"__set_archives_to_cache",
"(",
"args",
")",
"self",
".",
"requested_env",
"=",
"self",
".",
"_env_arg_to_dict",
"(",
"args",
".",
"set_env",
"or",
"[",
"]",
")",
"self",
".",
"args",
"=",
"args",
"self",
".",
"unknown_args",
"=",
"unknown_args"
]
| Configure job, based on the arguments provided. | [
"Configure",
"job",
"based",
"on",
"the",
"arguments",
"provided",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/app/submit.py#L136-L164 | train |
crs4/pydoop | pydoop/app/submit.py | PydoopSubmitter.__warn_user_if_wd_maybe_unreadable | def __warn_user_if_wd_maybe_unreadable(self, abs_remote_path):
"""
Check directories above the remote module and issue a warning if
they are not traversable by all users.
The reasoning behind this is mainly aimed at set-ups with a
centralized Hadoop cluster, accessed by all users, and where
the Hadoop task tracker user is not a superuser; an example
may be if you're running a shared Hadoop without HDFS (using
only a POSIX shared file system). The task tracker correctly
changes user to the job requester's user for most operations,
    but not when initializing the distributed cache, so jobs that
    want to place files not accessible by the Hadoop user into the
    dist cache fail.
"""
host, port, path = hdfs.path.split(abs_remote_path)
if host == '' and port == 0: # local file system
host_port = "file:///"
else:
# FIXME: this won't work with any scheme other than
# hdfs:// (e.g., s3)
host_port = "hdfs://%s:%s/" % (host, port)
path_pieces = path.strip('/').split(os.path.sep)
fs = hdfs.hdfs(host, port)
for i in range(0, len(path_pieces)):
part = os.path.join(
host_port, os.path.sep.join(path_pieces[0: i + 1])
)
permissions = fs.get_path_info(part)['permissions']
if permissions & 0o111 != 0o111:
self.logger.warning(
("remote module %s may not be readable by the task "
"tracker when initializing the distributed cache. "
"Permissions on %s: %s"),
abs_remote_path, part, oct(permissions)
)
break | python | def __warn_user_if_wd_maybe_unreadable(self, abs_remote_path):
"""
Check directories above the remote module and issue a warning if
they are not traversable by all users.
The reasoning behind this is mainly aimed at set-ups with a
centralized Hadoop cluster, accessed by all users, and where
the Hadoop task tracker user is not a superuser; an example
may be if you're running a shared Hadoop without HDFS (using
only a POSIX shared file system). The task tracker correctly
changes user to the job requester's user for most operations,
    but not when initializing the distributed cache, so jobs that
    want to place files not accessible by the Hadoop user into the
    dist cache fail.
"""
host, port, path = hdfs.path.split(abs_remote_path)
if host == '' and port == 0: # local file system
host_port = "file:///"
else:
# FIXME: this won't work with any scheme other than
# hdfs:// (e.g., s3)
host_port = "hdfs://%s:%s/" % (host, port)
path_pieces = path.strip('/').split(os.path.sep)
fs = hdfs.hdfs(host, port)
for i in range(0, len(path_pieces)):
part = os.path.join(
host_port, os.path.sep.join(path_pieces[0: i + 1])
)
permissions = fs.get_path_info(part)['permissions']
if permissions & 0o111 != 0o111:
self.logger.warning(
("remote module %s may not be readable by the task "
"tracker when initializing the distributed cache. "
"Permissions on %s: %s"),
abs_remote_path, part, oct(permissions)
)
break | [
"def",
"__warn_user_if_wd_maybe_unreadable",
"(",
"self",
",",
"abs_remote_path",
")",
":",
"host",
",",
"port",
",",
"path",
"=",
"hdfs",
".",
"path",
".",
"split",
"(",
"abs_remote_path",
")",
"if",
"host",
"==",
"''",
"and",
"port",
"==",
"0",
":",
"# local file system",
"host_port",
"=",
"\"file:///\"",
"else",
":",
"# FIXME: this won't work with any scheme other than",
"# hdfs:// (e.g., s3)",
"host_port",
"=",
"\"hdfs://%s:%s/\"",
"%",
"(",
"host",
",",
"port",
")",
"path_pieces",
"=",
"path",
".",
"strip",
"(",
"'/'",
")",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"fs",
"=",
"hdfs",
".",
"hdfs",
"(",
"host",
",",
"port",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"path_pieces",
")",
")",
":",
"part",
"=",
"os",
".",
"path",
".",
"join",
"(",
"host_port",
",",
"os",
".",
"path",
".",
"sep",
".",
"join",
"(",
"path_pieces",
"[",
"0",
":",
"i",
"+",
"1",
"]",
")",
")",
"permissions",
"=",
"fs",
".",
"get_path_info",
"(",
"part",
")",
"[",
"'permissions'",
"]",
"if",
"permissions",
"&",
"0o111",
"!=",
"0o111",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"(",
"\"remote module %s may not be readable by the task \"",
"\"tracker when initializing the distributed cache. \"",
"\"Permissions on %s: %s\"",
")",
",",
"abs_remote_path",
",",
"part",
",",
"oct",
"(",
"permissions",
")",
")",
"break"
]
| Check directories above the remote module and issue a warning if
they are not traversable by all users.
The reasoning behind this is mainly aimed at set-ups with a
centralized Hadoop cluster, accessed by all users, and where
the Hadoop task tracker user is not a superuser; an example
may be if you're running a shared Hadoop without HDFS (using
only a POSIX shared file system). The task tracker correctly
changes user to the job requester's user for most operations,
but not when initializing the distributed cache, so jobs that
want to place files not accessible by the Hadoop user into the
dist cache fail. | [
"Check",
"directories",
"above",
"the",
"remote",
"module",
"and",
"issue",
"a",
"warning",
"if",
"they",
"are",
"not",
"traversable",
"by",
"all",
"users",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/app/submit.py#L166-L202 | train |
crs4/pydoop | pydoop/app/submit.py | PydoopSubmitter.__setup_remote_paths | def __setup_remote_paths(self):
"""
Actually create the working directory and copy the module into it.
Note: the script has to be readable by Hadoop; though this may not
generally be a problem on HDFS, where the Hadoop user is usually
the superuser, things may be different if our working directory is
on a shared POSIX filesystem. Therefore, we make the directory
and the script accessible by all.
"""
self.logger.debug("remote_wd: %s", self.remote_wd)
self.logger.debug("remote_exe: %s", self.remote_exe)
self.logger.debug("remotes: %s", self.files_to_upload)
if self.args.module:
self.logger.debug(
'Generated pipes_code:\n\n %s', self._generate_pipes_code()
)
if not self.args.pretend:
hdfs.mkdir(self.remote_wd)
hdfs.chmod(self.remote_wd, "a+rx")
self.logger.debug("created and chmod-ed: %s", self.remote_wd)
pipes_code = self._generate_pipes_code()
hdfs.dump(pipes_code, self.remote_exe)
self.logger.debug("dumped pipes_code to: %s", self.remote_exe)
hdfs.chmod(self.remote_exe, "a+rx")
self.__warn_user_if_wd_maybe_unreadable(self.remote_wd)
for (l, h, _) in self.files_to_upload:
self.logger.debug("uploading: %s to %s", l, h)
hdfs.cp(l, h)
self.logger.debug("Created%sremote paths:" %
(' [simulation] ' if self.args.pretend else ' ')) | python | def __setup_remote_paths(self):
"""
Actually create the working directory and copy the module into it.
Note: the script has to be readable by Hadoop; though this may not
generally be a problem on HDFS, where the Hadoop user is usually
the superuser, things may be different if our working directory is
on a shared POSIX filesystem. Therefore, we make the directory
and the script accessible by all.
"""
self.logger.debug("remote_wd: %s", self.remote_wd)
self.logger.debug("remote_exe: %s", self.remote_exe)
self.logger.debug("remotes: %s", self.files_to_upload)
if self.args.module:
self.logger.debug(
'Generated pipes_code:\n\n %s', self._generate_pipes_code()
)
if not self.args.pretend:
hdfs.mkdir(self.remote_wd)
hdfs.chmod(self.remote_wd, "a+rx")
self.logger.debug("created and chmod-ed: %s", self.remote_wd)
pipes_code = self._generate_pipes_code()
hdfs.dump(pipes_code, self.remote_exe)
self.logger.debug("dumped pipes_code to: %s", self.remote_exe)
hdfs.chmod(self.remote_exe, "a+rx")
self.__warn_user_if_wd_maybe_unreadable(self.remote_wd)
for (l, h, _) in self.files_to_upload:
self.logger.debug("uploading: %s to %s", l, h)
hdfs.cp(l, h)
self.logger.debug("Created%sremote paths:" %
(' [simulation] ' if self.args.pretend else ' ')) | [
"def",
"__setup_remote_paths",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"remote_wd: %s\"",
",",
"self",
".",
"remote_wd",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"remote_exe: %s\"",
",",
"self",
".",
"remote_exe",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"remotes: %s\"",
",",
"self",
".",
"files_to_upload",
")",
"if",
"self",
".",
"args",
".",
"module",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Generated pipes_code:\\n\\n %s'",
",",
"self",
".",
"_generate_pipes_code",
"(",
")",
")",
"if",
"not",
"self",
".",
"args",
".",
"pretend",
":",
"hdfs",
".",
"mkdir",
"(",
"self",
".",
"remote_wd",
")",
"hdfs",
".",
"chmod",
"(",
"self",
".",
"remote_wd",
",",
"\"a+rx\"",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"created and chmod-ed: %s\"",
",",
"self",
".",
"remote_wd",
")",
"pipes_code",
"=",
"self",
".",
"_generate_pipes_code",
"(",
")",
"hdfs",
".",
"dump",
"(",
"pipes_code",
",",
"self",
".",
"remote_exe",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"dumped pipes_code to: %s\"",
",",
"self",
".",
"remote_exe",
")",
"hdfs",
".",
"chmod",
"(",
"self",
".",
"remote_exe",
",",
"\"a+rx\"",
")",
"self",
".",
"__warn_user_if_wd_maybe_unreadable",
"(",
"self",
".",
"remote_wd",
")",
"for",
"(",
"l",
",",
"h",
",",
"_",
")",
"in",
"self",
".",
"files_to_upload",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"uploading: %s to %s\"",
",",
"l",
",",
"h",
")",
"hdfs",
".",
"cp",
"(",
"l",
",",
"h",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Created%sremote paths:\"",
"%",
"(",
"' [simulation] '",
"if",
"self",
".",
"args",
".",
"pretend",
"else",
"' '",
")",
")"
]
| Actually create the working directory and copy the module into it.
Note: the script has to be readable by Hadoop; though this may not
generally be a problem on HDFS, where the Hadoop user is usually
the superuser, things may be different if our working directory is
on a shared POSIX filesystem. Therefore, we make the directory
and the script accessible by all. | [
"Actually",
"create",
"the",
"working",
"directory",
"and",
"copy",
"the",
"module",
"into",
"it",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/app/submit.py#L295-L325 | train |
crs4/pydoop | dev_tools/docker/scripts/share_etc_hosts.py | docker_client | def docker_client():
"""
Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client.
"""
cert_path = os.environ.get('DOCKER_CERT_PATH', '')
if cert_path == '':
cert_path = os.path.join(os.environ.get('HOME', ''), '.docker')
base_url = os.environ.get('DOCKER_HOST')
tls_config = None
if os.environ.get('DOCKER_TLS_VERIFY', '') != '':
parts = base_url.split('://', 1)
base_url = '%s://%s' % ('https', parts[1])
client_cert = (os.path.join(cert_path, 'cert.pem'),
os.path.join(cert_path, 'key.pem'))
ca_cert = os.path.join(cert_path, 'ca.pem')
tls_config = tls.TLSConfig(
ssl_version=ssl.PROTOCOL_TLSv1,
verify=True,
assert_hostname=False,
client_cert=client_cert,
ca_cert=ca_cert,
)
timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
return Client(
base_url=base_url, tls=tls_config, version='1.15', timeout=timeout
) | python | def docker_client():
"""
Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client.
"""
cert_path = os.environ.get('DOCKER_CERT_PATH', '')
if cert_path == '':
cert_path = os.path.join(os.environ.get('HOME', ''), '.docker')
base_url = os.environ.get('DOCKER_HOST')
tls_config = None
if os.environ.get('DOCKER_TLS_VERIFY', '') != '':
parts = base_url.split('://', 1)
base_url = '%s://%s' % ('https', parts[1])
client_cert = (os.path.join(cert_path, 'cert.pem'),
os.path.join(cert_path, 'key.pem'))
ca_cert = os.path.join(cert_path, 'ca.pem')
tls_config = tls.TLSConfig(
ssl_version=ssl.PROTOCOL_TLSv1,
verify=True,
assert_hostname=False,
client_cert=client_cert,
ca_cert=ca_cert,
)
timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
return Client(
base_url=base_url, tls=tls_config, version='1.15', timeout=timeout
) | [
"def",
"docker_client",
"(",
")",
":",
"cert_path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'DOCKER_CERT_PATH'",
",",
"''",
")",
"if",
"cert_path",
"==",
"''",
":",
"cert_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'HOME'",
",",
"''",
")",
",",
"'.docker'",
")",
"base_url",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'DOCKER_HOST'",
")",
"tls_config",
"=",
"None",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'DOCKER_TLS_VERIFY'",
",",
"''",
")",
"!=",
"''",
":",
"parts",
"=",
"base_url",
".",
"split",
"(",
"'://'",
",",
"1",
")",
"base_url",
"=",
"'%s://%s'",
"%",
"(",
"'https'",
",",
"parts",
"[",
"1",
"]",
")",
"client_cert",
"=",
"(",
"os",
".",
"path",
".",
"join",
"(",
"cert_path",
",",
"'cert.pem'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"cert_path",
",",
"'key.pem'",
")",
")",
"ca_cert",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cert_path",
",",
"'ca.pem'",
")",
"tls_config",
"=",
"tls",
".",
"TLSConfig",
"(",
"ssl_version",
"=",
"ssl",
".",
"PROTOCOL_TLSv1",
",",
"verify",
"=",
"True",
",",
"assert_hostname",
"=",
"False",
",",
"client_cert",
"=",
"client_cert",
",",
"ca_cert",
"=",
"ca_cert",
",",
")",
"timeout",
"=",
"int",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'DOCKER_CLIENT_TIMEOUT'",
",",
"60",
")",
")",
"return",
"Client",
"(",
"base_url",
"=",
"base_url",
",",
"tls",
"=",
"tls_config",
",",
"version",
"=",
"'1.15'",
",",
"timeout",
"=",
"timeout",
")"
]
| Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client. | [
"Returns",
"a",
"docker",
"-",
"py",
"client",
"configured",
"using",
"environment",
"variables",
"according",
"to",
"the",
"same",
"logic",
"as",
"the",
"official",
"Docker",
"client",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/dev_tools/docker/scripts/share_etc_hosts.py#L44-L75 | train |
crs4/pydoop | pydoop/utils/jvm.py | get_java_home | def get_java_home():
"""\
Try getting JAVA_HOME from system properties.
We are interested in the JDK home, containing include/jni.h, while the
java.home property points to the JRE home. If a JDK is installed, however,
the two are (usually) related: the JDK home is either the same directory
as the JRE home (recent java versions) or its parent (and java.home points
to jdk_home/jre).
"""
error = RuntimeError("java home not found, try setting JAVA_HOME")
try:
return os.environ["JAVA_HOME"]
except KeyError:
wd = tempfile.mkdtemp(prefix='pydoop_')
jclass = "Temp"
jsrc = os.path.join(wd, "%s.java" % jclass)
with open(jsrc, "w") as f:
f.write(JPROG.substitute(classname=jclass))
try:
subprocess.check_call(["javac", jsrc])
path = subprocess.check_output(
["java", "-cp", wd, jclass], universal_newlines=True
)
except (OSError, UnicodeDecodeError, subprocess.CalledProcessError):
raise error
finally:
shutil.rmtree(wd)
path = os.path.normpath(path.strip())
if os.path.exists(os.path.join(path, "include", "jni.h")):
return path
path = os.path.dirname(path)
if os.path.exists(os.path.join(path, "include", "jni.h")):
return path
raise error | python | def get_java_home():
"""\
Try getting JAVA_HOME from system properties.
We are interested in the JDK home, containing include/jni.h, while the
java.home property points to the JRE home. If a JDK is installed, however,
the two are (usually) related: the JDK home is either the same directory
as the JRE home (recent java versions) or its parent (and java.home points
to jdk_home/jre).
"""
error = RuntimeError("java home not found, try setting JAVA_HOME")
try:
return os.environ["JAVA_HOME"]
except KeyError:
wd = tempfile.mkdtemp(prefix='pydoop_')
jclass = "Temp"
jsrc = os.path.join(wd, "%s.java" % jclass)
with open(jsrc, "w") as f:
f.write(JPROG.substitute(classname=jclass))
try:
subprocess.check_call(["javac", jsrc])
path = subprocess.check_output(
["java", "-cp", wd, jclass], universal_newlines=True
)
except (OSError, UnicodeDecodeError, subprocess.CalledProcessError):
raise error
finally:
shutil.rmtree(wd)
path = os.path.normpath(path.strip())
if os.path.exists(os.path.join(path, "include", "jni.h")):
return path
path = os.path.dirname(path)
if os.path.exists(os.path.join(path, "include", "jni.h")):
return path
raise error | [
"def",
"get_java_home",
"(",
")",
":",
"error",
"=",
"RuntimeError",
"(",
"\"java home not found, try setting JAVA_HOME\"",
")",
"try",
":",
"return",
"os",
".",
"environ",
"[",
"\"JAVA_HOME\"",
"]",
"except",
"KeyError",
":",
"wd",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"prefix",
"=",
"'pydoop_'",
")",
"jclass",
"=",
"\"Temp\"",
"jsrc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"wd",
",",
"\"%s.java\"",
"%",
"jclass",
")",
"with",
"open",
"(",
"jsrc",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"JPROG",
".",
"substitute",
"(",
"classname",
"=",
"jclass",
")",
")",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"\"javac\"",
",",
"jsrc",
"]",
")",
"path",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"\"java\"",
",",
"\"-cp\"",
",",
"wd",
",",
"jclass",
"]",
",",
"universal_newlines",
"=",
"True",
")",
"except",
"(",
"OSError",
",",
"UnicodeDecodeError",
",",
"subprocess",
".",
"CalledProcessError",
")",
":",
"raise",
"error",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"wd",
")",
"path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"path",
".",
"strip",
"(",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"include\"",
",",
"\"jni.h\"",
")",
")",
":",
"return",
"path",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"include\"",
",",
"\"jni.h\"",
")",
")",
":",
"return",
"path",
"raise",
"error"
]
| \
Try getting JAVA_HOME from system properties.
We are interested in the JDK home, containing include/jni.h, while the
java.home property points to the JRE home. If a JDK is installed, however,
the two are (usually) related: the JDK home is either the same directory
as the JRE home (recent java versions) or its parent (and java.home points
to jdk_home/jre). | [
"\\",
"Try",
"getting",
"JAVA_HOME",
"from",
"system",
"properties",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/utils/jvm.py#L37-L71 | train |
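A usage sketch for get_java_home; the returned directory is expected to contain include/jni.h:

    import os
    from pydoop.utils.jvm import get_java_home

    java_home = get_java_home()
    print(java_home, os.path.exists(os.path.join(java_home, "include", "jni.h")))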
crs4/pydoop | pydoop/mapreduce/pipes.py | run_task | def run_task(factory, **kwargs):
"""\
Run a MapReduce task.
Available keyword arguments:
* ``raw_keys`` (default: :obj:`False`): pass map input keys to context
as byte strings (ignore any type information)
* ``raw_values`` (default: :obj:`False`): pass map input values to context
as byte strings (ignore any type information)
* ``private_encoding`` (default: :obj:`True`): automatically serialize map
output k/v and deserialize reduce input k/v (pickle)
* ``auto_serialize`` (default: :obj:`True`): automatically serialize reduce
output (map output in map-only jobs) k/v (call str/unicode then encode as
utf-8)
Advanced keyword arguments:
* ``pstats_dir``: run the task with cProfile and store stats in this dir
* ``pstats_fmt``: use this pattern for pstats filenames (experts only)
The pstats dir and filename pattern can also be provided via ``pydoop
submit`` arguments, with lower precedence in case of clashes.
"""
context = TaskContext(factory, **kwargs)
pstats_dir = kwargs.get("pstats_dir", os.getenv(PSTATS_DIR))
if pstats_dir:
import cProfile
import tempfile
import pydoop.hdfs as hdfs
hdfs.mkdir(pstats_dir)
fd, pstats_fn = tempfile.mkstemp(suffix=".pstats")
os.close(fd)
cProfile.runctx(
"_run(context, **kwargs)", globals(), locals(),
filename=pstats_fn
)
pstats_fmt = kwargs.get(
"pstats_fmt",
os.getenv(PSTATS_FMT, DEFAULT_PSTATS_FMT)
)
name = pstats_fmt % (
context.task_type,
context.get_task_partition(),
os.path.basename(pstats_fn)
)
hdfs.put(pstats_fn, hdfs.path.join(pstats_dir, name))
else:
_run(context, **kwargs) | python | def run_task(factory, **kwargs):
"""\
Run a MapReduce task.
Available keyword arguments:
* ``raw_keys`` (default: :obj:`False`): pass map input keys to context
as byte strings (ignore any type information)
* ``raw_values`` (default: :obj:`False`): pass map input values to context
as byte strings (ignore any type information)
* ``private_encoding`` (default: :obj:`True`): automatically serialize map
output k/v and deserialize reduce input k/v (pickle)
* ``auto_serialize`` (default: :obj:`True`): automatically serialize reduce
output (map output in map-only jobs) k/v (call str/unicode then encode as
utf-8)
Advanced keyword arguments:
* ``pstats_dir``: run the task with cProfile and store stats in this dir
* ``pstats_fmt``: use this pattern for pstats filenames (experts only)
The pstats dir and filename pattern can also be provided via ``pydoop
submit`` arguments, with lower precedence in case of clashes.
"""
context = TaskContext(factory, **kwargs)
pstats_dir = kwargs.get("pstats_dir", os.getenv(PSTATS_DIR))
if pstats_dir:
import cProfile
import tempfile
import pydoop.hdfs as hdfs
hdfs.mkdir(pstats_dir)
fd, pstats_fn = tempfile.mkstemp(suffix=".pstats")
os.close(fd)
cProfile.runctx(
"_run(context, **kwargs)", globals(), locals(),
filename=pstats_fn
)
pstats_fmt = kwargs.get(
"pstats_fmt",
os.getenv(PSTATS_FMT, DEFAULT_PSTATS_FMT)
)
name = pstats_fmt % (
context.task_type,
context.get_task_partition(),
os.path.basename(pstats_fn)
)
hdfs.put(pstats_fn, hdfs.path.join(pstats_dir, name))
else:
_run(context, **kwargs) | [
"def",
"run_task",
"(",
"factory",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"TaskContext",
"(",
"factory",
",",
"*",
"*",
"kwargs",
")",
"pstats_dir",
"=",
"kwargs",
".",
"get",
"(",
"\"pstats_dir\"",
",",
"os",
".",
"getenv",
"(",
"PSTATS_DIR",
")",
")",
"if",
"pstats_dir",
":",
"import",
"cProfile",
"import",
"tempfile",
"import",
"pydoop",
".",
"hdfs",
"as",
"hdfs",
"hdfs",
".",
"mkdir",
"(",
"pstats_dir",
")",
"fd",
",",
"pstats_fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"suffix",
"=",
"\".pstats\"",
")",
"os",
".",
"close",
"(",
"fd",
")",
"cProfile",
".",
"runctx",
"(",
"\"_run(context, **kwargs)\"",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"filename",
"=",
"pstats_fn",
")",
"pstats_fmt",
"=",
"kwargs",
".",
"get",
"(",
"\"pstats_fmt\"",
",",
"os",
".",
"getenv",
"(",
"PSTATS_FMT",
",",
"DEFAULT_PSTATS_FMT",
")",
")",
"name",
"=",
"pstats_fmt",
"%",
"(",
"context",
".",
"task_type",
",",
"context",
".",
"get_task_partition",
"(",
")",
",",
"os",
".",
"path",
".",
"basename",
"(",
"pstats_fn",
")",
")",
"hdfs",
".",
"put",
"(",
"pstats_fn",
",",
"hdfs",
".",
"path",
".",
"join",
"(",
"pstats_dir",
",",
"name",
")",
")",
"else",
":",
"_run",
"(",
"context",
",",
"*",
"*",
"kwargs",
")"
]
| \
Run a MapReduce task.
Available keyword arguments:
* ``raw_keys`` (default: :obj:`False`): pass map input keys to context
as byte strings (ignore any type information)
* ``raw_values`` (default: :obj:`False`): pass map input values to context
as byte strings (ignore any type information)
* ``private_encoding`` (default: :obj:`True`): automatically serialize map
output k/v and deserialize reduce input k/v (pickle)
* ``auto_serialize`` (default: :obj:`True`): automatically serialize reduce
output (map output in map-only jobs) k/v (call str/unicode then encode as
utf-8)
Advanced keyword arguments:
* ``pstats_dir``: run the task with cProfile and store stats in this dir
* ``pstats_fmt``: use this pattern for pstats filenames (experts only)
The pstats dir and filename pattern can also be provided via ``pydoop
submit`` arguments, with lower precedence in case of clashes. | [
"\\",
"Run",
"a",
"MapReduce",
"task",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/mapreduce/pipes.py#L414-L462 | train |
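A minimal word-count style driver built around run_task; the Factory, Mapper and Reducer names are assumed from pydoop's published examples rather than spelled out in the record above:

    import pydoop.mapreduce.api as api
    import pydoop.mapreduce.pipes as pipes

    class Mapper(api.Mapper):
        def map(self, context):
            for word in context.value.split():
                context.emit(word, 1)

    class Reducer(api.Reducer):
        def reduce(self, context):
            context.emit(context.key, sum(context.values))

    if __name__ == "__main__":
        pipes.run_task(pipes.Factory(Mapper, reducer_class=Reducer))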
crs4/pydoop | pydoop/mapreduce/pipes.py | TaskContext.progress | def progress(self):
"""\
Report progress to the Java side.
This needs to flush the uplink stream, but too many flushes can
disrupt performance, so we actually talk to upstream once per second.
"""
now = time()
if now - self.last_progress_t > 1:
self.last_progress_t = now
if self.status:
self.uplink.status(self.status)
self.status = None
self.__spill_counters()
self.uplink.progress(self.progress_value)
self.uplink.flush() | python | def progress(self):
"""\
Report progress to the Java side.
This needs to flush the uplink stream, but too many flushes can
disrupt performance, so we actually talk to upstream once per second.
"""
now = time()
if now - self.last_progress_t > 1:
self.last_progress_t = now
if self.status:
self.uplink.status(self.status)
self.status = None
self.__spill_counters()
self.uplink.progress(self.progress_value)
self.uplink.flush() | [
"def",
"progress",
"(",
"self",
")",
":",
"now",
"=",
"time",
"(",
")",
"if",
"now",
"-",
"self",
".",
"last_progress_t",
">",
"1",
":",
"self",
".",
"last_progress_t",
"=",
"now",
"if",
"self",
".",
"status",
":",
"self",
".",
"uplink",
".",
"status",
"(",
"self",
".",
"status",
")",
"self",
".",
"status",
"=",
"None",
"self",
".",
"__spill_counters",
"(",
")",
"self",
".",
"uplink",
".",
"progress",
"(",
"self",
".",
"progress_value",
")",
"self",
".",
"uplink",
".",
"flush",
"(",
")"
]
| \
Report progress to the Java side.
This needs to flush the uplink stream, but too many flushes can
disrupt performance, so we actually talk to upstream once per second. | [
"\\",
"Report",
"progress",
"to",
"the",
"Java",
"side",
"."
]
| f375be2a06f9c67eaae3ce6f605195dbca143b2b | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/mapreduce/pipes.py#L210-L225 | train |
Bouke/docx-mailmerge | mailmerge.py | MailMerge.merge_pages | def merge_pages(self, replacements):
"""
Deprecated method.
"""
warnings.warn("merge_pages has been deprecated in favour of merge_templates",
category=DeprecationWarning,
stacklevel=2)
self.merge_templates(replacements, "page_break") | python | def merge_pages(self, replacements):
"""
Deprecated method.
"""
warnings.warn("merge_pages has been deprecated in favour of merge_templates",
category=DeprecationWarning,
stacklevel=2)
self.merge_templates(replacements, "page_break") | [
"def",
"merge_pages",
"(",
"self",
",",
"replacements",
")",
":",
"warnings",
".",
"warn",
"(",
"\"merge_pages has been deprecated in favour of merge_templates\"",
",",
"category",
"=",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"self",
".",
"merge_templates",
"(",
"replacements",
",",
"\"page_break\"",
")"
]
| Deprecated method. | [
"Deprecated",
"method",
"."
]
| 6900b686794b4bf85b662488add8df0880114b99 | https://github.com/Bouke/docx-mailmerge/blob/6900b686794b4bf85b662488add8df0880114b99/mailmerge.py#L236-L243 | train |
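Since merge_pages now just forwards to merge_templates, a sketch of the replacement call; the template file and the 'name' merge field are hypothetical:

    from mailmerge import MailMerge

    document = MailMerge("template.docx")    # assumed to contain a MergeField named "name"
    document.merge_templates([{"name": "Alice"}, {"name": "Bob"}], "page_break")
    document.write("letters.docx")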
bashu/django-easy-maps | easy_maps/utils.py | importpath | def importpath(path, error_text=None):
"""
Import value by specified ``path``.
Value can represent module, class, object, attribute or method.
    If ``error_text`` is not None and the import fails,
raise ImproperlyConfigured with user friendly text.
"""
result = None
attrs = []
parts = path.split('.')
exception = None
while parts:
try:
result = __import__('.'.join(parts), {}, {}, [''])
except ImportError as e:
if exception is None:
exception = e
attrs = parts[-1:] + attrs
parts = parts[:-1]
else:
break
for attr in attrs:
try:
result = getattr(result, attr)
except (AttributeError, ValueError) as e:
if error_text is not None:
raise ImproperlyConfigured('Error: %s can import "%s"' % (
error_text, path))
else:
raise exception
return result | python | def importpath(path, error_text=None):
"""
Import value by specified ``path``.
Value can represent module, class, object, attribute or method.
    If ``error_text`` is not None and the import fails,
raise ImproperlyConfigured with user friendly text.
"""
result = None
attrs = []
parts = path.split('.')
exception = None
while parts:
try:
result = __import__('.'.join(parts), {}, {}, [''])
except ImportError as e:
if exception is None:
exception = e
attrs = parts[-1:] + attrs
parts = parts[:-1]
else:
break
for attr in attrs:
try:
result = getattr(result, attr)
except (AttributeError, ValueError) as e:
if error_text is not None:
raise ImproperlyConfigured('Error: %s can import "%s"' % (
error_text, path))
else:
raise exception
return result | [
"def",
"importpath",
"(",
"path",
",",
"error_text",
"=",
"None",
")",
":",
"result",
"=",
"None",
"attrs",
"=",
"[",
"]",
"parts",
"=",
"path",
".",
"split",
"(",
"'.'",
")",
"exception",
"=",
"None",
"while",
"parts",
":",
"try",
":",
"result",
"=",
"__import__",
"(",
"'.'",
".",
"join",
"(",
"parts",
")",
",",
"{",
"}",
",",
"{",
"}",
",",
"[",
"''",
"]",
")",
"except",
"ImportError",
"as",
"e",
":",
"if",
"exception",
"is",
"None",
":",
"exception",
"=",
"e",
"attrs",
"=",
"parts",
"[",
"-",
"1",
":",
"]",
"+",
"attrs",
"parts",
"=",
"parts",
"[",
":",
"-",
"1",
"]",
"else",
":",
"break",
"for",
"attr",
"in",
"attrs",
":",
"try",
":",
"result",
"=",
"getattr",
"(",
"result",
",",
"attr",
")",
"except",
"(",
"AttributeError",
",",
"ValueError",
")",
"as",
"e",
":",
"if",
"error_text",
"is",
"not",
"None",
":",
"raise",
"ImproperlyConfigured",
"(",
"'Error: %s can import \"%s\"'",
"%",
"(",
"error_text",
",",
"path",
")",
")",
"else",
":",
"raise",
"exception",
"return",
"result"
]
| Import value by specified ``path``.
Value can represent module, class, object, attribute or method.
If ``error_text`` is not None and the import fails,
raise ImproperlyConfigured with user friendly text. | [
"Import",
"value",
"by",
"specified",
"path",
".",
"Value",
"can",
"represent",
"module",
"class",
"object",
"attribute",
"or",
"method",
".",
"If",
"error_text",
"is",
"not",
"None",
"and",
"import",
"will",
"raise",
"ImproperlyConfigured",
"with",
"user",
"friendly",
"text",
"."
]
| 32f4f3274443219e8828d93d09a406bf2a126982 | https://github.com/bashu/django-easy-maps/blob/32f4f3274443219e8828d93d09a406bf2a126982/easy_maps/utils.py#L6-L37 | train |
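
A small, self-contained illustration of importpath from the record above; the dotted paths are arbitrary examples rather than anything django-easy-maps requires.

from easy_maps.utils import importpath

# Resolve an attribute by its dotted path
join = importpath('os.path.join')
print(join('maps', 'static'))  # -> 'maps/static' on POSIX

# With error_text set, a failing path raises ImproperlyConfigured instead of
# the underlying ImportError:
# importpath('no.such.module.attr', 'EASY_MAPS_EXAMPLE_SETTING')
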
bitlabstudio/django-booking | booking/utils.py | get_booking | def get_booking(request):
"""
Returns the booking that is in progress for the current user or None
We assume that a user can only have one booking that is in-progress.
TODO: This implementation assumes that there is a status called
'inprogress' and that there should only be one such booking for a given
user. We need to see if this can be more generic for future projects.
:param request: The Request object.
"""
booking = None
if request.user.is_authenticated():
try:
booking = Booking.objects.get(
user=request.user,
booking_status__slug='inprogress')
except Booking.DoesNotExist:
# The user does not have any open bookings
pass
else:
session = Session.objects.get(
session_key=request.session.session_key)
try:
booking = Booking.objects.get(session=session)
except Booking.DoesNotExist:
# The user does not have any bookings in his session
pass
return booking | python | def get_booking(request):
"""
Returns the booking that is in progress for the current user or None
We assume that a user can only have one booking that is in-progress.
TODO: This implementation assumes that there is a status called
'inprogress' and that there should only be one such booking for a given
user. We need to see if this can be more generic for future projects.
:param request: The Request object.
"""
booking = None
if request.user.is_authenticated():
try:
booking = Booking.objects.get(
user=request.user,
booking_status__slug='inprogress')
except Booking.DoesNotExist:
# The user does not have any open bookings
pass
else:
session = Session.objects.get(
session_key=request.session.session_key)
try:
booking = Booking.objects.get(session=session)
except Booking.DoesNotExist:
# The user does not have any bookings in his session
pass
return booking | [
"def",
"get_booking",
"(",
"request",
")",
":",
"booking",
"=",
"None",
"if",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
":",
"try",
":",
"booking",
"=",
"Booking",
".",
"objects",
".",
"get",
"(",
"user",
"=",
"request",
".",
"user",
",",
"booking_status__slug",
"=",
"'inprogress'",
")",
"except",
"Booking",
".",
"DoesNotExist",
":",
"# The user does not have any open bookings",
"pass",
"else",
":",
"session",
"=",
"Session",
".",
"objects",
".",
"get",
"(",
"session_key",
"=",
"request",
".",
"session",
".",
"session_key",
")",
"try",
":",
"booking",
"=",
"Booking",
".",
"objects",
".",
"get",
"(",
"session",
"=",
"session",
")",
"except",
"Booking",
".",
"DoesNotExist",
":",
"# The user does not have any bookings in his session",
"pass",
"return",
"booking"
]
| Returns the booking that is in progress for the current user or None
We assume that a user can only have one booking that is in-progress.
TODO: This implementation assumes that there is a status called
'inprogress' and that there should only be one such booking for a given
user. We need to see if this can be more generic for future projects.
:param request: The Request object. | [
"Returns",
"the",
"booking",
"that",
"is",
"in",
"progress",
"for",
"the",
"current",
"user",
"or",
"None"
]
| 7bb5fdddb28b52b62b86f1d05b19a7654b5ffe00 | https://github.com/bitlabstudio/django-booking/blob/7bb5fdddb28b52b62b86f1d05b19a7654b5ffe00/booking/utils.py#L7-L37 | train |
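
A hedged sketch of how get_booking might be used from a Django view; the view function and template names are illustrative only and not taken from the package.

from django.shortcuts import render

from booking.utils import get_booking


def checkout(request):
    # In-progress booking for the logged-in user, or for the anonymous session
    booking = get_booking(request)
    if booking is None:
        return render(request, 'booking/empty.html')
    return render(request, 'booking/checkout.html', {'booking': booking})
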
bitlabstudio/django-booking | booking/utils.py | persist_booking | def persist_booking(booking, user):
"""
Ties an in-progress booking from a session to a user when the user logs in.
If we don't do this, the booking will be lost, because on a login, the
old session will be deleted and a new one will be created. Since the
booking has a FK to the session, it would be deleted as well when the user
logs in.
We assume that a user can only have one booking that is in-progress.
Therefore we will delete any existing in-progress bookings of this user
before tying the one from the session to the user.
TODO: Find a more generic solution for this, as this assumes that there is
a status called inprogress and that a user can only have one such booking.
:param booking: The booking that should be tied to the user.
:user: The user the booking should be tied to.
"""
if booking is not None:
existing_bookings = Booking.objects.filter(
user=user, booking_status__slug='inprogress').exclude(
pk=booking.pk)
existing_bookings.delete()
booking.session = None
booking.user = user
booking.save() | python | def persist_booking(booking, user):
"""
Ties an in-progress booking from a session to a user when the user logs in.
If we don't do this, the booking will be lost, because on a login, the
old session will be deleted and a new one will be created. Since the
booking has a FK to the session, it would be deleted as well when the user
logs in.
We assume that a user can only have one booking that is in-progress.
Therefore we will delete any existing in-progress bookings of this user
before tying the one from the session to the user.
TODO: Find a more generic solution for this, as this assumes that there is
a status called inprogress and that a user can only have one such booking.
:param booking: The booking that should be tied to the user.
:user: The user the booking should be tied to.
"""
if booking is not None:
existing_bookings = Booking.objects.filter(
user=user, booking_status__slug='inprogress').exclude(
pk=booking.pk)
existing_bookings.delete()
booking.session = None
booking.user = user
booking.save() | [
"def",
"persist_booking",
"(",
"booking",
",",
"user",
")",
":",
"if",
"booking",
"is",
"not",
"None",
":",
"existing_bookings",
"=",
"Booking",
".",
"objects",
".",
"filter",
"(",
"user",
"=",
"user",
",",
"booking_status__slug",
"=",
"'inprogress'",
")",
".",
"exclude",
"(",
"pk",
"=",
"booking",
".",
"pk",
")",
"existing_bookings",
".",
"delete",
"(",
")",
"booking",
".",
"session",
"=",
"None",
"booking",
".",
"user",
"=",
"user",
"booking",
".",
"save",
"(",
")"
]
| Ties an in-progress booking from a session to a user when the user logs in.
If we don't do this, the booking will be lost, because on a login, the
old session will be deleted and a new one will be created. Since the
booking has a FK to the session, it would be deleted as well when the user
logs in.
We assume that a user can only have one booking that is in-progress.
Therefore we will delete any existing in-progress bookings of this user
before tying the one from the session to the user.
TODO: Find a more generic solution for this, as this assumes that there is
a status called inprogress and that a user can only have one such booking.
:param booking: The booking that should be tied to the user.
:user: The user the booking should be tied to. | [
"Ties",
"an",
"in",
"-",
"progress",
"booking",
"from",
"a",
"session",
"to",
"a",
"user",
"when",
"the",
"user",
"logs",
"in",
"."
]
| 7bb5fdddb28b52b62b86f1d05b19a7654b5ffe00 | https://github.com/bitlabstudio/django-booking/blob/7bb5fdddb28b52b62b86f1d05b19a7654b5ffe00/booking/utils.py#L40-L68 | train |
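
persist_booking is meant to run at login time; below is a sketch wiring it to Django's user_logged_in signal. The receiver name and module placement are assumptions, and any interplay with session cycling at login is not addressed here.

from django.contrib.auth.signals import user_logged_in
from django.dispatch import receiver

from booking.utils import get_booking, persist_booking


@receiver(user_logged_in)
def move_session_booking_to_user(sender, request, user, **kwargs):
    # Re-attach the anonymous session's booking, if any, to the user
    booking = get_booking(request)
    persist_booking(booking, user)
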
spotify/pyfg | pyFG/forticonfig.py | FortiConfig.compare_config | def compare_config(self, target, init=True, indent_level=0):
"""
This method will return all the necessary commands to get from the config we are in to the target
config.
Args:
* **target** (:class:`~pyFG.forticonfig.FortiConfig`) - Target config.
        * **init** (bool) - This tells the method whether this is the first call to the method or if we are inside\
the recursion. You can ignore this parameter.
* **indent_level** (int) - This tells the method how deep you are in the recursion. You can ignore it.
Returns:
A string containing all the necessary commands to reach the target config.
"""
if init:
fwd = self.full_path_fwd
bwd = self.full_path_bwd
else:
fwd = self.rel_path_fwd
bwd = self.rel_path_bwd
indent = 4*indent_level*' '
if indent_level == 0 and self.vdom is not None:
if self.vdom == 'global':
pre = 'conf global\n'
else:
pre = 'conf vdom\n edit %s\n' % self.vdom
post = 'end'
else:
pre = ''
post = ''
pre_block = '%s%s' % (indent, fwd)
post_block = '%s%s' % (indent, bwd)
my_params = self.parameters.keys()
ot_params = target.parameters.keys()
text = ''
for param in my_params:
if param not in ot_params:
text += ' %sunset %s\n' % (indent, param)
else:
# We ignore quotes when comparing values
if str(self.get_param(param)).replace('"', '') != str(target.get_param(param)).replace('"', ''):
text += ' %sset %s %s\n' % (indent, param, target.get_param(param))
for param in ot_params:
if param not in my_params:
text += ' %sset %s %s\n' % (indent, param, target.get_param(param))
my_blocks = self.sub_blocks.keys()
ot_blocks = target.sub_blocks.keys()
for block_name in my_blocks:
if block_name not in ot_blocks:
text += " %sdelete %s\n" % (indent, block_name)
else:
text += self[block_name].compare_config(target[block_name], False, indent_level+1)
for block_name in ot_blocks:
if block_name not in my_blocks:
text += target[block_name].to_text(True, indent_level+1, True)
if text == '':
return ''
else:
return '%s%s%s%s%s' % (pre, pre_block, text, post_block, post) | python | def compare_config(self, target, init=True, indent_level=0):
"""
This method will return all the necessary commands to get from the config we are in to the target
config.
Args:
* **target** (:class:`~pyFG.forticonfig.FortiConfig`) - Target config.
        * **init** (bool) - This tells the method whether this is the first call to the method or if we are inside\
the recursion. You can ignore this parameter.
* **indent_level** (int) - This tells the method how deep you are in the recursion. You can ignore it.
Returns:
A string containing all the necessary commands to reach the target config.
"""
if init:
fwd = self.full_path_fwd
bwd = self.full_path_bwd
else:
fwd = self.rel_path_fwd
bwd = self.rel_path_bwd
indent = 4*indent_level*' '
if indent_level == 0 and self.vdom is not None:
if self.vdom == 'global':
pre = 'conf global\n'
else:
pre = 'conf vdom\n edit %s\n' % self.vdom
post = 'end'
else:
pre = ''
post = ''
pre_block = '%s%s' % (indent, fwd)
post_block = '%s%s' % (indent, bwd)
my_params = self.parameters.keys()
ot_params = target.parameters.keys()
text = ''
for param in my_params:
if param not in ot_params:
text += ' %sunset %s\n' % (indent, param)
else:
# We ignore quotes when comparing values
if str(self.get_param(param)).replace('"', '') != str(target.get_param(param)).replace('"', ''):
text += ' %sset %s %s\n' % (indent, param, target.get_param(param))
for param in ot_params:
if param not in my_params:
text += ' %sset %s %s\n' % (indent, param, target.get_param(param))
my_blocks = self.sub_blocks.keys()
ot_blocks = target.sub_blocks.keys()
for block_name in my_blocks:
if block_name not in ot_blocks:
text += " %sdelete %s\n" % (indent, block_name)
else:
text += self[block_name].compare_config(target[block_name], False, indent_level+1)
for block_name in ot_blocks:
if block_name not in my_blocks:
text += target[block_name].to_text(True, indent_level+1, True)
if text == '':
return ''
else:
return '%s%s%s%s%s' % (pre, pre_block, text, post_block, post) | [
"def",
"compare_config",
"(",
"self",
",",
"target",
",",
"init",
"=",
"True",
",",
"indent_level",
"=",
"0",
")",
":",
"if",
"init",
":",
"fwd",
"=",
"self",
".",
"full_path_fwd",
"bwd",
"=",
"self",
".",
"full_path_bwd",
"else",
":",
"fwd",
"=",
"self",
".",
"rel_path_fwd",
"bwd",
"=",
"self",
".",
"rel_path_bwd",
"indent",
"=",
"4",
"*",
"indent_level",
"*",
"' '",
"if",
"indent_level",
"==",
"0",
"and",
"self",
".",
"vdom",
"is",
"not",
"None",
":",
"if",
"self",
".",
"vdom",
"==",
"'global'",
":",
"pre",
"=",
"'conf global\\n'",
"else",
":",
"pre",
"=",
"'conf vdom\\n edit %s\\n'",
"%",
"self",
".",
"vdom",
"post",
"=",
"'end'",
"else",
":",
"pre",
"=",
"''",
"post",
"=",
"''",
"pre_block",
"=",
"'%s%s'",
"%",
"(",
"indent",
",",
"fwd",
")",
"post_block",
"=",
"'%s%s'",
"%",
"(",
"indent",
",",
"bwd",
")",
"my_params",
"=",
"self",
".",
"parameters",
".",
"keys",
"(",
")",
"ot_params",
"=",
"target",
".",
"parameters",
".",
"keys",
"(",
")",
"text",
"=",
"''",
"for",
"param",
"in",
"my_params",
":",
"if",
"param",
"not",
"in",
"ot_params",
":",
"text",
"+=",
"' %sunset %s\\n'",
"%",
"(",
"indent",
",",
"param",
")",
"else",
":",
"# We ignore quotes when comparing values",
"if",
"str",
"(",
"self",
".",
"get_param",
"(",
"param",
")",
")",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
"!=",
"str",
"(",
"target",
".",
"get_param",
"(",
"param",
")",
")",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
":",
"text",
"+=",
"' %sset %s %s\\n'",
"%",
"(",
"indent",
",",
"param",
",",
"target",
".",
"get_param",
"(",
"param",
")",
")",
"for",
"param",
"in",
"ot_params",
":",
"if",
"param",
"not",
"in",
"my_params",
":",
"text",
"+=",
"' %sset %s %s\\n'",
"%",
"(",
"indent",
",",
"param",
",",
"target",
".",
"get_param",
"(",
"param",
")",
")",
"my_blocks",
"=",
"self",
".",
"sub_blocks",
".",
"keys",
"(",
")",
"ot_blocks",
"=",
"target",
".",
"sub_blocks",
".",
"keys",
"(",
")",
"for",
"block_name",
"in",
"my_blocks",
":",
"if",
"block_name",
"not",
"in",
"ot_blocks",
":",
"text",
"+=",
"\" %sdelete %s\\n\"",
"%",
"(",
"indent",
",",
"block_name",
")",
"else",
":",
"text",
"+=",
"self",
"[",
"block_name",
"]",
".",
"compare_config",
"(",
"target",
"[",
"block_name",
"]",
",",
"False",
",",
"indent_level",
"+",
"1",
")",
"for",
"block_name",
"in",
"ot_blocks",
":",
"if",
"block_name",
"not",
"in",
"my_blocks",
":",
"text",
"+=",
"target",
"[",
"block_name",
"]",
".",
"to_text",
"(",
"True",
",",
"indent_level",
"+",
"1",
",",
"True",
")",
"if",
"text",
"==",
"''",
":",
"return",
"''",
"else",
":",
"return",
"'%s%s%s%s%s'",
"%",
"(",
"pre",
",",
"pre_block",
",",
"text",
",",
"post_block",
",",
"post",
")"
]
| This method will return all the necessary commands to get from the config we are in to the target
config.
Args:
* **target** (:class:`~pyFG.forticonfig.FortiConfig`) - Target config.
    * **init** (bool) - This tells the method whether this is the first call to the method or if we are inside\
the recursion. You can ignore this parameter.
* **indent_level** (int) - This tells the method how deep you are in the recursion. You can ignore it.
Returns:
A string containing all the necessary commands to reach the target config. | [
"This",
"method",
"will",
"return",
"all",
"the",
"necessary",
"commands",
"to",
"get",
"from",
"the",
"config",
"we",
"are",
"in",
"to",
"the",
"target",
"config",
"."
]
| 518668539146e7f998a37d75994a4278adf79897 | https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/forticonfig.py#L103-L173 | train |
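
A rough sketch of a compare_config call. It assumes FortiConfig exposes a set_param counterpart to the get_param used above; that method is not shown in this record, so treat the snippet as illustrative rather than definitive.

from pyFG.forticonfig import FortiConfig

running = FortiConfig('running')
candidate = FortiConfig('candidate')

# Assumed API: set_param mirrors the get_param calls in compare_config
running.set_param('hostname', '"fw-old"')
candidate.set_param('hostname', '"fw-new"')
candidate.set_param('timezone', '04')

# Commands needed to move the device from 'running' to 'candidate'
print(running.compare_config(candidate))
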
spotify/pyfg | pyFG/forticonfig.py | FortiConfig.to_text | def to_text(self, relative=False, indent_level=0, clean_empty_block=False):
"""
This method returns the object model in text format. You should be able to copy&paste this text into any
device running a supported version of FortiOS.
Args:
- **relative** (bool):
* If ``True`` the text returned will assume that you are one block away
* If ``False`` the text returned will contain instructions to reach the block from the root.
- **indent_level** (int): This value is for aesthetics only. It will help format the text in blocks to\
increase readability.
- **clean_empty_block** (bool):
* If ``True`` a block without parameters or with sub_blocks without parameters will return an empty\
string
* If ``False`` a block without parameters will still return how to create it.
"""
if relative:
fwd = self.rel_path_fwd
bwd = self.rel_path_bwd
else:
fwd = self.full_path_fwd
bwd = self.full_path_bwd
indent = 4*indent_level*' '
pre = '%s%s' % (indent, fwd)
post = '%s%s' % (indent, bwd)
text = ''
for param, value in self.iterparams():
text += ' %sset %s %s\n' % (indent, param, value)
for key, block in self.iterblocks():
text += block.to_text(True, indent_level+1)
if len(text) > 0 or not clean_empty_block:
text = '%s%s%s' % (pre, text, post)
return text | python | def to_text(self, relative=False, indent_level=0, clean_empty_block=False):
"""
This method returns the object model in text format. You should be able to copy&paste this text into any
device running a supported version of FortiOS.
Args:
- **relative** (bool):
* If ``True`` the text returned will assume that you are one block away
* If ``False`` the text returned will contain instructions to reach the block from the root.
- **indent_level** (int): This value is for aesthetics only. It will help format the text in blocks to\
increase readability.
- **clean_empty_block** (bool):
* If ``True`` a block without parameters or with sub_blocks without parameters will return an empty\
string
* If ``False`` a block without parameters will still return how to create it.
"""
if relative:
fwd = self.rel_path_fwd
bwd = self.rel_path_bwd
else:
fwd = self.full_path_fwd
bwd = self.full_path_bwd
indent = 4*indent_level*' '
pre = '%s%s' % (indent, fwd)
post = '%s%s' % (indent, bwd)
text = ''
for param, value in self.iterparams():
text += ' %sset %s %s\n' % (indent, param, value)
for key, block in self.iterblocks():
text += block.to_text(True, indent_level+1)
if len(text) > 0 or not clean_empty_block:
text = '%s%s%s' % (pre, text, post)
return text | [
"def",
"to_text",
"(",
"self",
",",
"relative",
"=",
"False",
",",
"indent_level",
"=",
"0",
",",
"clean_empty_block",
"=",
"False",
")",
":",
"if",
"relative",
":",
"fwd",
"=",
"self",
".",
"rel_path_fwd",
"bwd",
"=",
"self",
".",
"rel_path_bwd",
"else",
":",
"fwd",
"=",
"self",
".",
"full_path_fwd",
"bwd",
"=",
"self",
".",
"full_path_bwd",
"indent",
"=",
"4",
"*",
"indent_level",
"*",
"' '",
"pre",
"=",
"'%s%s'",
"%",
"(",
"indent",
",",
"fwd",
")",
"post",
"=",
"'%s%s'",
"%",
"(",
"indent",
",",
"bwd",
")",
"text",
"=",
"''",
"for",
"param",
",",
"value",
"in",
"self",
".",
"iterparams",
"(",
")",
":",
"text",
"+=",
"' %sset %s %s\\n'",
"%",
"(",
"indent",
",",
"param",
",",
"value",
")",
"for",
"key",
",",
"block",
"in",
"self",
".",
"iterblocks",
"(",
")",
":",
"text",
"+=",
"block",
".",
"to_text",
"(",
"True",
",",
"indent_level",
"+",
"1",
")",
"if",
"len",
"(",
"text",
")",
">",
"0",
"or",
"not",
"clean_empty_block",
":",
"text",
"=",
"'%s%s%s'",
"%",
"(",
"pre",
",",
"text",
",",
"post",
")",
"return",
"text"
]
| This method returns the object model in text format. You should be able to copy&paste this text into any
device running a supported version of FortiOS.
Args:
- **relative** (bool):
* If ``True`` the text returned will assume that you are one block away
* If ``False`` the text returned will contain instructions to reach the block from the root.
- **indent_level** (int): This value is for aesthetics only. It will help format the text in blocks to\
increase readability.
- **clean_empty_block** (bool):
* If ``True`` a block without parameters or with sub_blocks without parameters will return an empty\
string
* If ``False`` a block without parameters will still return how to create it. | [
"This",
"method",
"returns",
"the",
"object",
"model",
"in",
"text",
"format",
".",
"You",
"should",
"be",
"able",
"to",
"copy&paste",
"this",
"text",
"into",
"any",
"device",
"running",
"a",
"supported",
"version",
"of",
"FortiOS",
"."
]
| 518668539146e7f998a37d75994a4278adf79897 | https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/forticonfig.py#L305-L341 | train |
spotify/pyfg | pyFG/fortios.py | FortiOS.open | def open(self):
"""
Opens the ssh session with the device.
"""
logger.debug('Connecting to device %s, vdom %s' % (self.hostname, self.vdom))
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
cfg = {
'hostname': self.hostname,
'timeout': self.timeout,
'username': self.username,
'password': self.password,
'key_filename': self.keyfile
}
if os.path.exists(os.path.expanduser("~/.ssh/config")):
ssh_config = paramiko.SSHConfig()
user_config_file = os.path.expanduser("~/.ssh/config")
with io.open(user_config_file, 'rt', encoding='utf-8') as f:
ssh_config.parse(f)
host_conf = ssh_config.lookup(self.hostname)
if host_conf:
if 'proxycommand' in host_conf:
cfg['sock'] = paramiko.ProxyCommand(host_conf['proxycommand'])
if 'user' in host_conf:
cfg['username'] = host_conf['user']
if 'identityfile' in host_conf:
cfg['key_filename'] = host_conf['identityfile']
if 'hostname' in host_conf:
cfg['hostname'] = host_conf['hostname']
self.ssh.connect(**cfg) | python | def open(self):
"""
Opens the ssh session with the device.
"""
logger.debug('Connecting to device %s, vdom %s' % (self.hostname, self.vdom))
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
cfg = {
'hostname': self.hostname,
'timeout': self.timeout,
'username': self.username,
'password': self.password,
'key_filename': self.keyfile
}
if os.path.exists(os.path.expanduser("~/.ssh/config")):
ssh_config = paramiko.SSHConfig()
user_config_file = os.path.expanduser("~/.ssh/config")
with io.open(user_config_file, 'rt', encoding='utf-8') as f:
ssh_config.parse(f)
host_conf = ssh_config.lookup(self.hostname)
if host_conf:
if 'proxycommand' in host_conf:
cfg['sock'] = paramiko.ProxyCommand(host_conf['proxycommand'])
if 'user' in host_conf:
cfg['username'] = host_conf['user']
if 'identityfile' in host_conf:
cfg['key_filename'] = host_conf['identityfile']
if 'hostname' in host_conf:
cfg['hostname'] = host_conf['hostname']
self.ssh.connect(**cfg) | [
"def",
"open",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Connecting to device %s, vdom %s'",
"%",
"(",
"self",
".",
"hostname",
",",
"self",
".",
"vdom",
")",
")",
"self",
".",
"ssh",
"=",
"paramiko",
".",
"SSHClient",
"(",
")",
"self",
".",
"ssh",
".",
"set_missing_host_key_policy",
"(",
"paramiko",
".",
"AutoAddPolicy",
"(",
")",
")",
"cfg",
"=",
"{",
"'hostname'",
":",
"self",
".",
"hostname",
",",
"'timeout'",
":",
"self",
".",
"timeout",
",",
"'username'",
":",
"self",
".",
"username",
",",
"'password'",
":",
"self",
".",
"password",
",",
"'key_filename'",
":",
"self",
".",
"keyfile",
"}",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.ssh/config\"",
")",
")",
":",
"ssh_config",
"=",
"paramiko",
".",
"SSHConfig",
"(",
")",
"user_config_file",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.ssh/config\"",
")",
"with",
"io",
".",
"open",
"(",
"user_config_file",
",",
"'rt'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"ssh_config",
".",
"parse",
"(",
"f",
")",
"host_conf",
"=",
"ssh_config",
".",
"lookup",
"(",
"self",
".",
"hostname",
")",
"if",
"host_conf",
":",
"if",
"'proxycommand'",
"in",
"host_conf",
":",
"cfg",
"[",
"'sock'",
"]",
"=",
"paramiko",
".",
"ProxyCommand",
"(",
"host_conf",
"[",
"'proxycommand'",
"]",
")",
"if",
"'user'",
"in",
"host_conf",
":",
"cfg",
"[",
"'username'",
"]",
"=",
"host_conf",
"[",
"'user'",
"]",
"if",
"'identityfile'",
"in",
"host_conf",
":",
"cfg",
"[",
"'key_filename'",
"]",
"=",
"host_conf",
"[",
"'identityfile'",
"]",
"if",
"'hostname'",
"in",
"host_conf",
":",
"cfg",
"[",
"'hostname'",
"]",
"=",
"host_conf",
"[",
"'hostname'",
"]",
"self",
".",
"ssh",
".",
"connect",
"(",
"*",
"*",
"cfg",
")"
]
| Opens the ssh session with the device. | [
"Opens",
"the",
"ssh",
"session",
"with",
"the",
"device",
"."
]
| 518668539146e7f998a37d75994a4278adf79897 | https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/fortios.py#L68-L103 | train |
spotify/pyfg | pyFG/fortios.py | FortiOS._read_wrapper | def _read_wrapper(data):
"""Ensure unicode always returned on read."""
# Paramiko (strangely) in PY3 returns an int here.
if isinstance(data, int):
data = chr(data)
# Ensure unicode
return py23_compat.text_type(data) | python | def _read_wrapper(data):
"""Ensure unicode always returned on read."""
# Paramiko (strangely) in PY3 returns an int here.
if isinstance(data, int):
data = chr(data)
# Ensure unicode
return py23_compat.text_type(data) | [
"def",
"_read_wrapper",
"(",
"data",
")",
":",
"# Paramiko (strangely) in PY3 returns an int here.",
"if",
"isinstance",
"(",
"data",
",",
"int",
")",
":",
"data",
"=",
"chr",
"(",
"data",
")",
"# Ensure unicode",
"return",
"py23_compat",
".",
"text_type",
"(",
"data",
")"
]
| Ensure unicode always returned on read. | [
"Ensure",
"unicode",
"always",
"returned",
"on",
"read",
"."
]
| 518668539146e7f998a37d75994a4278adf79897 | https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/fortios.py#L114-L120 | train |
spotify/pyfg | pyFG/fortios.py | FortiOS._parse_batch_lastlog | def _parse_batch_lastlog(last_log):
"""
This static method will help reading the result of the commit, command by command.
Args:
last_log(list): A list containing, line by line, the result of committing the changes.
Returns:
A list of tuples that went wrong. The tuple will contain (*status_code*, *command*)
"""
regexp = re.compile('(-?[0-9]\d*):\W+(.*)')
wrong_commands = list()
for line in last_log:
result = regexp.match(line)
if result is not None:
status_code = result.group(1)
command = result.group(2)
if int(status_code) < 0:
wrong_commands.append((status_code, command))
return wrong_commands | python | def _parse_batch_lastlog(last_log):
"""
This static method will help reading the result of the commit, command by command.
Args:
last_log(list): A list containing, line by line, the result of committing the changes.
Returns:
A list of tuples that went wrong. The tuple will contain (*status_code*, *command*)
"""
regexp = re.compile('(-?[0-9]\d*):\W+(.*)')
wrong_commands = list()
for line in last_log:
result = regexp.match(line)
if result is not None:
status_code = result.group(1)
command = result.group(2)
if int(status_code) < 0:
wrong_commands.append((status_code, command))
return wrong_commands | [
"def",
"_parse_batch_lastlog",
"(",
"last_log",
")",
":",
"regexp",
"=",
"re",
".",
"compile",
"(",
"'(-?[0-9]\\d*):\\W+(.*)'",
")",
"wrong_commands",
"=",
"list",
"(",
")",
"for",
"line",
"in",
"last_log",
":",
"result",
"=",
"regexp",
".",
"match",
"(",
"line",
")",
"if",
"result",
"is",
"not",
"None",
":",
"status_code",
"=",
"result",
".",
"group",
"(",
"1",
")",
"command",
"=",
"result",
".",
"group",
"(",
"2",
")",
"if",
"int",
"(",
"status_code",
")",
"<",
"0",
":",
"wrong_commands",
".",
"append",
"(",
"(",
"status_code",
",",
"command",
")",
")",
"return",
"wrong_commands"
]
| This static method will help reading the result of the commit, command by command.
Args:
last_log(list): A list containing, line by line, the result of committing the changes.
Returns:
A list of tuples that went wrong. The tuple will contain (*status_code*, *command*) | [
"This",
"static",
"method",
"will",
"help",
"reading",
"the",
"result",
"of",
"the",
"commit",
"command",
"by",
"command",
"."
]
| 518668539146e7f998a37d75994a4278adf79897 | https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/fortios.py#L349-L370 | train |
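
The static method above keys only on the numeric status prefix of each line, so it can be exercised with any list of strings; the sample log lines below are made up.

from pyFG.fortios import FortiOS

last_log = [
    '0: config system interface',
    '0: edit "port1"',
    '-23: set vdom "blue"',
    '-1: end',
]

# Only the entries with negative status codes are reported back
print(FortiOS._parse_batch_lastlog(last_log))
# [('-23', 'set vdom "blue"'), ('-1', 'end')]
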
spotify/pyfg | pyFG/fortios.py | FortiOS._reload_config | def _reload_config(self, reload_original_config):
"""
This command will update the running config from the live device.
Args:
* reload_original_config:
* If ``True`` the original config will be loaded with the running config before reloading the\
original config.
* If ``False`` the original config will remain untouched.
"""
# We don't want to reload the config under some circumstances
if reload_original_config:
self.original_config = self.running_config
self.original_config.set_name('original')
paths = self.running_config.get_paths()
self.running_config = FortiConfig('running', vdom=self.vdom)
for path in paths:
self.load_config(path, empty_candidate=True) | python | def _reload_config(self, reload_original_config):
"""
This command will update the running config from the live device.
Args:
* reload_original_config:
* If ``True`` the original config will be loaded with the running config before reloading the\
original config.
* If ``False`` the original config will remain untouched.
"""
# We don't want to reload the config under some circumstances
if reload_original_config:
self.original_config = self.running_config
self.original_config.set_name('original')
paths = self.running_config.get_paths()
self.running_config = FortiConfig('running', vdom=self.vdom)
for path in paths:
self.load_config(path, empty_candidate=True) | [
"def",
"_reload_config",
"(",
"self",
",",
"reload_original_config",
")",
":",
"# We don't want to reload the config under some circumstances",
"if",
"reload_original_config",
":",
"self",
".",
"original_config",
"=",
"self",
".",
"running_config",
"self",
".",
"original_config",
".",
"set_name",
"(",
"'original'",
")",
"paths",
"=",
"self",
".",
"running_config",
".",
"get_paths",
"(",
")",
"self",
".",
"running_config",
"=",
"FortiConfig",
"(",
"'running'",
",",
"vdom",
"=",
"self",
".",
"vdom",
")",
"for",
"path",
"in",
"paths",
":",
"self",
".",
"load_config",
"(",
"path",
",",
"empty_candidate",
"=",
"True",
")"
]
| This command will update the running config from the live device.
Args:
* reload_original_config:
* If ``True`` the original config will be loaded with the running config before reloading the\
original config.
* If ``False`` the original config will remain untouched. | [
"This",
"command",
"will",
"update",
"the",
"running",
"config",
"from",
"the",
"live",
"device",
"."
]
| 518668539146e7f998a37d75994a4278adf79897 | https://github.com/spotify/pyfg/blob/518668539146e7f998a37d75994a4278adf79897/pyFG/fortios.py#L372-L392 | train |
mattjj/pyslds | pyslds/states.py | _SLDSStates.generate_states | def generate_states(self, initial_condition=None, with_noise=True, stateseq=None):
"""
Jointly sample the discrete and continuous states
"""
from pybasicbayes.util.stats import sample_discrete
# Generate from the prior and raise exception if unstable
T, K, n = self.T, self.num_states, self.D_latent
A = self.trans_matrix
# Initialize discrete state sequence
dss = -1 * np.ones(T, dtype=np.int32) if stateseq is None else stateseq.astype(np.int32)
assert dss.shape == (T,)
gss = np.empty((T,n), dtype='double')
if initial_condition is None:
if dss[0] == -1:
dss[0] = sample_discrete(self.pi_0)
gss[0] = self.init_dynamics_distns[dss[0]].rvs()
else:
dss[0] = initial_condition[0]
gss[0] = initial_condition[1]
for t in range(1,T):
# Sample discrete state given previous continuous state
if with_noise:
            # Sample discrete state from recurrent transition matrix
if dss[t] == -1:
dss[t] = sample_discrete(A[dss[t-1], :])
# Sample continuous state given current discrete state
gss[t] = self.dynamics_distns[dss[t-1]].\
rvs(x=np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])),
return_xy=False)
else:
# Pick the most likely next discrete state and continuous state
if dss[t] == -1:
dss[t] = np.argmax(A[dss[t-1], :])
gss[t] = self.dynamics_distns[dss[t-1]]. \
predict(np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])))
assert np.all(np.isfinite(gss[t])), "SLDS appears to be unstable!"
self.stateseq = dss
self.gaussian_states = gss | python | def generate_states(self, initial_condition=None, with_noise=True, stateseq=None):
"""
Jointly sample the discrete and continuous states
"""
from pybasicbayes.util.stats import sample_discrete
# Generate from the prior and raise exception if unstable
T, K, n = self.T, self.num_states, self.D_latent
A = self.trans_matrix
# Initialize discrete state sequence
dss = -1 * np.ones(T, dtype=np.int32) if stateseq is None else stateseq.astype(np.int32)
assert dss.shape == (T,)
gss = np.empty((T,n), dtype='double')
if initial_condition is None:
if dss[0] == -1:
dss[0] = sample_discrete(self.pi_0)
gss[0] = self.init_dynamics_distns[dss[0]].rvs()
else:
dss[0] = initial_condition[0]
gss[0] = initial_condition[1]
for t in range(1,T):
# Sample discrete state given previous continuous state
if with_noise:
            # Sample discrete state from recurrent transition matrix
if dss[t] == -1:
dss[t] = sample_discrete(A[dss[t-1], :])
# Sample continuous state given current discrete state
gss[t] = self.dynamics_distns[dss[t-1]].\
rvs(x=np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])),
return_xy=False)
else:
# Pick the most likely next discrete state and continuous state
if dss[t] == -1:
dss[t] = np.argmax(A[dss[t-1], :])
gss[t] = self.dynamics_distns[dss[t-1]]. \
predict(np.hstack((gss[t-1][None,:], self.inputs[t-1][None,:])))
assert np.all(np.isfinite(gss[t])), "SLDS appears to be unstable!"
self.stateseq = dss
self.gaussian_states = gss | [
"def",
"generate_states",
"(",
"self",
",",
"initial_condition",
"=",
"None",
",",
"with_noise",
"=",
"True",
",",
"stateseq",
"=",
"None",
")",
":",
"from",
"pybasicbayes",
".",
"util",
".",
"stats",
"import",
"sample_discrete",
"# Generate from the prior and raise exception if unstable",
"T",
",",
"K",
",",
"n",
"=",
"self",
".",
"T",
",",
"self",
".",
"num_states",
",",
"self",
".",
"D_latent",
"A",
"=",
"self",
".",
"trans_matrix",
"# Initialize discrete state sequence",
"dss",
"=",
"-",
"1",
"*",
"np",
".",
"ones",
"(",
"T",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"if",
"stateseq",
"is",
"None",
"else",
"stateseq",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"assert",
"dss",
".",
"shape",
"==",
"(",
"T",
",",
")",
"gss",
"=",
"np",
".",
"empty",
"(",
"(",
"T",
",",
"n",
")",
",",
"dtype",
"=",
"'double'",
")",
"if",
"initial_condition",
"is",
"None",
":",
"if",
"dss",
"[",
"0",
"]",
"==",
"-",
"1",
":",
"dss",
"[",
"0",
"]",
"=",
"sample_discrete",
"(",
"self",
".",
"pi_0",
")",
"gss",
"[",
"0",
"]",
"=",
"self",
".",
"init_dynamics_distns",
"[",
"dss",
"[",
"0",
"]",
"]",
".",
"rvs",
"(",
")",
"else",
":",
"dss",
"[",
"0",
"]",
"=",
"initial_condition",
"[",
"0",
"]",
"gss",
"[",
"0",
"]",
"=",
"initial_condition",
"[",
"1",
"]",
"for",
"t",
"in",
"range",
"(",
"1",
",",
"T",
")",
":",
"# Sample discrete state given previous continuous state",
"if",
"with_noise",
":",
"# Sample discre=te state from recurrent transition matrix",
"if",
"dss",
"[",
"t",
"]",
"==",
"-",
"1",
":",
"dss",
"[",
"t",
"]",
"=",
"sample_discrete",
"(",
"A",
"[",
"dss",
"[",
"t",
"-",
"1",
"]",
",",
":",
"]",
")",
"# Sample continuous state given current discrete state",
"gss",
"[",
"t",
"]",
"=",
"self",
".",
"dynamics_distns",
"[",
"dss",
"[",
"t",
"-",
"1",
"]",
"]",
".",
"rvs",
"(",
"x",
"=",
"np",
".",
"hstack",
"(",
"(",
"gss",
"[",
"t",
"-",
"1",
"]",
"[",
"None",
",",
":",
"]",
",",
"self",
".",
"inputs",
"[",
"t",
"-",
"1",
"]",
"[",
"None",
",",
":",
"]",
")",
")",
",",
"return_xy",
"=",
"False",
")",
"else",
":",
"# Pick the most likely next discrete state and continuous state",
"if",
"dss",
"[",
"t",
"]",
"==",
"-",
"1",
":",
"dss",
"[",
"t",
"]",
"=",
"np",
".",
"argmax",
"(",
"A",
"[",
"dss",
"[",
"t",
"-",
"1",
"]",
",",
":",
"]",
")",
"gss",
"[",
"t",
"]",
"=",
"self",
".",
"dynamics_distns",
"[",
"dss",
"[",
"t",
"-",
"1",
"]",
"]",
".",
"predict",
"(",
"np",
".",
"hstack",
"(",
"(",
"gss",
"[",
"t",
"-",
"1",
"]",
"[",
"None",
",",
":",
"]",
",",
"self",
".",
"inputs",
"[",
"t",
"-",
"1",
"]",
"[",
"None",
",",
":",
"]",
")",
")",
")",
"assert",
"np",
".",
"all",
"(",
"np",
".",
"isfinite",
"(",
"gss",
"[",
"t",
"]",
")",
")",
",",
"\"SLDS appears to be unstable!\"",
"self",
".",
"stateseq",
"=",
"dss",
"self",
".",
"gaussian_states",
"=",
"gss"
]
| Jointly sample the discrete and continuous states | [
"Jointly",
"sample",
"the",
"discrete",
"and",
"continuous",
"states"
]
| c505c2bd05a5549d450b518f02493b68ed12e590 | https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/states.py#L35-L78 | train |
mattjj/pyslds | pyslds/states.py | _SLDSStatesMaskedData.heldout_log_likelihood | def heldout_log_likelihood(self, test_mask=None):
"""
Compute the log likelihood of the masked data given the latent
discrete and continuous states.
"""
if test_mask is None:
# If a test mask is not supplied, use the negation of this object's mask
if self.mask is None:
return 0
else:
test_mask = ~self.mask
xs = np.hstack((self.gaussian_states, self.inputs))
if self.single_emission:
return self.emission_distns[0].\
log_likelihood((xs, self.data), mask=test_mask).sum()
else:
hll = 0
z = self.stateseq
for idx, ed in enumerate(self.emission_distns):
hll += ed.log_likelihood((xs[z == idx], self.data[z == idx]),
mask=test_mask[z == idx]).sum() | python | def heldout_log_likelihood(self, test_mask=None):
"""
Compute the log likelihood of the masked data given the latent
discrete and continuous states.
"""
if test_mask is None:
# If a test mask is not supplied, use the negation of this object's mask
if self.mask is None:
return 0
else:
test_mask = ~self.mask
xs = np.hstack((self.gaussian_states, self.inputs))
if self.single_emission:
return self.emission_distns[0].\
log_likelihood((xs, self.data), mask=test_mask).sum()
else:
hll = 0
z = self.stateseq
for idx, ed in enumerate(self.emission_distns):
hll += ed.log_likelihood((xs[z == idx], self.data[z == idx]),
mask=test_mask[z == idx]).sum() | [
"def",
"heldout_log_likelihood",
"(",
"self",
",",
"test_mask",
"=",
"None",
")",
":",
"if",
"test_mask",
"is",
"None",
":",
"# If a test mask is not supplied, use the negation of this object's mask",
"if",
"self",
".",
"mask",
"is",
"None",
":",
"return",
"0",
"else",
":",
"test_mask",
"=",
"~",
"self",
".",
"mask",
"xs",
"=",
"np",
".",
"hstack",
"(",
"(",
"self",
".",
"gaussian_states",
",",
"self",
".",
"inputs",
")",
")",
"if",
"self",
".",
"single_emission",
":",
"return",
"self",
".",
"emission_distns",
"[",
"0",
"]",
".",
"log_likelihood",
"(",
"(",
"xs",
",",
"self",
".",
"data",
")",
",",
"mask",
"=",
"test_mask",
")",
".",
"sum",
"(",
")",
"else",
":",
"hll",
"=",
"0",
"z",
"=",
"self",
".",
"stateseq",
"for",
"idx",
",",
"ed",
"in",
"enumerate",
"(",
"self",
".",
"emission_distns",
")",
":",
"hll",
"+=",
"ed",
".",
"log_likelihood",
"(",
"(",
"xs",
"[",
"z",
"==",
"idx",
"]",
",",
"self",
".",
"data",
"[",
"z",
"==",
"idx",
"]",
")",
",",
"mask",
"=",
"test_mask",
"[",
"z",
"==",
"idx",
"]",
")",
".",
"sum",
"(",
")"
]
| Compute the log likelihood of the masked data given the latent
discrete and continuous states. | [
"Compute",
"the",
"log",
"likelihood",
"of",
"the",
"masked",
"data",
"given",
"the",
"latent",
"discrete",
"and",
"continuous",
"states",
"."
]
| c505c2bd05a5549d450b518f02493b68ed12e590 | https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/states.py#L853-L874 | train |
mattjj/pyslds | pyslds/states.py | _SLDSStatesCountData.empirical_rate | def empirical_rate(data, sigma=3.0):
"""
Smooth count data to get an empirical rate
"""
from scipy.ndimage.filters import gaussian_filter1d
return 0.001 + gaussian_filter1d(data.astype(np.float), sigma, axis=0) | python | def empirical_rate(data, sigma=3.0):
"""
Smooth count data to get an empirical rate
"""
from scipy.ndimage.filters import gaussian_filter1d
return 0.001 + gaussian_filter1d(data.astype(np.float), sigma, axis=0) | [
"def",
"empirical_rate",
"(",
"data",
",",
"sigma",
"=",
"3.0",
")",
":",
"from",
"scipy",
".",
"ndimage",
".",
"filters",
"import",
"gaussian_filter1d",
"return",
"0.001",
"+",
"gaussian_filter1d",
"(",
"data",
".",
"astype",
"(",
"np",
".",
"float",
")",
",",
"sigma",
",",
"axis",
"=",
"0",
")"
]
| Smooth count data to get an empirical rate | [
"Smooth",
"count",
"data",
"to",
"get",
"an",
"empirical",
"rate"
]
| c505c2bd05a5549d450b518f02493b68ed12e590 | https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/states.py#L1251-L1256 | train |
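
A self-contained check of the smoothing in empirical_rate on synthetic count data; the spike counts are random and only meant to show the shapes involved.

import numpy as np
from scipy.ndimage import gaussian_filter1d

# Fake spike counts: 500 time bins x 10 neurons
counts = np.random.poisson(0.2, size=(500, 10))

# Same computation as empirical_rate(counts, sigma=3.0)
rates = 0.001 + gaussian_filter1d(counts.astype(float), 3.0, axis=0)
print(rates.shape)  # (500, 10)
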
mattjj/pyslds | pyslds/util.py | get_empirical_ar_params | def get_empirical_ar_params(train_datas, params):
"""
Estimate the parameters of an AR observation model
by fitting a single AR model to the entire dataset.
"""
assert isinstance(train_datas, list) and len(train_datas) > 0
datadimension = train_datas[0].shape[1]
assert params["nu_0"] > datadimension + 1
# Initialize the observation parameters
obs_params = dict(nu_0=params["nu_0"],
S_0=params['S_0'],
M_0=params['M_0'],
K_0=params['K_0'],
affine=params['affine'])
# Fit an AR model to the entire dataset
obs_distn = AutoRegression(**obs_params)
obs_distn.max_likelihood(train_datas)
# Use the inferred noise covariance as the prior mean
# E_{IW}[S] = S_0 / (nu_0 - datadimension - 1)
obs_params["S_0"] = obs_distn.sigma * (params["nu_0"] - datadimension - 1)
obs_params["M_0"] = obs_distn.A.copy()
return obs_params | python | def get_empirical_ar_params(train_datas, params):
"""
Estimate the parameters of an AR observation model
by fitting a single AR model to the entire dataset.
"""
assert isinstance(train_datas, list) and len(train_datas) > 0
datadimension = train_datas[0].shape[1]
assert params["nu_0"] > datadimension + 1
# Initialize the observation parameters
obs_params = dict(nu_0=params["nu_0"],
S_0=params['S_0'],
M_0=params['M_0'],
K_0=params['K_0'],
affine=params['affine'])
# Fit an AR model to the entire dataset
obs_distn = AutoRegression(**obs_params)
obs_distn.max_likelihood(train_datas)
# Use the inferred noise covariance as the prior mean
# E_{IW}[S] = S_0 / (nu_0 - datadimension - 1)
obs_params["S_0"] = obs_distn.sigma * (params["nu_0"] - datadimension - 1)
obs_params["M_0"] = obs_distn.A.copy()
return obs_params | [
"def",
"get_empirical_ar_params",
"(",
"train_datas",
",",
"params",
")",
":",
"assert",
"isinstance",
"(",
"train_datas",
",",
"list",
")",
"and",
"len",
"(",
"train_datas",
")",
">",
"0",
"datadimension",
"=",
"train_datas",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
"]",
"assert",
"params",
"[",
"\"nu_0\"",
"]",
">",
"datadimension",
"+",
"1",
"# Initialize the observation parameters",
"obs_params",
"=",
"dict",
"(",
"nu_0",
"=",
"params",
"[",
"\"nu_0\"",
"]",
",",
"S_0",
"=",
"params",
"[",
"'S_0'",
"]",
",",
"M_0",
"=",
"params",
"[",
"'M_0'",
"]",
",",
"K_0",
"=",
"params",
"[",
"'K_0'",
"]",
",",
"affine",
"=",
"params",
"[",
"'affine'",
"]",
")",
"# Fit an AR model to the entire dataset",
"obs_distn",
"=",
"AutoRegression",
"(",
"*",
"*",
"obs_params",
")",
"obs_distn",
".",
"max_likelihood",
"(",
"train_datas",
")",
"# Use the inferred noise covariance as the prior mean",
"# E_{IW}[S] = S_0 / (nu_0 - datadimension - 1)",
"obs_params",
"[",
"\"S_0\"",
"]",
"=",
"obs_distn",
".",
"sigma",
"*",
"(",
"params",
"[",
"\"nu_0\"",
"]",
"-",
"datadimension",
"-",
"1",
")",
"obs_params",
"[",
"\"M_0\"",
"]",
"=",
"obs_distn",
".",
"A",
".",
"copy",
"(",
")",
"return",
"obs_params"
]
| Estimate the parameters of an AR observation model
by fitting a single AR model to the entire dataset. | [
"Estimate",
"the",
"parameters",
"of",
"an",
"AR",
"observation",
"model",
"by",
"fitting",
"a",
"single",
"AR",
"model",
"to",
"the",
"entire",
"dataset",
"."
]
| c505c2bd05a5549d450b518f02493b68ed12e590 | https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/util.py#L6-L32 | train |
pastpages/savepagenow | savepagenow/api.py | capture | def capture(
target_url,
user_agent="savepagenow (https://github.com/pastpages/savepagenow)",
accept_cache=False
):
"""
Archives the provided URL using archive.org's Wayback Machine.
Returns the archive.org URL where the capture is stored.
Raises a CachedPage exception if archive.org declines to conduct a new
capture and returns a previous snapshot instead.
    To silence that exception, pass True to the ``accept_cache`` keyword
argument.
"""
# Put together the URL that will save our request
domain = "https://web.archive.org"
save_url = urljoin(domain, "/save/")
request_url = save_url + target_url
    # Send the capture request to archive.org
headers = {
'User-Agent': user_agent,
}
response = requests.get(request_url, headers=headers)
# If it has an error header, raise that.
has_error_header = 'X-Archive-Wayback-Runtime-Error' in response.headers
if has_error_header:
error_header = response.headers['X-Archive-Wayback-Runtime-Error']
if error_header == 'RobotAccessControlException: Blocked By Robots':
raise BlockedByRobots("archive.org returned blocked by robots.txt error")
else:
raise WaybackRuntimeError(error_header)
# If it has an error code, raise that
if response.status_code in [403, 502]:
raise WaybackRuntimeError(response.headers)
# Put together the URL where this page is archived
try:
archive_id = response.headers['Content-Location']
except KeyError:
# If it can't find that key raise the error
raise WaybackRuntimeError(dict(status_code=response.status_code, headers=response.headers))
archive_url = urljoin(domain, archive_id)
# Determine if the response was cached
cached = 'X-Page-Cache' in response.headers and response.headers['X-Page-Cache'] == 'HIT'
if cached:
if not accept_cache:
raise CachedPage("archive.org returned a cached version of this page: {}".format(
archive_url
))
# Return that
return archive_url | python | def capture(
target_url,
user_agent="savepagenow (https://github.com/pastpages/savepagenow)",
accept_cache=False
):
"""
Archives the provided URL using archive.org's Wayback Machine.
Returns the archive.org URL where the capture is stored.
Raises a CachedPage exception if archive.org declines to conduct a new
capture and returns a previous snapshot instead.
    To silence that exception, pass True to the ``accept_cache`` keyword
argument.
"""
# Put together the URL that will save our request
domain = "https://web.archive.org"
save_url = urljoin(domain, "/save/")
request_url = save_url + target_url
    # Send the capture request to archive.org
headers = {
'User-Agent': user_agent,
}
response = requests.get(request_url, headers=headers)
# If it has an error header, raise that.
has_error_header = 'X-Archive-Wayback-Runtime-Error' in response.headers
if has_error_header:
error_header = response.headers['X-Archive-Wayback-Runtime-Error']
if error_header == 'RobotAccessControlException: Blocked By Robots':
raise BlockedByRobots("archive.org returned blocked by robots.txt error")
else:
raise WaybackRuntimeError(error_header)
# If it has an error code, raise that
if response.status_code in [403, 502]:
raise WaybackRuntimeError(response.headers)
# Put together the URL where this page is archived
try:
archive_id = response.headers['Content-Location']
except KeyError:
# If it can't find that key raise the error
raise WaybackRuntimeError(dict(status_code=response.status_code, headers=response.headers))
archive_url = urljoin(domain, archive_id)
# Determine if the response was cached
cached = 'X-Page-Cache' in response.headers and response.headers['X-Page-Cache'] == 'HIT'
if cached:
if not accept_cache:
raise CachedPage("archive.org returned a cached version of this page: {}".format(
archive_url
))
# Return that
return archive_url | [
"def",
"capture",
"(",
"target_url",
",",
"user_agent",
"=",
"\"savepagenow (https://github.com/pastpages/savepagenow)\"",
",",
"accept_cache",
"=",
"False",
")",
":",
"# Put together the URL that will save our request",
"domain",
"=",
"\"https://web.archive.org\"",
"save_url",
"=",
"urljoin",
"(",
"domain",
",",
"\"/save/\"",
")",
"request_url",
"=",
"save_url",
"+",
"target_url",
"# Send the capture request to achive.org",
"headers",
"=",
"{",
"'User-Agent'",
":",
"user_agent",
",",
"}",
"response",
"=",
"requests",
".",
"get",
"(",
"request_url",
",",
"headers",
"=",
"headers",
")",
"# If it has an error header, raise that.",
"has_error_header",
"=",
"'X-Archive-Wayback-Runtime-Error'",
"in",
"response",
".",
"headers",
"if",
"has_error_header",
":",
"error_header",
"=",
"response",
".",
"headers",
"[",
"'X-Archive-Wayback-Runtime-Error'",
"]",
"if",
"error_header",
"==",
"'RobotAccessControlException: Blocked By Robots'",
":",
"raise",
"BlockedByRobots",
"(",
"\"archive.org returned blocked by robots.txt error\"",
")",
"else",
":",
"raise",
"WaybackRuntimeError",
"(",
"error_header",
")",
"# If it has an error code, raise that",
"if",
"response",
".",
"status_code",
"in",
"[",
"403",
",",
"502",
"]",
":",
"raise",
"WaybackRuntimeError",
"(",
"response",
".",
"headers",
")",
"# Put together the URL where this page is archived",
"try",
":",
"archive_id",
"=",
"response",
".",
"headers",
"[",
"'Content-Location'",
"]",
"except",
"KeyError",
":",
"# If it can't find that key raise the error",
"raise",
"WaybackRuntimeError",
"(",
"dict",
"(",
"status_code",
"=",
"response",
".",
"status_code",
",",
"headers",
"=",
"response",
".",
"headers",
")",
")",
"archive_url",
"=",
"urljoin",
"(",
"domain",
",",
"archive_id",
")",
"# Determine if the response was cached",
"cached",
"=",
"'X-Page-Cache'",
"in",
"response",
".",
"headers",
"and",
"response",
".",
"headers",
"[",
"'X-Page-Cache'",
"]",
"==",
"'HIT'",
"if",
"cached",
":",
"if",
"not",
"accept_cache",
":",
"raise",
"CachedPage",
"(",
"\"archive.org returned a cached version of this page: {}\"",
".",
"format",
"(",
"archive_url",
")",
")",
"# Return that",
"return",
"archive_url"
]
| Archives the provided URL using archive.org's Wayback Machine.
Returns the archive.org URL where the capture is stored.
Raises a CachedPage exception if archive.org declines to conduct a new
capture and returns a previous snapshot instead.
To silence that exception, pass True to the ``accept_cache`` keyword
argument. | [
"Archives",
"the",
"provided",
"URL",
"using",
"archive",
".",
"org",
"s",
"Wayback",
"Machine",
"."
]
| 9555ffb10905fe1b0d2452be2bd8a7d4338a8379 | https://github.com/pastpages/savepagenow/blob/9555ffb10905fe1b0d2452be2bd8a7d4338a8379/savepagenow/api.py#L8-L65 | train |
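
Basic usage of the capture function above; the target URL is arbitrary, and accept_cache=True is passed so a recently cached snapshot is returned instead of raising CachedPage.

import savepagenow

archive_url = savepagenow.capture('http://example.com/', accept_cache=True)
print(archive_url)  # e.g. https://web.archive.org/web/<timestamp>/http://example.com/
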
pastpages/savepagenow | savepagenow/api.py | capture_or_cache | def capture_or_cache(
target_url,
user_agent="savepagenow (https://github.com/pastpages/savepagenow)"
):
"""
Archives the provided URL using archive.org's Wayback Machine, unless
the page has been recently captured.
Returns a tuple with the archive.org URL where the capture is stored,
along with a boolean indicating if a new capture was conducted.
If the boolean is True, archive.org conducted a new capture. If it is False,
archive.org has returned a recently cached capture instead, likely taken
in the previous minutes.
"""
try:
return capture(target_url, user_agent=user_agent, accept_cache=False), True
except CachedPage:
return capture(target_url, user_agent=user_agent, accept_cache=True), False | python | def capture_or_cache(
target_url,
user_agent="savepagenow (https://github.com/pastpages/savepagenow)"
):
"""
Archives the provided URL using archive.org's Wayback Machine, unless
the page has been recently captured.
Returns a tuple with the archive.org URL where the capture is stored,
along with a boolean indicating if a new capture was conducted.
If the boolean is True, archive.org conducted a new capture. If it is False,
archive.org has returned a recently cached capture instead, likely taken
in the previous minutes.
"""
try:
return capture(target_url, user_agent=user_agent, accept_cache=False), True
except CachedPage:
return capture(target_url, user_agent=user_agent, accept_cache=True), False | [
"def",
"capture_or_cache",
"(",
"target_url",
",",
"user_agent",
"=",
"\"savepagenow (https://github.com/pastpages/savepagenow)\"",
")",
":",
"try",
":",
"return",
"capture",
"(",
"target_url",
",",
"user_agent",
"=",
"user_agent",
",",
"accept_cache",
"=",
"False",
")",
",",
"True",
"except",
"CachedPage",
":",
"return",
"capture",
"(",
"target_url",
",",
"user_agent",
"=",
"user_agent",
",",
"accept_cache",
"=",
"True",
")",
",",
"False"
]
| Archives the provided URL using archive.org's Wayback Machine, unless
the page has been recently captured.
Returns a tuple with the archive.org URL where the capture is stored,
along with a boolean indicating if a new capture was conducted.
If the boolean is True, archive.org conducted a new capture. If it is False,
archive.org has returned a recently cached capture instead, likely taken
in the previous minutes. | [
"Archives",
"the",
"provided",
"URL",
"using",
"archive",
".",
"org",
"s",
"Wayback",
"Machine",
"unless",
"the",
"page",
"has",
"been",
"recently",
"captured",
"."
]
| 9555ffb10905fe1b0d2452be2bd8a7d4338a8379 | https://github.com/pastpages/savepagenow/blob/9555ffb10905fe1b0d2452be2bd8a7d4338a8379/savepagenow/api.py#L68-L86 | train |
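
capture_or_cache wraps the same request and additionally reports whether a fresh capture was made:

import savepagenow

archive_url, captured = savepagenow.capture_or_cache('http://example.com/')
print(archive_url)
print('new capture' if captured else 'served from recent cache')
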
quandyfactory/dicttoxml | dicttoxml.py | get_unique_id | def get_unique_id(element):
"""Returns a unique id for a given element"""
this_id = make_id(element)
dup = True
while dup:
if this_id not in ids:
dup = False
ids.append(this_id)
else:
this_id = make_id(element)
return ids[-1] | python | def get_unique_id(element):
"""Returns a unique id for a given element"""
this_id = make_id(element)
dup = True
while dup:
if this_id not in ids:
dup = False
ids.append(this_id)
else:
this_id = make_id(element)
return ids[-1] | [
"def",
"get_unique_id",
"(",
"element",
")",
":",
"this_id",
"=",
"make_id",
"(",
"element",
")",
"dup",
"=",
"True",
"while",
"dup",
":",
"if",
"this_id",
"not",
"in",
"ids",
":",
"dup",
"=",
"False",
"ids",
".",
"append",
"(",
"this_id",
")",
"else",
":",
"this_id",
"=",
"make_id",
"(",
"element",
")",
"return",
"ids",
"[",
"-",
"1",
"]"
]
| Returns a unique id for a given element | [
"Returns",
"a",
"unique",
"id",
"for",
"a",
"given",
"element"
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L70-L80 | train |
quandyfactory/dicttoxml | dicttoxml.py | get_xml_type | def get_xml_type(val):
"""Returns the data type for the xml type attribute"""
if type(val).__name__ in ('str', 'unicode'):
return 'str'
if type(val).__name__ in ('int', 'long'):
return 'int'
if type(val).__name__ == 'float':
return 'float'
if type(val).__name__ == 'bool':
return 'bool'
if isinstance(val, numbers.Number):
return 'number'
if type(val).__name__ == 'NoneType':
return 'null'
if isinstance(val, dict):
return 'dict'
if isinstance(val, collections.Iterable):
return 'list'
return type(val).__name__ | python | def get_xml_type(val):
"""Returns the data type for the xml type attribute"""
if type(val).__name__ in ('str', 'unicode'):
return 'str'
if type(val).__name__ in ('int', 'long'):
return 'int'
if type(val).__name__ == 'float':
return 'float'
if type(val).__name__ == 'bool':
return 'bool'
if isinstance(val, numbers.Number):
return 'number'
if type(val).__name__ == 'NoneType':
return 'null'
if isinstance(val, dict):
return 'dict'
if isinstance(val, collections.Iterable):
return 'list'
return type(val).__name__ | [
"def",
"get_xml_type",
"(",
"val",
")",
":",
"if",
"type",
"(",
"val",
")",
".",
"__name__",
"in",
"(",
"'str'",
",",
"'unicode'",
")",
":",
"return",
"'str'",
"if",
"type",
"(",
"val",
")",
".",
"__name__",
"in",
"(",
"'int'",
",",
"'long'",
")",
":",
"return",
"'int'",
"if",
"type",
"(",
"val",
")",
".",
"__name__",
"==",
"'float'",
":",
"return",
"'float'",
"if",
"type",
"(",
"val",
")",
".",
"__name__",
"==",
"'bool'",
":",
"return",
"'bool'",
"if",
"isinstance",
"(",
"val",
",",
"numbers",
".",
"Number",
")",
":",
"return",
"'number'",
"if",
"type",
"(",
"val",
")",
".",
"__name__",
"==",
"'NoneType'",
":",
"return",
"'null'",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"return",
"'dict'",
"if",
"isinstance",
"(",
"val",
",",
"collections",
".",
"Iterable",
")",
":",
"return",
"'list'",
"return",
"type",
"(",
"val",
")",
".",
"__name__"
]
| Returns the data type for the xml type attribute | [
"Returns",
"the",
"data",
"type",
"for",
"the",
"xml",
"type",
"attribute"
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L83-L101 | train |
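
The strings returned by get_xml_type() are what end up in the type attributes when dicttoxml() is called with its default attr_type=True. A sketch, assuming the package is installed (expected output wrapped across comment lines for readability):

    import dicttoxml

    data = {"name": "Alice", "age": 30, "score": 9.5, "tags": ["a", "b"], "extra": None}
    print(dicttoxml.dicttoxml(data).decode())
    # <name type="str">Alice</name><age type="int">30</age><score type="float">9.5</score>
    # <tags type="list">...</tags><extra type="null"></extra>
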
quandyfactory/dicttoxml | dicttoxml.py | make_attrstring | def make_attrstring(attr):
"""Returns an attribute string in the form key="val" """
attrstring = ' '.join(['%s="%s"' % (k, v) for k, v in attr.items()])
return '%s%s' % (' ' if attrstring != '' else '', attrstring) | python | def make_attrstring(attr):
"""Returns an attribute string in the form key="val" """
attrstring = ' '.join(['%s="%s"' % (k, v) for k, v in attr.items()])
return '%s%s' % (' ' if attrstring != '' else '', attrstring) | [
"def",
"make_attrstring",
"(",
"attr",
")",
":",
"attrstring",
"=",
"' '",
".",
"join",
"(",
"[",
"'%s=\"%s\"'",
"%",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"attr",
".",
"items",
"(",
")",
"]",
")",
"return",
"'%s%s'",
"%",
"(",
"' '",
"if",
"attrstring",
"!=",
"''",
"else",
"''",
",",
"attrstring",
")"
]
| Returns an attribute string in the form key="val" | [
"Returns",
"an",
"attribute",
"string",
"in",
"the",
"form",
"key",
"=",
"val"
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L115-L118 | train |
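
A couple of illustrative calls; the leading space in a non-empty result lets it be concatenated directly after a tag name:

    from dicttoxml import make_attrstring

    make_attrstring({})               # ''
    make_attrstring({'type': 'str'})  # ' type="str"'
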
quandyfactory/dicttoxml | dicttoxml.py | key_is_valid_xml | def key_is_valid_xml(key):
"""Checks that a key is a valid XML name"""
LOG.info('Inside key_is_valid_xml(). Testing "%s"' % (unicode_me(key)))
test_xml = '<?xml version="1.0" encoding="UTF-8" ?><%s>foo</%s>' % (key, key)
try:
parseString(test_xml)
return True
except Exception: # minidom does not implement exceptions well
return False | python | def key_is_valid_xml(key):
"""Checks that a key is a valid XML name"""
LOG.info('Inside key_is_valid_xml(). Testing "%s"' % (unicode_me(key)))
test_xml = '<?xml version="1.0" encoding="UTF-8" ?><%s>foo</%s>' % (key, key)
try:
parseString(test_xml)
return True
except Exception: # minidom does not implement exceptions well
return False | [
"def",
"key_is_valid_xml",
"(",
"key",
")",
":",
"LOG",
".",
"info",
"(",
"'Inside key_is_valid_xml(). Testing \"%s\"'",
"%",
"(",
"unicode_me",
"(",
"key",
")",
")",
")",
"test_xml",
"=",
"'<?xml version=\"1.0\" encoding=\"UTF-8\" ?><%s>foo</%s>'",
"%",
"(",
"key",
",",
"key",
")",
"try",
":",
"parseString",
"(",
"test_xml",
")",
"return",
"True",
"except",
"Exception",
":",
"# minidom does not implement exceptions well",
"return",
"False"
]
| Checks that a key is a valid XML name | [
"Checks",
"that",
"a",
"key",
"is",
"a",
"valid",
"XML",
"name"
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L121-L129 | train |
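
A few illustrative checks; the helper simply tries to parse a tiny document built around the key and reports whether minidom accepted it:

    from dicttoxml import key_is_valid_xml

    key_is_valid_xml('item')        # True
    key_is_valid_xml('first name')  # False - element names may not contain spaces
    key_is_valid_xml('123')         # False - element names may not start with a digit
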
quandyfactory/dicttoxml | dicttoxml.py | make_valid_xml_name | def make_valid_xml_name(key, attr):
"""Tests an XML name and fixes it if invalid"""
LOG.info('Inside make_valid_xml_name(). Testing key "%s" with attr "%s"' % (
unicode_me(key), unicode_me(attr))
)
key = escape_xml(key)
attr = escape_xml(attr)
# pass through if key is already valid
if key_is_valid_xml(key):
return key, attr
# prepend a lowercase n if the key is numeric
if key.isdigit():
return 'n%s' % (key), attr
# replace spaces with underscores if that fixes the problem
if key_is_valid_xml(key.replace(' ', '_')):
return key.replace(' ', '_'), attr
# key is still invalid - move it into a name attribute
attr['name'] = key
key = 'key'
return key, attr | python | def make_valid_xml_name(key, attr):
"""Tests an XML name and fixes it if invalid"""
LOG.info('Inside make_valid_xml_name(). Testing key "%s" with attr "%s"' % (
unicode_me(key), unicode_me(attr))
)
key = escape_xml(key)
attr = escape_xml(attr)
# pass through if key is already valid
if key_is_valid_xml(key):
return key, attr
# prepend a lowercase n if the key is numeric
if key.isdigit():
return 'n%s' % (key), attr
# replace spaces with underscores if that fixes the problem
if key_is_valid_xml(key.replace(' ', '_')):
return key.replace(' ', '_'), attr
# key is still invalid - move it into a name attribute
attr['name'] = key
key = 'key'
return key, attr | [
"def",
"make_valid_xml_name",
"(",
"key",
",",
"attr",
")",
":",
"LOG",
".",
"info",
"(",
"'Inside make_valid_xml_name(). Testing key \"%s\" with attr \"%s\"'",
"%",
"(",
"unicode_me",
"(",
"key",
")",
",",
"unicode_me",
"(",
"attr",
")",
")",
")",
"key",
"=",
"escape_xml",
"(",
"key",
")",
"attr",
"=",
"escape_xml",
"(",
"attr",
")",
"# pass through if key is already valid",
"if",
"key_is_valid_xml",
"(",
"key",
")",
":",
"return",
"key",
",",
"attr",
"# prepend a lowercase n if the key is numeric",
"if",
"key",
".",
"isdigit",
"(",
")",
":",
"return",
"'n%s'",
"%",
"(",
"key",
")",
",",
"attr",
"# replace spaces with underscores if that fixes the problem",
"if",
"key_is_valid_xml",
"(",
"key",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
")",
":",
"return",
"key",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
",",
"attr",
"# key is still invalid - move it into a name attribute",
"attr",
"[",
"'name'",
"]",
"=",
"key",
"key",
"=",
"'key'",
"return",
"key",
",",
"attr"
]
| Tests an XML name and fixes it if invalid | [
"Tests",
"an",
"XML",
"name",
"and",
"fixes",
"it",
"if",
"invalid"
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L132-L155 | train |
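
Illustrative calls covering the three repair strategies; the values in the comments are the returns expected under this logic:

    from dicttoxml import make_valid_xml_name

    make_valid_xml_name('42', {})          # ('n42', {}) - numeric keys get an 'n' prefix
    make_valid_xml_name('first name', {})  # ('first_name', {}) - spaces become underscores
    make_valid_xml_name('my/key', {})      # ('key', {'name': 'my/key'}) - otherwise moved into a name attribute
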
quandyfactory/dicttoxml | dicttoxml.py | convert | def convert(obj, ids, attr_type, item_func, cdata, parent='root'):
"""Routes the elements of an object to the right function to convert them
based on their data type"""
LOG.info('Inside convert(). obj type is: "%s", obj="%s"' % (type(obj).__name__, unicode_me(obj)))
item_name = item_func(parent)
if isinstance(obj, numbers.Number) or type(obj) in (str, unicode):
return convert_kv(item_name, obj, attr_type, cdata)
if hasattr(obj, 'isoformat'):
return convert_kv(item_name, obj.isoformat(), attr_type, cdata)
if type(obj) == bool:
return convert_bool(item_name, obj, attr_type, cdata)
if obj is None:
return convert_none(item_name, '', attr_type, cdata)
if isinstance(obj, dict):
return convert_dict(obj, ids, parent, attr_type, item_func, cdata)
if isinstance(obj, collections.Iterable):
return convert_list(obj, ids, parent, attr_type, item_func, cdata)
raise TypeError('Unsupported data type: %s (%s)' % (obj, type(obj).__name__)) | python | def convert(obj, ids, attr_type, item_func, cdata, parent='root'):
"""Routes the elements of an object to the right function to convert them
based on their data type"""
LOG.info('Inside convert(). obj type is: "%s", obj="%s"' % (type(obj).__name__, unicode_me(obj)))
item_name = item_func(parent)
if isinstance(obj, numbers.Number) or type(obj) in (str, unicode):
return convert_kv(item_name, obj, attr_type, cdata)
if hasattr(obj, 'isoformat'):
return convert_kv(item_name, obj.isoformat(), attr_type, cdata)
if type(obj) == bool:
return convert_bool(item_name, obj, attr_type, cdata)
if obj is None:
return convert_none(item_name, '', attr_type, cdata)
if isinstance(obj, dict):
return convert_dict(obj, ids, parent, attr_type, item_func, cdata)
if isinstance(obj, collections.Iterable):
return convert_list(obj, ids, parent, attr_type, item_func, cdata)
raise TypeError('Unsupported data type: %s (%s)' % (obj, type(obj).__name__)) | [
"def",
"convert",
"(",
"obj",
",",
"ids",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
",",
"parent",
"=",
"'root'",
")",
":",
"LOG",
".",
"info",
"(",
"'Inside convert(). obj type is: \"%s\", obj=\"%s\"'",
"%",
"(",
"type",
"(",
"obj",
")",
".",
"__name__",
",",
"unicode_me",
"(",
"obj",
")",
")",
")",
"item_name",
"=",
"item_func",
"(",
"parent",
")",
"if",
"isinstance",
"(",
"obj",
",",
"numbers",
".",
"Number",
")",
"or",
"type",
"(",
"obj",
")",
"in",
"(",
"str",
",",
"unicode",
")",
":",
"return",
"convert_kv",
"(",
"item_name",
",",
"obj",
",",
"attr_type",
",",
"cdata",
")",
"if",
"hasattr",
"(",
"obj",
",",
"'isoformat'",
")",
":",
"return",
"convert_kv",
"(",
"item_name",
",",
"obj",
".",
"isoformat",
"(",
")",
",",
"attr_type",
",",
"cdata",
")",
"if",
"type",
"(",
"obj",
")",
"==",
"bool",
":",
"return",
"convert_bool",
"(",
"item_name",
",",
"obj",
",",
"attr_type",
",",
"cdata",
")",
"if",
"obj",
"is",
"None",
":",
"return",
"convert_none",
"(",
"item_name",
",",
"''",
",",
"attr_type",
",",
"cdata",
")",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"return",
"convert_dict",
"(",
"obj",
",",
"ids",
",",
"parent",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
")",
"if",
"isinstance",
"(",
"obj",
",",
"collections",
".",
"Iterable",
")",
":",
"return",
"convert_list",
"(",
"obj",
",",
"ids",
",",
"parent",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
")",
"raise",
"TypeError",
"(",
"'Unsupported data type: %s (%s)'",
"%",
"(",
"obj",
",",
"type",
"(",
"obj",
")",
".",
"__name__",
")",
")"
]
| Routes the elements of an object to the right function to convert them
based on their data type | [
"Routes",
"the",
"elements",
"of",
"an",
"object",
"to",
"the",
"right",
"function",
"to",
"convert",
"them",
"based",
"on",
"their",
"data",
"type"
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L168-L194 | train |
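
The dispatch rules are easiest to see from the public entry point. A sketch, assuming the dicttoxml package is installed; each value below exercises a different branch of convert():

    import datetime
    import dicttoxml

    data = {
        "title": "report",                  # str         -> convert_kv
        "count": 3,                         # number      -> convert_kv
        "done": True,                       # bool        -> convert_bool
        "when": datetime.date(2020, 1, 1),  # isoformat() -> convert_kv
        "extra": None,                      # None        -> convert_none
        "rows": [1, 2],                     # iterable    -> convert_list
    }
    print(dicttoxml.dicttoxml(data, attr_type=False).decode())
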
quandyfactory/dicttoxml | dicttoxml.py | convert_dict | def convert_dict(obj, ids, parent, attr_type, item_func, cdata):
"""Converts a dict into an XML string."""
LOG.info('Inside convert_dict(): obj type is: "%s", obj="%s"' % (
type(obj).__name__, unicode_me(obj))
)
output = []
addline = output.append
item_name = item_func(parent)
for key, val in obj.items():
LOG.info('Looping inside convert_dict(): key="%s", val="%s", type(val)="%s"' % (
unicode_me(key), unicode_me(val), type(val).__name__)
)
attr = {} if not ids else {'id': '%s' % (get_unique_id(parent)) }
key, attr = make_valid_xml_name(key, attr)
if isinstance(val, numbers.Number) or type(val) in (str, unicode):
addline(convert_kv(key, val, attr_type, attr, cdata))
elif hasattr(val, 'isoformat'): # datetime
addline(convert_kv(key, val.isoformat(), attr_type, attr, cdata))
elif type(val) == bool:
addline(convert_bool(key, val, attr_type, attr, cdata))
elif isinstance(val, dict):
if attr_type:
attr['type'] = get_xml_type(val)
addline('<%s%s>%s</%s>' % (
key, make_attrstring(attr),
convert_dict(val, ids, key, attr_type, item_func, cdata),
key
)
)
elif isinstance(val, collections.Iterable):
if attr_type:
attr['type'] = get_xml_type(val)
addline('<%s%s>%s</%s>' % (
key,
make_attrstring(attr),
convert_list(val, ids, key, attr_type, item_func, cdata),
key
)
)
elif val is None:
addline(convert_none(key, val, attr_type, attr, cdata))
else:
raise TypeError('Unsupported data type: %s (%s)' % (
val, type(val).__name__)
)
return ''.join(output) | python | def convert_dict(obj, ids, parent, attr_type, item_func, cdata):
"""Converts a dict into an XML string."""
LOG.info('Inside convert_dict(): obj type is: "%s", obj="%s"' % (
type(obj).__name__, unicode_me(obj))
)
output = []
addline = output.append
item_name = item_func(parent)
for key, val in obj.items():
LOG.info('Looping inside convert_dict(): key="%s", val="%s", type(val)="%s"' % (
unicode_me(key), unicode_me(val), type(val).__name__)
)
attr = {} if not ids else {'id': '%s' % (get_unique_id(parent)) }
key, attr = make_valid_xml_name(key, attr)
if isinstance(val, numbers.Number) or type(val) in (str, unicode):
addline(convert_kv(key, val, attr_type, attr, cdata))
elif hasattr(val, 'isoformat'): # datetime
addline(convert_kv(key, val.isoformat(), attr_type, attr, cdata))
elif type(val) == bool:
addline(convert_bool(key, val, attr_type, attr, cdata))
elif isinstance(val, dict):
if attr_type:
attr['type'] = get_xml_type(val)
addline('<%s%s>%s</%s>' % (
key, make_attrstring(attr),
convert_dict(val, ids, key, attr_type, item_func, cdata),
key
)
)
elif isinstance(val, collections.Iterable):
if attr_type:
attr['type'] = get_xml_type(val)
addline('<%s%s>%s</%s>' % (
key,
make_attrstring(attr),
convert_list(val, ids, key, attr_type, item_func, cdata),
key
)
)
elif val is None:
addline(convert_none(key, val, attr_type, attr, cdata))
else:
raise TypeError('Unsupported data type: %s (%s)' % (
val, type(val).__name__)
)
return ''.join(output) | [
"def",
"convert_dict",
"(",
"obj",
",",
"ids",
",",
"parent",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
")",
":",
"LOG",
".",
"info",
"(",
"'Inside convert_dict(): obj type is: \"%s\", obj=\"%s\"'",
"%",
"(",
"type",
"(",
"obj",
")",
".",
"__name__",
",",
"unicode_me",
"(",
"obj",
")",
")",
")",
"output",
"=",
"[",
"]",
"addline",
"=",
"output",
".",
"append",
"item_name",
"=",
"item_func",
"(",
"parent",
")",
"for",
"key",
",",
"val",
"in",
"obj",
".",
"items",
"(",
")",
":",
"LOG",
".",
"info",
"(",
"'Looping inside convert_dict(): key=\"%s\", val=\"%s\", type(val)=\"%s\"'",
"%",
"(",
"unicode_me",
"(",
"key",
")",
",",
"unicode_me",
"(",
"val",
")",
",",
"type",
"(",
"val",
")",
".",
"__name__",
")",
")",
"attr",
"=",
"{",
"}",
"if",
"not",
"ids",
"else",
"{",
"'id'",
":",
"'%s'",
"%",
"(",
"get_unique_id",
"(",
"parent",
")",
")",
"}",
"key",
",",
"attr",
"=",
"make_valid_xml_name",
"(",
"key",
",",
"attr",
")",
"if",
"isinstance",
"(",
"val",
",",
"numbers",
".",
"Number",
")",
"or",
"type",
"(",
"val",
")",
"in",
"(",
"str",
",",
"unicode",
")",
":",
"addline",
"(",
"convert_kv",
"(",
"key",
",",
"val",
",",
"attr_type",
",",
"attr",
",",
"cdata",
")",
")",
"elif",
"hasattr",
"(",
"val",
",",
"'isoformat'",
")",
":",
"# datetime",
"addline",
"(",
"convert_kv",
"(",
"key",
",",
"val",
".",
"isoformat",
"(",
")",
",",
"attr_type",
",",
"attr",
",",
"cdata",
")",
")",
"elif",
"type",
"(",
"val",
")",
"==",
"bool",
":",
"addline",
"(",
"convert_bool",
"(",
"key",
",",
"val",
",",
"attr_type",
",",
"attr",
",",
"cdata",
")",
")",
"elif",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"if",
"attr_type",
":",
"attr",
"[",
"'type'",
"]",
"=",
"get_xml_type",
"(",
"val",
")",
"addline",
"(",
"'<%s%s>%s</%s>'",
"%",
"(",
"key",
",",
"make_attrstring",
"(",
"attr",
")",
",",
"convert_dict",
"(",
"val",
",",
"ids",
",",
"key",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
")",
",",
"key",
")",
")",
"elif",
"isinstance",
"(",
"val",
",",
"collections",
".",
"Iterable",
")",
":",
"if",
"attr_type",
":",
"attr",
"[",
"'type'",
"]",
"=",
"get_xml_type",
"(",
"val",
")",
"addline",
"(",
"'<%s%s>%s</%s>'",
"%",
"(",
"key",
",",
"make_attrstring",
"(",
"attr",
")",
",",
"convert_list",
"(",
"val",
",",
"ids",
",",
"key",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
")",
",",
"key",
")",
")",
"elif",
"val",
"is",
"None",
":",
"addline",
"(",
"convert_none",
"(",
"key",
",",
"val",
",",
"attr_type",
",",
"attr",
",",
"cdata",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unsupported data type: %s (%s)'",
"%",
"(",
"val",
",",
"type",
"(",
"val",
")",
".",
"__name__",
")",
")",
"return",
"''",
".",
"join",
"(",
"output",
")"
]
| Converts a dict into an XML string. | [
"Converts",
"a",
"dict",
"into",
"an",
"XML",
"string",
"."
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L197-L254 | train |
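
A sketch of how nested mappings are rendered, assuming the dicttoxml package is installed; the expected output is wrapped across two comment lines for readability:

    import dicttoxml

    person = {"person": {"name": "Alice", "address": {"city": "Hamilton"}}}
    print(dicttoxml.dicttoxml(person, attr_type=False).decode())
    # <?xml version="1.0" encoding="UTF-8" ?><root><person><name>Alice</name>
    # <address><city>Hamilton</city></address></person></root>
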
quandyfactory/dicttoxml | dicttoxml.py | convert_list | def convert_list(items, ids, parent, attr_type, item_func, cdata):
"""Converts a list into an XML string."""
LOG.info('Inside convert_list()')
output = []
addline = output.append
item_name = item_func(parent)
if ids:
this_id = get_unique_id(parent)
for i, item in enumerate(items):
LOG.info('Looping inside convert_list(): item="%s", item_name="%s", type="%s"' % (
unicode_me(item), item_name, type(item).__name__)
)
attr = {} if not ids else { 'id': '%s_%s' % (this_id, i+1) }
if isinstance(item, numbers.Number) or type(item) in (str, unicode):
addline(convert_kv(item_name, item, attr_type, attr, cdata))
elif hasattr(item, 'isoformat'): # datetime
addline(convert_kv(item_name, item.isoformat(), attr_type, attr, cdata))
elif type(item) == bool:
addline(convert_bool(item_name, item, attr_type, attr, cdata))
elif isinstance(item, dict):
if not attr_type:
addline('<%s>%s</%s>' % (
item_name,
convert_dict(item, ids, parent, attr_type, item_func, cdata),
item_name,
)
)
else:
addline('<%s type="dict">%s</%s>' % (
item_name,
convert_dict(item, ids, parent, attr_type, item_func, cdata),
item_name,
)
)
elif isinstance(item, collections.Iterable):
if not attr_type:
addline('<%s %s>%s</%s>' % (
item_name, make_attrstring(attr),
convert_list(item, ids, item_name, attr_type, item_func, cdata),
item_name,
)
)
else:
addline('<%s type="list"%s>%s</%s>' % (
item_name, make_attrstring(attr),
convert_list(item, ids, item_name, attr_type, item_func, cdata),
item_name,
)
)
elif item is None:
addline(convert_none(item_name, None, attr_type, attr, cdata))
else:
raise TypeError('Unsupported data type: %s (%s)' % (
item, type(item).__name__)
)
return ''.join(output) | python | def convert_list(items, ids, parent, attr_type, item_func, cdata):
"""Converts a list into an XML string."""
LOG.info('Inside convert_list()')
output = []
addline = output.append
item_name = item_func(parent)
if ids:
this_id = get_unique_id(parent)
for i, item in enumerate(items):
LOG.info('Looping inside convert_list(): item="%s", item_name="%s", type="%s"' % (
unicode_me(item), item_name, type(item).__name__)
)
attr = {} if not ids else { 'id': '%s_%s' % (this_id, i+1) }
if isinstance(item, numbers.Number) or type(item) in (str, unicode):
addline(convert_kv(item_name, item, attr_type, attr, cdata))
elif hasattr(item, 'isoformat'): # datetime
addline(convert_kv(item_name, item.isoformat(), attr_type, attr, cdata))
elif type(item) == bool:
addline(convert_bool(item_name, item, attr_type, attr, cdata))
elif isinstance(item, dict):
if not attr_type:
addline('<%s>%s</%s>' % (
item_name,
convert_dict(item, ids, parent, attr_type, item_func, cdata),
item_name,
)
)
else:
addline('<%s type="dict">%s</%s>' % (
item_name,
convert_dict(item, ids, parent, attr_type, item_func, cdata),
item_name,
)
)
elif isinstance(item, collections.Iterable):
if not attr_type:
addline('<%s %s>%s</%s>' % (
item_name, make_attrstring(attr),
convert_list(item, ids, item_name, attr_type, item_func, cdata),
item_name,
)
)
else:
addline('<%s type="list"%s>%s</%s>' % (
item_name, make_attrstring(attr),
convert_list(item, ids, item_name, attr_type, item_func, cdata),
item_name,
)
)
elif item is None:
addline(convert_none(item_name, None, attr_type, attr, cdata))
else:
raise TypeError('Unsupported data type: %s (%s)' % (
item, type(item).__name__)
)
return ''.join(output) | [
"def",
"convert_list",
"(",
"items",
",",
"ids",
",",
"parent",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
")",
":",
"LOG",
".",
"info",
"(",
"'Inside convert_list()'",
")",
"output",
"=",
"[",
"]",
"addline",
"=",
"output",
".",
"append",
"item_name",
"=",
"item_func",
"(",
"parent",
")",
"if",
"ids",
":",
"this_id",
"=",
"get_unique_id",
"(",
"parent",
")",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"items",
")",
":",
"LOG",
".",
"info",
"(",
"'Looping inside convert_list(): item=\"%s\", item_name=\"%s\", type=\"%s\"'",
"%",
"(",
"unicode_me",
"(",
"item",
")",
",",
"item_name",
",",
"type",
"(",
"item",
")",
".",
"__name__",
")",
")",
"attr",
"=",
"{",
"}",
"if",
"not",
"ids",
"else",
"{",
"'id'",
":",
"'%s_%s'",
"%",
"(",
"this_id",
",",
"i",
"+",
"1",
")",
"}",
"if",
"isinstance",
"(",
"item",
",",
"numbers",
".",
"Number",
")",
"or",
"type",
"(",
"item",
")",
"in",
"(",
"str",
",",
"unicode",
")",
":",
"addline",
"(",
"convert_kv",
"(",
"item_name",
",",
"item",
",",
"attr_type",
",",
"attr",
",",
"cdata",
")",
")",
"elif",
"hasattr",
"(",
"item",
",",
"'isoformat'",
")",
":",
"# datetime",
"addline",
"(",
"convert_kv",
"(",
"item_name",
",",
"item",
".",
"isoformat",
"(",
")",
",",
"attr_type",
",",
"attr",
",",
"cdata",
")",
")",
"elif",
"type",
"(",
"item",
")",
"==",
"bool",
":",
"addline",
"(",
"convert_bool",
"(",
"item_name",
",",
"item",
",",
"attr_type",
",",
"attr",
",",
"cdata",
")",
")",
"elif",
"isinstance",
"(",
"item",
",",
"dict",
")",
":",
"if",
"not",
"attr_type",
":",
"addline",
"(",
"'<%s>%s</%s>'",
"%",
"(",
"item_name",
",",
"convert_dict",
"(",
"item",
",",
"ids",
",",
"parent",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
")",
",",
"item_name",
",",
")",
")",
"else",
":",
"addline",
"(",
"'<%s type=\"dict\">%s</%s>'",
"%",
"(",
"item_name",
",",
"convert_dict",
"(",
"item",
",",
"ids",
",",
"parent",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
")",
",",
"item_name",
",",
")",
")",
"elif",
"isinstance",
"(",
"item",
",",
"collections",
".",
"Iterable",
")",
":",
"if",
"not",
"attr_type",
":",
"addline",
"(",
"'<%s %s>%s</%s>'",
"%",
"(",
"item_name",
",",
"make_attrstring",
"(",
"attr",
")",
",",
"convert_list",
"(",
"item",
",",
"ids",
",",
"item_name",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
")",
",",
"item_name",
",",
")",
")",
"else",
":",
"addline",
"(",
"'<%s type=\"list\"%s>%s</%s>'",
"%",
"(",
"item_name",
",",
"make_attrstring",
"(",
"attr",
")",
",",
"convert_list",
"(",
"item",
",",
"ids",
",",
"item_name",
",",
"attr_type",
",",
"item_func",
",",
"cdata",
")",
",",
"item_name",
",",
")",
")",
"elif",
"item",
"is",
"None",
":",
"addline",
"(",
"convert_none",
"(",
"item_name",
",",
"None",
",",
"attr_type",
",",
"attr",
",",
"cdata",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unsupported data type: %s (%s)'",
"%",
"(",
"item",
",",
"type",
"(",
"item",
")",
".",
"__name__",
")",
")",
"return",
"''",
".",
"join",
"(",
"output",
")"
]
| Converts a list into an XML string. | [
"Converts",
"a",
"list",
"into",
"an",
"XML",
"string",
"."
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L257-L321 | train |
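
List members default to <item> elements; the item_func hook lets callers derive the per-member tag from the parent name instead. A sketch assuming the dicttoxml package is installed:

    import dicttoxml

    dicttoxml.dicttoxml({"numbers": [1, 2, 3]}, attr_type=False)
    # ...<numbers><item>1</item><item>2</item><item>3</item></numbers>...

    dicttoxml.dicttoxml({"numbers": [1, 2]}, attr_type=False,
                        item_func=lambda parent: parent[:-1])
    # ...<numbers><number>1</number><number>2</number></numbers>...
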
quandyfactory/dicttoxml | dicttoxml.py | convert_kv | def convert_kv(key, val, attr_type, attr={}, cdata=False):
"""Converts a number or string into an XML element"""
LOG.info('Inside convert_kv(): key="%s", val="%s", type(val) is: "%s"' % (
unicode_me(key), unicode_me(val), type(val).__name__)
)
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr['type'] = get_xml_type(val)
attrstring = make_attrstring(attr)
return '<%s%s>%s</%s>' % (
key, attrstring,
wrap_cdata(val) if cdata == True else escape_xml(val),
key
) | python | def convert_kv(key, val, attr_type, attr={}, cdata=False):
"""Converts a number or string into an XML element"""
LOG.info('Inside convert_kv(): key="%s", val="%s", type(val) is: "%s"' % (
unicode_me(key), unicode_me(val), type(val).__name__)
)
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr['type'] = get_xml_type(val)
attrstring = make_attrstring(attr)
return '<%s%s>%s</%s>' % (
key, attrstring,
wrap_cdata(val) if cdata == True else escape_xml(val),
key
) | [
"def",
"convert_kv",
"(",
"key",
",",
"val",
",",
"attr_type",
",",
"attr",
"=",
"{",
"}",
",",
"cdata",
"=",
"False",
")",
":",
"LOG",
".",
"info",
"(",
"'Inside convert_kv(): key=\"%s\", val=\"%s\", type(val) is: \"%s\"'",
"%",
"(",
"unicode_me",
"(",
"key",
")",
",",
"unicode_me",
"(",
"val",
")",
",",
"type",
"(",
"val",
")",
".",
"__name__",
")",
")",
"key",
",",
"attr",
"=",
"make_valid_xml_name",
"(",
"key",
",",
"attr",
")",
"if",
"attr_type",
":",
"attr",
"[",
"'type'",
"]",
"=",
"get_xml_type",
"(",
"val",
")",
"attrstring",
"=",
"make_attrstring",
"(",
"attr",
")",
"return",
"'<%s%s>%s</%s>'",
"%",
"(",
"key",
",",
"attrstring",
",",
"wrap_cdata",
"(",
"val",
")",
"if",
"cdata",
"==",
"True",
"else",
"escape_xml",
"(",
"val",
")",
",",
"key",
")"
]
| Converts a number or string into an XML element | [
"Converts",
"a",
"number",
"or",
"string",
"into",
"an",
"XML",
"element"
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L324-L339 | train |
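
Two illustrative calls; with cdata=True the value is wrapped in a CDATA section instead of entity-escaped:

    from dicttoxml import convert_kv

    convert_kv('name', 'Alice', attr_type=True)
    # '<name type="str">Alice</name>'
    convert_kv('note', 'a < b', attr_type=False, cdata=True)
    # '<note><![CDATA[a < b]]></note>'
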
quandyfactory/dicttoxml | dicttoxml.py | convert_bool | def convert_bool(key, val, attr_type, attr={}, cdata=False):
"""Converts a boolean into an XML element"""
LOG.info('Inside convert_bool(): key="%s", val="%s", type(val) is: "%s"' % (
unicode_me(key), unicode_me(val), type(val).__name__)
)
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr['type'] = get_xml_type(val)
attrstring = make_attrstring(attr)
return '<%s%s>%s</%s>' % (key, attrstring, unicode(val).lower(), key) | python | def convert_bool(key, val, attr_type, attr={}, cdata=False):
"""Converts a boolean into an XML element"""
LOG.info('Inside convert_bool(): key="%s", val="%s", type(val) is: "%s"' % (
unicode_me(key), unicode_me(val), type(val).__name__)
)
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr['type'] = get_xml_type(val)
attrstring = make_attrstring(attr)
return '<%s%s>%s</%s>' % (key, attrstring, unicode(val).lower(), key) | [
"def",
"convert_bool",
"(",
"key",
",",
"val",
",",
"attr_type",
",",
"attr",
"=",
"{",
"}",
",",
"cdata",
"=",
"False",
")",
":",
"LOG",
".",
"info",
"(",
"'Inside convert_bool(): key=\"%s\", val=\"%s\", type(val) is: \"%s\"'",
"%",
"(",
"unicode_me",
"(",
"key",
")",
",",
"unicode_me",
"(",
"val",
")",
",",
"type",
"(",
"val",
")",
".",
"__name__",
")",
")",
"key",
",",
"attr",
"=",
"make_valid_xml_name",
"(",
"key",
",",
"attr",
")",
"if",
"attr_type",
":",
"attr",
"[",
"'type'",
"]",
"=",
"get_xml_type",
"(",
"val",
")",
"attrstring",
"=",
"make_attrstring",
"(",
"attr",
")",
"return",
"'<%s%s>%s</%s>'",
"%",
"(",
"key",
",",
"attrstring",
",",
"unicode",
"(",
"val",
")",
".",
"lower",
"(",
")",
",",
"key",
")"
]
| Converts a boolean into an XML element | [
"Converts",
"a",
"boolean",
"into",
"an",
"XML",
"element"
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L342-L353 | train |
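
Booleans are lower-cased so the output matches the XML Schema literals true/false:

    from dicttoxml import convert_bool

    convert_bool('active', True, attr_type=False)  # '<active>true</active>'
    convert_bool('active', False, attr_type=True)  # '<active type="bool">false</active>'
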
quandyfactory/dicttoxml | dicttoxml.py | convert_none | def convert_none(key, val, attr_type, attr={}, cdata=False):
"""Converts a null value into an XML element"""
LOG.info('Inside convert_none(): key="%s"' % (unicode_me(key)))
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr['type'] = get_xml_type(val)
attrstring = make_attrstring(attr)
return '<%s%s></%s>' % (key, attrstring, key) | python | def convert_none(key, val, attr_type, attr={}, cdata=False):
"""Converts a null value into an XML element"""
LOG.info('Inside convert_none(): key="%s"' % (unicode_me(key)))
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr['type'] = get_xml_type(val)
attrstring = make_attrstring(attr)
return '<%s%s></%s>' % (key, attrstring, key) | [
"def",
"convert_none",
"(",
"key",
",",
"val",
",",
"attr_type",
",",
"attr",
"=",
"{",
"}",
",",
"cdata",
"=",
"False",
")",
":",
"LOG",
".",
"info",
"(",
"'Inside convert_none(): key=\"%s\"'",
"%",
"(",
"unicode_me",
"(",
"key",
")",
")",
")",
"key",
",",
"attr",
"=",
"make_valid_xml_name",
"(",
"key",
",",
"attr",
")",
"if",
"attr_type",
":",
"attr",
"[",
"'type'",
"]",
"=",
"get_xml_type",
"(",
"val",
")",
"attrstring",
"=",
"make_attrstring",
"(",
"attr",
")",
"return",
"'<%s%s></%s>'",
"%",
"(",
"key",
",",
"attrstring",
",",
"key",
")"
]
| Converts a null value into an XML element | [
"Converts",
"a",
"null",
"value",
"into",
"an",
"XML",
"element"
]
| 2016fe9817ad03b26aa5f1a475f5b79ad6757b96 | https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L356-L365 | train |
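
None values become empty elements; with attr_type=True they are tagged type="null":

    from dicttoxml import convert_none

    convert_none('middle_name', None, attr_type=False)  # '<middle_name></middle_name>'
    convert_none('middle_name', None, attr_type=True)   # '<middle_name type="null"></middle_name>'
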
gruns/icecream | icecream/icecream.py | getCallSourceLines | def getCallSourceLines(callFrame, icNames, icMethod):
"""Raises NoSourceAvailableError."""
code = callFrame.f_code
# inspect.getblock(), which is called internally by inspect.getsource(),
# only returns the first line of <code> when <code> represents a top-level
# module, not the entire module's source, as needed here. The
#
# if ismodule(object):
# return lines, 0
#
# check in inspect.py doesn't account for code objects of modules, only
# actual module objects themselves.
#
# A workaround is to call findsource() directly on code objects of modules,
# which bypasses getblock().
#
# Also, the errors raised differ between Python2 and Python3 . In Python2,
# inspect.findsource() and inspect.getsource() raise IOErrors. In Python3,
# inspect.findsource() and inspect.getsource() raise OSErrors.
try:
if code.co_name == '<module>': # Module -> use workaround above.
parentBlockStartLine = 1
lines = inspect.findsource(code)[0] # Raises [IO/OS]Error.
parentBlockSource = ''.join(lines)
else: # Not a module -> use inspect.getsource() normally.
parentBlockStartLine = code.co_firstlineno
parentBlockSource = inspect.getsource(code) # Raises [IO/OS]Error.
except (IOError, OSError) as err:
if 'source code' in err.args[0]:
raise NoSourceAvailableError()
else:
raise
lineno = inspect.getframeinfo(callFrame)[1]
linenoRelativeToParent = lineno - parentBlockStartLine + 1
# There could be multiple ic() calls on the same line(s), like
#
# ic(1); ic(2); ic(3,
# 4,
# 5); ic(6)
#
# so include all of them. Which invocation is the appropriate one will be
# determined later via bytecode offset calculations.
#
# TODO(grun): Support invocations of ic() where ic() is an attribute chain
# in the AST. For example, support
#
# import icecream
# icecream.ic()
#
# and
#
# class Foo:
# blah = ic
# Foo.blah()
#
parentBlockSource = textwrap.dedent(parentBlockSource)
potentialCalls = [
node for node in ast.walk(ast.parse(parentBlockSource))
if isAstNodeIceCreamCall(node, icNames, icMethod) and
linenoRelativeToParent in getAllLineNumbersOfAstNode(node)]
if not potentialCalls:
# TODO(grun): Add note that to NoSourceAvailableError that this
# situation can occur when the underlying source changed during
# execution.
raise NoSourceAvailableError()
endLine = lineno - parentBlockStartLine + 1
startLine = min(call.lineno for call in potentialCalls)
lines = parentBlockSource.splitlines()[startLine - 1: endLine]
# inspect's lineno attribute doesn't point to the closing right parenthesis
# if the closing right parenthesis is on its own line without any
# arguments. E.g.
#
# ic(1,
# 2 <--- inspect's reported lineno.
# ) <--- Should be the reported lineno.
#
# Detect this situation and add the missing right parenthesis.
if isCallStrMissingClosingRightParenthesis('\n'.join(lines).strip()):
lines.append(')')
source = stripCommentsAndNewlines('\n'.join(lines)).strip()
absoluteStartLineNum = parentBlockStartLine + startLine - 1
startLineOffset = calculateLineOffsets(code)[absoluteStartLineNum]
return source, absoluteStartLineNum, startLineOffset | python | def getCallSourceLines(callFrame, icNames, icMethod):
"""Raises NoSourceAvailableError."""
code = callFrame.f_code
# inspect.getblock(), which is called internally by inspect.getsource(),
# only returns the first line of <code> when <code> represents a top-level
# module, not the entire module's source, as needed here. The
#
# if ismodule(object):
# return lines, 0
#
# check in inspect.py doesn't account for code objects of modules, only
# actual module objects themselves.
#
# A workaround is to call findsource() directly on code objects of modules,
# which bypasses getblock().
#
# Also, the errors raised differ between Python2 and Python3 . In Python2,
# inspect.findsource() and inspect.getsource() raise IOErrors. In Python3,
# inspect.findsource() and inspect.getsource() raise OSErrors.
try:
if code.co_name == '<module>': # Module -> use workaround above.
parentBlockStartLine = 1
lines = inspect.findsource(code)[0] # Raises [IO/OS]Error.
parentBlockSource = ''.join(lines)
else: # Not a module -> use inspect.getsource() normally.
parentBlockStartLine = code.co_firstlineno
parentBlockSource = inspect.getsource(code) # Raises [IO/OS]Error.
except (IOError, OSError) as err:
if 'source code' in err.args[0]:
raise NoSourceAvailableError()
else:
raise
lineno = inspect.getframeinfo(callFrame)[1]
linenoRelativeToParent = lineno - parentBlockStartLine + 1
# There could be multiple ic() calls on the same line(s), like
#
# ic(1); ic(2); ic(3,
# 4,
# 5); ic(6)
#
# so include all of them. Which invocation is the appropriate one will be
# determined later via bytecode offset calculations.
#
# TODO(grun): Support invocations of ic() where ic() is an attribute chain
# in the AST. For example, support
#
# import icecream
# icecream.ic()
#
# and
#
# class Foo:
# blah = ic
# Foo.blah()
#
parentBlockSource = textwrap.dedent(parentBlockSource)
potentialCalls = [
node for node in ast.walk(ast.parse(parentBlockSource))
if isAstNodeIceCreamCall(node, icNames, icMethod) and
linenoRelativeToParent in getAllLineNumbersOfAstNode(node)]
if not potentialCalls:
# TODO(grun): Add note that to NoSourceAvailableError that this
# situation can occur when the underlying source changed during
# execution.
raise NoSourceAvailableError()
endLine = lineno - parentBlockStartLine + 1
startLine = min(call.lineno for call in potentialCalls)
lines = parentBlockSource.splitlines()[startLine - 1: endLine]
# inspect's lineno attribute doesn't point to the closing right parenthesis
# if the closing right parenthesis is on its own line without any
# arguments. E.g.
#
# ic(1,
# 2 <--- inspect's reported lineno.
# ) <--- Should be the reported lineno.
#
# Detect this situation and add the missing right parenthesis.
if isCallStrMissingClosingRightParenthesis('\n'.join(lines).strip()):
lines.append(')')
source = stripCommentsAndNewlines('\n'.join(lines)).strip()
absoluteStartLineNum = parentBlockStartLine + startLine - 1
startLineOffset = calculateLineOffsets(code)[absoluteStartLineNum]
return source, absoluteStartLineNum, startLineOffset | [
"def",
"getCallSourceLines",
"(",
"callFrame",
",",
"icNames",
",",
"icMethod",
")",
":",
"code",
"=",
"callFrame",
".",
"f_code",
"# inspect.getblock(), which is called internally by inspect.getsource(),",
"# only returns the first line of <code> when <code> represents a top-level",
"# module, not the entire module's source, as needed here. The",
"#",
"# if ismodule(object):",
"# return lines, 0",
"#",
"# check in inspect.py doesn't account for code objects of modules, only",
"# actual module objects themselves.",
"#",
"# A workaround is to call findsource() directly on code objects of modules,",
"# which bypasses getblock().",
"#",
"# Also, the errors raised differ between Python2 and Python3 . In Python2,",
"# inspect.findsource() and inspect.getsource() raise IOErrors. In Python3,",
"# inspect.findsource() and inspect.getsource() raise OSErrors.",
"try",
":",
"if",
"code",
".",
"co_name",
"==",
"'<module>'",
":",
"# Module -> use workaround above.",
"parentBlockStartLine",
"=",
"1",
"lines",
"=",
"inspect",
".",
"findsource",
"(",
"code",
")",
"[",
"0",
"]",
"# Raises [IO/OS]Error.",
"parentBlockSource",
"=",
"''",
".",
"join",
"(",
"lines",
")",
"else",
":",
"# Not a module -> use inspect.getsource() normally.",
"parentBlockStartLine",
"=",
"code",
".",
"co_firstlineno",
"parentBlockSource",
"=",
"inspect",
".",
"getsource",
"(",
"code",
")",
"# Raises [IO/OS]Error.",
"except",
"(",
"IOError",
",",
"OSError",
")",
"as",
"err",
":",
"if",
"'source code'",
"in",
"err",
".",
"args",
"[",
"0",
"]",
":",
"raise",
"NoSourceAvailableError",
"(",
")",
"else",
":",
"raise",
"lineno",
"=",
"inspect",
".",
"getframeinfo",
"(",
"callFrame",
")",
"[",
"1",
"]",
"linenoRelativeToParent",
"=",
"lineno",
"-",
"parentBlockStartLine",
"+",
"1",
"# There could be multiple ic() calls on the same line(s), like",
"#",
"# ic(1); ic(2); ic(3,",
"# 4,",
"# 5); ic(6)",
"#",
"# so include all of them. Which invocation is the appropriate one will be",
"# determined later via bytecode offset calculations.",
"#",
"# TODO(grun): Support invocations of ic() where ic() is an attribute chain",
"# in the AST. For example, support",
"#",
"# import icecream",
"# icecream.ic()",
"#",
"# and",
"#",
"# class Foo:",
"# blah = ic",
"# Foo.blah()",
"#",
"parentBlockSource",
"=",
"textwrap",
".",
"dedent",
"(",
"parentBlockSource",
")",
"potentialCalls",
"=",
"[",
"node",
"for",
"node",
"in",
"ast",
".",
"walk",
"(",
"ast",
".",
"parse",
"(",
"parentBlockSource",
")",
")",
"if",
"isAstNodeIceCreamCall",
"(",
"node",
",",
"icNames",
",",
"icMethod",
")",
"and",
"linenoRelativeToParent",
"in",
"getAllLineNumbersOfAstNode",
"(",
"node",
")",
"]",
"if",
"not",
"potentialCalls",
":",
"# TODO(grun): Add note that to NoSourceAvailableError that this",
"# situation can occur when the underlying source changed during",
"# execution.",
"raise",
"NoSourceAvailableError",
"(",
")",
"endLine",
"=",
"lineno",
"-",
"parentBlockStartLine",
"+",
"1",
"startLine",
"=",
"min",
"(",
"call",
".",
"lineno",
"for",
"call",
"in",
"potentialCalls",
")",
"lines",
"=",
"parentBlockSource",
".",
"splitlines",
"(",
")",
"[",
"startLine",
"-",
"1",
":",
"endLine",
"]",
"# inspect's lineno attribute doesn't point to the closing right parenthesis",
"# if the closing right parenthesis is on its own line without any",
"# arguments. E.g.",
"#",
"# ic(1,",
"# 2 <--- inspect's reported lineno.",
"# ) <--- Should be the reported lineno.",
"#",
"# Detect this situation and add the missing right parenthesis.",
"if",
"isCallStrMissingClosingRightParenthesis",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
")",
".",
"strip",
"(",
")",
")",
":",
"lines",
".",
"append",
"(",
"')'",
")",
"source",
"=",
"stripCommentsAndNewlines",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
")",
")",
".",
"strip",
"(",
")",
"absoluteStartLineNum",
"=",
"parentBlockStartLine",
"+",
"startLine",
"-",
"1",
"startLineOffset",
"=",
"calculateLineOffsets",
"(",
"code",
")",
"[",
"absoluteStartLineNum",
"]",
"return",
"source",
",",
"absoluteStartLineNum",
",",
"startLineOffset"
]
| Raises NoSourceAvailableError. | [
"Raises",
"NoSourceAvailableError",
"."
]
| cb4f3d50ec747637721fe58b80f2cc2a2baedabf | https://github.com/gruns/icecream/blob/cb4f3d50ec747637721fe58b80f2cc2a2baedabf/icecream/icecream.py#L275-L366 | train |
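
getCallSourceLines() is internal to icecream; what it enables is the argument labelling users see from ic(). A minimal sketch of the public API, assuming the icecream package is installed (printed output is approximate):

    from icecream import ic

    def halve(n):
        return n / 2

    ic(halve(6))     # prints something like: ic| halve(6): 3.0
    ic(1); ic(2, 3)  # several calls on one line are told apart by bytecode offset
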
Vaelor/python-mattermost-driver | src/mattermostdriver/driver.py | Driver.init_websocket | def init_websocket(self, event_handler, websocket_cls=Websocket):
"""
Will initialize the websocket connection to the mattermost server.
This should be run after login(), because the websocket needs to make
an authentification.
See https://api.mattermost.com/v4/#tag/WebSocket for which
websocket events mattermost sends.
Example of a really simple event_handler function
.. code:: python
@asyncio.coroutine
def my_event_handler(message):
print(message)
:param event_handler: The function to handle the websocket events. Takes one argument.
:type event_handler: Function(message)
:return: The event loop
"""
self.websocket = websocket_cls(self.options, self.client.token)
loop = asyncio.get_event_loop()
loop.run_until_complete(self.websocket.connect(event_handler))
return loop | python | def init_websocket(self, event_handler, websocket_cls=Websocket):
"""
Will initialize the websocket connection to the mattermost server.
This should be run after login(), because the websocket needs to make
an authentification.
See https://api.mattermost.com/v4/#tag/WebSocket for which
websocket events mattermost sends.
Example of a really simple event_handler function
.. code:: python
@asyncio.coroutine
def my_event_handler(message):
print(message)
:param event_handler: The function to handle the websocket events. Takes one argument.
:type event_handler: Function(message)
:return: The event loop
"""
self.websocket = websocket_cls(self.options, self.client.token)
loop = asyncio.get_event_loop()
loop.run_until_complete(self.websocket.connect(event_handler))
return loop | [
"def",
"init_websocket",
"(",
"self",
",",
"event_handler",
",",
"websocket_cls",
"=",
"Websocket",
")",
":",
"self",
".",
"websocket",
"=",
"websocket_cls",
"(",
"self",
".",
"options",
",",
"self",
".",
"client",
".",
"token",
")",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"loop",
".",
"run_until_complete",
"(",
"self",
".",
"websocket",
".",
"connect",
"(",
"event_handler",
")",
")",
"return",
"loop"
]
| Will initialize the websocket connection to the mattermost server.
This should be run after login(), because the websocket needs to make
an authentication.
See https://api.mattermost.com/v4/#tag/WebSocket for which
websocket events mattermost sends.
Example of a really simple event_handler function
.. code:: python
@asyncio.coroutine
def my_event_handler(message):
print(message)
:param event_handler: The function to handle the websocket events. Takes one argument.
:type event_handler: Function(message)
:return: The event loop | [
"Will",
"initialize",
"the",
"websocket",
"connection",
"to",
"the",
"mattermost",
"server",
"."
]
| ad1a936130096e39c2e1b76d78913e5950e06ca5 | https://github.com/Vaelor/python-mattermost-driver/blob/ad1a936130096e39c2e1b76d78913e5950e06ca5/src/mattermostdriver/driver.py#L114-L140 | train |
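
A hedged end-to-end sketch: the server URL and credentials are placeholders, and the option keys mirror the ones this module reads ('url', 'login_id', 'password', 'scheme', 'port'). The handler follows the coroutine style shown in the docstring:

    import asyncio
    from mattermostdriver import Driver

    driver = Driver({
        'url': 'chat.example.com',      # placeholder server
        'login_id': 'bot@example.com',  # placeholder credentials
        'password': 'secret',
        'scheme': 'https',
        'port': 443,
    })

    @asyncio.coroutine
    def my_event_handler(message):
        print(message)  # raw JSON string for every websocket event

    driver.login()
    driver.init_websocket(my_event_handler)  # blocks, running the asyncio event loop
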
Vaelor/python-mattermost-driver | src/mattermostdriver/driver.py | Driver.login | def login(self):
"""
Logs the user in.
The log in information is saved in the client
- userid
- username
- cookies
:return: The raw response from the request
"""
if self.options['token']:
self.client.token = self.options['token']
result = self.users.get_user('me')
else:
response = self.users.login_user({
'login_id': self.options['login_id'],
'password': self.options['password'],
'token': self.options['mfa_token']
})
if response.status_code == 200:
self.client.token = response.headers['Token']
self.client.cookies = response.cookies
try:
result = response.json()
except ValueError:
log.debug('Could not convert response to json, returning raw response')
result = response
log.debug(result)
if 'id' in result:
self.client.userid = result['id']
if 'username' in result:
self.client.username = result['username']
return result | python | def login(self):
"""
Logs the user in.
The log in information is saved in the client
- userid
- username
- cookies
:return: The raw response from the request
"""
if self.options['token']:
self.client.token = self.options['token']
result = self.users.get_user('me')
else:
response = self.users.login_user({
'login_id': self.options['login_id'],
'password': self.options['password'],
'token': self.options['mfa_token']
})
if response.status_code == 200:
self.client.token = response.headers['Token']
self.client.cookies = response.cookies
try:
result = response.json()
except ValueError:
log.debug('Could not convert response to json, returning raw response')
result = response
log.debug(result)
if 'id' in result:
self.client.userid = result['id']
if 'username' in result:
self.client.username = result['username']
return result | [
"def",
"login",
"(",
"self",
")",
":",
"if",
"self",
".",
"options",
"[",
"'token'",
"]",
":",
"self",
".",
"client",
".",
"token",
"=",
"self",
".",
"options",
"[",
"'token'",
"]",
"result",
"=",
"self",
".",
"users",
".",
"get_user",
"(",
"'me'",
")",
"else",
":",
"response",
"=",
"self",
".",
"users",
".",
"login_user",
"(",
"{",
"'login_id'",
":",
"self",
".",
"options",
"[",
"'login_id'",
"]",
",",
"'password'",
":",
"self",
".",
"options",
"[",
"'password'",
"]",
",",
"'token'",
":",
"self",
".",
"options",
"[",
"'mfa_token'",
"]",
"}",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"self",
".",
"client",
".",
"token",
"=",
"response",
".",
"headers",
"[",
"'Token'",
"]",
"self",
".",
"client",
".",
"cookies",
"=",
"response",
".",
"cookies",
"try",
":",
"result",
"=",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"log",
".",
"debug",
"(",
"'Could not convert response to json, returning raw response'",
")",
"result",
"=",
"response",
"log",
".",
"debug",
"(",
"result",
")",
"if",
"'id'",
"in",
"result",
":",
"self",
".",
"client",
".",
"userid",
"=",
"result",
"[",
"'id'",
"]",
"if",
"'username'",
"in",
"result",
":",
"self",
".",
"client",
".",
"username",
"=",
"result",
"[",
"'username'",
"]",
"return",
"result"
]
| Logs the user in.
The log in information is saved in the client
- userid
- username
- cookies
:return: The raw response from the request | [
"Logs",
"the",
"user",
"in",
"."
]
| ad1a936130096e39c2e1b76d78913e5950e06ca5 | https://github.com/Vaelor/python-mattermost-driver/blob/ad1a936130096e39c2e1b76d78913e5950e06ca5/src/mattermostdriver/driver.py#L142-L178 | train |
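
Two hedged login variants (all values are placeholders): password-based credentials, or a personal access token, which makes login() fetch the 'me' user instead of posting credentials:

    from mattermostdriver import Driver

    # password login; 'mfa_token' would be added here if MFA is enabled
    driver = Driver({'url': 'chat.example.com', 'login_id': 'bot', 'password': 'secret'})
    me = driver.login()   # driver.client.token / userid / username are populated afterwards

    # token login skips the credential post entirely
    bot = Driver({'url': 'chat.example.com', 'token': 'personal-access-token'})
    bot.login()           # resolves the user via users.get_user('me')
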
Vaelor/python-mattermost-driver | src/mattermostdriver/websocket.py | Websocket.connect | def connect(self, event_handler):
"""
Connect to the websocket and authenticate it.
When the authentication has finished, start the loop listening for messages,
sending a ping to the server to keep the connection alive.
:param event_handler: Every websocket event will be passed there. Takes one argument.
:type event_handler: Function(message)
:return:
"""
context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
if not self.options['verify']:
context.verify_mode = ssl.CERT_NONE
scheme = 'wss://'
if self.options['scheme'] != 'https':
scheme = 'ws://'
context = None
url = '{scheme:s}{url:s}:{port:s}{basepath:s}/websocket'.format(
scheme=scheme,
url=self.options['url'],
port=str(self.options['port']),
basepath=self.options['basepath']
)
websocket = yield from websockets.connect(
url,
ssl=context,
)
yield from self._authenticate_websocket(websocket, event_handler)
yield from self._start_loop(websocket, event_handler) | python | def connect(self, event_handler):
"""
Connect to the websocket and authenticate it.
When the authentication has finished, start the loop listening for messages,
sending a ping to the server to keep the connection alive.
:param event_handler: Every websocket event will be passed there. Takes one argument.
:type event_handler: Function(message)
:return:
"""
context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
if not self.options['verify']:
context.verify_mode = ssl.CERT_NONE
scheme = 'wss://'
if self.options['scheme'] != 'https':
scheme = 'ws://'
context = None
url = '{scheme:s}{url:s}:{port:s}{basepath:s}/websocket'.format(
scheme=scheme,
url=self.options['url'],
port=str(self.options['port']),
basepath=self.options['basepath']
)
websocket = yield from websockets.connect(
url,
ssl=context,
)
yield from self._authenticate_websocket(websocket, event_handler)
yield from self._start_loop(websocket, event_handler) | [
"def",
"connect",
"(",
"self",
",",
"event_handler",
")",
":",
"context",
"=",
"ssl",
".",
"create_default_context",
"(",
"purpose",
"=",
"ssl",
".",
"Purpose",
".",
"CLIENT_AUTH",
")",
"if",
"not",
"self",
".",
"options",
"[",
"'verify'",
"]",
":",
"context",
".",
"verify_mode",
"=",
"ssl",
".",
"CERT_NONE",
"scheme",
"=",
"'wss://'",
"if",
"self",
".",
"options",
"[",
"'scheme'",
"]",
"!=",
"'https'",
":",
"scheme",
"=",
"'ws://'",
"context",
"=",
"None",
"url",
"=",
"'{scheme:s}{url:s}:{port:s}{basepath:s}/websocket'",
".",
"format",
"(",
"scheme",
"=",
"scheme",
",",
"url",
"=",
"self",
".",
"options",
"[",
"'url'",
"]",
",",
"port",
"=",
"str",
"(",
"self",
".",
"options",
"[",
"'port'",
"]",
")",
",",
"basepath",
"=",
"self",
".",
"options",
"[",
"'basepath'",
"]",
")",
"websocket",
"=",
"yield",
"from",
"websockets",
".",
"connect",
"(",
"url",
",",
"ssl",
"=",
"context",
",",
")",
"yield",
"from",
"self",
".",
"_authenticate_websocket",
"(",
"websocket",
",",
"event_handler",
")",
"yield",
"from",
"self",
".",
"_start_loop",
"(",
"websocket",
",",
"event_handler",
")"
]
| Connect to the websocket and authenticate it.
When the authentication has finished, start the loop listening for messages,
sending a ping to the server to keep the connection alive.
:param event_handler: Every websocket event will be passed there. Takes one argument.
:type event_handler: Function(message)
:return: | [
"Connect",
"to",
"the",
"websocket",
"and",
"authenticate",
"it",
".",
"When",
"the",
"authentication",
"has",
"finished",
"start",
"the",
"loop",
"listening",
"for",
"messages",
"sending",
"a",
"ping",
"to",
"the",
"server",
"to",
"keep",
"the",
"connection",
"alive",
"."
]
| ad1a936130096e39c2e1b76d78913e5950e06ca5 | https://github.com/Vaelor/python-mattermost-driver/blob/ad1a936130096e39c2e1b76d78913e5950e06ca5/src/mattermostdriver/websocket.py#L19-L51 | train |
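
The websocket endpoint is assembled from the same options used for HTTP requests; a small sketch of the URL shapes produced by the format string above (hostnames and ports are placeholders):

    fmt = '{scheme}{url}:{port}{basepath}/websocket'
    fmt.format(scheme='wss://', url='chat.example.com', port=443, basepath='/api/v4')
    # 'wss://chat.example.com:443/api/v4/websocket' - TLS; verify=False would disable cert checks
    fmt.format(scheme='ws://', url='localhost', port=8065, basepath='/api/v4')
    # 'ws://localhost:8065/api/v4/websocket' - plain-http deployments drop the SSL context
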
Vaelor/python-mattermost-driver | src/mattermostdriver/websocket.py | Websocket._authenticate_websocket | def _authenticate_websocket(self, websocket, event_handler):
"""
Sends a authentication challenge over a websocket.
This is not needed when we just send the cookie we got on login
when connecting to the websocket.
"""
log.debug('Authenticating websocket')
json_data = json.dumps({
"seq": 1,
"action": "authentication_challenge",
"data": {
"token": self._token
}
}).encode('utf8')
yield from websocket.send(json_data)
while True:
message = yield from websocket.recv()
status = json.loads(message)
log.debug(status)
# We want to pass the events to the event_handler already
# because the hello event could arrive before the authentication ok response
yield from event_handler(message)
if ('status' in status and status['status'] == 'OK') and \
('seq_reply' in status and status['seq_reply'] == 1):
log.info('Websocket authentification OK')
return True
elif 'seq_reply' in status and status['seq_reply'] == 1:
log.error('Websocket authentification failed') | python | def _authenticate_websocket(self, websocket, event_handler):
"""
Sends a authentication challenge over a websocket.
This is not needed when we just send the cookie we got on login
when connecting to the websocket.
"""
log.debug('Authenticating websocket')
json_data = json.dumps({
"seq": 1,
"action": "authentication_challenge",
"data": {
"token": self._token
}
}).encode('utf8')
yield from websocket.send(json_data)
while True:
message = yield from websocket.recv()
status = json.loads(message)
log.debug(status)
# We want to pass the events to the event_handler already
# because the hello event could arrive before the authentication ok response
yield from event_handler(message)
if ('status' in status and status['status'] == 'OK') and \
('seq_reply' in status and status['seq_reply'] == 1):
log.info('Websocket authentification OK')
return True
elif 'seq_reply' in status and status['seq_reply'] == 1:
log.error('Websocket authentification failed') | [
"def",
"_authenticate_websocket",
"(",
"self",
",",
"websocket",
",",
"event_handler",
")",
":",
"log",
".",
"debug",
"(",
"'Authenticating websocket'",
")",
"json_data",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"seq\"",
":",
"1",
",",
"\"action\"",
":",
"\"authentication_challenge\"",
",",
"\"data\"",
":",
"{",
"\"token\"",
":",
"self",
".",
"_token",
"}",
"}",
")",
".",
"encode",
"(",
"'utf8'",
")",
"yield",
"from",
"websocket",
".",
"send",
"(",
"json_data",
")",
"while",
"True",
":",
"message",
"=",
"yield",
"from",
"websocket",
".",
"recv",
"(",
")",
"status",
"=",
"json",
".",
"loads",
"(",
"message",
")",
"log",
".",
"debug",
"(",
"status",
")",
"# We want to pass the events to the event_handler already",
"# because the hello event could arrive before the authentication ok response",
"yield",
"from",
"event_handler",
"(",
"message",
")",
"if",
"(",
"'status'",
"in",
"status",
"and",
"status",
"[",
"'status'",
"]",
"==",
"'OK'",
")",
"and",
"(",
"'seq_reply'",
"in",
"status",
"and",
"status",
"[",
"'seq_reply'",
"]",
"==",
"1",
")",
":",
"log",
".",
"info",
"(",
"'Websocket authentification OK'",
")",
"return",
"True",
"elif",
"'seq_reply'",
"in",
"status",
"and",
"status",
"[",
"'seq_reply'",
"]",
"==",
"1",
":",
"log",
".",
"error",
"(",
"'Websocket authentification failed'",
")"
]
| Sends an authentication challenge over a websocket.
This is not needed when we just send the cookie we got on login
when connecting to the websocket. | [
"Sends",
"a",
"authentication",
"challenge",
"over",
"a",
"websocket",
".",
"This",
"is",
"not",
"needed",
"when",
"we",
"just",
"send",
"the",
"cookie",
"we",
"got",
"on",
"login",
"when",
"connecting",
"to",
"the",
"websocket",
"."
]
| ad1a936130096e39c2e1b76d78913e5950e06ca5 | https://github.com/Vaelor/python-mattermost-driver/blob/ad1a936130096e39c2e1b76d78913e5950e06ca5/src/mattermostdriver/websocket.py#L73-L100 | train |
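
A sketch of the first frame sent over the socket and of the reply that ends the loop; the token value is a placeholder:

    import json

    challenge = json.dumps({
        "seq": 1,
        "action": "authentication_challenge",
        "data": {"token": "<session-or-access-token>"},
    })
    # success reply from the server: {"status": "OK", "seq_reply": 1}
    # a "hello" event may arrive before that reply, which is why every message
    # is forwarded to the event handler while the loop waits
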
paulgb/runipy | runipy/notebook_runner.py | NotebookRunner.run_cell | def run_cell(self, cell):
"""Run a notebook cell and update the output of that cell in-place."""
logging.info('Running cell:\n%s\n', cell.input)
self.kc.execute(cell.input)
reply = self.kc.get_shell_msg()
status = reply['content']['status']
traceback_text = ''
if status == 'error':
traceback_text = 'Cell raised uncaught exception: \n' + \
'\n'.join(reply['content']['traceback'])
logging.info(traceback_text)
else:
logging.info('Cell returned')
outs = list()
while True:
try:
msg = self.kc.get_iopub_msg(timeout=1)
if msg['msg_type'] == 'status':
if msg['content']['execution_state'] == 'idle':
break
except Empty:
# execution state should return to idle
# before the queue becomes empty,
# if it doesn't, something bad has happened
raise
content = msg['content']
msg_type = msg['msg_type']
# IPython 3.0.0-dev writes pyerr/pyout in the notebook format
# but uses error/execute_result in the message spec. This does the
# translation needed for tests to pass with IPython 3.0.0-dev
notebook3_format_conversions = {
'error': 'pyerr',
'execute_result': 'pyout'
}
msg_type = notebook3_format_conversions.get(msg_type, msg_type)
out = NotebookNode(output_type=msg_type)
if 'execution_count' in content:
cell['prompt_number'] = content['execution_count']
out.prompt_number = content['execution_count']
if msg_type in ('status', 'pyin', 'execute_input'):
continue
elif msg_type == 'stream':
out.stream = content['name']
# in msgspec 5, this is name, text
# in msgspec 4, this is name, data
if 'text' in content:
out.text = content['text']
else:
out.text = content['data']
elif msg_type in ('display_data', 'pyout'):
for mime, data in content['data'].items():
try:
attr = self.MIME_MAP[mime]
except KeyError:
raise NotImplementedError(
'unhandled mime type: %s' % mime
)
# In notebook version <= 3 JSON data is stored as a string
# Evaluation of IPython2's JSON gives strings directly
# Therefore do not encode for IPython versions prior to 3
json_encode = (
IPython.version_info[0] >= 3 and
mime == "application/json")
data_out = data if not json_encode else json.dumps(data)
setattr(out, attr, data_out)
elif msg_type == 'pyerr':
out.ename = content['ename']
out.evalue = content['evalue']
out.traceback = content['traceback']
elif msg_type == 'clear_output':
outs = list()
continue
else:
raise NotImplementedError(
'unhandled iopub message: %s' % msg_type
)
outs.append(out)
cell['outputs'] = outs
if status == 'error':
raise NotebookError(traceback_text) | python | def run_cell(self, cell):
"""Run a notebook cell and update the output of that cell in-place."""
logging.info('Running cell:\n%s\n', cell.input)
self.kc.execute(cell.input)
reply = self.kc.get_shell_msg()
status = reply['content']['status']
traceback_text = ''
if status == 'error':
traceback_text = 'Cell raised uncaught exception: \n' + \
'\n'.join(reply['content']['traceback'])
logging.info(traceback_text)
else:
logging.info('Cell returned')
outs = list()
while True:
try:
msg = self.kc.get_iopub_msg(timeout=1)
if msg['msg_type'] == 'status':
if msg['content']['execution_state'] == 'idle':
break
except Empty:
# execution state should return to idle
# before the queue becomes empty,
# if it doesn't, something bad has happened
raise
content = msg['content']
msg_type = msg['msg_type']
# IPython 3.0.0-dev writes pyerr/pyout in the notebook format
# but uses error/execute_result in the message spec. This does the
# translation needed for tests to pass with IPython 3.0.0-dev
notebook3_format_conversions = {
'error': 'pyerr',
'execute_result': 'pyout'
}
msg_type = notebook3_format_conversions.get(msg_type, msg_type)
out = NotebookNode(output_type=msg_type)
if 'execution_count' in content:
cell['prompt_number'] = content['execution_count']
out.prompt_number = content['execution_count']
if msg_type in ('status', 'pyin', 'execute_input'):
continue
elif msg_type == 'stream':
out.stream = content['name']
# in msgspec 5, this is name, text
# in msgspec 4, this is name, data
if 'text' in content:
out.text = content['text']
else:
out.text = content['data']
elif msg_type in ('display_data', 'pyout'):
for mime, data in content['data'].items():
try:
attr = self.MIME_MAP[mime]
except KeyError:
raise NotImplementedError(
'unhandled mime type: %s' % mime
)
# In notebook version <= 3 JSON data is stored as a string
# Evaluation of IPython2's JSON gives strings directly
# Therefore do not encode for IPython versions prior to 3
json_encode = (
IPython.version_info[0] >= 3 and
mime == "application/json")
data_out = data if not json_encode else json.dumps(data)
setattr(out, attr, data_out)
elif msg_type == 'pyerr':
out.ename = content['ename']
out.evalue = content['evalue']
out.traceback = content['traceback']
elif msg_type == 'clear_output':
outs = list()
continue
else:
raise NotImplementedError(
'unhandled iopub message: %s' % msg_type
)
outs.append(out)
cell['outputs'] = outs
if status == 'error':
raise NotebookError(traceback_text) | [
"def",
"run_cell",
"(",
"self",
",",
"cell",
")",
":",
"logging",
".",
"info",
"(",
"'Running cell:\\n%s\\n'",
",",
"cell",
".",
"input",
")",
"self",
".",
"kc",
".",
"execute",
"(",
"cell",
".",
"input",
")",
"reply",
"=",
"self",
".",
"kc",
".",
"get_shell_msg",
"(",
")",
"status",
"=",
"reply",
"[",
"'content'",
"]",
"[",
"'status'",
"]",
"traceback_text",
"=",
"''",
"if",
"status",
"==",
"'error'",
":",
"traceback_text",
"=",
"'Cell raised uncaught exception: \\n'",
"+",
"'\\n'",
".",
"join",
"(",
"reply",
"[",
"'content'",
"]",
"[",
"'traceback'",
"]",
")",
"logging",
".",
"info",
"(",
"traceback_text",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'Cell returned'",
")",
"outs",
"=",
"list",
"(",
")",
"while",
"True",
":",
"try",
":",
"msg",
"=",
"self",
".",
"kc",
".",
"get_iopub_msg",
"(",
"timeout",
"=",
"1",
")",
"if",
"msg",
"[",
"'msg_type'",
"]",
"==",
"'status'",
":",
"if",
"msg",
"[",
"'content'",
"]",
"[",
"'execution_state'",
"]",
"==",
"'idle'",
":",
"break",
"except",
"Empty",
":",
"# execution state should return to idle",
"# before the queue becomes empty,",
"# if it doesn't, something bad has happened",
"raise",
"content",
"=",
"msg",
"[",
"'content'",
"]",
"msg_type",
"=",
"msg",
"[",
"'msg_type'",
"]",
"# IPython 3.0.0-dev writes pyerr/pyout in the notebook format",
"# but uses error/execute_result in the message spec. This does the",
"# translation needed for tests to pass with IPython 3.0.0-dev",
"notebook3_format_conversions",
"=",
"{",
"'error'",
":",
"'pyerr'",
",",
"'execute_result'",
":",
"'pyout'",
"}",
"msg_type",
"=",
"notebook3_format_conversions",
".",
"get",
"(",
"msg_type",
",",
"msg_type",
")",
"out",
"=",
"NotebookNode",
"(",
"output_type",
"=",
"msg_type",
")",
"if",
"'execution_count'",
"in",
"content",
":",
"cell",
"[",
"'prompt_number'",
"]",
"=",
"content",
"[",
"'execution_count'",
"]",
"out",
".",
"prompt_number",
"=",
"content",
"[",
"'execution_count'",
"]",
"if",
"msg_type",
"in",
"(",
"'status'",
",",
"'pyin'",
",",
"'execute_input'",
")",
":",
"continue",
"elif",
"msg_type",
"==",
"'stream'",
":",
"out",
".",
"stream",
"=",
"content",
"[",
"'name'",
"]",
"# in msgspec 5, this is name, text",
"# in msgspec 4, this is name, data",
"if",
"'text'",
"in",
"content",
":",
"out",
".",
"text",
"=",
"content",
"[",
"'text'",
"]",
"else",
":",
"out",
".",
"text",
"=",
"content",
"[",
"'data'",
"]",
"elif",
"msg_type",
"in",
"(",
"'display_data'",
",",
"'pyout'",
")",
":",
"for",
"mime",
",",
"data",
"in",
"content",
"[",
"'data'",
"]",
".",
"items",
"(",
")",
":",
"try",
":",
"attr",
"=",
"self",
".",
"MIME_MAP",
"[",
"mime",
"]",
"except",
"KeyError",
":",
"raise",
"NotImplementedError",
"(",
"'unhandled mime type: %s'",
"%",
"mime",
")",
"# In notebook version <= 3 JSON data is stored as a string",
"# Evaluation of IPython2's JSON gives strings directly",
"# Therefore do not encode for IPython versions prior to 3",
"json_encode",
"=",
"(",
"IPython",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
"and",
"mime",
"==",
"\"application/json\"",
")",
"data_out",
"=",
"data",
"if",
"not",
"json_encode",
"else",
"json",
".",
"dumps",
"(",
"data",
")",
"setattr",
"(",
"out",
",",
"attr",
",",
"data_out",
")",
"elif",
"msg_type",
"==",
"'pyerr'",
":",
"out",
".",
"ename",
"=",
"content",
"[",
"'ename'",
"]",
"out",
".",
"evalue",
"=",
"content",
"[",
"'evalue'",
"]",
"out",
".",
"traceback",
"=",
"content",
"[",
"'traceback'",
"]",
"elif",
"msg_type",
"==",
"'clear_output'",
":",
"outs",
"=",
"list",
"(",
")",
"continue",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'unhandled iopub message: %s'",
"%",
"msg_type",
")",
"outs",
".",
"append",
"(",
"out",
")",
"cell",
"[",
"'outputs'",
"]",
"=",
"outs",
"if",
"status",
"==",
"'error'",
":",
"raise",
"NotebookError",
"(",
"traceback_text",
")"
]
| Run a notebook cell and update the output of that cell in-place. | [
"Run",
"a",
"notebook",
"cell",
"and",
"update",
"the",
"output",
"of",
"that",
"cell",
"in",
"-",
"place",
"."
]
| d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe | https://github.com/paulgb/runipy/blob/d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe/runipy/notebook_runner.py#L138-L226 | train |
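A minimal sketch (illustrative only, not part of the dataset record) of the msg_type translation that run_cell performs before building each output node: IPython 3 message-spec names are rewritten back to the notebook v3 names, and anything else passes through unchanged.

# Sketch of the lookup run_cell uses to normalize message types.
notebook3_format_conversions = {
    'error': 'pyerr',
    'execute_result': 'pyout',
}
for incoming in ('execute_result', 'stream', 'error'):
    # .get(key, default) leaves unknown types untouched
    print(incoming, '->', notebook3_format_conversions.get(incoming, incoming))
# execute_result -> pyout
# stream -> stream
# error -> pyerr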
paulgb/runipy | runipy/notebook_runner.py | NotebookRunner.iter_code_cells | def iter_code_cells(self):
"""Iterate over the notebook cells containing code."""
for ws in self.nb.worksheets:
for cell in ws.cells:
if cell.cell_type == 'code':
yield cell | python | def iter_code_cells(self):
"""Iterate over the notebook cells containing code."""
for ws in self.nb.worksheets:
for cell in ws.cells:
if cell.cell_type == 'code':
yield cell | [
"def",
"iter_code_cells",
"(",
"self",
")",
":",
"for",
"ws",
"in",
"self",
".",
"nb",
".",
"worksheets",
":",
"for",
"cell",
"in",
"ws",
".",
"cells",
":",
"if",
"cell",
".",
"cell_type",
"==",
"'code'",
":",
"yield",
"cell"
]
| Iterate over the notebook cells containing code. | [
"Iterate",
"over",
"the",
"notebook",
"cells",
"containing",
"code",
"."
]
| d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe | https://github.com/paulgb/runipy/blob/d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe/runipy/notebook_runner.py#L228-L233 | train |
paulgb/runipy | runipy/notebook_runner.py | NotebookRunner.run_notebook | def run_notebook(self, skip_exceptions=False, progress_callback=None):
"""
Run all the notebook cells in order and update the outputs in-place.
If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
subsequent cells are run (by default, the notebook execution stops).
"""
for i, cell in enumerate(self.iter_code_cells()):
try:
self.run_cell(cell)
except NotebookError:
if not skip_exceptions:
raise
if progress_callback:
progress_callback(i) | python | def run_notebook(self, skip_exceptions=False, progress_callback=None):
"""
Run all the notebook cells in order and update the outputs in-place.
If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
subsequent cells are run (by default, the notebook execution stops).
"""
for i, cell in enumerate(self.iter_code_cells()):
try:
self.run_cell(cell)
except NotebookError:
if not skip_exceptions:
raise
if progress_callback:
progress_callback(i) | [
"def",
"run_notebook",
"(",
"self",
",",
"skip_exceptions",
"=",
"False",
",",
"progress_callback",
"=",
"None",
")",
":",
"for",
"i",
",",
"cell",
"in",
"enumerate",
"(",
"self",
".",
"iter_code_cells",
"(",
")",
")",
":",
"try",
":",
"self",
".",
"run_cell",
"(",
"cell",
")",
"except",
"NotebookError",
":",
"if",
"not",
"skip_exceptions",
":",
"raise",
"if",
"progress_callback",
":",
"progress_callback",
"(",
"i",
")"
]
| Run all the notebook cells in order and update the outputs in-place.
If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
subsequent cells are run (by default, the notebook execution stops). | [
"Run",
"all",
"the",
"notebook",
"cells",
"in",
"order",
"and",
"update",
"the",
"outputs",
"in",
"-",
"place",
"."
]
| d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe | https://github.com/paulgb/runipy/blob/d48c4c522bd1d66dcc5c1c09e70a92bfb58360fe/runipy/notebook_runner.py#L235-L249 | train |
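A minimal usage sketch for NotebookRunner, showing how run_notebook's skip_exceptions and progress_callback parameters are typically wired up. The constructor signature and the nbformat import path are assumptions based on the IPython 2/3-era API and runipy's documented usage, not on this record; adjust for newer IPython/nbformat releases.

# Sketch: load a notebook, run every code cell, and write the results back out.
from runipy.notebook_runner import NotebookRunner
from IPython.nbformat.current import read, write  # older nbformat API; assumed here

def report(i):
    # called after each code cell finishes (see progress_callback above)
    print('finished code cell %d' % i)

with open('input.ipynb') as f:
    notebook = read(f, 'json')

runner = NotebookRunner(notebook)  # constructor assumed to take a parsed notebook
try:
    # skip_exceptions=True keeps running subsequent cells after a failure
    runner.run_notebook(skip_exceptions=True, progress_callback=report)
finally:
    with open('output.ipynb', 'w') as f:
        write(runner.nb, f, 'json')  # runner.nb holds the updated notebook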