repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 class: "python") | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (string, 1 class: "train")
---|---|---|---|---|---|---|---|---|---|---|---|
geophysics-ubonn/crtomo_tools | lib/crtomo/eitManager.py | eitMan.model | def model(self, **kwargs):
"""Run the forward modeling for all frequencies.
Use :py:func:`crtomo.eitManager.eitMan.measurements` to retrieve the
resulting synthetic measurement spectra.
Parameters
----------
**kwargs : dict, optional
All kwargs are directly provided to the underlying
:py:func:`crtomo.tdManager.tdMan.model` function calls.
"""
for key, td in self.tds.items():
td.model(**kwargs) | python | def model(self, **kwargs):
"""Run the forward modeling for all frequencies.
Use :py:func:`crtomo.eitManager.eitMan.measurements` to retrieve the
resulting synthetic measurement spectra.
Parameters
----------
**kwargs : dict, optional
All kwargs are directly provided to the underlying
:py:func:`crtomo.tdManager.tdMan.model` function calls.
"""
for key, td in self.tds.items():
td.model(**kwargs) | [
"def",
"model",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"key",
",",
"td",
"in",
"self",
".",
"tds",
".",
"items",
"(",
")",
":",
"td",
".",
"model",
"(",
"*",
"*",
"kwargs",
")"
]
| Run the forward modeling for all frequencies.
Use :py:func:`crtomo.eitManager.eitMan.measurements` to retrieve the
resulting synthetic measurement spectra.
Parameters
----------
**kwargs : dict, optional
All kwargs are directly provided to the underlying
:py:func:`crtomo.tdManager.tdMan.model` function calls. | [
"Run",
"the",
"forward",
"modeling",
"for",
"all",
"frequencies",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L474-L488 | train |
geophysics-ubonn/crtomo_tools | lib/crtomo/eitManager.py | eitMan.measurements | def measurements(self):
"""Return modeled measurements
1. dimension: frequency
2. dimension: config-number
3. dimension: 2: magnitude and phase (resistivity)
"""
m_all = np.array([self.tds[key].measurements() for key in
sorted(self.tds.keys())])
return m_all | python | def measurements(self):
"""Return modeled measurements
1. dimension: frequency
2. dimension: config-number
3. dimension: 2: magnitude and phase (resistivity)
"""
m_all = np.array([self.tds[key].measurements() for key in
sorted(self.tds.keys())])
return m_all | [
"def",
"measurements",
"(",
"self",
")",
":",
"m_all",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"tds",
"[",
"key",
"]",
".",
"measurements",
"(",
")",
"for",
"key",
"in",
"sorted",
"(",
"self",
".",
"tds",
".",
"keys",
"(",
")",
")",
"]",
")",
"return",
"m_all"
]
| Return modeled measurements
1. dimension: frequency
2. dimension: config-number
3. dimension: 2: magnitude and phase (resistivity) | [
"Return",
"modeled",
"measurements"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L490-L500 | train |
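The docstring above fixes the layout of the array returned by `eitMan.measurements()` (frequency x configuration x magnitude/phase). A minimal slicing sketch, assuming `m_all` was obtained from `measurements()` after a call to `model()`; the concrete sizes below (4 frequencies, 10 configurations) are made-up placeholders.

```python
import numpy as np

# placeholder with the documented layout: frequency x configuration x (magnitude, phase)
m_all = np.zeros((4, 10, 2))

rmag = m_all[:, :, 0]            # resistivity magnitudes, one row per frequency
rpha = m_all[:, :, 1]            # phases (resistivity), same layout
first_config = m_all[:, 0, :]    # full spectrum (all frequencies) of configuration 0
```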
geophysics-ubonn/crtomo_tools | lib/crtomo/eitManager.py | eitMan.get_measurement_responses | def get_measurement_responses(self):
"""Return a dictionary of sip_responses for the modeled SIP spectra
Note that this function does NOT check that each frequency contains the
same configurations!
Returns
-------
responses : dict
Dictionary with configurations as keys
"""
# take configurations from first tomodir
configs = self.tds[sorted(self.tds.keys())[0]].configs.configs
measurements = self.measurements()
responses = {}
for config, sip_measurement in zip(configs,
np.rollaxis(measurements, 1)):
sip = sip_response(
frequencies=self.frequencies,
rmag=sip_measurement[:, 0],
rpha=sip_measurement[:, 1]
)
responses[tuple(config)] = sip
return responses | python | def get_measurement_responses(self):
"""Return a dictionary of sip_responses for the modeled SIP spectra
Note that this function does NOT check that each frequency contains the
same configurations!
Returns
-------
responses : dict
Dictionary with configurations as keys
"""
# take configurations from first tomodir
configs = self.tds[sorted(self.tds.keys())[0]].configs.configs
measurements = self.measurements()
responses = {}
for config, sip_measurement in zip(configs,
np.rollaxis(measurements, 1)):
sip = sip_response(
frequencies=self.frequencies,
rmag=sip_measurement[:, 0],
rpha=sip_measurement[:, 1]
)
responses[tuple(config)] = sip
return responses | [
"def",
"get_measurement_responses",
"(",
"self",
")",
":",
"# take configurations from first tomodir",
"configs",
"=",
"self",
".",
"tds",
"[",
"sorted",
"(",
"self",
".",
"tds",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"]",
".",
"configs",
".",
"configs",
"measurements",
"=",
"self",
".",
"measurements",
"(",
")",
"responses",
"=",
"{",
"}",
"for",
"config",
",",
"sip_measurement",
"in",
"zip",
"(",
"configs",
",",
"np",
".",
"rollaxis",
"(",
"measurements",
",",
"1",
")",
")",
":",
"sip",
"=",
"sip_response",
"(",
"frequencies",
"=",
"self",
".",
"frequencies",
",",
"rmag",
"=",
"sip_measurement",
"[",
":",
",",
"0",
"]",
",",
"rpha",
"=",
"sip_measurement",
"[",
":",
",",
"1",
"]",
")",
"responses",
"[",
"tuple",
"(",
"config",
")",
"]",
"=",
"sip",
"return",
"responses"
]
| Return a dictionary of sip_responses for the modeled SIP spectra
Note that this function does NOT check that each frequency contains the
same configurations!
Returns
-------
responses : dict
Dictionary with configurations as keys | [
"Return",
"a",
"dictionary",
"of",
"sip_responses",
"for",
"the",
"modeled",
"SIP",
"spectra"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L502-L527 | train |
reorx/torext | examples/formal_project/manage.py | create_database | def create_database(name, number=1, force_clear=False):
"""Command to create a database
"""
print 'Got:'
print 'name', name, type(name)
print 'number', number, type(number)
print 'force_clear', force_clear, type(force_clear) | python | def create_database(name, number=1, force_clear=False):
"""Command to create a database
"""
print 'Got:'
print 'name', name, type(name)
print 'number', number, type(number)
print 'force_clear', force_clear, type(force_clear) | [
"def",
"create_database",
"(",
"name",
",",
"number",
"=",
"1",
",",
"force_clear",
"=",
"False",
")",
":",
"print",
"'Got:'",
"print",
"'name'",
",",
"name",
",",
"type",
"(",
"name",
")",
"print",
"'number'",
",",
"number",
",",
"type",
"(",
"number",
")",
"print",
"'force_clear'",
",",
"force_clear",
",",
"type",
"(",
"force_clear",
")"
]
| Command to create a database | [
"Command",
"to",
"create",
"a",
"database"
]
| 84c4300ebc7fab0dbd11cf8b020bc7d4d1570171 | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/examples/formal_project/manage.py#L10-L16 | train |
NiklasRosenstein/py-bundler | bundler/nativedeps/windll.py | _get_long_path_name | def _get_long_path_name(path):
"""
Returns the long path name for a Windows path, i.e. the properly cased
path of an existing file or directory.
"""
# Thanks to http://stackoverflow.com/a/3694799/791713
buf = ctypes.create_unicode_buffer(len(path) + 1)
GetLongPathNameW = ctypes.windll.kernel32.GetLongPathNameW
res = GetLongPathNameW(path, buf, len(path) + 1)
if res == 0 or res > 260:
return path
else:
return buf.value | python | def _get_long_path_name(path):
"""
Returns the long path name for a Windows path, i.e. the properly cased
path of an existing file or directory.
"""
# Thanks to http://stackoverflow.com/a/3694799/791713
buf = ctypes.create_unicode_buffer(len(path) + 1)
GetLongPathNameW = ctypes.windll.kernel32.GetLongPathNameW
res = GetLongPathNameW(path, buf, len(path) + 1)
if res == 0 or res > 260:
return path
else:
return buf.value | [
"def",
"_get_long_path_name",
"(",
"path",
")",
":",
"# Thanks to http://stackoverflow.com/a/3694799/791713",
"buf",
"=",
"ctypes",
".",
"create_unicode_buffer",
"(",
"len",
"(",
"path",
")",
"+",
"1",
")",
"GetLongPathNameW",
"=",
"ctypes",
".",
"windll",
".",
"kernel32",
".",
"GetLongPathNameW",
"res",
"=",
"GetLongPathNameW",
"(",
"path",
",",
"buf",
",",
"len",
"(",
"path",
")",
"+",
"1",
")",
"if",
"res",
"==",
"0",
"or",
"res",
">",
"260",
":",
"return",
"path",
"else",
":",
"return",
"buf",
".",
"value"
]
| Returns the long path name for a Windows path, i.e. the properly cased
path of an existing file or directory. | [
"Returns",
"the",
"long",
"path",
"name",
"for",
"a",
"Windows",
"path",
"i",
".",
"e",
".",
"the",
"properly",
"cased",
"path",
"of",
"an",
"existing",
"file",
"or",
"directory",
"."
]
| 80dd6dc971667ba015f7f67481417c45cc757231 | https://github.com/NiklasRosenstein/py-bundler/blob/80dd6dc971667ba015f7f67481417c45cc757231/bundler/nativedeps/windll.py#L44-L57 | train |
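A hedged usage sketch for `_get_long_path_name`: it only does real work on Windows, where `kernel32.GetLongPathNameW` expands a short (8.3) path into its properly cased long form. The short path below is a hypothetical example; for a path that does not exist the function simply returns its input unchanged.

```python
# Windows-only illustration; the short path is hypothetical.
short = r'C:\PROGRA~1\MYAPP~1'
print(_get_long_path_name(short))
# e.g. 'C:\\Program Files\\My Application' if the path exists,
# otherwise the input string comes back unchanged.
```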
NiklasRosenstein/py-bundler | bundler/nativedeps/windll.py | get_dependency_walker | def get_dependency_walker():
"""
Checks if `depends.exe` is in the system PATH. If not, it will be downloaded
and extracted to a temporary directory. Note that the file will not be
deleted afterwards.
Returns the path to the Dependency Walker executable.
"""
for dirname in os.getenv('PATH', '').split(os.pathsep):
filename = os.path.join(dirname, 'depends.exe')
if os.path.isfile(filename):
logger.info('Dependency Walker found at "{}"'.format(filename))
return filename
temp_exe = os.path.join(tempfile.gettempdir(), 'depends.exe')
temp_dll = os.path.join(tempfile.gettempdir(), 'depends.dll')
if os.path.isfile(temp_exe):
logger.info('Dependency Walker found at "{}"'.format(temp_exe))
return temp_exe
logger.info('Dependency Walker not found. Downloading ...')
with urlopen('http://dependencywalker.com/depends22_x64.zip') as fp:
data = fp.read()
logger.info('Extracting Dependency Walker to "{}"'.format(temp_exe))
with zipfile.ZipFile(io.BytesIO(data)) as fp:
with fp.open('depends.exe') as src:
with open(temp_exe, 'wb') as dst:
shutil.copyfileobj(src, dst)
with fp.open('depends.dll') as src:
with open(temp_dll, 'wb') as dst:
shutil.copyfileobj(src, dst)
return temp_exe | python | def get_dependency_walker():
"""
Checks if `depends.exe` is in the system PATH. If not, it will be downloaded
and extracted to a temporary directory. Note that the file will not be
deleted afterwards.
Returns the path to the Dependency Walker executable.
"""
for dirname in os.getenv('PATH', '').split(os.pathsep):
filename = os.path.join(dirname, 'depends.exe')
if os.path.isfile(filename):
logger.info('Dependency Walker found at "{}"'.format(filename))
return filename
temp_exe = os.path.join(tempfile.gettempdir(), 'depends.exe')
temp_dll = os.path.join(tempfile.gettempdir(), 'depends.dll')
if os.path.isfile(temp_exe):
logger.info('Dependency Walker found at "{}"'.format(temp_exe))
return temp_exe
logger.info('Dependency Walker not found. Downloading ...')
with urlopen('http://dependencywalker.com/depends22_x64.zip') as fp:
data = fp.read()
logger.info('Extracting Dependency Walker to "{}"'.format(temp_exe))
with zipfile.ZipFile(io.BytesIO(data)) as fp:
with fp.open('depends.exe') as src:
with open(temp_exe, 'wb') as dst:
shutil.copyfileobj(src, dst)
with fp.open('depends.dll') as src:
with open(temp_dll, 'wb') as dst:
shutil.copyfileobj(src, dst)
return temp_exe | [
"def",
"get_dependency_walker",
"(",
")",
":",
"for",
"dirname",
"in",
"os",
".",
"getenv",
"(",
"'PATH'",
",",
"''",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"'depends.exe'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"logger",
".",
"info",
"(",
"'Dependency Walker found at \"{}\"'",
".",
"format",
"(",
"filename",
")",
")",
"return",
"filename",
"temp_exe",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"'depends.exe'",
")",
"temp_dll",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"'depends.dll'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"temp_exe",
")",
":",
"logger",
".",
"info",
"(",
"'Dependency Walker found at \"{}\"'",
".",
"format",
"(",
"temp_exe",
")",
")",
"return",
"temp_exe",
"logger",
".",
"info",
"(",
"'Dependency Walker not found. Downloading ...'",
")",
"with",
"urlopen",
"(",
"'http://dependencywalker.com/depends22_x64.zip'",
")",
"as",
"fp",
":",
"data",
"=",
"fp",
".",
"read",
"(",
")",
"logger",
".",
"info",
"(",
"'Extracting Dependency Walker to \"{}\"'",
".",
"format",
"(",
"temp_exe",
")",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"io",
".",
"BytesIO",
"(",
"data",
")",
")",
"as",
"fp",
":",
"with",
"fp",
".",
"open",
"(",
"'depends.exe'",
")",
"as",
"src",
":",
"with",
"open",
"(",
"temp_exe",
",",
"'wb'",
")",
"as",
"dst",
":",
"shutil",
".",
"copyfileobj",
"(",
"src",
",",
"dst",
")",
"with",
"fp",
".",
"open",
"(",
"'depends.dll'",
")",
"as",
"src",
":",
"with",
"open",
"(",
"temp_dll",
",",
"'wb'",
")",
"as",
"dst",
":",
"shutil",
".",
"copyfileobj",
"(",
"src",
",",
"dst",
")",
"return",
"temp_exe"
]
| Checks if `depends.exe` is in the system PATH. If not, it will be downloaded
and extracted to a temporary directory. Note that the file will not be
deleted afterwards.
Returns the path to the Dependency Walker executable. | [
"Checks",
"if",
"depends",
".",
"exe",
"is",
"in",
"the",
"system",
"PATH",
".",
"If",
"not",
"it",
"will",
"be",
"downloaded",
"and",
"extracted",
"to",
"a",
"temporary",
"directory",
".",
"Note",
"that",
"the",
"file",
"will",
"not",
"be",
"deleted",
"afterwards",
"."
]
| 80dd6dc971667ba015f7f67481417c45cc757231 | https://github.com/NiklasRosenstein/py-bundler/blob/80dd6dc971667ba015f7f67481417c45cc757231/bundler/nativedeps/windll.py#L60-L94 | train |
reorx/torext | torext/script.py | Manager.prepare | def prepare(self, setup_func):
"""This decorator wrap a function which setup a environment before
running a command
@manager.prepare(setup_func)
def some_command():
pass
"""
assert inspect.isfunction(setup_func)
argsspec = inspect.getargspec(setup_func)
if argsspec.args:
raise ValueError("prepare function shouldn't have any arguments")
def decorator(command_func):
@functools.wraps(command_func)
def wrapper(*args, **kwgs):
# Run setup_func before command_func
setup_func()
return command_func(*args, **kwgs)
return wrapper
return decorator | python | def prepare(self, setup_func):
"""This decorator wrap a function which setup a environment before
running a command
@manager.prepare(setup_func)
def some_command():
pass
"""
assert inspect.isfunction(setup_func)
argsspec = inspect.getargspec(setup_func)
if argsspec.args:
raise ValueError("prepare function shouldn't have any arguments")
def decorator(command_func):
@functools.wraps(command_func)
def wrapper(*args, **kwgs):
# Run setup_func before command_func
setup_func()
return command_func(*args, **kwgs)
return wrapper
return decorator | [
"def",
"prepare",
"(",
"self",
",",
"setup_func",
")",
":",
"assert",
"inspect",
".",
"isfunction",
"(",
"setup_func",
")",
"argsspec",
"=",
"inspect",
".",
"getargspec",
"(",
"setup_func",
")",
"if",
"argsspec",
".",
"args",
":",
"raise",
"ValueError",
"(",
"\"prepare function shouldn't have any arguments\"",
")",
"def",
"decorator",
"(",
"command_func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"command_func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwgs",
")",
":",
"# Run setup_func before command_func",
"setup_func",
"(",
")",
"return",
"command_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwgs",
")",
"return",
"wrapper",
"return",
"decorator"
]
 | This decorator wraps a function which sets up an environment before
running a command
@manager.prepare(setup_func)
def some_command():
pass | [
"This",
"decorator",
"wrap",
"a",
"function",
"which",
"setup",
"a",
"environment",
"before",
"running",
"a",
"command"
]
| 84c4300ebc7fab0dbd11cf8b020bc7d4d1570171 | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/script.py#L220-L239 | train |
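The `prepare` decorator above is plain Python, so the pattern can be shown without the torext `Manager` at all. In this self-contained sketch `connect_db` and `migrate` are made-up names standing in for a setup function and a command; as in `prepare`, the setup runs before every call of the wrapped command.

```python
import functools

def prepare(setup_func):
    """Standalone version of the pattern: run setup_func before the command."""
    def decorator(command_func):
        @functools.wraps(command_func)
        def wrapper(*args, **kwargs):
            setup_func()                     # environment setup happens first
            return command_func(*args, **kwargs)
        return wrapper
    return decorator

def connect_db():                            # hypothetical setup step
    print('connecting to database')

@prepare(connect_db)
def migrate(version):                        # hypothetical command
    print('migrating to schema', version)

migrate(3)   # prints the setup message, then 'migrating to schema 3'
```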
Nic30/hwtGraph | hwtGraph/elk/fromHwt/utils.py | addPort | def addPort(n: LNode, intf: Interface):
"""
Add LayoutExternalPort for interface
"""
d = PortTypeFromDir(intf._direction)
ext_p = LayoutExternalPort(
n, name=intf._name, direction=d, node2lnode=n._node2lnode)
ext_p.originObj = originObjOfPort(intf)
n.children.append(ext_p)
addPortToLNode(ext_p, intf, reverseDirection=True)
return ext_p | python | def addPort(n: LNode, intf: Interface):
"""
Add LayoutExternalPort for interface
"""
d = PortTypeFromDir(intf._direction)
ext_p = LayoutExternalPort(
n, name=intf._name, direction=d, node2lnode=n._node2lnode)
ext_p.originObj = originObjOfPort(intf)
n.children.append(ext_p)
addPortToLNode(ext_p, intf, reverseDirection=True)
return ext_p | [
"def",
"addPort",
"(",
"n",
":",
"LNode",
",",
"intf",
":",
"Interface",
")",
":",
"d",
"=",
"PortTypeFromDir",
"(",
"intf",
".",
"_direction",
")",
"ext_p",
"=",
"LayoutExternalPort",
"(",
"n",
",",
"name",
"=",
"intf",
".",
"_name",
",",
"direction",
"=",
"d",
",",
"node2lnode",
"=",
"n",
".",
"_node2lnode",
")",
"ext_p",
".",
"originObj",
"=",
"originObjOfPort",
"(",
"intf",
")",
"n",
".",
"children",
".",
"append",
"(",
"ext_p",
")",
"addPortToLNode",
"(",
"ext_p",
",",
"intf",
",",
"reverseDirection",
"=",
"True",
")",
"return",
"ext_p"
]
| Add LayoutExternalPort for interface | [
"Add",
"LayoutExternalPort",
"for",
"interface"
]
| 6b7d4fdd759f263a0fdd2736f02f123e44e4354f | https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/utils.py#L231-L241 | train |
tslight/treepick | treepick/draw.py | Draw.drawtree | def drawtree(self):
'''
Loop over the object, process path attribute sets, and drawlines based
on their current contents.
'''
self.win.erase()
self.line = 0
for child, depth in self.traverse():
child.curline = self.curline
child.picked = self.picked
child.expanded = self.expanded
child.sized = self.sized
if depth == 0:
continue
if self.line == self.curline:
self.color.curline(child.name, child.picked)
children = child.children
name = child.name
else:
self.color.default(child.name, child.picked)
if child.name in self.sized and not self.sized[child.name]:
self.sized[child.name] = " [" + du(child.name) + "]"
child.drawline(depth, self.line, self.win)
self.line += 1
self.win.refresh()
self.mkheader(name)
self.mkfooter(name, children) | python | def drawtree(self):
'''
Loop over the object, process path attribute sets, and drawlines based
on their current contents.
'''
self.win.erase()
self.line = 0
for child, depth in self.traverse():
child.curline = self.curline
child.picked = self.picked
child.expanded = self.expanded
child.sized = self.sized
if depth == 0:
continue
if self.line == self.curline:
self.color.curline(child.name, child.picked)
children = child.children
name = child.name
else:
self.color.default(child.name, child.picked)
if child.name in self.sized and not self.sized[child.name]:
self.sized[child.name] = " [" + du(child.name) + "]"
child.drawline(depth, self.line, self.win)
self.line += 1
self.win.refresh()
self.mkheader(name)
self.mkfooter(name, children) | [
"def",
"drawtree",
"(",
"self",
")",
":",
"self",
".",
"win",
".",
"erase",
"(",
")",
"self",
".",
"line",
"=",
"0",
"for",
"child",
",",
"depth",
"in",
"self",
".",
"traverse",
"(",
")",
":",
"child",
".",
"curline",
"=",
"self",
".",
"curline",
"child",
".",
"picked",
"=",
"self",
".",
"picked",
"child",
".",
"expanded",
"=",
"self",
".",
"expanded",
"child",
".",
"sized",
"=",
"self",
".",
"sized",
"if",
"depth",
"==",
"0",
":",
"continue",
"if",
"self",
".",
"line",
"==",
"self",
".",
"curline",
":",
"self",
".",
"color",
".",
"curline",
"(",
"child",
".",
"name",
",",
"child",
".",
"picked",
")",
"children",
"=",
"child",
".",
"children",
"name",
"=",
"child",
".",
"name",
"else",
":",
"self",
".",
"color",
".",
"default",
"(",
"child",
".",
"name",
",",
"child",
".",
"picked",
")",
"if",
"child",
".",
"name",
"in",
"self",
".",
"sized",
"and",
"not",
"self",
".",
"sized",
"[",
"child",
".",
"name",
"]",
":",
"self",
".",
"sized",
"[",
"child",
".",
"name",
"]",
"=",
"\" [\"",
"+",
"du",
"(",
"child",
".",
"name",
")",
"+",
"\"]\"",
"child",
".",
"drawline",
"(",
"depth",
",",
"self",
".",
"line",
",",
"self",
".",
"win",
")",
"self",
".",
"line",
"+=",
"1",
"self",
".",
"win",
".",
"refresh",
"(",
")",
"self",
".",
"mkheader",
"(",
"name",
")",
"self",
".",
"mkfooter",
"(",
"name",
",",
"children",
")"
]
| Loop over the object, process path attribute sets, and drawlines based
on their current contents. | [
"Loop",
"over",
"the",
"object",
"process",
"path",
"attribute",
"sets",
"and",
"drawlines",
"based",
"on",
"their",
"current",
"contents",
"."
]
| 7adf838900f11e8845e17d8c79bb2b23617aec2c | https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/draw.py#L64-L90 | train |
albert12132/templar | templar/api/config.py | import_config | def import_config(config_path):
"""Import a Config from a given path, relative to the current directory.
    The module specified by the config file must contain a variable called `config` that is
assigned to a Config object.
"""
if not os.path.isfile(config_path):
raise ConfigBuilderError(
'Could not find config file: ' + config_path)
loader = importlib.machinery.SourceFileLoader(config_path, config_path)
module = loader.load_module()
if not hasattr(module, 'config') or not isinstance(module.config, Config):
raise ConfigBuilderError(
'Could not load config file "{}": config files must contain '
'a variable called "config" that is '
'assigned to a Config object.'.format(config_path))
return module.config | python | def import_config(config_path):
"""Import a Config from a given path, relative to the current directory.
    The module specified by the config file must contain a variable called `config` that is
assigned to a Config object.
"""
if not os.path.isfile(config_path):
raise ConfigBuilderError(
'Could not find config file: ' + config_path)
loader = importlib.machinery.SourceFileLoader(config_path, config_path)
module = loader.load_module()
if not hasattr(module, 'config') or not isinstance(module.config, Config):
raise ConfigBuilderError(
'Could not load config file "{}": config files must contain '
'a variable called "config" that is '
'assigned to a Config object.'.format(config_path))
return module.config | [
"def",
"import_config",
"(",
"config_path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"config_path",
")",
":",
"raise",
"ConfigBuilderError",
"(",
"'Could not find config file: '",
"+",
"config_path",
")",
"loader",
"=",
"importlib",
".",
"machinery",
".",
"SourceFileLoader",
"(",
"config_path",
",",
"config_path",
")",
"module",
"=",
"loader",
".",
"load_module",
"(",
")",
"if",
"not",
"hasattr",
"(",
"module",
",",
"'config'",
")",
"or",
"not",
"isinstance",
"(",
"module",
".",
"config",
",",
"Config",
")",
":",
"raise",
"ConfigBuilderError",
"(",
"'Could not load config file \"{}\": config files must contain '",
"'a variable called \"config\" that is '",
"'assigned to a Config object.'",
".",
"format",
"(",
"config_path",
")",
")",
"return",
"module",
".",
"config"
]
| Import a Config from a given path, relative to the current directory.
The module specified by the config file must contain a variable called `config` that is
assigned to a Config object. | [
"Import",
"a",
"Config",
"from",
"a",
"given",
"path",
"relative",
"to",
"the",
"current",
"directory",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/api/config.py#L216-L233 | train |
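The core of `import_config` is `importlib.machinery.SourceFileLoader`, keyed by the file path. The sketch below exercises just that loading step with a throwaway module; the file name and its contents are invented, and a real Templar config would instead assign a `Config` instance to the `config` variable.

```python
import importlib.machinery
import os
import tempfile

# write a tiny module to disk, then load it the same way import_config does
path = os.path.join(tempfile.gettempdir(), 'demo_config.py')
with open(path, 'w') as fd:
    fd.write("config = {'answer': 42}\n")    # stand-in for `config = Config(...)`

loader = importlib.machinery.SourceFileLoader(path, path)
module = loader.load_module()                # deprecated in newer Python; the
                                             # original code relies on it as well
print(module.config['answer'])               # 42
```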
rhayes777/PyAutoFit | autofit/optimize/optimizer.py | grid | def grid(fitness_function, no_dimensions, step_size):
"""
Grid search using a fitness function over a given number of dimensions and a given step size between inclusive
limits of 0 and 1.
Parameters
----------
fitness_function: function
A function that takes a tuple of floats as an argument
no_dimensions: int
The number of dimensions of the grid search
step_size: float
The step size of the grid search
Returns
-------
best_arguments: tuple[float]
The tuple of arguments that gave the highest fitness
"""
best_fitness = float("-inf")
best_arguments = None
for arguments in make_lists(no_dimensions, step_size):
fitness = fitness_function(tuple(arguments))
if fitness > best_fitness:
best_fitness = fitness
best_arguments = tuple(arguments)
return best_arguments | python | def grid(fitness_function, no_dimensions, step_size):
"""
Grid search using a fitness function over a given number of dimensions and a given step size between inclusive
limits of 0 and 1.
Parameters
----------
fitness_function: function
A function that takes a tuple of floats as an argument
no_dimensions: int
The number of dimensions of the grid search
step_size: float
The step size of the grid search
Returns
-------
best_arguments: tuple[float]
The tuple of arguments that gave the highest fitness
"""
best_fitness = float("-inf")
best_arguments = None
for arguments in make_lists(no_dimensions, step_size):
fitness = fitness_function(tuple(arguments))
if fitness > best_fitness:
best_fitness = fitness
best_arguments = tuple(arguments)
return best_arguments | [
"def",
"grid",
"(",
"fitness_function",
",",
"no_dimensions",
",",
"step_size",
")",
":",
"best_fitness",
"=",
"float",
"(",
"\"-inf\"",
")",
"best_arguments",
"=",
"None",
"for",
"arguments",
"in",
"make_lists",
"(",
"no_dimensions",
",",
"step_size",
")",
":",
"fitness",
"=",
"fitness_function",
"(",
"tuple",
"(",
"arguments",
")",
")",
"if",
"fitness",
">",
"best_fitness",
":",
"best_fitness",
"=",
"fitness",
"best_arguments",
"=",
"tuple",
"(",
"arguments",
")",
"return",
"best_arguments"
]
| Grid search using a fitness function over a given number of dimensions and a given step size between inclusive
limits of 0 and 1.
Parameters
----------
fitness_function: function
A function that takes a tuple of floats as an argument
no_dimensions: int
The number of dimensions of the grid search
step_size: float
The step size of the grid search
Returns
-------
best_arguments: tuple[float]
The tuple of arguments that gave the highest fitness | [
"Grid",
"search",
"using",
"a",
"fitness",
"function",
"over",
"a",
"given",
"number",
"of",
"dimensions",
"and",
"a",
"given",
"step",
"size",
"between",
"inclusive",
"limits",
"of",
"0",
"and",
"1",
"."
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/optimizer.py#L1-L29 | train |
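Since `grid` is dependency-free, a toy fitness function shows the search end to end. The import path follows the `path` column of this entry and assumes the package is importable as installed; the quadratic fitness below is made up and peaks at (0.35, 0.75), which sits exactly on the centres of a step-0.1 grid.

```python
from autofit.optimize.optimizer import grid

def fitness(args):
    x, y = args
    # higher is better; maximum at (0.35, 0.75)
    return -((x - 0.35) ** 2 + (y - 0.75) ** 2)

best = grid(fitness, no_dimensions=2, step_size=0.1)
print(best)   # ~(0.35, 0.75), up to floating-point rounding of the grid centres
```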
rhayes777/PyAutoFit | autofit/optimize/optimizer.py | make_lists | def make_lists(no_dimensions, step_size, centre_steps=True):
"""
Create a list of lists of floats covering every combination across no_dimensions of points of integer step size
between 0 and 1 inclusive.
Parameters
----------
no_dimensions: int
The number of dimensions, that is the length of the lists
step_size: float
The step size
centre_steps: bool
Returns
-------
lists: [[float]]
A list of lists
"""
if no_dimensions == 0:
return [[]]
sub_lists = make_lists(no_dimensions - 1, step_size, centre_steps=centre_steps)
return [[step_size * value + (0.5 * step_size if centre_steps else 0)] + sub_list for value in
range(0, int((1 / step_size))) for sub_list in sub_lists] | python | def make_lists(no_dimensions, step_size, centre_steps=True):
"""
Create a list of lists of floats covering every combination across no_dimensions of points of integer step size
between 0 and 1 inclusive.
Parameters
----------
no_dimensions: int
The number of dimensions, that is the length of the lists
step_size: float
The step size
centre_steps: bool
Returns
-------
lists: [[float]]
A list of lists
"""
if no_dimensions == 0:
return [[]]
sub_lists = make_lists(no_dimensions - 1, step_size, centre_steps=centre_steps)
return [[step_size * value + (0.5 * step_size if centre_steps else 0)] + sub_list for value in
range(0, int((1 / step_size))) for sub_list in sub_lists] | [
"def",
"make_lists",
"(",
"no_dimensions",
",",
"step_size",
",",
"centre_steps",
"=",
"True",
")",
":",
"if",
"no_dimensions",
"==",
"0",
":",
"return",
"[",
"[",
"]",
"]",
"sub_lists",
"=",
"make_lists",
"(",
"no_dimensions",
"-",
"1",
",",
"step_size",
",",
"centre_steps",
"=",
"centre_steps",
")",
"return",
"[",
"[",
"step_size",
"*",
"value",
"+",
"(",
"0.5",
"*",
"step_size",
"if",
"centre_steps",
"else",
"0",
")",
"]",
"+",
"sub_list",
"for",
"value",
"in",
"range",
"(",
"0",
",",
"int",
"(",
"(",
"1",
"/",
"step_size",
")",
")",
")",
"for",
"sub_list",
"in",
"sub_lists",
"]"
]
| Create a list of lists of floats covering every combination across no_dimensions of points of integer step size
between 0 and 1 inclusive.
Parameters
----------
no_dimensions: int
The number of dimensions, that is the length of the lists
step_size: float
The step size
centre_steps: bool
Returns
-------
lists: [[float]]
A list of lists | [
"Create",
"a",
"list",
"of",
"lists",
"of",
"floats",
"covering",
"every",
"combination",
"across",
"no_dimensions",
"of",
"points",
"of",
"integer",
"step",
"size",
"between",
"0",
"and",
"1",
"inclusive",
"."
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/optimizer.py#L32-L55 | train |
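A concrete call makes the recursion in `make_lists` easier to follow: with two dimensions and a step of 0.5 each axis contributes two cell centres, 0.25 and 0.75, so four combinations come back (same import-path assumption as in the `grid` sketch above).

```python
from autofit.optimize.optimizer import make_lists

print(make_lists(2, 0.5))
# [[0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]

print(make_lists(2, 0.5, centre_steps=False))
# [[0.0, 0.0], [0.0, 0.5], [0.5, 0.0], [0.5, 0.5]]
```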
Nic30/hwtGraph | hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py | portCnt | def portCnt(port):
"""
recursively count number of ports without children
"""
if port.children:
return sum(map(lambda p: portCnt(p), port.children))
else:
return 1 | python | def portCnt(port):
"""
recursively count number of ports without children
"""
if port.children:
return sum(map(lambda p: portCnt(p), port.children))
else:
return 1 | [
"def",
"portCnt",
"(",
"port",
")",
":",
"if",
"port",
".",
"children",
":",
"return",
"sum",
"(",
"map",
"(",
"lambda",
"p",
":",
"portCnt",
"(",
"p",
")",
",",
"port",
".",
"children",
")",
")",
"else",
":",
"return",
"1"
]
| recursively count number of ports without children | [
"recursively",
"count",
"number",
"of",
"ports",
"without",
"children"
]
| 6b7d4fdd759f263a0fdd2736f02f123e44e4354f | https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py#L43-L50 | train |
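`portCnt` only needs a `children` attribute, so a stub object is enough to see the leaf-counting recursion. The function body is restated verbatim so the snippet runs without hwtGraph; `StubPort` is a made-up helper.

```python
class StubPort:
    """Minimal stand-in: anything with a .children list works."""
    def __init__(self, children=()):
        self.children = list(children)

def portCnt(port):                 # restated from the entry above
    if port.children:
        return sum(map(lambda p: portCnt(p), port.children))
    else:
        return 1

# one leaf child plus a child that itself has two leaf children -> 3 leaf ports
root = StubPort([StubPort(), StubPort([StubPort(), StubPort()])])
print(portCnt(root))               # 3
```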
Nic30/hwtGraph | hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py | copyPort | def copyPort(port, targetLNode, reverseDir, topPortName=None):
"""
Create identical port on targetNode
"""
newP = _copyPort(port, targetLNode, reverseDir)
if topPortName is not None:
newP.name = topPortName
return newP | python | def copyPort(port, targetLNode, reverseDir, topPortName=None):
"""
Create identical port on targetNode
"""
newP = _copyPort(port, targetLNode, reverseDir)
if topPortName is not None:
newP.name = topPortName
return newP | [
"def",
"copyPort",
"(",
"port",
",",
"targetLNode",
",",
"reverseDir",
",",
"topPortName",
"=",
"None",
")",
":",
"newP",
"=",
"_copyPort",
"(",
"port",
",",
"targetLNode",
",",
"reverseDir",
")",
"if",
"topPortName",
"is",
"not",
"None",
":",
"newP",
".",
"name",
"=",
"topPortName",
"return",
"newP"
]
| Create identical port on targetNode | [
"Create",
"identical",
"port",
"on",
"targetNode"
]
| 6b7d4fdd759f263a0fdd2736f02f123e44e4354f | https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py#L76-L85 | train |
Nic30/hwtGraph | hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py | walkSignalPorts | def walkSignalPorts(rootPort: LPort):
"""
recursively walk ports without any children
"""
if rootPort.children:
for ch in rootPort.children:
yield from walkSignalPorts(ch)
else:
yield rootPort | python | def walkSignalPorts(rootPort: LPort):
"""
recursively walk ports without any children
"""
if rootPort.children:
for ch in rootPort.children:
yield from walkSignalPorts(ch)
else:
yield rootPort | [
"def",
"walkSignalPorts",
"(",
"rootPort",
":",
"LPort",
")",
":",
"if",
"rootPort",
".",
"children",
":",
"for",
"ch",
"in",
"rootPort",
".",
"children",
":",
"yield",
"from",
"walkSignalPorts",
"(",
"ch",
")",
"else",
":",
"yield",
"rootPort"
]
| recursively walk ports without any children | [
"recursively",
"walk",
"ports",
"without",
"any",
"children"
]
| 6b7d4fdd759f263a0fdd2736f02f123e44e4354f | https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/mergeSplitsOnInterfaces.py#L88-L96 | train |
zalando-stups/lizzy-client | lizzy_client/cli.py | agent_error | def agent_error(e: requests.HTTPError, fatal=True):
"""
Prints an agent error and exits
"""
try:
data = e.response.json()
details = data['detail'] # type: str
except JSONDecodeError:
details = e.response.text or str(e.response)
lines = ('[AGENT] {}'.format(line) for line in details.splitlines())
msg = '\n' + '\n'.join(lines)
if fatal:
fatal_error(msg)
else:
error(msg) | python | def agent_error(e: requests.HTTPError, fatal=True):
"""
Prints an agent error and exits
"""
try:
data = e.response.json()
details = data['detail'] # type: str
except JSONDecodeError:
details = e.response.text or str(e.response)
lines = ('[AGENT] {}'.format(line) for line in details.splitlines())
msg = '\n' + '\n'.join(lines)
if fatal:
fatal_error(msg)
else:
error(msg) | [
"def",
"agent_error",
"(",
"e",
":",
"requests",
".",
"HTTPError",
",",
"fatal",
"=",
"True",
")",
":",
"try",
":",
"data",
"=",
"e",
".",
"response",
".",
"json",
"(",
")",
"details",
"=",
"data",
"[",
"'detail'",
"]",
"# type: str",
"except",
"JSONDecodeError",
":",
"details",
"=",
"e",
".",
"response",
".",
"text",
"or",
"str",
"(",
"e",
".",
"response",
")",
"lines",
"=",
"(",
"'[AGENT] {}'",
".",
"format",
"(",
"line",
")",
"for",
"line",
"in",
"details",
".",
"splitlines",
"(",
")",
")",
"msg",
"=",
"'\\n'",
"+",
"'\\n'",
".",
"join",
"(",
"lines",
")",
"if",
"fatal",
":",
"fatal_error",
"(",
"msg",
")",
"else",
":",
"error",
"(",
"msg",
")"
]
| Prints an agent error and exits | [
"Prints",
"an",
"agent",
"error",
"and",
"exits"
]
| 0af9733ca5a25ebd0a9dc1453f2a7592efcee56a | https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L99-L115 | train |
zalando-stups/lizzy-client | lizzy_client/cli.py | parse_stack_refs | def parse_stack_refs(stack_references: List[str]) -> List[str]:
'''
Check if items included in `stack_references` are Senza definition
    file paths or stack name references. If it is a Senza definition file path,
substitute the definition file path by the stack name in the same
position on the list.
'''
stack_names = []
references = list(stack_references)
references.reverse()
while references:
current = references.pop()
# current that might be a file
file_path = os.path.abspath(current)
if os.path.exists(file_path) and os.path.isfile(file_path):
try:
with open(file_path) as fd:
data = yaml.safe_load(fd)
current = data['SenzaInfo']['StackName']
except (KeyError, TypeError, YAMLError):
raise click.UsageError(
'Invalid senza definition {}'.format(current)
)
stack_names.append(current)
return stack_names | python | def parse_stack_refs(stack_references: List[str]) -> List[str]:
'''
Check if items included in `stack_references` are Senza definition
    file paths or stack name references. If it is a Senza definition file path,
substitute the definition file path by the stack name in the same
position on the list.
'''
stack_names = []
references = list(stack_references)
references.reverse()
while references:
current = references.pop()
# current that might be a file
file_path = os.path.abspath(current)
if os.path.exists(file_path) and os.path.isfile(file_path):
try:
with open(file_path) as fd:
data = yaml.safe_load(fd)
current = data['SenzaInfo']['StackName']
except (KeyError, TypeError, YAMLError):
raise click.UsageError(
'Invalid senza definition {}'.format(current)
)
stack_names.append(current)
return stack_names | [
"def",
"parse_stack_refs",
"(",
"stack_references",
":",
"List",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"str",
"]",
":",
"stack_names",
"=",
"[",
"]",
"references",
"=",
"list",
"(",
"stack_references",
")",
"references",
".",
"reverse",
"(",
")",
"while",
"references",
":",
"current",
"=",
"references",
".",
"pop",
"(",
")",
"# current that might be a file",
"file_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"current",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"try",
":",
"with",
"open",
"(",
"file_path",
")",
"as",
"fd",
":",
"data",
"=",
"yaml",
".",
"safe_load",
"(",
"fd",
")",
"current",
"=",
"data",
"[",
"'SenzaInfo'",
"]",
"[",
"'StackName'",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
",",
"YAMLError",
")",
":",
"raise",
"click",
".",
"UsageError",
"(",
"'Invalid senza definition {}'",
".",
"format",
"(",
"current",
")",
")",
"stack_names",
".",
"append",
"(",
"current",
")",
"return",
"stack_names"
]
| Check if items included in `stack_references` are Senza definition
file paths or stack name references. If it is a Senza definition file path,
substitute the definition file path by the stack name in the same
position on the list. | [
"Check",
"if",
"items",
"included",
"in",
"stack_references",
"are",
"Senza",
"definition",
"file",
"paths",
"or",
"stack",
"name",
"reference",
".",
"If",
"Senza",
"definition",
"file",
"path",
"substitute",
"the",
"definition",
"file",
"path",
"by",
"the",
"stack",
"name",
"in",
"the",
"same",
"position",
"on",
"the",
"list",
"."
]
| 0af9733ca5a25ebd0a9dc1453f2a7592efcee56a | https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L147-L171 | train |
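The piece of a Senza definition that `parse_stack_refs` cares about is `SenzaInfo.StackName`. A minimal sketch of just that extraction step, using the same PyYAML call as the function; the YAML content is a made-up example.

```python
import yaml

senza_definition = """
SenzaInfo:
  StackName: hello-world
"""

data = yaml.safe_load(senza_definition)
print(data['SenzaInfo']['StackName'])   # hello-world
# parse_stack_refs substitutes this name for the file path in its result list;
# a missing or malformed SenzaInfo block raises click.UsageError instead.
```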
zalando-stups/lizzy-client | lizzy_client/cli.py | list_stacks | def list_stacks(stack_ref: List[str], all: bool, remote: str, region: str,
watch: int, output: str):
"""List Lizzy stacks"""
lizzy = setup_lizzy_client(remote)
stack_references = parse_stack_refs(stack_ref)
while True:
rows = []
for stack in lizzy.get_stacks(stack_references, region=region):
creation_time = dateutil.parser.parse(stack['creation_time'])
rows.append({'stack_name': stack['stack_name'],
'version': stack['version'],
'status': stack['status'],
'creation_time': creation_time.timestamp(),
'description': stack['description']})
rows.sort(key=lambda x: (x['stack_name'], x['version']))
with OutputFormat(output):
print_table(
'stack_name version status creation_time description'.split(),
rows, styles=STYLES, titles=TITLES)
if watch: # pragma: no cover
time.sleep(watch)
click.clear()
else:
break | python | def list_stacks(stack_ref: List[str], all: bool, remote: str, region: str,
watch: int, output: str):
"""List Lizzy stacks"""
lizzy = setup_lizzy_client(remote)
stack_references = parse_stack_refs(stack_ref)
while True:
rows = []
for stack in lizzy.get_stacks(stack_references, region=region):
creation_time = dateutil.parser.parse(stack['creation_time'])
rows.append({'stack_name': stack['stack_name'],
'version': stack['version'],
'status': stack['status'],
'creation_time': creation_time.timestamp(),
'description': stack['description']})
rows.sort(key=lambda x: (x['stack_name'], x['version']))
with OutputFormat(output):
print_table(
'stack_name version status creation_time description'.split(),
rows, styles=STYLES, titles=TITLES)
if watch: # pragma: no cover
time.sleep(watch)
click.clear()
else:
break | [
"def",
"list_stacks",
"(",
"stack_ref",
":",
"List",
"[",
"str",
"]",
",",
"all",
":",
"bool",
",",
"remote",
":",
"str",
",",
"region",
":",
"str",
",",
"watch",
":",
"int",
",",
"output",
":",
"str",
")",
":",
"lizzy",
"=",
"setup_lizzy_client",
"(",
"remote",
")",
"stack_references",
"=",
"parse_stack_refs",
"(",
"stack_ref",
")",
"while",
"True",
":",
"rows",
"=",
"[",
"]",
"for",
"stack",
"in",
"lizzy",
".",
"get_stacks",
"(",
"stack_references",
",",
"region",
"=",
"region",
")",
":",
"creation_time",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"stack",
"[",
"'creation_time'",
"]",
")",
"rows",
".",
"append",
"(",
"{",
"'stack_name'",
":",
"stack",
"[",
"'stack_name'",
"]",
",",
"'version'",
":",
"stack",
"[",
"'version'",
"]",
",",
"'status'",
":",
"stack",
"[",
"'status'",
"]",
",",
"'creation_time'",
":",
"creation_time",
".",
"timestamp",
"(",
")",
",",
"'description'",
":",
"stack",
"[",
"'description'",
"]",
"}",
")",
"rows",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
"[",
"'stack_name'",
"]",
",",
"x",
"[",
"'version'",
"]",
")",
")",
"with",
"OutputFormat",
"(",
"output",
")",
":",
"print_table",
"(",
"'stack_name version status creation_time description'",
".",
"split",
"(",
")",
",",
"rows",
",",
"styles",
"=",
"STYLES",
",",
"titles",
"=",
"TITLES",
")",
"if",
"watch",
":",
"# pragma: no cover",
"time",
".",
"sleep",
"(",
"watch",
")",
"click",
".",
"clear",
"(",
")",
"else",
":",
"break"
]
| List Lizzy stacks | [
"List",
"Lizzy",
"stacks"
]
| 0af9733ca5a25ebd0a9dc1453f2a7592efcee56a | https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L348-L374 | train |
zalando-stups/lizzy-client | lizzy_client/cli.py | traffic | def traffic(stack_name: str,
stack_version: Optional[str],
percentage: Optional[int],
region: Optional[str],
remote: Optional[str],
output: Optional[str]):
'''Manage stack traffic'''
lizzy = setup_lizzy_client(remote)
if percentage is None:
stack_reference = [stack_name]
with Action('Requesting traffic info..'):
stack_weights = []
for stack in lizzy.get_stacks(stack_reference, region=region):
if stack['status'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']:
stack_id = '{stack_name}-{version}'.format_map(stack)
traffic = lizzy.get_traffic(stack_id, region=region)
stack_weights.append({
'stack_name': stack_name,
'version': stack['version'],
'identifier': stack_id,
'weight%': traffic['weight']
})
cols = 'stack_name version identifier weight%'.split()
with OutputFormat(output):
print_table(cols,
sorted(stack_weights, key=lambda x: x['identifier']))
else:
with Action('Requesting traffic change..'):
stack_id = '{stack_name}-{stack_version}'.format_map(locals())
lizzy.traffic(stack_id, percentage, region=region) | python | def traffic(stack_name: str,
stack_version: Optional[str],
percentage: Optional[int],
region: Optional[str],
remote: Optional[str],
output: Optional[str]):
'''Manage stack traffic'''
lizzy = setup_lizzy_client(remote)
if percentage is None:
stack_reference = [stack_name]
with Action('Requesting traffic info..'):
stack_weights = []
for stack in lizzy.get_stacks(stack_reference, region=region):
if stack['status'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']:
stack_id = '{stack_name}-{version}'.format_map(stack)
traffic = lizzy.get_traffic(stack_id, region=region)
stack_weights.append({
'stack_name': stack_name,
'version': stack['version'],
'identifier': stack_id,
'weight%': traffic['weight']
})
cols = 'stack_name version identifier weight%'.split()
with OutputFormat(output):
print_table(cols,
sorted(stack_weights, key=lambda x: x['identifier']))
else:
with Action('Requesting traffic change..'):
stack_id = '{stack_name}-{stack_version}'.format_map(locals())
lizzy.traffic(stack_id, percentage, region=region) | [
"def",
"traffic",
"(",
"stack_name",
":",
"str",
",",
"stack_version",
":",
"Optional",
"[",
"str",
"]",
",",
"percentage",
":",
"Optional",
"[",
"int",
"]",
",",
"region",
":",
"Optional",
"[",
"str",
"]",
",",
"remote",
":",
"Optional",
"[",
"str",
"]",
",",
"output",
":",
"Optional",
"[",
"str",
"]",
")",
":",
"lizzy",
"=",
"setup_lizzy_client",
"(",
"remote",
")",
"if",
"percentage",
"is",
"None",
":",
"stack_reference",
"=",
"[",
"stack_name",
"]",
"with",
"Action",
"(",
"'Requesting traffic info..'",
")",
":",
"stack_weights",
"=",
"[",
"]",
"for",
"stack",
"in",
"lizzy",
".",
"get_stacks",
"(",
"stack_reference",
",",
"region",
"=",
"region",
")",
":",
"if",
"stack",
"[",
"'status'",
"]",
"in",
"[",
"'CREATE_COMPLETE'",
",",
"'UPDATE_COMPLETE'",
"]",
":",
"stack_id",
"=",
"'{stack_name}-{version}'",
".",
"format_map",
"(",
"stack",
")",
"traffic",
"=",
"lizzy",
".",
"get_traffic",
"(",
"stack_id",
",",
"region",
"=",
"region",
")",
"stack_weights",
".",
"append",
"(",
"{",
"'stack_name'",
":",
"stack_name",
",",
"'version'",
":",
"stack",
"[",
"'version'",
"]",
",",
"'identifier'",
":",
"stack_id",
",",
"'weight%'",
":",
"traffic",
"[",
"'weight'",
"]",
"}",
")",
"cols",
"=",
"'stack_name version identifier weight%'",
".",
"split",
"(",
")",
"with",
"OutputFormat",
"(",
"output",
")",
":",
"print_table",
"(",
"cols",
",",
"sorted",
"(",
"stack_weights",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'identifier'",
"]",
")",
")",
"else",
":",
"with",
"Action",
"(",
"'Requesting traffic change..'",
")",
":",
"stack_id",
"=",
"'{stack_name}-{stack_version}'",
".",
"format_map",
"(",
"locals",
"(",
")",
")",
"lizzy",
".",
"traffic",
"(",
"stack_id",
",",
"percentage",
",",
"region",
"=",
"region",
")"
]
| Manage stack traffic | [
"Manage",
"stack",
"traffic"
]
| 0af9733ca5a25ebd0a9dc1453f2a7592efcee56a | https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L387-L418 | train |
zalando-stups/lizzy-client | lizzy_client/cli.py | scale | def scale(stack_name: str,
stack_version: Optional[str],
new_scale: int,
region: Optional[str],
remote: Optional[str]):
'''Rescale a stack'''
lizzy = setup_lizzy_client(remote)
with Action('Requesting rescale..'):
stack_id = '{stack_name}-{stack_version}'.format_map(locals())
lizzy.scale(stack_id, new_scale, region=region) | python | def scale(stack_name: str,
stack_version: Optional[str],
new_scale: int,
region: Optional[str],
remote: Optional[str]):
'''Rescale a stack'''
lizzy = setup_lizzy_client(remote)
with Action('Requesting rescale..'):
stack_id = '{stack_name}-{stack_version}'.format_map(locals())
lizzy.scale(stack_id, new_scale, region=region) | [
"def",
"scale",
"(",
"stack_name",
":",
"str",
",",
"stack_version",
":",
"Optional",
"[",
"str",
"]",
",",
"new_scale",
":",
"int",
",",
"region",
":",
"Optional",
"[",
"str",
"]",
",",
"remote",
":",
"Optional",
"[",
"str",
"]",
")",
":",
"lizzy",
"=",
"setup_lizzy_client",
"(",
"remote",
")",
"with",
"Action",
"(",
"'Requesting rescale..'",
")",
":",
"stack_id",
"=",
"'{stack_name}-{stack_version}'",
".",
"format_map",
"(",
"locals",
"(",
")",
")",
"lizzy",
".",
"scale",
"(",
"stack_id",
",",
"new_scale",
",",
"region",
"=",
"region",
")"
]
| Rescale a stack | [
"Rescale",
"a",
"stack"
]
| 0af9733ca5a25ebd0a9dc1453f2a7592efcee56a | https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L428-L438 | train |
zalando-stups/lizzy-client | lizzy_client/cli.py | delete | def delete(stack_ref: List[str],
region: str, dry_run: bool, force: bool, remote: str):
"""Delete Cloud Formation stacks"""
lizzy = setup_lizzy_client(remote)
stack_refs = get_stack_refs(stack_ref)
all_with_version = all(stack.version is not None
for stack in stack_refs)
# this is misleading but it's the current behaviour of senza
# TODO Lizzy list (stack_refs) to see if it actually matches more than one stack
# to match senza behaviour
if (not all_with_version and not dry_run and not force):
fatal_error(
'Error: {} matching stacks found. '.format(len(stack_refs)) +
'Please use the "--force" flag if you really want to delete multiple stacks.')
# TODO pass force option to agent
output = ''
for stack in stack_refs:
if stack.version is not None:
stack_id = '{stack.name}-{stack.version}'.format(stack=stack)
else:
stack_id = stack.name
with Action("Requesting stack '{stack_id}' deletion..",
stack_id=stack_id):
output = lizzy.delete(stack_id, region=region, dry_run=dry_run)
print(output) | python | def delete(stack_ref: List[str],
region: str, dry_run: bool, force: bool, remote: str):
"""Delete Cloud Formation stacks"""
lizzy = setup_lizzy_client(remote)
stack_refs = get_stack_refs(stack_ref)
all_with_version = all(stack.version is not None
for stack in stack_refs)
# this is misleading but it's the current behaviour of senza
# TODO Lizzy list (stack_refs) to see if it actually matches more than one stack
# to match senza behaviour
if (not all_with_version and not dry_run and not force):
fatal_error(
'Error: {} matching stacks found. '.format(len(stack_refs)) +
'Please use the "--force" flag if you really want to delete multiple stacks.')
# TODO pass force option to agent
output = ''
for stack in stack_refs:
if stack.version is not None:
stack_id = '{stack.name}-{stack.version}'.format(stack=stack)
else:
stack_id = stack.name
with Action("Requesting stack '{stack_id}' deletion..",
stack_id=stack_id):
output = lizzy.delete(stack_id, region=region, dry_run=dry_run)
print(output) | [
"def",
"delete",
"(",
"stack_ref",
":",
"List",
"[",
"str",
"]",
",",
"region",
":",
"str",
",",
"dry_run",
":",
"bool",
",",
"force",
":",
"bool",
",",
"remote",
":",
"str",
")",
":",
"lizzy",
"=",
"setup_lizzy_client",
"(",
"remote",
")",
"stack_refs",
"=",
"get_stack_refs",
"(",
"stack_ref",
")",
"all_with_version",
"=",
"all",
"(",
"stack",
".",
"version",
"is",
"not",
"None",
"for",
"stack",
"in",
"stack_refs",
")",
"# this is misleading but it's the current behaviour of senza",
"# TODO Lizzy list (stack_refs) to see if it actually matches more than one stack",
"# to match senza behaviour",
"if",
"(",
"not",
"all_with_version",
"and",
"not",
"dry_run",
"and",
"not",
"force",
")",
":",
"fatal_error",
"(",
"'Error: {} matching stacks found. '",
".",
"format",
"(",
"len",
"(",
"stack_refs",
")",
")",
"+",
"'Please use the \"--force\" flag if you really want to delete multiple stacks.'",
")",
"# TODO pass force option to agent",
"output",
"=",
"''",
"for",
"stack",
"in",
"stack_refs",
":",
"if",
"stack",
".",
"version",
"is",
"not",
"None",
":",
"stack_id",
"=",
"'{stack.name}-{stack.version}'",
".",
"format",
"(",
"stack",
"=",
"stack",
")",
"else",
":",
"stack_id",
"=",
"stack",
".",
"name",
"with",
"Action",
"(",
"\"Requesting stack '{stack_id}' deletion..\"",
",",
"stack_id",
"=",
"stack_id",
")",
":",
"output",
"=",
"lizzy",
".",
"delete",
"(",
"stack_id",
",",
"region",
"=",
"region",
",",
"dry_run",
"=",
"dry_run",
")",
"print",
"(",
"output",
")"
]
| Delete Cloud Formation stacks | [
"Delete",
"Cloud",
"Formation",
"stacks"
]
| 0af9733ca5a25ebd0a9dc1453f2a7592efcee56a | https://github.com/zalando-stups/lizzy-client/blob/0af9733ca5a25ebd0a9dc1453f2a7592efcee56a/lizzy_client/cli.py#L449-L478 | train |
unt-libraries/pyuntl | pyuntl/metadata_generator.py | pydict2xml | def pydict2xml(filename, metadata_dict, **kwargs):
"""Create an XML file.
Takes a path to where the XML file should be created
and a metadata dictionary.
"""
try:
f = open(filename, 'w')
f.write(pydict2xmlstring(metadata_dict, **kwargs).encode('utf-8'))
f.close()
except:
raise MetadataGeneratorException(
'Failed to create an XML file. Filename: %s' % (filename)
) | python | def pydict2xml(filename, metadata_dict, **kwargs):
"""Create an XML file.
Takes a path to where the XML file should be created
and a metadata dictionary.
"""
try:
f = open(filename, 'w')
f.write(pydict2xmlstring(metadata_dict, **kwargs).encode('utf-8'))
f.close()
except:
raise MetadataGeneratorException(
'Failed to create an XML file. Filename: %s' % (filename)
) | [
"def",
"pydict2xml",
"(",
"filename",
",",
"metadata_dict",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"f",
"=",
"open",
"(",
"filename",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"pydict2xmlstring",
"(",
"metadata_dict",
",",
"*",
"*",
"kwargs",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"f",
".",
"close",
"(",
")",
"except",
":",
"raise",
"MetadataGeneratorException",
"(",
"'Failed to create an XML file. Filename: %s'",
"%",
"(",
"filename",
")",
")"
]
| Create an XML file.
Takes a path to where the XML file should be created
and a metadata dictionary. | [
"Create",
"an",
"XML",
"file",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/metadata_generator.py#L96-L109 | train |
unt-libraries/pyuntl | pyuntl/metadata_generator.py | pydict2xmlstring | def pydict2xmlstring(metadata_dict, **kwargs):
"""Create an XML string from a metadata dictionary."""
ordering = kwargs.get('ordering', UNTL_XML_ORDER)
root_label = kwargs.get('root_label', 'metadata')
root_namespace = kwargs.get('root_namespace', None)
elements_namespace = kwargs.get('elements_namespace', None)
namespace_map = kwargs.get('namespace_map', None)
root_attributes = kwargs.get('root_attributes', None)
# Set any root namespace and namespace map.
if root_namespace and namespace_map:
root = Element(root_namespace + root_label, nsmap=namespace_map)
elif namespace_map:
root = Element(root_label, nsmap=namespace_map)
else:
root = Element(root_label)
# Set any root element attributes.
if root_attributes:
for key, value in root_attributes.items():
root.attrib[key] = value
# Create an XML structure from field list.
for metadata_key in ordering:
if metadata_key in metadata_dict:
for element in metadata_dict[metadata_key]:
if 'content' in element and 'qualifier' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
attribs={'qualifier': element['qualifier']},
namespace=elements_namespace,
)
elif 'content' in element and 'role' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
attribs={'role': element['role']},
namespace=elements_namespace,
)
elif 'content' in element and 'scheme' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
attribs={'scheme': element['scheme']},
namespace=elements_namespace,
)
elif 'content' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
namespace=elements_namespace,
)
# Create the XML tree.
return '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(
root,
pretty_print=True
) | python | def pydict2xmlstring(metadata_dict, **kwargs):
"""Create an XML string from a metadata dictionary."""
ordering = kwargs.get('ordering', UNTL_XML_ORDER)
root_label = kwargs.get('root_label', 'metadata')
root_namespace = kwargs.get('root_namespace', None)
elements_namespace = kwargs.get('elements_namespace', None)
namespace_map = kwargs.get('namespace_map', None)
root_attributes = kwargs.get('root_attributes', None)
# Set any root namespace and namespace map.
if root_namespace and namespace_map:
root = Element(root_namespace + root_label, nsmap=namespace_map)
elif namespace_map:
root = Element(root_label, nsmap=namespace_map)
else:
root = Element(root_label)
# Set any root element attributes.
if root_attributes:
for key, value in root_attributes.items():
root.attrib[key] = value
# Create an XML structure from field list.
for metadata_key in ordering:
if metadata_key in metadata_dict:
for element in metadata_dict[metadata_key]:
if 'content' in element and 'qualifier' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
attribs={'qualifier': element['qualifier']},
namespace=elements_namespace,
)
elif 'content' in element and 'role' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
attribs={'role': element['role']},
namespace=elements_namespace,
)
elif 'content' in element and 'scheme' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
attribs={'scheme': element['scheme']},
namespace=elements_namespace,
)
elif 'content' in element:
create_dict_subelement(
root,
metadata_key,
element['content'],
namespace=elements_namespace,
)
# Create the XML tree.
return '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(
root,
pretty_print=True
) | [
"def",
"pydict2xmlstring",
"(",
"metadata_dict",
",",
"*",
"*",
"kwargs",
")",
":",
"ordering",
"=",
"kwargs",
".",
"get",
"(",
"'ordering'",
",",
"UNTL_XML_ORDER",
")",
"root_label",
"=",
"kwargs",
".",
"get",
"(",
"'root_label'",
",",
"'metadata'",
")",
"root_namespace",
"=",
"kwargs",
".",
"get",
"(",
"'root_namespace'",
",",
"None",
")",
"elements_namespace",
"=",
"kwargs",
".",
"get",
"(",
"'elements_namespace'",
",",
"None",
")",
"namespace_map",
"=",
"kwargs",
".",
"get",
"(",
"'namespace_map'",
",",
"None",
")",
"root_attributes",
"=",
"kwargs",
".",
"get",
"(",
"'root_attributes'",
",",
"None",
")",
"# Set any root namespace and namespace map.",
"if",
"root_namespace",
"and",
"namespace_map",
":",
"root",
"=",
"Element",
"(",
"root_namespace",
"+",
"root_label",
",",
"nsmap",
"=",
"namespace_map",
")",
"elif",
"namespace_map",
":",
"root",
"=",
"Element",
"(",
"root_label",
",",
"nsmap",
"=",
"namespace_map",
")",
"else",
":",
"root",
"=",
"Element",
"(",
"root_label",
")",
"# Set any root element attributes.",
"if",
"root_attributes",
":",
"for",
"key",
",",
"value",
"in",
"root_attributes",
".",
"items",
"(",
")",
":",
"root",
".",
"attrib",
"[",
"key",
"]",
"=",
"value",
"# Create an XML structure from field list.",
"for",
"metadata_key",
"in",
"ordering",
":",
"if",
"metadata_key",
"in",
"metadata_dict",
":",
"for",
"element",
"in",
"metadata_dict",
"[",
"metadata_key",
"]",
":",
"if",
"'content'",
"in",
"element",
"and",
"'qualifier'",
"in",
"element",
":",
"create_dict_subelement",
"(",
"root",
",",
"metadata_key",
",",
"element",
"[",
"'content'",
"]",
",",
"attribs",
"=",
"{",
"'qualifier'",
":",
"element",
"[",
"'qualifier'",
"]",
"}",
",",
"namespace",
"=",
"elements_namespace",
",",
")",
"elif",
"'content'",
"in",
"element",
"and",
"'role'",
"in",
"element",
":",
"create_dict_subelement",
"(",
"root",
",",
"metadata_key",
",",
"element",
"[",
"'content'",
"]",
",",
"attribs",
"=",
"{",
"'role'",
":",
"element",
"[",
"'role'",
"]",
"}",
",",
"namespace",
"=",
"elements_namespace",
",",
")",
"elif",
"'content'",
"in",
"element",
"and",
"'scheme'",
"in",
"element",
":",
"create_dict_subelement",
"(",
"root",
",",
"metadata_key",
",",
"element",
"[",
"'content'",
"]",
",",
"attribs",
"=",
"{",
"'scheme'",
":",
"element",
"[",
"'scheme'",
"]",
"}",
",",
"namespace",
"=",
"elements_namespace",
",",
")",
"elif",
"'content'",
"in",
"element",
":",
"create_dict_subelement",
"(",
"root",
",",
"metadata_key",
",",
"element",
"[",
"'content'",
"]",
",",
"namespace",
"=",
"elements_namespace",
",",
")",
"# Create the XML tree.",
"return",
"'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'",
"+",
"tostring",
"(",
"root",
",",
"pretty_print",
"=",
"True",
")"
]
| Create an XML string from a metadata dictionary. | [
"Create",
"an",
"XML",
"string",
"from",
"a",
"metadata",
"dictionary",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/metadata_generator.py#L112-L170 | train |
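A minimal usage sketch for pydict2xmlstring; the metadata keys below are illustrative and are assumed to appear in the default UNTL_XML_ORDER:
from pyuntl.metadata_generator import pydict2xmlstring

# Hypothetical metadata dictionary; each key maps to a list of element dicts.
metadata = {
    'title': [{'qualifier': 'officialtitle', 'content': 'Example Item'}],
    'creator': [{'role': 'aut', 'content': 'Doe, Jane'}],
}
# Returns the XML declaration followed by a pretty-printed <metadata> tree.
xml_string = pydict2xmlstring(metadata)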
unt-libraries/pyuntl | pyuntl/metadata_generator.py | create_dict_subelement | def create_dict_subelement(root, subelement, content, **kwargs):
"""Create a XML subelement from a Python dictionary."""
attribs = kwargs.get('attribs', None)
namespace = kwargs.get('namespace', None)
key = subelement
# Add subelement's namespace and attributes.
if namespace and attribs:
subelement = SubElement(root, namespace + subelement, attribs)
elif namespace:
subelement = SubElement(root, namespace + subelement)
elif attribs:
subelement = SubElement(root, subelement, attribs)
# Otherwise, create SubElement without any extra data.
else:
subelement = SubElement(root, subelement)
if not isinstance(content, dict):
subelement.text = content
# Do special case ordering for degree children on etd_ms.
elif key == 'degree':
for degree_order_key in DEGREE_ORDER:
for descriptor, value in content.items():
if descriptor == degree_order_key:
sub_descriptors = SubElement(subelement, descriptor)
sub_descriptors.text = value
else:
for descriptor, value in content.items():
sub_descriptors = SubElement(subelement, descriptor)
sub_descriptors.text = value | python | def create_dict_subelement(root, subelement, content, **kwargs):
"""Create a XML subelement from a Python dictionary."""
attribs = kwargs.get('attribs', None)
namespace = kwargs.get('namespace', None)
key = subelement
# Add subelement's namespace and attributes.
if namespace and attribs:
subelement = SubElement(root, namespace + subelement, attribs)
elif namespace:
subelement = SubElement(root, namespace + subelement)
elif attribs:
subelement = SubElement(root, subelement, attribs)
# Otherwise, create SubElement without any extra data.
else:
subelement = SubElement(root, subelement)
if not isinstance(content, dict):
subelement.text = content
# Do special case ordering for degree children on etd_ms.
elif key == 'degree':
for degree_order_key in DEGREE_ORDER:
for descriptor, value in content.items():
if descriptor == degree_order_key:
sub_descriptors = SubElement(subelement, descriptor)
sub_descriptors.text = value
else:
for descriptor, value in content.items():
sub_descriptors = SubElement(subelement, descriptor)
sub_descriptors.text = value | [
"def",
"create_dict_subelement",
"(",
"root",
",",
"subelement",
",",
"content",
",",
"*",
"*",
"kwargs",
")",
":",
"attribs",
"=",
"kwargs",
".",
"get",
"(",
"'attribs'",
",",
"None",
")",
"namespace",
"=",
"kwargs",
".",
"get",
"(",
"'namespace'",
",",
"None",
")",
"key",
"=",
"subelement",
"# Add subelement's namespace and attributes.",
"if",
"namespace",
"and",
"attribs",
":",
"subelement",
"=",
"SubElement",
"(",
"root",
",",
"namespace",
"+",
"subelement",
",",
"attribs",
")",
"elif",
"namespace",
":",
"subelement",
"=",
"SubElement",
"(",
"root",
",",
"namespace",
"+",
"subelement",
")",
"elif",
"attribs",
":",
"subelement",
"=",
"SubElement",
"(",
"root",
",",
"subelement",
",",
"attribs",
")",
"# Otherwise, create SubElement without any extra data.",
"else",
":",
"subelement",
"=",
"SubElement",
"(",
"root",
",",
"subelement",
")",
"if",
"not",
"isinstance",
"(",
"content",
",",
"dict",
")",
":",
"subelement",
".",
"text",
"=",
"content",
"# Do special case ordering for degree children on etd_ms.",
"elif",
"key",
"==",
"'degree'",
":",
"for",
"degree_order_key",
"in",
"DEGREE_ORDER",
":",
"for",
"descriptor",
",",
"value",
"in",
"content",
".",
"items",
"(",
")",
":",
"if",
"descriptor",
"==",
"degree_order_key",
":",
"sub_descriptors",
"=",
"SubElement",
"(",
"subelement",
",",
"descriptor",
")",
"sub_descriptors",
".",
"text",
"=",
"value",
"else",
":",
"for",
"descriptor",
",",
"value",
"in",
"content",
".",
"items",
"(",
")",
":",
"sub_descriptors",
"=",
"SubElement",
"(",
"subelement",
",",
"descriptor",
")",
"sub_descriptors",
".",
"text",
"=",
"value"
]
| Create an XML subelement from a Python dictionary. | [
"Create",
"a",
"XML",
"subelement",
"from",
"a",
"Python",
"dictionary",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/metadata_generator.py#L173-L201 | train |
unt-libraries/pyuntl | pyuntl/metadata_generator.py | highwiredict2xmlstring | def highwiredict2xmlstring(highwire_elements, ordering=HIGHWIRE_ORDER):
"""Create an XML string from the highwire data dictionary."""
# Sort the elements by the ordering list.
highwire_elements.sort(key=lambda obj: ordering.index(obj.name))
root = Element('metadata')
for element in highwire_elements:
attribs = {'name': element.name, 'content': element.content}
SubElement(root, 'meta', attribs)
# Create the XML tree.
return '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(
root,
pretty_print=True
) | python | def highwiredict2xmlstring(highwire_elements, ordering=HIGHWIRE_ORDER):
"""Create an XML string from the highwire data dictionary."""
# Sort the elements by the ordering list.
highwire_elements.sort(key=lambda obj: ordering.index(obj.name))
root = Element('metadata')
for element in highwire_elements:
attribs = {'name': element.name, 'content': element.content}
SubElement(root, 'meta', attribs)
# Create the XML tree.
return '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(
root,
pretty_print=True
) | [
"def",
"highwiredict2xmlstring",
"(",
"highwire_elements",
",",
"ordering",
"=",
"HIGHWIRE_ORDER",
")",
":",
"# Sort the elements by the ordering list.",
"highwire_elements",
".",
"sort",
"(",
"key",
"=",
"lambda",
"obj",
":",
"ordering",
".",
"index",
"(",
"obj",
".",
"name",
")",
")",
"root",
"=",
"Element",
"(",
"'metadata'",
")",
"for",
"element",
"in",
"highwire_elements",
":",
"attribs",
"=",
"{",
"'name'",
":",
"element",
".",
"name",
",",
"'content'",
":",
"element",
".",
"content",
"}",
"SubElement",
"(",
"root",
",",
"'meta'",
",",
"attribs",
")",
"# Create the XML tree.",
"return",
"'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'",
"+",
"tostring",
"(",
"root",
",",
"pretty_print",
"=",
"True",
")"
]
| Create an XML string from the highwire data dictionary. | [
"Create",
"an",
"XML",
"string",
"from",
"the",
"highwire",
"data",
"dictionary",
"."
]
| f92413302897dab948aac18ee9e482ace0187bd4 | https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/metadata_generator.py#L204-L216 | train |
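A usage sketch for highwiredict2xmlstring; HighwireElement below is an illustrative stand-in for whatever element objects pyuntl builds (anything with name and content attributes whose names appear in HIGHWIRE_ORDER):
from pyuntl.metadata_generator import highwiredict2xmlstring

class HighwireElement(object):
    """Illustrative stand-in for a highwire element object."""
    def __init__(self, name, content):
        self.name = name
        self.content = content

elements = [HighwireElement('citation_title', 'Example Item'),
            HighwireElement('citation_author', 'Doe, Jane')]
# Produces a <metadata> root containing one <meta name="..." content="..."/> per element.
xml_string = highwiredict2xmlstring(elements)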
geophysics-ubonn/crtomo_tools | lib/crtomo/binaries.py | get | def get(binary_name):
"""return a valid path to the given binary. Return an error if no existing
binary can be found.
Parameters
----------
binary_name: string
a binary name used as a key in the 'binaries' dictionary above
Return
------
string
full path to binary
"""
if binary_name not in binaries:
raise Exception('binary_name: {0} not found'.format(binary_name))
system = platform.system()
binary_list = binaries[binary_name][system]
# check list for a valid entry
for filename in binary_list:
valid_file = shutil.which(filename)
if valid_file:
return os.path.abspath(valid_file) | python | def get(binary_name):
"""return a valid path to the given binary. Return an error if no existing
binary can be found.
Parameters
----------
binary_name: string
a binary name used as a key in the 'binaries' dictionary above
Return
------
string
full path to binary
"""
if binary_name not in binaries:
raise Exception('binary_name: {0} not found'.format(binary_name))
system = platform.system()
binary_list = binaries[binary_name][system]
# check list for a valid entry
for filename in binary_list:
valid_file = shutil.which(filename)
if valid_file:
return os.path.abspath(valid_file) | [
"def",
"get",
"(",
"binary_name",
")",
":",
"if",
"binary_name",
"not",
"in",
"binaries",
":",
"raise",
"Exception",
"(",
"'binary_name: {0} not found'",
".",
"format",
"(",
"binary_name",
")",
")",
"system",
"=",
"platform",
".",
"system",
"(",
")",
"binary_list",
"=",
"binaries",
"[",
"binary_name",
"]",
"[",
"system",
"]",
"# check list for a valid entry",
"for",
"filename",
"in",
"binary_list",
":",
"valid_file",
"=",
"shutil",
".",
"which",
"(",
"filename",
")",
"if",
"valid_file",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"valid_file",
")"
]
| return a valid path to the given binary. Return an error if no existing
binary can be found.
Parameters
----------
binary_name: string
a binary name used as a key in the 'binaries' dictionary above
Return
------
string
full path to binary | [
"return",
"a",
"valid",
"path",
"to",
"the",
"given",
"binary",
".",
"Return",
"an",
"error",
"if",
"no",
"existing",
"binary",
"can",
"be",
"found",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/binaries.py#L79-L103 | train |
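A usage sketch; the binary key is illustrative and must match a key of the module-level 'binaries' dictionary:
from crtomo import binaries

# Resolve the first matching executable for the current platform.
path_to_binary = binaries.get('CRTomo')  # 'CRTomo' is a hypothetical key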
JawboneHealth/jhhalchemy | jhhalchemy/migrate.py | get_upgrade_lock | def get_upgrade_lock(dbname, connect_str, timeout=LOCK_TIMEOUT):
"""
Wait until you can get the lock, then yield it, and eventually release it.
Inspired by: http://arr.gr/blog/2016/05/mysql-named-locks-in-python-context-managers/
:param dbname: database to upgrade
:param connect_str: connection string to the database
:param timeout: how long to wait between tries for the lock, default 5 seconds
"""
#
# Open connection and try to get the lock
#
engine = sqlalchemy.create_engine(connect_str)
cursor = engine.execute("SELECT GET_LOCK('upgrade_{}', {})".format(dbname, timeout))
lock = cursor.scalar()
cursor.close()
#
# Keep trying until you get it.
#
while not lock:
logger.info('Cannot acquire {} upgrade lock. Sleeping {} seconds.'.format(dbname, timeout))
time.sleep(timeout)
cursor = engine.execute("SELECT GET_LOCK('upgrade_{}', {})".format(dbname, timeout))
lock = cursor.scalar()
cursor.close()
logger.info('Acquired {} upgrade lock'.format(dbname))
yield lock
#
# Release the lock and close the connection.
#
cursor = engine.execute("SELECT RELEASE_LOCK('upgrade_{}')".format(dbname))
cursor.close()
engine.dispose()
logger.info('Released {} upgrade lock'.format(dbname)) | python | def get_upgrade_lock(dbname, connect_str, timeout=LOCK_TIMEOUT):
"""
Wait until you can get the lock, then yield it, and eventually release it.
Inspired by: http://arr.gr/blog/2016/05/mysql-named-locks-in-python-context-managers/
:param dbname: database to upgrade
:param connect_str: connection string to the database
:param timeout: how long to wait between tries for the lock, default 5 seconds
"""
#
# Open connection and try to get the lock
#
engine = sqlalchemy.create_engine(connect_str)
cursor = engine.execute("SELECT GET_LOCK('upgrade_{}', {})".format(dbname, timeout))
lock = cursor.scalar()
cursor.close()
#
# Keep trying until you get it.
#
while not lock:
logger.info('Cannot acquire {} upgrade lock. Sleeping {} seconds.'.format(dbname, timeout))
time.sleep(timeout)
cursor = engine.execute("SELECT GET_LOCK('upgrade_{}', {})".format(dbname, timeout))
lock = cursor.scalar()
cursor.close()
logger.info('Acquired {} upgrade lock'.format(dbname))
yield lock
#
# Release the lock and close the connection.
#
cursor = engine.execute("SELECT RELEASE_LOCK('upgrade_{}')".format(dbname))
cursor.close()
engine.dispose()
logger.info('Released {} upgrade lock'.format(dbname)) | [
"def",
"get_upgrade_lock",
"(",
"dbname",
",",
"connect_str",
",",
"timeout",
"=",
"LOCK_TIMEOUT",
")",
":",
"#",
"# Open connection and try to get the lock",
"#",
"engine",
"=",
"sqlalchemy",
".",
"create_engine",
"(",
"connect_str",
")",
"cursor",
"=",
"engine",
".",
"execute",
"(",
"\"SELECT GET_LOCK('upgrade_{}', {})\"",
".",
"format",
"(",
"dbname",
",",
"timeout",
")",
")",
"lock",
"=",
"cursor",
".",
"scalar",
"(",
")",
"cursor",
".",
"close",
"(",
")",
"#",
"# Keep trying until you get it.",
"#",
"while",
"not",
"lock",
":",
"logger",
".",
"info",
"(",
"'Cannot acquire {} upgrade lock. Sleeping {} seconds.'",
".",
"format",
"(",
"dbname",
",",
"timeout",
")",
")",
"time",
".",
"sleep",
"(",
"timeout",
")",
"cursor",
"=",
"engine",
".",
"execute",
"(",
"\"SELECT GET_LOCK('upgrade_{}', {})\"",
".",
"format",
"(",
"dbname",
",",
"timeout",
")",
")",
"lock",
"=",
"cursor",
".",
"scalar",
"(",
")",
"cursor",
".",
"close",
"(",
")",
"logger",
".",
"info",
"(",
"'Acquired {} upgrade lock'",
".",
"format",
"(",
"dbname",
")",
")",
"yield",
"lock",
"#",
"# Release the lock and close the connection.",
"#",
"cursor",
"=",
"engine",
".",
"execute",
"(",
"\"SELECT RELEASE_LOCK('upgrade_{}')\"",
".",
"format",
"(",
"dbname",
")",
")",
"cursor",
".",
"close",
"(",
")",
"engine",
".",
"dispose",
"(",
")",
"logger",
".",
"info",
"(",
"'Released {} upgrade lock'",
".",
"format",
"(",
"dbname",
")",
")"
]
| Wait until you can get the lock, then yield it, and eventually release it.
Inspired by: http://arr.gr/blog/2016/05/mysql-named-locks-in-python-context-managers/
:param dbname: database to upgrade
:param connect_str: connection string to the database
:param timeout: how long to wait between tries for the lock, default 5 seconds | [
"Wait",
"until",
"you",
"can",
"get",
"the",
"lock",
"then",
"yield",
"it",
"and",
"eventually",
"release",
"it",
"."
]
| ca0011d644e404561a142c9d7f0a8a569f1f4f27 | https://github.com/JawboneHealth/jhhalchemy/blob/ca0011d644e404561a142c9d7f0a8a569f1f4f27/jhhalchemy/migrate.py#L25-L61 | train |
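A usage sketch, assuming get_upgrade_lock is exposed as a context manager (it yields exactly once, e.g. via contextlib.contextmanager) and using an illustrative connection string:
from jhhalchemy import migrate

connect_str = 'mysql://user:password@localhost/mydb'
with migrate.get_upgrade_lock('mydb', connect_str, timeout=10):
    # Work done here runs while holding the MySQL named lock 'upgrade_mydb'.
    apply_schema_changes()  # hypothetical helper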
JawboneHealth/jhhalchemy | jhhalchemy/migrate.py | upgrade | def upgrade(dbname, connect_str, alembic_conf):
"""
Get the database's upgrade lock and run alembic.
:param dbname: Name of the database to upgrade/create
:param connect_str: Connection string to the database (usually Flask's SQLALCHEMY_DATABASE_URI)
:param alembic_conf: location of alembic.ini
"""
#
# The db has to exist before we can get the lock. On the off-chance that another process creates the db between
# checking if it exists and running the create, ignore the exception.
#
if not sqlalchemy_utils.database_exists(connect_str):
logger.info('Creating {}'.format(dbname))
try:
sqlalchemy_utils.create_database(connect_str)
except sqlalchemy.exc.ProgrammingError as exc:
if not sqlalchemy_utils.database_exists(connect_str):
logger.error('Could not create {}'.format(dbname))
raise exc
with get_upgrade_lock(dbname, connect_str):
alembic_config = alembic.config.Config(
alembic_conf,
attributes={'configure_logger': False})
logger.info('Upgrading {} to head'.format(dbname))
alembic.command.upgrade(alembic_config, 'head') | python | def upgrade(dbname, connect_str, alembic_conf):
"""
Get the database's upgrade lock and run alembic.
:param dbname: Name of the database to upgrade/create
:param connect_str: Connection string to the database (usually Flask's SQLALCHEMY_DATABASE_URI)
:param alembic_conf: location of alembic.ini
"""
#
# The db has to exist before we can get the lock. On the off-chance that another process creates the db between
# checking if it exists and running the create, ignore the exception.
#
if not sqlalchemy_utils.database_exists(connect_str):
logger.info('Creating {}'.format(dbname))
try:
sqlalchemy_utils.create_database(connect_str)
except sqlalchemy.exc.ProgrammingError as exc:
if not sqlalchemy_utils.database_exists(connect_str):
logger.error('Could not create {}'.format(dbname))
raise exc
with get_upgrade_lock(dbname, connect_str):
alembic_config = alembic.config.Config(
alembic_conf,
attributes={'configure_logger': False})
logger.info('Upgrading {} to head'.format(dbname))
alembic.command.upgrade(alembic_config, 'head') | [
"def",
"upgrade",
"(",
"dbname",
",",
"connect_str",
",",
"alembic_conf",
")",
":",
"#",
"# The db has to exist before we can get the lock. On the off-chance that another process creates the db between",
"# checking if it exists and running the create, ignore the exception.",
"#",
"if",
"not",
"sqlalchemy_utils",
".",
"database_exists",
"(",
"connect_str",
")",
":",
"logger",
".",
"info",
"(",
"'Creating {}'",
".",
"format",
"(",
"dbname",
")",
")",
"try",
":",
"sqlalchemy_utils",
".",
"create_database",
"(",
"connect_str",
")",
"except",
"sqlalchemy",
".",
"exc",
".",
"ProgrammingError",
"as",
"exc",
":",
"if",
"not",
"sqlalchemy_utils",
".",
"database_exists",
"(",
"connect_str",
")",
":",
"logger",
".",
"error",
"(",
"'Could not create {}'",
".",
"format",
"(",
"dbname",
")",
")",
"raise",
"exc",
"with",
"get_upgrade_lock",
"(",
"dbname",
",",
"connect_str",
")",
":",
"alembic_config",
"=",
"alembic",
".",
"config",
".",
"Config",
"(",
"alembic_conf",
",",
"attributes",
"=",
"{",
"'configure_logger'",
":",
"False",
"}",
")",
"logger",
".",
"info",
"(",
"'Upgrading {} to head'",
".",
"format",
"(",
"dbname",
")",
")",
"alembic",
".",
"command",
".",
"upgrade",
"(",
"alembic_config",
",",
"'head'",
")"
]
| Get the database's upgrade lock and run alembic.
:param dbname: Name of the database to upgrade/create
:param connect_str: Connection string to the database (usually Flask's SQLALCHEMY_DATABASE_URI)
:param alembic_conf: location of alembic.ini | [
"Get",
"the",
"database",
"s",
"upgrade",
"lock",
"and",
"run",
"alembic",
"."
]
| ca0011d644e404561a142c9d7f0a8a569f1f4f27 | https://github.com/JawboneHealth/jhhalchemy/blob/ca0011d644e404561a142c9d7f0a8a569f1f4f27/jhhalchemy/migrate.py#L64-L90 | train |
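A usage sketch with illustrative values; alembic_conf points at the project's alembic.ini:
from jhhalchemy import migrate

migrate.upgrade(
    dbname='mydb',
    connect_str='mysql://user:password@localhost/mydb',
    alembic_conf='/path/to/alembic.ini',
)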
geophysics-ubonn/crtomo_tools | lib/crtomo/cfg.py | crtomo_config.write_to_file | def write_to_file(self, filename):
""" Write the configuration to a file. Use the correct order of values.
"""
fid = open(filename, 'w')
for key in self.key_order:
if(key == -1):
fid.write('\n')
else:
fid.write('{0}\n'.format(self[key]))
fid.close() | python | def write_to_file(self, filename):
""" Write the configuration to a file. Use the correct order of values.
"""
fid = open(filename, 'w')
for key in self.key_order:
if(key == -1):
fid.write('\n')
else:
fid.write('{0}\n'.format(self[key]))
fid.close() | [
"def",
"write_to_file",
"(",
"self",
",",
"filename",
")",
":",
"fid",
"=",
"open",
"(",
"filename",
",",
"'w'",
")",
"for",
"key",
"in",
"self",
".",
"key_order",
":",
"if",
"(",
"key",
"==",
"-",
"1",
")",
":",
"fid",
".",
"write",
"(",
"'\\n'",
")",
"else",
":",
"fid",
".",
"write",
"(",
"'{0}\\n'",
".",
"format",
"(",
"self",
"[",
"key",
"]",
")",
")",
"fid",
".",
"close",
"(",
")"
]
| Write the configuration to a file. Use the correct order of values. | [
"Write",
"the",
"configuration",
"to",
"a",
"file",
".",
"Use",
"the",
"correct",
"order",
"of",
"values",
"."
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/cfg.py#L216-L227 | train |
gofed/gofedlib | gofedlib/go/importpath/parser.py | ImportPathParser.parse | def parse(self, importpath):
"""Parse import path. Determine if the path is native or starts with known prefix.
:param importpath: import path to parse
:type importpath: str
:return: self
"""
# reset default values
self.native = False
self._prefix = ""
self._package = ""
url = re.sub(r'http://', '', importpath)
url = re.sub(r'https://', '', url)
# is import path native package?
if url.split('/')[0] in self.native_packages["packages"]:
self.native = True
return self
for regex in self.known_ipprefixes:
match = re.search(regex, url)
if match:
self._prefix = match.group(1)
if match.group(3):
self._package = match.group(3)
return self
raise ValueError("Import path prefix for '%s' not recognized" % importpath) | python | def parse(self, importpath):
"""Parse import path. Determine if the path is native or starts with known prefix.
:param importpath: import path to parse
:type importpath: str
:return: self
"""
# reset default values
self.native = False
self._prefix = ""
self._package = ""
url = re.sub(r'http://', '', importpath)
url = re.sub(r'https://', '', url)
# is import path native package?
if url.split('/')[0] in self.native_packages["packages"]:
self.native = True
return self
for regex in self.known_ipprefixes:
match = re.search(regex, url)
if match:
self._prefix = match.group(1)
if match.group(3):
self._package = match.group(3)
return self
raise ValueError("Import path prefix for '%s' not recognized" % importpath) | [
"def",
"parse",
"(",
"self",
",",
"importpath",
")",
":",
"# reset default values",
"self",
".",
"native",
"=",
"False",
"self",
".",
"_prefix",
"=",
"\"\"",
"self",
".",
"_package",
"=",
"\"\"",
"url",
"=",
"re",
".",
"sub",
"(",
"r'http://'",
",",
"''",
",",
"importpath",
")",
"url",
"=",
"re",
".",
"sub",
"(",
"r'https://'",
",",
"''",
",",
"url",
")",
"# is import path native package?",
"if",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"in",
"self",
".",
"native_packages",
"[",
"\"packages\"",
"]",
":",
"self",
".",
"native",
"=",
"True",
"return",
"self",
"for",
"regex",
"in",
"self",
".",
"known_ipprefixes",
":",
"match",
"=",
"re",
".",
"search",
"(",
"regex",
",",
"url",
")",
"if",
"match",
":",
"self",
".",
"_prefix",
"=",
"match",
".",
"group",
"(",
"1",
")",
"if",
"match",
".",
"group",
"(",
"3",
")",
":",
"self",
".",
"_package",
"=",
"match",
".",
"group",
"(",
"3",
")",
"return",
"self",
"raise",
"ValueError",
"(",
"\"Import path prefix for '%s' not recognized\"",
"%",
"importpath",
")"
]
| Parse import path. Determine if the path is native or starts with known prefix.
:param importpath: import path to parse
:type importpath: str
:return: self | [
"Parse",
"import",
"path",
".",
"Determine",
"if",
"the",
"path",
"is",
"native",
"or",
"starts",
"with",
"known",
"prefix",
"."
]
| 0674c248fe3d8706f98f912996b65af469f96b10 | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/go/importpath/parser.py#L22-L50 | train |
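A usage sketch; construction of ImportPathParser (its native-package list and known prefix regexes) is configured elsewhere in gofedlib and a no-argument constructor is assumed here:
from gofedlib.go.importpath.parser import ImportPathParser

parser = ImportPathParser()
parser.parse('fmt')                         # standard-library path
print(parser.native)                        # expected: True
parser.parse('github.com/gofed/gofedlib')   # recognized prefix; native stays False
# An unrecognized prefix such as 'example.org/unknown/project' raises ValueError.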
albert12132/templar | templar/markdown.py | sub_retab | def sub_retab(match):
r"""Remove all tabs and convert them into spaces.
PARAMETERS:
match -- regex match; uses re_retab pattern: \1 is text before tab,
\2 is a consecutive string of tabs.
A simple substitution of 4 spaces would result in the following:
to\tlive # original
to live # simple substitution
Instead, we convert tabs like the following:
to\tlive # original
to live # the tab *looks* like two spaces, so we convert
# it to two spaces
"""
before = match.group(1)
tabs = len(match.group(2))
return before + (' ' * (TAB_SIZE * tabs - len(before) % TAB_SIZE)) | python | def sub_retab(match):
r"""Remove all tabs and convert them into spaces.
PARAMETERS:
match -- regex match; uses re_retab pattern: \1 is text before tab,
\2 is a consecutive string of tabs.
A simple substitution of 4 spaces would result in the following:
to\tlive # original
to live # simple substitution
Instead, we convert tabs like the following:
to\tlive # original
to live # the tab *looks* like two spaces, so we convert
# it to two spaces
"""
before = match.group(1)
tabs = len(match.group(2))
return before + (' ' * (TAB_SIZE * tabs - len(before) % TAB_SIZE)) | [
"def",
"sub_retab",
"(",
"match",
")",
":",
"before",
"=",
"match",
".",
"group",
"(",
"1",
")",
"tabs",
"=",
"len",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"return",
"before",
"+",
"(",
"' '",
"*",
"(",
"TAB_SIZE",
"*",
"tabs",
"-",
"len",
"(",
"before",
")",
"%",
"TAB_SIZE",
")",
")"
]
| r"""Remove all tabs and convert them into spaces.
PARAMETERS:
match -- regex match; uses re_retab pattern: \1 is text before tab,
\2 is a consecutive string of tabs.
A simple substitution of 4 spaces would result in the following:
to\tlive # original
to live # simple substitution
Instead, we convert tabs like the following:
to\tlive # original
to live # the tab *looks* like two spaces, so we convert
# it to two spaces | [
"r",
"Remove",
"all",
"tabs",
"and",
"convert",
"them",
"into",
"spaces",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L81-L101 | train |
albert12132/templar | templar/markdown.py | handle_whitespace | def handle_whitespace(text):
r"""Handles whitespace cleanup.
Tabs are "smartly" retabbed (see sub_retab). Lines that contain
only whitespace are truncated to a single newline.
"""
text = re_retab.sub(sub_retab, text)
text = re_whitespace.sub('', text).strip()
return text | python | def handle_whitespace(text):
r"""Handles whitespace cleanup.
Tabs are "smartly" retabbed (see sub_retab). Lines that contain
only whitespace are truncated to a single newline.
"""
text = re_retab.sub(sub_retab, text)
text = re_whitespace.sub('', text).strip()
return text | [
"def",
"handle_whitespace",
"(",
"text",
")",
":",
"text",
"=",
"re_retab",
".",
"sub",
"(",
"sub_retab",
",",
"text",
")",
"text",
"=",
"re_whitespace",
".",
"sub",
"(",
"''",
",",
"text",
")",
".",
"strip",
"(",
")",
"return",
"text"
]
| r"""Handles whitespace cleanup.
Tabs are "smartly" retabbed (see sub_retab). Lines that contain
only whitespace are truncated to a single newline. | [
"r",
"Handles",
"whitespace",
"cleanup",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L103-L111 | train |
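An illustration of the "smart" retabbing described above, assuming TAB_SIZE is 4 and the templar regexes behave as documented:
from templar.markdown import handle_whitespace

raw = "to\tlive\n   \nnext line"
cleaned = handle_whitespace(raw)
# "to\tlive" is expected to become "to  live" (the tab expands only to the next
# 4-column stop), and the whitespace-only line collapses.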
albert12132/templar | templar/markdown.py | get_variables | def get_variables(text):
"""Extracts variables that can be used in templating engines.
Each variable is defined on a single line in the following way:
~ var: text
The ~ must be at the start of a newline, followed by at least one
space. var can be any sequence of characters that does not contain
a ":". text can be any sequence of characters.
RETURNS:
text -- str; text with all variable definitions removed
variables -- dict; variable to value mappings
"""
variables = {var: value for var, value in re_vars.findall(text)}
text = re_vars.sub('', text)
return text, variables | python | def get_variables(text):
"""Extracts variables that can be used in templating engines.
Each variable is defined on a single line in the following way:
~ var: text
The ~ must be at the start of a newline, followed by at least one
space. var can be any sequence of characters that does not contain
a ":". text can be any sequence of characters.
RETURNS:
text -- str; text with all variable definitions removed
variables -- dict; variable to value mappings
"""
variables = {var: value for var, value in re_vars.findall(text)}
text = re_vars.sub('', text)
return text, variables | [
"def",
"get_variables",
"(",
"text",
")",
":",
"variables",
"=",
"{",
"var",
":",
"value",
"for",
"var",
",",
"value",
"in",
"re_vars",
".",
"findall",
"(",
"text",
")",
"}",
"text",
"=",
"re_vars",
".",
"sub",
"(",
"''",
",",
"text",
")",
"return",
"text",
",",
"variables"
]
| Extracts variables that can be used in templating engines.
Each variable is defined on a single line in the following way:
~ var: text
The ~ must be at the start of a newline, followed by at least one
space. var can be any sequence of characters that does not contain
a ":". text can be any sequence of characters.
RETURNS:
text -- str; text with all variable definitions removed
variables -- dict; variable to value mappings | [
"Extracts",
"variables",
"that",
"can",
"be",
"used",
"in",
"templating",
"engines",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L120-L137 | train |
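A usage sketch of the "~ var: value" syntax described above:
from templar.markdown import get_variables

source = "~ title: My Page\n~ author: Jane Doe\n\nBody text here."
text, variables = get_variables(source)
# variables is expected to map 'title' and 'author' to their values,
# and text keeps only "Body text here." with the definition lines stripped.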
albert12132/templar | templar/markdown.py | get_references | def get_references(text):
"""Retrieves all link references within the text.
Link references can be defined anywhere in the text, and look like
this:
[id]: www.example.com "optional title"
A link (either <a> or <img>) can then refer to the link reference:
[this is a link][id]
Link IDs are case insensitive. Link references are also removed
from the text after they have been retrieved.
RETURNS:
text -- str; text with all link labels removed
references -- dict; link ids to (URL, title), where title is the
empty string if it is omitted.
"""
references = {}
for ref_id, link, _, title in re_references.findall(text):
ref_id = re.sub(r'<(.*?)>', r'\1', ref_id).lower().strip()
references[ref_id] = (link, title)
text = re_references.sub('', text)
return text, references | python | def get_references(text):
"""Retrieves all link references within the text.
Link references can be defined anywhere in the text, and look like
this:
[id]: www.example.com "optional title"
A link (either <a> or <img>) can then refer to the link reference:
[this is a link][id]
Link IDs are case insensitive. Link references are also removed
from the text after they have been retrieved.
RETURNS:
text -- str; text with all link labels removed
references -- dict; link ids to (URL, title), where title is the
empty string if it is omitted.
"""
references = {}
for ref_id, link, _, title in re_references.findall(text):
ref_id = re.sub(r'<(.*?)>', r'\1', ref_id).lower().strip()
references[ref_id] = (link, title)
text = re_references.sub('', text)
return text, references | [
"def",
"get_references",
"(",
"text",
")",
":",
"references",
"=",
"{",
"}",
"for",
"ref_id",
",",
"link",
",",
"_",
",",
"title",
"in",
"re_references",
".",
"findall",
"(",
"text",
")",
":",
"ref_id",
"=",
"re",
".",
"sub",
"(",
"r'<(.*?)>'",
",",
"r'\\1'",
",",
"ref_id",
")",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"references",
"[",
"ref_id",
"]",
"=",
"(",
"link",
",",
"title",
")",
"text",
"=",
"re_references",
".",
"sub",
"(",
"''",
",",
"text",
")",
"return",
"text",
",",
"references"
]
| Retrieves all link references within the text.
Link references can be defined anywhere in the text, and look like
this:
[id]: www.example.com "optional title"
A link (either <a> or <img>) can then refer to the link reference:
[this is a link][id]
Link IDs are case insensitive. Link references are also removed
from the text after they have been retrieved.
RETURNS:
text -- str; text with all link labels removed
references -- dict; link ids to (URL, title), where title is the
empty string if it is omitted. | [
"Retrieves",
"all",
"link",
"references",
"within",
"the",
"text",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L155-L180 | train |
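A usage sketch of a reference-style link definition being collected and stripped:
from templar.markdown import get_references

source = 'See [the docs][py].\n\n[py]: https://www.python.org "Python home"\n'
text, references = get_references(source)
# references is expected to contain {'py': ('https://www.python.org', 'Python home')},
# and the definition line is removed from text while the inline usage remains.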
albert12132/templar | templar/markdown.py | get_footnote_backreferences | def get_footnote_backreferences(text, markdown_obj):
"""Retrieves all footnote backreferences within the text.
Footnote backreferences can be defined anywhere in the text, and
look like this:
[^id]: text
The corresponding footnote reference can then be placed anywhere in
the text
This is some text.[^id]
Footnote IDs are case insensitive. Footnote references are also
removed from the text after they have been retrieved.
RETURNS:
text -- str; text with all footnote backreference definitions removed
footnotes -- OrderedDict; footnote ids mapped to their footnote text
"""
footnotes = OrderedDict()
for footnote_id, footnote in re_footnote_backreferences.findall(text):
footnote_id = re.sub(r'<(.*?)>', r'\1', footnote_id).lower().strip()
footnote = re.sub(r'^[ ]{0,4}', '', footnote, flags=re.M)
footnotes[footnote_id] = footnote
text = re_footnote_backreferences.sub('', text)
return text, footnotes | python | def get_footnote_backreferences(text, markdown_obj):
"""Retrieves all footnote backreferences within the text.
Footnote backreferences can be defined anywhere in the text, and
look like this:
[^id]: text
The corresponding footnote reference can then be placed anywhere in
the text
This is some text.[^id]
Footnote IDs are case insensitive. Footnote references are also
removed from the text after they have been retrieved.
RETURNS:
text -- str; text with all footnote backreference definitions removed
footnotes -- OrderedDict; footnote ids mapped to their footnote text
"""
footnotes = OrderedDict()
for footnote_id, footnote in re_footnote_backreferences.findall(text):
footnote_id = re.sub(r'<(.*?)>', r'\1', footnote_id).lower().strip()
footnote = re.sub(r'^[ ]{0,4}', '', footnote, flags=re.M)
footnotes[footnote_id] = footnote
text = re_footnote_backreferences.sub('', text)
return text, footnotes | [
"def",
"get_footnote_backreferences",
"(",
"text",
",",
"markdown_obj",
")",
":",
"footnotes",
"=",
"OrderedDict",
"(",
")",
"for",
"footnote_id",
",",
"footnote",
"in",
"re_footnote_backreferences",
".",
"findall",
"(",
"text",
")",
":",
"footnote_id",
"=",
"re",
".",
"sub",
"(",
"r'<(.*?)>'",
",",
"r'\\1'",
",",
"footnote_id",
")",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"footnote",
"=",
"re",
".",
"sub",
"(",
"r'^[ ]{0,4}'",
",",
"''",
",",
"footnote",
",",
"flags",
"=",
"re",
".",
"M",
")",
"footnotes",
"[",
"footnote_id",
"]",
"=",
"footnote",
"text",
"=",
"re_footnote_backreferences",
".",
"sub",
"(",
"''",
",",
"text",
")",
"return",
"text",
",",
"footnotes"
]
| Retrieves all footnote backreferences within the text.
Footnote backreferences can be defined anywhere in the text, and
look like this:
[^id]: text
The corresponding footnote reference can then be placed anywhere in
the text
This is some text.[^id]
Footnote IDs are case insensitive. Footnote references are also
removed from the text after they have been retrieved.
RETURNS:
text -- str; text with all footnote backreference definitions removed
footnotes -- OrderedDict; footnote ids mapped to their footnote text | [
"Retrieves",
"all",
"footnote",
"backreferences",
"within",
"the",
"text",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L194-L221 | train |
albert12132/templar | templar/markdown.py | hash_blocks | def hash_blocks(text, hashes):
"""Hashes HTML block tags.
PARAMETERS:
text -- str; Markdown text
hashes -- dict; a dictionary of all hashes, where keys are hashes
and values are their unhashed versions.
When HTML block tags are used, all content inside the tags is
preserved as-is, without any Markdown processing. See block_tags
for a list of block tags.
"""
def sub(match):
block = match.group(1)
hashed = hash_text(block, 'block')
hashes[hashed] = block
return '\n\n' + hashed + '\n\n'
return re_block.sub(sub, text) | python | def hash_blocks(text, hashes):
"""Hashes HTML block tags.
PARAMETERS:
text -- str; Markdown text
hashes -- dict; a dictionary of all hashes, where keys are hashes
and values are their unhashed versions.
When HTML block tags are used, all content inside the tags is
preserved as-is, without any Markdown processing. See block_tags
for a list of block tags.
"""
def sub(match):
block = match.group(1)
hashed = hash_text(block, 'block')
hashes[hashed] = block
return '\n\n' + hashed + '\n\n'
return re_block.sub(sub, text) | [
"def",
"hash_blocks",
"(",
"text",
",",
"hashes",
")",
":",
"def",
"sub",
"(",
"match",
")",
":",
"block",
"=",
"match",
".",
"group",
"(",
"1",
")",
"hashed",
"=",
"hash_text",
"(",
"block",
",",
"'block'",
")",
"hashes",
"[",
"hashed",
"]",
"=",
"block",
"return",
"'\\n\\n'",
"+",
"hashed",
"+",
"'\\n\\n'",
"return",
"re_block",
".",
"sub",
"(",
"sub",
",",
"text",
")"
]
| Hashes HTML block tags.
PARAMETERS:
text -- str; Markdown text
hashes -- dict; a dictionary of all hashes, where keys are hashes
and values are their unhashed versions.
When HTML block tags are used, all content inside the tags is
preserved as-is, without any Markdown processing. See block_tags
for a list of block tags. | [
"Hashes",
"HTML",
"block",
"tags",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L282-L299 | train |
albert12132/templar | templar/markdown.py | hash_lists | def hash_lists(text, hashes, markdown_obj):
"""Hashes ordered and unordered lists.
re_list captures as many consecutive list items as possible and
groups them into one list. Before hashing the lists, the items
are recursively converted from Markdown to HTML. Upon unhashing,
the lists will be ready in their final form.
An attempt at list formatting is done by adding two spaces to
each list item. Since list conversion is a recursive process,
each nested list will add an additional two spaces to list items.
The only exception is for pre blocks -- these are "pulled out" of
indentation when the list is unhashed.
A note on implementation: Markdown syntax for list items is
essentially the same, except everything is shifted to the right by
four spaces. This assumption is made when recursively converting
list items.
List items that consist of only a single paragraph of text are
"pulled out" of the paragraph (that is, the <p> tag is removed).
This differs slightly from original Markdown syntax, which encloses
list items in <p> tags if list items are separated by one or more
blank lines.
"""
for style, marker in (('u', '[+*-]'), ('o', r'\d+\.')):
list_re = re.compile(re_list % (marker, marker), re.S | re.X)
# import pdb
# pdb.set_trace()
for match in list_re.finditer(text):
if not match:
continue
lst = match.group(1)
items = re.split(r'(?:\n|\A) {0,3}%s ' % marker, lst)[1:]
whole_list = ''
for item in items:
item = re.sub(r'^ {1,4}', '', item, flags=re.M)
item = markdown_obj.convert(item)
par_match = re.match('<p>(.*?)</p>', item, flags=re.S)
if par_match and par_match.group(0) == item.strip():
item = par_match.group(1)
whole_list += '<li>{}</li>\n'.format(item)
whole_list = '<{0}l>\n{1}\n</{0}l>'.format(
style,
re.sub('^', ' ', whole_list.strip(), flags=re.M))
hashed = hash_text(whole_list, 'list')
hashes[hashed] = whole_list
start = text.index(match.group(0))
end = start + len(match.group(0))
text = text[:start] + '\n\n' + hashed + '\n\n' + text[end:]
return text | python | def hash_lists(text, hashes, markdown_obj):
"""Hashes ordered and unordered lists.
re_list captures as many consecutive list items as possible and
groups them into one list. Before hashing the lists, the items
are recursively converted from Markdown to HTML. Upon unhashing,
the lists will be ready in their final form.
An attempt at list formatting is done by adding two spaces to
each list item. Since list conversion is a recursive process,
each nested list will add an additional two spaces to list items.
The only exception is for pre blocks -- these are "pulled out" of
indentation when the list is unhashed.
A note on implementation: Markdown syntax for list items is
essentially the same, except everything is shifted to the right by
four spaces. This assumption is made when recursively converting
list items.
List items that consist of only a single paragraph of text are
"pulled out" of the paragraph (that is, the <p> tag is removed).
This differs slightly from original Markdown syntax, which encloses
list items in <p> tags if list items are separated by one or more
blank lines.
"""
for style, marker in (('u', '[+*-]'), ('o', r'\d+\.')):
list_re = re.compile(re_list % (marker, marker), re.S | re.X)
# import pdb
# pdb.set_trace()
for match in list_re.finditer(text):
if not match:
continue
lst = match.group(1)
items = re.split(r'(?:\n|\A) {0,3}%s ' % marker, lst)[1:]
whole_list = ''
for item in items:
item = re.sub(r'^ {1,4}', '', item, flags=re.M)
item = markdown_obj.convert(item)
par_match = re.match('<p>(.*?)</p>', item, flags=re.S)
if par_match and par_match.group(0) == item.strip():
item = par_match.group(1)
whole_list += '<li>{}</li>\n'.format(item)
whole_list = '<{0}l>\n{1}\n</{0}l>'.format(
style,
re.sub('^', ' ', whole_list.strip(), flags=re.M))
hashed = hash_text(whole_list, 'list')
hashes[hashed] = whole_list
start = text.index(match.group(0))
end = start + len(match.group(0))
text = text[:start] + '\n\n' + hashed + '\n\n' + text[end:]
return text | [
"def",
"hash_lists",
"(",
"text",
",",
"hashes",
",",
"markdown_obj",
")",
":",
"for",
"style",
",",
"marker",
"in",
"(",
"(",
"'u'",
",",
"'[+*-]'",
")",
",",
"(",
"'o'",
",",
"r'\\d+\\.'",
")",
")",
":",
"list_re",
"=",
"re",
".",
"compile",
"(",
"re_list",
"%",
"(",
"marker",
",",
"marker",
")",
",",
"re",
".",
"S",
"|",
"re",
".",
"X",
")",
"# import pdb",
"# pdb.set_trace()",
"for",
"match",
"in",
"list_re",
".",
"finditer",
"(",
"text",
")",
":",
"if",
"not",
"match",
":",
"continue",
"lst",
"=",
"match",
".",
"group",
"(",
"1",
")",
"items",
"=",
"re",
".",
"split",
"(",
"r'(?:\\n|\\A) {0,3}%s '",
"%",
"marker",
",",
"lst",
")",
"[",
"1",
":",
"]",
"whole_list",
"=",
"''",
"for",
"item",
"in",
"items",
":",
"item",
"=",
"re",
".",
"sub",
"(",
"r'^ {1,4}'",
",",
"''",
",",
"item",
",",
"flags",
"=",
"re",
".",
"M",
")",
"item",
"=",
"markdown_obj",
".",
"convert",
"(",
"item",
")",
"par_match",
"=",
"re",
".",
"match",
"(",
"'<p>(.*?)</p>'",
",",
"item",
",",
"flags",
"=",
"re",
".",
"S",
")",
"if",
"par_match",
"and",
"par_match",
".",
"group",
"(",
"0",
")",
"==",
"item",
".",
"strip",
"(",
")",
":",
"item",
"=",
"par_match",
".",
"group",
"(",
"1",
")",
"whole_list",
"+=",
"'<li>{}</li>\\n'",
".",
"format",
"(",
"item",
")",
"whole_list",
"=",
"'<{0}l>\\n{1}\\n</{0}l>'",
".",
"format",
"(",
"style",
",",
"re",
".",
"sub",
"(",
"'^'",
",",
"' '",
",",
"whole_list",
".",
"strip",
"(",
")",
",",
"flags",
"=",
"re",
".",
"M",
")",
")",
"hashed",
"=",
"hash_text",
"(",
"whole_list",
",",
"'list'",
")",
"hashes",
"[",
"hashed",
"]",
"=",
"whole_list",
"start",
"=",
"text",
".",
"index",
"(",
"match",
".",
"group",
"(",
"0",
")",
")",
"end",
"=",
"start",
"+",
"len",
"(",
"match",
".",
"group",
"(",
"0",
")",
")",
"text",
"=",
"text",
"[",
":",
"start",
"]",
"+",
"'\\n\\n'",
"+",
"hashed",
"+",
"'\\n\\n'",
"+",
"text",
"[",
"end",
":",
"]",
"return",
"text"
]
| Hashes ordered and unordered lists.
re_list captures as many consecutive list items as possible and
groups them into one list. Before hashing the lists, the items
are recursively converted from Markdown to HTML. Upon unhashing,
the lists will be ready in their final form.
An attempt at list formatting is done by adding two spaces to
each list item. Since list conversion is a recursive process,
each nested list will add an additional two spaces to list items.
The only exception is for pre blocks -- these are "pulled out" of
indentation when the list is unhashed.
A note on implementation: Markdown syntax for list items is
essentially the same, except everything is shifted to the right by
four spaces. This assumption is made when recursively converting
list items.
List items that consist of only a single paragraph of text are
"pulled out" of the paragraph (that is, the <p> tag is removed).
This differs slightly from original Markdown syntax, which encloses
list items in <p> tags if list items are separated by one or more
blank lines. | [
"Hashes",
"ordered",
"and",
"unordered",
"lists",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L320-L370 | train |
albert12132/templar | templar/markdown.py | hash_blockquotes | def hash_blockquotes(text, hashes, markdown_obj):
"""Hashes block quotes.
Block quotes are defined to be lines that start with "> " (the
space is not optional).
All Markdown syntax in a blockquote is recursively converted,
which allows (among other things) headers, codeblocks, and
blockquotes to be used inside of blockquotes. The "> " is simply
stripped from the front of any blockquote lines and the result is
recursively converted.
"""
def sub(match):
block = match.group(1).strip()
block = re.sub(r'(?:(?<=\n)|(?<=\A))> ?', '', block)
block = markdown_obj.convert(block)
block = '<blockquote>{}</blockquote>'.format(block)
hashed = hash_text(block, 'blockquote')
hashes[hashed] = block
return '\n\n' + hashed + '\n\n'
return re_blockquote.sub(sub, text) | python | def hash_blockquotes(text, hashes, markdown_obj):
"""Hashes block quotes.
Block quotes are defined to be lines that start with "> " (the
space is not optional).
All Markdown syntax in a blockquote is recursively converted,
which allows (among other things) headers, codeblocks, and
blockquotes to be used inside of blockquotes. The "> " is simply
stripped from the front of any blockquote lines and the result is
recursively converted.
"""
def sub(match):
block = match.group(1).strip()
block = re.sub(r'(?:(?<=\n)|(?<=\A))> ?', '', block)
block = markdown_obj.convert(block)
block = '<blockquote>{}</blockquote>'.format(block)
hashed = hash_text(block, 'blockquote')
hashes[hashed] = block
return '\n\n' + hashed + '\n\n'
return re_blockquote.sub(sub, text) | [
"def",
"hash_blockquotes",
"(",
"text",
",",
"hashes",
",",
"markdown_obj",
")",
":",
"def",
"sub",
"(",
"match",
")",
":",
"block",
"=",
"match",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
")",
"block",
"=",
"re",
".",
"sub",
"(",
"r'(?:(?<=\\n)|(?<=\\A))> ?'",
",",
"''",
",",
"block",
")",
"block",
"=",
"markdown_obj",
".",
"convert",
"(",
"block",
")",
"block",
"=",
"'<blockquote>{}</blockquote>'",
".",
"format",
"(",
"block",
")",
"hashed",
"=",
"hash_text",
"(",
"block",
",",
"'blockquote'",
")",
"hashes",
"[",
"hashed",
"]",
"=",
"block",
"return",
"'\\n\\n'",
"+",
"hashed",
"+",
"'\\n\\n'",
"return",
"re_blockquote",
".",
"sub",
"(",
"sub",
",",
"text",
")"
]
| Hashes block quotes.
Block quotes are defined to be lines that start with "> " (the
space is not optional).
All Markdown syntax in a blockquote is recursively converted,
which allows (among other things) headers, codeblocks, and
blockquotes to be used inside of blockquotes. The "> " is simply
stripped from the front of any blockquote lines and the result is
recursively converted. | [
"Hashes",
"block",
"quotes",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L418-L438 | train |
albert12132/templar | templar/markdown.py | hash_codes | def hash_codes(text, hashes):
"""Hashes inline code tags.
Code tags can begin with an arbitrary number of back-ticks, as long
as the close contains the same number of back-ticks. This allows
back-ticks to be used within the code tag.
HTML entities (&, <, >, ", ') are automatically escaped inside the
code tag.
"""
def sub(match):
code = '<code>{}</code>'.format(escape(match.group(2)))
hashed = hash_text(code, 'code')
hashes[hashed] = code
return hashed
return re_code.sub(sub, text) | python | def hash_codes(text, hashes):
"""Hashes inline code tags.
Code tags can begin with an arbitrary number of back-ticks, as long
as the close contains the same number of back-ticks. This allows
back-ticks to be used within the code tag.
HTML entities (&, <, >, ", ') are automatically escaped inside the
code tag.
"""
def sub(match):
code = '<code>{}</code>'.format(escape(match.group(2)))
hashed = hash_text(code, 'code')
hashes[hashed] = code
return hashed
return re_code.sub(sub, text) | [
"def",
"hash_codes",
"(",
"text",
",",
"hashes",
")",
":",
"def",
"sub",
"(",
"match",
")",
":",
"code",
"=",
"'<code>{}</code>'",
".",
"format",
"(",
"escape",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
")",
"hashed",
"=",
"hash_text",
"(",
"code",
",",
"'code'",
")",
"hashes",
"[",
"hashed",
"]",
"=",
"code",
"return",
"hashed",
"return",
"re_code",
".",
"sub",
"(",
"sub",
",",
"text",
")"
]
| Hashes inline code tags.
Code tags can begin with an arbitrary number of back-ticks, as long
as the close contains the same number of back-ticks. This allows
back-ticks to be used within the code tag.
HTML entities (&, <, >, ", ') are automatically escaped inside the
code tag. | [
"Hashes",
"inline",
"code",
"tags",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L491-L506 | train |
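A usage sketch of inline-code hashing; the placeholder is later resolved by unhash():
from templar.markdown import hash_codes

hashes = {}
hashed_text = hash_codes('Compare `a < b` in code.', hashes)
# hashes now maps a generated placeholder to '<code>a &lt; b</code>'
# (HTML entities are escaped), and hashed_text contains the placeholder
# where the back-ticked span used to be.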
albert12132/templar | templar/markdown.py | hash_tags | def hash_tags(text, hashes):
"""Hashes any non-block tags.
Only the tags themselves are hashed -- the contents surrounded
by tags are not touched. Indeed, there is no notion of "contained"
text for non-block tags.
Inline tags that are to be hashed are not white-listed, which
allows users to define their own tags. These user-defined tags
will also be preserved in their original form until the controller
(see link.py) is applied to them.
"""
def sub(match):
hashed = hash_text(match.group(0), 'tag')
hashes[hashed] = match.group(0)
return hashed
return re_tag.sub(sub, text) | python | def hash_tags(text, hashes):
"""Hashes any non-block tags.
Only the tags themselves are hashed -- the contents surrounded
by tags are not touched. Indeed, there is no notion of "contained"
text for non-block tags.
Inline tags that are to be hashed are not white-listed, which
allows users to define their own tags. These user-defined tags
will also be preserved in their original form until the controller
(see link.py) is applied to them.
"""
def sub(match):
hashed = hash_text(match.group(0), 'tag')
hashes[hashed] = match.group(0)
return hashed
return re_tag.sub(sub, text) | [
"def",
"hash_tags",
"(",
"text",
",",
"hashes",
")",
":",
"def",
"sub",
"(",
"match",
")",
":",
"hashed",
"=",
"hash_text",
"(",
"match",
".",
"group",
"(",
"0",
")",
",",
"'tag'",
")",
"hashes",
"[",
"hashed",
"]",
"=",
"match",
".",
"group",
"(",
"0",
")",
"return",
"hashed",
"return",
"re_tag",
".",
"sub",
"(",
"sub",
",",
"text",
")"
]
| Hashes any non-block tags.
Only the tags themselves are hashed -- the contents surrounded
by tags are not touched. Indeed, there is no notion of "contained"
text for non-block tags.
Inline tags that are to be hashed are not white-listed, which
allows users to define their own tags. These user-defined tags
will also be preserved in their original form until the controller
(see link.py) is applied to them. | [
"Hashes",
"any",
"non",
"-",
"block",
"tags",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L634-L650 | train |
albert12132/templar | templar/markdown.py | unhash | def unhash(text, hashes):
"""Unhashes all hashed entites in the hashes dictionary.
The pattern for hashes is defined by re_hash. After everything is
unhashed, <pre> blocks are "pulled out" of whatever indentation
level in which they used to be (e.g. in a list).
"""
def retrieve_match(match):
return hashes[match.group(0)]
while re_hash.search(text):
text = re_hash.sub(retrieve_match, text)
text = re_pre_tag.sub(lambda m: re.sub('^' + m.group(1), '', m.group(0), flags=re.M), text)
return text | python | def unhash(text, hashes):
"""Unhashes all hashed entites in the hashes dictionary.
The pattern for hashes is defined by re_hash. After everything is
unhashed, <pre> blocks are "pulled out" of whatever indentation
level in which they used to be (e.g. in a list).
"""
def retrieve_match(match):
return hashes[match.group(0)]
while re_hash.search(text):
text = re_hash.sub(retrieve_match, text)
text = re_pre_tag.sub(lambda m: re.sub('^' + m.group(1), '', m.group(0), flags=re.M), text)
return text | [
"def",
"unhash",
"(",
"text",
",",
"hashes",
")",
":",
"def",
"retrieve_match",
"(",
"match",
")",
":",
"return",
"hashes",
"[",
"match",
".",
"group",
"(",
"0",
")",
"]",
"while",
"re_hash",
".",
"search",
"(",
"text",
")",
":",
"text",
"=",
"re_hash",
".",
"sub",
"(",
"retrieve_match",
",",
"text",
")",
"text",
"=",
"re_pre_tag",
".",
"sub",
"(",
"lambda",
"m",
":",
"re",
".",
"sub",
"(",
"'^'",
"+",
"m",
".",
"group",
"(",
"1",
")",
",",
"''",
",",
"m",
".",
"group",
"(",
"0",
")",
",",
"flags",
"=",
"re",
".",
"M",
")",
",",
"text",
")",
"return",
"text"
]
| Unhashes all hashed entities in the hashes dictionary.
The pattern for hashes is defined by re_hash. After everything is
unhashed, <pre> blocks are "pulled out" of whatever indentation
level in which they used to be (e.g. in a list). | [
"Unhashes",
"all",
"hashed",
"entites",
"in",
"the",
"hashes",
"dictionary",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L673-L685 | train |
albert12132/templar | templar/markdown.py | paragraph_sub | def paragraph_sub(match):
"""Captures paragraphs."""
text = re.sub(r' \n', r'\n<br/>\n', match.group(0).strip())
return '<p>{}</p>'.format(text) | python | def paragraph_sub(match):
"""Captures paragraphs."""
text = re.sub(r' \n', r'\n<br/>\n', match.group(0).strip())
return '<p>{}</p>'.format(text) | [
"def",
"paragraph_sub",
"(",
"match",
")",
":",
"text",
"=",
"re",
".",
"sub",
"(",
"r' \\n'",
",",
"r'\\n<br/>\\n'",
",",
"match",
".",
"group",
"(",
"0",
")",
".",
"strip",
"(",
")",
")",
"return",
"'<p>{}</p>'",
".",
"format",
"(",
"text",
")"
]
| Captures paragraphs. | [
"Captures",
"paragraphs",
"."
]
| 39851c89730ab69e5c73d0a46adca2a44ecc4165 | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L845-L848 | train |
gofed/gofedlib | gofedlib/graphs/graphutils.py | GraphUtils.truncateGraph | def truncateGraph(graph, root_nodes):
"""Create a set of all nodes containg the root_nodes and
all nodes reacheable from them
"""
subgraph = Graph()
for node in root_nodes:
subgraph = GraphUtils.joinGraphs(subgraph, GraphUtils.getReacheableSubgraph(graph, node))
return subgraph | python | def truncateGraph(graph, root_nodes):
"""Create a set of all nodes containg the root_nodes and
all nodes reacheable from them
"""
subgraph = Graph()
for node in root_nodes:
subgraph = GraphUtils.joinGraphs(subgraph, GraphUtils.getReacheableSubgraph(graph, node))
return subgraph | [
"def",
"truncateGraph",
"(",
"graph",
",",
"root_nodes",
")",
":",
"subgraph",
"=",
"Graph",
"(",
")",
"for",
"node",
"in",
"root_nodes",
":",
"subgraph",
"=",
"GraphUtils",
".",
"joinGraphs",
"(",
"subgraph",
",",
"GraphUtils",
".",
"getReacheableSubgraph",
"(",
"graph",
",",
"node",
")",
")",
"return",
"subgraph"
]
| Create a set of all nodes containing the root_nodes and
all nodes reachable from them | [
"Create",
"a",
"set",
"of",
"all",
"nodes",
"containg",
"the",
"root_nodes",
"and",
"all",
"nodes",
"reacheable",
"from",
"them"
]
| 0674c248fe3d8706f98f912996b65af469f96b10 | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/graphs/graphutils.py#L136-L144 | train |
gofed/gofedlib | gofedlib/graphs/graphutils.py | GraphUtils.filterGraph | def filterGraph(graph, node_fnc):
"""Remove all nodes for with node_fnc does not hold
"""
nodes = filter(lambda l: node_fnc(l), graph.nodes())
edges = {}
gedges = graph.edges()
for u in gedges:
if u not in nodes:
continue
for v in gedges[u]:
if v not in nodes:
continue
try:
edges[u].append(v)
except KeyError:
edges[u] = [v]
return Graph(nodes, edges) | python | def filterGraph(graph, node_fnc):
"""Remove all nodes for with node_fnc does not hold
"""
nodes = filter(lambda l: node_fnc(l), graph.nodes())
edges = {}
gedges = graph.edges()
for u in gedges:
if u not in nodes:
continue
for v in gedges[u]:
if v not in nodes:
continue
try:
edges[u].append(v)
except KeyError:
edges[u] = [v]
return Graph(nodes, edges) | [
"def",
"filterGraph",
"(",
"graph",
",",
"node_fnc",
")",
":",
"nodes",
"=",
"filter",
"(",
"lambda",
"l",
":",
"node_fnc",
"(",
"l",
")",
",",
"graph",
".",
"nodes",
"(",
")",
")",
"edges",
"=",
"{",
"}",
"gedges",
"=",
"graph",
".",
"edges",
"(",
")",
"for",
"u",
"in",
"gedges",
":",
"if",
"u",
"not",
"in",
"nodes",
":",
"continue",
"for",
"v",
"in",
"gedges",
"[",
"u",
"]",
":",
"if",
"v",
"not",
"in",
"nodes",
":",
"continue",
"try",
":",
"edges",
"[",
"u",
"]",
".",
"append",
"(",
"v",
")",
"except",
"KeyError",
":",
"edges",
"[",
"u",
"]",
"=",
"[",
"v",
"]",
"return",
"Graph",
"(",
"nodes",
",",
"edges",
")"
]
| Remove all nodes for which node_fnc does not hold | [
"Remove",
"all",
"nodes",
"for",
"with",
"node_fnc",
"does",
"not",
"hold"
]
| 0674c248fe3d8706f98f912996b65af469f96b10 | https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/graphs/graphutils.py#L147-L165 | train |
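A minimal usage sketch for the two GraphUtils helpers above; the import location and the Graph(nodes, edges) constructor are assumptions inferred from filterGraph's return statement, not a documented API.
from gofedlib.graphs.graphutils import Graph, GraphUtils  # assumed import path
# a tiny dependency graph: a -> b -> c, with d isolated
g = Graph(["a", "b", "c", "d"], {"a": ["b"], "b": ["c"]})
# keep only the root nodes and everything reachable from them
reachable = GraphUtils.truncateGraph(g, ["a"])
# keep only the nodes for which the predicate holds
kept = GraphUtils.filterGraph(g, lambda node: node != "d")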
tslight/treepick | treepick/paths.py | Paths.listdir | def listdir(self, path):
'''
Return a list of all non dotfiles in a given directory.
'''
for f in os.listdir(path):
if not f.startswith('.'):
yield f | python | def listdir(self, path):
'''
Return a list of all non dotfiles in a given directory.
'''
for f in os.listdir(path):
if not f.startswith('.'):
yield f | [
"def",
"listdir",
"(",
"self",
",",
"path",
")",
":",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"if",
"not",
"f",
".",
"startswith",
"(",
"'.'",
")",
":",
"yield",
"f"
]
| Return a list of all non dotfiles in a given directory. | [
"Return",
"a",
"list",
"of",
"all",
"non",
"dotfiles",
"in",
"a",
"given",
"directory",
"."
]
| 7adf838900f11e8845e17d8c79bb2b23617aec2c | https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/paths.py#L27-L33 | train |
tslight/treepick | treepick/paths.py | Paths.getchildren | def getchildren(self):
'''
Create list of absolute paths to be used to instantiate path objects
for traversal, based on whether or not hidden attribute is set.
'''
try:
if self.hidden:
return [os.path.join(self.name, child)
for child in sorted(self.listdir(self.name))]
else:
return [os.path.join(self.name, child)
for child in sorted(os.listdir(self.name))]
except OSError:
return None | python | def getchildren(self):
'''
Create list of absolute paths to be used to instantiate path objects
for traversal, based on whether or not hidden attribute is set.
'''
try:
if self.hidden:
return [os.path.join(self.name, child)
for child in sorted(self.listdir(self.name))]
else:
return [os.path.join(self.name, child)
for child in sorted(os.listdir(self.name))]
except OSError:
return None | [
"def",
"getchildren",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"hidden",
":",
"return",
"[",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"name",
",",
"child",
")",
"for",
"child",
"in",
"sorted",
"(",
"self",
".",
"listdir",
"(",
"self",
".",
"name",
")",
")",
"]",
"else",
":",
"return",
"[",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"name",
",",
"child",
")",
"for",
"child",
"in",
"sorted",
"(",
"os",
".",
"listdir",
"(",
"self",
".",
"name",
")",
")",
"]",
"except",
"OSError",
":",
"return",
"None"
]
| Create list of absolute paths to be used to instantiate path objects
for traversal, based on whether or not hidden attribute is set. | [
"Create",
"list",
"of",
"absolute",
"paths",
"to",
"be",
"used",
"to",
"instantiate",
"path",
"objects",
"for",
"traversal",
"based",
"on",
"whether",
"or",
"not",
"hidden",
"attribute",
"is",
"set",
"."
]
| 7adf838900f11e8845e17d8c79bb2b23617aec2c | https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/paths.py#L35-L48 | train |
tslight/treepick | treepick/paths.py | Paths.getpaths | def getpaths(self):
'''
If we have children, use a list comprehension to instantiate new paths
objects to traverse.
'''
self.children = self.getchildren()
if self.children is None:
return
if self.paths is None:
self.paths = [Paths(self.screen,
os.path.join(self.name, child),
self.hidden,
self.picked,
self.expanded,
self.sized)
for child in self.children]
return self.paths | python | def getpaths(self):
'''
If we have children, use a list comprehension to instantiate new paths
objects to traverse.
'''
self.children = self.getchildren()
if self.children is None:
return
if self.paths is None:
self.paths = [Paths(self.screen,
os.path.join(self.name, child),
self.hidden,
self.picked,
self.expanded,
self.sized)
for child in self.children]
return self.paths | [
"def",
"getpaths",
"(",
"self",
")",
":",
"self",
".",
"children",
"=",
"self",
".",
"getchildren",
"(",
")",
"if",
"self",
".",
"children",
"is",
"None",
":",
"return",
"if",
"self",
".",
"paths",
"is",
"None",
":",
"self",
".",
"paths",
"=",
"[",
"Paths",
"(",
"self",
".",
"screen",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"name",
",",
"child",
")",
",",
"self",
".",
"hidden",
",",
"self",
".",
"picked",
",",
"self",
".",
"expanded",
",",
"self",
".",
"sized",
")",
"for",
"child",
"in",
"self",
".",
"children",
"]",
"return",
"self",
".",
"paths"
]
| If we have children, use a list comprehension to instantiate new paths
objects to traverse. | [
"If",
"we",
"have",
"children",
"use",
"a",
"list",
"comprehension",
"to",
"instantiate",
"new",
"paths",
"objects",
"to",
"traverse",
"."
]
| 7adf838900f11e8845e17d8c79bb2b23617aec2c | https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/paths.py#L50-L66 | train |
tslight/treepick | treepick/paths.py | Paths.traverse | def traverse(self):
'''
Recursive generator that lazily unfolds the filesystem.
'''
yield self, 0
if self.name in self.expanded:
for path in self.getpaths():
for child, depth in path.traverse():
yield child, depth + 1 | python | def traverse(self):
'''
Recursive generator that lazily unfolds the filesystem.
'''
yield self, 0
if self.name in self.expanded:
for path in self.getpaths():
for child, depth in path.traverse():
yield child, depth + 1 | [
"def",
"traverse",
"(",
"self",
")",
":",
"yield",
"self",
",",
"0",
"if",
"self",
".",
"name",
"in",
"self",
".",
"expanded",
":",
"for",
"path",
"in",
"self",
".",
"getpaths",
"(",
")",
":",
"for",
"child",
",",
"depth",
"in",
"path",
".",
"traverse",
"(",
")",
":",
"yield",
"child",
",",
"depth",
"+",
"1"
]
| Recursive generator that lazily unfolds the filesystem. | [
"Recursive",
"generator",
"that",
"lazily",
"unfolds",
"the",
"filesystem",
"."
]
| 7adf838900f11e8845e17d8c79bb2b23617aec2c | https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/paths.py#L68-L76 | train |
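A short sketch of how the traverse generator above is typically consumed, assuming a Paths instance named root has already been constructed for some directory (its constructor arguments are not shown here):
# depth-first walk of the lazily unfolded tree; only directories whose
# names appear in root.expanded recurse into their children
for node, depth in root.traverse():
    print("    " * depth + node.name)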
geophysics-ubonn/crtomo_tools | src/grid_extralines_gen_decouplings.py | line_line_intersect | def line_line_intersect(x, y):
"""Compute the intersection point of two lines
Parameters
----------
x = x4 array: x1, x2, x3, x4
y = x4 array: y1, y2, y3, y4
line 1 is defined by p1,p2
line 2 is defined by p3,p4
Returns
-------
Ix: x-coordinate of intersection
Iy: y-coordinate of intersection
"""
A = x[0] * y[1] - y[0] * x[1]
B = x[2] * y[3] - y[2] * x[3]
C = (x[0] - x[1]) * (y[2] - y[3]) - (y[0] - y[1]) * (x[2] - x[3])
Ix = (A * (x[2] - x[3]) - (x[0] - x[1]) * B) / C
Iy = (A * (y[2] - y[3]) - (y[0] - y[1]) * B) / C
return Ix, Iy | python | def line_line_intersect(x, y):
"""Compute the intersection point of two lines
Parameters
----------
x = x4 array: x1, x2, x3, x4
y = x4 array: y1, y2, y3, y4
line 1 is defined by p1,p2
line 2 is defined by p3,p4
Returns
-------
Ix: x-coordinate of intersection
Iy: y-coordinate of intersection
"""
A = x[0] * y[1] - y[0] * x[1]
B = x[2] * y[3] - y[2] * x[3]
C = (x[0] - x[1]) * (y[2] - y[3]) - (y[0] - y[1]) * (x[2] - x[3])
Ix = (A * (x[2] - x[3]) - (x[0] - x[1]) * B) / C
Iy = (A * (y[2] - y[3]) - (y[0] - y[1]) * B) / C
return Ix, Iy | [
"def",
"line_line_intersect",
"(",
"x",
",",
"y",
")",
":",
"A",
"=",
"x",
"[",
"0",
"]",
"*",
"y",
"[",
"1",
"]",
"-",
"y",
"[",
"0",
"]",
"*",
"x",
"[",
"1",
"]",
"B",
"=",
"x",
"[",
"2",
"]",
"*",
"y",
"[",
"3",
"]",
"-",
"y",
"[",
"2",
"]",
"*",
"x",
"[",
"4",
"]",
"C",
"=",
"(",
"x",
"[",
"0",
"]",
"-",
"x",
"[",
"1",
"]",
")",
"*",
"(",
"y",
"[",
"2",
"]",
"-",
"y",
"[",
"3",
"]",
")",
"-",
"(",
"y",
"[",
"0",
"]",
"-",
"y",
"[",
"1",
"]",
")",
"*",
"(",
"x",
"[",
"2",
"]",
"-",
"x",
"[",
"3",
"]",
")",
"Ix",
"=",
"(",
"A",
"*",
"(",
"x",
"[",
"2",
"]",
"-",
"x",
"[",
"3",
"]",
")",
"-",
"(",
"x",
"[",
"0",
"]",
"-",
"x",
"[",
"1",
"]",
")",
"*",
"B",
")",
"/",
"C",
"Iy",
"=",
"(",
"A",
"*",
"(",
"y",
"[",
"2",
"]",
"-",
"y",
"[",
"3",
"]",
")",
"-",
"(",
"y",
"[",
"0",
"]",
"-",
"y",
"[",
"1",
"]",
")",
"*",
"B",
")",
"/",
"C",
"return",
"Ix",
",",
"Iy"
]
| Compute the intersection point of two lines
Parameters
----------
x = x4 array: x1, x2, x3, x4
y = x4 array: y1, y2, y3, y4
line 1 is defined by p1,p2
line 2 is defined by p3,p4
Returns
-------
Ix: x-coordinate of intersection
Iy: y-coordinate of intersection | [
"Compute",
"the",
"intersection",
"point",
"of",
"two",
"lines"
]
| 27c3e21a557f8df1c12455b96c4c2e00e08a5b4a | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/grid_extralines_gen_decouplings.py#L116-L137 | train |
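A worked check for line_line_intersect above, assuming the script can be imported as a module (it lives under src/, so the import path is an assumption) and that the B term indexes x[3], since a four-element x has no x[4]: the diagonal through (0, 0)-(1, 1) and the anti-diagonal through (0, 1)-(1, 0) cross at (0.5, 0.5).
from grid_extralines_gen_decouplings import line_line_intersect  # assumed importable
x = [0.0, 1.0, 0.0, 1.0]  # x1, x2, x3, x4
y = [0.0, 1.0, 1.0, 0.0]  # y1, y2, y3, y4
ix, iy = line_line_intersect(x, y)
print(ix, iy)  # expected: 0.5 0.5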
redhat-openstack/python-tripleo-helper | tripleohelper/utils.py | pkg_data_filename | def pkg_data_filename(resource_name, filename=None):
"""Returns the path of a file installed along the package
"""
resource_filename = pkg_resources.resource_filename(
tripleohelper.__name__,
resource_name
)
if filename is not None:
resource_filename = os.path.join(resource_filename, filename)
return resource_filename | python | def pkg_data_filename(resource_name, filename=None):
"""Returns the path of a file installed along the package
"""
resource_filename = pkg_resources.resource_filename(
tripleohelper.__name__,
resource_name
)
if filename is not None:
resource_filename = os.path.join(resource_filename, filename)
return resource_filename | [
"def",
"pkg_data_filename",
"(",
"resource_name",
",",
"filename",
"=",
"None",
")",
":",
"resource_filename",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"tripleohelper",
".",
"__name__",
",",
"resource_name",
")",
"if",
"filename",
"is",
"not",
"None",
":",
"resource_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"resource_filename",
",",
"filename",
")",
"return",
"resource_filename"
]
| Returns the path of a file installed along the package | [
"Returns",
"the",
"path",
"of",
"a",
"file",
"installed",
"along",
"the",
"package"
]
| bfa165538335edb1088170c7a92f097167225c81 | https://github.com/redhat-openstack/python-tripleo-helper/blob/bfa165538335edb1088170c7a92f097167225c81/tripleohelper/utils.py#L23-L32 | train |
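A one-line usage sketch for pkg_data_filename above; the resource directory and file name are hypothetical:
from tripleohelper.utils import pkg_data_filename  # assumed import path
deploy_script = pkg_data_filename("static", "deploy.sh")  # hypothetical resource/file names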
peterbe/gg | gg/builtins/merge/gg_merge.py | merge | def merge(config):
"""Merge the current branch into master."""
repo = config.repo
active_branch = repo.active_branch
if active_branch.name == "master":
error_out("You're already on the master branch.")
if repo.is_dirty():
error_out(
'Repo is "dirty". ({})'.format(
", ".join([repr(x.b_path) for x in repo.index.diff(None)])
)
)
branch_name = active_branch.name
state = read(config.configfile)
origin_name = state.get("ORIGIN_NAME", "origin")
upstream_remote = None
for remote in repo.remotes:
if remote.name == origin_name:
upstream_remote = remote
break
if not upstream_remote:
error_out("No remote called {!r} found".format(origin_name))
repo.heads.master.checkout()
upstream_remote.pull(repo.heads.master)
repo.git.merge(branch_name)
repo.git.branch("-d", branch_name)
success_out("Branch {!r} deleted.".format(branch_name))
info_out("NOW, you might want to run:\n")
info_out("git push origin master\n\n")
push_for_you = input("Run that push? [Y/n] ").lower().strip() != "n"
if push_for_you:
upstream_remote.push("master")
success_out("Current master pushed to {}".format(upstream_remote.name)) | python | def merge(config):
"""Merge the current branch into master."""
repo = config.repo
active_branch = repo.active_branch
if active_branch.name == "master":
error_out("You're already on the master branch.")
if repo.is_dirty():
error_out(
'Repo is "dirty". ({})'.format(
", ".join([repr(x.b_path) for x in repo.index.diff(None)])
)
)
branch_name = active_branch.name
state = read(config.configfile)
origin_name = state.get("ORIGIN_NAME", "origin")
upstream_remote = None
for remote in repo.remotes:
if remote.name == origin_name:
upstream_remote = remote
break
if not upstream_remote:
error_out("No remote called {!r} found".format(origin_name))
repo.heads.master.checkout()
upstream_remote.pull(repo.heads.master)
repo.git.merge(branch_name)
repo.git.branch("-d", branch_name)
success_out("Branch {!r} deleted.".format(branch_name))
info_out("NOW, you might want to run:\n")
info_out("git push origin master\n\n")
push_for_you = input("Run that push? [Y/n] ").lower().strip() != "n"
if push_for_you:
upstream_remote.push("master")
success_out("Current master pushed to {}".format(upstream_remote.name)) | [
"def",
"merge",
"(",
"config",
")",
":",
"repo",
"=",
"config",
".",
"repo",
"active_branch",
"=",
"repo",
".",
"active_branch",
"if",
"active_branch",
".",
"name",
"==",
"\"master\"",
":",
"error_out",
"(",
"\"You're already on the master branch.\"",
")",
"if",
"repo",
".",
"is_dirty",
"(",
")",
":",
"error_out",
"(",
"'Repo is \"dirty\". ({})'",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"[",
"repr",
"(",
"x",
".",
"b_path",
")",
"for",
"x",
"in",
"repo",
".",
"index",
".",
"diff",
"(",
"None",
")",
"]",
")",
")",
")",
"branch_name",
"=",
"active_branch",
".",
"name",
"state",
"=",
"read",
"(",
"config",
".",
"configfile",
")",
"origin_name",
"=",
"state",
".",
"get",
"(",
"\"ORIGIN_NAME\"",
",",
"\"origin\"",
")",
"upstream_remote",
"=",
"None",
"for",
"remote",
"in",
"repo",
".",
"remotes",
":",
"if",
"remote",
".",
"name",
"==",
"origin_name",
":",
"upstream_remote",
"=",
"remote",
"break",
"if",
"not",
"upstream_remote",
":",
"error_out",
"(",
"\"No remote called {!r} found\"",
".",
"format",
"(",
"origin_name",
")",
")",
"repo",
".",
"heads",
".",
"master",
".",
"checkout",
"(",
")",
"upstream_remote",
".",
"pull",
"(",
"repo",
".",
"heads",
".",
"master",
")",
"repo",
".",
"git",
".",
"merge",
"(",
"branch_name",
")",
"repo",
".",
"git",
".",
"branch",
"(",
"\"-d\"",
",",
"branch_name",
")",
"success_out",
"(",
"\"Branch {!r} deleted.\"",
".",
"format",
"(",
"branch_name",
")",
")",
"info_out",
"(",
"\"NOW, you might want to run:\\n\"",
")",
"info_out",
"(",
"\"git push origin master\\n\\n\"",
")",
"push_for_you",
"=",
"input",
"(",
"\"Run that push? [Y/n] \"",
")",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"!=",
"\"n\"",
"if",
"push_for_you",
":",
"upstream_remote",
".",
"push",
"(",
"\"master\"",
")",
"success_out",
"(",
"\"Current master pushed to {}\"",
".",
"format",
"(",
"upstream_remote",
".",
"name",
")",
")"
]
| Merge the current branch into master. | [
"Merge",
"the",
"current",
"branch",
"into",
"master",
"."
]
| 2aace5bdb4a9b1cb65bea717784edf54c63b7bad | https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/builtins/merge/gg_merge.py#L8-L48 | train |
edx/edx-celeryutils | celery_utils/chordable_django_backend.py | chord_task | def chord_task(*args, **kwargs):
u"""
Override of the default task decorator to specify use of this backend.
"""
given_backend = kwargs.get(u'backend', None)
if not isinstance(given_backend, ChordableDjangoBackend):
kwargs[u'backend'] = ChordableDjangoBackend(kwargs.get('app', current_app))
return task(*args, **kwargs) | python | def chord_task(*args, **kwargs):
u"""
Override of the default task decorator to specify use of this backend.
"""
given_backend = kwargs.get(u'backend', None)
if not isinstance(given_backend, ChordableDjangoBackend):
kwargs[u'backend'] = ChordableDjangoBackend(kwargs.get('app', current_app))
return task(*args, **kwargs) | [
"def",
"chord_task",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"given_backend",
"=",
"kwargs",
".",
"get",
"(",
"u'backend'",
",",
"None",
")",
"if",
"not",
"isinstance",
"(",
"given_backend",
",",
"ChordableDjangoBackend",
")",
":",
"kwargs",
"[",
"u'backend'",
"]",
"=",
"ChordableDjangoBackend",
"(",
"kwargs",
".",
"get",
"(",
"'app'",
",",
"current_app",
")",
")",
"return",
"task",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| u"""
Override of the default task decorator to specify use of this backend. | [
"u",
"Override",
"of",
"the",
"default",
"task",
"decorator",
"to",
"specify",
"use",
"of",
"this",
"backend",
"."
]
| d8745f5f0929ad154fad779a19fbefe7f51e9498 | https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L89-L96 | train |
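A hypothetical sketch of how chord_task above is meant to be used (the task names are invented for illustration); the decorator only injects a ChordableDjangoBackend so that celery chords can fan in through the database backend:
from celery import chord
from celery_utils.chordable_django_backend import chord_task

@chord_task()
def square(n):
    return n * n

@chord_task()
def total(results):
    return sum(results)

# run three squares in parallel, then sum them once all have finished
async_result = chord([square.s(n) for n in (1, 2, 3)])(total.s())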
edx/edx-celeryutils | celery_utils/chordable_django_backend.py | ChordableDjangoBackend._cleanup | def _cleanup(self, status, expires_multiplier=1):
u"""
Clean up expired records.
Will remove all entries for any ChordData whose callback result is in
state <status> that was marked completed more than
(self.expires * <expires_multiplier>) ago.
"""
# self.expires is inherited, and defaults to 1 day (or setting CELERY_TASK_RESULT_EXPIRES)
expires = self.expires if isinstance(self.expires, timedelta) else timedelta(seconds=self.expires)
expires = expires * expires_multiplier
chords_to_delete = ChordData.objects.filter(
callback_result__date_done__lte=datetime.now() - expires,
callback_result__status=status
).iterator()
for _chord in chords_to_delete:
subtask_ids = [subtask.task_id for subtask in _chord.completed_results.all()]
_chord.completed_results.clear()
TaskMeta.objects.filter(task_id__in=subtask_ids).delete()
_chord.callback_result.delete()
_chord.delete() | python | def _cleanup(self, status, expires_multiplier=1):
u"""
Clean up expired records.
Will remove all entries for any ChordData whose callback result is in
state <status> that was marked completed more than
(self.expires * <expires_multiplier>) ago.
"""
# self.expires is inherited, and defaults to 1 day (or setting CELERY_TASK_RESULT_EXPIRES)
expires = self.expires if isinstance(self.expires, timedelta) else timedelta(seconds=self.expires)
expires = expires * expires_multiplier
chords_to_delete = ChordData.objects.filter(
callback_result__date_done__lte=datetime.now() - expires,
callback_result__status=status
).iterator()
for _chord in chords_to_delete:
subtask_ids = [subtask.task_id for subtask in _chord.completed_results.all()]
_chord.completed_results.clear()
TaskMeta.objects.filter(task_id__in=subtask_ids).delete()
_chord.callback_result.delete()
_chord.delete() | [
"def",
"_cleanup",
"(",
"self",
",",
"status",
",",
"expires_multiplier",
"=",
"1",
")",
":",
"# self.expires is inherited, and defaults to 1 day (or setting CELERY_TASK_RESULT_EXPIRES)",
"expires",
"=",
"self",
".",
"expires",
"if",
"isinstance",
"(",
"self",
".",
"expires",
",",
"timedelta",
")",
"else",
"timedelta",
"(",
"seconds",
"=",
"self",
".",
"expires",
")",
"expires",
"=",
"expires",
"*",
"expires_multiplier",
"chords_to_delete",
"=",
"ChordData",
".",
"objects",
".",
"filter",
"(",
"callback_result__date_done__lte",
"=",
"datetime",
".",
"now",
"(",
")",
"-",
"expires",
",",
"callback_result__status",
"=",
"status",
")",
".",
"iterator",
"(",
")",
"for",
"_chord",
"in",
"chords_to_delete",
":",
"subtask_ids",
"=",
"[",
"subtask",
".",
"task_id",
"for",
"subtask",
"in",
"_chord",
".",
"completed_results",
".",
"all",
"(",
")",
"]",
"_chord",
".",
"completed_results",
".",
"clear",
"(",
")",
"TaskMeta",
".",
"objects",
".",
"filter",
"(",
"task_id__in",
"=",
"subtask_ids",
")",
".",
"delete",
"(",
")",
"_chord",
".",
"callback_result",
".",
"delete",
"(",
")",
"_chord",
".",
"delete",
"(",
")"
]
| u"""
Clean up expired records.
Will remove all entries for any ChordData whose callback result is in
state <status> that was marked completed more than
(self.expires * <expires_multiplier>) ago. | [
"u",
"Clean",
"up",
"expired",
"records",
"."
]
| d8745f5f0929ad154fad779a19fbefe7f51e9498 | https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L113-L133 | train |
edx/edx-celeryutils | celery_utils/chordable_django_backend.py | ChordableDjangoBackend.on_chord_part_return | def on_chord_part_return(self, task, state, result, propagate=False): # pylint: disable=redefined-outer-name
u"""
Update the linking ChordData object and execute callback if needed.
Parameters
----------
subtask: The subtask that just finished executing. Most useful values
are stored on subtask.request.
state: the status of the just-finished subtask.
result: the resulting value of subtask execution.
propagate: unused here, we check CELERY_CHORD_PROPAGATES and the
chord's options in chord_data.execute_callback()
"""
with transaction.atomic():
chord_data = ChordData.objects.select_for_update().get( # select_for_update will prevent race conditions
callback_result__task_id=task.request.chord[u'options'][u'task_id']
)
_ = TaskMeta.objects.update_or_create(
task_id=task.request.id,
defaults={
u'status': state,
u'result': result
}
)
if chord_data.is_ready():
# we don't use celery beat, so this is as good a place as any to fire off periodic cleanup tasks
self.get_suitable_app(current_app).tasks[u'celery.backend_cleanup'].apply_async()
chord_data.execute_callback() | python | def on_chord_part_return(self, task, state, result, propagate=False): # pylint: disable=redefined-outer-name
u"""
Update the linking ChordData object and execute callback if needed.
Parameters
----------
subtask: The subtask that just finished executing. Most useful values
are stored on subtask.request.
state: the status of the just-finished subtask.
result: the resulting value of subtask execution.
propagate: unused here, we check CELERY_CHORD_PROPAGATES and the
chord's options in chord_data.execute_callback()
"""
with transaction.atomic():
chord_data = ChordData.objects.select_for_update().get( # select_for_update will prevent race conditions
callback_result__task_id=task.request.chord[u'options'][u'task_id']
)
_ = TaskMeta.objects.update_or_create(
task_id=task.request.id,
defaults={
u'status': state,
u'result': result
}
)
if chord_data.is_ready():
# we don't use celery beat, so this is as good a place as any to fire off periodic cleanup tasks
self.get_suitable_app(current_app).tasks[u'celery.backend_cleanup'].apply_async()
chord_data.execute_callback() | [
"def",
"on_chord_part_return",
"(",
"self",
",",
"task",
",",
"state",
",",
"result",
",",
"propagate",
"=",
"False",
")",
":",
"# pylint: disable=redefined-outer-name",
"with",
"transaction",
".",
"atomic",
"(",
")",
":",
"chord_data",
"=",
"ChordData",
".",
"objects",
".",
"select_for_update",
"(",
")",
".",
"get",
"(",
"# select_for_update will prevent race conditions",
"callback_result__task_id",
"=",
"task",
".",
"request",
".",
"chord",
"[",
"u'options'",
"]",
"[",
"u'task_id'",
"]",
")",
"_",
"=",
"TaskMeta",
".",
"objects",
".",
"update_or_create",
"(",
"task_id",
"=",
"task",
".",
"request",
".",
"id",
",",
"defaults",
"=",
"{",
"u'status'",
":",
"state",
",",
"u'result'",
":",
"result",
"}",
")",
"if",
"chord_data",
".",
"is_ready",
"(",
")",
":",
"# we don't use celery beat, so this is as good a place as any to fire off periodic cleanup tasks",
"self",
".",
"get_suitable_app",
"(",
"current_app",
")",
".",
"tasks",
"[",
"u'celery.backend_cleanup'",
"]",
".",
"apply_async",
"(",
")",
"chord_data",
".",
"execute_callback",
"(",
")"
]
| u"""
Update the linking ChordData object and execute callback if needed.
Parameters
----------
subtask: The subtask that just finished executing. Most useful values
are stored on subtask.request.
state: the status of the just-finished subtask.
result: the resulting value of subtask execution.
propagate: unused here, we check CELERY_CHORD_PROPAGATES and the
chord's options in chord_data.execute_callback() | [
"u",
"Update",
"the",
"linking",
"ChordData",
"object",
"and",
"execute",
"callback",
"if",
"needed",
"."
]
| d8745f5f0929ad154fad779a19fbefe7f51e9498 | https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L151-L179 | train |
edx/edx-celeryutils | celery_utils/chordable_django_backend.py | ChordableDjangoBackend.apply_chord | def apply_chord(self, header, partial_args, group_id, body, **options):
u"""
Instantiate a linking ChordData object before executing subtasks.
Parameters
----------
header: a list of incomplete subtask signatures, with partial
different-per-instance arguments already set.
partial_args: list of same-per-instance subtask arguments.
group_id: a uuid that proved unnecessary in our approach. We use
the callback's frozen TaskMeta id as a linking piece of data.
body: the callback task signature, with all non-subtask-dependent
arguments already set.
Return value is the (unfinished) AsyncResult for body.
"""
callback_entry = TaskMeta.objects.create(task_id=body.id)
chord_data = ChordData.objects.create(callback_result=callback_entry)
for subtask in header:
subtask_entry = TaskMeta.objects.create(task_id=subtask.id)
chord_data.completed_results.add(subtask_entry)
if body.options.get(u'use_iterator', None) is None:
body.options[u'use_iterator'] = True
chord_data.serialized_callback = json.dumps(body)
chord_data.save()
return header(*partial_args, task_id=group_id) | python | def apply_chord(self, header, partial_args, group_id, body, **options):
u"""
Instantiate a linking ChordData object before executing subtasks.
Parameters
----------
header: a list of incomplete subtask signatures, with partial
different-per-instance arguments already set.
partial_args: list of same-per-instance subtask arguments.
group_id: a uuid that proved unnecessary in our approach. We use
the callback's frozen TaskMeta id as a linking piece of data.
body: the callback task signature, with all non-subtask-dependent
arguments already set.
Return value is the (unfinished) AsyncResult for body.
"""
callback_entry = TaskMeta.objects.create(task_id=body.id)
chord_data = ChordData.objects.create(callback_result=callback_entry)
for subtask in header:
subtask_entry = TaskMeta.objects.create(task_id=subtask.id)
chord_data.completed_results.add(subtask_entry)
if body.options.get(u'use_iterator', None) is None:
body.options[u'use_iterator'] = True
chord_data.serialized_callback = json.dumps(body)
chord_data.save()
return header(*partial_args, task_id=group_id) | [
"def",
"apply_chord",
"(",
"self",
",",
"header",
",",
"partial_args",
",",
"group_id",
",",
"body",
",",
"*",
"*",
"options",
")",
":",
"callback_entry",
"=",
"TaskMeta",
".",
"objects",
".",
"create",
"(",
"task_id",
"=",
"body",
".",
"id",
")",
"chord_data",
"=",
"ChordData",
".",
"objects",
".",
"create",
"(",
"callback_result",
"=",
"callback_entry",
")",
"for",
"subtask",
"in",
"header",
":",
"subtask_entry",
"=",
"TaskMeta",
".",
"objects",
".",
"create",
"(",
"task_id",
"=",
"subtask",
".",
"id",
")",
"chord_data",
".",
"completed_results",
".",
"add",
"(",
"subtask_entry",
")",
"if",
"body",
".",
"options",
".",
"get",
"(",
"u'use_iterator'",
",",
"None",
")",
"is",
"None",
":",
"body",
".",
"options",
"[",
"u'use_iterator'",
"]",
"=",
"True",
"chord_data",
".",
"serialized_callback",
"=",
"json",
".",
"dumps",
"(",
"body",
")",
"chord_data",
".",
"save",
"(",
")",
"return",
"header",
"(",
"*",
"partial_args",
",",
"task_id",
"=",
"group_id",
")"
]
| u"""
Instantiate a linking ChordData object before executing subtasks.
Parameters
----------
header: a list of incomplete subtask signatures, with partial
different-per-instance arguments already set.
partial_args: list of same-per-instance subtask arguments.
group_id: a uuid that proved unnecessary in our approach. We use
the callback's frozen TaskMeta id as a linking piece of data.
body: the callback task signature, with all non-subtask-dependent
arguments already set.
Return value is the (unfinished) AsyncResult for body. | [
"u",
"Instantiate",
"a",
"linking",
"ChordData",
"object",
"before",
"executing",
"subtasks",
"."
]
| d8745f5f0929ad154fad779a19fbefe7f51e9498 | https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L181-L208 | train |
edx/edx-celeryutils | celery_utils/chordable_django_backend.py | ChordableDjangoBackend.get_suitable_app | def get_suitable_app(cls, given_app):
u"""
Return a clone of given_app with ChordableDjangoBackend, if needed.
"""
if not isinstance(getattr(given_app, 'backend', None), ChordableDjangoBackend):
return_app = deepcopy(given_app)
return_app.backend = ChordableDjangoBackend(return_app)
return return_app
else:
return given_app | python | def get_suitable_app(cls, given_app):
u"""
Return a clone of given_app with ChordableDjangoBackend, if needed.
"""
if not isinstance(getattr(given_app, 'backend', None), ChordableDjangoBackend):
return_app = deepcopy(given_app)
return_app.backend = ChordableDjangoBackend(return_app)
return return_app
else:
return given_app | [
"def",
"get_suitable_app",
"(",
"cls",
",",
"given_app",
")",
":",
"if",
"not",
"isinstance",
"(",
"getattr",
"(",
"given_app",
",",
"'backend'",
",",
"None",
")",
",",
"ChordableDjangoBackend",
")",
":",
"return_app",
"=",
"deepcopy",
"(",
"given_app",
")",
"return_app",
".",
"backend",
"=",
"ChordableDjangoBackend",
"(",
"return_app",
")",
"return",
"return_app",
"else",
":",
"return",
"given_app"
]
| u"""
Return a clone of given_app with ChordableDjangoBackend, if needed. | [
"u",
"Return",
"a",
"clone",
"of",
"given_app",
"with",
"ChordableDjangoBackend",
"if",
"needed",
"."
]
| d8745f5f0929ad154fad779a19fbefe7f51e9498 | https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/chordable_django_backend.py#L221-L230 | train |
rhayes777/PyAutoFit | autofit/mapper/prior_model.py | PriorModel.linked_model_for_class | def linked_model_for_class(self, cls, make_constants_variable=False, **kwargs):
"""
Create a PriorModel wrapping the specified class with attributes from this instance. Priors can be overridden
using keyword arguments. Any constructor arguments of the new class for which there is no attribute associated
with this class and no keyword argument are created from config.
If make_constants_variable is True then constants associated with this instance will be used to set the mean
of priors in the new instance rather than overriding them.
Parameters
----------
cls: class
The class that the new PriorModel will wrap
make_constants_variable: bool
If True constants from this instance will be used to determine the mean values for priors in the new
instance rather than overriding them
kwargs
Keyword arguments passed in here are used to override attributes from this instance or add new attributes
Returns
-------
new_model: PriorModel
A new prior model with priors derived from this instance
"""
constructor_args = inspect.getfullargspec(cls).args
attribute_tuples = self.attribute_tuples
new_model = PriorModel(cls)
for attribute_tuple in attribute_tuples:
name = attribute_tuple.name
if name in constructor_args or (
is_tuple_like_attribute_name(name) and tuple_name(name) in constructor_args):
attribute = kwargs[name] if name in kwargs else attribute_tuple.value
if make_constants_variable and isinstance(attribute, Constant):
new_attribute = getattr(new_model, name)
if isinstance(new_attribute, Prior):
new_attribute.mean = attribute.value
continue
setattr(new_model, name, attribute)
return new_model | python | def linked_model_for_class(self, cls, make_constants_variable=False, **kwargs):
"""
Create a PriorModel wrapping the specified class with attributes from this instance. Priors can be overridden
using keyword arguments. Any constructor arguments of the new class for which there is no attribute associated
with this class and no keyword argument are created from config.
If make_constants_variable is True then constants associated with this instance will be used to set the mean
of priors in the new instance rather than overriding them.
Parameters
----------
cls: class
The class that the new PriorModel will wrap
make_constants_variable: bool
If True constants from this instance will be used to determine the mean values for priors in the new
instance rather than overriding them
kwargs
Keyword arguments passed in here are used to override attributes from this instance or add new attributes
Returns
-------
new_model: PriorModel
A new prior model with priors derived from this instance
"""
constructor_args = inspect.getfullargspec(cls).args
attribute_tuples = self.attribute_tuples
new_model = PriorModel(cls)
for attribute_tuple in attribute_tuples:
name = attribute_tuple.name
if name in constructor_args or (
is_tuple_like_attribute_name(name) and tuple_name(name) in constructor_args):
attribute = kwargs[name] if name in kwargs else attribute_tuple.value
if make_constants_variable and isinstance(attribute, Constant):
new_attribute = getattr(new_model, name)
if isinstance(new_attribute, Prior):
new_attribute.mean = attribute.value
continue
setattr(new_model, name, attribute)
return new_model | [
"def",
"linked_model_for_class",
"(",
"self",
",",
"cls",
",",
"make_constants_variable",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"constructor_args",
"=",
"inspect",
".",
"getfullargspec",
"(",
"cls",
")",
".",
"args",
"attribute_tuples",
"=",
"self",
".",
"attribute_tuples",
"new_model",
"=",
"PriorModel",
"(",
"cls",
")",
"for",
"attribute_tuple",
"in",
"attribute_tuples",
":",
"name",
"=",
"attribute_tuple",
".",
"name",
"if",
"name",
"in",
"constructor_args",
"or",
"(",
"is_tuple_like_attribute_name",
"(",
"name",
")",
"and",
"tuple_name",
"(",
"name",
")",
"in",
"constructor_args",
")",
":",
"attribute",
"=",
"kwargs",
"[",
"name",
"]",
"if",
"name",
"in",
"kwargs",
"else",
"attribute_tuple",
".",
"value",
"if",
"make_constants_variable",
"and",
"isinstance",
"(",
"attribute",
",",
"Constant",
")",
":",
"new_attribute",
"=",
"getattr",
"(",
"new_model",
",",
"name",
")",
"if",
"isinstance",
"(",
"new_attribute",
",",
"Prior",
")",
":",
"new_attribute",
".",
"mean",
"=",
"attribute",
".",
"value",
"continue",
"setattr",
"(",
"new_model",
",",
"name",
",",
"attribute",
")",
"return",
"new_model"
]
| Create a PriorModel wrapping the specified class with attributes from this instance. Priors can be overridden
using keyword arguments. Any constructor arguments of the new class for which there is no attribute associated
with this class and no keyword argument are created from config.
If make_constants_variable is True then constants associated with this instance will be used to set the mean
of priors in the new instance rather than overriding them.
Parameters
----------
cls: class
The class that the new PriorModel will wrap
make_constants_variable: bool
If True constants from this instance will be used to determine the mean values for priors in the new
instance rather than overriding them
kwargs
Keyword arguments passed in here are used to override attributes from this instance or add new attributes
Returns
-------
new_model: PriorModel
A new prior model with priors derived from this instance | [
"Create",
"a",
"PriorModel",
"wrapping",
"the",
"specified",
"class",
"with",
"attributes",
"from",
"this",
"instance",
".",
"Priors",
"can",
"be",
"overridden",
"using",
"keyword",
"arguments",
".",
"Any",
"constructor",
"arguments",
"of",
"the",
"new",
"class",
"for",
"which",
"there",
"is",
"no",
"attribute",
"associated",
"with",
"this",
"class",
"and",
"no",
"keyword",
"argument",
"are",
"created",
"from",
"config",
"."
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/mapper/prior_model.py#L293-L331 | train |
rhayes777/PyAutoFit | autofit/mapper/prior_model.py | PriorModel.instance_for_arguments | def instance_for_arguments(self, arguments: {Prior: float}):
"""
Create an instance of the associated class for a set of arguments
Parameters
----------
arguments: {Prior: float}
Dictionary mapping_matrix priors to attribute analysis_path and value pairs
Returns
-------
An instance of the class
"""
for prior, value in arguments.items():
prior.assert_within_limits(value)
model_arguments = {t.name: arguments[t.prior] for t in self.direct_prior_tuples}
constant_arguments = {t.name: t.constant.value for t in self.direct_constant_tuples}
for tuple_prior in self.tuple_prior_tuples:
model_arguments[tuple_prior.name] = tuple_prior.prior.value_for_arguments(arguments)
for prior_model_tuple in self.direct_prior_model_tuples:
model_arguments[prior_model_tuple.name] = prior_model_tuple.prior_model.instance_for_arguments(arguments)
return self.cls(**{**model_arguments, **constant_arguments}) | python | def instance_for_arguments(self, arguments: {Prior: float}):
"""
Create an instance of the associated class for a set of arguments
Parameters
----------
arguments: {Prior: float}
Dictionary mapping_matrix priors to attribute analysis_path and value pairs
Returns
-------
An instance of the class
"""
for prior, value in arguments.items():
prior.assert_within_limits(value)
model_arguments = {t.name: arguments[t.prior] for t in self.direct_prior_tuples}
constant_arguments = {t.name: t.constant.value for t in self.direct_constant_tuples}
for tuple_prior in self.tuple_prior_tuples:
model_arguments[tuple_prior.name] = tuple_prior.prior.value_for_arguments(arguments)
for prior_model_tuple in self.direct_prior_model_tuples:
model_arguments[prior_model_tuple.name] = prior_model_tuple.prior_model.instance_for_arguments(arguments)
return self.cls(**{**model_arguments, **constant_arguments}) | [
"def",
"instance_for_arguments",
"(",
"self",
",",
"arguments",
":",
"{",
"Prior",
":",
"float",
"}",
")",
":",
"for",
"prior",
",",
"value",
"in",
"arguments",
".",
"items",
"(",
")",
":",
"prior",
".",
"assert_within_limits",
"(",
"value",
")",
"model_arguments",
"=",
"{",
"t",
".",
"name",
":",
"arguments",
"[",
"t",
".",
"prior",
"]",
"for",
"t",
"in",
"self",
".",
"direct_prior_tuples",
"}",
"constant_arguments",
"=",
"{",
"t",
".",
"name",
":",
"t",
".",
"constant",
".",
"value",
"for",
"t",
"in",
"self",
".",
"direct_constant_tuples",
"}",
"for",
"tuple_prior",
"in",
"self",
".",
"tuple_prior_tuples",
":",
"model_arguments",
"[",
"tuple_prior",
".",
"name",
"]",
"=",
"tuple_prior",
".",
"prior",
".",
"value_for_arguments",
"(",
"arguments",
")",
"for",
"prior_model_tuple",
"in",
"self",
".",
"direct_prior_model_tuples",
":",
"model_arguments",
"[",
"prior_model_tuple",
".",
"name",
"]",
"=",
"prior_model_tuple",
".",
"prior_model",
".",
"instance_for_arguments",
"(",
"arguments",
")",
"return",
"self",
".",
"cls",
"(",
"*",
"*",
"{",
"*",
"*",
"model_arguments",
",",
"*",
"*",
"constant_arguments",
"}",
")"
]
| Create an instance of the associated class for a set of arguments
Parameters
----------
arguments: {Prior: float}
Dictionary mapping_matrix priors to attribute analysis_path and value pairs
Returns
-------
An instance of the class | [
"Create",
"an",
"instance",
"of",
"the",
"associated",
"class",
"for",
"a",
"set",
"of",
"arguments"
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/mapper/prior_model.py#L422-L444 | train |
rhayes777/PyAutoFit | autofit/mapper/prior_model.py | PriorModel.gaussian_prior_model_for_arguments | def gaussian_prior_model_for_arguments(self, arguments):
"""
Create a new instance of model mapper with a set of Gaussian priors based on tuples provided by a previous \
nonlinear search.
Parameters
----------
arguments: [(float, float)]
Tuples providing the mean and sigma of gaussians
Returns
-------
new_model: ModelMapper
A new model mapper populated with Gaussian priors
"""
new_model = copy.deepcopy(self)
model_arguments = {t.name: arguments[t.prior] for t in self.direct_prior_tuples}
for tuple_prior_tuple in self.tuple_prior_tuples:
setattr(new_model, tuple_prior_tuple.name,
tuple_prior_tuple.prior.gaussian_tuple_prior_for_arguments(arguments))
for prior_tuple in self.direct_prior_tuples:
setattr(new_model, prior_tuple.name, model_arguments[prior_tuple.name])
for constant_tuple in self.constant_tuples:
setattr(new_model, constant_tuple.name, constant_tuple.constant)
for name, prior_model in self.direct_prior_model_tuples:
setattr(new_model, name, prior_model.gaussian_prior_model_for_arguments(arguments))
return new_model | python | def gaussian_prior_model_for_arguments(self, arguments):
"""
Create a new instance of model mapper with a set of Gaussian priors based on tuples provided by a previous \
nonlinear search.
Parameters
----------
arguments: [(float, float)]
Tuples providing the mean and sigma of gaussians
Returns
-------
new_model: ModelMapper
A new model mapper populated with Gaussian priors
"""
new_model = copy.deepcopy(self)
model_arguments = {t.name: arguments[t.prior] for t in self.direct_prior_tuples}
for tuple_prior_tuple in self.tuple_prior_tuples:
setattr(new_model, tuple_prior_tuple.name,
tuple_prior_tuple.prior.gaussian_tuple_prior_for_arguments(arguments))
for prior_tuple in self.direct_prior_tuples:
setattr(new_model, prior_tuple.name, model_arguments[prior_tuple.name])
for constant_tuple in self.constant_tuples:
setattr(new_model, constant_tuple.name, constant_tuple.constant)
for name, prior_model in self.direct_prior_model_tuples:
setattr(new_model, name, prior_model.gaussian_prior_model_for_arguments(arguments))
return new_model | [
"def",
"gaussian_prior_model_for_arguments",
"(",
"self",
",",
"arguments",
")",
":",
"new_model",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
")",
"model_arguments",
"=",
"{",
"t",
".",
"name",
":",
"arguments",
"[",
"t",
".",
"prior",
"]",
"for",
"t",
"in",
"self",
".",
"direct_prior_tuples",
"}",
"for",
"tuple_prior_tuple",
"in",
"self",
".",
"tuple_prior_tuples",
":",
"setattr",
"(",
"new_model",
",",
"tuple_prior_tuple",
".",
"name",
",",
"tuple_prior_tuple",
".",
"prior",
".",
"gaussian_tuple_prior_for_arguments",
"(",
"arguments",
")",
")",
"for",
"prior_tuple",
"in",
"self",
".",
"direct_prior_tuples",
":",
"setattr",
"(",
"new_model",
",",
"prior_tuple",
".",
"name",
",",
"model_arguments",
"[",
"prior_tuple",
".",
"name",
"]",
")",
"for",
"constant_tuple",
"in",
"self",
".",
"constant_tuples",
":",
"setattr",
"(",
"new_model",
",",
"constant_tuple",
".",
"name",
",",
"constant_tuple",
".",
"constant",
")",
"for",
"name",
",",
"prior_model",
"in",
"self",
".",
"direct_prior_model_tuples",
":",
"setattr",
"(",
"new_model",
",",
"name",
",",
"prior_model",
".",
"gaussian_prior_model_for_arguments",
"(",
"arguments",
")",
")",
"return",
"new_model"
]
| Create a new instance of model mapper with a set of Gaussian priors based on tuples provided by a previous \
nonlinear search.
Parameters
----------
arguments: [(float, float)]
Tuples providing the mean and sigma of gaussians
Returns
-------
new_model: ModelMapper
A new model mapper populated with Gaussian priors | [
"Create",
"a",
"new",
"instance",
"of",
"model",
"mapper",
"with",
"a",
"set",
"of",
"Gaussian",
"priors",
"based",
"on",
"tuples",
"provided",
"by",
"a",
"previous",
"\\",
"nonlinear",
"search",
"."
]
| a9e6144abb08edfc6a6906c4030d7119bf8d3e14 | https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/mapper/prior_model.py#L446-L476 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.load_post | def load_post(self, wp_post_id):
"""
Refresh local content for a single post from the WordPress REST API.
This can be called from a webhook on the WordPress side when a post is updated.
:param wp_post_id: the wordpress post ID
:return: the fully loaded local post object
"""
path = "sites/{}/posts/{}".format(self.site_id, wp_post_id)
response = self.get(path)
if response.ok and response.text:
api_post = response.json()
self.get_ref_data_map(bulk_mode=False)
self.load_wp_post(api_post, bulk_mode=False)
# the post should exist in the db now, so return it so that callers can work with it
try:
post = Post.objects.get(site_id=self.site_id, wp_id=wp_post_id)
except Exception as ex:
logger.exception("Unable to load post with wp_post_id={}:\n{}".format(wp_post_id, ex.message))
else:
return post
else:
logger.warning("Unable to load post with wp_post_id={}:\n{}".format(wp_post_id, response.text)) | python | def load_post(self, wp_post_id):
"""
Refresh local content for a single post from the WordPress REST API.
This can be called from a webhook on the WordPress side when a post is updated.
:param wp_post_id: the wordpress post ID
:return: the fully loaded local post object
"""
path = "sites/{}/posts/{}".format(self.site_id, wp_post_id)
response = self.get(path)
if response.ok and response.text:
api_post = response.json()
self.get_ref_data_map(bulk_mode=False)
self.load_wp_post(api_post, bulk_mode=False)
# the post should exist in the db now, so return it so that callers can work with it
try:
post = Post.objects.get(site_id=self.site_id, wp_id=wp_post_id)
except Exception as ex:
logger.exception("Unable to load post with wp_post_id={}:\n{}".format(wp_post_id, ex.message))
else:
return post
else:
logger.warning("Unable to load post with wp_post_id={}:\n{}".format(wp_post_id, response.text)) | [
"def",
"load_post",
"(",
"self",
",",
"wp_post_id",
")",
":",
"path",
"=",
"\"sites/{}/posts/{}\"",
".",
"format",
"(",
"self",
".",
"site_id",
",",
"wp_post_id",
")",
"response",
"=",
"self",
".",
"get",
"(",
"path",
")",
"if",
"response",
".",
"ok",
"and",
"response",
".",
"text",
":",
"api_post",
"=",
"response",
".",
"json",
"(",
")",
"self",
".",
"get_ref_data_map",
"(",
"bulk_mode",
"=",
"False",
")",
"self",
".",
"load_wp_post",
"(",
"api_post",
",",
"bulk_mode",
"=",
"False",
")",
"# the post should exist in the db now, so return it so that callers can work with it",
"try",
":",
"post",
"=",
"Post",
".",
"objects",
".",
"get",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"wp_post_id",
")",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"exception",
"(",
"\"Unable to load post with wp_post_id={}:\\n{}\"",
".",
"format",
"(",
"wp_post_id",
",",
"ex",
".",
"message",
")",
")",
"else",
":",
"return",
"post",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Unable to load post with wp_post_id={}:\\n{}\"",
".",
"format",
"(",
"wp_post_id",
",",
"response",
".",
"text",
")",
")"
]
| Refresh local content for a single post from the WordPress REST API.
This can be called from a webhook on the WordPress side when a post is updated.
:param wp_post_id: the wordpress post ID
:return: the fully loaded local post object | [
"Refresh",
"local",
"content",
"for",
"a",
"single",
"post",
"from",
"the",
"the",
"WordPress",
"REST",
"API",
".",
"This",
"can",
"be",
"called",
"from",
"a",
"webhook",
"on",
"the",
"WordPress",
"side",
"when",
"a",
"post",
"is",
"updated",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L70-L96 | train |
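A short sketch of the webhook-style refresh that load_post above enables; the WPAPILoader constructor arguments and the post ID are hypothetical, since the constructor is not shown here:
loader = WPAPILoader(site_id=123456)  # hypothetical arguments; real constructor not shown
post = loader.load_post(4242)  # 4242 is a made-up WordPress post ID
if post is not None:
    print(post.wp_id, post.site_id)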
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.load_categories | def load_categories(self, max_pages=30):
"""
Load all WordPress categories from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None
"""
logger.info("loading categories")
# clear them all out so we don't get dupes if requested
if self.purge_first:
Category.objects.filter(site_id=self.site_id).delete()
path = "sites/{}/categories".format(self.site_id)
params = {"number": 100}
page = 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
while response.ok and response.text and page < max_pages:
logger.info(" - page: %d", page)
api_categories = response.json().get("categories")
if not api_categories:
# we're done here
break
categories = []
for api_category in api_categories:
# if it exists locally, update local version if anything has changed
existing_category = Category.objects.filter(site_id=self.site_id, wp_id=api_category["ID"]).first()
if existing_category:
self.update_existing_category(existing_category, api_category)
else:
categories.append(self.get_new_category(api_category))
if categories:
Category.objects.bulk_create(categories)
elif not self.full:
# we're done here
break
# get next page
page += 1
params["page"] = page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
return | python | def load_categories(self, max_pages=30):
"""
Load all WordPress categories from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None
"""
logger.info("loading categories")
# clear them all out so we don't get dupes if requested
if self.purge_first:
Category.objects.filter(site_id=self.site_id).delete()
path = "sites/{}/categories".format(self.site_id)
params = {"number": 100}
page = 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
while response.ok and response.text and page < max_pages:
logger.info(" - page: %d", page)
api_categories = response.json().get("categories")
if not api_categories:
# we're done here
break
categories = []
for api_category in api_categories:
# if it exists locally, update local version if anything has changed
existing_category = Category.objects.filter(site_id=self.site_id, wp_id=api_category["ID"]).first()
if existing_category:
self.update_existing_category(existing_category, api_category)
else:
categories.append(self.get_new_category(api_category))
if categories:
Category.objects.bulk_create(categories)
elif not self.full:
# we're done here
break
# get next page
page += 1
params["page"] = page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
return | [
"def",
"load_categories",
"(",
"self",
",",
"max_pages",
"=",
"30",
")",
":",
"logger",
".",
"info",
"(",
"\"loading categories\"",
")",
"# clear them all out so we don't get dupes if requested",
"if",
"self",
".",
"purge_first",
":",
"Category",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
")",
".",
"delete",
"(",
")",
"path",
"=",
"\"sites/{}/categories\"",
".",
"format",
"(",
"self",
".",
"site_id",
")",
"params",
"=",
"{",
"\"number\"",
":",
"100",
"}",
"page",
"=",
"1",
"response",
"=",
"self",
".",
"get",
"(",
"path",
",",
"params",
")",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"warning",
"(",
"\"Response NOT OK! status_code=%s\\n%s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"while",
"response",
".",
"ok",
"and",
"response",
".",
"text",
"and",
"page",
"<",
"max_pages",
":",
"logger",
".",
"info",
"(",
"\" - page: %d\"",
",",
"page",
")",
"api_categories",
"=",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"categories\"",
")",
"if",
"not",
"api_categories",
":",
"# we're done here",
"break",
"categories",
"=",
"[",
"]",
"for",
"api_category",
"in",
"api_categories",
":",
"# if it exists locally, update local version if anything has changed",
"existing_category",
"=",
"Category",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_category",
"[",
"\"ID\"",
"]",
")",
".",
"first",
"(",
")",
"if",
"existing_category",
":",
"self",
".",
"update_existing_category",
"(",
"existing_category",
",",
"api_category",
")",
"else",
":",
"categories",
".",
"append",
"(",
"self",
".",
"get_new_category",
"(",
"api_category",
")",
")",
"if",
"categories",
":",
"Category",
".",
"objects",
".",
"bulk_create",
"(",
"categories",
")",
"elif",
"not",
"self",
".",
"full",
":",
"# we're done here",
"break",
"# get next page",
"page",
"+=",
"1",
"params",
"[",
"\"page\"",
"]",
"=",
"page",
"response",
"=",
"self",
".",
"get",
"(",
"path",
",",
"params",
")",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"warning",
"(",
"\"Response NOT OK! status_code=%s\\n%s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"return"
]
| Load all WordPress categories from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None | [
"Load",
"all",
"WordPress",
"categories",
"from",
"the",
"given",
"site",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L154-L207 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.get_new_category | def get_new_category(self, api_category):
"""
Instantiate a new Category from api data.
:param api_category: the api data for the Category
:return: the new Category
"""
return Category(site_id=self.site_id,
wp_id=api_category["ID"],
**self.api_object_data("category", api_category)) | python | def get_new_category(self, api_category):
"""
Instantiate a new Category from api data.
:param api_category: the api data for the Category
:return: the new Category
"""
return Category(site_id=self.site_id,
wp_id=api_category["ID"],
**self.api_object_data("category", api_category)) | [
"def",
"get_new_category",
"(",
"self",
",",
"api_category",
")",
":",
"return",
"Category",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_category",
"[",
"\"ID\"",
"]",
",",
"*",
"*",
"self",
".",
"api_object_data",
"(",
"\"category\"",
",",
"api_category",
")",
")"
]
| Instantiate a new Category from api data.
:param api_category: the api data for the Category
:return: the new Category | [
"Instantiate",
"a",
"new",
"Category",
"from",
"api",
"data",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L209-L218 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.load_tags | def load_tags(self, max_pages=30):
"""
Load all WordPress tags from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None
"""
logger.info("loading tags")
# clear them all out so we don't get dupes if requested
if self.purge_first:
Tag.objects.filter(site_id=self.site_id).delete()
path = "sites/{}/tags".format(self.site_id)
params = {"number": 1000}
page = 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
while response.ok and response.text and page < max_pages:
logger.info(" - page: %d", page)
api_tags = response.json().get("tags")
if not api_tags:
# we're done here
break
tags = []
for api_tag in api_tags:
# if it exists locally, update local version if anything has changed
existing_tag = Tag.objects.filter(site_id=self.site_id, wp_id=api_tag["ID"]).first()
if existing_tag:
self.update_existing_tag(existing_tag, api_tag)
else:
tags.append(self.get_new_tag(api_tag))
if tags:
Tag.objects.bulk_create(tags)
elif not self.full:
# we're done here
break
# get next page
page += 1
params["page"] = page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
return | python | def load_tags(self, max_pages=30):
"""
Load all WordPress tags from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None
"""
logger.info("loading tags")
# clear them all out so we don't get dupes if requested
if self.purge_first:
Tag.objects.filter(site_id=self.site_id).delete()
path = "sites/{}/tags".format(self.site_id)
params = {"number": 1000}
page = 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
while response.ok and response.text and page < max_pages:
logger.info(" - page: %d", page)
api_tags = response.json().get("tags")
if not api_tags:
# we're done here
break
tags = []
for api_tag in api_tags:
# if it exists locally, update local version if anything has changed
existing_tag = Tag.objects.filter(site_id=self.site_id, wp_id=api_tag["ID"]).first()
if existing_tag:
self.update_existing_tag(existing_tag, api_tag)
else:
tags.append(self.get_new_tag(api_tag))
if tags:
Tag.objects.bulk_create(tags)
elif not self.full:
# we're done here
break
# get next page
page += 1
params["page"] = page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
return | [
"def",
"load_tags",
"(",
"self",
",",
"max_pages",
"=",
"30",
")",
":",
"logger",
".",
"info",
"(",
"\"loading tags\"",
")",
"# clear them all out so we don't get dupes if requested",
"if",
"self",
".",
"purge_first",
":",
"Tag",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
")",
".",
"delete",
"(",
")",
"path",
"=",
"\"sites/{}/tags\"",
".",
"format",
"(",
"self",
".",
"site_id",
")",
"params",
"=",
"{",
"\"number\"",
":",
"1000",
"}",
"page",
"=",
"1",
"response",
"=",
"self",
".",
"get",
"(",
"path",
",",
"params",
")",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"warning",
"(",
"\"Response NOT OK! status_code=%s\\n%s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"while",
"response",
".",
"ok",
"and",
"response",
".",
"text",
"and",
"page",
"<",
"max_pages",
":",
"logger",
".",
"info",
"(",
"\" - page: %d\"",
",",
"page",
")",
"api_tags",
"=",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"tags\"",
")",
"if",
"not",
"api_tags",
":",
"# we're done here",
"break",
"tags",
"=",
"[",
"]",
"for",
"api_tag",
"in",
"api_tags",
":",
"# if it exists locally, update local version if anything has changed",
"existing_tag",
"=",
"Tag",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_tag",
"[",
"\"ID\"",
"]",
")",
".",
"first",
"(",
")",
"if",
"existing_tag",
":",
"self",
".",
"update_existing_tag",
"(",
"existing_tag",
",",
"api_tag",
")",
"else",
":",
"tags",
".",
"append",
"(",
"self",
".",
"get_new_tag",
"(",
"api_tag",
")",
")",
"if",
"tags",
":",
"Tag",
".",
"objects",
".",
"bulk_create",
"(",
"tags",
")",
"elif",
"not",
"self",
".",
"full",
":",
"# we're done here",
"break",
"# get next page",
"page",
"+=",
"1",
"params",
"[",
"\"page\"",
"]",
"=",
"page",
"response",
"=",
"self",
".",
"get",
"(",
"path",
",",
"params",
")",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"warning",
"(",
"\"Response NOT OK! status_code=%s\\n%s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"return"
]
| Load all WordPress tags from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None | [
"Load",
"all",
"WordPress",
"tags",
"from",
"the",
"given",
"site",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L220-L273 | train |
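The load_tags record above follows a page-numbered fetch loop with an update-or-bulk-create step per page. The sketch below restates that pattern in isolation; fetch_page and upsert_tags are hypothetical stand-ins for self.get() and the Tag model calls, not part of the django-wordpress-rest API.

def sync_paged(fetch_page, upsert_tags, max_pages=30):
    # fetch_page(page) stands in for GET sites/{id}/tags?number=1000&page=N
    page = 1
    while page < max_pages:
        items = fetch_page(page)
        if not items:
            break              # an empty page means we have reached the end
        upsert_tags(items)     # update existing rows by wp_id, bulk-create the rest
        page += 1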
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.get_new_tag | def get_new_tag(self, api_tag):
"""
Instantiate a new Tag from api data.
:param api_tag: the api data for the Tag
:return: the new Tag
"""
return Tag(site_id=self.site_id,
wp_id=api_tag["ID"],
**self.api_object_data("tag", api_tag)) | python | def get_new_tag(self, api_tag):
"""
Instantiate a new Tag from api data.
:param api_tag: the api data for the Tag
:return: the new Tag
"""
return Tag(site_id=self.site_id,
wp_id=api_tag["ID"],
**self.api_object_data("tag", api_tag)) | [
"def",
"get_new_tag",
"(",
"self",
",",
"api_tag",
")",
":",
"return",
"Tag",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_tag",
"[",
"\"ID\"",
"]",
",",
"*",
"*",
"self",
".",
"api_object_data",
"(",
"\"tag\"",
",",
"api_tag",
")",
")"
]
| Instantiate a new Tag from api data.
:param api_tag: the api data for the Tag
:return: the new Tag | [
"Instantiate",
"a",
"new",
"Tag",
"from",
"api",
"data",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L275-L284 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.load_authors | def load_authors(self, max_pages=10):
"""
Load all WordPress authors from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None
"""
logger.info("loading authors")
# clear them all out so we don't get dupes if requested
if self.purge_first:
Author.objects.filter(site_id=self.site_id).delete()
path = "sites/{}/users".format(self.site_id)
params = {"number": 100}
page = 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
while response.ok and response.text and page < max_pages:
logger.info(" - page: %d", page)
api_users = response.json().get("users")
if not api_users:
# we're done here
break
authors = []
for api_author in api_users:
# if it exists locally, update local version if anything has changed
existing_author = Author.objects.filter(site_id=self.site_id, wp_id=api_author["ID"]).first()
if existing_author:
self.update_existing_author(existing_author, api_author)
else:
authors.append(self.get_new_author(api_author))
if authors:
Author.objects.bulk_create(authors)
elif not self.full:
# we're done here
break
# get next page
# this endpoint doesn't have a page param, so use offset
params["offset"] = page * 100
page += 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
return | python | def load_authors(self, max_pages=10):
"""
Load all WordPress authors from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None
"""
logger.info("loading authors")
# clear them all out so we don't get dupes if requested
if self.purge_first:
Author.objects.filter(site_id=self.site_id).delete()
path = "sites/{}/users".format(self.site_id)
params = {"number": 100}
page = 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
while response.ok and response.text and page < max_pages:
logger.info(" - page: %d", page)
api_users = response.json().get("users")
if not api_users:
# we're done here
break
authors = []
for api_author in api_users:
# if it exists locally, update local version if anything has changed
existing_author = Author.objects.filter(site_id=self.site_id, wp_id=api_author["ID"]).first()
if existing_author:
self.update_existing_author(existing_author, api_author)
else:
authors.append(self.get_new_author(api_author))
if authors:
Author.objects.bulk_create(authors)
elif not self.full:
# we're done here
break
# get next page
# this endpoint doesn't have a page param, so use offset
params["offset"] = page * 100
page += 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
return | [
"def",
"load_authors",
"(",
"self",
",",
"max_pages",
"=",
"10",
")",
":",
"logger",
".",
"info",
"(",
"\"loading authors\"",
")",
"# clear them all out so we don't get dupes if requested",
"if",
"self",
".",
"purge_first",
":",
"Author",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
")",
".",
"delete",
"(",
")",
"path",
"=",
"\"sites/{}/users\"",
".",
"format",
"(",
"self",
".",
"site_id",
")",
"params",
"=",
"{",
"\"number\"",
":",
"100",
"}",
"page",
"=",
"1",
"response",
"=",
"self",
".",
"get",
"(",
"path",
",",
"params",
")",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"warning",
"(",
"\"Response NOT OK! status_code=%s\\n%s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"while",
"response",
".",
"ok",
"and",
"response",
".",
"text",
"and",
"page",
"<",
"max_pages",
":",
"logger",
".",
"info",
"(",
"\" - page: %d\"",
",",
"page",
")",
"api_users",
"=",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"users\"",
")",
"if",
"not",
"api_users",
":",
"# we're done here",
"break",
"authors",
"=",
"[",
"]",
"for",
"api_author",
"in",
"api_users",
":",
"# if it exists locally, update local version if anything has changed",
"existing_author",
"=",
"Author",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_author",
"[",
"\"ID\"",
"]",
")",
".",
"first",
"(",
")",
"if",
"existing_author",
":",
"self",
".",
"update_existing_author",
"(",
"existing_author",
",",
"api_author",
")",
"else",
":",
"authors",
".",
"append",
"(",
"self",
".",
"get_new_author",
"(",
"api_author",
")",
")",
"if",
"authors",
":",
"Author",
".",
"objects",
".",
"bulk_create",
"(",
"authors",
")",
"elif",
"not",
"self",
".",
"full",
":",
"# we're done here",
"break",
"# get next page",
"# this endpoint doesn't have a page param, so use offset",
"params",
"[",
"\"offset\"",
"]",
"=",
"page",
"*",
"100",
"page",
"+=",
"1",
"response",
"=",
"self",
".",
"get",
"(",
"path",
",",
"params",
")",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"warning",
"(",
"\"Response NOT OK! status_code=%s\\n%s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"return"
]
| Load all WordPress authors from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None | [
"Load",
"all",
"WordPress",
"authors",
"from",
"the",
"given",
"site",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L286-L340 | train |
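Unlike the tags endpoint, the users endpoint paged above has no page parameter, so load_authors advances an offset instead. A minimal sketch of that difference, with fetch_users as a hypothetical stand-in for the HTTP call:

def iter_authors(fetch_users, max_pages=10, batch=100):
    params = {"number": batch}
    for page in range(1, max_pages):
        users = fetch_users(params)       # stand-in for self.get(path, params)
        if not users:
            break
        yield from users
        params["offset"] = page * batch   # advance by batch size, not by page number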
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.get_new_author | def get_new_author(self, api_author):
"""
Instantiate a new Author from api data.
:param api_author: the api data for the Author
:return: the new Author
"""
return Author(site_id=self.site_id,
wp_id=api_author["ID"],
**self.api_object_data("author", api_author)) | python | def get_new_author(self, api_author):
"""
Instantiate a new Author from api data.
:param api_author: the api data for the Author
:return: the new Author
"""
return Author(site_id=self.site_id,
wp_id=api_author["ID"],
**self.api_object_data("author", api_author)) | [
"def",
"get_new_author",
"(",
"self",
",",
"api_author",
")",
":",
"return",
"Author",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_author",
"[",
"\"ID\"",
"]",
",",
"*",
"*",
"self",
".",
"api_object_data",
"(",
"\"author\"",
",",
"api_author",
")",
")"
]
| Instantiate a new Author from api data.
:param api_author: the api data for the Author
:return: the new Author | [
"Instantiate",
"a",
"new",
"Author",
"from",
"api",
"data",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L342-L351 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.load_media | def load_media(self, max_pages=150):
"""
Load all WordPress media from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None
"""
logger.info("loading media")
# clear them all out so we don't get dupes
if self.purge_first:
logger.warning("purging ALL media from site %s", self.site_id)
Media.objects.filter(site_id=self.site_id).delete()
path = "sites/{}/media".format(self.site_id)
params = {"number": 100}
self.set_media_params_after(params)
page = 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
while response.ok and response.text and page < max_pages:
logger.info(" - page: %d", page)
api_medias = response.json().get("media")
if not api_medias:
# we're done here
break
medias = []
for api_media in api_medias:
# exclude media items that are not attached to posts (for now)
if api_media["post_ID"] != 0:
# if it exists locally, update local version if anything has changed
existing_media = Media.objects.filter(site_id=self.site_id, wp_id=api_media["ID"]).first()
if existing_media:
self.update_existing_media(existing_media, api_media)
else:
medias.append(self.get_new_media(api_media))
if medias:
Media.objects.bulk_create(medias)
# get next page
page += 1
params["page"] = page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
return | python | def load_media(self, max_pages=150):
"""
Load all WordPress media from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None
"""
logger.info("loading media")
# clear them all out so we don't get dupes
if self.purge_first:
logger.warning("purging ALL media from site %s", self.site_id)
Media.objects.filter(site_id=self.site_id).delete()
path = "sites/{}/media".format(self.site_id)
params = {"number": 100}
self.set_media_params_after(params)
page = 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
while response.ok and response.text and page < max_pages:
logger.info(" - page: %d", page)
api_medias = response.json().get("media")
if not api_medias:
# we're done here
break
medias = []
for api_media in api_medias:
# exclude media items that are not attached to posts (for now)
if api_media["post_ID"] != 0:
# if it exists locally, update local version if anything has changed
existing_media = Media.objects.filter(site_id=self.site_id, wp_id=api_media["ID"]).first()
if existing_media:
self.update_existing_media(existing_media, api_media)
else:
medias.append(self.get_new_media(api_media))
if medias:
Media.objects.bulk_create(medias)
# get next page
page += 1
params["page"] = page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
return | [
"def",
"load_media",
"(",
"self",
",",
"max_pages",
"=",
"150",
")",
":",
"logger",
".",
"info",
"(",
"\"loading media\"",
")",
"# clear them all out so we don't get dupes",
"if",
"self",
".",
"purge_first",
":",
"logger",
".",
"warning",
"(",
"\"purging ALL media from site %s\"",
",",
"self",
".",
"site_id",
")",
"Media",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
")",
".",
"delete",
"(",
")",
"path",
"=",
"\"sites/{}/media\"",
".",
"format",
"(",
"self",
".",
"site_id",
")",
"params",
"=",
"{",
"\"number\"",
":",
"100",
"}",
"self",
".",
"set_media_params_after",
"(",
"params",
")",
"page",
"=",
"1",
"response",
"=",
"self",
".",
"get",
"(",
"path",
",",
"params",
")",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"warning",
"(",
"\"Response NOT OK! status_code=%s\\n%s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"while",
"response",
".",
"ok",
"and",
"response",
".",
"text",
"and",
"page",
"<",
"max_pages",
":",
"logger",
".",
"info",
"(",
"\" - page: %d\"",
",",
"page",
")",
"api_medias",
"=",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"media\"",
")",
"if",
"not",
"api_medias",
":",
"# we're done here",
"break",
"medias",
"=",
"[",
"]",
"for",
"api_media",
"in",
"api_medias",
":",
"# exclude media items that are not attached to posts (for now)",
"if",
"api_media",
"[",
"\"post_ID\"",
"]",
"!=",
"0",
":",
"# if it exists locally, update local version if anything has changed",
"existing_media",
"=",
"Media",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_media",
"[",
"\"ID\"",
"]",
")",
".",
"first",
"(",
")",
"if",
"existing_media",
":",
"self",
".",
"update_existing_media",
"(",
"existing_media",
",",
"api_media",
")",
"else",
":",
"medias",
".",
"append",
"(",
"self",
".",
"get_new_media",
"(",
"api_media",
")",
")",
"if",
"medias",
":",
"Media",
".",
"objects",
".",
"bulk_create",
"(",
"medias",
")",
"# get next page",
"page",
"+=",
"1",
"params",
"[",
"\"page\"",
"]",
"=",
"page",
"response",
"=",
"self",
".",
"get",
"(",
"path",
",",
"params",
")",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"warning",
"(",
"\"Response NOT OK! status_code=%s\\n%s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"return"
]
| Load all WordPress media from the given site.
:param max_pages: kill counter to avoid infinite looping
:return: None | [
"Load",
"all",
"WordPress",
"media",
"from",
"the",
"given",
"site",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L353-L408 | train |
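One detail worth noting in load_media: media items with post_ID == 0 (not attached to any post) are skipped. A tiny illustrative helper, not part of the library:

def attached_media(api_medias):
    # mirrors the post_ID != 0 check in load_media above
    return [m for m in api_medias if m["post_ID"] != 0]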
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.get_new_media | def get_new_media(self, api_media):
"""
Instantiate a new Media from api data.
:param api_media: the api data for the Media
:return: the new Media
"""
return Media(site_id=self.site_id,
wp_id=api_media["ID"],
**self.api_object_data("media", api_media)) | python | def get_new_media(self, api_media):
"""
Instantiate a new Media from api data.
:param api_media: the api data for the Media
:return: the new Media
"""
return Media(site_id=self.site_id,
wp_id=api_media["ID"],
**self.api_object_data("media", api_media)) | [
"def",
"get_new_media",
"(",
"self",
",",
"api_media",
")",
":",
"return",
"Media",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_media",
"[",
"\"ID\"",
"]",
",",
"*",
"*",
"self",
".",
"api_object_data",
"(",
"\"media\"",
",",
"api_media",
")",
")"
]
| Instantiate a new Media from api data.
:param api_media: the api data for the Media
:return: the new Media | [
"Instantiate",
"a",
"new",
"Media",
"from",
"api",
"data",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L427-L436 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.get_ref_data_map | def get_ref_data_map(self, bulk_mode=True):
"""
Get referential data from the local db into the self.ref_data_map dictionary.
This allows for fast FK lookups when looping through posts.
:param bulk_mode: if True, actually get all of the existing ref data
else this would be too much memory, so just build empty dicts
:return: None
"""
if bulk_mode:
self.ref_data_map = {
"authors": {a.wp_id: a for a in Author.objects.filter(site_id=self.site_id)},
"categories": {c.wp_id: c for c in Category.objects.filter(site_id=self.site_id)},
"tags": {t.wp_id: t for t in Tag.objects.filter(site_id=self.site_id)},
"media": {m.wp_id: m for m in Media.objects.filter(site_id=self.site_id)}
}
else:
# in single post mode, WP ref data is handled dynamically for the post
self.ref_data_map = {
"authors": {},
"categories": {},
"tags": {},
"media": {}
} | python | def get_ref_data_map(self, bulk_mode=True):
"""
Get referential data from the local db into the self.ref_data_map dictionary.
This allows for fast FK lookups when looping through posts.
:param bulk_mode: if True, actually get all of the existing ref data
else this would be too much memory, so just build empty dicts
:return: None
"""
if bulk_mode:
self.ref_data_map = {
"authors": {a.wp_id: a for a in Author.objects.filter(site_id=self.site_id)},
"categories": {c.wp_id: c for c in Category.objects.filter(site_id=self.site_id)},
"tags": {t.wp_id: t for t in Tag.objects.filter(site_id=self.site_id)},
"media": {m.wp_id: m for m in Media.objects.filter(site_id=self.site_id)}
}
else:
# in single post mode, WP ref data is handled dynamically for the post
self.ref_data_map = {
"authors": {},
"categories": {},
"tags": {},
"media": {}
} | [
"def",
"get_ref_data_map",
"(",
"self",
",",
"bulk_mode",
"=",
"True",
")",
":",
"if",
"bulk_mode",
":",
"self",
".",
"ref_data_map",
"=",
"{",
"\"authors\"",
":",
"{",
"a",
".",
"wp_id",
":",
"a",
"for",
"a",
"in",
"Author",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
")",
"}",
",",
"\"categories\"",
":",
"{",
"c",
".",
"wp_id",
":",
"c",
"for",
"c",
"in",
"Category",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
")",
"}",
",",
"\"tags\"",
":",
"{",
"t",
".",
"wp_id",
":",
"t",
"for",
"t",
"in",
"Tag",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
")",
"}",
",",
"\"media\"",
":",
"{",
"m",
".",
"wp_id",
":",
"m",
"for",
"m",
"in",
"Media",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
")",
"}",
"}",
"else",
":",
"# in single post mode, WP ref data is handled dynamically for the post",
"self",
".",
"ref_data_map",
"=",
"{",
"\"authors\"",
":",
"{",
"}",
",",
"\"categories\"",
":",
"{",
"}",
",",
"\"tags\"",
":",
"{",
"}",
",",
"\"media\"",
":",
"{",
"}",
"}"
]
| Get referential data from the local db into the self.ref_data_map dictionary.
This allows for fast FK lookups when looping through posts.
:param bulk_mode: if True, actually get all of the existing ref data
else this would be too much memory, so just build empty dicts
:return: None | [
"Get",
"referential",
"data",
"from",
"the",
"local",
"db",
"into",
"the",
"self",
".",
"ref_data_map",
"dictionary",
".",
"This",
"allows",
"for",
"fast",
"FK",
"lookups",
"when",
"looping",
"through",
"posts",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L438-L461 | train |
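The ref_data_map built above keys each related model by its WordPress ID, so the post loop can resolve foreign keys with dictionary gets instead of per-post queries. A hedged sketch of that lookup, where authors_by_wp_id would be ref_data_map["authors"] in bulk mode:

def author_for(api_post, authors_by_wp_id):
    # O(1) lookup by the WordPress author ID carried in the post payload
    return authors_by_wp_id.get(api_post["author"]["ID"])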
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.load_posts | def load_posts(self, post_type=None, max_pages=200, status=None):
"""
Load all WordPress posts of a given post_type from a site.
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param max_pages: kill counter to avoid infinite looping
:param status: load posts with the given status,
including any of: "publish", "private", "draft", "pending", "future", and "trash", or simply "any"
        Note: non-public statuses require authentication
:return: None
"""
logger.info("loading posts with post_type=%s", post_type)
# clear them all out so we don't get dupes
if self.purge_first:
Post.objects.filter(site_id=self.site_id, post_type=post_type).delete()
path = "sites/{}/posts".format(self.site_id)
# type allows us to pull information about pages, attachments, guest-authors, etc.
# you know, posts that aren't posts... thank you WordPress!
if not post_type:
post_type = "post"
if not status:
status = "publish"
params = {"number": self.batch_size, "type": post_type, "status": status}
self.set_posts_param_modified_after(params, post_type, status)
# get first page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
# process all posts in the response
self.process_posts_response(response, path, params, max_pages) | python | def load_posts(self, post_type=None, max_pages=200, status=None):
"""
Load all WordPress posts of a given post_type from a site.
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param max_pages: kill counter to avoid infinite looping
:param status: load posts with the given status,
including any of: "publish", "private", "draft", "pending", "future", and "trash", or simply "any"
        Note: non-public statuses require authentication
:return: None
"""
logger.info("loading posts with post_type=%s", post_type)
# clear them all out so we don't get dupes
if self.purge_first:
Post.objects.filter(site_id=self.site_id, post_type=post_type).delete()
path = "sites/{}/posts".format(self.site_id)
# type allows us to pull information about pages, attachments, guest-authors, etc.
# you know, posts that aren't posts... thank you WordPress!
if not post_type:
post_type = "post"
if not status:
status = "publish"
params = {"number": self.batch_size, "type": post_type, "status": status}
self.set_posts_param_modified_after(params, post_type, status)
# get first page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
# process all posts in the response
self.process_posts_response(response, path, params, max_pages) | [
"def",
"load_posts",
"(",
"self",
",",
"post_type",
"=",
"None",
",",
"max_pages",
"=",
"200",
",",
"status",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"\"loading posts with post_type=%s\"",
",",
"post_type",
")",
"# clear them all out so we don't get dupes",
"if",
"self",
".",
"purge_first",
":",
"Post",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"post_type",
"=",
"post_type",
")",
".",
"delete",
"(",
")",
"path",
"=",
"\"sites/{}/posts\"",
".",
"format",
"(",
"self",
".",
"site_id",
")",
"# type allows us to pull information about pages, attachments, guest-authors, etc.",
"# you know, posts that aren't posts... thank you WordPress!",
"if",
"not",
"post_type",
":",
"post_type",
"=",
"\"post\"",
"if",
"not",
"status",
":",
"status",
"=",
"\"publish\"",
"params",
"=",
"{",
"\"number\"",
":",
"self",
".",
"batch_size",
",",
"\"type\"",
":",
"post_type",
",",
"\"status\"",
":",
"status",
"}",
"self",
".",
"set_posts_param_modified_after",
"(",
"params",
",",
"post_type",
",",
"status",
")",
"# get first page",
"response",
"=",
"self",
".",
"get",
"(",
"path",
",",
"params",
")",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"warning",
"(",
"\"Response NOT OK! status_code=%s\\n%s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"# process all posts in the response",
"self",
".",
"process_posts_response",
"(",
"response",
",",
"path",
",",
"params",
",",
"max_pages",
")"
]
| Load all WordPress posts of a given post_type from a site.
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param max_pages: kill counter to avoid infinite looping
:param status: load posts with the given status,
including any of: "publish", "private", "draft", "pending", "future", and "trash", or simply "any"
        Note: non-public statuses require authentication
:return: None | [
"Load",
"all",
"WordPress",
"posts",
"of",
"a",
"given",
"post_type",
"from",
"a",
"site",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L463-L498 | train |
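A hypothetical usage sketch for load_posts. The WPAPILoader constructor is not shown in this excerpt, so the loader is assumed to be an already-configured instance passed in from elsewhere:

def sync_site_content(loader):
    loader.load_posts(post_type="page", status="publish", max_pages=50)
    loader.load_posts(post_type="post", status="any")   # non-public statuses need auth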
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.set_posts_param_modified_after | def set_posts_param_modified_after(self, params, post_type, status):
"""
Set modified_after date to "continue where we left off" if appropriate
:param params: the GET params dict, which may be updated to include the "modified_after" key
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param status: publish, private, draft, etc.
:return: None
"""
if not self.purge_first and not self.full and not self.modified_after:
if status == "any":
latest = Post.objects.filter(post_type=post_type).order_by("-modified").first()
else:
latest = Post.objects.filter(post_type=post_type, status=status).order_by("-modified").first()
if latest:
self.modified_after = latest.modified
if self.modified_after:
params["modified_after"] = self.modified_after.isoformat()
logger.info("getting posts after: %s", params["modified_after"]) | python | def set_posts_param_modified_after(self, params, post_type, status):
"""
Set modified_after date to "continue where we left off" if appropriate
:param params: the GET params dict, which may be updated to include the "modified_after" key
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param status: publish, private, draft, etc.
:return: None
"""
if not self.purge_first and not self.full and not self.modified_after:
if status == "any":
latest = Post.objects.filter(post_type=post_type).order_by("-modified").first()
else:
latest = Post.objects.filter(post_type=post_type, status=status).order_by("-modified").first()
if latest:
self.modified_after = latest.modified
if self.modified_after:
params["modified_after"] = self.modified_after.isoformat()
logger.info("getting posts after: %s", params["modified_after"]) | [
"def",
"set_posts_param_modified_after",
"(",
"self",
",",
"params",
",",
"post_type",
",",
"status",
")",
":",
"if",
"not",
"self",
".",
"purge_first",
"and",
"not",
"self",
".",
"full",
"and",
"not",
"self",
".",
"modified_after",
":",
"if",
"status",
"==",
"\"any\"",
":",
"latest",
"=",
"Post",
".",
"objects",
".",
"filter",
"(",
"post_type",
"=",
"post_type",
")",
".",
"order_by",
"(",
"\"-modified\"",
")",
".",
"first",
"(",
")",
"else",
":",
"latest",
"=",
"Post",
".",
"objects",
".",
"filter",
"(",
"post_type",
"=",
"post_type",
",",
"status",
"=",
"status",
")",
".",
"order_by",
"(",
"\"-modified\"",
")",
".",
"first",
"(",
")",
"if",
"latest",
":",
"self",
".",
"modified_after",
"=",
"latest",
".",
"modified",
"if",
"self",
".",
"modified_after",
":",
"params",
"[",
"\"modified_after\"",
"]",
"=",
"self",
".",
"modified_after",
".",
"isoformat",
"(",
")",
"logger",
".",
"info",
"(",
"\"getting posts after: %s\"",
",",
"params",
"[",
"\"modified_after\"",
"]",
")"
]
| Set modified_after date to "continue where we left off" if appropriate
:param params: the GET params dict, which may be updated to include the "modified_after" key
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param status: publish, private, draft, etc.
:return: None | [
"Set",
"modified_after",
"date",
"to",
"continue",
"where",
"we",
"left",
"off",
"if",
"appropriate"
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L500-L519 | train |
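set_posts_param_modified_after implements incremental sync: resume from the newest locally stored modified timestamp and pass it to the API as an ISO-8601 string. A standalone sketch of that step, where latest_modified stands in for the Post query above:

def add_modified_after(params, latest_modified):
    if latest_modified is not None:
        params["modified_after"] = latest_modified.isoformat()
    return params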
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.load_wp_post | def load_wp_post(self, api_post, bulk_mode=True, post_categories=None, post_tags=None, post_media_attachments=None, posts=None):
"""
Load a single post from API data.
:param api_post: the API data for the post
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param post_categories: a mapping of Categories in the site, keyed by post ID
:param post_tags: a mapping of Tags in the site, keyed by post ID
:param post_media_attachments: a mapping of Media in the site, keyed by post ID
:param posts: a list of posts to be created or updated
:return: None
"""
# initialize reference vars if none supplied
if post_categories is None:
post_categories = {}
if post_tags is None:
post_tags = {}
if post_media_attachments is None:
post_media_attachments = {}
if posts is None:
posts = []
# process objects related to this post
author = None
if api_post["author"].get("ID"):
author = self.process_post_author(bulk_mode, api_post["author"])
# process many-to-many fields
self.process_post_categories(bulk_mode, api_post, post_categories)
self.process_post_tags(bulk_mode, api_post, post_tags)
self.process_post_media_attachments(bulk_mode, api_post, post_media_attachments)
# if this post exists, update it; else create it
existing_post = Post.objects.filter(site_id=self.site_id, wp_id=api_post["ID"]).first()
if existing_post:
self.process_existing_post(existing_post, api_post, author, post_categories, post_tags, post_media_attachments)
else:
self.process_new_post(bulk_mode, api_post, posts, author, post_categories, post_tags, post_media_attachments)
        # if this is a real post (not an attachment, page, etc.), sync child attachments that have been deleted
# these are generally other posts with post_type=attachment representing media that has been "uploaded to the post"
# they can be deleted on the WP side, creating an orphan here without this step.
if api_post["type"] == "post":
self.sync_deleted_attachments(api_post) | python | def load_wp_post(self, api_post, bulk_mode=True, post_categories=None, post_tags=None, post_media_attachments=None, posts=None):
"""
Load a single post from API data.
:param api_post: the API data for the post
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param post_categories: a mapping of Categories in the site, keyed by post ID
:param post_tags: a mapping of Tags in the site, keyed by post ID
:param post_media_attachments: a mapping of Media in the site, keyed by post ID
:param posts: a list of posts to be created or updated
:return: None
"""
# initialize reference vars if none supplied
if post_categories is None:
post_categories = {}
if post_tags is None:
post_tags = {}
if post_media_attachments is None:
post_media_attachments = {}
if posts is None:
posts = []
# process objects related to this post
author = None
if api_post["author"].get("ID"):
author = self.process_post_author(bulk_mode, api_post["author"])
# process many-to-many fields
self.process_post_categories(bulk_mode, api_post, post_categories)
self.process_post_tags(bulk_mode, api_post, post_tags)
self.process_post_media_attachments(bulk_mode, api_post, post_media_attachments)
# if this post exists, update it; else create it
existing_post = Post.objects.filter(site_id=self.site_id, wp_id=api_post["ID"]).first()
if existing_post:
self.process_existing_post(existing_post, api_post, author, post_categories, post_tags, post_media_attachments)
else:
self.process_new_post(bulk_mode, api_post, posts, author, post_categories, post_tags, post_media_attachments)
        # if this is a real post (not an attachment, page, etc.), sync child attachments that have been deleted
# these are generally other posts with post_type=attachment representing media that has been "uploaded to the post"
# they can be deleted on the WP side, creating an orphan here without this step.
if api_post["type"] == "post":
self.sync_deleted_attachments(api_post) | [
"def",
"load_wp_post",
"(",
"self",
",",
"api_post",
",",
"bulk_mode",
"=",
"True",
",",
"post_categories",
"=",
"None",
",",
"post_tags",
"=",
"None",
",",
"post_media_attachments",
"=",
"None",
",",
"posts",
"=",
"None",
")",
":",
"# initialize reference vars if none supplied",
"if",
"post_categories",
"is",
"None",
":",
"post_categories",
"=",
"{",
"}",
"if",
"post_tags",
"is",
"None",
":",
"post_tags",
"=",
"{",
"}",
"if",
"post_media_attachments",
"is",
"None",
":",
"post_media_attachments",
"=",
"{",
"}",
"if",
"posts",
"is",
"None",
":",
"posts",
"=",
"[",
"]",
"# process objects related to this post",
"author",
"=",
"None",
"if",
"api_post",
"[",
"\"author\"",
"]",
".",
"get",
"(",
"\"ID\"",
")",
":",
"author",
"=",
"self",
".",
"process_post_author",
"(",
"bulk_mode",
",",
"api_post",
"[",
"\"author\"",
"]",
")",
"# process many-to-many fields",
"self",
".",
"process_post_categories",
"(",
"bulk_mode",
",",
"api_post",
",",
"post_categories",
")",
"self",
".",
"process_post_tags",
"(",
"bulk_mode",
",",
"api_post",
",",
"post_tags",
")",
"self",
".",
"process_post_media_attachments",
"(",
"bulk_mode",
",",
"api_post",
",",
"post_media_attachments",
")",
"# if this post exists, update it; else create it",
"existing_post",
"=",
"Post",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_post",
"[",
"\"ID\"",
"]",
")",
".",
"first",
"(",
")",
"if",
"existing_post",
":",
"self",
".",
"process_existing_post",
"(",
"existing_post",
",",
"api_post",
",",
"author",
",",
"post_categories",
",",
"post_tags",
",",
"post_media_attachments",
")",
"else",
":",
"self",
".",
"process_new_post",
"(",
"bulk_mode",
",",
"api_post",
",",
"posts",
",",
"author",
",",
"post_categories",
",",
"post_tags",
",",
"post_media_attachments",
")",
"# if this is a real post (not an attachment, page, etc.), sync child attachments that haven been deleted",
"# these are generally other posts with post_type=attachment representing media that has been \"uploaded to the post\"",
"# they can be deleted on the WP side, creating an orphan here without this step.",
"if",
"api_post",
"[",
"\"type\"",
"]",
"==",
"\"post\"",
":",
"self",
".",
"sync_deleted_attachments",
"(",
"api_post",
")"
]
| Load a single post from API data.
:param api_post: the API data for the post
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param post_categories: a mapping of Categories in the site, keyed by post ID
:param post_tags: a mapping of Tags in the site, keyed by post ID
:param post_media_attachments: a mapping of Media in the site, keyed by post ID
:param posts: a list of posts to be created or updated
:return: None | [
"Load",
"a",
"single",
"post",
"from",
"API",
"data",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L587-L633 | train |
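The per-post flow in load_wp_post resolves related objects first, then upserts the post, then prunes orphaned attachments. The outline below is only a sketch; the handler names are placeholders for the process_* methods shown in the neighbouring records:

def handle_api_post(api_post, handlers):
    handlers.author(api_post["author"])             # FK: author
    handlers.categories(api_post["categories"])     # M2M: categories
    handlers.tags(api_post["tags"])                 # M2M: tags
    handlers.attachments(api_post["attachments"])   # M2M: media attachments
    handlers.upsert(api_post)                       # create or update the Post row
    if api_post["type"] == "post":
        handlers.sync_deleted_attachments(api_post) # prune attachments deleted upstream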
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.process_post_author | def process_post_author(self, bulk_mode, api_author):
"""
Create or update an Author related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_author: the data in the api for the Author
:return: the up-to-date Author object
"""
# get from the ref data map if in bulk mode, else look it up from the db
if bulk_mode:
author = self.ref_data_map["authors"].get(api_author["ID"])
if author:
self.update_existing_author(author, api_author)
else:
# if the author wasn't found (likely because it's a Byline or guest author, not a user),
# go ahead and create the author now
author = Author.objects.create(site_id=self.site_id,
wp_id=api_author["ID"],
**self.api_object_data("author", api_author))
else:
# do a direct db lookup if we're not in bulk mode
author, created = self.get_or_create_author(api_author)
if author and not created:
self.update_existing_author(author, api_author)
# add to the ref data map so we don't try to create it again
if author:
self.ref_data_map["authors"][api_author["ID"]] = author
return author | python | def process_post_author(self, bulk_mode, api_author):
"""
Create or update an Author related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_author: the data in the api for the Author
:return: the up-to-date Author object
"""
# get from the ref data map if in bulk mode, else look it up from the db
if bulk_mode:
author = self.ref_data_map["authors"].get(api_author["ID"])
if author:
self.update_existing_author(author, api_author)
else:
# if the author wasn't found (likely because it's a Byline or guest author, not a user),
# go ahead and create the author now
author = Author.objects.create(site_id=self.site_id,
wp_id=api_author["ID"],
**self.api_object_data("author", api_author))
else:
# do a direct db lookup if we're not in bulk mode
author, created = self.get_or_create_author(api_author)
if author and not created:
self.update_existing_author(author, api_author)
# add to the ref data map so we don't try to create it again
if author:
self.ref_data_map["authors"][api_author["ID"]] = author
return author | [
"def",
"process_post_author",
"(",
"self",
",",
"bulk_mode",
",",
"api_author",
")",
":",
"# get from the ref data map if in bulk mode, else look it up from the db",
"if",
"bulk_mode",
":",
"author",
"=",
"self",
".",
"ref_data_map",
"[",
"\"authors\"",
"]",
".",
"get",
"(",
"api_author",
"[",
"\"ID\"",
"]",
")",
"if",
"author",
":",
"self",
".",
"update_existing_author",
"(",
"author",
",",
"api_author",
")",
"else",
":",
"# if the author wasn't found (likely because it's a Byline or guest author, not a user),",
"# go ahead and create the author now",
"author",
"=",
"Author",
".",
"objects",
".",
"create",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_author",
"[",
"\"ID\"",
"]",
",",
"*",
"*",
"self",
".",
"api_object_data",
"(",
"\"author\"",
",",
"api_author",
")",
")",
"else",
":",
"# do a direct db lookup if we're not in bulk mode",
"author",
",",
"created",
"=",
"self",
".",
"get_or_create_author",
"(",
"api_author",
")",
"if",
"author",
"and",
"not",
"created",
":",
"self",
".",
"update_existing_author",
"(",
"author",
",",
"api_author",
")",
"# add to the ref data map so we don't try to create it again",
"if",
"author",
":",
"self",
".",
"ref_data_map",
"[",
"\"authors\"",
"]",
"[",
"api_author",
"[",
"\"ID\"",
"]",
"]",
"=",
"author",
"return",
"author"
]
| Create or update an Author related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_author: the data in the api for the Author
:return: the up-to-date Author object | [
"Create",
"or",
"update",
"an",
"Author",
"related",
"to",
"a",
"post",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L635-L664 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.get_or_create_author | def get_or_create_author(self, api_author):
"""
Find or create an Author object given API data.
:param api_author: the API data for the Author
:return: a tuple of an Author instance and a boolean indicating whether the author was created or not
"""
return Author.objects.get_or_create(site_id=self.site_id,
wp_id=api_author["ID"],
defaults=self.api_object_data("author", api_author)) | python | def get_or_create_author(self, api_author):
"""
Find or create an Author object given API data.
:param api_author: the API data for the Author
:return: a tuple of an Author instance and a boolean indicating whether the author was created or not
"""
return Author.objects.get_or_create(site_id=self.site_id,
wp_id=api_author["ID"],
defaults=self.api_object_data("author", api_author)) | [
"def",
"get_or_create_author",
"(",
"self",
",",
"api_author",
")",
":",
"return",
"Author",
".",
"objects",
".",
"get_or_create",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_author",
"[",
"\"ID\"",
"]",
",",
"defaults",
"=",
"self",
".",
"api_object_data",
"(",
"\"author\"",
",",
"api_author",
")",
")"
]
| Find or create an Author object given API data.
:param api_author: the API data for the Author
:return: a tuple of an Author instance and a boolean indicating whether the author was created or not | [
"Find",
"or",
"create",
"an",
"Author",
"object",
"given",
"API",
"data",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L666-L675 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.process_post_categories | def process_post_categories(self, bulk_mode, api_post, post_categories):
"""
Create or update Categories related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_categories: a mapping of Categories keyed by post ID
:return: None
"""
post_categories[api_post["ID"]] = []
for api_category in six.itervalues(api_post["categories"]):
category = self.process_post_category(bulk_mode, api_category)
if category:
post_categories[api_post["ID"]].append(category) | python | def process_post_categories(self, bulk_mode, api_post, post_categories):
"""
Create or update Categories related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_categories: a mapping of Categories keyed by post ID
:return: None
"""
post_categories[api_post["ID"]] = []
for api_category in six.itervalues(api_post["categories"]):
category = self.process_post_category(bulk_mode, api_category)
if category:
post_categories[api_post["ID"]].append(category) | [
"def",
"process_post_categories",
"(",
"self",
",",
"bulk_mode",
",",
"api_post",
",",
"post_categories",
")",
":",
"post_categories",
"[",
"api_post",
"[",
"\"ID\"",
"]",
"]",
"=",
"[",
"]",
"for",
"api_category",
"in",
"six",
".",
"itervalues",
"(",
"api_post",
"[",
"\"categories\"",
"]",
")",
":",
"category",
"=",
"self",
".",
"process_post_category",
"(",
"bulk_mode",
",",
"api_category",
")",
"if",
"category",
":",
"post_categories",
"[",
"api_post",
"[",
"\"ID\"",
"]",
"]",
".",
"append",
"(",
"category",
")"
]
| Create or update Categories related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_categories: a mapping of Categories keyed by post ID
:return: None | [
"Create",
"or",
"update",
"Categories",
"related",
"to",
"a",
"post",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L677-L690 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.process_post_category | def process_post_category(self, bulk_mode, api_category):
"""
Create or update a Category related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_category: the API data for the Category
:return: the Category object
"""
category = None
# try to get from the ref data map if in bulk mode
if bulk_mode:
category = self.ref_data_map["categories"].get(api_category["ID"])
# double check the db before giving up, we may have sync'd it in a previous run
if not category:
category, created = Category.objects.get_or_create(site_id=self.site_id,
wp_id=api_category["ID"],
defaults=self.api_object_data("category", api_category))
if category and not created:
self.update_existing_category(category, api_category)
# add to ref data map so later lookups work
if category:
self.ref_data_map["categories"][api_category["ID"]] = category
return category | python | def process_post_category(self, bulk_mode, api_category):
"""
Create or update a Category related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_category: the API data for the Category
:return: the Category object
"""
category = None
# try to get from the ref data map if in bulk mode
if bulk_mode:
category = self.ref_data_map["categories"].get(api_category["ID"])
# double check the db before giving up, we may have sync'd it in a previous run
if not category:
category, created = Category.objects.get_or_create(site_id=self.site_id,
wp_id=api_category["ID"],
defaults=self.api_object_data("category", api_category))
if category and not created:
self.update_existing_category(category, api_category)
# add to ref data map so later lookups work
if category:
self.ref_data_map["categories"][api_category["ID"]] = category
return category | [
"def",
"process_post_category",
"(",
"self",
",",
"bulk_mode",
",",
"api_category",
")",
":",
"category",
"=",
"None",
"# try to get from the ref data map if in bulk mode",
"if",
"bulk_mode",
":",
"category",
"=",
"self",
".",
"ref_data_map",
"[",
"\"categories\"",
"]",
".",
"get",
"(",
"api_category",
"[",
"\"ID\"",
"]",
")",
"# double check the db before giving up, we may have sync'd it in a previous run",
"if",
"not",
"category",
":",
"category",
",",
"created",
"=",
"Category",
".",
"objects",
".",
"get_or_create",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_category",
"[",
"\"ID\"",
"]",
",",
"defaults",
"=",
"self",
".",
"api_object_data",
"(",
"\"category\"",
",",
"api_category",
")",
")",
"if",
"category",
"and",
"not",
"created",
":",
"self",
".",
"update_existing_category",
"(",
"category",
",",
"api_category",
")",
"# add to ref data map so later lookups work",
"if",
"category",
":",
"self",
".",
"ref_data_map",
"[",
"\"categories\"",
"]",
"[",
"api_category",
"[",
"\"ID\"",
"]",
"]",
"=",
"category",
"return",
"category"
]
| Create or update a Category related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_category: the API data for the Category
:return: the Category object | [
"Create",
"or",
"update",
"a",
"Category",
"related",
"to",
"a",
"post",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L692-L719 | train |
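process_post_category, process_post_tag, and process_post_media_attachment share the same cache-then-get_or_create shape. A generic sketch of that pattern; get_or_create here is a stand-in for the Django manager call, not a real helper in this module:

def cached_get_or_create(wp_id, cache, get_or_create):
    obj = cache.get(wp_id)
    if obj is None:
        obj = get_or_create(wp_id)   # falls back to the database
        cache[wp_id] = obj           # later posts with the same wp_id hit the cache
    return obj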
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.process_post_tags | def process_post_tags(self, bulk_mode, api_post, post_tags):
"""
Create or update Tags related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_tags: a mapping of Tags keyed by post ID
:return: None
"""
post_tags[api_post["ID"]] = []
for api_tag in six.itervalues(api_post["tags"]):
tag = self.process_post_tag(bulk_mode, api_tag)
if tag:
post_tags[api_post["ID"]].append(tag) | python | def process_post_tags(self, bulk_mode, api_post, post_tags):
"""
Create or update Tags related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_tags: a mapping of Tags keyed by post ID
:return: None
"""
post_tags[api_post["ID"]] = []
for api_tag in six.itervalues(api_post["tags"]):
tag = self.process_post_tag(bulk_mode, api_tag)
if tag:
post_tags[api_post["ID"]].append(tag) | [
"def",
"process_post_tags",
"(",
"self",
",",
"bulk_mode",
",",
"api_post",
",",
"post_tags",
")",
":",
"post_tags",
"[",
"api_post",
"[",
"\"ID\"",
"]",
"]",
"=",
"[",
"]",
"for",
"api_tag",
"in",
"six",
".",
"itervalues",
"(",
"api_post",
"[",
"\"tags\"",
"]",
")",
":",
"tag",
"=",
"self",
".",
"process_post_tag",
"(",
"bulk_mode",
",",
"api_tag",
")",
"if",
"tag",
":",
"post_tags",
"[",
"api_post",
"[",
"\"ID\"",
"]",
"]",
".",
"append",
"(",
"tag",
")"
]
| Create or update Tags related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_tags: a mapping of Tags keyed by post ID
:return: None | [
"Create",
"or",
"update",
"Tags",
"related",
"to",
"a",
"post",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L721-L734 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.process_post_tag | def process_post_tag(self, bulk_mode, api_tag):
"""
Create or update a Tag related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_tag: the API data for the Tag
:return: the Tag object
"""
tag = None
# try to get from the ref data map if in bulk mode
if bulk_mode:
tag = self.ref_data_map["tags"].get(api_tag["ID"])
# double check the db before giving up, we may have sync'd it in a previous run
if not tag:
tag, created = Tag.objects.get_or_create(site_id=self.site_id,
wp_id=api_tag["ID"],
defaults=self.api_object_data("tag", api_tag))
if tag and not created:
self.update_existing_tag(tag, api_tag)
# add to ref data map so later lookups work
if tag:
self.ref_data_map["tags"][api_tag["ID"]] = tag
return tag | python | def process_post_tag(self, bulk_mode, api_tag):
"""
Create or update a Tag related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_tag: the API data for the Tag
:return: the Tag object
"""
tag = None
# try to get from the ref data map if in bulk mode
if bulk_mode:
tag = self.ref_data_map["tags"].get(api_tag["ID"])
# double check the db before giving up, we may have sync'd it in a previous run
if not tag:
tag, created = Tag.objects.get_or_create(site_id=self.site_id,
wp_id=api_tag["ID"],
defaults=self.api_object_data("tag", api_tag))
if tag and not created:
self.update_existing_tag(tag, api_tag)
# add to ref data map so later lookups work
if tag:
self.ref_data_map["tags"][api_tag["ID"]] = tag
return tag | [
"def",
"process_post_tag",
"(",
"self",
",",
"bulk_mode",
",",
"api_tag",
")",
":",
"tag",
"=",
"None",
"# try to get from the ref data map if in bulk mode",
"if",
"bulk_mode",
":",
"tag",
"=",
"self",
".",
"ref_data_map",
"[",
"\"tags\"",
"]",
".",
"get",
"(",
"api_tag",
"[",
"\"ID\"",
"]",
")",
"# double check the db before giving up, we may have sync'd it in a previous run",
"if",
"not",
"tag",
":",
"tag",
",",
"created",
"=",
"Tag",
".",
"objects",
".",
"get_or_create",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"wp_id",
"=",
"api_tag",
"[",
"\"ID\"",
"]",
",",
"defaults",
"=",
"self",
".",
"api_object_data",
"(",
"\"tag\"",
",",
"api_tag",
")",
")",
"if",
"tag",
"and",
"not",
"created",
":",
"self",
".",
"update_existing_tag",
"(",
"tag",
",",
"api_tag",
")",
"# add to ref data map so later lookups work",
"if",
"tag",
":",
"self",
".",
"ref_data_map",
"[",
"\"tags\"",
"]",
"[",
"api_tag",
"[",
"\"ID\"",
"]",
"]",
"=",
"tag",
"return",
"tag"
]
| Create or update a Tag related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_tag: the API data for the Tag
:return: the Tag object | [
"Create",
"or",
"update",
"a",
"Tag",
"related",
"to",
"a",
"post",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L736-L762 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.process_post_media_attachments | def process_post_media_attachments(self, bulk_mode, api_post, post_media_attachments):
"""
Create or update Media objects related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the Post
:param post_media_attachments: a mapping of Media objects keyed by post ID
:return: None
"""
post_media_attachments[api_post["ID"]] = []
for api_attachment in six.itervalues(api_post["attachments"]):
attachment = self.process_post_media_attachment(bulk_mode, api_attachment)
if attachment:
post_media_attachments[api_post["ID"]].append(attachment) | python | def process_post_media_attachments(self, bulk_mode, api_post, post_media_attachments):
"""
Create or update Media objects related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the Post
:param post_media_attachments: a mapping of Media objects keyed by post ID
:return: None
"""
post_media_attachments[api_post["ID"]] = []
for api_attachment in six.itervalues(api_post["attachments"]):
attachment = self.process_post_media_attachment(bulk_mode, api_attachment)
if attachment:
post_media_attachments[api_post["ID"]].append(attachment) | [
"def",
"process_post_media_attachments",
"(",
"self",
",",
"bulk_mode",
",",
"api_post",
",",
"post_media_attachments",
")",
":",
"post_media_attachments",
"[",
"api_post",
"[",
"\"ID\"",
"]",
"]",
"=",
"[",
"]",
"for",
"api_attachment",
"in",
"six",
".",
"itervalues",
"(",
"api_post",
"[",
"\"attachments\"",
"]",
")",
":",
"attachment",
"=",
"self",
".",
"process_post_media_attachment",
"(",
"bulk_mode",
",",
"api_attachment",
")",
"if",
"attachment",
":",
"post_media_attachments",
"[",
"api_post",
"[",
"\"ID\"",
"]",
"]",
".",
"append",
"(",
"attachment",
")"
]
| Create or update Media objects related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the Post
:param post_media_attachments: a mapping of Media objects keyed by post ID
:return: None | [
"Create",
"or",
"update",
"Media",
"objects",
"related",
"to",
"a",
"post",
"."
]
| f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L764-L778 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.process_post_media_attachment | def process_post_media_attachment(self, bulk_mode, api_media_attachment):
"""
Create or update a Media attached to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_media_attachment: the API data for the Media
:return: the Media attachment object
"""
attachment = None
# try to get from the ref data map if in bulk mode
if bulk_mode:
attachment = self.ref_data_map["media"].get(api_media_attachment["ID"])
# double check the db before giving up, we may have sync'd it in a previous run
if not attachment:
# do a direct db lookup if we're not in bulk mode
attachment, created = self.get_or_create_media(api_media_attachment)
if attachment and not created:
self.update_existing_media(attachment, api_media_attachment)
# add to ref data map so later lookups work
if attachment:
self.ref_data_map["media"][api_media_attachment["ID"]] = attachment
return attachment | python | Create or update a Media attached to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_media_attachment: the API data for the Media
:return: the Media attachment object | f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L780-L805 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.get_or_create_media | def get_or_create_media(self, api_media):
"""
Find or create a Media object given API data.
:param api_media: the API data for the Media
:return: a tuple of an Media instance and a boolean indicating whether the Media was created or not
"""
return Media.objects.get_or_create(site_id=self.site_id,
wp_id=api_media["ID"],
defaults=self.api_object_data("media", api_media)) | python | Find or create a Media object given API data.
:param api_media: the API data for the Media
:return: a tuple of an Media instance and a boolean indicating whether the Media was created or not | f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L807-L816 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.process_existing_post | def process_existing_post(existing_post, api_post, author, post_categories, post_tags, post_media_attachments):
"""
Sync attributes for a single post from WP API data.
:param existing_post: Post object that needs to be sync'd
:param api_post: the API data for the Post
:param author: the Author object of the post (should already exist in the db)
:param post_categories: the Categories to attach to the post (should already exist in the db)
:param post_tags: the Tags to attach to the post (should already exist in the db)
:param post_media_attachments: the Medias to attach to the post (should already exist in the db)
:return: None
"""
# don't bother checking what's different, just update all fields
existing_post.author = author
existing_post.post_date = api_post["date"]
existing_post.modified = api_post["modified"]
existing_post.title = api_post["title"]
existing_post.url = api_post["URL"]
existing_post.short_url = api_post["short_URL"]
existing_post.content = api_post["content"]
existing_post.excerpt = api_post["excerpt"]
existing_post.slug = api_post["slug"]
existing_post.guid = api_post["guid"]
existing_post.status = api_post["status"]
existing_post.sticky = api_post["sticky"]
existing_post.password = api_post["password"]
existing_post.parent = api_post["parent"]
existing_post.post_type = api_post["type"]
existing_post.likes_enabled = api_post["likes_enabled"]
existing_post.sharing_enabled = api_post["sharing_enabled"]
existing_post.like_count = api_post["like_count"]
existing_post.global_ID = api_post["global_ID"]
existing_post.featured_image = api_post["featured_image"]
existing_post.format = api_post["format"]
existing_post.menu_order = api_post["menu_order"]
existing_post.metadata = api_post["metadata"]
existing_post.post_thumbnail = api_post["post_thumbnail"]
WPAPILoader.process_post_many_to_many_field(existing_post, "categories", post_categories)
WPAPILoader.process_post_many_to_many_field(existing_post, "tags", post_tags)
WPAPILoader.process_post_many_to_many_field(existing_post, "attachments", post_media_attachments)
existing_post.save() | python | Sync attributes for a single post from WP API data.
:param existing_post: Post object that needs to be sync'd
:param api_post: the API data for the Post
:param author: the Author object of the post (should already exist in the db)
:param post_categories: the Categories to attach to the post (should already exist in the db)
:param post_tags: the Tags to attach to the post (should already exist in the db)
:param post_media_attachments: the Medias to attach to the post (should already exist in the db)
:return: None | f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L819-L861 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.process_post_many_to_many_field | def process_post_many_to_many_field(existing_post, field, related_objects):
"""
Sync data for a many-to-many field related to a post using set differences.
:param existing_post: Post object that needs to be sync'd
:param field: the many-to-many field to update
:param related_objects: the list of objects for the field, that need to be sync'd to the Post
:return: None
"""
to_add = set(related_objects.get(existing_post.wp_id, set())) - set(getattr(existing_post, field).all())
to_remove = set(getattr(existing_post, field).all()) - set(related_objects.get(existing_post.wp_id, set()))
if to_add:
getattr(existing_post, field).add(*to_add)
if to_remove:
getattr(existing_post, field).remove(*to_remove) | python | Sync data for a many-to-many field related to a post using set differences.
:param existing_post: Post object that needs to be sync'd
:param field: the many-to-many field to update
:param related_objects: the list of objects for the field, that need to be sync'd to the Post
:return: None | f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L864-L879 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.bulk_create_posts | def bulk_create_posts(self, posts, post_categories, post_tags, post_media_attachments):
"""
Actually do a db bulk creation of posts, and link up the many-to-many fields
:param posts: the list of Post objects to bulk create
:param post_categories: a mapping of Categories to add to newly created Posts
:param post_tags: a mapping of Tags to add to newly created Posts
:param post_media_attachments: a mapping of Medias to add to newly created Posts
:return: None
"""
Post.objects.bulk_create(posts)
# attach many-to-ones
for post_wp_id, categories in six.iteritems(post_categories):
Post.objects.get(site_id=self.site_id, wp_id=post_wp_id).categories.add(*categories)
for post_id, tags in six.iteritems(post_tags):
Post.objects.get(site_id=self.site_id, wp_id=post_id).tags.add(*tags)
for post_id, attachments in six.iteritems(post_media_attachments):
Post.objects.get(site_id=self.site_id, wp_id=post_id).attachments.add(*attachments) | python | Actually do a db bulk creation of posts, and link up the many-to-many fields
:param posts: the list of Post objects to bulk create
:param post_categories: a mapping of Categories to add to newly created Posts
:param post_tags: a mapping of Tags to add to newly created Posts
:param post_media_attachments: a mapping of Medias to add to newly created Posts
:return: None | f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L928-L948 | train |
observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.sync_deleted_attachments | def sync_deleted_attachments(self, api_post):
"""
Remove Posts with post_type=attachment that have been removed from the given Post on the WordPress side.
Logic:
- get the list of Posts with post_type = attachment whose parent_id = this post_id
- get the corresponding list from WP API
- perform set difference
- delete extra local attachments if any
:param api_post: the API data for the Post
:return: None
"""
existing_IDs = set(Post.objects.filter(site_id=self.site_id,
post_type="attachment",
parent__icontains='"ID":{}'.format(api_post["ID"]))
.values_list("wp_id", flat=True))
# can't delete what we don't have
if existing_IDs:
api_IDs = set()
# call the API again to the get the full list of attachment posts whose parent is this post's wp_id
path = "sites/{}/posts/".format(self.site_id)
params = {
"type": "attachment",
"parent_id": api_post["ID"],
"fields": "ID",
"number": 100
}
page = 1
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
# loop around since there may be more than 100 attachments (example: really large slideshows)
while response.ok and response.text and page < 10:
api_json = response.json()
api_attachments = api_json.get("posts", [])
# iteratively extend the set to include this page's IDs
api_IDs |= set(a["ID"] for a in api_attachments)
# get next page
page += 1
next_page_handle = api_json.get("meta", {}).get("next_page")
if next_page_handle:
params["page_handle"] = next_page_handle
else:
# no more pages left
break
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
return
# perform set difference
to_remove = existing_IDs - api_IDs
# purge the extras
if to_remove:
Post.objects.filter(site_id=self.site_id,
post_type="attachment",
parent__icontains='"ID":{}'.format(api_post["ID"]),
wp_id__in=list(to_remove)).delete() | python | Remove Posts with post_type=attachment that have been removed from the given Post on the WordPress side.
Logic:
- get the list of Posts with post_type = attachment whose parent_id = this post_id
- get the corresponding list from WP API
- perform set difference
- delete extra local attachments if any
:param api_post: the API data for the Post
:return: None | f0d96891d8ac5a69c8ba90e044876e756fad1bfe | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L950-L1020 | train |
tslight/treepick | treepick/actions.py | Actions.nextparent | def nextparent(self, parent, depth):
'''
Add lines to current line by traversing the grandparent object again
and once we reach our current line counting every line that is prefixed
with the parent directory.
'''
if depth > 1: # can't jump to parent of root node!
pdir = os.path.dirname(self.name)
line = 0
for c, d in parent.traverse():
if line > parent.curline and c.name.startswith(pdir):
parent.curline += 1
line += 1
else: # otherwise just skip to next directory
line = -1 # skip hidden parent node
for c, d in parent.traverse():
if line > parent.curline:
parent.curline += 1
if os.path.isdir(c.name) and c.name in parent.children[0:]:
break
line += 1 | python | Add lines to current line by traversing the grandparent object again
and once we reach our current line counting every line that is prefixed
with the parent directory. | 7adf838900f11e8845e17d8c79bb2b23617aec2c | https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/actions.py#L70-L90 | train |
tslight/treepick | treepick/actions.py | Actions.prevparent | def prevparent(self, parent, depth):
'''
Subtract lines from our curline if the name of a node is prefixed with
the parent directory when traversing the grandparent object.
'''
pdir = os.path.dirname(self.name)
if depth > 1: # can't jump to parent of root node!
for c, d in parent.traverse():
if c.name == self.name:
break
if c.name.startswith(pdir):
parent.curline -= 1
else: # otherwise jus skip to previous directory
pdir = self.name
# - 1 otherwise hidden parent node throws count off & our
# self.curline doesn't change!
line = -1
for c, d in parent.traverse():
if c.name == self.name:
break
if os.path.isdir(c.name) and c.name in parent.children[0:]:
parent.curline = line
line += 1
return pdir | python | Subtract lines from our curline if the name of a node is prefixed with
the parent directory when traversing the grandparent object. | 7adf838900f11e8845e17d8c79bb2b23617aec2c | https://github.com/tslight/treepick/blob/7adf838900f11e8845e17d8c79bb2b23617aec2c/treepick/actions.py#L92-L115 | train |
peterbe/gg | gg/builtins/github.py | token | def token(config, token):
"""Store and fetch a GitHub access token"""
if not token:
info_out(
"To generate a personal API token, go to:\n\n\t"
"https://github.com/settings/tokens\n\n"
"To read more about it, go to:\n\n\t"
"https://help.github.com/articles/creating-an-access"
"-token-for-command-line-use/\n\n"
'Remember to enable "repo" in the scopes.'
)
token = getpass.getpass("GitHub API Token: ").strip()
url = urllib.parse.urljoin(config.github_url, "/user")
assert url.startswith("https://"), url
response = requests.get(url, headers={"Authorization": "token {}".format(token)})
if response.status_code == 200:
update(
config.configfile,
{
"GITHUB": {
"github_url": config.github_url,
"token": token,
"login": response.json()["login"],
}
},
)
name = response.json()["name"] or response.json()["login"]
success_out("Hi! {}".format(name))
else:
error_out("Failed - {} ({})".format(response.status_code, response.content)) | python | Store and fetch a GitHub access token | 2aace5bdb4a9b1cb65bea717784edf54c63b7bad | https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/builtins/github.py#L32-L61 | train |
reorx/torext | torext/handlers/base.py | log_response | def log_response(handler):
"""
Acturally, logging response is not a server's responsibility,
you should use http tools like Chrome Developer Tools to analyse the response.
Although this function and its setting(LOG_RESPONSE) is not recommended to use,
if you are laze as I was and working in development, nothing could stop you.
"""
content_type = handler._headers.get('Content-Type', None)
headers_str = handler._generate_headers()
block = 'Response Infomations:\n' + headers_str.strip()
if content_type and ('text' in content_type or 'json' in content_type):
limit = 0
if 'LOG_RESPONSE_LINE_LIMIT' in settings:
limit = settings['LOG_RESPONSE_LINE_LIMIT']
def cut(s):
if limit and len(s) > limit:
return [s[:limit]] + cut(s[limit:])
else:
return [s]
body = ''.join(handler._write_buffer)
lines = []
for i in body.split('\n'):
lines += ['| ' + j for j in cut(i)]
block += '\nBody:\n' + '\n'.join(lines)
app_log.info(block) | python | Acturally, logging response is not a server's responsibility,
you should use http tools like Chrome Developer Tools to analyse the response.
Although this function and its setting(LOG_RESPONSE) is not recommended to use,
if you are laze as I was and working in development, nothing could stop you. | 84c4300ebc7fab0dbd11cf8b020bc7d4d1570171 | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L25-L53 | train |
reorx/torext | torext/handlers/base.py | log_request | def log_request(handler):
"""
Logging request is opposite to response, sometime its necessary,
feel free to enable it.
"""
block = 'Request Infomations:\n' + _format_headers_log(handler.request.headers)
if handler.request.arguments:
block += '+----Arguments----+\n'
for k, v in handler.request.arguments.items():
block += '| {0:<15} | {1:<15} \n'.format(repr(k), repr(v))
app_log.info(block) | python | Logging request is opposite to response, sometime its necessary,
feel free to enable it. | 84c4300ebc7fab0dbd11cf8b020bc7d4d1570171 | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L56-L68 | train |
reorx/torext | torext/handlers/base.py | BaseHandler._exception_default_handler | def _exception_default_handler(self, e):
"""This method is a copy of tornado.web.RequestHandler._handle_request_exception
"""
if isinstance(e, HTTPError):
if e.log_message:
format = "%d %s: " + e.log_message
args = [e.status_code, self._request_summary()] + list(e.args)
app_log.warning(format, *args)
if e.status_code not in httplib.responses:
app_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
app_log.error("Uncaught exception %s\n%r", self._request_summary(),
self.request, exc_info=True)
self.send_error(500, exc_info=sys.exc_info()) | python | This method is a copy of tornado.web.RequestHandler._handle_request_exception | 84c4300ebc7fab0dbd11cf8b020bc7d4d1570171 | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L94-L110 | train |
reorx/torext | torext/handlers/base.py | BaseHandler._handle_request_exception | def _handle_request_exception(self, e):
"""This method handle HTTPError exceptions the same as how tornado does,
leave other exceptions to be handled by user defined handler function
maped in class attribute `EXCEPTION_HANDLERS`
Common HTTP status codes:
200 OK
301 Moved Permanently
302 Found
400 Bad Request
401 Unauthorized
403 Forbidden
404 Not Found
405 Method Not Allowed
500 Internal Server Error
It is suggested only to use above HTTP status codes
"""
handle_func = self._exception_default_handler
if self.EXCEPTION_HANDLERS:
for excs, func_name in self.EXCEPTION_HANDLERS.items():
if isinstance(e, excs):
handle_func = getattr(self, func_name)
break
handle_func(e)
if not self._finished:
self.finish() | python | This method handle HTTPError exceptions the same as how tornado does,
leave other exceptions to be handled by user defined handler function
maped in class attribute `EXCEPTION_HANDLERS`
Common HTTP status codes:
200 OK
301 Moved Permanently
302 Found
400 Bad Request
401 Unauthorized
403 Forbidden
404 Not Found
405 Method Not Allowed
500 Internal Server Error
It is suggested only to use above HTTP status codes | 84c4300ebc7fab0dbd11cf8b020bc7d4d1570171 | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L112-L139 | train |
reorx/torext | torext/handlers/base.py | BaseHandler.flush | def flush(self, *args, **kwgs):
"""
Before `RequestHandler.flush` was called, we got the final _write_buffer.
This method will not be called in wsgi mode
"""
if settings['LOG_RESPONSE'] and not self._status_code == 500:
log_response(self)
super(BaseHandler, self).flush(*args, **kwgs) | python | Before `RequestHandler.flush` was called, we got the final _write_buffer.
This method will not be called in wsgi mode | 84c4300ebc7fab0dbd11cf8b020bc7d4d1570171 | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L166-L175 | train |
reorx/torext | torext/handlers/base.py | BaseHandler.write_json | def write_json(self, chunk, code=None, headers=None):
"""A convenient method that binds `chunk`, `code`, `headers` together
chunk could be any type of (str, dict, list)
"""
assert chunk is not None, 'None cound not be written in write_json'
self.set_header("Content-Type", "application/json; charset=UTF-8")
if isinstance(chunk, dict) or isinstance(chunk, list):
chunk = self.json_encode(chunk)
# convert chunk to utf8 before `RequestHandler.write()`
# so that if any error occurs, we can catch and log it
try:
chunk = utf8(chunk)
except Exception:
app_log.error('chunk encoding error, repr: %s' % repr(chunk))
raise_exc_info(sys.exc_info())
self.write(chunk)
if code:
self.set_status(code)
if headers:
for k, v in headers.items():
self.set_header(k, v) | python | A convenient method that binds `chunk`, `code`, `headers` together
chunk could be any type of (str, dict, list) | 84c4300ebc7fab0dbd11cf8b020bc7d4d1570171 | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L177-L202 | train |
reorx/torext | torext/handlers/base.py | BaseHandler.write_file | def write_file(self, file_path, mime_type=None):
"""Copy from tornado.web.StaticFileHandler
"""
if not os.path.exists(file_path):
raise HTTPError(404)
if not os.path.isfile(file_path):
raise HTTPError(403, "%s is not a file", file_path)
stat_result = os.stat(file_path)
modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
self.set_header("Last-Modified", modified)
if not mime_type:
mime_type, _encoding = mimetypes.guess_type(file_path)
if mime_type:
self.set_header("Content-Type", mime_type)
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if_since = datetime.datetime.fromtimestamp(time.mktime(date_tuple))
if if_since >= modified:
self.set_status(304)
return
with open(file_path, "rb") as file:
data = file.read()
hasher = hashlib.sha1()
hasher.update(data)
self.set_header("Etag", '"%s"' % hasher.hexdigest())
self.write(data) | python | Copy from tornado.web.StaticFileHandler | 84c4300ebc7fab0dbd11cf8b020bc7d4d1570171 | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L204-L237 | train |
reorx/torext | torext/handlers/base.py | BaseHandler.prepare | def prepare(self):
"""Behaves like a middleware between raw request and handling process,
If `PREPARES` is defined on handler class, which should be
a list, for example, ['auth', 'context'], method whose name
is constitute by prefix '_prepare_' and string in this list
will be executed by sequence. In this example, those methods are
`_prepare_auth` and `_prepare_context`
"""
if settings['LOG_REQUEST']:
log_request(self)
for i in self.PREPARES:
getattr(self, 'prepare_' + i)()
if self._finished:
return | python | Behaves like a middleware between raw request and handling process,
If `PREPARES` is defined on handler class, which should be
a list, for example, ['auth', 'context'], method whose name
is constitute by prefix '_prepare_' and string in this list
will be executed by sequence. In this example, those methods are
`_prepare_auth` and `_prepare_context` | 84c4300ebc7fab0dbd11cf8b020bc7d4d1570171 | https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L250-L265 | train |
testedminds/sand | sand/csv.py | csv_to_dicts | def csv_to_dicts(file, header=None):
"""Reads a csv and returns a List of Dicts with keys given by header row."""
with open(file) as csvfile:
return [row for row in csv.DictReader(csvfile, fieldnames=header)] | python | def csv_to_dicts(file, header=None):
"""Reads a csv and returns a List of Dicts with keys given by header row."""
with open(file) as csvfile:
return [row for row in csv.DictReader(csvfile, fieldnames=header)] | [
"def",
"csv_to_dicts",
"(",
"file",
",",
"header",
"=",
"None",
")",
":",
"with",
"open",
"(",
"file",
")",
"as",
"csvfile",
":",
"return",
"[",
"row",
"for",
"row",
"in",
"csv",
".",
"DictReader",
"(",
"csvfile",
",",
"fieldnames",
"=",
"header",
")",
"]"
]
| Reads a csv and returns a List of Dicts with keys given by header row. | [
"Reads",
"a",
"csv",
"and",
"returns",
"a",
"List",
"of",
"Dicts",
"with",
"keys",
"given",
"by",
"header",
"row",
"."
]
| 234f0eedb0742920cdf26da9bc84bf3f863a2f02 | https://github.com/testedminds/sand/blob/234f0eedb0742920cdf26da9bc84bf3f863a2f02/sand/csv.py#L23-L26 | train |
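A small usage sketch for csv_to_dicts. The import path follows the row's file path (sand/csv.py); the file name and column names are placeholders.

from sand.csv import csv_to_dicts  # module path assumed from sand/csv.py

# people.csv contains no header row:
#   ada,ada@example.com
#   grace,grace@example.com
rows = csv_to_dicts('people.csv', header=['name', 'email'])
# rows == [{'name': 'ada', 'email': 'ada@example.com'},
#          {'name': 'grace', 'email': 'grace@example.com'}]

# With header=None (the default), csv.DictReader uses the file's first row as the keys.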
peterbe/gg | gg/main.py | cli | def cli(config, configfile, verbose):
"""A glorious command line tool to make your life with git, GitHub
and Bugzilla much easier."""
config.verbose = verbose
config.configfile = configfile
if not os.path.isfile(configfile):
state.write(configfile, {}) | python | def cli(config, configfile, verbose):
"""A glorious command line tool to make your life with git, GitHub
and Bugzilla much easier."""
config.verbose = verbose
config.configfile = configfile
if not os.path.isfile(configfile):
state.write(configfile, {}) | [
"def",
"cli",
"(",
"config",
",",
"configfile",
",",
"verbose",
")",
":",
"config",
".",
"verbose",
"=",
"verbose",
"config",
".",
"configfile",
"=",
"configfile",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"configfile",
")",
":",
"state",
".",
"write",
"(",
"configfile",
",",
"{",
"}",
")"
]
| A glorious command line tool to make your life with git, GitHub
and Bugzilla much easier. | [
"A",
"glorious",
"command",
"line",
"tool",
"to",
"make",
"your",
"life",
"with",
"git",
"GitHub",
"and",
"Bugzilla",
"much",
"easier",
"."
]
| 2aace5bdb4a9b1cb65bea717784edf54c63b7bad | https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/main.py#L36-L42 | train |
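The cli callback above does little beyond wiring verbosity onto the config object and bootstrapping the state file when it does not exist yet. A minimal sketch of that bootstrap idea, detached from gg and click, is shown below; the JSON format and the default path are assumptions, not gg's actual on-disk layout.

import json
import os

def ensure_state_file(configfile):
    # First run: create an empty state file so later subcommands can read it.
    if not os.path.isfile(configfile):
        with open(configfile, 'w') as f:
            json.dump({}, f)

ensure_state_file(os.path.expanduser('~/.gg.json'))  # hypothetical default location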
ghukill/pyfc4 | pyfc4/models.py | Repository.parse_uri | def parse_uri(self, uri=None):
'''
parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef
Args:
uri (rdflib.term.URIRef,str): input URI
Returns:
rdflib.term.URIRef
'''
# no uri provided, assume root
if not uri:
return rdflib.term.URIRef(self.root)
# string uri provided
elif type(uri) == str:
# assume "short" uri, expand with repo root
if type(uri) == str and not uri.startswith('http'):
return rdflib.term.URIRef("%s%s" % (self.root, uri))
# else, assume full uri
else:
return rdflib.term.URIRef(uri)
# already rdflib.term.URIRef
elif type(uri) == rdflib.term.URIRef:
return uri
# unknown input
else:
raise TypeError('invalid URI input') | python | def parse_uri(self, uri=None):
'''
parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef
Args:
uri (rdflib.term.URIRef,str): input URI
Returns:
rdflib.term.URIRef
'''
# no uri provided, assume root
if not uri:
return rdflib.term.URIRef(self.root)
# string uri provided
elif type(uri) == str:
# assume "short" uri, expand with repo root
if type(uri) == str and not uri.startswith('http'):
return rdflib.term.URIRef("%s%s" % (self.root, uri))
# else, assume full uri
else:
return rdflib.term.URIRef(uri)
# already rdflib.term.URIRef
elif type(uri) == rdflib.term.URIRef:
return uri
# unknown input
else:
raise TypeError('invalid URI input') | [
"def",
"parse_uri",
"(",
"self",
",",
"uri",
"=",
"None",
")",
":",
"# no uri provided, assume root",
"if",
"not",
"uri",
":",
"return",
"rdflib",
".",
"term",
".",
"URIRef",
"(",
"self",
".",
"root",
")",
"# string uri provided",
"elif",
"type",
"(",
"uri",
")",
"==",
"str",
":",
"# assume \"short\" uri, expand with repo root",
"if",
"type",
"(",
"uri",
")",
"==",
"str",
"and",
"not",
"uri",
".",
"startswith",
"(",
"'http'",
")",
":",
"return",
"rdflib",
".",
"term",
".",
"URIRef",
"(",
"\"%s%s\"",
"%",
"(",
"self",
".",
"root",
",",
"uri",
")",
")",
"# else, assume full uri",
"else",
":",
"return",
"rdflib",
".",
"term",
".",
"URIRef",
"(",
"uri",
")",
"# already rdflib.term.URIRef",
"elif",
"type",
"(",
"uri",
")",
"==",
"rdflib",
".",
"term",
".",
"URIRef",
":",
"return",
"uri",
"# unknown input",
"else",
":",
"raise",
"TypeError",
"(",
"'invalid URI input'",
")"
]
| parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef
Args:
uri (rdflib.term.URIRef,str): input URI
Returns:
rdflib.term.URIRef | [
"parses",
"and",
"cleans",
"up",
"possible",
"uri",
"inputs",
"return",
"instance",
"of",
"rdflib",
".",
"term",
".",
"URIRef"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L105-L138 | train |
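A few hedged examples of what parse_uri is documented to do. Here repo stands for an existing Repository instance whose root is assumed to be 'http://localhost:8080/rest/'; the constructor arguments are omitted.

import rdflib

repo.parse_uri()                          # no input: URIRef of the repository root
repo.parse_uri('foo/bar')                 # "short" uri, expanded to root + 'foo/bar'
repo.parse_uri('http://example.org/x')    # already a full uri, wrapped as a URIRef
repo.parse_uri(rdflib.term.URIRef('http://example.org/x'))  # passed through unchanged
repo.parse_uri(42)                        # raises TypeError('invalid URI input')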
ghukill/pyfc4 | pyfc4/models.py | Repository.create_resource | def create_resource(self, resource_type=None, uri=None):
'''
Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
'''
if resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]:
return resource_type(self, uri)
else:
raise TypeError("expecting Resource type, such as BasicContainer or NonRDFSource") | python | def create_resource(self, resource_type=None, uri=None):
'''
Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
'''
if resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]:
return resource_type(self, uri)
else:
raise TypeError("expecting Resource type, such as BasicContainer or NonRDFSource") | [
"def",
"create_resource",
"(",
"self",
",",
"resource_type",
"=",
"None",
",",
"uri",
"=",
"None",
")",
":",
"if",
"resource_type",
"in",
"[",
"NonRDFSource",
",",
"Binary",
",",
"BasicContainer",
",",
"DirectContainer",
",",
"IndirectContainer",
"]",
":",
"return",
"resource_type",
"(",
"self",
",",
"uri",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"expecting Resource type, such as BasicContainer or NonRDFSource\"",
")"
]
| Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type | [
"Convenience",
"method",
"for",
"creating",
"a",
"new",
"resource"
]
| 59011df592f08978c4a901a908862d112a5dcf02 | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L141-L159 | train |
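A short sketch of the create_resource flow, following the docstring's note that the returned resource still needs resource.create(); the repository connection and the target uri are placeholders, and BasicContainer is one of the accepted types listed in the row above.

# repo is an existing pyfc4 Repository instance; connection details are omitted here.
bc = repo.create_resource(resource_type=BasicContainer, uri='my_container')
bc.create()   # the instance is only persisted to the repository by this call

repo.create_resource(resource_type=dict)  # raises TypeError: expecting Resource type ...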