body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (1 class: python) | body_without_docstring (string, 20–98.2k chars) |
---|---|---|---|---|---|---|---|
def clizefy(obj=None, helper_class=DocutilizeClizeHelp, **kwargs):
'Decorator for creating CLI objects.\n '
if (obj is None):
return partial(clizefy, helper_class=helper_class, **kwargs)
if hasattr(obj, 'cli'):
return obj
if (not callable(obj)):
return Clize.get_cli(obj, **kwargs)
return Clize.keep(obj, helper_class=helper_class, **kwargs) | 3,488,118,777,556,546,000 | Decorator for creating CLI objects. | improver/cli/__init__.py | clizefy | anja-bom/improver | python | def clizefy(obj=None, helper_class=DocutilizeClizeHelp, **kwargs):
'\n '
if (obj is None):
return partial(clizefy, helper_class=helper_class, **kwargs)
if hasattr(obj, 'cli'):
return obj
if (not callable(obj)):
return Clize.get_cli(obj, **kwargs)
return Clize.keep(obj, helper_class=helper_class, **kwargs) |
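A minimal usage sketch for the `clizefy` decorator shown above; it assumes the improver package is importable, and the `process` function with its `threshold` parameter is purely illustrative.

```python
# Usage sketch for clizefy (assumes the improver package is importable).
# `process` and its `threshold` parameter are illustrative only.
from improver.cli import clizefy

@clizefy()
def process(*, threshold: float = 0.5):
    """Hypothetical CLI entry point."""
    return threshold

# Clize.keep attaches a `cli` attribute while leaving the function callable.
assert hasattr(process, "cli")
assert process(threshold=0.1) == 0.1
```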
@clizefy(help_names=())
def improver_help(prog_name: parameters.pass_name, command=None, *, usage=False):
'Show command help.'
prog_name = prog_name.split()[0]
args = filter(None, [command, '--help', (usage and '--usage')])
result = execute_command(SUBCOMMANDS_DISPATCHER, prog_name, *args)
if ((not command) and usage):
result = '\n'.join((line for line in result.splitlines() if (not line.endswith('--help [--usage]'))))
return result | 2,624,931,623,418,684,400 | Show command help. | improver/cli/__init__.py | improver_help | anja-bom/improver | python | @clizefy(help_names=())
def improver_help(prog_name: parameters.pass_name, command=None, *, usage=False):
prog_name = prog_name.split()[0]
args = filter(None, [command, '--help', (usage and '--usage')])
result = execute_command(SUBCOMMANDS_DISPATCHER, prog_name, *args)
if ((not command) and usage):
result = '\n'.join((line for line in result.splitlines() if (not line.endswith('--help [--usage]'))))
return result |
def _cli_items():
'Dynamically discover CLIs.'
import importlib
import pkgutil
from improver.cli import __path__ as improver_cli_pkg_path
(yield ('help', improver_help))
for minfo in pkgutil.iter_modules(improver_cli_pkg_path):
mod_name = minfo.name
if (mod_name != '__main__'):
mcli = importlib.import_module(('improver.cli.' + mod_name))
(yield (mod_name, clizefy(mcli.process))) | 2,685,931,208,095,483,400 | Dynamically discover CLIs. | improver/cli/__init__.py | _cli_items | anja-bom/improver | python | def _cli_items():
import importlib
import pkgutil
from improver.cli import __path__ as improver_cli_pkg_path
(yield ('help', improver_help))
for minfo in pkgutil.iter_modules(improver_cli_pkg_path):
mod_name = minfo.name
if (mod_name != '__main__'):
mcli = importlib.import_module(('improver.cli.' + mod_name))
(yield (mod_name, clizefy(mcli.process))) |
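A hedged sketch of consuming the discovery generator; it assumes improver and all of its CLI submodules (plus their dependencies) are importable, since the generator imports each `improver.cli.<module>` as it yields it.

```python
# Enumerate the subcommands that _cli_items() discovers.
from improver.cli import _cli_items

for name, cli_obj in _cli_items():
    print(name, type(cli_obj))
```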
def unbracket(args):
"Convert input list with bracketed items into nested lists.\n\n >>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())\n ['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z']\n\n "
outargs = []
stack = []
mismatch_msg = 'Mismatched bracket at position %i.'
for (i, arg) in enumerate(args):
if (arg == '['):
stack.append(outargs)
outargs = []
elif (arg == ']'):
if (not stack):
raise ValueError((mismatch_msg % i))
stack[(- 1)].append(outargs)
outargs = stack.pop()
else:
outargs.append(arg)
if stack:
raise ValueError((mismatch_msg % len(args)))
return outargs | 1,415,948,892,958,881,000 | Convert input list with bracketed items into nested lists.
>>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())
['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z'] | improver/cli/__init__.py | unbracket | anja-bom/improver | python | def unbracket(args):
"Convert input list with bracketed items into nested lists.\n\n >>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())\n ['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z']\n\n "
outargs = []
stack = []
mismatch_msg = 'Mismatched bracket at position %i.'
for (i, arg) in enumerate(args):
if (arg == '['):
stack.append(outargs)
outargs = []
elif (arg == ']'):
if (not stack):
raise ValueError((mismatch_msg % i))
stack[(- 1)].append(outargs)
outargs = stack.pop()
else:
outargs.append(arg)
if stack:
raise ValueError((mismatch_msg % len(args)))
return outargs |
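The doctest in the docstring already shows the intended behaviour; the sketch below (assuming improver is importable) simply exercises it, including the error path for an unbalanced bracket.

```python
# Usage sketch for unbracket.
from improver.cli import unbracket

nested = unbracket('foo [ bar a b ] [ baz c ] -o z'.split())
assert nested == ['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z']

try:
    unbracket('foo [ bar'.split())  # opening bracket is never closed
except ValueError as err:
    print(err)  # Mismatched bracket at position 3.
```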
def execute_command(dispatcher, prog_name, *args, verbose=False, dry_run=False):
'Common entry point for command execution.'
args = list(args)
for (i, arg) in enumerate(args):
if isinstance(arg, (list, tuple)):
arg = execute_command(dispatcher, prog_name, *arg, verbose=verbose, dry_run=dry_run)
if isinstance(arg, pathlib.PurePath):
arg = str(arg)
elif (not isinstance(arg, str)):
arg = ObjectAsStr(arg)
args[i] = arg
if (verbose or dry_run):
print(' '.join([shlex.quote(x) for x in (prog_name, *args)]))
if dry_run:
return args
result = dispatcher(prog_name, *args)
if (verbose and (result is not None)):
print(ObjectAsStr.obj_to_name(result))
return result | -4,196,092,585,398,877,700 | Common entry point for command execution. | improver/cli/__init__.py | execute_command | anja-bom/improver | python | def execute_command(dispatcher, prog_name, *args, verbose=False, dry_run=False):
args = list(args)
for (i, arg) in enumerate(args):
if isinstance(arg, (list, tuple)):
arg = execute_command(dispatcher, prog_name, *arg, verbose=verbose, dry_run=dry_run)
if isinstance(arg, pathlib.PurePath):
arg = str(arg)
elif (not isinstance(arg, str)):
arg = ObjectAsStr(arg)
args[i] = arg
if (verbose or dry_run):
print(' '.join([shlex.quote(x) for x in (prog_name, *args)]))
if dry_run:
return args
result = dispatcher(prog_name, *args)
if (verbose and (result is not None)):
print(ObjectAsStr.obj_to_name(result))
return result |
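A minimal sketch of how `execute_command` recurses into nested (chained) argument lists; the toy `dispatcher` below is an assumption and stands in for IMPROVER's real `SUBCOMMANDS_DISPATCHER`.

```python
# Sketch of nested-argument handling; the dispatcher is a hypothetical stand-in.
from improver.cli import execute_command

def dispatcher(prog_name, *args):
    # Pretend each dispatched command returns a result string.
    return f"<result of {prog_name} {' '.join(args)}>"

# The inner list is executed first and its result is substituted into the outer
# argument list before the outer command is dispatched; verbose echoes each call.
result = execute_command(
    dispatcher, "improver", "regrid", ["threshold", "input.nc"], verbose=True
)
print(result)
```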
@clizefy()
def main(prog_name: parameters.pass_name, command: LAST_OPTION, *args, profile: value_converter((lambda _: _), name='FILENAME')=None, memprofile: value_converter((lambda _: _), name='FILENAME')=None, verbose=False, dry_run=False):
'IMPROVER NWP post-processing toolbox\n\n Results from commands can be passed into file-like arguments\n of other commands by surrounding them by square brackets::\n\n improver command [ command ... ] ...\n\n Spaces around brackets are mandatory.\n\n Args:\n prog_name:\n The program name from argv[0].\n command (str):\n Command to execute\n args (tuple):\n Command arguments\n profile (str):\n If given, will write profiling to the file given.\n To write to stdout, use a hyphen (-)\n memprofile (str):\n Creates 2 files by adding a suffix to the provided arguemnt -\n a tracemalloc snapshot at the point of highest memory consumption\n of your program (suffixed with _SNAPSHOT)\n and a track of the maximum memory used by your program\n over time (suffixed with _MAX_TRACKER).\n verbose (bool):\n Print executed commands\n dry_run (bool):\n Print commands to be executed\n\n See improver help [--usage] [command] for more information\n on available command(s).\n '
args = unbracket(args)
exec_cmd = execute_command
if (profile is not None):
from improver.profile import profile_hook_enable
profile_hook_enable(dump_filename=(None if (profile == '-') else profile))
if (memprofile is not None):
from improver.memprofile import memory_profile_decorator
exec_cmd = memory_profile_decorator(exec_cmd, memprofile)
result = exec_cmd(SUBCOMMANDS_DISPATCHER, prog_name, command, *args, verbose=verbose, dry_run=dry_run)
return result | 8,911,020,094,292,437,000 | IMPROVER NWP post-processing toolbox
Results from commands can be passed into file-like arguments
of other commands by surrounding them by square brackets::
improver command [ command ... ] ...
Spaces around brackets are mandatory.
Args:
prog_name:
The program name from argv[0].
command (str):
Command to execute
args (tuple):
Command arguments
profile (str):
If given, will write profiling to the file given.
To write to stdout, use a hyphen (-)
memprofile (str):
Creates 2 files by adding a suffix to the provided argument -
a tracemalloc snapshot at the point of highest memory consumption
of your program (suffixed with _SNAPSHOT)
and a track of the maximum memory used by your program
over time (suffixed with _MAX_TRACKER).
verbose (bool):
Print executed commands
dry_run (bool):
Print commands to be executed
See improver help [--usage] [command] for more information
on available command(s). | improver/cli/__init__.py | main | anja-bom/improver | python | @clizefy()
def main(prog_name: parameters.pass_name, command: LAST_OPTION, *args, profile: value_converter((lambda _: _), name='FILENAME')=None, memprofile: value_converter((lambda _: _), name='FILENAME')=None, verbose=False, dry_run=False):
'IMPROVER NWP post-processing toolbox\n\n Results from commands can be passed into file-like arguments\n of other commands by surrounding them by square brackets::\n\n improver command [ command ... ] ...\n\n Spaces around brackets are mandatory.\n\n Args:\n prog_name:\n The program name from argv[0].\n command (str):\n Command to execute\n args (tuple):\n Command arguments\n profile (str):\n If given, will write profiling to the file given.\n To write to stdout, use a hyphen (-)\n memprofile (str):\n Creates 2 files by adding a suffix to the provided arguemnt -\n a tracemalloc snapshot at the point of highest memory consumption\n of your program (suffixed with _SNAPSHOT)\n and a track of the maximum memory used by your program\n over time (suffixed with _MAX_TRACKER).\n verbose (bool):\n Print executed commands\n dry_run (bool):\n Print commands to be executed\n\n See improver help [--usage] [command] for more information\n on available command(s).\n '
args = unbracket(args)
exec_cmd = execute_command
if (profile is not None):
from improver.profile import profile_hook_enable
profile_hook_enable(dump_filename=(None if (profile == '-') else profile))
if (memprofile is not None):
from improver.memprofile import memory_profile_decorator
exec_cmd = memory_profile_decorator(exec_cmd, memprofile)
result = exec_cmd(SUBCOMMANDS_DISPATCHER, prog_name, command, *args, verbose=verbose, dry_run=dry_run)
return result |
def run_main(argv=None):
"Overrides argv[0] to be 'improver' then runs main.\n\n Args:\n argv (list of str):\n Arguments that were from the command line.\n\n "
import sys
from clize import run
if (argv is None):
argv = sys.argv[:]
argv[0] = 'improver'
run(main, args=argv) | -5,774,211,158,403,693,000 | Overrides argv[0] to be 'improver' then runs main.
Args:
argv (list of str):
Arguments as passed from the command line.
"Overrides argv[0] to be 'improver' then runs main.\n\n Args:\n argv (list of str):\n Arguments that were from the command line.\n\n "
import sys
from clize import run
if (argv is None):
argv = sys.argv[:]
argv[0] = 'improver'
run(main, args=argv) |
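A hedged sketch of invoking the CLI programmatically, equivalent to the `improver help --usage` console command documented above; clize's `run` normally finishes via `sys.exit`, hence the guard.

```python
# Programmatic invocation sketch (assumes improver is installed).
from improver.cli import run_main

try:
    run_main(["improver", "help", "--usage"])
except SystemExit:
    pass
```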
def add_docstring(self, docstring, *args, **kwargs):
'Adds the updated docstring.'
docstring = docutilize(docstring)
super().add_docstring(docstring, *args, **kwargs) | 3,922,062,559,688,741,000 | Adds the updated docstring. | improver/cli/__init__.py | add_docstring | anja-bom/improver | python | def add_docstring(self, docstring, *args, **kwargs):
docstring = docutilize(docstring)
super().add_docstring(docstring, *args, **kwargs) |
@staticmethod
def obj_to_name(obj, cls=None):
'Helper function to create the string.'
if (cls is None):
cls = type(obj)
try:
obj_id = hash(obj)
except TypeError:
obj_id = id(obj)
return ('<%s.%s@%i>' % (cls.__module__, cls.__name__, obj_id)) | 6,488,412,296,716,952,000 | Helper function to create the string. | improver/cli/__init__.py | obj_to_name | anja-bom/improver | python | @staticmethod
def obj_to_name(obj, cls=None):
if (cls is None):
cls = type(obj)
try:
obj_id = hash(obj)
except TypeError:
obj_id = id(obj)
return ('<%s.%s@%i>' % (cls.__module__, cls.__name__, obj_id)) |
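A small sketch of the placeholder-name format produced above; `ObjectAsStr` is assumed to be importable from `improver.cli`.

```python
# Sketch of the '<module.Class@id>' placeholder format.
from improver.cli import ObjectAsStr

print(ObjectAsStr.obj_to_name((1, 2, 3)))  # hashable -> hash() is used
print(ObjectAsStr.obj_to_name([1, 2, 3]))  # unhashable -> falls back to id()
```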
@value_converter
def constrained_inputcubelist_converter(to_convert):
'Passes the cube and constraints onto maybe_coerce_with.\n\n Args:\n to_convert (str or iris.cube.CubeList):\n A CubeList or a filename to be loaded into a CubeList.\n\n Returns:\n iris.cube.CubeList:\n The loaded cubelist of constrained cubes.\n '
from iris import Constraint
from iris.cube import CubeList
from improver.utilities.load import load_cubelist
cubelist = maybe_coerce_with(load_cubelist, to_convert)
return CubeList((cubelist.extract_cube((Constraint(cube_func=constr) if callable(constr) else constr)) for constr in constraints)) | -6,835,541,266,756,884,000 | Passes the cube and constraints onto maybe_coerce_with.
Args:
to_convert (str or iris.cube.CubeList):
A CubeList or a filename to be loaded into a CubeList.
Returns:
iris.cube.CubeList:
The loaded cubelist of constrained cubes. | improver/cli/__init__.py | constrained_inputcubelist_converter | anja-bom/improver | python | @value_converter
def constrained_inputcubelist_converter(to_convert):
'Passes the cube and constraints onto maybe_coerce_with.\n\n Args:\n to_convert (str or iris.cube.CubeList):\n A CubeList or a filename to be loaded into a CubeList.\n\n Returns:\n iris.cube.CubeList:\n The loaded cubelist of constrained cubes.\n '
from iris import Constraint
from iris.cube import CubeList
from improver.utilities.load import load_cubelist
cubelist = maybe_coerce_with(load_cubelist, to_convert)
return CubeList((cubelist.extract_cube((Constraint(cube_func=constr) if callable(constr) else constr)) for constr in constraints)) |
def get_symbol(self, cfg, is_train=True):
'\n return a generated symbol, it also need to be assigned to self.sym\n '
raise NotImplementedError() | 2,051,318,806,614,280,400 | return a generated symbol, it also needs to be assigned to self.sym | lib/utils/symbol.py | get_symbol | 571502680/mx-DeepIM | python | def get_symbol(self, cfg, is_train=True):
'\n \n '
raise NotImplementedError() |
def art_search(art):
"\n Function to retrieve the information about collections in the Art institute of Chicago\n\n Parameters:\n -------------\n The key word that users want to search,\n for example: the artist's name, the title of the artwork.\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n Dataframe: df\n includes the related info about the searched artworks.\n\n Example:\n -------------\n >>>art_search('monet')\n 0\t16568\tWater Lilies\tClaude Monet\nFrench, 1840-1926\tFrance\t1906\t1906\tOil on canvas\t[Painting and Sculpture of Europe, Essentials]\n 1\t16571\tArrival of the Normandy Train, Gare Saint-Lazare\tClaude Monet\nFrench, 1840-1926\tFrance\t1877\t1877\tOil on canvas\t[Painting and Sculpture of Europe]\n "
params_search = {'q': art}
r = requests.get('https://api.artic.edu/api/v1/artworks/search?fields=id,title,date_start,date_end,artist_display,place_of_origin,medium_display,category_titles', params=params_search)
try:
status = r.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
r1 = json.dumps(r.json(), indent=2)
artsearch = json.loads(r1)
artworks = pd.DataFrame(artsearch['data'])
artworks_info = artworks[['id', 'title', 'artist_display', 'place_of_origin', 'date_start', 'date_end', 'medium_display', 'category_titles']]
return artworks_info | 5,937,697,452,561,424,000 | Function to retrieve the information about collections in the Art institute of Chicago
Parameters:
-------------
The key word that users want to search,
for example: the artist's name, the title of the artwork.
Returns:
-------------
Status code: str
if the API request went through
Dataframe: df
includes the related info about the searched artworks.
Example:
-------------
>>>art_search('monet')
0 16568 Water Lilies Claude Monet
French, 1840-1926 France 1906 1906 Oil on canvas [Painting and Sculpture of Europe, Essentials]
1 16571 Arrival of the Normandy Train, Gare Saint-Lazare Claude Monet
French, 1840-1926 France 1877 1877 Oil on canvas [Painting and Sculpture of Europe] | src/aicapi_yw3760/aicapi_yw3760.py | art_search | nicolewang97/AICAPI_YW3760 | python | def art_search(art):
"\n Function to retrieve the information about collections in the Art institute of Chicago\n\n Parameters:\n -------------\n The key word that users want to search,\n for example: the artist's name, the title of the artwork.\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n Dataframe: df\n includes the related info about the searched artworks.\n\n Example:\n -------------\n >>>art_search('monet')\n 0\t16568\tWater Lilies\tClaude Monet\nFrench, 1840-1926\tFrance\t1906\t1906\tOil on canvas\t[Painting and Sculpture of Europe, Essentials]\n 1\t16571\tArrival of the Normandy Train, Gare Saint-Lazare\tClaude Monet\nFrench, 1840-1926\tFrance\t1877\t1877\tOil on canvas\t[Painting and Sculpture of Europe]\n "
params_search = {'q': art}
r = requests.get('https://api.artic.edu/api/v1/artworks/search?fields=id,title,date_start,date_end,artist_display,place_of_origin,medium_display,category_titles', params=params_search)
try:
status = r.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
r1 = json.dumps(r.json(), indent=2)
artsearch = json.loads(r1)
artworks = pd.DataFrame(artsearch['data'])
artworks_info = artworks[['id', 'title', 'artist_display', 'place_of_origin', 'date_start', 'date_end', 'medium_display', 'category_titles']]
return artworks_info |
def tour_search(tour):
"\n Function to retrieve the information about tour in the Art institute of Chicago\n\n Parameters:\n -------------\n The key word that users want to search,\n for example: the artist's name, the title of the artwork.\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n Dataframe: df\n includes the related info about the searched tour.\n\n Example:\n -------------\n >>>tour_search('monet')\n 0\t4714\tMonet and Chicago\thttp://aic-mobile-tours.artic.edu/sites/defaul...\t<p>Monet and Chicago presents the city’s uniqu...\t<p>Monet and Chicago is the first exhibition t...\t[Cliff Walk at Pourville, Caricature of a Man ...\t[Claude Monet, Claude Monet, Claude Monet, Cla...\n 1\t4520\tManet and Modern Beauty\thttp://aic-mobile-tours.artic.edu/sites/defaul...\t<p>Dive deep into the life and mind of one the...\t<p>Manet is undoubtedly one of the most fascin...\t[]\t[]\n\n "
params_search_tour = {'q': tour}
rt = requests.get('https://api.artic.edu/api/v1/tours/search?fields=id,image,title,description,intro,artwork_titles,artist_titles', params=params_search_tour)
try:
status = rt.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rt1 = json.dumps(rt.json(), indent=2)
toursearch = json.loads(rt1)
ntour = pd.DataFrame(toursearch['data'])
tour_info = ntour[['id', 'title', 'image', 'description', 'intro', 'artwork_titles', 'artist_titles']]
return tour_info | -7,110,535,691,618,277,000 | Function to retrieve the information about tour in the Art institute of Chicago
Parameters:
-------------
The key word that users want to search,
for example: the artist's name, the title of the artwork.
Returns:
-------------
Status code: str
if the API request went through
Dataframe: df
includes the related info about the searched tour.
Example:
-------------
>>>tour_search('monet')
0 4714 Monet and Chicago http://aic-mobile-tours.artic.edu/sites/defaul... <p>Monet and Chicago presents the city’s uniqu... <p>Monet and Chicago is the first exhibition t... [Cliff Walk at Pourville, Caricature of a Man ... [Claude Monet, Claude Monet, Claude Monet, Cla...
1 4520 Manet and Modern Beauty http://aic-mobile-tours.artic.edu/sites/defaul... <p>Dive deep into the life and mind of one the... <p>Manet is undoubtedly one of the most fascin... [] [] | src/aicapi_yw3760/aicapi_yw3760.py | tour_search | nicolewang97/AICAPI_YW3760 | python | def tour_search(tour):
"\n Function to retrieve the information about tour in the Art institute of Chicago\n\n Parameters:\n -------------\n The key word that users want to search,\n for example: the artist's name, the title of the artwork.\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n Dataframe: df\n includes the related info about the searched tour.\n\n Example:\n -------------\n >>>tour_search('monet')\n 0\t4714\tMonet and Chicago\thttp://aic-mobile-tours.artic.edu/sites/defaul...\t<p>Monet and Chicago presents the city’s uniqu...\t<p>Monet and Chicago is the first exhibition t...\t[Cliff Walk at Pourville, Caricature of a Man ...\t[Claude Monet, Claude Monet, Claude Monet, Cla...\n 1\t4520\tManet and Modern Beauty\thttp://aic-mobile-tours.artic.edu/sites/defaul...\t<p>Dive deep into the life and mind of one the...\t<p>Manet is undoubtedly one of the most fascin...\t[]\t[]\n\n "
params_search_tour = {'q': tour}
rt = requests.get('https://api.artic.edu/api/v1/tours/search?fields=id,image,title,description,intro,artwork_titles,artist_titles', params=params_search_tour)
try:
status = rt.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rt1 = json.dumps(rt.json(), indent=2)
toursearch = json.loads(rt1)
ntour = pd.DataFrame(toursearch['data'])
tour_info = ntour[['id', 'title', 'image', 'description', 'intro', 'artwork_titles', 'artist_titles']]
return tour_info |
def pic_search(pic, artist):
"\n Function to retrieve the images of artworks collected in the Art institute of Chicago\n\n Parameters:\n -------------\n pic: the title of the artwork\n artist: the full name of the artist\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n Image: jpg\n The image of the searched atwork\n Error Message:\n Error messsage if the search is invalid\n\n Example:\n -------------\n >>>pic_search('Water Lillies', 'Claude Monet')\n\n "
params_search_pic = {'q': pic}
rp = requests.get('https://api.artic.edu/api/v1/artworks/search?fields=id,title,artist_display,image_id', params=params_search_pic)
linkhead = 'https://www.artic.edu/iiif/2/'
linktail = '/full/843,/0/default.jpg'
try:
status = rp.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rp1 = json.dumps(rp.json(), indent=2)
picsearch = json.loads(rp1)
npic = pd.DataFrame(picsearch['data'])
pic_info = npic[['id', 'title', 'artist_display', 'image_id']]
df_len = len(pic_info)
for i in range(df_len):
if ((pic_info.iloc[i]['title'] == pic) and (artist in pic_info.iloc[i]['artist_display'])):
get_image_id = pic_info.iloc[i]['image_id']
image_link = ((linkhead + get_image_id) + linktail)
response = requests.get(image_link)
img = Image.open(BytesIO(response.content))
return img
print('Invalid Search! Please find related information below :)')
return pic_info | 4,188,129,160,413,554,700 | Function to retrieve the images of artworks collected in the Art institute of Chicago
Parameters:
-------------
pic: the title of the artwork
artist: the full name of the artist
Returns:
-------------
Status code: str
if the API request went through
Image: jpg
The image of the searched artwork
Error Message:
Error message if the search is invalid
Example:
-------------
>>>pic_search('Water Lillies', 'Claude Monet') | src/aicapi_yw3760/aicapi_yw3760.py | pic_search | nicolewang97/AICAPI_YW3760 | python | def pic_search(pic, artist):
"\n Function to retrieve the images of artworks collected in the Art institute of Chicago\n\n Parameters:\n -------------\n pic: the title of the artwork\n artist: the full name of the artist\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n Image: jpg\n The image of the searched atwork\n Error Message:\n Error messsage if the search is invalid\n\n Example:\n -------------\n >>>pic_search('Water Lillies', 'Claude Monet')\n\n "
params_search_pic = {'q': pic}
rp = requests.get('https://api.artic.edu/api/v1/artworks/search?fields=id,title,artist_display,image_id', params=params_search_pic)
linkhead = 'https://www.artic.edu/iiif/2/'
linktail = '/full/843,/0/default.jpg'
try:
status = rp.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rp1 = json.dumps(rp.json(), indent=2)
picsearch = json.loads(rp1)
npic = pd.DataFrame(picsearch['data'])
pic_info = npic[['id', 'title', 'artist_display', 'image_id']]
df_len = len(pic_info)
for i in range(df_len):
if ((pic_info.iloc[i]['title'] == pic) and (artist in pic_info.iloc[i]['artist_display'])):
get_image_id = pic_info.iloc[i]['image_id']
image_link = ((linkhead + get_image_id) + linktail)
response = requests.get(image_link)
img = Image.open(BytesIO(response.content))
return img
print('Invalid Search! Please find related information below :)')
return pic_info |
def product_search(product_art, product_category):
"\n Function to retrieve the information about products sold in the Art institute of Chicago\n\n Parameters:\n -------------\n pic: the title of the artwork\n artist: the full name of the artist\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n DataFrame: a dataframe include related info about the products and images of the products\n\n Example:\n -------------\n >>>product_search('Rainy Day', 'Mug')\n >>>0\t245410\tGustave Caillebotte Paris Street; Rainy Day Mug\t\t$9.95...\n\n "
params_search_product = {'q': product_art}
rpro = requests.get('https://api.artic.edu/api/v1/products?search', params=params_search_product)
try:
status = rpro.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rpro1 = json.dumps(rpro.json(), indent=2)
productsearch = json.loads(rpro1)
nproduct = pd.DataFrame(productsearch['data'])
df_len1 = len(nproduct)
for i in range(df_len1):
if ((product_art in nproduct.iloc[i]['title']) and (product_category in nproduct.iloc[i]['description'])):
product_info = nproduct[['id', 'title', 'image_url', 'price_display', 'description']]
def path_to_image_html(path):
return (('<img src="' + path) + '" width="60" >')
image_cols = ['image_url']
format_dict = {}
for image_cols in image_cols:
format_dict[image_cols] = path_to_image_html
html = display(HTML(product_info.to_html(escape=False, formatters=format_dict)))
return html
else:
return 'Invalid Search! Please try other artworks or categories:)' | 130,723,497,960,180,690 | Function to retrieve the information about products sold in the Art institute of Chicago
Parameters:
-------------
product_art: the title of the artwork featured in the product
product_category: the product category, for example 'Mug'
Returns:
-------------
Status code: str
if the API request went through
DataFrame: a dataframe that includes related info about the products and images of the products
Example:
-------------
>>>product_search('Rainy Day', 'Mug')
>>>0 245410 Gustave Caillebotte Paris Street; Rainy Day Mug $9.95... | src/aicapi_yw3760/aicapi_yw3760.py | product_search | nicolewang97/AICAPI_YW3760 | python | def product_search(product_art, product_category):
"\n Function to retrieve the information about products sold in the Art institute of Chicago\n\n Parameters:\n -------------\n pic: the title of the artwork\n artist: the full name of the artist\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n DataFrame: a dataframe include related info about the products and images of the products\n\n Example:\n -------------\n >>>product_search('Rainy Day', 'Mug')\n >>>0\t245410\tGustave Caillebotte Paris Street; Rainy Day Mug\t\t$9.95...\n\n "
params_search_product = {'q': product_art}
rpro = requests.get('https://api.artic.edu/api/v1/products?search', params=params_search_product)
try:
status = rpro.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rpro1 = json.dumps(rpro.json(), indent=2)
productsearch = json.loads(rpro1)
nproduct = pd.DataFrame(productsearch['data'])
df_len1 = len(nproduct)
for i in range(df_len1):
if ((product_art in nproduct.iloc[i]['title']) and (product_category in nproduct.iloc[i]['description'])):
product_info = nproduct[['id', 'title', 'image_url', 'price_display', 'description']]
def path_to_image_html(path):
return (('<img src="' + path) + '" width="60" >')
image_cols = ['image_url']
format_dict = {}
for image_cols in image_cols:
format_dict[image_cols] = path_to_image_html
html = display(HTML(product_info.to_html(escape=False, formatters=format_dict)))
return html
else:
return 'Invalid Search! Please try other artworks or categories:)' |
def product_show(product_art_show):
"\n Function to retrieve the information about top10 products sold in the Art institute of Chicago\n\n Parameters:\n -------------\n Type in any random word\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n DataFrame: a dataframe include related info about the top 10 products and images of the products\n\n Example:\n -------------\n >>>product_search('')\n >>>0\t250620\tThe Age of French Impressionism—Softcover\t\t$30...\n\n "
params_show_product = {'q': product_art_show}
rproshow = requests.get('https://api.artic.edu/api/v1/products?limit=10', params=params_show_product)
try:
status = rproshow.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rproshow1 = json.dumps(rproshow.json(), indent=2)
productshow = json.loads(rproshow1)
nproductshow = pd.DataFrame(productshow['data'])
product_show_info = nproductshow[['id', 'title', 'image_url', 'price_display', 'description']]
def path_to_image_html(path):
return (('<img src="' + path) + '" width="60" >')
image_cols1 = ['image_url']
format_dict = {}
for image_cols1 in image_cols1:
format_dict[image_cols1] = path_to_image_html
html1 = display(HTML(product_show_info.to_html(escape=False, formatters=format_dict)))
return html1 | 8,153,859,559,080,861,000 | Function to retrieve the information about top10 products sold in the Art institute of Chicago
Parameters:
-------------
Type in any random word
Returns:
-------------
Status code: str
if the API request went through
DataFrame: a dataframe that includes related info about the top 10 products and images of the products
Example:
-------------
>>>product_show('')
>>>0 250620 The Age of French Impressionism—Softcover $30... | src/aicapi_yw3760/aicapi_yw3760.py | product_show | nicolewang97/AICAPI_YW3760 | python | def product_show(product_art_show):
"\n Function to retrieve the information about top10 products sold in the Art institute of Chicago\n\n Parameters:\n -------------\n Type in any random word\n\n Returns:\n -------------\n Status code: str\n if the API request went through\n DataFrame: a dataframe include related info about the top 10 products and images of the products\n\n Example:\n -------------\n >>>product_search()\n >>>0\t250620\tThe Age of French Impressionism—Softcover\t\t$30...\n\n "
params_show_product = {'q': product_art_show}
rproshow = requests.get('https://api.artic.edu/api/v1/products?limit=10', params=params_show_product)
try:
status = rproshow.status_code
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}')
except Exception as err:
print(f'Other error occurred: {err}')
else:
print('no error (successfully made request)')
rproshow1 = json.dumps(rproshow.json(), indent=2)
productshow = json.loads(rproshow1)
nproductshow = pd.DataFrame(productshow['data'])
product_show_info = nproductshow[['id', 'title', 'image_url', 'price_display', 'description']]
def path_to_image_html(path):
return (('<img src="' + path) + '" width="60" >')
image_cols1 = ['image_url']
format_dict = {}
for image_cols1 in image_cols1:
format_dict[image_cols1] = path_to_image_html
html1 = display(HTML(product_show_info.to_html(escape=False, formatters=format_dict)))
return html1 |
def run_mx_unary_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
"Runs benchmarks with the given context and precision (dtype)for all the unary\n operators in MXNet.\n\n Parameters\n ----------\n ctx: mx.ctx\n Context to run benchmarks\n dtype: str, default 'float32'\n Precision to use for benchmarks\n warmup: int, default 25\n Number of times to run for warmup\n runs: int, default 100\n Number of runs to capture benchmark results\n\n Returns\n -------\n Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.\n\n "
mx_unary_broadcast_ops = get_all_unary_operators()
mx_unary_op_results = run_op_benchmarks(mx_unary_broadcast_ops, dtype, ctx, warmup, runs)
return mx_unary_op_results | -1,756,838,878,309,694,200 | Runs benchmarks with the given context and precision (dtype) for all the unary
operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results. | benchmark/opperf/nd_operations/unary_operators.py | run_mx_unary_operators_benchmarks | Angzz/DeformableV2 | python | def run_mx_unary_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
"Runs benchmarks with the given context and precision (dtype)for all the unary\n operators in MXNet.\n\n Parameters\n ----------\n ctx: mx.ctx\n Context to run benchmarks\n dtype: str, default 'float32'\n Precision to use for benchmarks\n warmup: int, default 25\n Number of times to run for warmup\n runs: int, default 100\n Number of runs to capture benchmark results\n\n Returns\n -------\n Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.\n\n "
mx_unary_broadcast_ops = get_all_unary_operators()
mx_unary_op_results = run_op_benchmarks(mx_unary_broadcast_ops, dtype, ctx, warmup, runs)
return mx_unary_op_results |
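A hedged usage sketch; it assumes MXNet is installed and that the MXNet source tree is on the Python path so the `benchmark.opperf` package (matching the path shown above) can be imported. The reduced warmup/run counts are illustrative.

```python
# Run the unary-operator benchmarks on CPU with reduced run counts.
import mxnet as mx
from benchmark.opperf.nd_operations.unary_operators import (
    run_mx_unary_operators_benchmarks,
)

results = run_mx_unary_operators_benchmarks(
    ctx=mx.cpu(), dtype="float32", warmup=10, runs=25
)
for op_name, measurements in results.items():
    print(op_name, measurements)
```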
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False, dtype=np.uint8, **kwargs):
"\n Convert categorical variable into dummy/indicator variables.\n\n Data must have category dtype to infer result's ``columns``.\n\n Parameters\n ----------\n data : Series, or DataFrame\n For Series, the dtype must be categorical.\n For DataFrame, at least one column must be categorical.\n prefix : string, list of strings, or dict of strings, default None\n String to append DataFrame column names.\n Pass a list with length equal to the number of columns\n when calling get_dummies on a DataFrame. Alternatively, `prefix`\n can be a dictionary mapping column names to prefixes.\n prefix_sep : string, default '_'\n If appending prefix, separator/delimiter to use. Or pass a\n list or dictionary as with `prefix.`\n dummy_na : bool, default False\n Add a column to indicate NaNs, if False NaNs are ignored.\n columns : list-like, default None\n Column names in the DataFrame to be encoded.\n If `columns` is None then all the columns with\n `category` dtype will be converted.\n sparse : bool, default False\n Whether the dummy columns should be sparse or not. Returns\n SparseDataFrame if `data` is a Series or if all columns are included.\n Otherwise returns a DataFrame with some SparseBlocks.\n\n .. versionadded:: 0.18.2\n\n drop_first : bool, default False\n Whether to get k-1 dummies out of k categorical levels by removing the\n first level.\n\n dtype : dtype, default np.uint8\n Data type for new columns. Only a single dtype is allowed.\n\n .. versionadded:: 0.18.2\n\n Returns\n -------\n dummies : DataFrame\n\n Examples\n --------\n Dask's version only works with Categorical data, as this is the only way to\n know the output shape without computing all the data.\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> s = dd.from_pandas(pd.Series(list('abca')), npartitions=2)\n >>> dd.get_dummies(s)\n Traceback (most recent call last):\n ...\n NotImplementedError: `get_dummies` with non-categorical dtypes is not supported...\n\n With categorical data:\n\n >>> s = dd.from_pandas(pd.Series(list('abca'), dtype='category'), npartitions=2)\n >>> dd.get_dummies(s) # doctest: +NORMALIZE_WHITESPACE\n Dask DataFrame Structure:\n a b c\n npartitions=2\n 0 uint8 uint8 uint8\n 2 ... ... ...\n 3 ... ... ...\n Dask Name: get_dummies, 4 tasks\n >>> dd.get_dummies(s).compute() # doctest: +ELLIPSIS\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n\n See Also\n --------\n pandas.get_dummies\n "
if isinstance(data, (pd.Series, pd.DataFrame)):
return pd.get_dummies(data, prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na, columns=columns, sparse=sparse, drop_first=drop_first, dtype=dtype, **kwargs)
not_cat_msg = '`get_dummies` with non-categorical dtypes is not supported. Please use `df.categorize()` beforehand to convert to categorical dtype.'
unknown_cat_msg = '`get_dummies` with unknown categories is not supported. Please use `column.cat.as_known()` or `df.categorize()` beforehand to ensure known categories'
if isinstance(data, Series):
if (not methods.is_categorical_dtype(data)):
raise NotImplementedError(not_cat_msg)
if (not has_known_categories(data)):
raise NotImplementedError(unknown_cat_msg)
elif isinstance(data, DataFrame):
if (columns is None):
if (data.dtypes == 'object').any():
raise NotImplementedError(not_cat_msg)
columns = data._meta.select_dtypes(include=['category']).columns
elif (not all((methods.is_categorical_dtype(data[c]) for c in columns))):
raise NotImplementedError(not_cat_msg)
if (not all((has_known_categories(data[c]) for c in columns))):
raise NotImplementedError(unknown_cat_msg)
package_name = data._meta.__class__.__module__.split('.')[0]
dummies = sys.modules[package_name].get_dummies
return map_partitions(dummies, data, prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na, columns=columns, sparse=sparse, drop_first=drop_first, dtype=dtype, **kwargs) | -4,451,911,244,835,267,000 | Convert categorical variable into dummy/indicator variables.
Data must have category dtype to infer result's ``columns``.
Parameters
----------
data : Series, or DataFrame
For Series, the dtype must be categorical.
For DataFrame, at least one column must be categorical.
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
.. versionadded:: 0.18.2
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.18.2
Returns
-------
dummies : DataFrame
Examples
--------
Dask's version only works with Categorical data, as this is the only way to
know the output shape without computing all the data.
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> s = dd.from_pandas(pd.Series(list('abca')), npartitions=2)
>>> dd.get_dummies(s)
Traceback (most recent call last):
...
NotImplementedError: `get_dummies` with non-categorical dtypes is not supported...
With categorical data:
>>> s = dd.from_pandas(pd.Series(list('abca'), dtype='category'), npartitions=2)
>>> dd.get_dummies(s) # doctest: +NORMALIZE_WHITESPACE
Dask DataFrame Structure:
a b c
npartitions=2
0 uint8 uint8 uint8
2 ... ... ...
3 ... ... ...
Dask Name: get_dummies, 4 tasks
>>> dd.get_dummies(s).compute() # doctest: +ELLIPSIS
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
See Also
--------
pandas.get_dummies | dask/dataframe/reshape.py | get_dummies | Kirito1397/dask | python | def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False, dtype=np.uint8, **kwargs):
"\n Convert categorical variable into dummy/indicator variables.\n\n Data must have category dtype to infer result's ``columns``.\n\n Parameters\n ----------\n data : Series, or DataFrame\n For Series, the dtype must be categorical.\n For DataFrame, at least one column must be categorical.\n prefix : string, list of strings, or dict of strings, default None\n String to append DataFrame column names.\n Pass a list with length equal to the number of columns\n when calling get_dummies on a DataFrame. Alternatively, `prefix`\n can be a dictionary mapping column names to prefixes.\n prefix_sep : string, default '_'\n If appending prefix, separator/delimiter to use. Or pass a\n list or dictionary as with `prefix.`\n dummy_na : bool, default False\n Add a column to indicate NaNs, if False NaNs are ignored.\n columns : list-like, default None\n Column names in the DataFrame to be encoded.\n If `columns` is None then all the columns with\n `category` dtype will be converted.\n sparse : bool, default False\n Whether the dummy columns should be sparse or not. Returns\n SparseDataFrame if `data` is a Series or if all columns are included.\n Otherwise returns a DataFrame with some SparseBlocks.\n\n .. versionadded:: 0.18.2\n\n drop_first : bool, default False\n Whether to get k-1 dummies out of k categorical levels by removing the\n first level.\n\n dtype : dtype, default np.uint8\n Data type for new columns. Only a single dtype is allowed.\n\n .. versionadded:: 0.18.2\n\n Returns\n -------\n dummies : DataFrame\n\n Examples\n --------\n Dask's version only works with Categorical data, as this is the only way to\n know the output shape without computing all the data.\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> s = dd.from_pandas(pd.Series(list('abca')), npartitions=2)\n >>> dd.get_dummies(s)\n Traceback (most recent call last):\n ...\n NotImplementedError: `get_dummies` with non-categorical dtypes is not supported...\n\n With categorical data:\n\n >>> s = dd.from_pandas(pd.Series(list('abca'), dtype='category'), npartitions=2)\n >>> dd.get_dummies(s) # doctest: +NORMALIZE_WHITESPACE\n Dask DataFrame Structure:\n a b c\n npartitions=2\n 0 uint8 uint8 uint8\n 2 ... ... ...\n 3 ... ... ...\n Dask Name: get_dummies, 4 tasks\n >>> dd.get_dummies(s).compute() # doctest: +ELLIPSIS\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n\n See Also\n --------\n pandas.get_dummies\n "
if isinstance(data, (pd.Series, pd.DataFrame)):
return pd.get_dummies(data, prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na, columns=columns, sparse=sparse, drop_first=drop_first, dtype=dtype, **kwargs)
not_cat_msg = '`get_dummies` with non-categorical dtypes is not supported. Please use `df.categorize()` beforehand to convert to categorical dtype.'
unknown_cat_msg = '`get_dummies` with unknown categories is not supported. Please use `column.cat.as_known()` or `df.categorize()` beforehand to ensure known categories'
if isinstance(data, Series):
if (not methods.is_categorical_dtype(data)):
raise NotImplementedError(not_cat_msg)
if (not has_known_categories(data)):
raise NotImplementedError(unknown_cat_msg)
elif isinstance(data, DataFrame):
if (columns is None):
if (data.dtypes == 'object').any():
raise NotImplementedError(not_cat_msg)
columns = data._meta.select_dtypes(include=['category']).columns
elif (not all((methods.is_categorical_dtype(data[c]) for c in columns))):
raise NotImplementedError(not_cat_msg)
if (not all((has_known_categories(data[c]) for c in columns))):
raise NotImplementedError(unknown_cat_msg)
package_name = data._meta.__class__.__module__.split('.')[0]
dummies = sys.modules[package_name].get_dummies
return map_partitions(dummies, data, prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na, columns=columns, sparse=sparse, drop_first=drop_first, dtype=dtype, **kwargs) |
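The docstring examples cover the Series path; the sketch below shows the DataFrame path, where the target columns must be categorical with known categories (which `categorize()` ensures). The sample frame is illustrative.

```python
# get_dummies on a Dask DataFrame after categorizing the target column.
import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({"colour": ["red", "blue", "red", "green"], "value": [1, 2, 3, 4]})
ddf = dd.from_pandas(pdf, npartitions=2).categorize(columns=["colour"])

dummies = dd.get_dummies(ddf, columns=["colour"], prefix="col")
print(dummies.compute())
```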
def pivot_table(df, index=None, columns=None, values=None, aggfunc='mean'):
"\n Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, and ``aggfunc`` must be all scalar.\n ``values`` can be scalar or list-like.\n\n Parameters\n ----------\n df : DataFrame\n index : scalar\n column to be index\n columns : scalar\n column to be columns\n values : scalar or list(scalar)\n column(s) to aggregate\n aggfunc : {'mean', 'sum', 'count', 'first', 'last'}, default 'mean'\n\n Returns\n -------\n table : DataFrame\n\n See Also\n --------\n pandas.DataFrame.pivot_table\n "
if ((not is_scalar(index)) or (index is None)):
raise ValueError("'index' must be the name of an existing column")
if ((not is_scalar(columns)) or (columns is None)):
raise ValueError("'columns' must be the name of an existing column")
if (not methods.is_categorical_dtype(df[columns])):
raise ValueError("'columns' must be category dtype")
if (not has_known_categories(df[columns])):
raise ValueError("'columns' must have known categories. Please use `df[columns].cat.as_known()` beforehand to ensure known categories")
if (not ((is_list_like(values) and all([is_scalar(v) for v in values])) or is_scalar(values))):
raise ValueError("'values' must refer to an existing column or columns")
available_aggfuncs = ['mean', 'sum', 'count', 'first', 'last']
if ((not is_scalar(aggfunc)) or (aggfunc not in available_aggfuncs)):
raise ValueError(('aggfunc must be either ' + ', '.join((f"'{x}'" for x in available_aggfuncs))))
columns_contents = pd.CategoricalIndex(df[columns].cat.categories, name=columns)
if is_scalar(values):
new_columns = columns_contents
else:
new_columns = pd.MultiIndex.from_product((sorted(values), columns_contents), names=[None, columns])
if (aggfunc in ['first', 'last']):
if is_scalar(values):
meta = pd.DataFrame(columns=new_columns, dtype=df[values].dtype, index=pd.Index(df._meta[index]))
else:
meta = pd.DataFrame(columns=new_columns, index=pd.Index(df._meta[index]))
for value_col in values:
meta[value_col] = meta[value_col].astype(df[values].dtypes[value_col])
else:
meta = pd.DataFrame(columns=new_columns, dtype=np.float64, index=pd.Index(df._meta[index]))
kwargs = {'index': index, 'columns': columns, 'values': values}
if (aggfunc in ['sum', 'mean']):
pv_sum = apply_concat_apply([df], chunk=methods.pivot_sum, aggregate=methods.pivot_agg, meta=meta, token='pivot_table_sum', chunk_kwargs=kwargs)
if (aggfunc in ['count', 'mean']):
pv_count = apply_concat_apply([df], chunk=methods.pivot_count, aggregate=methods.pivot_agg, meta=meta, token='pivot_table_count', chunk_kwargs=kwargs)
if (aggfunc == 'sum'):
return pv_sum
elif (aggfunc == 'count'):
return pv_count
elif (aggfunc == 'mean'):
return (pv_sum / pv_count)
elif (aggfunc == 'first'):
return apply_concat_apply([df], chunk=methods.pivot_first, aggregate=methods.pivot_agg_first, meta=meta, token='pivot_table_first', chunk_kwargs=kwargs)
elif (aggfunc == 'last'):
return apply_concat_apply([df], chunk=methods.pivot_last, aggregate=methods.pivot_agg_last, meta=meta, token='pivot_table_last', chunk_kwargs=kwargs)
else:
raise ValueError | -1,692,296,265,472,854,300 | Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
``index``, ``columns``, and ``aggfunc`` must be all scalar.
``values`` can be scalar or list-like.
Parameters
----------
df : DataFrame
index : scalar
column to be index
columns : scalar
column to be columns
values : scalar or list(scalar)
column(s) to aggregate
aggfunc : {'mean', 'sum', 'count', 'first', 'last'}, default 'mean'
Returns
-------
table : DataFrame
See Also
--------
pandas.DataFrame.pivot_table | dask/dataframe/reshape.py | pivot_table | Kirito1397/dask | python | def pivot_table(df, index=None, columns=None, values=None, aggfunc='mean'):
"\n Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, and ``aggfunc`` must be all scalar.\n ``values`` can be scalar or list-like.\n\n Parameters\n ----------\n df : DataFrame\n index : scalar\n column to be index\n columns : scalar\n column to be columns\n values : scalar or list(scalar)\n column(s) to aggregate\n aggfunc : {'mean', 'sum', 'count', 'first', 'last'}, default 'mean'\n\n Returns\n -------\n table : DataFrame\n\n See Also\n --------\n pandas.DataFrame.pivot_table\n "
if ((not is_scalar(index)) or (index is None)):
raise ValueError("'index' must be the name of an existing column")
if ((not is_scalar(columns)) or (columns is None)):
raise ValueError("'columns' must be the name of an existing column")
if (not methods.is_categorical_dtype(df[columns])):
raise ValueError("'columns' must be category dtype")
if (not has_known_categories(df[columns])):
raise ValueError("'columns' must have known categories. Please use `df[columns].cat.as_known()` beforehand to ensure known categories")
if (not ((is_list_like(values) and all([is_scalar(v) for v in values])) or is_scalar(values))):
raise ValueError("'values' must refer to an existing column or columns")
available_aggfuncs = ['mean', 'sum', 'count', 'first', 'last']
if ((not is_scalar(aggfunc)) or (aggfunc not in available_aggfuncs)):
raise ValueError(('aggfunc must be either ' + ', '.join((f"'{x}'" for x in available_aggfuncs))))
columns_contents = pd.CategoricalIndex(df[columns].cat.categories, name=columns)
if is_scalar(values):
new_columns = columns_contents
else:
new_columns = pd.MultiIndex.from_product((sorted(values), columns_contents), names=[None, columns])
if (aggfunc in ['first', 'last']):
if is_scalar(values):
meta = pd.DataFrame(columns=new_columns, dtype=df[values].dtype, index=pd.Index(df._meta[index]))
else:
meta = pd.DataFrame(columns=new_columns, index=pd.Index(df._meta[index]))
for value_col in values:
meta[value_col] = meta[value_col].astype(df[values].dtypes[value_col])
else:
meta = pd.DataFrame(columns=new_columns, dtype=np.float64, index=pd.Index(df._meta[index]))
kwargs = {'index': index, 'columns': columns, 'values': values}
if (aggfunc in ['sum', 'mean']):
pv_sum = apply_concat_apply([df], chunk=methods.pivot_sum, aggregate=methods.pivot_agg, meta=meta, token='pivot_table_sum', chunk_kwargs=kwargs)
if (aggfunc in ['count', 'mean']):
pv_count = apply_concat_apply([df], chunk=methods.pivot_count, aggregate=methods.pivot_agg, meta=meta, token='pivot_table_count', chunk_kwargs=kwargs)
if (aggfunc == 'sum'):
return pv_sum
elif (aggfunc == 'count'):
return pv_count
elif (aggfunc == 'mean'):
return (pv_sum / pv_count)
elif (aggfunc == 'first'):
return apply_concat_apply([df], chunk=methods.pivot_first, aggregate=methods.pivot_agg_first, meta=meta, token='pivot_table_first', chunk_kwargs=kwargs)
elif (aggfunc == 'last'):
return apply_concat_apply([df], chunk=methods.pivot_last, aggregate=methods.pivot_agg_last, meta=meta, token='pivot_table_last', chunk_kwargs=kwargs)
else:
raise ValueError |
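A hedged usage sketch for `pivot_table`; the sample frame is illustrative, and the `columns` column must be categorical with known categories (`cat.as_known()` ensures this).

```python
# pivot_table over a small illustrative frame.
import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame(
    {"store": ["a", "a", "b", "b"],
     "item": ["x", "y", "x", "y"],
     "sales": [1.0, 2.0, 3.0, 4.0]}
)
ddf = dd.from_pandas(pdf, npartitions=2)
ddf["item"] = ddf["item"].astype("category").cat.as_known()

table = dd.pivot_table(ddf, index="store", columns="item", values="sales", aggfunc="sum")
print(table.compute())
```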
def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None):
'\n Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one or more columns are identifier variables\n (``id_vars``), while all other columns, considered measured variables (``value_vars``), are "unpivoted" to the row\n axis, leaving just two non-identifier columns, \'variable\' and \'value\'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar\n Name to use for the \'variable\' column. If None it uses\n ``frame.columns.name`` or \'variable\'.\n value_name : scalar, default \'value\'\n Name to use for the \'value\' column.\n col_level : int or string, optional\n If columns are a MultiIndex then use this level to melt.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n See Also\n --------\n pandas.DataFrame.melt\n '
from dask.dataframe.core import no_default
return frame.map_partitions(M.melt, meta=no_default, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level, token='melt') | -2,040,048,766,582,793,500 | Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one or more columns are identifier variables
(``id_vars``), while all other columns, considered measured variables (``value_vars``), are "unpivoted" to the row
axis, leaving just two non-identifier columns, 'variable' and 'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
Returns
-------
DataFrame
Unpivoted DataFrame.
See Also
--------
pandas.DataFrame.melt | dask/dataframe/reshape.py | melt | Kirito1397/dask | python | def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None):
'\n Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one or more columns are identifier variables\n (``id_vars``), while all other columns, considered measured variables (``value_vars``), are "unpivoted" to the row\n axis, leaving just two non-identifier columns, \'variable\' and \'value\'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar\n Name to use for the \'variable\' column. If None it uses\n ``frame.columns.name`` or \'variable\'.\n value_name : scalar, default \'value\'\n Name to use for the \'value\' column.\n col_level : int or string, optional\n If columns are a MultiIndex then use this level to melt.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n See Also\n --------\n pandas.DataFrame.melt\n '
from dask.dataframe.core import no_default
return frame.map_partitions(M.melt, meta=no_default, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level, token='melt') |
def get_latest_questionnaire_with_concept(self, codeId):
'Find the questionnaire most recently modified that has the specified concept code.'
with self.session() as session:
return session.query(Questionnaire).join(Questionnaire.concepts).filter((QuestionnaireConcept.codeId == codeId)).order_by(Questionnaire.lastModified.desc()).options(subqueryload(Questionnaire.questions)).first() | -8,409,286,402,782,696,000 | Find the questionnaire most recently modified that has the specified concept code. | rdr_service/dao/questionnaire_dao.py | get_latest_questionnaire_with_concept | all-of-us/raw-data-repository | python | def get_latest_questionnaire_with_concept(self, codeId):
with self.session() as session:
return session.query(Questionnaire).join(Questionnaire.concepts).filter((QuestionnaireConcept.codeId == codeId)).order_by(Questionnaire.lastModified.desc()).options(subqueryload(Questionnaire.questions)).first() |
def _validate_update(self, session, obj, existing_obj):
'Validates that an update is OK before performing it. (Not applied on insert.)\n By default, validates that the object already exists, and if an expected semanticVersion ID is provided,\n that it matches.\n '
if (not existing_obj):
raise NotFound(('%s with id %s does not exist' % (self.model_type.__name__, id)))
if (self.validate_version_match and (existing_obj.semanticVersion != obj.semanticVersion)):
raise PreconditionFailed(('Expected semanticVersion was %s; stored semanticVersion was %s' % (obj.semanticVersion, existing_obj.semanticVersion)))
resource_json = json.loads(obj.resource)
exist_id = str(obj.questionnaireId)
new_semantic_version = resource_json['version']
if self.has_dup_semantic_version(session, exist_id, new_semantic_version):
raise BadRequest('This semantic version already exist for this questionnaire id.')
self._validate_model(session, obj) | 6,419,327,074,788,434,000 | Validates that an update is OK before performing it. (Not applied on insert.)
By default, validates that the object already exists, and if an expected semanticVersion ID is provided,
that it matches. | rdr_service/dao/questionnaire_dao.py | _validate_update | all-of-us/raw-data-repository | python | def _validate_update(self, session, obj, existing_obj):
'Validates that an update is OK before performing it. (Not applied on insert.)\n By default, validates that the object already exists, and if an expected semanticVersion ID is provided,\n that it matches.\n '
if (not existing_obj):
raise NotFound(('%s with id %s does not exist' % (self.model_type.__name__, id)))
if (self.validate_version_match and (existing_obj.semanticVersion != obj.semanticVersion)):
raise PreconditionFailed(('Expected semanticVersion was %s; stored semanticVersion was %s' % (obj.semanticVersion, existing_obj.semanticVersion)))
resource_json = json.loads(obj.resource)
exist_id = str(obj.questionnaireId)
new_semantic_version = resource_json['version']
if self.has_dup_semantic_version(session, exist_id, new_semantic_version):
raise BadRequest('This semantic version already exist for this questionnaire id.')
self._validate_model(session, obj) |
@classmethod
def _populate_questions(cls, group, code_map, questions):
'Recursively populate questions under this group.'
if group.question:
for question in group.question:
if (question.linkId and question.concept and (len(question.concept) == 1)):
concept = question.concept[0]
if (concept.system and concept.code and (concept.system != PPI_EXTRA_SYSTEM)):
code_map[(concept.system, concept.code)] = (concept.display, CodeType.QUESTION, None)
questions.append((concept.system, concept.code, question.linkId, question.repeats))
if question.group:
for sub_group in question.group:
cls._populate_questions(sub_group, code_map, questions)
if question.option:
for option in question.option:
code_map[(option.system, option.code)] = (option.display, CodeType.ANSWER, None)
if group.group:
for sub_group in group.group:
cls._populate_questions(sub_group, code_map, questions) | 1,074,332,531,278,272 | Recursively populate questions under this group. | rdr_service/dao/questionnaire_dao.py | _populate_questions | all-of-us/raw-data-repository | python | @classmethod
def _populate_questions(cls, group, code_map, questions):
if group.question:
for question in group.question:
if (question.linkId and question.concept and (len(question.concept) == 1)):
concept = question.concept[0]
if (concept.system and concept.code and (concept.system != PPI_EXTRA_SYSTEM)):
code_map[(concept.system, concept.code)] = (concept.display, CodeType.QUESTION, None)
questions.append((concept.system, concept.code, question.linkId, question.repeats))
if question.group:
for sub_group in question.group:
cls._populate_questions(sub_group, code_map, questions)
if question.option:
for option in question.option:
code_map[(option.system, option.code)] = (option.display, CodeType.ANSWER, None)
if group.group:
for sub_group in group.group:
cls._populate_questions(sub_group, code_map, questions) |
def convert_to_int(s):
'Turn ANES data entry into an integer.\n \n >>> convert_to_int("1. Govt should provide many fewer services")\n 1\n >>> convert_to_int("2")\n 2\n '
try:
return int(s.partition('.')[0])
except ValueError:
warnings.warn(("Couldn't convert: " + s))
return np.nan
except AttributeError:
return s | 6,199,727,918,395,590,000 | Turn ANES data entry into an integer.
>>> convert_to_int("1. Govt should provide many fewer services")
1
>>> convert_to_int("2")
2 | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | convert_to_int | aryamccarthy/ANES | python | def convert_to_int(s):
'Turn ANES data entry into an integer.\n \n >>> convert_to_int("1. Govt should provide many fewer services")\n 1\n >>> convert_to_int("2")\n 2\n '
try:
return int(s.partition('.')[0])
except ValueError:
warnings.warn(("Couldn't convert: " + s))
return np.nan
except AttributeError:
return s |
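A self-contained sketch of the converter applied to a pandas Series; the sample strings imitate ANES-style entries and are not real survey data, and the function body is copied from the row above.

import warnings
import numpy as np
import pandas as pd

def convert_to_int(s):
    # Copied from the entry above so this snippet runs on its own.
    try:
        return int(s.partition('.')[0])
    except ValueError:
        warnings.warn("Couldn't convert: " + s)
        return np.nan
    except AttributeError:
        return s

raw = pd.Series(["1. Govt should provide many fewer services", "2", "oops", 7])
print(raw.apply(convert_to_int).tolist())   # [1, 2, nan, 7]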
def negative_to_nan(value):
'Convert negative values to missing.\n \n ANES codes various non-answers as negative numbers.\n For instance, if a question does not pertain to the \n respondent.\n '
return (value if (value >= 0) else np.nan) | -8,710,119,248,488,928,000 | Convert negative values to missing.
ANES codes various non-answers as negative numbers.
For instance, if a question does not pertain to the
respondent. | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | negative_to_nan | aryamccarthy/ANES | python | def negative_to_nan(value):
'Convert negative values to missing.\n \n ANES codes various non-answers as negative numbers.\n For instance, if a question does not pertain to the \n respondent.\n '
return (value if (value >= 0) else np.nan) |
def lib1_cons2_neutral3(x):
'Rearrange questions where 3 is neutral.'
return (((- 3) + x) if (x != 1) else x) | 4,934,245,784,321,438,000 | Rearrange questions where 3 is neutral. | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | lib1_cons2_neutral3 | aryamccarthy/ANES | python | def lib1_cons2_neutral3(x):
return (((- 3) + x) if (x != 1) else x) |
def liblow_conshigh(x):
'Reorder questions where the liberal response is low.'
return (- x) | -7,838,750,325,311,162,000 | Reorder questions where the liberal response is low. | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | liblow_conshigh | aryamccarthy/ANES | python | def liblow_conshigh(x):
return (- x) |
def dem_edu_special_treatment(x):
'Eliminate negative numbers and {95. Other}'
return (np.nan if ((x == 95) or (x < 0)) else x) | 705,192,464,560,746,200 | Eliminate negative numbers and {95. Other} | notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | dem_edu_special_treatment | aryamccarthy/ANES | python | def dem_edu_special_treatment(x):
return (np.nan if ((x == 95) or (x < 0)) else x) |
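A short sketch showing how the small ANES cleaners above compose over columns; the response values below are invented.

import numpy as np
import pandas as pd

def negative_to_nan(value):
    return value if value >= 0 else np.nan

def dem_edu_special_treatment(x):
    return np.nan if (x == 95 or x < 0) else x

edu = pd.Series([13, 95, -9, 16])
print(edu.apply(dem_edu_special_treatment).tolist())   # [13, nan, nan, 16]

services = pd.Series([1, -8, 5])
print(services.apply(negative_to_nan).tolist())        # [1, nan, 5]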
def merge(first, second):
'\n Recursively merges two dictionaries.\n\n Second dictionary values will take precedence over those from the first one.\n Nested dictionaries are merged too.\n\n :param dict first: The first dictionary\n :param dict second: The second dictionary\n :return: the resulting merged dictionary\n :rtype: dict\n '
if (not isinstance(second, dict)):
return second
result = deepcopy(first)
for (key, value) in second.items():
if ((key in result) and isinstance(result[key], dict)):
result[key] = merge(result[key], value)
else:
result[key] = deepcopy(value)
return result | -7,877,707,461,125,681,000 | Recursively merges two dictionaries.
Second dictionary values will take precedence over those from the first one.
Nested dictionaries are merged too.
:param dict first: The first dictionary
:param dict second: The second dictionary
:return: the resulting merged dictionary
:rtype: dict | sanic_restplus/utils.py | merge | oliverpain/sanic-restplus | python | def merge(first, second):
'\n Recursively merges two dictionaries.\n\n Second dictionary values will take precedence over those from the first one.\n Nested dictionaries are merged too.\n\n :param dict first: The first dictionary\n :param dict second: The second dictionary\n :return: the resulting merged dictionary\n :rtype: dict\n '
if (not isinstance(second, dict)):
return second
result = deepcopy(first)
for (key, value) in second.items():
if ((key in result) and isinstance(result[key], dict)):
result[key] = merge(result[key], value)
else:
result[key] = deepcopy(value)
return result |
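A usage sketch of the recursive merge; the settings dictionaries are made up, and the function is re-declared so the snippet runs standalone.

from copy import deepcopy

def merge(first, second):
    # Same logic as the entry above.
    if not isinstance(second, dict):
        return second
    result = deepcopy(first)
    for key, value in second.items():
        if key in result and isinstance(result[key], dict):
            result[key] = merge(result[key], value)
        else:
            result[key] = deepcopy(value)
    return result

defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 5433}, "debug": True}
print(merge(defaults, overrides))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}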
def camel_to_dash(value):
'\n Transform a CamelCase string into a low_dashed one\n\n :param str value: a CamelCase string to transform\n :return: the low_dashed string\n :rtype: str\n '
first_cap = FIRST_CAP_RE.sub('\\1_\\2', value)
return ALL_CAP_RE.sub('\\1_\\2', first_cap).lower() | 258,975,552,515,779,330 | Transform a CamelCase string into a low_dashed one
:param str value: a CamelCase string to transform
:return: the low_dashed string
:rtype: str | sanic_restplus/utils.py | camel_to_dash | oliverpain/sanic-restplus | python | def camel_to_dash(value):
'\n Transform a CamelCase string into a low_dashed one\n\n :param str value: a CamelCase string to transform\n :return: the low_dashed string\n :rtype: str\n '
first_cap = FIRST_CAP_RE.sub('\\1_\\2', value)
return ALL_CAP_RE.sub('\\1_\\2', first_cap).lower() |
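The module-level FIRST_CAP_RE and ALL_CAP_RE patterns are not shown in this excerpt; the definitions below are the usual two-pass camel-to-snake recipe and should be read as an assumption rather than the library's exact source.

import re

FIRST_CAP_RE = re.compile(r"(.)([A-Z][a-z]+)")   # assumed definition
ALL_CAP_RE = re.compile(r"([a-z0-9])([A-Z])")    # assumed definition

def camel_to_dash(value):
    first_cap = FIRST_CAP_RE.sub(r"\1_\2", value)
    return ALL_CAP_RE.sub(r"\1_\2", first_cap).lower()

print(camel_to_dash("SampleResource"))    # sample_resource
print(camel_to_dash("HTTPServerError"))   # http_server_error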
def default_id(resource, method):
'Default operation ID generator'
return '{0}_{1}'.format(method, camel_to_dash(resource)) | 2,429,005,458,864,332,300 | Default operation ID generator | sanic_restplus/utils.py | default_id | oliverpain/sanic-restplus | python | def default_id(resource, method):
return '{0}_{1}'.format(method, camel_to_dash(resource)) |
def not_none(data):
'\n Remove all keys where value is None\n\n :param dict data: A dictionary with potentially some values set to None\n :return: The same dictionary without the keys with values to ``None``\n :rtype: dict\n '
return dict(((k, v) for (k, v) in data.items() if (v is not None))) | 531,670,141,387,802,300 | Remove all keys where value is None
:param dict data: A dictionary with potentially some values set to None
:return: The same dictionary without the keys with values to ``None``
:rtype: dict | sanic_restplus/utils.py | not_none | oliverpain/sanic-restplus | python | def not_none(data):
'\n Remove all keys where value is None\n\n :param dict data: A dictionary with potentially some values set to None\n :return: The same dictionary without the keys with values to ``None``\n :rtype: dict\n '
return dict(((k, v) for (k, v) in data.items() if (v is not None))) |
def not_none_sorted(data):
'\n Remove all keys where value is None\n\n :param OrderedDict data: A dictionary with potentially some values set to None\n :return: The same dictionary without the keys with values to ``None``\n :rtype: OrderedDict\n '
return OrderedDict(((k, v) for (k, v) in sorted(data.items()) if (v is not None))) | -6,087,313,675,626,550,000 | Remove all keys where value is None
:param OrderedDict data: A dictionary with potentially some values set to None
:return: The same dictionary without the keys with values to ``None``
:rtype: OrderedDict | sanic_restplus/utils.py | not_none_sorted | oliverpain/sanic-restplus | python | def not_none_sorted(data):
'\n Remove all keys where value is None\n\n :param OrderedDict data: A dictionary with potentially some values set to None\n :return: The same dictionary without the keys with values to ``None``\n :rtype: OrderedDict\n '
return OrderedDict(((k, v) for (k, v) in sorted(data.items()) if (v is not None))) |
def unpack(response, default_code=HTTPStatus.OK):
'\n Unpack a Flask standard response.\n\n Flask response can be:\n - a single value\n - a 2-tuple ``(value, code)``\n - a 3-tuple ``(value, code, headers)``\n\n .. warning::\n\n When using this function, you must ensure that the tuple is not the response data.\n To do so, prefer returning list instead of tuple for listings.\n\n :param response: A Flask style response\n :param int default_code: The HTTP code to use as default if none is provided\n :return: a 3-tuple ``(data, code, headers)``\n :rtype: tuple\n :raise ValueError: if the response does not have one of the expected format\n '
if (not isinstance(response, tuple)):
return (response, default_code, {})
elif (len(response) == 1):
return (response[0], default_code, {})
elif (len(response) == 2):
(data, code) = response
return (data, code, {})
elif (len(response) == 3):
(data, code, headers) = response
return (data, (code or default_code), headers)
else:
raise ValueError('Too many response values') | 7,547,906,597,733,319,000 | Unpack a Flask standard response.
Flask response can be:
- a single value
- a 2-tuple ``(value, code)``
- a 3-tuple ``(value, code, headers)``
.. warning::
When using this function, you must ensure that the tuple is not the response data.
To do so, prefer returning list instead of tuple for listings.
:param response: A Flask style response
:param int default_code: The HTTP code to use as default if none is provided
:return: a 3-tuple ``(data, code, headers)``
:rtype: tuple
:raise ValueError: if the response does not have one of the expected formats
'\n Unpack a Flask standard response.\n\n Flask response can be:\n - a single value\n - a 2-tuple ``(value, code)``\n - a 3-tuple ``(value, code, headers)``\n\n .. warning::\n\n When using this function, you must ensure that the tuple is not the response data.\n To do so, prefer returning list instead of tuple for listings.\n\n :param response: A Flask style response\n :param int default_code: The HTTP code to use as default if none is provided\n :return: a 3-tuple ``(data, code, headers)``\n :rtype: tuple\n :raise ValueError: if the response does not have one of the expected format\n '
if (not isinstance(response, tuple)):
return (response, default_code, {})
elif (len(response) == 1):
return (response[0], default_code, {})
elif (len(response) == 2):
(data, code) = response
return (data, code, {})
elif (len(response) == 3):
(data, code, headers) = response
return (data, (code or default_code), headers)
else:
raise ValueError('Too many response values') |
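A quick demonstration of the three accepted response shapes; unpack is re-declared with the same logic so the demo runs outside the library.

from http import HTTPStatus

def unpack(response, default_code=HTTPStatus.OK):
    # Same behaviour as the entry above.
    if not isinstance(response, tuple):
        return response, default_code, {}
    elif len(response) == 1:
        return response[0], default_code, {}
    elif len(response) == 2:
        data, code = response
        return data, code, {}
    elif len(response) == 3:
        data, code, headers = response
        return data, code or default_code, headers
    raise ValueError('Too many response values')

print(unpack({"ok": True}))                      # ({'ok': True}, <HTTPStatus.OK: 200>, {})
print(unpack(({"id": 1}, 201)))                  # ({'id': 1}, 201, {})
print(unpack(([1, 2], 200, {"X-Total": "2"})))   # ([1, 2], 200, {'X-Total': '2'})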
def parse_rule(parameter_string):
"Parse a parameter string into its constituent name, type, and\n pattern\n\n For example:\n `parse_parameter_string('<param_one:[A-z]>')` ->\n ('param_one', str, '[A-z]')\n\n :param parameter_string: String to parse\n :return: tuple containing\n (parameter_name, parameter_type, parameter_pattern)\n "
if str(parameter_string).startswith('/'):
parameter_string = parameter_string[1:]
parameter_string = str(parameter_string).strip('<>')
name = parameter_string
pattern = 'string'
if (':' in parameter_string):
(name, pattern) = parameter_string.split(':', 1)
default = (str, pattern)
(_type, pattern) = REGEX_TYPES.get(pattern, default)
return (name, _type, pattern) | -719,285,102,467,803,600 | Parse a parameter string into its constituent name, type, and
pattern
For example:
`parse_parameter_string('<param_one:[A-z]>')` ->
('param_one', str, '[A-z]')
:param parameter_string: String to parse
:return: tuple containing
(parameter_name, parameter_type, parameter_pattern) | sanic_restplus/utils.py | parse_rule | oliverpain/sanic-restplus | python | def parse_rule(parameter_string):
"Parse a parameter string into its constituent name, type, and\n pattern\n\n For example:\n `parse_parameter_string('<param_one:[A-z]>')` ->\n ('param_one', str, '[A-z]')\n\n :param parameter_string: String to parse\n :return: tuple containing\n (parameter_name, parameter_type, parameter_pattern)\n "
if str(parameter_string).startswith('/'):
parameter_string = parameter_string[1:]
parameter_string = str(parameter_string).strip('<>')
name = parameter_string
pattern = 'string'
if (':' in parameter_string):
(name, pattern) = parameter_string.split(':', 1)
default = (str, pattern)
(_type, pattern) = REGEX_TYPES.get(pattern, default)
return (name, _type, pattern) |
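REGEX_TYPES is defined elsewhere in the module and is not shown here; the two-entry stand-in below is an assumption so the demo can run on its own.

REGEX_TYPES = {
    "string": (str, r"[^/]+"),   # assumed stand-in mapping
    "int": (int, r"-?\d+"),
}

def parse_rule(parameter_string):
    # Same logic as the entry above.
    if str(parameter_string).startswith('/'):
        parameter_string = parameter_string[1:]
    parameter_string = str(parameter_string).strip('<>')
    name = parameter_string
    pattern = 'string'
    if ':' in parameter_string:
        name, pattern = parameter_string.split(':', 1)
    default = (str, pattern)
    _type, pattern = REGEX_TYPES.get(pattern, default)
    return name, _type, pattern

print(parse_rule('<user_id:int>'))   # ('user_id', <class 'int'>, '-?\\d+')
print(parse_rule('<slug:[A-z]+>'))   # ('slug', <class 'str'>, '[A-z]+')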
def get_instance_seg_v1_net(point_cloud, one_hot_vec, is_training, bn_decay, end_points):
' 3D instance segmentation PointNet v1 network.\n Input:\n point_cloud: TF tensor in shape (B,N,4)\n frustum point clouds with XYZ and intensity in point channels\n XYZs are in frustum coordinate\n one_hot_vec: TF tensor in shape (B,3)\n length-3 vectors indicating predicted object type\n is_training: TF boolean scalar\n bn_decay: TF float scalar\n end_points: dict\n Output:\n logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object\n end_points: dict\n '
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
net = tf.expand_dims(point_cloud, 2)
net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
point_feat = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(point_feat, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
global_feat = tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool')
global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)
global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
net = tf_util.conv2d(concat_feat, 512, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv6', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv7', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv8', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv9', bn_decay=bn_decay)
net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)
logits = tf_util.conv2d(net, 2, [1, 1], padding='VALID', stride=[1, 1], activation_fn=None, scope='conv10')
logits = tf.squeeze(logits, [2])
return (logits, end_points) | -5,711,673,160,838,257,000 | 3D instance segmentation PointNet v1 network.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_training: TF boolean scalar
bn_decay: TF float scalar
end_points: dict
Output:
logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
end_points: dict | models/frustum_pointnets_v1.py | get_instance_seg_v1_net | BPMJG/annotated-F-pointnet | python | def get_instance_seg_v1_net(point_cloud, one_hot_vec, is_training, bn_decay, end_points):
' 3D instance segmentation PointNet v1 network.\n Input:\n point_cloud: TF tensor in shape (B,N,4)\n frustum point clouds with XYZ and intensity in point channels\n XYZs are in frustum coordinate\n one_hot_vec: TF tensor in shape (B,3)\n length-3 vectors indicating predicted object type\n is_training: TF boolean scalar\n bn_decay: TF float scalar\n end_points: dict\n Output:\n logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object\n end_points: dict\n '
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
net = tf.expand_dims(point_cloud, 2)
net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
point_feat = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(point_feat, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
global_feat = tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool')
global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)
global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
net = tf_util.conv2d(concat_feat, 512, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv6', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv7', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv8', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv9', bn_decay=bn_decay)
net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)
logits = tf_util.conv2d(net, 2, [1, 1], padding='VALID', stride=[1, 1], activation_fn=None, scope='conv10')
logits = tf.squeeze(logits, [2])
return (logits, end_points) |
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec, is_training, bn_decay, end_points):
' 3D Box Estimation PointNet v1 network.\n Input:\n object_point_cloud: TF tensor in shape (B,M,C)\n point clouds in object coordinate\n one_hot_vec: TF tensor in shape (B,3)\n length-3 vectors indicating predicted object type\n Output:\n output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)\n including box centers, heading bin class scores and residuals,\n and size cluster scores and residuals\n '
num_point = object_point_cloud.get_shape()[1].value
net = tf.expand_dims(object_point_cloud, 2)
net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv-reg1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv-reg2', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv-reg3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 512, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv-reg4', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool2')
net = tf.squeeze(net, axis=[1, 2])
net = tf.concat([net, one_hot_vec], axis=1)
net = tf_util.fully_connected(net, 512, scope='fc1', bn=True, is_training=is_training, bn_decay=bn_decay)
net = tf_util.fully_connected(net, 256, scope='fc2', bn=True, is_training=is_training, bn_decay=bn_decay)
output = tf_util.fully_connected(net, ((3 + (NUM_HEADING_BIN * 2)) + (NUM_SIZE_CLUSTER * 4)), activation_fn=None, scope='fc3')
return (output, end_points) | 698,406,314,337,023,000 | 3D Box Estimation PointNet v1 network.
Input:
object_point_cloud: TF tensor in shape (B,M,C)
point clouds in object coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
Output:
output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
including box centers, heading bin class scores and residuals,
and size cluster scores and residuals | models/frustum_pointnets_v1.py | get_3d_box_estimation_v1_net | BPMJG/annotated-F-pointnet | python | def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec, is_training, bn_decay, end_points):
' 3D Box Estimation PointNet v1 network.\n Input:\n object_point_cloud: TF tensor in shape (B,M,C)\n point clouds in object coordinate\n one_hot_vec: TF tensor in shape (B,3)\n length-3 vectors indicating predicted object type\n Output:\n output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)\n including box centers, heading bin class scores and residuals,\n and size cluster scores and residuals\n '
num_point = object_point_cloud.get_shape()[1].value
net = tf.expand_dims(object_point_cloud, 2)
net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv-reg1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv-reg2', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv-reg3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 512, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='conv-reg4', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool2')
net = tf.squeeze(net, axis=[1, 2])
net = tf.concat([net, one_hot_vec], axis=1)
net = tf_util.fully_connected(net, 512, scope='fc1', bn=True, is_training=is_training, bn_decay=bn_decay)
net = tf_util.fully_connected(net, 256, scope='fc2', bn=True, is_training=is_training, bn_decay=bn_decay)
output = tf_util.fully_connected(net, ((3 + (NUM_HEADING_BIN * 2)) + (NUM_SIZE_CLUSTER * 4)), activation_fn=None, scope='fc3')
return (output, end_points) |
def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
' Frustum PointNets model. The model predict 3D object masks and\n amodel bounding boxes for objects in frustum point clouds.\n\n Input:\n point_cloud: TF tensor in shape (B,N,4)\n frustum point clouds with XYZ and intensity in point channels\n XYZs are in frustum coordinate\n one_hot_vec: TF tensor in shape (B,3)\n length-3 vectors indicating predicted object type\n is_training: TF boolean scalar\n bn_decay: TF float scalar\n Output:\n end_points: dict (map from name strings to TF tensors)\n '
end_points = {}
(logits, end_points) = get_instance_seg_v1_net(point_cloud, one_hot_vec, is_training, bn_decay, end_points)
end_points['mask_logits'] = logits
(object_point_cloud_xyz, mask_xyz_mean, end_points) = point_cloud_masking(point_cloud, logits, end_points)
(center_delta, end_points) = get_center_regression_net(object_point_cloud_xyz, one_hot_vec, is_training, bn_decay, end_points)
stage1_center = (center_delta + mask_xyz_mean)
end_points['stage1_center'] = stage1_center
object_point_cloud_xyz_new = (object_point_cloud_xyz - tf.expand_dims(center_delta, 1))
(output, end_points) = get_3d_box_estimation_v1_net(object_point_cloud_xyz_new, one_hot_vec, is_training, bn_decay, end_points)
end_points = parse_output_to_tensors(output, end_points)
end_points['center'] = (end_points['center_boxnet'] + stage1_center)
return end_points | 4,313,505,580,426,850,300 | Frustum PointNets model. The model predicts 3D object masks and
amodal bounding boxes for objects in frustum point clouds.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_training: TF boolean scalar
bn_decay: TF float scalar
Output:
end_points: dict (map from name strings to TF tensors) | models/frustum_pointnets_v1.py | get_model | BPMJG/annotated-F-pointnet | python | def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
' Frustum PointNets model. The model predict 3D object masks and\n amodel bounding boxes for objects in frustum point clouds.\n\n Input:\n point_cloud: TF tensor in shape (B,N,4)\n frustum point clouds with XYZ and intensity in point channels\n XYZs are in frustum coordinate\n one_hot_vec: TF tensor in shape (B,3)\n length-3 vectors indicating predicted object type\n is_training: TF boolean scalar\n bn_decay: TF float scalar\n Output:\n end_points: dict (map from name strings to TF tensors)\n '
end_points = {}
(logits, end_points) = get_instance_seg_v1_net(point_cloud, one_hot_vec, is_training, bn_decay, end_points)
end_points['mask_logits'] = logits
(object_point_cloud_xyz, mask_xyz_mean, end_points) = point_cloud_masking(point_cloud, logits, end_points)
(center_delta, end_points) = get_center_regression_net(object_point_cloud_xyz, one_hot_vec, is_training, bn_decay, end_points)
stage1_center = (center_delta + mask_xyz_mean)
end_points['stage1_center'] = stage1_center
object_point_cloud_xyz_new = (object_point_cloud_xyz - tf.expand_dims(center_delta, 1))
(output, end_points) = get_3d_box_estimation_v1_net(object_point_cloud_xyz_new, one_hot_vec, is_training, bn_decay, end_points)
end_points = parse_output_to_tensors(output, end_points)
end_points['center'] = (end_points['center_boxnet'] + stage1_center)
return end_points |
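A hedged TensorFlow 1.x sketch of wiring placeholders into get_model; it assumes the repository's models package is importable and uses arbitrary batch and point counts.

import tensorflow as tf   # TensorFlow 1.x assumed, as in the repository
from models.frustum_pointnets_v1 import get_model   # import path assumed from the row above

with tf.Graph().as_default():
    point_cloud_pl = tf.placeholder(tf.float32, shape=(32, 1024, 4))   # XYZ + intensity
    one_hot_pl = tf.placeholder(tf.float32, shape=(32, 3))             # coarse object class
    is_training_pl = tf.placeholder(tf.bool, shape=())
    end_points = get_model(point_cloud_pl, one_hot_pl, is_training_pl, bn_decay=None)
    # Segmentation logits plus the parsed box regression outputs.
    print(sorted(end_points.keys()))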
@commands.group(invoke_without_command=True)
@commands.is_owner()
async def profanity(self, ctx, mode: bool):
'Disable or enable the profanity filter.\n \n Usage: `profanity enable` / `profanity disable` \n '
self.enabled = mode
(await self.coll.update_one({'_id': 'config'}, {'$set': {'enabled': self.enabled}}, upsert=True))
(await ctx.send((('Enabled' if mode else 'Disabled') + ' the profanity filter.'))) | -6,509,972,796,830,525,000 | Disable or enable the profanity filter.
Usage: `profanity enable` / `profanity disable` | profanity-filter/profanity-filter.py | profanity | officialpiyush/modmail-plugins-2 | python | @commands.group(invoke_without_command=True)
@commands.is_owner()
async def profanity(self, ctx, mode: bool):
'Disable or enable the profanity filter.\n \n Usage: `profanity enable` / `profanity disable` \n '
self.enabled = mode
(await self.coll.update_one({'_id': 'config'}, {'$set': {'enabled': self.enabled}}, upsert=True))
(await ctx.send((('Enabled' if mode else 'Disabled') + ' the profanity filter.'))) |
@commands.is_owner()
@profanity.command()
async def whitelist(ctx, target: Union[(Member, Role, TextChannel)]):
'Whitelist a user, role or channel from the profanity filter.\n \n Usage: `profanity whitelist @dude`\n '
self = ctx.bot.get_cog('ProfanityFilter')
if (target.id in self.whitelist):
self.whitelist.remove(target.id)
removed = True
else:
self.whitelist.add(target.id)
removed = False
(await self.coll.update_one({'_id': 'config'}, {'$set': {'whitelist': list(self.whitelist)}}, upsert=True))
(await ctx.send(f"{('Un-w' if removed else 'W')}hitelisted {target.mention} from the profanity filter.")) | 3,735,439,529,420,118,500 | Whitelist a user, role or channel from the profanity filter.
Usage: `profanity whitelist @dude` | profanity-filter/profanity-filter.py | whitelist | officialpiyush/modmail-plugins-2 | python | @commands.is_owner()
@profanity.command()
async def whitelist(ctx, target: Union[(Member, Role, TextChannel)]):
'Whitelist a user, role or channel from the profanity filter.\n \n Usage: `profanity whitelist @dude`\n '
self = ctx.bot.get_cog('ProfanityFilter')
if (target.id in self.whitelist):
self.whitelist.remove(target.id)
removed = True
else:
self.whitelist.add(target.id)
removed = False
(await self.coll.update_one({'_id': 'config'}, {'$set': {'whitelist': list(self.whitelist)}}, upsert=True))
(await ctx.send(f"{('Un-w' if removed else 'W')}hitelisted {target.mention} from the profanity filter.")) |
@classmethod
def __init__(self, *args, **kwargs):
' original request string '
self.req_obj = kwargs.pop('req_obj')
self.request = self.req_obj.get('request', '')
self.req_from = self.req_obj.get('from', '')
self.response = '' | -7,662,516,164,373,698,000 | original request string | core/brain/remind/me/every/reaction.py | __init__ | vsilent/smarty-bot | python | @classmethod
def __init__(self, *args, **kwargs):
' '
self.req_obj = kwargs.pop('req_obj')
self.request = self.req_obj.get('request', )
self.req_from = self.req_obj.get('from', )
self.response = |
@classmethod
def run(self):
'default method'
sess = Session()
sender = self.req_obj.get('sender', '')
if sender:
email = sender.split('/')[0]
profile = sess.query(Profile).filter((Profile.email == email)).one()
cron = CronTab(getuser())
DAYS = {'sunday': 'SUN', 'monday': 'MON', 'tuesday': 'TUE', 'wednesday': 'WED', 'thursday': 'THU', 'friday': 'FRI', 'saturday': 'SAT'}
req = self.request.replace('remind me every', '', 1)
m = re.search('\\s+?(by|with|to|of)\\s+message\\s+?(.+)', req)
if (m and m.group(2)):
msg = m.group(2)
else:
m = re.search('\\s+?(by|with|to|of)\\s+?(.+)', req)
if (m and m.group(2)):
msg = m.group(2)
else:
msg = 'This a reminder. Unfortunatelly I could not parse your message, but I guess you can remember what you wanted to do.'
job = cron.new(command=('/usr/bin/python %s/core/cron/cronjob.py --uuid=%s --cmd="send jabber message" --arguments="%s"' % (ROBOT_DIR, profile.uuid, msg.replace('"', ''))))
skip_other = False
if req.strip().startswith('month'):
job.minute.on(0)
job.hour.on(0)
job.dom.on(1)
skip_other = True
if req.strip().startswith('week'):
job.minute.on(0)
job.hour.on(0)
job.dow.on(0)
skip_other = True
if req.strip().startswith('year'):
job.dom.on(0)
job.month.on(0)
skip_other = True
dow = False
for (dw, cron_day) in DAYS.items():
if req.strip().lower().startswith(dw):
dow = True
break
if dow:
job.dow.on(cron_day.upper())
r = re.split(('^' + dw), req.strip(), flags=re.IGNORECASE)
if (r and (len(r) == 2)):
req = r.pop()
if req.strip().startswith('at '):
time = re.search("[^0-9](\\d{1,2})\\so'clock", req)
if (time and time.group(1)):
job.minute.on(0)
job.hour.on(time.group(1))
skip_other = True
if (not skip_other):
time = re.search('[^0-9](\\d{1,2}):(\\d{2})[^0-9]', req)
if (time and time.group(1) and time.group(2)):
job.minute.on(time.group(2))
job.hour.on(time.group(1))
skip_other = True
if ((not skip_other) and req.strip().startswith('hourly')):
job.minute.on(0)
skip_other = True
elif (not skip_other):
time = re.search('\\s?from\\s(\\d{1,2}):(\\d{2})\\sto\\s(\\d{1,2}):(\\d{2})[^0-9]+', req)
if (time and time.group(1)):
job.hour.during(time.group(1), time.group(3))
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
elif (not skip_other):
job.minute.on(0)
job.hour.on(10)
skip_other = True
if ((not skip_other) and req.strip().startswith('day')):
req = req.replace('day', '', 1)
if req.strip().startswith('at '):
time = re.search("[^0-9](\\d{1,2})\\so'clock", req)
if (time and time.group(1)):
job.minute.on(0)
job.hour.on(time.group(1))
skip_other = True
if (not skip_other):
time = re.search('[^0-9](\\d{1,2}):(\\d{2})[^0-9]', req)
if (time and time.group(1) and time.group(2)):
job.minute.on(time.group(2))
job.hour.on(time.group(1))
skip_other = True
if ((not skip_other) and req.strip().startswith('hourly')):
job.minute.on(0)
skip_other = True
if ((not skip_other) and req.strip().startswith('every')):
req = req.replace('every', '', 1)
hour = re.search('\\s?(\\d+)\\s+(hour|hours|hs|h)', req)
if (hour and hour.group(1)):
job.hour.every(hour.group(1))
skip_other = True
else:
hour = re.search('^\\s?([a-zA-Z]+?)\\s(hours|hour)', req)
if (hour and hour.group(1)):
h = text2int(hour.group(1))
job.hour.every(h)
job.minute.on(0)
skip_other = True
elif ((not skip_other) and req.strip().startswith('from')):
time = re.search('^from\\s(\\d{1,2}):(\\d{2})\\sto\\s(\\d{1,2}):(\\d{2})[^0-9]+', req.strip())
if (time and time.group(1)):
job.hour.during(time.group(1), time.group(3))
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
elif (not skip_other):
job.minute.on(0)
job.hour.on(10)
skip_other = True
print(job)
else:
pass
if ((not skip_other) and req.strip().startswith('with message')):
job.minute.on(0)
job.hour.on(10)
skip_other = True
if ((not skip_other) and req.strip().startswith('hour')):
job.minute.on(0)
skip_other = True
if ((not skip_other) and req.strip().startswith('minute')):
job.minute.every(1)
skip_other = True
if (not skip_other):
hour = re.search('^(\\d+)\\s+(hour|hours|hs|h)', req.strip())
if (hour and hour.group(1)):
job.hour.every(hour.group(1))
skip_other = True
else:
hour = re.search('^([a-zA-Z]+?)\\s(hours|hour)', req.strip())
if (hour and hour.group(1)):
h = text2int(hour.group(1))
job.hour.every(h)
job.minute.on(0)
skip_other = True
if (not skip_other):
day = re.search('^(\\d+)\\s+(days|day|d)', req.strip())
if (day and day.group(1)):
req = req.replace(day.group(0), '', 1)
if req.strip().startswith('at '):
req = req.replace('at', '', 1)
time = re.search("^(\\d{1,2})\\so'clock", req.strip())
if (time and time.group(1)):
job.dow.every(day.group(1))
job.minute.on(0)
job.hour.on(time.group(1))
skip_other = True
if ((not skip_other) and req.strip().startswith('hourly')):
job.minute.on(0)
job.dow.every(day.group(1))
skip_other = True
if (not skip_other):
time = re.search('^(\\d{1,2}):(\\d{2})[^0-9]', req.strip())
if (time and time.group(1) and time.group(2)):
job.dom.every(day.group(1))
job.minute.on(time.group(2))
job.hour.on(time.group(1))
skip_other = True
if ((not skip_other) and req.strip().startswith('from')):
req = req.replace('from', '', 1)
time = re.search('^(\\d{1,2}):(\\d{2})\\sto\\s(\\d{1,2}):(\\d{2})[^0-9]+?', req.strip())
if (time and time.group(1)):
job.hour.during(time.group(1), time.group(3))
job.dom.every(day.group(1))
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
elif (not skip_other):
day = re.search('^\\s?([a-zA-Z]+?)\\s(days|day)', req)
if (day and day.group(1)):
d = text2int(day.group(1))
req = req.replace(day.group(0), '', 1)
if ((not skip_other) and req.strip().startswith('from')):
time = re.search('^from\\s(\\d{1,2}):(\\d{2})\\sto\\s(\\d{1,2}):(\\d{2})[^0-9]+?', req.strip())
if (time and time.group(1)):
job.hour.during(time.group(1), time.group(3))
job.dom.every(d)
req = req.replace(day.group(0), '', 1)
req = req.replace(time.group(0), '', 1)
if req.strip().startswith('every'):
mins = re.search('^every\\s(\\d{1,2})[^0-9]+?(min|minute|minutes)', req.strip())
if (mins and mins.group(0)):
job.minute.during(time.group(2), time.group(4)).every(mins.group(1))
skip_other = True
else:
mins = re.search('^every\\s([^0-9\\s]+)\\s?(min|minute|minutes)', req.strip())
if (mins and mins.group(1)):
m = text2int(mins.group(1))
job.minute.during(time.group(2), time.group(4)).every(m)
skip_other = True
else:
raise
else:
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
else:
job.dom.every(d)
job.minute.on(0)
job.hour.on(10)
skip_other = True
else:
print(req)
raise
if (not skip_other):
min = re.search('\\s?(\\d+)\\s+(minutes|min|minute|m)', req)
if (min and min.group(1)):
job.minute.every(min.group(1))
else:
min = re.search('^\\s?([a-zA-Z]+?)\\s(minutes|min|mins)', req)
if (min and min.group(1)):
m = text2int(min.group(1))
job.minute.every(m)
cron.write()
logger.info(('adding cronjob %s' % cron.render()))
response = ('ok, cronjob added %s' % job.render())
if (self.req_from == 'jabber'):
todo = {'text': response, 'jmsg': response, 'type': 'response'}
self.response = todo
if (self.req_from == 'julius'):
from core.broadcast import say, bang
bang()
todo = {'say': response, 'text': response, 'type': 'response'}
self.response = say(self.request.replace('say', '').upper())
return self.response | -439,311,795,631,872,600 | default method | core/brain/remind/me/every/reaction.py | run | vsilent/smarty-bot | python | @classmethod
def run(self):
sess = Session()
sender = self.req_obj.get('sender', )
if sender:
email = sender.split('/')[0]
profile = sess.query(Profile).filter((Profile.email == email)).one()
cron = CronTab(getuser())
DAYS = {'sunday': 'SUN', 'monday': 'MON', 'tuesday': 'TUE', 'wednesday': 'WED', 'thursday': 'THU', 'friday': 'FRI', 'saturday': 'SAT'}
req = self.request.replace('remind me every', , 1)
m = re.search('\\s+?(by|with|to|of)\\s+message\\s+?(.+)', req)
if (m and m.group(2)):
msg = m.group(2)
else:
m = re.search('\\s+?(by|with|to|of)\\s+?(.+)', req)
if (m and m.group(2)):
msg = m.group(2)
else:
msg = 'This a reminder. Unfortunatelly I could not parse your message, but I guess you can remember what you wanted to do.'
job = cron.new(command=('/usr/bin/python %s/core/cron/cronjob.py --uuid=%s --cmd="send jabber message" --arguments="%s"' % (ROBOT_DIR, profile.uuid, msg.replace('"', ))))
skip_other = False
if req.strip().startswith('month'):
job.minute.on(0)
job.hour.on(0)
job.dom.on(1)
skip_other = True
if req.strip().startswith('week'):
job.minute.on(0)
job.hour.on(0)
job.dow.on(0)
skip_other = True
if req.strip().startswith('year'):
job.dom.on(0)
job.month.on(0)
skip_other = True
dow = False
for (dw, cron_day) in DAYS.items():
if req.strip().lower().startswith(dw):
dow = True
break
if dow:
job.dow.on(cron_day.upper())
r = re.split(('^' + dw), req.strip(), flags=re.IGNORECASE)
if (r and (len(r) == 2)):
req = r.pop()
if req.strip().startswith('at '):
time = re.search("[^0-9](\\d{1,2})\\so'clock", req)
if (time and time.group(1)):
job.minute.on(0)
job.hour.on(time.group(1))
skip_other = True
if (not skip_other):
time = re.search('[^0-9](\\d{1,2}):(\\d{2})[^0-9]', req)
if (time and time.group(1) and time.group(2)):
job.minute.on(time.group(2))
job.hour.on(time.group(1))
skip_other = True
if ((not skip_other) and req.strip().startswith('hourly')):
job.minute.on(0)
skip_other = True
elif (not skip_other):
time = re.search('\\s?from\\s(\\d{1,2}):(\\d{2})\\sto\\s(\\d{1,2}):(\\d{2})[^0-9]+', req)
if (time and time.group(1)):
job.hour.during(time.group(1), time.group(3))
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
elif (not skip_other):
job.minute.on(0)
job.hour.on(10)
skip_other = True
if ((not skip_other) and req.strip().startswith('day')):
req = req.replace('day', , 1)
if req.strip().startswith('at '):
time = re.search("[^0-9](\\d{1,2})\\so'clock", req)
if (time and time.group(1)):
job.minute.on(0)
job.hour.on(time.group(1))
skip_other = True
if (not skip_other):
time = re.search('[^0-9](\\d{1,2}):(\\d{2})[^0-9]', req)
if (time and time.group(1) and time.group(2)):
job.minute.on(time.group(2))
job.hour.on(time.group(1))
skip_other = True
if ((not skip_other) and req.strip().startswith('hourly')):
job.minute.on(0)
skip_other = True
if ((not skip_other) and req.strip().startswith('every')):
req = req.replace('every', , 1)
hour = re.search('\\s?(\\d+)\\s+(hour|hours|hs|h)', req)
if (hour and hour.group(1)):
job.hour.every(hour.group(1))
skip_other = True
else:
hour = re.search('^\\s?([a-zA-Z]+?)\\s(hours|hour)', req)
if (hour and hour.group(1)):
h = text2int(hour.group(1))
job.hour.every(h)
job.minute.on(0)
skip_other = True
elif ((not skip_other) and req.strip().startswith('from')):
time = re.search('^from\\s(\\d{1,2}):(\\d{2})\\sto\\s(\\d{1,2}):(\\d{2})[^0-9]+', req.strip())
if (time and time.group(1)):
job.hour.during(time.group(1), time.group(3))
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
elif (not skip_other):
job.minute.on(0)
job.hour.on(10)
skip_other = True
print(job)
else:
pass
if ((not skip_other) and req.strip().startswith('with message')):
job.minute.on(0)
job.hour.on(10)
skip_other = True
if ((not skip_other) and req.strip().startswith('hour')):
job.minute.on(0)
skip_other = True
if ((not skip_other) and req.strip().startswith('minute')):
job.minute.every(1)
skip_other = True
if (not skip_other):
hour = re.search('^(\\d+)\\s+(hour|hours|hs|h)', req.strip())
if (hour and hour.group(1)):
job.hour.every(hour.group(1))
skip_other = True
else:
hour = re.search('^([a-zA-Z]+?)\\s(hours|hour)', req.strip())
if (hour and hour.group(1)):
h = text2int(hour.group(1))
job.hour.every(h)
job.minute.on(0)
skip_other = True
if (not skip_other):
day = re.search('^(\\d+)\\s+(days|day|d)', req.strip())
if (day and day.group(1)):
req = req.replace(day.group(0), , 1)
if req.strip().startswith('at '):
req = req.replace('at', , 1)
time = re.search("^(\\d{1,2})\\so'clock", req.strip())
if (time and time.group(1)):
job.dow.every(day.group(1))
job.minute.on(0)
job.hour.on(time.group(1))
skip_other = True
if ((not skip_other) and req.strip().startswith('hourly')):
job.minute.on(0)
job.dow.every(day.group(1))
skip_other = True
if (not skip_other):
time = re.search('^(\\d{1,2}):(\\d{2})[^0-9]', req.strip())
if (time and time.group(1) and time.group(2)):
job.dom.every(day.group(1))
job.minute.on(time.group(2))
job.hour.on(time.group(1))
skip_other = True
if ((not skip_other) and req.strip().startswith('from')):
req = req.replace('from', , 1)
time = re.search('^(\\d{1,2}):(\\d{2})\\sto\\s(\\d{1,2}):(\\d{2})[^0-9]+?', req.strip())
if (time and time.group(1)):
job.hour.during(time.group(1), time.group(3))
job.dom.every(day.group(1))
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
elif (not skip_other):
day = re.search('^\\s?([a-zA-Z]+?)\\s(days|day)', req)
if (day and day.group(1)):
d = text2int(day.group(1))
req = req.replace(day.group(0), , 1)
if ((not skip_other) and req.strip().startswith('from')):
time = re.search('^from\\s(\\d{1,2}):(\\d{2})\\sto\\s(\\d{1,2}):(\\d{2})[^0-9]+?', req.strip())
if (time and time.group(1)):
job.hour.during(time.group(1), time.group(3))
job.dom.every(d)
req = req.replace(day.group(0), , 1)
req = req.replace(time.group(0), , 1)
if req.strip().startswith('every'):
mins = re.search('^every\\s(\\d{1,2})[^0-9]+?(min|minute|minutes)', req.strip())
if (mins and mins.group(0)):
job.minute.during(time.group(2), time.group(4)).every(mins.group(1))
skip_other = True
else:
mins = re.search('^every\\s([^0-9\\s]+)\\s?(min|minute|minutes)', req.strip())
if (mins and mins.group(1)):
m = text2int(mins.group(1))
job.minute.during(time.group(2), time.group(4)).every(m)
skip_other = True
else:
raise
else:
job.minute.during(time.group(2), time.group(4)).every(5)
skip_other = True
else:
job.dom.every(d)
job.minute.on(0)
job.hour.on(10)
skip_other = True
else:
print(req)
raise
if (not skip_other):
min = re.search('\\s?(\\d+)\\s+(minutes|min|minute|m)', req)
if (min and min.group(1)):
job.minute.every(min.group(1))
else:
min = re.search('^\\s?([a-zA-Z]+?)\\s(minutes|min|mins)', req)
if (min and min.group(1)):
m = text2int(min.group(1))
job.minute.every(m)
cron.write()
logger.info(('adding cronjob %s' % cron.render()))
response = ('ok, cronjob added %s' % job.render())
if (self.req_from == 'jabber'):
todo = {'text': response, 'jmsg': response, 'type': 'response'}
self.response = todo
if (self.req_from == 'julius'):
from core.broadcast import say, bang
bang()
todo = {'say': response, 'text': response, 'type': 'response'}
self.response = say(self.request.replace('say', ).upper())
return self.response |
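A compact python-crontab sketch of the job-building calls the parser above relies on; the command string and schedule are illustrative only.

from getpass import getuser
from crontab import CronTab

cron = CronTab(getuser())
job = cron.new(command='/usr/bin/python /path/to/core/cron/cronjob.py '
                       '--cmd="send jabber message" --arguments="stand up"')

job.minute.on(0)      # the "every day at 10 o'clock" branch above
job.hour.on(10)
# job.dow.on('MON')   # the weekday branch would pin the job to a day instead

print(job.render())   # 0 10 * * * /usr/bin/python ...
# cron.write()        # left commented out so the demo does not install a real cron entry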
async def status_by_coordinates(self, latitude: float, longitude: float) -> Dict[(str, Any)]:
"Get symptom data for the location nearest to the user's lat/lon."
return (await self.nearest_by_coordinates(latitude, longitude)) | -322,262,233,440,783,360 | Get symptom data for the location nearest to the user's lat/lon. | pyflunearyou/user.py | status_by_coordinates | bachya/pyflunearyou | python | async def status_by_coordinates(self, latitude: float, longitude: float) -> Dict[(str, Any)]:
return (await self.nearest_by_coordinates(latitude, longitude)) |
async def status_by_zip(self, zip_code: str) -> Dict[(str, Any)]:
'Get symptom data for the provided ZIP code.'
try:
location = next((d for d in (await self.user_reports()) if (d['zip'] == zip_code)))
except StopIteration:
return {}
return (await self.status_by_coordinates(float(location['latitude']), float(location['longitude']))) | -159,002,280,710,509,020 | Get symptom data for the provided ZIP code. | pyflunearyou/user.py | status_by_zip | bachya/pyflunearyou | python | async def status_by_zip(self, zip_code: str) -> Dict[(str, Any)]:
try:
location = next((d for d in (await self.user_reports()) if (d['zip'] == zip_code)))
except StopIteration:
return {}
return (await self.status_by_coordinates(float(location['latitude']), float(location['longitude']))) |
def query(self, document_number):
'Query the "attachment page" endpoint and set the results to self.response.\n\n :param document_number: The internal PACER document ID for the item.\n :return: a request response object\n '
assert (self.session is not None), 'session attribute of DocketReport cannot be None.'
document_number = ((document_number[:3] + '0') + document_number[4:])
url = (self.url + document_number)
logger.info((u'Querying the attachment page endpoint at URL: %s' % url))
self.response = self.session.get(url)
self.parse() | 54,769,705,627,306,050 | Query the "attachment page" endpoint and set the results to self.response.
:param document_number: The internal PACER document ID for the item.
:return: a request response object | juriscraper/pacer/attachment_page.py | query | johnhawkinson/juriscraper | python | def query(self, document_number):
'Query the "attachment page" endpoint and set the results to self.response.\n\n :param document_number: The internal PACER document ID for the item.\n :return: a request response object\n '
assert (self.session is not None), 'session attribute of DocketReport cannot be None.'
document_number = ((document_number[:3] + '0') + document_number[4:])
url = (self.url + document_number)
logger.info((u'Querying the attachment page endpoint at URL: %s' % url))
self.response = self.session.get(url)
self.parse() |
@property
def data(self):
"Get data back from the query for the matching document entry.\n\n :return: If lookup fails, an empty dict. Else, a dict containing the\n following fields:\n - document_number: The document number we're working with.\n - page_count: The number of pages of the item\n - pacer_doc_id: The doc ID for the main document.\n - attachments: A list of attached items with the following fields:\n - attachment_number: The attachment number.\n - description: A description of the item.\n - page_count: The number of pages.\n - pacer_doc_id: The document ID for the attachment (a str).\n\n See the JSON objects in the tests for more examples.\n "
rows = self.tree.xpath('//tr[.//a]')
if (not rows):
logger.info('No documents found on attachment page.')
return {}
first_row = rows.pop(0)
result = {'document_number': self._get_document_number(first_row), 'page_count': self._get_page_count_from_tr(first_row), 'pacer_doc_id': self._get_pacer_doc_id(first_row), 'pacer_case_id': self._get_pacer_case_id(), 'attachments': []}
for row in rows:
result['attachments'].append({'attachment_number': self._get_attachment_number(row), 'description': self._get_description_from_tr(row), 'page_count': self._get_page_count_from_tr(row), 'pacer_doc_id': self._get_pacer_doc_id(row)})
return result | 8,756,024,237,014,436,000 | Get data back from the query for the matching document entry.
:return: If lookup fails, an empty dict. Else, a dict containing the
following fields:
- document_number: The document number we're working with.
- page_count: The number of pages of the item
- pacer_doc_id: The doc ID for the main document.
- attachments: A list of attached items with the following fields:
- attachment_number: The attachment number.
- description: A description of the item.
- page_count: The number of pages.
- pacer_doc_id: The document ID for the attachment (a str).
See the JSON objects in the tests for more examples. | juriscraper/pacer/attachment_page.py | data | johnhawkinson/juriscraper | python | @property
def data(self):
"Get data back from the query for the matching document entry.\n\n :return: If lookup fails, an empty dict. Else, a dict containing the\n following fields:\n - document_number: The document number we're working with.\n - page_count: The number of pages of the item\n - pacer_doc_id: The doc ID for the main document.\n - attachments: A list of attached items with the following fields:\n - attachment_number: The attachment number.\n - description: A description of the item.\n - page_count: The number of pages.\n - pacer_doc_id: The document ID for the attachment (a str).\n\n See the JSON objects in the tests for more examples.\n "
rows = self.tree.xpath('//tr[.//a]')
if (not rows):
logger.info('No documents found on attachment page.')
return {}
first_row = rows.pop(0)
result = {'document_number': self._get_document_number(first_row), 'page_count': self._get_page_count_from_tr(first_row), 'pacer_doc_id': self._get_pacer_doc_id(first_row), 'pacer_case_id': self._get_pacer_case_id(), 'attachments': []}
for row in rows:
result['attachments'].append({'attachment_number': self._get_attachment_number(row), 'description': self._get_description_from_tr(row), 'page_count': self._get_page_count_from_tr(row), 'pacer_doc_id': self._get_pacer_doc_id(row)})
return result |
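A usage sketch under stated assumptions: the constructor signature (court id plus an authenticated PACER session) mirrors other juriscraper reports and is not shown in this excerpt, the PacerSession import path is assumed, and the credentials and document id are placeholders.

from juriscraper.pacer.attachment_page import AttachmentPage
from juriscraper.pacer.http import PacerSession   # import path assumed

session = PacerSession(username="PACER_USER", password="PACER_PASS")   # placeholders
# session.login()   # may be needed depending on the juriscraper version

report = AttachmentPage("cand", session)   # court id + session, assumed signature
report.query("035019837741")               # placeholder doc1 id; the fourth digit is forced to 0
data = report.data
print(data["document_number"], len(data["attachments"]))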
def _get_document_number(self, row):
"Return the document number for an item.\n\n In district court attachment pages, this is easy to extract with an\n XPath. In bankruptcy cases, it's simply not there.\n "
if self.is_bankruptcy:
return None
else:
return int(row.xpath('.//a/text()')[0].strip()) | 6,541,885,969,443,189,000 | Return the document number for an item.
In district court attachment pages, this is easy to extract with an
XPath. In bankruptcy cases, it's simply not there. | juriscraper/pacer/attachment_page.py | _get_document_number | johnhawkinson/juriscraper | python | def _get_document_number(self, row):
"Return the document number for an item.\n\n In district court attachment pages, this is easy to extract with an\n XPath. In bankruptcy cases, it's simply not there.\n "
if self.is_bankruptcy:
return None
else:
return int(row.xpath('.//a/text()')[0].strip()) |
def _get_attachment_number(self, row):
'Return the attachment number for an item.\n\n In district courts, this can be easily extracted. In bankruptcy courts,\n you must extract it, then subtract 1 from the value since these are\n tallied and include the main document.\n '
number = int(row.xpath('.//a/text()')[0].strip())
if self.is_bankruptcy:
return (number - 1)
else:
return number | -6,560,878,718,265,846,000 | Return the attachment number for an item.
In district courts, this can be easily extracted. In bankruptcy courts,
you must extract it, then subtract 1 from the value since these are
tallied and include the main document. | juriscraper/pacer/attachment_page.py | _get_attachment_number | johnhawkinson/juriscraper | python | def _get_attachment_number(self, row):
'Return the attachment number for an item.\n\n In district courts, this can be easily extracted. In bankruptcy courts,\n you must extract it, then subtract 1 from the value since these are\n tallied and include the main document.\n '
number = int(row.xpath('.//a/text()')[0].strip())
if self.is_bankruptcy:
return (number - 1)
else:
return number |
def _get_description_from_tr(self, row):
'Get the description from the row'
if (not self.is_bankruptcy):
index = 2
else:
index = 3
description_text_nodes = row.xpath(('./td[%s]//text()' % index))
if (len(description_text_nodes) == 0):
return u''
else:
description = description_text_nodes[0].strip()
return force_unicode(description) | 7,887,229,052,828,198,000 | Get the description from the row | juriscraper/pacer/attachment_page.py | _get_description_from_tr | johnhawkinson/juriscraper | python | def _get_description_from_tr(self, row):
if (not self.is_bankruptcy):
index = 2
else:
index = 3
description_text_nodes = row.xpath(('./td[%s]//text()' % index))
if (len(description_text_nodes) == 0):
return u
else:
description = description_text_nodes[0].strip()
return force_unicode(description) |
@staticmethod
def _get_page_count_from_tr(tr):
'Take a row from the attachment table and return the page count as an\n int extracted from the cell specified by index.\n '
pg_cnt_str_nodes = tr.xpath('./td[contains(., "page")]/text()')
if (len(pg_cnt_str_nodes) == 0):
return None
else:
for pg_cnt_str_node in pg_cnt_str_nodes:
try:
pg_cnt_str = pg_cnt_str_node.strip()
return int(pg_cnt_str.split()[0])
except ValueError:
continue | 6,936,813,819,514,453,000 | Take a row from the attachment table and return the page count as an
int extracted from the cell specified by index. | juriscraper/pacer/attachment_page.py | _get_page_count_from_tr | johnhawkinson/juriscraper | python | @staticmethod
def _get_page_count_from_tr(tr):
'Take a row from the attachment table and return the page count as an\n int extracted from the cell specified by index.\n '
pg_cnt_str_nodes = tr.xpath('./td[contains(., "page")]/text()')
if (len(pg_cnt_str_nodes) == 0):
return None
else:
for pg_cnt_str_node in pg_cnt_str_nodes:
try:
pg_cnt_str = pg_cnt_str_node.strip()
return int(pg_cnt_str.split()[0])
except ValueError:
continue |
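A self-contained lxml demo of the page-count XPath above, run against a hand-written row shaped loosely like a PACER attachment table entry.

from lxml import html

doc = html.fromstring(
    "<table><tr><td><a href='/doc1/035019837742'>1</a></td>"
    "<td>Exhibit A</td><td>14 pages</td></tr></table>"
)
tr = doc.xpath("//tr")[0]

# Same XPath the helper above runs against each row.
pg_cnt_str_nodes = tr.xpath('./td[contains(., "page")]/text()')
print(pg_cnt_str_nodes)                      # ['14 pages']
print(int(pg_cnt_str_nodes[0].split()[0]))   # 14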
@staticmethod
def _get_pacer_doc_id(row):
'Take in a row from the attachment table and return the pacer_doc_id\n for the item in that row. Return None if the ID cannot be found.\n '
try:
url = row.xpath(u'.//a')[0]
except IndexError:
return None
else:
doc1_url = url.xpath('./@href')[0]
return get_pacer_doc_id_from_doc1_url(doc1_url) | -902,098,361,262,620,000 | Take in a row from the attachment table and return the pacer_doc_id
for the item in that row. Return None if the ID cannot be found. | juriscraper/pacer/attachment_page.py | _get_pacer_doc_id | johnhawkinson/juriscraper | python | @staticmethod
def _get_pacer_doc_id(row):
'Take in a row from the attachment table and return the pacer_doc_id\n for the item in that row. Return None if the ID cannot be found.\n '
try:
url = row.xpath(u'.//a')[0]
except IndexError:
return None
else:
doc1_url = url.xpath('./@href')[0]
return get_pacer_doc_id_from_doc1_url(doc1_url) |
def _get_pacer_case_id(self):
'Get the pacer_case_id value by inspecting the HTML\n\n :returns str: The pacer_case_id value\n '
urls = self.tree.xpath('//a')
for url in urls:
try:
onclick = url.xpath('./@onclick')[0]
except IndexError:
continue
else:
if ('goDLS' not in onclick):
continue
go_dls_parts = reverse_goDLS_function(onclick)
return go_dls_parts['caseid']
input_els = self.tree.xpath('//input')
for input_el in input_els:
try:
onclick = input_el.xpath('./@onclick')[0]
except IndexError:
continue
else:
m = re.search('[?&]caseid=(\\d+)', onclick, flags=re.I)
if m:
return m.group(1) | -7,252,420,284,333,202,000 | Get the pacer_case_id value by inspecting the HTML
:returns str: The pacer_case_id value | juriscraper/pacer/attachment_page.py | _get_pacer_case_id | johnhawkinson/juriscraper | python | def _get_pacer_case_id(self):
'Get the pacer_case_id value by inspecting the HTML\n\n :returns str: The pacer_case_id value\n '
urls = self.tree.xpath('//a')
for url in urls:
try:
onclick = url.xpath('./@onclick')[0]
except IndexError:
continue
else:
if ('goDLS' not in onclick):
continue
go_dls_parts = reverse_goDLS_function(onclick)
return go_dls_parts['caseid']
input_els = self.tree.xpath('//input')
for input_el in input_els:
try:
onclick = input_el.xpath('./@onclick')[0]
except IndexError:
continue
else:
m = re.search('[?&]caseid=(\\d+)', onclick, flags=re.I)
if m:
return m.group(1) |
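A standalone illustration of the two fallbacks used above, with invented onclick strings; juriscraper's reverse_goDLS_function is replaced here by a plain regex, on the assumption that the case id is the second goDLS argument:

import re

onclick_link = "goDLS('/doc1/123456789','987654','1','','','1','','');return(false);"
onclick_input = "parent.location='/cgi-bin/qryDocument.pl?caseid=987654'"

# Anchor-style onclick: pull the second goDLS argument.
m = re.search(r"goDLS\('[^']*','(\d+)'", onclick_link)
print(m.group(1))  # 987654

# Input-style onclick: fall back to a ?caseid= query parameter.
m = re.search(r'[?&]caseid=(\d+)', onclick_input, flags=re.I)
print(m.group(1))  # 987654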
def import_data_stage02_physiology_pairWiseTest_add(self, filename):
'table adds'
data = base_importData()
data.read_csv(filename)
data.format_data()
self.add_data_stage02_physiology_pairWiseTest(data.data)
data.clear_data() | -3,367,785,456,156,156,000 | table adds | SBaaS_COBRA/stage02_physiology_pairWiseTest_io.py | import_data_stage02_physiology_pairWiseTest_add | dmccloskey/SBaaS_COBRA | python | def import_data_stage02_physiology_pairWiseTest_add(self, filename):
data = base_importData()
data.read_csv(filename)
data.format_data()
self.add_data_stage02_physiology_pairWiseTest(data.data)
data.clear_data() |
def export_dataStage02PhysiologyPairWiseTest_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTest(analysis_id_I)
data1_keys = ['analysis_id', 'simulation_id_1', 'simulation_id_2', 'rxn_id', 'flux_units', 'test_description']
data1_nestkeys = ['analysis_id']
data1_keymap = {'ydata': 'pvalue_negLog10', 'xdata': 'fold_change', 'serieslabel': '', 'featureslabel': 'rxn_id'}
dataobject_O = [{'data': data_O, 'datakeys': data1_keys, 'datanestkeys': data1_nestkeys}]
formtileparameters_O = {'tileheader': 'Filter menu', 'tiletype': 'html', 'tileid': 'filtermenu1', 'rowid': 'row1', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-4'}
formparameters_O = {'htmlid': 'filtermenuform1', 'htmltype': 'form_01', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}, 'formresetbuttonidtext': {'id': 'reset1', 'text': 'reset'}, 'formupdatebuttonidtext': {'id': 'update1', 'text': 'update'}}
formtileparameters_O.update(formparameters_O)
svgparameters_O = {'svgtype': 'volcanoplot2d_01', 'svgkeymap': [data1_keymap], 'svgid': 'svg1', 'svgmargin': {'top': 50, 'right': 50, 'bottom': 50, 'left': 50}, 'svgwidth': 500, 'svgheight': 350, 'svgx1axislabel': 'Fold Change [geometric]', 'svgy1axislabel': 'Probability [-log10(P)]'}
svgtileparameters_O = {'tileheader': 'Volcano plot', 'tiletype': 'svg', 'tileid': 'tile2', 'rowid': 'row1', 'colid': 'col2', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-8'}
svgtileparameters_O.update(svgparameters_O)
tableparameters_O = {'tabletype': 'responsivetable_01', 'tableid': 'table1', 'tableclass': 'table table-condensed table-hover', 'tablefilters': None, 'tableformtileid': 'filtermenu1', 'tableresetbuttonid': 'reset1', 'tablesubmitbuttonid': 'submit1'}
tabletileparameters_O = {'tileheader': 'pairWiseTest', 'tiletype': 'table', 'tileid': 'tile3', 'rowid': 'row2', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-12'}
tabletileparameters_O.update(tableparameters_O)
parametersobject_O = [formtileparameters_O, svgtileparameters_O, tabletileparameters_O]
tile2datamap_O = {'filtermenu1': [0], 'tile2': [0], 'tile3': [0]}
filtermenuobject_O = None
ddtutilities = ddt_container(parameters_I=parametersobject_O, data_I=dataobject_O, tile2datamap_I=tile2datamap_O, filtermenu_I=filtermenuobject_O)
if (data_dir_I == 'tmp'):
filename_str = (self.settings['visualization_data'] + '/tmp/ddt_data.js')
elif (data_dir_I == 'data_json'):
data_json_O = ddtutilities.get_allObjects_js()
return data_json_O
with open(filename_str, 'w') as file:
file.write(ddtutilities.get_allObjects()) | -4,705,089,115,005,355,000 | Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value) | SBaaS_COBRA/stage02_physiology_pairWiseTest_io.py | export_dataStage02PhysiologyPairWiseTest_js | dmccloskey/SBaaS_COBRA | python | def export_dataStage02PhysiologyPairWiseTest_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTest(analysis_id_I)
data1_keys = ['analysis_id', 'simulation_id_1', 'simulation_id_2', 'rxn_id', 'flux_units', 'test_description']
data1_nestkeys = ['analysis_id']
data1_keymap = {'ydata': 'pvalue_negLog10', 'xdata': 'fold_change', 'serieslabel': '', 'featureslabel': 'rxn_id'}
dataobject_O = [{'data': data_O, 'datakeys': data1_keys, 'datanestkeys': data1_nestkeys}]
formtileparameters_O = {'tileheader': 'Filter menu', 'tiletype': 'html', 'tileid': 'filtermenu1', 'rowid': 'row1', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-4'}
formparameters_O = {'htmlid': 'filtermenuform1', 'htmltype': 'form_01', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}, 'formresetbuttonidtext': {'id': 'reset1', 'text': 'reset'}, 'formupdatebuttonidtext': {'id': 'update1', 'text': 'update'}}
formtileparameters_O.update(formparameters_O)
svgparameters_O = {'svgtype': 'volcanoplot2d_01', 'svgkeymap': [data1_keymap], 'svgid': 'svg1', 'svgmargin': {'top': 50, 'right': 50, 'bottom': 50, 'left': 50}, 'svgwidth': 500, 'svgheight': 350, 'svgx1axislabel': 'Fold Change [geometric]', 'svgy1axislabel': 'Probability [-log10(P)]'}
svgtileparameters_O = {'tileheader': 'Volcano plot', 'tiletype': 'svg', 'tileid': 'tile2', 'rowid': 'row1', 'colid': 'col2', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-8'}
svgtileparameters_O.update(svgparameters_O)
tableparameters_O = {'tabletype': 'responsivetable_01', 'tableid': 'table1', 'tableclass': 'table table-condensed table-hover', 'tablefilters': None, 'tableformtileid': 'filtermenu1', 'tableresetbuttonid': 'reset1', 'tablesubmitbuttonid': 'submit1'}
tabletileparameters_O = {'tileheader': 'pairWiseTest', 'tiletype': 'table', 'tileid': 'tile3', 'rowid': 'row2', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-12'}
tabletileparameters_O.update(tableparameters_O)
parametersobject_O = [formtileparameters_O, svgtileparameters_O, tabletileparameters_O]
tile2datamap_O = {'filtermenu1': [0], 'tile2': [0], 'tile3': [0]}
filtermenuobject_O = None
ddtutilities = ddt_container(parameters_I=parametersobject_O, data_I=dataobject_O, tile2datamap_I=tile2datamap_O, filtermenu_I=filtermenuobject_O)
if (data_dir_I == 'tmp'):
filename_str = (self.settings['visualization_data'] + '/tmp/ddt_data.js')
elif (data_dir_I == 'data_json'):
data_json_O = ddtutilities.get_allObjects_js()
return data_json_O
with open(filename_str, 'w') as file:
file.write(ddtutilities.get_allObjects()) |
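The keymap above plots precomputed fold_change against pvalue_negLog10. As a generic sketch of how those two volcano-plot quantities are typically derived (NumPy/SciPy; the sample fluxes are invented and this is not part of the SBaaS pipeline, which computes them upstream):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
flux_a = rng.normal(1.0, 0.1, size=6)  # fluxes under simulation 1
flux_b = rng.normal(1.5, 0.1, size=6)  # fluxes under simulation 2

fold_change = np.log2(flux_b.mean() / flux_a.mean())   # log2 ratio of mean fluxes
pvalue = stats.ttest_ind(flux_a, flux_b).pvalue        # two-sample t-test
pvalue_negLog10 = -np.log10(pvalue)
print(fold_change, pvalue_negLog10)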
def export_dataStage02PhysiologyPairWiseTestMetabolites_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTestMetabolites(analysis_id_I)
data1_keys = ['analysis_id', 'simulation_id_1', 'simulation_id_2', 'met_id', 'flux_units', 'test_description']
data1_nestkeys = ['analysis_id']
data1_keymap = {'ydata': 'pvalue_negLog10', 'xdata': 'fold_change', 'serieslabel': '', 'featureslabel': 'met_id'}
dataobject_O = [{'data': data_O, 'datakeys': data1_keys, 'datanestkeys': data1_nestkeys}]
formtileparameters_O = {'tileheader': 'Filter menu', 'tiletype': 'html', 'tileid': 'filtermenu1', 'rowid': 'row1', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-4'}
formparameters_O = {'htmlid': 'filtermenuform1', 'htmltype': 'form_01', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}, 'formresetbuttonidtext': {'id': 'reset1', 'text': 'reset'}, 'formupdatebuttonidtext': {'id': 'update1', 'text': 'update'}}
formtileparameters_O.update(formparameters_O)
svgparameters_O = {'svgtype': 'volcanoplot2d_01', 'svgkeymap': [data1_keymap], 'svgid': 'svg1', 'svgmargin': {'top': 50, 'right': 50, 'bottom': 50, 'left': 50}, 'svgwidth': 500, 'svgheight': 350, 'svgx1axislabel': 'Fold Change [geometric]', 'svgy1axislabel': 'Probability [-log10(P)]'}
svgtileparameters_O = {'tileheader': 'Volcano plot', 'tiletype': 'svg', 'tileid': 'tile2', 'rowid': 'row1', 'colid': 'col2', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-8'}
svgtileparameters_O.update(svgparameters_O)
tableparameters_O = {'tabletype': 'responsivetable_01', 'tableid': 'table1', 'tableclass': 'table table-condensed table-hover', 'tablefilters': None, 'tableformtileid': 'filtermenu1', 'tableresetbuttonid': 'reset1', 'tablesubmitbuttonid': 'submit1'}
tabletileparameters_O = {'tileheader': 'pairWiseTest', 'tiletype': 'table', 'tileid': 'tile3', 'rowid': 'row2', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-12'}
tabletileparameters_O.update(tableparameters_O)
parametersobject_O = [formtileparameters_O, svgtileparameters_O, tabletileparameters_O]
tile2datamap_O = {'filtermenu1': [0], 'tile2': [0], 'tile3': [0]}
filtermenuobject_O = None
ddtutilities = ddt_container(parameters_I=parametersobject_O, data_I=dataobject_O, tile2datamap_I=tile2datamap_O, filtermenu_I=filtermenuobject_O)
if (data_dir_I == 'tmp'):
filename_str = (self.settings['visualization_data'] + '/tmp/ddt_data.js')
elif (data_dir_I == 'data_json'):
data_json_O = ddtutilities.get_allObjects_js()
return data_json_O
with open(filename_str, 'w') as file:
file.write(ddtutilities.get_allObjects()) | 4,520,182,110,218,128,400 | Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value) | SBaaS_COBRA/stage02_physiology_pairWiseTest_io.py | export_dataStage02PhysiologyPairWiseTestMetabolites_js | dmccloskey/SBaaS_COBRA | python | def export_dataStage02PhysiologyPairWiseTestMetabolites_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTestMetabolites(analysis_id_I)
data1_keys = ['analysis_id', 'simulation_id_1', 'simulation_id_2', 'met_id', 'flux_units', 'test_description']
data1_nestkeys = ['analysis_id']
data1_keymap = {'ydata': 'pvalue_negLog10', 'xdata': 'fold_change', 'serieslabel': '', 'featureslabel': 'met_id'}
dataobject_O = [{'data': data_O, 'datakeys': data1_keys, 'datanestkeys': data1_nestkeys}]
formtileparameters_O = {'tileheader': 'Filter menu', 'tiletype': 'html', 'tileid': 'filtermenu1', 'rowid': 'row1', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-4'}
formparameters_O = {'htmlid': 'filtermenuform1', 'htmltype': 'form_01', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}, 'formresetbuttonidtext': {'id': 'reset1', 'text': 'reset'}, 'formupdatebuttonidtext': {'id': 'update1', 'text': 'update'}}
formtileparameters_O.update(formparameters_O)
svgparameters_O = {'svgtype': 'volcanoplot2d_01', 'svgkeymap': [data1_keymap], 'svgid': 'svg1', 'svgmargin': {'top': 50, 'right': 50, 'bottom': 50, 'left': 50}, 'svgwidth': 500, 'svgheight': 350, 'svgx1axislabel': 'Fold Change [geometric]', 'svgy1axislabel': 'Probability [-log10(P)]'}
svgtileparameters_O = {'tileheader': 'Volcano plot', 'tiletype': 'svg', 'tileid': 'tile2', 'rowid': 'row1', 'colid': 'col2', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-8'}
svgtileparameters_O.update(svgparameters_O)
tableparameters_O = {'tabletype': 'responsivetable_01', 'tableid': 'table1', 'tableclass': 'table table-condensed table-hover', 'tablefilters': None, 'tableformtileid': 'filtermenu1', 'tableresetbuttonid': 'reset1', 'tablesubmitbuttonid': 'submit1'}
tabletileparameters_O = {'tileheader': 'pairWiseTest', 'tiletype': 'table', 'tileid': 'tile3', 'rowid': 'row2', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-12'}
tabletileparameters_O.update(tableparameters_O)
parametersobject_O = [formtileparameters_O, svgtileparameters_O, tabletileparameters_O]
tile2datamap_O = {'filtermenu1': [0], 'tile2': [0], 'tile3': [0]}
filtermenuobject_O = None
ddtutilities = ddt_container(parameters_I=parametersobject_O, data_I=dataobject_O, tile2datamap_I=tile2datamap_O, filtermenu_I=filtermenuobject_O)
if (data_dir_I == 'tmp'):
filename_str = (self.settings['visualization_data'] + '/tmp/ddt_data.js')
elif (data_dir_I == 'data_json'):
data_json_O = ddtutilities.get_allObjects_js()
return data_json_O
with open(filename_str, 'w') as file:
file.write(ddtutilities.get_allObjects()) |
def export_dataStage02PhysiologyPairWiseTestSubsystems_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTestSubsystems(analysis_id_I)
data1_keys = ['analysis_id', 'simulation_id_1', 'simulation_id_2', 'subsystem_id', 'flux_units', 'test_description']
data1_nestkeys = ['analysis_id']
data1_keymap = {'ydata': 'pvalue_negLog10', 'xdata': 'fold_change', 'serieslabel': '', 'featureslabel': 'subsystem_id'}
dataobject_O = [{'data': data_O, 'datakeys': data1_keys, 'datanestkeys': data1_nestkeys}]
formtileparameters_O = {'tileheader': 'Filter menu', 'tiletype': 'html', 'tileid': 'filtermenu1', 'rowid': 'row1', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-4'}
formparameters_O = {'htmlid': 'filtermenuform1', 'htmltype': 'form_01', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}, 'formresetbuttonidtext': {'id': 'reset1', 'text': 'reset'}, 'formupdatebuttonidtext': {'id': 'update1', 'text': 'update'}}
formtileparameters_O.update(formparameters_O)
svgparameters_O = {'svgtype': 'volcanoplot2d_01', 'svgkeymap': [data1_keymap], 'svgid': 'svg1', 'svgmargin': {'top': 50, 'right': 50, 'bottom': 50, 'left': 50}, 'svgwidth': 500, 'svgheight': 350, 'svgx1axislabel': 'Fold Change [geometric]', 'svgy1axislabel': 'Probability [-log10(P)]'}
svgtileparameters_O = {'tileheader': 'Volcano plot', 'tiletype': 'svg', 'tileid': 'tile2', 'rowid': 'row1', 'colid': 'col2', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-8'}
svgtileparameters_O.update(svgparameters_O)
tableparameters_O = {'tabletype': 'responsivetable_01', 'tableid': 'table1', 'tableclass': 'table table-condensed table-hover', 'tablefilters': None, 'tableformtileid': 'filtermenu1', 'tableresetbuttonid': 'reset1', 'tablesubmitbuttonid': 'submit1'}
tabletileparameters_O = {'tileheader': 'pairWiseTest', 'tiletype': 'table', 'tileid': 'tile3', 'rowid': 'row2', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-12'}
tabletileparameters_O.update(tableparameters_O)
parametersobject_O = [formtileparameters_O, svgtileparameters_O, tabletileparameters_O]
tile2datamap_O = {'filtermenu1': [0], 'tile2': [0], 'tile3': [0]}
filtermenuobject_O = None
ddtutilities = ddt_container(parameters_I=parametersobject_O, data_I=dataobject_O, tile2datamap_I=tile2datamap_O, filtermenu_I=filtermenuobject_O)
if (data_dir_I == 'tmp'):
filename_str = (self.settings['visualization_data'] + '/tmp/ddt_data.js')
elif (data_dir_I == 'data_json'):
data_json_O = ddtutilities.get_allObjects_js()
return data_json_O
with open(filename_str, 'w') as file:
file.write(ddtutilities.get_allObjects()) | 8,564,029,590,552,739,000 | Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value) | SBaaS_COBRA/stage02_physiology_pairWiseTest_io.py | export_dataStage02PhysiologyPairWiseTestSubsystems_js | dmccloskey/SBaaS_COBRA | python | def export_dataStage02PhysiologyPairWiseTestSubsystems_js(self, analysis_id_I, data_dir_I='tmp'):
'Export data for a volcano plot\n Visuals:\n 1. volcano plot\n 2. sample vs. sample (FC)\n 3. sample vs. sample (concentration)\n 4. sample vs. sample (p-value)'
data_O = []
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTestSubsystems(analysis_id_I)
data1_keys = ['analysis_id', 'simulation_id_1', 'simulation_id_2', 'subsystem_id', 'flux_units', 'test_description']
data1_nestkeys = ['analysis_id']
data1_keymap = {'ydata': 'pvalue_negLog10', 'xdata': 'fold_change', 'serieslabel': '', 'featureslabel': 'subsystem_id'}
dataobject_O = [{'data': data_O, 'datakeys': data1_keys, 'datanestkeys': data1_nestkeys}]
formtileparameters_O = {'tileheader': 'Filter menu', 'tiletype': 'html', 'tileid': 'filtermenu1', 'rowid': 'row1', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-4'}
formparameters_O = {'htmlid': 'filtermenuform1', 'htmltype': 'form_01', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}, 'formresetbuttonidtext': {'id': 'reset1', 'text': 'reset'}, 'formupdatebuttonidtext': {'id': 'update1', 'text': 'update'}}
formtileparameters_O.update(formparameters_O)
svgparameters_O = {'svgtype': 'volcanoplot2d_01', 'svgkeymap': [data1_keymap], 'svgid': 'svg1', 'svgmargin': {'top': 50, 'right': 50, 'bottom': 50, 'left': 50}, 'svgwidth': 500, 'svgheight': 350, 'svgx1axislabel': 'Fold Change [geometric]', 'svgy1axislabel': 'Probability [-log10(P)]'}
svgtileparameters_O = {'tileheader': 'Volcano plot', 'tiletype': 'svg', 'tileid': 'tile2', 'rowid': 'row1', 'colid': 'col2', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-8'}
svgtileparameters_O.update(svgparameters_O)
tableparameters_O = {'tabletype': 'responsivetable_01', 'tableid': 'table1', 'tableclass': 'table table-condensed table-hover', 'tablefilters': None, 'tableformtileid': 'filtermenu1', 'tableresetbuttonid': 'reset1', 'tablesubmitbuttonid': 'submit1'}
tabletileparameters_O = {'tileheader': 'pairWiseTest', 'tiletype': 'table', 'tileid': 'tile3', 'rowid': 'row2', 'colid': 'col1', 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-12'}
tabletileparameters_O.update(tableparameters_O)
parametersobject_O = [formtileparameters_O, svgtileparameters_O, tabletileparameters_O]
tile2datamap_O = {'filtermenu1': [0], 'tile2': [0], 'tile3': [0]}
filtermenuobject_O = None
ddtutilities = ddt_container(parameters_I=parametersobject_O, data_I=dataobject_O, tile2datamap_I=tile2datamap_O, filtermenu_I=filtermenuobject_O)
if (data_dir_I == 'tmp'):
filename_str = (self.settings['visualization_data'] + '/tmp/ddt_data.js')
elif (data_dir_I == 'data_json'):
data_json_O = ddtutilities.get_allObjects_js()
return data_json_O
with open(filename_str, 'w') as file:
file.write(ddtutilities.get_allObjects()) |
def advance_euler(gridc, gridx, gridy, scalars, grid_var_list, predcorr):
"\n Subroutine for the fractional step euler explicit time advancement of Navier Stokes equations\n \n Arguments\n ---------\n gridc : object\n Grid object for cell centered variables\n\n gridx : object\n Grid object for x-face variables\n\n gridy : object\n Grid object for y-face variables\n\n scalars: object\n Scalars object to access time-step and Reynold number\n\n grid_var_list : list\n List containing variable names for velocity, RHS term from the previous time-step, divergence and pressure\n\n predcorr : string\n Flag for the fractional step method equations - 'predictor', 'divergence', 'corrector'\n\n "
velc = grid_var_list[0]
hvar = grid_var_list[1]
divv = grid_var_list[2]
pres = grid_var_list[3]
if (predcorr == 'predictor'):
predictor(gridx, gridy, velc, hvar, scalars.variable['Re'], scalars.variable['dt'])
if (predcorr == 'divergence'):
divergence(gridc, gridx, gridy, velc, divv, ifac=scalars.variable['dt'])
elif (predcorr == 'corrector'):
corrector(gridc, gridx, gridy, velc, pres, scalars.variable['dt'])
divergence(gridc, gridx, gridy, velc, divv)
scalars.stats.update(stats(gridc, gridx, gridy, velc, pres, divv)) | -1,970,600,925,795,049,200 | Subroutine for the fractional step euler explicit time advancement of Navier Stokes equations
Arguments
---------
gridc : object
Grid object for cell centered variables
gridx : object
Grid object for x-face variables
gridy : object
Grid object for y-face variables
scalars: object
Scalars object to access time-step and Reynold number
grid_var_list : list
List containing variable names for velocity, RHS term from the previous time-step, divergence and pressure
predcorr : string
Flag for the fractional step method equations - 'predictor', 'divergence', 'corrector' | flowx/ins/euler.py | advance_euler | AbhilashReddyM/flowX | python | def advance_euler(gridc, gridx, gridy, scalars, grid_var_list, predcorr):
"\n Subroutine for the fractional step euler explicit time advancement of Navier Stokes equations\n \n Arguments\n ---------\n gridc : object\n Grid object for cell centered variables\n\n gridx : object\n Grid object for x-face variables\n\n gridy : object\n Grid object for y-face variables\n\n scalars: object\n Scalars object to access time-step and Reynold number\n\n grid_var_list : list\n List containing variable names for velocity, RHS term from the previous time-step, divergence and pressure\n\n predcorr : string\n Flag for the fractional step method equations - 'predictor', 'divergence', 'corrector'\n\n "
velc = grid_var_list[0]
hvar = grid_var_list[1]
divv = grid_var_list[2]
pres = grid_var_list[3]
if (predcorr == 'predictor'):
predictor(gridx, gridy, velc, hvar, scalars.variable['Re'], scalars.variable['dt'])
if (predcorr == 'divergence'):
divergence(gridc, gridx, gridy, velc, divv, ifac=scalars.variable['dt'])
elif (predcorr == 'corrector'):
corrector(gridc, gridx, gridy, velc, pres, scalars.variable['dt'])
divergence(gridc, gridx, gridy, velc, divv)
scalars.stats.update(stats(gridc, gridx, gridy, velc, pres, divv)) |
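A schematic 1-D NumPy sketch (periodic grid, invented fields; not the flowx implementation) of the corrector update u = u* - dt * grad(p) that the routine above delegates to corrector():

import numpy as np

nx = 64
dx, dt = 1.0 / nx, 1e-3
x = np.arange(nx) * dx
u_star = np.sin(2 * np.pi * x)      # predicted velocity, not yet divergence-free
p = 0.1 * np.cos(2 * np.pi * x)     # pressure from the Poisson solve

dpdx = (np.roll(p, -1) - np.roll(p, 1)) / (2 * dx)   # central difference, periodic
u = u_star - dt * dpdx              # projected (corrected) velocity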
def stackplot(axes, x, *args, **kwargs):
"Draws a stacked area plot.\n\n *x* : 1d array of dimension N\n\n *y* : 2d array of dimension MxN, OR any number 1d arrays each of dimension\n 1xN. The data is assumed to be unstacked. Each of the following\n calls is legal::\n\n stackplot(x, y) # where y is MxN\n stackplot(x, y1, y2, y3, y4) # where y1, y2, y3, y4, are all 1xNm\n\n Keyword arguments:\n\n *baseline* : ['zero', 'sym', 'wiggle', 'weighted_wiggle']\n Method used to calculate the baseline. 'zero' is just a\n simple stacked plot. 'sym' is symmetric around zero and\n is sometimes called `ThemeRiver`. 'wiggle' minimizes the\n sum of the squared slopes. 'weighted_wiggle' does the\n same but weights to account for size of each layer.\n It is also called `Streamgraph`-layout. More details\n can be found at http://leebyron.com/streamgraph/.\n\n\n *labels* : A list or tuple of labels to assign to each data series.\n\n\n *colors* : A list or tuple of colors. These will be cycled through and\n used to colour the stacked areas.\n All other keyword arguments are passed to\n :func:`~matplotlib.Axes.fill_between`\n\n Returns *r* : A list of\n :class:`~matplotlib.collections.PolyCollection`, one for each\n element in the stacked area plot.\n "
if (len(args) == 1):
y = np.atleast_2d(*args)
elif (len(args) > 1):
y = np.row_stack(args)
labels = iter(kwargs.pop('labels', []))
colors = kwargs.pop('colors', None)
if (colors is not None):
axes.set_prop_cycle(cycler('color', colors))
baseline = kwargs.pop('baseline', 'zero')
stack = np.cumsum(y, axis=0)
if (baseline == 'zero'):
first_line = 0.0
elif (baseline == 'sym'):
first_line = ((- np.sum(y, 0)) * 0.5)
stack += first_line[None, :]
elif (baseline == 'wiggle'):
m = y.shape[0]
first_line = (y * ((m - 0.5) - np.arange(0, m)[:, None])).sum(0)
first_line /= (- m)
stack += first_line
elif (baseline == 'weighted_wiggle'):
(m, n) = y.shape
center = np.zeros(n)
total = np.sum(y, 0)
inv_total = np.zeros_like(total)
mask = (total > 0)
inv_total[mask] = (1.0 / total[mask])
increase = np.hstack((y[:, 0:1], np.diff(y)))
below_size = (total - stack)
below_size += (0.5 * y)
move_up = (below_size * inv_total)
move_up[:, 0] = 0.5
center = ((move_up - 0.5) * increase)
center = np.cumsum(center.sum(0))
first_line = (center - (0.5 * total))
stack += first_line
else:
errstr = ('Baseline method %s not recognised. ' % baseline)
errstr += "Expected 'zero', 'sym', 'wiggle' or 'weighted_wiggle'"
raise ValueError(errstr)
color = axes._get_lines.get_next_color()
coll = axes.fill_between(x, first_line, stack[0, :], facecolor=color, label=six.next(labels, None), **kwargs)
coll.sticky_edges.y[:] = [0]
r = [coll]
for i in xrange((len(y) - 1)):
color = axes._get_lines.get_next_color()
r.append(axes.fill_between(x, stack[i, :], stack[(i + 1), :], facecolor=color, label=six.next(labels, None), **kwargs))
return r | -4,823,916,398,877,042,000 | Draws a stacked area plot.
*x* : 1d array of dimension N
*y* : 2d array of dimension MxN, OR any number 1d arrays each of dimension
1xN. The data is assumed to be unstacked. Each of the following
calls is legal::
stackplot(x, y) # where y is MxN
stackplot(x, y1, y2, y3, y4) # where y1, y2, y3, y4, are all 1xNm
Keyword arguments:
*baseline* : ['zero', 'sym', 'wiggle', 'weighted_wiggle']
Method used to calculate the baseline. 'zero' is just a
simple stacked plot. 'sym' is symmetric around zero and
is sometimes called `ThemeRiver`. 'wiggle' minimizes the
sum of the squared slopes. 'weighted_wiggle' does the
same but weights to account for size of each layer.
It is also called `Streamgraph`-layout. More details
can be found at http://leebyron.com/streamgraph/.
*labels* : A list or tuple of labels to assign to each data series.
*colors* : A list or tuple of colors. These will be cycled through and
used to colour the stacked areas.
All other keyword arguments are passed to
:func:`~matplotlib.Axes.fill_between`
Returns *r* : A list of
:class:`~matplotlib.collections.PolyCollection`, one for each
element in the stacked area plot. | lib/matplotlib/stackplot.py | stackplot | Owen-Gillespie/BeachHacks-ShowerSuite | python | def stackplot(axes, x, *args, **kwargs):
"Draws a stacked area plot.\n\n *x* : 1d array of dimension N\n\n *y* : 2d array of dimension MxN, OR any number 1d arrays each of dimension\n 1xN. The data is assumed to be unstacked. Each of the following\n calls is legal::\n\n stackplot(x, y) # where y is MxN\n stackplot(x, y1, y2, y3, y4) # where y1, y2, y3, y4, are all 1xNm\n\n Keyword arguments:\n\n *baseline* : ['zero', 'sym', 'wiggle', 'weighted_wiggle']\n Method used to calculate the baseline. 'zero' is just a\n simple stacked plot. 'sym' is symmetric around zero and\n is sometimes called `ThemeRiver`. 'wiggle' minimizes the\n sum of the squared slopes. 'weighted_wiggle' does the\n same but weights to account for size of each layer.\n It is also called `Streamgraph`-layout. More details\n can be found at http://leebyron.com/streamgraph/.\n\n\n *labels* : A list or tuple of labels to assign to each data series.\n\n\n *colors* : A list or tuple of colors. These will be cycled through and\n used to colour the stacked areas.\n All other keyword arguments are passed to\n :func:`~matplotlib.Axes.fill_between`\n\n Returns *r* : A list of\n :class:`~matplotlib.collections.PolyCollection`, one for each\n element in the stacked area plot.\n "
if (len(args) == 1):
y = np.atleast_2d(*args)
elif (len(args) > 1):
y = np.row_stack(args)
labels = iter(kwargs.pop('labels', []))
colors = kwargs.pop('colors', None)
if (colors is not None):
axes.set_prop_cycle(cycler('color', colors))
baseline = kwargs.pop('baseline', 'zero')
stack = np.cumsum(y, axis=0)
if (baseline == 'zero'):
first_line = 0.0
elif (baseline == 'sym'):
first_line = ((- np.sum(y, 0)) * 0.5)
stack += first_line[None, :]
elif (baseline == 'wiggle'):
m = y.shape[0]
first_line = (y * ((m - 0.5) - np.arange(0, m)[:, None])).sum(0)
first_line /= (- m)
stack += first_line
elif (baseline == 'weighted_wiggle'):
(m, n) = y.shape
center = np.zeros(n)
total = np.sum(y, 0)
inv_total = np.zeros_like(total)
mask = (total > 0)
inv_total[mask] = (1.0 / total[mask])
increase = np.hstack((y[:, 0:1], np.diff(y)))
below_size = (total - stack)
below_size += (0.5 * y)
move_up = (below_size * inv_total)
move_up[:, 0] = 0.5
center = ((move_up - 0.5) * increase)
center = np.cumsum(center.sum(0))
first_line = (center - (0.5 * total))
stack += first_line
else:
errstr = ('Baseline method %s not recognised. ' % baseline)
errstr += "Expected 'zero', 'sym', 'wiggle' or 'weighted_wiggle'"
raise ValueError(errstr)
color = axes._get_lines.get_next_color()
coll = axes.fill_between(x, first_line, stack[0, :], facecolor=color, label=six.next(labels, None), **kwargs)
coll.sticky_edges.y[:] = [0]
r = [coll]
for i in xrange((len(y) - 1)):
color = axes._get_lines.get_next_color()
r.append(axes.fill_between(x, stack[i, :], stack[(i + 1), :], facecolor=color, label=six.next(labels, None), **kwargs))
return r |
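A short usage example of this function through the usual Axes interface; the data are made up:

import numpy as np
import matplotlib.pyplot as plt

x = np.arange(10)
y1, y2, y3 = np.random.rand(3, 10)

fig, ax = plt.subplots()
ax.stackplot(x, y1, y2, y3,
             labels=['a', 'b', 'c'],
             colors=['#66c2a5', '#fc8d62', '#8da0cb'],
             baseline='wiggle')      # or 'zero', 'sym', 'weighted_wiggle'
ax.legend(loc='upper left')
plt.show()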
@staticmethod
def create(gltf, mesh_idx, skin_idx):
'Mesh creation.'
pymesh = gltf.data.meshes[mesh_idx]
bme = bmesh.new()
materials = []
for prim in pymesh.primitives:
if (prim.material is None):
material_idx = None
else:
pymaterial = gltf.data.materials[prim.material]
vertex_color = None
if ('COLOR_0' in prim.attributes):
vertex_color = 'COLOR_0'
if (vertex_color not in pymaterial.blender_material):
BlenderMaterial.create(gltf, prim.material, vertex_color)
material_name = pymaterial.blender_material[vertex_color]
material = bpy.data.materials[material_name]
try:
material_idx = materials.index(material.name)
except ValueError:
materials.append(material.name)
material_idx = (len(materials) - 1)
BlenderPrimitive.add_primitive_to_bmesh(gltf, bme, pymesh, prim, skin_idx, material_idx)
name = (pymesh.name or ('Mesh_' + str(mesh_idx)))
mesh = bpy.data.meshes.new(name)
BlenderMesh.bmesh_to_mesh(gltf, pymesh, bme, mesh)
bme.free()
for name_material in materials:
mesh.materials.append(bpy.data.materials[name_material])
mesh.update()
set_extras(mesh, pymesh.extras, exclude=['targetNames'])
gltf.accessor_cache = {}
return mesh | 6,512,850,218,290,860,000 | Mesh creation. | addons/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py | create | MrTheRich/glTF-Blender-IO | python | @staticmethod
def create(gltf, mesh_idx, skin_idx):
pymesh = gltf.data.meshes[mesh_idx]
bme = bmesh.new()
materials = []
for prim in pymesh.primitives:
if (prim.material is None):
material_idx = None
else:
pymaterial = gltf.data.materials[prim.material]
vertex_color = None
if ('COLOR_0' in prim.attributes):
vertex_color = 'COLOR_0'
if (vertex_color not in pymaterial.blender_material):
BlenderMaterial.create(gltf, prim.material, vertex_color)
material_name = pymaterial.blender_material[vertex_color]
material = bpy.data.materials[material_name]
try:
material_idx = materials.index(material.name)
except ValueError:
materials.append(material.name)
material_idx = (len(materials) - 1)
BlenderPrimitive.add_primitive_to_bmesh(gltf, bme, pymesh, prim, skin_idx, material_idx)
name = (pymesh.name or ('Mesh_' + str(mesh_idx)))
mesh = bpy.data.meshes.new(name)
BlenderMesh.bmesh_to_mesh(gltf, pymesh, bme, mesh)
bme.free()
for name_material in materials:
mesh.materials.append(bpy.data.materials[name_material])
mesh.update()
set_extras(mesh, pymesh.extras, exclude=['targetNames'])
gltf.accessor_cache = {}
return mesh |
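For context, a minimal bmesh-to-mesh round trip; it runs only inside Blender's Python and is unrelated to glTF import beyond illustrating the bmesh.new()/bpy.data.meshes.new() pattern used above:

import bpy
import bmesh

bm = bmesh.new()
v1 = bm.verts.new((0.0, 0.0, 0.0))
v2 = bm.verts.new((1.0, 0.0, 0.0))
v3 = bm.verts.new((0.0, 1.0, 0.0))
bm.faces.new((v1, v2, v3))

mesh = bpy.data.meshes.new("ExampleTriangle")
bm.to_mesh(mesh)   # write the bmesh into the mesh datablock
bm.free()
mesh.update()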
def testV1beta1CustomResourceSubresources(self):
'Test V1beta1CustomResourceSubresources'
pass | -7,919,106,972,520,173,000 | Test V1beta1CustomResourceSubresources | test/test_v1beta1_custom_resource_subresources.py | testV1beta1CustomResourceSubresources | olitheolix/aiokubernetes | python | def testV1beta1CustomResourceSubresources(self):
pass |
def _write_locks(self):
'\n Write racefile and ADCC Startup Report\n\n '
dotadcc = get_adcc_dir()
vals = {'http_port': self.http_port, 'pid': os.getpid()}
rfile = os.path.join(dotadcc, self.racefile)
with open(rfile, 'w') as ports:
ports.write(repr(vals))
sr = os.path.join(dotadcc, self.sreport)
write_adcc_sr(sr, vals)
return | -4,548,844,850,677,403,600 | Write racefile and ADCC Startup Report | recipe_system/adcc/adcclib.py | _write_locks | Luke-Ludwig/DRAGONS | python | def _write_locks(self):
'\n \n\n '
dotadcc = get_adcc_dir()
vals = {'http_port': self.http_port, 'pid': os.getpid()}
rfile = os.path.join(dotadcc, self.racefile)
with open(rfile, 'w') as ports:
ports.write(repr(vals))
sr = os.path.join(dotadcc, self.sreport)
write_adcc_sr(sr, vals)
return |
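The racefile written above is just repr() of a small dict; a hypothetical reader for it (ast.literal_eval is an assumption here, not necessarily what the adcc tooling itself uses, and the path below is illustrative only):

import ast

def read_racefile(path):
    # File contents look like "{'http_port': 8777, 'pid': 12345}"
    with open(path) as fh:
        return ast.literal_eval(fh.read())

# vals = read_racefile('/path/to/.adcc/racefile')  # actual filename comes from self.racefile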
def _extend_control_events_default(control_events, events, state):
'Default function for extending control event sequence.\n\n This function extends a control event sequence by duplicating the final event\n in the sequence. The control event sequence will be extended to have length\n one longer than the generated event sequence.\n\n Args:\n control_events: The control event sequence to extend.\n events: The list of generated events.\n state: State maintained while generating, unused.\n\n Returns:\n The resulting state after extending the control sequence (in this case the\n state will be returned unmodified).\n '
while (len(control_events) <= len(events)):
control_events.append(control_events[(- 1)])
return state | 8,439,013,635,635,939,000 | Default function for extending control event sequence.
This function extends a control event sequence by duplicating the final event
in the sequence. The control event sequence will be extended to have length
one longer than the generated event sequence.
Args:
control_events: The control event sequence to extend.
events: The list of generated events.
state: State maintained while generating, unused.
Returns:
The resulting state after extending the control sequence (in this case the
state will be returned unmodified). | magenta/models/shared/events_rnn_model.py | _extend_control_events_default | Surya130499/magenta | python | def _extend_control_events_default(control_events, events, state):
'Default function for extending control event sequence.\n\n This function extends a control event sequence by duplicating the final event\n in the sequence. The control event sequence will be extended to have length\n one longer than the generated event sequence.\n\n Args:\n control_events: The control event sequence to extend.\n events: The list of generated events.\n state: State maintained while generating, unused.\n\n Returns:\n The resulting state after extending the control sequence (in this case the\n state will be returned unmodified).\n '
while (len(control_events) <= len(events)):
control_events.append(control_events[(- 1)])
return state |
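A tiny concrete run of the default extension rule above (plain Python, no model involved):

control_events = [60, 62]
generated_events = [1, 1, 0, 1]

while len(control_events) <= len(generated_events):
    control_events.append(control_events[-1])   # duplicate the final control event

print(control_events)  # [60, 62, 62, 62, 62] -- one longer than the generated events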
def __init__(self, config):
'Initialize the EventSequenceRnnModel.\n\n Args:\n config: An EventSequenceRnnConfig containing the encoder/decoder and\n HParams to use.\n '
super(EventSequenceRnnModel, self).__init__()
self._config = config | 3,753,918,035,922,203,000 | Initialize the EventSequenceRnnModel.
Args:
config: An EventSequenceRnnConfig containing the encoder/decoder and
HParams to use. | magenta/models/shared/events_rnn_model.py | __init__ | Surya130499/magenta | python | def __init__(self, config):
'Initialize the EventSequenceRnnModel.\n\n Args:\n config: An EventSequenceRnnConfig containing the encoder/decoder and\n HParams to use.\n '
super(EventSequenceRnnModel, self).__init__()
self._config = config |
def _batch_size(self):
'Extracts the batch size from the graph.'
return self._session.graph.get_collection('inputs')[0].shape[0].value | -3,716,962,435,938,537,000 | Extracts the batch size from the graph. | magenta/models/shared/events_rnn_model.py | _batch_size | Surya130499/magenta | python | def _batch_size(self):
return self._session.graph.get_collection('inputs')[0].shape[0].value |
def _generate_step_for_batch(self, event_sequences, inputs, initial_state, temperature):
'Extends a batch of event sequences by a single step each.\n\n This method modifies the event sequences in place.\n\n Args:\n event_sequences: A list of event sequences, each of which is a Python\n list-like object. The list of event sequences should have length equal\n to `self._batch_size()`. These are extended by this method.\n inputs: A Python list of model inputs, with length equal to\n `self._batch_size()`.\n initial_state: A numpy array containing the initial RNN state, where\n `initial_state.shape[0]` is equal to `self._batch_size()`.\n temperature: The softmax temperature.\n\n Returns:\n final_state: The final RNN state, a numpy array the same size as\n `initial_state`.\n loglik: The log-likelihood of the chosen softmax value for each event\n sequence, a 1-D numpy array of length\n `self._batch_size()`. If `inputs` is a full-length inputs batch, the\n log-likelihood of each entire sequence up to and including the\n generated step will be computed and returned.\n '
assert (len(event_sequences) == self._batch_size())
graph_inputs = self._session.graph.get_collection('inputs')[0]
graph_initial_state = self._session.graph.get_collection('initial_state')
graph_final_state = self._session.graph.get_collection('final_state')
graph_softmax = self._session.graph.get_collection('softmax')[0]
graph_temperature = self._session.graph.get_collection('temperature')
feed_dict = {graph_inputs: inputs, tuple(graph_initial_state): initial_state}
if graph_temperature:
feed_dict[graph_temperature[0]] = temperature
(final_state, softmax) = self._session.run([graph_final_state, graph_softmax], feed_dict)
if isinstance(softmax, list):
if (softmax[0].shape[1] > 1):
softmaxes = []
for beam in range(softmax[0].shape[0]):
beam_softmaxes = []
for event in range((softmax[0].shape[1] - 1)):
beam_softmaxes.append([softmax[s][(beam, event)] for s in range(len(softmax))])
softmaxes.append(beam_softmaxes)
loglik = self._config.encoder_decoder.evaluate_log_likelihood(event_sequences, softmaxes)
else:
loglik = np.zeros(len(event_sequences))
elif (softmax.shape[1] > 1):
loglik = self._config.encoder_decoder.evaluate_log_likelihood(event_sequences, softmax[:, :(- 1), :])
else:
loglik = np.zeros(len(event_sequences))
indices = np.array(self._config.encoder_decoder.extend_event_sequences(event_sequences, softmax))
if isinstance(softmax, list):
p = 1.0
for i in range(len(softmax)):
p *= softmax[i][(range(len(event_sequences)), (- 1), indices[:, i])]
else:
p = softmax[(range(len(event_sequences)), (- 1), indices)]
return (final_state, (loglik + np.log(p))) | 3,157,986,383,050,230,300 | Extends a batch of event sequences by a single step each.
This method modifies the event sequences in place.
Args:
event_sequences: A list of event sequences, each of which is a Python
list-like object. The list of event sequences should have length equal
to `self._batch_size()`. These are extended by this method.
inputs: A Python list of model inputs, with length equal to
`self._batch_size()`.
initial_state: A numpy array containing the initial RNN state, where
`initial_state.shape[0]` is equal to `self._batch_size()`.
temperature: The softmax temperature.
Returns:
final_state: The final RNN state, a numpy array the same size as
`initial_state`.
loglik: The log-likelihood of the chosen softmax value for each event
sequence, a 1-D numpy array of length
`self._batch_size()`. If `inputs` is a full-length inputs batch, the
log-likelihood of each entire sequence up to and including the
generated step will be computed and returned. | magenta/models/shared/events_rnn_model.py | _generate_step_for_batch | Surya130499/magenta | python | def _generate_step_for_batch(self, event_sequences, inputs, initial_state, temperature):
'Extends a batch of event sequences by a single step each.\n\n This method modifies the event sequences in place.\n\n Args:\n event_sequences: A list of event sequences, each of which is a Python\n list-like object. The list of event sequences should have length equal\n to `self._batch_size()`. These are extended by this method.\n inputs: A Python list of model inputs, with length equal to\n `self._batch_size()`.\n initial_state: A numpy array containing the initial RNN state, where\n `initial_state.shape[0]` is equal to `self._batch_size()`.\n temperature: The softmax temperature.\n\n Returns:\n final_state: The final RNN state, a numpy array the same size as\n `initial_state`.\n loglik: The log-likelihood of the chosen softmax value for each event\n sequence, a 1-D numpy array of length\n `self._batch_size()`. If `inputs` is a full-length inputs batch, the\n log-likelihood of each entire sequence up to and including the\n generated step will be computed and returned.\n '
assert (len(event_sequences) == self._batch_size())
graph_inputs = self._session.graph.get_collection('inputs')[0]
graph_initial_state = self._session.graph.get_collection('initial_state')
graph_final_state = self._session.graph.get_collection('final_state')
graph_softmax = self._session.graph.get_collection('softmax')[0]
graph_temperature = self._session.graph.get_collection('temperature')
feed_dict = {graph_inputs: inputs, tuple(graph_initial_state): initial_state}
if graph_temperature:
feed_dict[graph_temperature[0]] = temperature
(final_state, softmax) = self._session.run([graph_final_state, graph_softmax], feed_dict)
if isinstance(softmax, list):
if (softmax[0].shape[1] > 1):
softmaxes = []
for beam in range(softmax[0].shape[0]):
beam_softmaxes = []
for event in range((softmax[0].shape[1] - 1)):
beam_softmaxes.append([softmax[s][(beam, event)] for s in range(len(softmax))])
softmaxes.append(beam_softmaxes)
loglik = self._config.encoder_decoder.evaluate_log_likelihood(event_sequences, softmaxes)
else:
loglik = np.zeros(len(event_sequences))
elif (softmax.shape[1] > 1):
loglik = self._config.encoder_decoder.evaluate_log_likelihood(event_sequences, softmax[:, :(- 1), :])
else:
loglik = np.zeros(len(event_sequences))
indices = np.array(self._config.encoder_decoder.extend_event_sequences(event_sequences, softmax))
if isinstance(softmax, list):
p = 1.0
for i in range(len(softmax)):
p *= softmax[i][(range(len(event_sequences)), (- 1), indices[:, i])]
else:
p = softmax[(range(len(event_sequences)), (- 1), indices)]
return (final_state, (loglik + np.log(p))) |
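The batching above pads the list of sequences up to a multiple of the graph's fixed batch size; the padding arithmetic is just modular, as this standalone check shows:

batch_size = 8
num_seqs = 13

pad_amt = -num_seqs % batch_size                  # 3 throwaway copies of the last sequence
num_batches = (num_seqs + pad_amt) // batch_size  # 2, matching ceil(13 / 8)
print(pad_amt, num_batches)                       # 3 2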
def _generate_step(self, event_sequences, model_states, logliks, temperature, extend_control_events_callback=None, modify_events_callback=None):
'Extends a list of event sequences by a single step each.\n\n This method modifies the event sequences in place. It also returns the\n modified event sequences and updated model states and log-likelihoods.\n\n Args:\n event_sequences: A list of event sequence objects, which are extended by\n this method.\n model_states: A list of model states, each of which contains model inputs\n and initial RNN states.\n logliks: A list containing the current log-likelihood for each event\n sequence.\n temperature: The softmax temperature.\n extend_control_events_callback: A function that takes three arguments: a\n current control event sequence, a current generated event sequence,\n and the control state. The function should a) extend the control event\n sequence to be one longer than the generated event sequence (or do\n nothing if it is already at least this long), and b) return the\n resulting control state.\n modify_events_callback: An optional callback for modifying the event list.\n Can be used to inject events rather than having them generated. If not\n None, will be called with 3 arguments after every event: the current\n EventSequenceEncoderDecoder, a list of current EventSequences, and a\n list of current encoded event inputs.\n\n Returns:\n event_sequences: A list of extended event sequences. These are modified in\n place but also returned.\n final_states: A list of resulting model states, containing model inputs\n for the next step along with RNN states for each event sequence.\n logliks: A list containing the updated log-likelihood for each event\n sequence.\n '
batch_size = self._batch_size()
num_seqs = len(event_sequences)
num_batches = int(np.ceil((num_seqs / float(batch_size))))
inputs = [model_state.inputs for model_state in model_states]
initial_states = [model_state.rnn_state for model_state in model_states]
control_sequences = [model_state.control_events for model_state in model_states]
control_states = [model_state.control_state for model_state in model_states]
final_states = []
logliks = np.array(logliks, dtype=np.float32)
pad_amt = ((- len(event_sequences)) % batch_size)
padded_event_sequences = (event_sequences + [copy.deepcopy(event_sequences[(- 1)]) for _ in range(pad_amt)])
padded_inputs = (inputs + ([inputs[(- 1)]] * pad_amt))
padded_initial_states = (initial_states + ([initial_states[(- 1)]] * pad_amt))
for b in range(num_batches):
(i, j) = ((b * batch_size), ((b + 1) * batch_size))
pad_amt = max(0, (j - num_seqs))
(batch_final_state, batch_loglik) = self._generate_step_for_batch(padded_event_sequences[i:j], padded_inputs[i:j], state_util.batch(padded_initial_states[i:j], batch_size), temperature)
final_states += state_util.unbatch(batch_final_state, batch_size)[:((j - i) - pad_amt)]
logliks[i:(j - pad_amt)] += batch_loglik[:((j - i) - pad_amt)]
if (extend_control_events_callback is not None):
for idx in range(len(control_sequences)):
control_states[idx] = extend_control_events_callback(control_sequences[idx], event_sequences[idx], control_states[idx])
next_inputs = self._config.encoder_decoder.get_inputs_batch(control_sequences, event_sequences)
else:
next_inputs = self._config.encoder_decoder.get_inputs_batch(event_sequences)
if modify_events_callback:
modify_events_callback(self._config.encoder_decoder, event_sequences, next_inputs)
model_states = [ModelState(inputs=inputs, rnn_state=final_state, control_events=control_events, control_state=control_state) for (inputs, final_state, control_events, control_state) in zip(next_inputs, final_states, control_sequences, control_states)]
return (event_sequences, model_states, logliks) | 495,595,974,226,170,000 | Extends a list of event sequences by a single step each.
This method modifies the event sequences in place. It also returns the
modified event sequences and updated model states and log-likelihoods.
Args:
event_sequences: A list of event sequence objects, which are extended by
this method.
model_states: A list of model states, each of which contains model inputs
and initial RNN states.
logliks: A list containing the current log-likelihood for each event
sequence.
temperature: The softmax temperature.
extend_control_events_callback: A function that takes three arguments: a
current control event sequence, a current generated event sequence,
and the control state. The function should a) extend the control event
sequence to be one longer than the generated event sequence (or do
nothing if it is already at least this long), and b) return the
resulting control state.
modify_events_callback: An optional callback for modifying the event list.
Can be used to inject events rather than having them generated. If not
None, will be called with 3 arguments after every event: the current
EventSequenceEncoderDecoder, a list of current EventSequences, and a
list of current encoded event inputs.
Returns:
event_sequences: A list of extended event sequences. These are modified in
place but also returned.
final_states: A list of resulting model states, containing model inputs
for the next step along with RNN states for each event sequence.
logliks: A list containing the updated log-likelihood for each event
sequence. | magenta/models/shared/events_rnn_model.py | _generate_step | Surya130499/magenta | python | def _generate_step(self, event_sequences, model_states, logliks, temperature, extend_control_events_callback=None, modify_events_callback=None):
'Extends a list of event sequences by a single step each.\n\n This method modifies the event sequences in place. It also returns the\n modified event sequences and updated model states and log-likelihoods.\n\n Args:\n event_sequences: A list of event sequence objects, which are extended by\n this method.\n model_states: A list of model states, each of which contains model inputs\n and initial RNN states.\n logliks: A list containing the current log-likelihood for each event\n sequence.\n temperature: The softmax temperature.\n extend_control_events_callback: A function that takes three arguments: a\n current control event sequence, a current generated event sequence,\n and the control state. The function should a) extend the control event\n sequence to be one longer than the generated event sequence (or do\n nothing if it is already at least this long), and b) return the\n resulting control state.\n modify_events_callback: An optional callback for modifying the event list.\n Can be used to inject events rather than having them generated. If not\n None, will be called with 3 arguments after every event: the current\n EventSequenceEncoderDecoder, a list of current EventSequences, and a\n list of current encoded event inputs.\n\n Returns:\n event_sequences: A list of extended event sequences. These are modified in\n place but also returned.\n final_states: A list of resulting model states, containing model inputs\n for the next step along with RNN states for each event sequence.\n logliks: A list containing the updated log-likelihood for each event\n sequence.\n '
batch_size = self._batch_size()
num_seqs = len(event_sequences)
num_batches = int(np.ceil((num_seqs / float(batch_size))))
inputs = [model_state.inputs for model_state in model_states]
initial_states = [model_state.rnn_state for model_state in model_states]
control_sequences = [model_state.control_events for model_state in model_states]
control_states = [model_state.control_state for model_state in model_states]
final_states = []
logliks = np.array(logliks, dtype=np.float32)
pad_amt = ((- len(event_sequences)) % batch_size)
padded_event_sequences = (event_sequences + [copy.deepcopy(event_sequences[(- 1)]) for _ in range(pad_amt)])
padded_inputs = (inputs + ([inputs[(- 1)]] * pad_amt))
padded_initial_states = (initial_states + ([initial_states[(- 1)]] * pad_amt))
for b in range(num_batches):
(i, j) = ((b * batch_size), ((b + 1) * batch_size))
pad_amt = max(0, (j - num_seqs))
(batch_final_state, batch_loglik) = self._generate_step_for_batch(padded_event_sequences[i:j], padded_inputs[i:j], state_util.batch(padded_initial_states[i:j], batch_size), temperature)
final_states += state_util.unbatch(batch_final_state, batch_size)[:((j - i) - pad_amt)]
logliks[i:(j - pad_amt)] += batch_loglik[:((j - i) - pad_amt)]
if (extend_control_events_callback is not None):
for idx in range(len(control_sequences)):
control_states[idx] = extend_control_events_callback(control_sequences[idx], event_sequences[idx], control_states[idx])
next_inputs = self._config.encoder_decoder.get_inputs_batch(control_sequences, event_sequences)
else:
next_inputs = self._config.encoder_decoder.get_inputs_batch(event_sequences)
if modify_events_callback:
modify_events_callback(self._config.encoder_decoder, event_sequences, next_inputs)
model_states = [ModelState(inputs=inputs, rnn_state=final_state, control_events=control_events, control_state=control_state) for (inputs, final_state, control_events, control_state) in zip(next_inputs, final_states, control_sequences, control_states)]
return (event_sequences, model_states, logliks) |
def _generate_events(self, num_steps, primer_events, temperature=1.0, beam_size=1, branch_factor=1, steps_per_iteration=1, control_events=None, control_state=None, extend_control_events_callback=_extend_control_events_default, modify_events_callback=None):
'Generate an event sequence from a primer sequence.\n\n Args:\n num_steps: The integer length in steps of the final event sequence, after\n generation. Includes the primer.\n primer_events: The primer event sequence, a Python list-like object.\n temperature: A float specifying how much to divide the logits by\n before computing the softmax. Greater than 1.0 makes events more\n random, less than 1.0 makes events less random.\n beam_size: An integer, beam size to use when generating event sequences\n via beam search.\n branch_factor: An integer, beam search branch factor to use.\n steps_per_iteration: An integer, number of steps to take per beam search\n iteration.\n control_events: A sequence of control events upon which to condition the\n generation. If not None, the encoder/decoder should be a\n ConditionalEventSequenceEncoderDecoder, and the control events will be\n used along with the target sequence to generate model inputs. In some\n cases, the control event sequence cannot be fully-determined as later\n control events depend on earlier generated events; use the\n `extend_control_events_callback` argument to provide a function that\n extends the control event sequence.\n control_state: Initial state used by `extend_control_events_callback`.\n extend_control_events_callback: A function that takes three arguments: a\n current control event sequence, a current generated event sequence,\n and the control state. The function should a) extend the control event\n sequence to be one longer than the generated event sequence (or do\n nothing if it is already at least this long), and b) return the\n resulting control state.\n modify_events_callback: An optional callback for modifying the event list.\n Can be used to inject events rather than having them generated. If not\n None, will be called with 3 arguments after every event: the current\n EventSequenceEncoderDecoder, a list of current EventSequences, and a\n list of current encoded event inputs.\n\n Returns:\n The generated event sequence (which begins with the provided primer).\n\n Raises:\n EventSequenceRnnModelError: If the primer sequence has zero length or\n is not shorter than num_steps.\n '
if ((control_events is not None) and (not isinstance(self._config.encoder_decoder, mm.ConditionalEventSequenceEncoderDecoder))):
raise EventSequenceRnnModelError('control sequence provided but encoder/decoder is not a ConditionalEventSequenceEncoderDecoder')
if ((control_events is not None) and (extend_control_events_callback is None)):
raise EventSequenceRnnModelError('must provide callback for extending control sequence (or usedefault)')
if (not primer_events):
raise EventSequenceRnnModelError('primer sequence must have non-zero length')
if (len(primer_events) >= num_steps):
raise EventSequenceRnnModelError('primer sequence must be shorter than `num_steps`')
if (len(primer_events) >= num_steps):
return primer_events
event_sequences = [copy.deepcopy(primer_events)]
if (control_events is not None):
control_state = extend_control_events_callback(control_events, primer_events, control_state)
inputs = self._config.encoder_decoder.get_inputs_batch([control_events], event_sequences, full_length=True)
else:
inputs = self._config.encoder_decoder.get_inputs_batch(event_sequences, full_length=True)
if modify_events_callback:
modify_events_callback(self._config.encoder_decoder, event_sequences, inputs)
graph_initial_state = self._session.graph.get_collection('initial_state')
initial_states = state_util.unbatch(self._session.run(graph_initial_state))
initial_state = ModelState(inputs=inputs[0], rnn_state=initial_states[0], control_events=control_events, control_state=control_state)
generate_step_fn = functools.partial(self._generate_step, temperature=temperature, extend_control_events_callback=(extend_control_events_callback if (control_events is not None) else None), modify_events_callback=modify_events_callback)
(events, _, loglik) = beam_search(initial_sequence=event_sequences[0], initial_state=initial_state, generate_step_fn=generate_step_fn, num_steps=(num_steps - len(primer_events)), beam_size=beam_size, branch_factor=branch_factor, steps_per_iteration=steps_per_iteration)
tf.logging.info('Beam search yields sequence with log-likelihood: %f ', loglik)
return events | -9,193,433,878,735,797,000 | Generate an event sequence from a primer sequence.
Args:
num_steps: The integer length in steps of the final event sequence, after
generation. Includes the primer.
primer_events: The primer event sequence, a Python list-like object.
temperature: A float specifying how much to divide the logits by
before computing the softmax. Greater than 1.0 makes events more
random, less than 1.0 makes events less random.
beam_size: An integer, beam size to use when generating event sequences
via beam search.
branch_factor: An integer, beam search branch factor to use.
steps_per_iteration: An integer, number of steps to take per beam search
iteration.
control_events: A sequence of control events upon which to condition the
generation. If not None, the encoder/decoder should be a
ConditionalEventSequenceEncoderDecoder, and the control events will be
used along with the target sequence to generate model inputs. In some
cases, the control event sequence cannot be fully-determined as later
control events depend on earlier generated events; use the
`extend_control_events_callback` argument to provide a function that
extends the control event sequence.
control_state: Initial state used by `extend_control_events_callback`.
extend_control_events_callback: A function that takes three arguments: a
current control event sequence, a current generated event sequence,
and the control state. The function should a) extend the control event
sequence to be one longer than the generated event sequence (or do
nothing if it is already at least this long), and b) return the
resulting control state.
modify_events_callback: An optional callback for modifying the event list.
Can be used to inject events rather than having them generated. If not
None, will be called with 3 arguments after every event: the current
EventSequenceEncoderDecoder, a list of current EventSequences, and a
list of current encoded event inputs.
Returns:
The generated event sequence (which begins with the provided primer).
Raises:
EventSequenceRnnModelError: If the primer sequence has zero length or
is not shorter than num_steps. | magenta/models/shared/events_rnn_model.py | _generate_events | Surya130499/magenta | python | def _generate_events(self, num_steps, primer_events, temperature=1.0, beam_size=1, branch_factor=1, steps_per_iteration=1, control_events=None, control_state=None, extend_control_events_callback=_extend_control_events_default, modify_events_callback=None):
'Generate an event sequence from a primer sequence.\n\n Args:\n num_steps: The integer length in steps of the final event sequence, after\n generation. Includes the primer.\n primer_events: The primer event sequence, a Python list-like object.\n temperature: A float specifying how much to divide the logits by\n before computing the softmax. Greater than 1.0 makes events more\n random, less than 1.0 makes events less random.\n beam_size: An integer, beam size to use when generating event sequences\n via beam search.\n branch_factor: An integer, beam search branch factor to use.\n steps_per_iteration: An integer, number of steps to take per beam search\n iteration.\n control_events: A sequence of control events upon which to condition the\n generation. If not None, the encoder/decoder should be a\n ConditionalEventSequenceEncoderDecoder, and the control events will be\n used along with the target sequence to generate model inputs. In some\n cases, the control event sequence cannot be fully-determined as later\n control events depend on earlier generated events; use the\n `extend_control_events_callback` argument to provide a function that\n extends the control event sequence.\n control_state: Initial state used by `extend_control_events_callback`.\n extend_control_events_callback: A function that takes three arguments: a\n current control event sequence, a current generated event sequence,\n and the control state. The function should a) extend the control event\n sequence to be one longer than the generated event sequence (or do\n nothing if it is already at least this long), and b) return the\n resulting control state.\n modify_events_callback: An optional callback for modifying the event list.\n Can be used to inject events rather than having them generated. If not\n None, will be called with 3 arguments after every event: the current\n EventSequenceEncoderDecoder, a list of current EventSequences, and a\n list of current encoded event inputs.\n\n Returns:\n The generated event sequence (which begins with the provided primer).\n\n Raises:\n EventSequenceRnnModelError: If the primer sequence has zero length or\n is not shorter than num_steps.\n '
if ((control_events is not None) and (not isinstance(self._config.encoder_decoder, mm.ConditionalEventSequenceEncoderDecoder))):
raise EventSequenceRnnModelError('control sequence provided but encoder/decoder is not a ConditionalEventSequenceEncoderDecoder')
if ((control_events is not None) and (extend_control_events_callback is None)):
raise EventSequenceRnnModelError('must provide callback for extending control sequence (or use default)')
if (not primer_events):
raise EventSequenceRnnModelError('primer sequence must have non-zero length')
if (len(primer_events) >= num_steps):
raise EventSequenceRnnModelError('primer sequence must be shorter than `num_steps`')
if (len(primer_events) >= num_steps):
return primer_events
event_sequences = [copy.deepcopy(primer_events)]
if (control_events is not None):
control_state = extend_control_events_callback(control_events, primer_events, control_state)
inputs = self._config.encoder_decoder.get_inputs_batch([control_events], event_sequences, full_length=True)
else:
inputs = self._config.encoder_decoder.get_inputs_batch(event_sequences, full_length=True)
if modify_events_callback:
modify_events_callback(self._config.encoder_decoder, event_sequences, inputs)
graph_initial_state = self._session.graph.get_collection('initial_state')
initial_states = state_util.unbatch(self._session.run(graph_initial_state))
initial_state = ModelState(inputs=inputs[0], rnn_state=initial_states[0], control_events=control_events, control_state=control_state)
generate_step_fn = functools.partial(self._generate_step, temperature=temperature, extend_control_events_callback=(extend_control_events_callback if (control_events is not None) else None), modify_events_callback=modify_events_callback)
(events, _, loglik) = beam_search(initial_sequence=event_sequences[0], initial_state=initial_state, generate_step_fn=generate_step_fn, num_steps=(num_steps - len(primer_events)), beam_size=beam_size, branch_factor=branch_factor, steps_per_iteration=steps_per_iteration)
tf.logging.info('Beam search yields sequence with log-likelihood: %f ', loglik)
return events |
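# Illustrative aside (not part of the mined record above): the docstring describes
# `temperature` as the value the logits are divided by before the softmax. A minimal,
# self-contained NumPy sketch of that behavior, using made-up logits:
import numpy as np

def softmax_with_temperature(logits, temperature=1.0):
    # temperature > 1.0 flattens the distribution (more random events);
    # temperature < 1.0 sharpens it (less random events).
    scaled = np.asarray(logits, dtype=float) / temperature
    scaled -= scaled.max()  # subtract the max for numerical stability
    exps = np.exp(scaled)
    return exps / exps.sum()

example_logits = [2.0, 1.0, 0.1]
print(softmax_with_temperature(example_logits, temperature=0.5))  # sharper
print(softmax_with_temperature(example_logits, temperature=2.0))  # flatter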
def _evaluate_batch_log_likelihood(self, event_sequences, inputs, initial_state):
'Evaluates the log likelihood of a batch of event sequences.\n\n Args:\n event_sequences: A list of event sequences, each of which is a Python\n list-like object. The list of event sequences should have length equal\n to `self._batch_size()`.\n inputs: A Python list of model inputs, with length equal to\n `self._batch_size()`.\n initial_state: A numpy array containing the initial RNN state, where\n `initial_state.shape[0]` is equal to `self._batch_size()`.\n\n Returns:\n A Python list containing the log likelihood of each sequence in\n `event_sequences`.\n '
graph_inputs = self._session.graph.get_collection('inputs')[0]
graph_initial_state = self._session.graph.get_collection('initial_state')
graph_softmax = self._session.graph.get_collection('softmax')[0]
graph_temperature = self._session.graph.get_collection('temperature')
feed_dict = {graph_inputs: inputs, tuple(graph_initial_state): initial_state}
if graph_temperature:
feed_dict[graph_temperature[0]] = 1.0
softmax = self._session.run(graph_softmax, feed_dict)
return self._config.encoder_decoder.evaluate_log_likelihood(event_sequences, softmax) | 4,642,773,188,067,565,000 | Evaluates the log likelihood of a batch of event sequences.
Args:
event_sequences: A list of event sequences, each of which is a Python
list-like object. The list of event sequences should have length equal
to `self._batch_size()`.
inputs: A Python list of model inputs, with length equal to
`self._batch_size()`.
initial_state: A numpy array containing the initial RNN state, where
`initial_state.shape[0]` is equal to `self._batch_size()`.
Returns:
A Python list containing the log likelihood of each sequence in
`event_sequences`. | magenta/models/shared/events_rnn_model.py | _evaluate_batch_log_likelihood | Surya130499/magenta | python | def _evaluate_batch_log_likelihood(self, event_sequences, inputs, initial_state):
'Evaluates the log likelihood of a batch of event sequences.\n\n Args:\n event_sequences: A list of event sequences, each of which is a Python\n list-like object. The list of event sequences should have length equal\n to `self._batch_size()`.\n inputs: A Python list of model inputs, with length equal to\n `self._batch_size()`.\n initial_state: A numpy array containing the initial RNN state, where\n `initial_state.shape[0]` is equal to `self._batch_size()`.\n\n Returns:\n A Python list containing the log likelihood of each sequence in\n `event_sequences`.\n '
graph_inputs = self._session.graph.get_collection('inputs')[0]
graph_initial_state = self._session.graph.get_collection('initial_state')
graph_softmax = self._session.graph.get_collection('softmax')[0]
graph_temperature = self._session.graph.get_collection('temperature')
feed_dict = {graph_inputs: inputs, tuple(graph_initial_state): initial_state}
if graph_temperature:
feed_dict[graph_temperature[0]] = 1.0
softmax = self._session.run(graph_softmax, feed_dict)
return self._config.encoder_decoder.evaluate_log_likelihood(event_sequences, softmax) |
def _evaluate_log_likelihood(self, event_sequences, control_events=None):
'Evaluate log likelihood for a list of event sequences of the same length.\n\n Args:\n event_sequences: A list of event sequences for which to evaluate the log\n likelihood.\n control_events: A sequence of control events upon which to condition the\n event sequences. If not None, the encoder/decoder should be a\n ConditionalEventSequenceEncoderDecoder, and the log likelihood of each\n event sequence will be computed conditional on the control sequence.\n\n Returns:\n The log likelihood of each sequence in `event_sequences`.\n\n Raises:\n EventSequenceRnnModelError: If the event sequences are not all the\n same length, or if the control sequence is shorter than the event\n sequences.\n '
num_steps = len(event_sequences[0])
for events in event_sequences[1:]:
if (len(events) != num_steps):
raise EventSequenceRnnModelError('log likelihood evaluation requires all event sequences to have the same length')
if ((control_events is not None) and (len(control_events) < num_steps)):
raise EventSequenceRnnModelError('control sequence must be at least as long as the event sequences')
batch_size = self._batch_size()
num_full_batches = (len(event_sequences) // batch_size)
loglik = np.empty(len(event_sequences))
if (control_events is not None):
inputs = self._config.encoder_decoder.get_inputs_batch(([control_events] * len(event_sequences)), [events[:(- 1)] for events in event_sequences], full_length=True)
else:
inputs = self._config.encoder_decoder.get_inputs_batch([events[:(- 1)] for events in event_sequences], full_length=True)
graph_initial_state = self._session.graph.get_collection('initial_state')
initial_state = ([self._session.run(graph_initial_state)] * len(event_sequences))
offset = 0
for _ in range(num_full_batches):
batch_indices = range(offset, (offset + batch_size))
batch_loglik = self._evaluate_batch_log_likelihood([event_sequences[i] for i in batch_indices], [inputs[i] for i in batch_indices], initial_state[batch_indices])
loglik[batch_indices] = batch_loglik
offset += batch_size
if (offset < len(event_sequences)):
num_extra = (len(event_sequences) - offset)
pad_size = (batch_size - num_extra)
batch_indices = range(offset, len(event_sequences))
batch_loglik = self._evaluate_batch_log_likelihood(([event_sequences[i] for i in batch_indices] + [copy.deepcopy(event_sequences[(- 1)]) for _ in range(pad_size)]), ([inputs[i] for i in batch_indices] + (inputs[(- 1)] * pad_size)), np.append(initial_state[batch_indices], np.tile(inputs[(- 1), :], (pad_size, 1)), axis=0))
loglik[batch_indices] = batch_loglik[0:num_extra]
return loglik | 3,431,351,879,014,282,000 | Evaluate log likelihood for a list of event sequences of the same length.
Args:
event_sequences: A list of event sequences for which to evaluate the log
likelihood.
control_events: A sequence of control events upon which to condition the
event sequences. If not None, the encoder/decoder should be a
ConditionalEventSequenceEncoderDecoder, and the log likelihood of each
event sequence will be computed conditional on the control sequence.
Returns:
The log likelihood of each sequence in `event_sequences`.
Raises:
EventSequenceRnnModelError: If the event sequences are not all the
same length, or if the control sequence is shorter than the event
sequences. | magenta/models/shared/events_rnn_model.py | _evaluate_log_likelihood | Surya130499/magenta | python | def _evaluate_log_likelihood(self, event_sequences, control_events=None):
'Evaluate log likelihood for a list of event sequences of the same length.\n\n Args:\n event_sequences: A list of event sequences for which to evaluate the log\n likelihood.\n control_events: A sequence of control events upon which to condition the\n event sequences. If not None, the encoder/decoder should be a\n ConditionalEventSequenceEncoderDecoder, and the log likelihood of each\n event sequence will be computed conditional on the control sequence.\n\n Returns:\n The log likelihood of each sequence in `event_sequences`.\n\n Raises:\n EventSequenceRnnModelError: If the event sequences are not all the\n same length, or if the control sequence is shorter than the event\n sequences.\n '
num_steps = len(event_sequences[0])
for events in event_sequences[1:]:
if (len(events) != num_steps):
raise EventSequenceRnnModelError('log likelihood evaluation requires all event sequences to have the same length')
if ((control_events is not None) and (len(control_events) < num_steps)):
raise EventSequenceRnnModelError('control sequence must be at least as long as the event sequences')
batch_size = self._batch_size()
num_full_batches = (len(event_sequences) // batch_size)
loglik = np.empty(len(event_sequences))
if (control_events is not None):
inputs = self._config.encoder_decoder.get_inputs_batch(([control_events] * len(event_sequences)), [events[:(- 1)] for events in event_sequences], full_length=True)
else:
inputs = self._config.encoder_decoder.get_inputs_batch([events[:(- 1)] for events in event_sequences], full_length=True)
graph_initial_state = self._session.graph.get_collection('initial_state')
initial_state = ([self._session.run(graph_initial_state)] * len(event_sequences))
offset = 0
for _ in range(num_full_batches):
batch_indices = range(offset, (offset + batch_size))
batch_loglik = self._evaluate_batch_log_likelihood([event_sequences[i] for i in batch_indices], [inputs[i] for i in batch_indices], initial_state[batch_indices])
loglik[batch_indices] = batch_loglik
offset += batch_size
if (offset < len(event_sequences)):
num_extra = (len(event_sequences) - offset)
pad_size = (batch_size - num_extra)
batch_indices = range(offset, len(event_sequences))
batch_loglik = self._evaluate_batch_log_likelihood(([event_sequences[i] for i in batch_indices] + [copy.deepcopy(event_sequences[(- 1)]) for _ in range(pad_size)]), ([inputs[i] for i in batch_indices] + (inputs[(- 1)] * pad_size)), np.append(initial_state[batch_indices], np.tile(inputs[(- 1), :], (pad_size, 1)), axis=0))
loglik[batch_indices] = batch_loglik[0:num_extra]
return loglik |
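# Illustrative aside: `_evaluate_log_likelihood` above scores sequences in fixed-size
# batches and pads the last partial batch by repeating its final element, keeping only
# the scores of the real entries. A standalone sketch of that padding pattern with a
# toy scoring function (the scorer here is made up, not the magenta session code):
import numpy as np

def score_in_batches(items, batch_size, score_batch):
    scores = np.empty(len(items))
    offset = 0
    while offset < len(items):
        batch = list(items[offset:offset + batch_size])
        n_real = len(batch)
        batch += [batch[-1]] * (batch_size - n_real)   # pad the final partial batch
        scores[offset:offset + n_real] = score_batch(batch)[:n_real]
        offset += batch_size
    return scores

toy_scores = score_in_batches([[1, 2], [3], [4, 5, 6]], batch_size=2,
                              score_batch=lambda b: np.array([sum(s) for s in b]))
print(toy_scores)   # [ 3.  3. 15.]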
def move_media(origin_server, file_id, src_paths, dest_paths):
'Move the given file, and any thumbnails, to the dest repo\n\n Args:\n origin_server (str):\n file_id (str):\n src_paths (MediaFilePaths):\n dest_paths (MediaFilePaths):\n '
logger.info('%s/%s', origin_server, file_id)
original_file = src_paths.remote_media_filepath(origin_server, file_id)
if (not os.path.exists(original_file)):
logger.warn('Original for %s/%s (%s) does not exist', origin_server, file_id, original_file)
else:
mkdir_and_move(original_file, dest_paths.remote_media_filepath(origin_server, file_id))
original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id)
if (not os.path.exists(original_thumb_dir)):
return
mkdir_and_move(original_thumb_dir, dest_paths.remote_media_thumbnail_dir(origin_server, file_id)) | 7,045,278,995,300,920,000 | Move the given file, and any thumbnails, to the dest repo
Args:
origin_server (str):
file_id (str):
src_paths (MediaFilePaths):
dest_paths (MediaFilePaths): | scripts/move_remote_media_to_new_store.py | move_media | AP-whitehat/synapse | python | def move_media(origin_server, file_id, src_paths, dest_paths):
'Move the given file, and any thumbnails, to the dest repo\n\n Args:\n origin_server (str):\n file_id (str):\n src_paths (MediaFilePaths):\n dest_paths (MediaFilePaths):\n '
logger.info('%s/%s', origin_server, file_id)
original_file = src_paths.remote_media_filepath(origin_server, file_id)
if (not os.path.exists(original_file)):
logger.warn('Original for %s/%s (%s) does not exist', origin_server, file_id, original_file)
else:
mkdir_and_move(original_file, dest_paths.remote_media_filepath(origin_server, file_id))
original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id)
if (not os.path.exists(original_thumb_dir)):
return
mkdir_and_move(original_thumb_dir, dest_paths.remote_media_thumbnail_dir(origin_server, file_id)) |
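# Hedged sketch: `mkdir_and_move` is defined elsewhere in the synapse script and is not
# included in this row. A helper with that name would plausibly look like the following
# (an assumption for illustration, not the verified upstream implementation):
import os
import shutil

def mkdir_and_move(src, dest):
    dest_dir = os.path.dirname(dest)
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)      # create the destination directory tree first
    shutil.move(src, dest)         # then move the original file or thumbnail directory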
def __init__(self, **kwargs):
'\n Initializes a new ComputeInstanceGroupSelectorCollection object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param items:\n The value to assign to the items property of this ComputeInstanceGroupSelectorCollection.\n :type items: list[oci.devops.models.ComputeInstanceGroupSelector]\n\n '
self.swagger_types = {'items': 'list[ComputeInstanceGroupSelector]'}
self.attribute_map = {'items': 'items'}
self._items = None | -1,884,853,907,500,217,300 | Initializes a new ComputeInstanceGroupSelectorCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param items:
The value to assign to the items property of this ComputeInstanceGroupSelectorCollection.
:type items: list[oci.devops.models.ComputeInstanceGroupSelector] | src/oci/devops/models/compute_instance_group_selector_collection.py | __init__ | CentroidChef/oci-python-sdk | python | def __init__(self, **kwargs):
'\n Initializes a new ComputeInstanceGroupSelectorCollection object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param items:\n The value to assign to the items property of this ComputeInstanceGroupSelectorCollection.\n :type items: list[oci.devops.models.ComputeInstanceGroupSelector]\n\n '
self.swagger_types = {'items': 'list[ComputeInstanceGroupSelector]'}
self.attribute_map = {'items': 'items'}
self._items = None |
@property
def items(self):
'\n **[Required]** Gets the items of this ComputeInstanceGroupSelectorCollection.\n A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.\n\n\n :return: The items of this ComputeInstanceGroupSelectorCollection.\n :rtype: list[oci.devops.models.ComputeInstanceGroupSelector]\n '
return self._items | 3,002,309,167,187,873,300 | **[Required]** Gets the items of this ComputeInstanceGroupSelectorCollection.
A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.
:return: The items of this ComputeInstanceGroupSelectorCollection.
:rtype: list[oci.devops.models.ComputeInstanceGroupSelector] | src/oci/devops/models/compute_instance_group_selector_collection.py | items | CentroidChef/oci-python-sdk | python | @property
def items(self):
'\n **[Required]** Gets the items of this ComputeInstanceGroupSelectorCollection.\n A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.\n\n\n :return: The items of this ComputeInstanceGroupSelectorCollection.\n :rtype: list[oci.devops.models.ComputeInstanceGroupSelector]\n '
return self._items |
@items.setter
def items(self, items):
'\n Sets the items of this ComputeInstanceGroupSelectorCollection.\n A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.\n\n\n :param items: The items of this ComputeInstanceGroupSelectorCollection.\n :type: list[oci.devops.models.ComputeInstanceGroupSelector]\n '
self._items = items | 3,188,827,730,419,083,000 | Sets the items of this ComputeInstanceGroupSelectorCollection.
A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.
:param items: The items of this ComputeInstanceGroupSelectorCollection.
:type: list[oci.devops.models.ComputeInstanceGroupSelector] | src/oci/devops/models/compute_instance_group_selector_collection.py | items | CentroidChef/oci-python-sdk | python | @items.setter
def items(self, items):
'\n Sets the items of this ComputeInstanceGroupSelectorCollection.\n A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.\n\n\n :param items: The items of this ComputeInstanceGroupSelectorCollection.\n :type: list[oci.devops.models.ComputeInstanceGroupSelector]\n '
self._items = items |
def updateAdresPosistie(cur, X, Y, herkomst, ADRESID):
'herkomst: 2= perceel (parcel), 3= gebouw (building)'
cur.execute("UPDATE ADRESPOSITIES\n SET X=?, Y=?, BEGINORGANISATIE=1, BEGINBEWERKING=3, BEGINTIJD=strftime('%Y-%m-%dT%H:%M:%S','now'),\n HERKOMSTADRESPOSITIE=? WHERE ID=? ;", (X, Y, herkomst, ADRESID)) | -7,462,245,121,174,849,000 | herkomst: 2= perceel (parcel), 3= gebouw (building) | update_terrein_adrespositie.py | updateAdresPosistie | warrieka/xgrab2db | python | def updateAdresPosistie(cur, X, Y, herkomst, ADRESID):
cur.execute("UPDATE ADRESPOSITIES\n SET X=?, Y=?, BEGINORGANISATIE=1, BEGINBEWERKING=3, BEGINTIJD=strftime('%Y-%m-%dT%H:%M:%S','now'),\n HERKOMSTADRESPOSITIE=? WHERE ID=? ;", (X, Y, herkomst, ADRESID)) |
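# Illustrative usage sketch (hypothetical database path, coordinates and ID): the
# function above runs a parameterised UPDATE, so the caller supplies a sqlite3 cursor
# and commits afterwards. The import path is assumed from the `path` column of the row.
import sqlite3
from update_terrein_adrespositie import updateAdresPosistie

conn = sqlite3.connect('xgrab.sqlite')                     # assumed database file
cur = conn.cursor()
updateAdresPosistie(cur, 104227.5, 192431.2, 2, 123456)    # herkomst=2: parcel-derived position
conn.commit()
conn.close()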
def compute_contacts_matrix(coords, mode='binary', thresh=7.8, min_seq_separation=8):
"\n much faster computation\n min_seq_separation is minimum distnce the two residues must be apart in sequence for them to be counted\n \n You can specify either of two modes:\n \n 1. 'binary': Returns 1 at positions where distance is less than or equal to thresh\n 2. 'distances': Returns inter-residue distance wherever this distances is less than or equal to thresh\n \n "
M = metrics.pairwise.pairwise_distances(coords)
M = np.tril(M, (- min_seq_separation))
if (mode == 'binary'):
contacts = np.zeros(np.shape(M))
contacts[np.where(((M < thresh) & (M != 0)))] = 1
elif (mode == 'distances'):
contacts = np.zeros(np.shape(M))
contacts[(M > 0)] = M[(M > 0)]
return contacts | 2,108,503,413,948,878,800 | much faster computation
min_seq_separation is minimum distance the two residues must be apart in sequence for them to be counted
You can specify either of two modes:
1. 'binary': Returns 1 at positions where distance is less than or equal to thresh
2. 'distances': Returns inter-residue distance wherever this distance is less than or equal to thresh | dbfold/analyze_structures.py | compute_contacts_matrix | amirbitran/dbfold | python | def compute_contacts_matrix(coords, mode='binary', thresh=7.8, min_seq_separation=8):
"\n much faster computation\n min_seq_separation is minimum distnce the two residues must be apart in sequence for them to be counted\n \n You can specify either of two modes:\n \n 1. 'binary': Returns 1 at positions where distance is less than or equal to thresh\n 2. 'distances': Returns inter-residue distance wherever this distances is less than or equal to thresh\n \n "
M = metrics.pairwise.pairwise_distances(coords)
M = np.tril(M, (- min_seq_separation))
if (mode == 'binary'):
contacts = np.zeros(np.shape(M))
contacts[np.where(((M < thresh) & (M != 0)))] = 1
elif (mode == 'distances'):
contacts = np.zeros(np.shape(M))
contacts[(M > 0)] = M[(M > 0)]
return contacts |
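# Illustrative usage sketch: random C-alpha coordinates stand in for a real structure,
# and the import path is inferred from the row's `path` column (treat it as an assumption).
import numpy as np
from dbfold.analyze_structures import compute_contacts_matrix

coords = np.random.rand(120, 3) * 40.0                          # 120 residues in a 40 A box
binary_map = compute_contacts_matrix(coords, mode='binary', thresh=7.8)
distance_map = compute_contacts_matrix(coords, mode='distances')
print(int(binary_map.sum()), 'contacts between residues at least 8 apart in sequence')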
def compute_RG(snapshot, atom='CA'):
'\n Radius of gyration...\n '
(coords, resis) = read_PDB(snapshot, atom)
R_cm = np.mean(coords, axis=0)
dR = (coords - R_cm)
mag_R = np.sum((dR * dR), axis=1)
RG = np.sqrt(np.mean(mag_R))
return RG | -5,014,992,422,287,519,000 | Radius of gyration... | dbfold/analyze_structures.py | compute_RG | amirbitran/dbfold | python | def compute_RG(snapshot, atom='CA'):
'\n \n '
(coords, resis) = read_PDB(snapshot, atom)
R_cm = np.mean(coords, axis=0)
dR = (coords - R_cm)
mag_R = np.sum((dR * dR), axis=1)
RG = np.sqrt(np.mean(mag_R))
return RG |
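# Self-contained sketch of the radius-of-gyration formula used above,
# RG = sqrt(mean(|r_i - r_cm|^2)), applied to an arbitrary coordinate array so that no
# PDB file is required (equal masses are assumed, as in the function itself):
import numpy as np

coords = np.random.rand(50, 3) * 20.0        # 50 alpha-carbon positions, toy data
r_cm = coords.mean(axis=0)                   # geometric center of mass
rg = np.sqrt(np.mean(np.sum((coords - r_cm) ** 2, axis=1)))
print('radius of gyration: {:.2f} Angstroms'.format(rg))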
def create_substructure_PML(PDB_path, subs_to_plot, d_cutoff, min_clustersize, contact_sep_thresh, min_seq_separation=8, substructures=[], colours=[]):
"\n Identifies substructures, then creates a pymol .pml script that draws those substructures as colored contacts directly on the pymol\n \n Ex. Create_substructure_PML('MARR_umbrella3/marr_0.100_Emin.pdb', ['a','b','c','d','e','f'], 7.8, 7, 3)\n You can also pre-enter the substructures as an optional argument\n \n Otherwise, it will compute the substrucutres using PDB_path and save the file as PDB_path but with .pml instead of .pdb\n \n You can optinally input the sequence of colors you want to use to paint the substructures (using the fancy British spelling colours)\n Otherwise, it will color things automatically using the usual default sequence\n That optional argument, if used, needs to have len equal to thhat of subs_to_plot: one color per substructure to plot\n \n \n "
alphabet = 'abcdefghijklmnopqrstuvwxyz'
if (len(substructures) == 0):
(native_contacts, substructures) = identify_native_substructures(PDB_path, d_cutoff, min_seq_separation, contact_sep_thresh, min_clustersize, plot=False)
prefix = PDB_path.split('pdb')[0]
PML_path = '{}pml'.format(prefix)
Nsubs = np.shape(substructures)[2]
file = open(PML_path, 'w')
file.write('bg white \n color gray \n')
if (len(colours) == 0):
colors = cm.get_cmap('jet')
counter = 0
for s in range(Nsubs):
if (alphabet[s] in subs_to_plot):
if (len(colours) == 0):
curr_color = colors((s / (Nsubs - 1)))
else:
curr_color = colours[counter]
c_hex = cccc.to_hex(curr_color)
c_hex = '0x{}'.format(c_hex.split('#')[1])
sub = substructures[:, :, s]
contacts = np.where(sub)
substr = 'sub{}'.format(alphabet[s])
for z in range(len(contacts[0])):
i = (contacts[0][z] + 1)
j = (contacts[1][z] + 1)
lines = 'select aa, //resi {}/CA \n select bb, //resi {}/CA \n distance {}, aa, bb \n hide labels, {} \n set dash_color, {}, {} \n '.format(i, j, substr, substr, c_hex, substr)
file.write(lines)
file.write('\n set dash_gap, 0.5 \n set dash_radius, 0.2 \n')
counter += 1
file.close() | -9,167,810,917,161,769,000 | Identifies substructures, then creates a pymol .pml script that draws those substructures as colored contacts directly on the pymol
Ex. create_substructure_PML('MARR_umbrella3/marr_0.100_Emin.pdb', ['a','b','c','d','e','f'], 7.8, 7, 3)
You can also pre-enter the substructures as an optional argument
Otherwise, it will compute the substructures using PDB_path and save the file as PDB_path but with .pml instead of .pdb
You can optionally input the sequence of colors you want to use to paint the substructures (using the fancy British spelling colours)
Otherwise, it will color things automatically using the usual default sequence
That optional argument, if used, needs to have len equal to that of subs_to_plot: one color per substructure to plot | dbfold/analyze_structures.py | create_substructure_PML | amirbitran/dbfold | python | def create_substructure_PML(PDB_path, subs_to_plot, d_cutoff, min_clustersize, contact_sep_thresh, min_seq_separation=8, substructures=[], colours=[]):
"\n Identifies substructures, then creates a pymol .pml script that draws those substructures as colored contacts directly on the pymol\n \n Ex. Create_substructure_PML('MARR_umbrella3/marr_0.100_Emin.pdb', ['a','b','c','d','e','f'], 7.8, 7, 3)\n You can also pre-enter the substructures as an optional argument\n \n Otherwise, it will compute the substrucutres using PDB_path and save the file as PDB_path but with .pml instead of .pdb\n \n You can optinally input the sequence of colors you want to use to paint the substructures (using the fancy British spelling colours)\n Otherwise, it will color things automatically using the usual default sequence\n That optional argument, if used, needs to have len equal to thhat of subs_to_plot: one color per substructure to plot\n \n \n "
alphabet = 'abcdefghijklmnopqrstuvwxyz'
if (len(substructures) == 0):
(native_contacts, substructures) = identify_native_substructures(PDB_path, d_cutoff, min_seq_separation, contact_sep_thresh, min_clustersize, plot=False)
prefix = PDB_path.split('pdb')[0]
PML_path = '{}pml'.format(prefix)
Nsubs = np.shape(substructures)[2]
file = open(PML_path, 'w')
file.write('bg white \n color gray \n')
if (len(colours) == 0):
colors = cm.get_cmap('jet')
counter = 0
for s in range(Nsubs):
if (alphabet[s] in subs_to_plot):
if (len(colours) == 0):
curr_color = colors((s / (Nsubs - 1)))
else:
curr_color = colours[counter]
c_hex = cccc.to_hex(curr_color)
c_hex = '0x{}'.format(c_hex.split('#')[1])
sub = substructures[:, :, s]
contacts = np.where(sub)
substr = 'sub{}'.format(alphabet[s])
for z in range(len(contacts[0])):
i = (contacts[0][z] + 1)
j = (contacts[1][z] + 1)
lines = 'select aa, //resi {}/CA \n select bb, //resi {}/CA \n distance {}, aa, bb \n hide labels, {} \n set dash_color, {}, {} \n '.format(i, j, substr, substr, c_hex, substr)
file.write(lines)
file.write('\n set dash_gap, 0.5 \n set dash_radius, 0.2 \n')
counter += 1
file.close() |
def find_native_contacts(native_file, thresh, min_seq_separation, mode='binary'):
'\n finds all native contacts from native PDB file\n '
(native_coords, resis) = read_PDB(native_file, atom='CA')
native_contacts = compute_contacts_matrix(native_coords, thresh=thresh, min_seq_separation=min_seq_separation, mode=mode)
return native_contacts | -7,727,523,894,113,354,000 | finds all native contacts from native PDB file | dbfold/analyze_structures.py | find_native_contacts | amirbitran/dbfold | python | def find_native_contacts(native_file, thresh, min_seq_separation, mode='binary'):
'\n \n '
(native_coords, resis) = read_PDB(native_file, atom='CA')
native_contacts = compute_contacts_matrix(native_coords, thresh=thresh, min_seq_separation=min_seq_separation, mode=mode)
return native_contacts |
def identify_native_substructures(native_file, d_cutoff, min_seq_separation, contact_sep_thresh, min_clustersize, atom='CA', labelsize=30, fontsize=30, max_res=None, plot=True, ax=None, native_contacts=[], verbose=False):
'\n Identify substructures within native file contact map\n Using the following strategy\n \n We produce a contact map which is a bunch of dots\n Contacts correspond to pairs of residues that are less than d_cutoff apart\n 6 Angstroms is generally a good value, but may want a larger value for helical proteins where residues interact primarily\n via sidechains, and thus the alpha carbons are further apart\n \n We only count contacts if the residues are separated by min_seq_separation along the primary sequence\n We set min_seq_separation relatively high because we don\'t care to include intra-helix contacts within our contact map\n \n Ce can calculate the "Manhattan" distnace between every pair of dots on that contact map \n and build a graph of contacts in which two contacts are connected if they are less than some\n threshold distance, contact_sep_thresh, apart in the contact map \n \n Then, we find all connected components of this graph, each of which is a substructure\n But we only keep substructures whose number of residues is at least min_clustersize, to avoid sparse contacts here and there that we dont\' care about\n \n Gives you option to input native contacts a priori, but by defualt you don\'t do this (value is set to None)\n \n You set Max_res to something other than None if you wish to plot only up to a certain residue number (ex. to depict what substructures can be formed when the first 100 AAs are synthesized)\n '
if (len(native_contacts) == 0):
(coords, resis) = read_PDB(native_file, atom)
native_distances = compute_contacts_matrix(coords, mode='distances', min_seq_separation=min_seq_separation)
native_contacts = np.zeros(np.shape(native_distances))
native_contacts[np.where(((native_distances < d_cutoff) & (native_distances != 0)))] = 1
positions = np.where((native_contacts == 1))
positions = np.transpose(positions)
M = metrics.pairwise.pairwise_distances(positions, metric='manhattan')
(clusters, pairs_in_substructures, mean_intercluster, mean_intracluster) = loopCluster(contact_sep_thresh, positions, M, sort_orphans=False, min_clustersize=min_clustersize, verbose=verbose)
pairs_in_substructures = [(np.array(C)[:, 0], np.array(C)[:, 1]) for C in pairs_in_substructures]
nsubstructures = len(pairs_in_substructures)
substructures = np.zeros((np.shape(native_contacts)[0], np.shape(native_contacts)[1], nsubstructures))
for n in range(nsubstructures):
SS = np.zeros(np.shape(native_contacts))
SS[pairs_in_substructures[n]] = 1
substructures[:, :, n] = SS
if plot:
visualize_substructures(native_contacts, substructures, max_res=max_res, ax=ax, labelsize=labelsize, fontsize=fontsize)
return (native_contacts, substructures) | 8,276,949,333,124,320,000 | Identify substructures within native file contact map
Using the following strategy
We produce a contact map which is a bunch of dots
Contacts correspond to pairs of residues that are less than d_cutoff apart
6 Angstroms is generally a good value, but may want a larger value for helical proteins where residues interact primarily
via sidechains, and thus the alpha carbons are further apart
We only count contacts if the residues are separated by min_seq_separation along the primary sequence
We set min_seq_separation relatively high because we don't care to include intra-helix contacts within our contact map
Ce can calculate the "Manhattan" distnace between every pair of dots on that contact map
and build a graph of contacts in which two contacts are connected if they are less than some
threshold distance, contact_sep_thresh, apart in the contact map
Then, we find all connected components of this graph, each of which is a substructure
But we only keep substructures whose number of residues is at least min_clustersize, to avoid sparse contacts here and there that we don't care about
Gives you the option to input native contacts a priori, but by default you don't do this (value is set to None)
You set Max_res to something other than None if you wish to plot only up to a certain residue number (ex. to depict what substructures can be formed when the first 100 AAs are synthesized) | dbfold/analyze_structures.py | identify_native_substructures | amirbitran/dbfold | python | def identify_native_substructures(native_file, d_cutoff, min_seq_separation, contact_sep_thresh, min_clustersize, atom='CA', labelsize=30, fontsize=30, max_res=None, plot=True, ax=None, native_contacts=[], verbose=False):
'\n Identify substructures within native file contact map\n Using the following strategy\n \n We produce a contact map which is a bunch of dots\n Contacts correspond to pairs of residues that are less than d_cutoff apart\n 6 Angstroms is generally a good value, but may want a larger value for helical proteins where residues interact primarily\n via sidechains, and thus the alpha carbons are further apart\n \n We only count contacts if the residues are separated by min_seq_separation along the primary sequence\n We set min_seq_separation relatively high because we don\'t care to include intra-helix contacts within our contact map\n \n Ce can calculate the "Manhattan" distnace between every pair of dots on that contact map \n and build a graph of contacts in which two contacts are connected if they are less than some\n threshold distance, contact_sep_thresh, apart in the contact map \n \n Then, we find all connected components of this graph, each of which is a substructure\n But we only keep substructures whose number of residues is at least min_clustersize, to avoid sparse contacts here and there that we dont\' care about\n \n Gives you option to input native contacts a priori, but by defualt you don\'t do this (value is set to None)\n \n You set Max_res to something other than None if you wish to plot only up to a certain residue number (ex. to depict what substructures can be formed when the first 100 AAs are synthesized)\n '
if (len(native_contacts) == 0):
(coords, resis) = read_PDB(native_file, atom)
native_distances = compute_contacts_matrix(coords, mode='distances', min_seq_separation=min_seq_separation)
native_contacts = np.zeros(np.shape(native_distances))
native_contacts[np.where(((native_distances < d_cutoff) & (native_distances != 0)))] = 1
positions = np.where((native_contacts == 1))
positions = np.transpose(positions)
M = metrics.pairwise.pairwise_distances(positions, metric='manhattan')
(clusters, pairs_in_substructures, mean_intercluster, mean_intracluster) = loopCluster(contact_sep_thresh, positions, M, sort_orphans=False, min_clustersize=min_clustersize, verbose=verbose)
pairs_in_substructures = [(np.array(C)[:, 0], np.array(C)[:, 1]) for C in pairs_in_substructures]
nsubstructures = len(pairs_in_substructures)
substructures = np.zeros((np.shape(native_contacts)[0], np.shape(native_contacts)[1], nsubstructures))
for n in range(nsubstructures):
SS = np.zeros(np.shape(native_contacts))
SS[pairs_in_substructures[n]] = 1
substructures[:, :, n] = SS
if plot:
visualize_substructures(native_contacts, substructures, max_res=max_res, ax=ax, labelsize=labelsize, fontsize=fontsize)
return (native_contacts, substructures) |
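# Illustrative usage sketch (hypothetical PDB file name; the parameter values echo the
# example quoted in the create_substructure_PML docstring above, and the import path is
# assumed from the row's `path` column):
from dbfold.analyze_structures import identify_native_substructures

native_contacts, substructures = identify_native_substructures(
    'marr_0.100_Emin.pdb',       # hypothetical native-state PDB file
    d_cutoff=7.8,                # contact distance cutoff in Angstroms
    min_seq_separation=8,        # skip near-sequence (intra-helix) contacts
    contact_sep_thresh=3,        # cluster contacts within this Manhattan distance on the map
    min_clustersize=7,           # drop clusters smaller than this
    plot=False)
print(substructures.shape)       # (N_residues, N_residues, N_substructures)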
def PDB_contacts_matrix(PDB_file, thresh=7.8, min_seq_separation=8, plot=True, mode='binary'):
'\n Input PDB file, plots contacts matrix\n \n '
(coords, resis) = read_PDB(PDB_file, 'CA')
M = metrics.pairwise.pairwise_distances(coords)
M = np.tril(M, (- min_seq_separation))
if (mode == 'binary'):
contacts = np.zeros(np.shape(M))
contacts[np.where(((M < thresh) & (M != 0)))] = 1
elif (mode == 'distances'):
contacts = np.zeros(np.shape(M))
contacts[(M > 0)] = M[(M > 0)]
if plot:
plt.figure()
plt.imshow(contacts)
plt.title(PDB_file)
return contacts | -1,211,378,378,583,712,500 | Input PDB file, plots contacts matrix | dbfold/analyze_structures.py | PDB_contacts_matrix | amirbitran/dbfold | python | def PDB_contacts_matrix(PDB_file, thresh=7.8, min_seq_separation=8, plot=True, mode='binary'):
'\n \n \n '
(coords, resis) = read_PDB(PDB_file, 'CA')
M = metrics.pairwise.pairwise_distances(coords)
M = np.tril(M, (- min_seq_separation))
if (mode == 'binary'):
contacts = np.zeros(np.shape(M))
contacts[np.where(((M < thresh) & (M != 0)))] = 1
elif (mode == 'distances'):
contacts = np.zeros(np.shape(M))
contacts[(M > 0)] = M[(M > 0)]
if plot:
plt.figure()
plt.imshow(contacts)
plt.title(PDB_file)
return contacts |
def read_PDB(file, atom):
"\n extracts coordinates for some side chain atom in some PDB file\n For instance, atom will have value 'CA' if you care about the alpha carbons\n \n TODO: Fix this so it can deal with chain labels\n Right now if the PDB has a chain label in the fifth column, this will give nonsense results\n "
openfile = open(file)
resis = []
coords = []
for line in openfile.readlines():
line = line.rstrip('\n')
entries = line.split()
if (entries[0] == 'ATOM'):
if ((entries[2] == atom) and (entries[4] == 'A') and (entries[3] != 'GLY')):
resis.append(entries[3])
coords.append([float(entries[6]), float(entries[7]), float(entries[8])])
elif ((len(entries) > 1) and (entries[2] == atom) and (entries[4] != 'A') and (entries[3] != 'GLY')):
if ('-' in entries[5][1:(- 1)]):
x = entries[5]
entries[5] = x[0:(x[1:(- 1)].index('-') + 1)]
entries[6] = x[(x[1:(- 1)].index('-') + 1):]
if ('-' in entries[6][1:(- 1)]):
x = entries[6]
entries[6] = x[0:(x[1:(- 1)].index('-') + 1)]
entries[7] = x[(x[1:(- 1)].index('-') + 1):]
resis.append(entries[3])
coords.append([float(entries[5]), float(entries[6]), float(entries[7])])
elif ((len(entries) > 1) and (entries[2] == 'CA') and (entries[4] == 'A') and (entries[3] == 'GLY')):
resis.append(entries[3])
coords.append([float(entries[6]), float(entries[7]), float(entries[8])])
elif ((len(entries) > 1) and (entries[2] == 'CA') and (entries[4] != 'A') and (entries[3] == 'GLY')):
if ('-' in entries[5][1:(- 1)]):
x = entries[5]
entries[5] = x[0:(x[1:(- 1)].index('-') + 1)]
entries[6] = x[(x[1:(- 1)].index('-') + 1):]
if ('-' in entries[6][1:(- 1)]):
x = entries[6]
entries[6] = x[0:(x[1:(- 1)].index('-') + 1)]
entries[7] = x[(x[1:(- 1)].index('-') + 1):]
resis.append(entries[3])
coords.append([float(entries[5]), float(entries[6]), float(entries[7])])
coords = np.array(coords)
return (coords, resis) | -4,223,119,219,618,408,000 | extracts coordinates for some side chain atom in some PDB file
For instance, atom will have value 'CA' if you care about the alpha carbons
TODO: Fix this so it can deal with chain labels
Right now if the PDB has a chain label in the fifth column, this will give nonsense results | dbfold/analyze_structures.py | read_PDB | amirbitran/dbfold | python | def read_PDB(file, atom):
"\n extracts coordinates for some side chain atom in some PDB file\n For instance, atom will have value 'CA' if you care about the alpha carbons\n \n TODO: Fix this so it can deal with chain labels\n Right now if the PDB has a chain label in the fifth column, this will give nonsense results\n "
openfile = open(file)
resis = []
coords = []
for line in openfile.readlines():
line = line.rstrip('\n')
entries = line.split()
if (entries[0] == 'ATOM'):
if ((entries[2] == atom) and (entries[4] == 'A') and (entries[3] != 'GLY')):
resis.append(entries[3])
coords.append([float(entries[6]), float(entries[7]), float(entries[8])])
elif ((len(entries) > 1) and (entries[2] == atom) and (entries[4] != 'A') and (entries[3] != 'GLY')):
if ('-' in entries[5][1:(- 1)]):
x = entries[5]
entries[5] = x[0:(x[1:(- 1)].index('-') + 1)]
entries[6] = x[(x[1:(- 1)].index('-') + 1):]
if ('-' in entries[6][1:(- 1)]):
x = entries[6]
entries[6] = x[0:(x[1:(- 1)].index('-') + 1)]
entries[7] = x[(x[1:(- 1)].index('-') + 1):]
resis.append(entries[3])
coords.append([float(entries[5]), float(entries[6]), float(entries[7])])
elif ((len(entries) > 1) and (entries[2] == 'CA') and (entries[4] == 'A') and (entries[3] == 'GLY')):
resis.append(entries[3])
coords.append([float(entries[6]), float(entries[7]), float(entries[8])])
elif ((len(entries) > 1) and (entries[2] == 'CA') and (entries[4] != 'A') and (entries[3] == 'GLY')):
if ('-' in entries[5][1:(- 1)]):
x = entries[5]
entries[5] = x[0:(x[1:(- 1)].index('-') + 1)]
entries[6] = x[(x[1:(- 1)].index('-') + 1):]
if ('-' in entries[6][1:(- 1)]):
x = entries[6]
entries[6] = x[0:(x[1:(- 1)].index('-') + 1)]
entries[7] = x[(x[1:(- 1)].index('-') + 1):]
resis.append(entries[3])
coords.append([float(entries[5]), float(entries[6]), float(entries[7])])
coords = np.array(coords)
return (coords, resis) |
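# Illustrative usage sketch (hypothetical file name), with the import path assumed from
# the row's `path` column; note the docstring's caveat about chain labels still applies.
from dbfold.analyze_structures import read_PDB

coords, resis = read_PDB('native.pdb', 'CA')   # coords is an (N, 3) numpy array
print(len(resis), 'residues read; coords shape:', coords.shape)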
def score_snapshot(snapshot, substructures, atom='CA', min_seq_separation=8):
'\n Assigns a set of scores for a snapshot\n the ith score tells you what is the average distance between pairs of residues that participate in the ith substructure, in this snapshot\n If the score is close to the characteristic contact distance, then the substructure should be mostly formed\n '
(coords, resis) = read_PDB(snapshot, atom)
distances = compute_contacts_matrix(coords, mode='distances', min_seq_separation=min_seq_separation)
length = np.shape(distances)[0]
len_substructures = np.shape(substructures)[0]
if (length > len_substructures):
distances = distances[0:len_substructures, 0:len_substructures]
nsubstructures = np.shape(substructures)[2]
scores = np.zeros(nsubstructures)
for s in range(nsubstructures):
sub = substructures[:, :, s]
participation = np.multiply(distances, sub)
scores[s] = np.mean(participation[np.nonzero(participation)])
return (scores, distances) | 1,263,311,548,412,532,700 | Assigns a set of scores for a snapshot
the ith score tells you what is the average distance between pairs of residues that participate in the ith substructure, in this snapshot
If the score is close to the characteristic contact distance, then the substructure should be mostly formed | dbfold/analyze_structures.py | score_snapshot | amirbitran/dbfold | python | def score_snapshot(snapshot, substructures, atom='CA', min_seq_separation=8):
'\n Assigns a set of scores for a snapshot\n the ith score tells you what is the average distance between pairs of residues that participate in the ith substructure, in this snapshot\n If the score is close to the characteristic contact distance, then the substructure should be mostly formed\n '
(coords, resis) = read_PDB(snapshot, atom)
distances = compute_contacts_matrix(coords, mode='distances', min_seq_separation=min_seq_separation)
length = np.shape(distances)[0]
len_substructures = np.shape(substructures)[0]
if (length > len_substructures):
distances = distances[0:len_substructures, 0:len_substructures]
nsubstructures = np.shape(substructures)[2]
scores = np.zeros(nsubstructures)
for s in range(nsubstructures):
sub = substructures[:, :, s]
participation = np.multiply(distances, sub)
scores[s] = np.mean(participation[np.nonzero(participation)])
return (scores, distances) |
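# Illustrative sketch (hypothetical file names and tolerance): the scores above are mean
# pairwise distances, so a substructure is often treated as formed when its score stays
# within some factor of the native contact distance. The 1.2 factor below is an assumed
# illustration, not a value prescribed by the source.
from dbfold.analyze_structures import identify_native_substructures, score_snapshot

_, substructures = identify_native_substructures('native.pdb', d_cutoff=7.8,
                                                 min_seq_separation=8, contact_sep_thresh=3,
                                                 min_clustersize=7, plot=False)
scores, distances = score_snapshot('snapshot.pdb', substructures)
formed = scores < 1.2 * 7.8
print(''.join('1' if f else '0' for f in formed))   # e.g. a '110100'-style substructure string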
def visualize_nonnatives(nonnatives_path, native_file, d_cutoff=6.5, cmap='Greys', Return=False, cbar=True, filter_natives=True, filter_distance=2, vmax=1, alpha=1, custom_filter=None, ax=None, labelsize=40):
"\n Reads a file of the form Distance_maps.dat and makes a contact map of nonnative contacts with shading according to frequency with whcih \n that contact is observed\n \n d_cutoff is distance cutoff with which you identify NATIVE structures to subtract off from the nonnatives...sholud be\n the same as whatever was used to identify the nonnatives\n \n if filter_natives, then we ignore the native contacts, as well as a border around them given by filter_distance\n You also have the option to enter a Custom filter, which is a matrix of 1's at positions where you want to filter out the contact map...by default this is off and set to none\n Note that if a custom_filter is used, you still pad that filter with a border given by filter_distance\n \n If both filter_natives is set to and and you provide a custom filter, then the two filters are used in conjunction\n \n By the way, the variable vmax says what is the strongest value in the colorbar\n By default, it's 1, but you can also set it to None in which case it becomes the maximum value in the map\n "
(native_contacts, substructures) = identify_native_substructures(native_file, d_cutoff=d_cutoff, plot=False)
[distance_maps, PDB_files, filescores] = joblib.load(nonnatives_path)
if (np.shape(distance_maps)[2] > len(PDB_files)):
mean_distances = distance_maps[:, :, (- 1)]
distance_maps = distance_maps[:, :, 0:(- 1)]
mean_nonnatives = np.mean(distance_maps, axis=2)
NN = np.shape(mean_nonnatives)[0]
if (filter_natives or (np.shape(custom_filter) != ())):
if (filter_natives and (np.shape(custom_filter) == ())):
Filter = cp.deepcopy(native_contacts)
elif (filter_natives and (np.shape(custom_filter) != ())):
Filter = (cp.deepcopy(native_contacts) + custom_filter)
zz = np.zeros(np.shape(Filter))
zz[np.where((Filter > 0))] = 1
Filter = zz
else:
Filter = custom_filter
for d in range((- filter_distance), (filter_distance + 1)):
im1_to_add = np.roll(Filter, d, axis=1)
if (d < 0):
im1_to_add[:, d:] = 0
else:
im1_to_add[:, 0:d] = 0
im2_to_add = np.roll(Filter, d, axis=0)
if (d < 0):
im2_to_add[d:, :] = 0
else:
im2_to_add[0:d, :] = 0
Filter = ((Filter + im1_to_add) + im2_to_add)
Filter[np.where(Filter)] = 1
mean_nonnatives = np.multiply(mean_nonnatives, (1 - Filter))
if (vmax == None):
vmax = np.max(mean_nonnatives)
normalize = cccc.Normalize(vmin=0, vmax=vmax)
if (ax == None):
(fig, ax) = plt.subplots()
if (cmap != None):
im = ax.imshow((mean_nonnatives + np.transpose(mean_nonnatives)), cmap=cmap, norm=normalize, alpha=alpha, origin='upper')
else:
im = ax.imshow((mean_nonnatives + np.transpose(mean_nonnatives)), norm=normalize, alpha=alpha, origin='upper')
if cbar:
cbar = plt.colorbar(im)
cbar.ax.tick_params(labelsize=labelsize)
ax.tick_params(labelsize=labelsize)
ax.plot(np.arange(0, len(mean_nonnatives)), np.arange(0, len(mean_nonnatives)), color='gray', linestyle=':')
if Return:
return im | 7,431,308,676,125,319,000 | Reads a file of the form Distance_maps.dat and makes a contact map of nonnative contacts with shading according to frequency with whcih
that contact is observed
d_cutoff is distance cutoff with which you identify NATIVE structures to subtract off from the nonnatives...sholud be
the same as whatever was used to identify the nonnatives
if filter_natives, then we ignore the native contacts, as well as a border around them given by filter_distance
You also have the option to enter a Custom filter, which is a matrix of 1's at positions where you want to filter out the contact map...by default this is off and set to none
Note that if a custom_filter is used, you still pad that filter with a border given by filter_distance
If both filter_natives is set to and and you provide a custom filter, then the two filters are used in conjunction
By the way, the variable vmax says what is the strongest value in the colorbar
By default, it's 1, but you can also set it to None in which case it becomes the maximum value in the map | dbfold/analyze_structures.py | visualize_nonnatives | amirbitran/dbfold | python | def visualize_nonnatives(nonnatives_path, native_file, d_cutoff=6.5, cmap='Greys', Return=False, cbar=True, filter_natives=True, filter_distance=2, vmax=1, alpha=1, custom_filter=None, ax=None, labelsize=40):
"\n Reads a file of the form Distance_maps.dat and makes a contact map of nonnative contacts with shading according to frequency with whcih \n that contact is observed\n \n d_cutoff is distance cutoff with which you identify NATIVE structures to subtract off from the nonnatives...sholud be\n the same as whatever was used to identify the nonnatives\n \n if filter_natives, then we ignore the native contacts, as well as a border around them given by filter_distance\n You also have the option to enter a Custom filter, which is a matrix of 1's at positions where you want to filter out the contact map...by default this is off and set to none\n Note that if a custom_filter is used, you still pad that filter with a border given by filter_distance\n \n If both filter_natives is set to and and you provide a custom filter, then the two filters are used in conjunction\n \n By the way, the variable vmax says what is the strongest value in the colorbar\n By default, it's 1, but you can also set it to None in which case it becomes the maximum value in the map\n "
(native_contacts, substructures) = identify_native_substructures(native_file, d_cutoff=d_cutoff, plot=False)
[distance_maps, PDB_files, filescores] = joblib.load(nonnatives_path)
if (np.shape(distance_maps)[2] > len(PDB_files)):
mean_distances = distance_maps[:, :, (- 1)]
distance_maps = distance_maps[:, :, 0:(- 1)]
mean_nonnatives = np.mean(distance_maps, axis=2)
NN = np.shape(mean_nonnatives)[0]
if (filter_natives or (np.shape(custom_filter) != ())):
if (filter_natives and (np.shape(custom_filter) == ())):
Filter = cp.deepcopy(native_contacts)
elif (filter_natives and (np.shape(custom_filter) != ())):
Filter = (cp.deepcopy(native_contacts) + custom_filter)
zz = np.zeros(np.shape(Filter))
zz[np.where((Filter > 0))] = 1
Filter = zz
else:
Filter = custom_filter
for d in range((- filter_distance), (filter_distance + 1)):
im1_to_add = np.roll(Filter, d, axis=1)
if (d < 0):
im1_to_add[:, d:] = 0
else:
im1_to_add[:, 0:d] = 0
im2_to_add = np.roll(Filter, d, axis=0)
if (d < 0):
im2_to_add[d:, :] = 0
else:
im2_to_add[0:d, :] = 0
Filter = ((Filter + im1_to_add) + im2_to_add)
Filter[np.where(Filter)] = 1
mean_nonnatives = np.multiply(mean_nonnatives, (1 - Filter))
if (vmax == None):
vmax = np.max(mean_nonnatives)
normalize = cccc.Normalize(vmin=0, vmax=vmax)
if (ax == None):
(fig, ax) = plt.subplots()
if (cmap != None):
im = ax.imshow((mean_nonnatives + np.transpose(mean_nonnatives)), cmap=cmap, norm=normalize, alpha=alpha, origin='upper')
else:
im = ax.imshow((mean_nonnatives + np.transpose(mean_nonnatives)), norm=normalize, alpha=alpha, origin='upper')
if cbar:
cbar = plt.colorbar(im)
cbar.ax.tick_params(labelsize=labelsize)
ax.tick_params(labelsize=labelsize)
ax.plot(np.arange(0, len(mean_nonnatives)), np.arange(0, len(mean_nonnatives)), color='gray', linestyle=':')
if Return:
return im |
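# Illustrative usage sketch (hypothetical file names): the first argument is a joblib
# dump of the form [distance_maps, PDB_files, filescores], exactly as unpacked inside
# the function; the import path is assumed from the row's `path` column.
import matplotlib.pyplot as plt
from dbfold.analyze_structures import visualize_nonnatives

visualize_nonnatives('Distance_maps.dat', 'native.pdb',
                     d_cutoff=6.5,           # must match the cutoff used to build the maps
                     filter_natives=True,    # mask native contacts plus a 2-residue border
                     vmax=1)
plt.show()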
def visualize_substructures(native_contacts, substructures, max_res=None, ax=None, labelsize=30, fontsize=30):
'\n Visualizes substructures as follows\n Everything that is a native contact but not part of any substructure will have value -1 on shown image\n (Update 10/1/18, actually will only show contacts that are part of substructures)\n Meanwhile, everything that is part of substructure i (i ranges from 0 to N_substructures-1) will have value i\n Finally, all non-contacts will just be Nans and appear white\n \n Edited this on 2/4/19 so that substructures are labeled by letter rather than number\n Also reinstated the feature that contacts unassigned to substructures are visualized\n \n On 2/10/2020, Changed a bit how the script work\n Made it a bit simpler\n Also made it so unassigned contacts are now shown in gray\n '
alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
substructure_image = np.zeros(np.shape(native_contacts))
native_contacts = (native_contacts + np.transpose(native_contacts))
unassigned_contacts = cp.deepcopy(native_contacts)
for s in range(np.shape(substructures)[2]):
substructure_image += ((s + 1) * (substructures[:, :, s] + substructures[:, :, s].transpose()))
unassigned_contacts -= (substructures[:, :, s] + substructures[:, :, s].transpose())
substructure_image[(substructure_image == 0)] = np.nan
colors = cm.get_cmap('jet')
if (ax == None):
(fig, ax) = plt.subplots()
ax.imshow(substructure_image, cmap='jet')
ax.tick_params(labelsize=labelsize)
for s in range(np.shape(substructures)[2]):
y_pos = (np.where(substructures[:, :, s])[0][0] + 4)
x_pos = (np.where(substructures[:, :, s])[1][0] + 5)
curr_color = colors((s / (np.nanmax(substructure_image) - 1)))
ax.annotate('{}'.format(alphabet[s]), (x_pos, y_pos), fontsize=fontsize, color=curr_color)
ax.annotate('{}'.format(alphabet[s]), ((y_pos - 5), (x_pos - 8)), fontsize=fontsize, color=curr_color)
nsubstructures = np.shape(substructures)[2]
nbins = (nsubstructures + 1)
unassigned_contacts[(unassigned_contacts == 0)] = np.nan
ax.imshow(unassigned_contacts, cmap='gray', alpha=0.5)
ax.plot(np.arange(0, len(native_contacts)), np.arange(0, len(native_contacts)), color='gray', linestyle=':')
if (max_res != None):
ax.set_xlim((max_res, 0))
ax.set_ylim((0, max_res)) | -7,961,988,868,218,634,000 | Visualizes substructures as follows
Everything that is a native contact but not part of any substructure will have value -1 on shown image
(Update 10/1/18, actually will only show contacts that are part of substructures)
Meanwhile, everything that is part of substructure i (i ranges from 0 to N_substructures-1) will have value i
Finally, all non-contacts will just be Nans and appear white
Edited this on 2/4/19 so that substructures are labeled by letter rather than number
Also reinstated the feature that contacts unassigned to substructures are visualized
On 2/10/2020, changed a bit how the script works
Made it a bit simpler
Also made it so unassigned contacts are now shown in gray | dbfold/analyze_structures.py | visualize_substructures | amirbitran/dbfold | python | def visualize_substructures(native_contacts, substructures, max_res=None, ax=None, labelsize=30, fontsize=30):
'\n Visualizes substructures as follows\n Everything that is a native contact but not part of any substructure will have value -1 on shown image\n (Update 10/1/18, actually will only show contacts that are part of substructures)\n Meanwhile, everything that is part of substructure i (i ranges from 0 to N_substructures-1) will have value i\n Finally, all non-contacts will just be Nans and appear white\n \n Edited this on 2/4/19 so that substructures are labeled by letter rather than number\n Also reinstated the feature that contacts unassigned to substructures are visualized\n \n On 2/10/2020, Changed a bit how the script work\n Made it a bit simpler\n Also made it so unassigned contacts are now shown in gray\n '
alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
substructure_image = np.zeros(np.shape(native_contacts))
native_contacts = (native_contacts + np.transpose(native_contacts))
unassigned_contacts = cp.deepcopy(native_contacts)
for s in range(np.shape(substructures)[2]):
substructure_image += ((s + 1) * (substructures[:, :, s] + substructures[:, :, s].transpose()))
unassigned_contacts -= (substructures[:, :, s] + substructures[:, :, s].transpose())
substructure_image[(substructure_image == 0)] = np.nan
colors = cm.get_cmap('jet')
if (ax == None):
(fig, ax) = plt.subplots()
ax.imshow(substructure_image, cmap='jet')
ax.tick_params(labelsize=labelsize)
for s in range(np.shape(substructures)[2]):
y_pos = (np.where(substructures[:, :, s])[0][0] + 4)
x_pos = (np.where(substructures[:, :, s])[1][0] + 5)
curr_color = colors((s / (np.nanmax(substructure_image) - 1)))
ax.annotate('{}'.format(alphabet[s]), (x_pos, y_pos), fontsize=fontsize, color=curr_color)
ax.annotate('{}'.format(alphabet[s]), ((y_pos - 5), (x_pos - 8)), fontsize=fontsize, color=curr_color)
nsubstructures = np.shape(substructures)[2]
nbins = (nsubstructures + 1)
unassigned_contacts[(unassigned_contacts == 0)] = np.nan
ax.imshow(unassigned_contacts, cmap='gray', alpha=0.5)
ax.plot(np.arange(0, len(native_contacts)), np.arange(0, len(native_contacts)), color='gray', linestyle=':')
if (max_res != None):
ax.set_xlim((max_res, 0))
ax.set_ylim((0, max_res)) |
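A minimal usage sketch for the visualize_substructures record above (not part of the original repository). It assumes the function is importable from the dbfold/analyze_structures.py path given in the record's metadata columns; the toy contact map and the two-substructure layout are invented for illustration.

```python
# Hypothetical usage of visualize_substructures; the import path follows the
# record's path/name columns, and the toy data below is invented.
import numpy as np
import matplotlib.pyplot as plt
from dbfold.analyze_structures import visualize_substructures

n = 20                                      # toy chain length
native_contacts = np.zeros((n, n))
for i, j in [(2, 10), (3, 11), (8, 14), (5, 15)]:
    native_contacts[i, j] = 1               # upper-triangle native contacts

# two substructures (shape n x n x n_substructures); contact (5, 15) stays
# unassigned and is therefore drawn in gray by the function
substructures = np.zeros((n, n, 2))
substructures[2, 10, 0] = 1
substructures[3, 11, 0] = 1
substructures[8, 14, 1] = 1

visualize_substructures(native_contacts, substructures, max_res=n)
plt.show()
```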
@property
def data_field(self) -> str:
'\n Field of the response containing data.\n By default the value self.name will be used if this property is empty or None\n '
return None | -4,658,865,884,078,469,000 | Field of the response containing data.
By default the value self.name will be used if this property is empty or None | airbyte-integrations/connectors/source-cart/source_cart/streams.py | data_field | 52-entertainment/airbyte | python | @property
def data_field(self) -> str:
'\n Field of the response containing data.\n By default the value self.name will be used if this property is empty or None\n '
return None |
def backoff_time(self, response: requests.Response) -> Optional[float]:
'\n We dont need to check the response.status_code == 429 since this header exists only in this case.\n '
retry_after = response.headers.get('Retry-After')
if retry_after:
return float(retry_after) | -4,580,624,153,590,376,400 | We dont need to check the response.status_code == 429 since this header exists only in this case. | airbyte-integrations/connectors/source-cart/source_cart/streams.py | backoff_time | 52-entertainment/airbyte | python | def backoff_time(self, response: requests.Response) -> Optional[float]:
'\n \n '
retry_after = response.headers.get('Retry-After')
if retry_after:
return float(retry_after) |
def request_params(self, stream_state: Mapping[(str, Any)], **kwargs) -> MutableMapping[(str, Any)]:
'\n Generates a query for incremental logic\n\n Docs: https://developers.cart.com/docs/rest-api/docs/query_syntax.md\n '
params = super().request_params(stream_state=stream_state, **kwargs)
cursor_value = (stream_state.get(self.cursor_field) or self._start_date)
params['sort'] = self.cursor_field
start_date = max(cursor_value, self._start_date)
query = f'gt:{start_date}'
if (self._end_date and (self._end_date > start_date)):
query += f' AND lt:{self._end_date}'
params[self.cursor_field] = query
return params | -5,349,757,518,194,969,000 | Generates a query for incremental logic
Docs: https://developers.cart.com/docs/rest-api/docs/query_syntax.md | airbyte-integrations/connectors/source-cart/source_cart/streams.py | request_params | 52-entertainment/airbyte | python | def request_params(self, stream_state: Mapping[(str, Any)], **kwargs) -> MutableMapping[(str, Any)]:
'\n Generates a query for incremental logic\n\n Docs: https://developers.cart.com/docs/rest-api/docs/query_syntax.md\n '
params = super().request_params(stream_state=stream_state, **kwargs)
cursor_value = (stream_state.get(self.cursor_field) or self._start_date)
params['sort'] = self.cursor_field
start_date = max(cursor_value, self._start_date)
query = f'gt:{start_date}'
if (self._end_date and (self._end_date > start_date)):
query += f' AND lt:{self._end_date}'
params[self.cursor_field] = query
return params |
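As an illustration of the incremental query the request_params record above builds (with invented dates and an assumed cursor_field of updated_at), the resulting params follow the gt:/lt: syntax from the Cart.com query docs cited in the docstring:

```python
# Sketch of the params dict produced by the incremental logic above; the
# dates and the 'updated_at' cursor field are invented for illustration.
start_date = "2021-01-01T00:00:00Z"
end_date = "2021-06-01T00:00:00Z"
cursor_field = "updated_at"

query = f"gt:{start_date}"
if end_date and end_date > start_date:
    query += f" AND lt:{end_date}"

params = {"sort": cursor_field, cursor_field: query}
print(params)
# {'sort': 'updated_at', 'updated_at': 'gt:2021-01-01T00:00:00Z AND lt:2021-06-01T00:00:00Z'}
```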
def get_updated_state(self, current_stream_state: MutableMapping[(str, Any)], latest_record: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
"\n Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object\n and returning an updated state object.\n "
latest_state = latest_record.get(self.cursor_field)
current_state = (current_stream_state.get(self.cursor_field) or latest_state)
if current_state:
return {self.cursor_field: max(latest_state, current_state)}
return {} | 3,993,137,306,451,256,300 | Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object
and returning an updated state object. | airbyte-integrations/connectors/source-cart/source_cart/streams.py | get_updated_state | 52-entertainment/airbyte | python | def get_updated_state(self, current_stream_state: MutableMapping[(str, Any)], latest_record: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
"\n Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object\n and returning an updated state object.\n "
latest_state = latest_record.get(self.cursor_field)
current_state = (current_stream_state.get(self.cursor_field) or latest_state)
if current_state:
return {self.cursor_field: max(latest_state, current_state)}
return {} |
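A small sketch of the state-merging behaviour in the get_updated_state record above, assuming an updated_at cursor holding ISO-8601 strings (which compare correctly under max() because they sort lexicographically); the values are invented:

```python
# Illustration only: merging the stream state with the latest record's cursor.
cursor_field = "updated_at"
current_stream_state = {cursor_field: "2021-03-01T00:00:00Z"}
latest_record = {cursor_field: "2021-04-15T00:00:00Z"}

latest_state = latest_record.get(cursor_field)
current_state = current_stream_state.get(cursor_field) or latest_state
new_state = {cursor_field: max(latest_state, current_state)} if current_state else {}
print(new_state)  # {'updated_at': '2021-04-15T00:00:00Z'}
```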
def printchapter28note():
'\n print chapter28 note.\n '
print('Run main : single chapter twenty-eight!')
chapter28_1.note()
chapter28_2.note()
chapter28_3.note()
chapter28_4.note()
chapter28_5.note() | 6,848,029,913,907,678,000 | print chapter28 note. | src/chapter28/chapter28note.py | printchapter28note | HideLakitu/IntroductionToAlgorithm.Python | python | def printchapter28note():
'\n \n '
print('Run main : single chapter twenty-eight!')
chapter28_1.note()
chapter28_2.note()
chapter28_3.note()
chapter28_4.note()
chapter28_5.note() |
def note(self):
'\n Summary\n ====\n Print chapter28.1 note\n\n Example\n ====\n ```python\n Chapter28_1().note()\n ```\n '
print('chapter28.1 note as follow')
print('28.1 矩阵的性质')
print('矩阵运算在科学计算中非常重要')
print('矩阵是数字的一个矩阵阵列,在python中使用np.matrix[[1,2],[3,4]]')
print('矩阵和向量')
print('单位矩阵')
print('零矩阵')
print('对角矩阵')
print('三对角矩阵')
print('上三角矩阵')
print('下三角矩阵')
print('置换矩阵')
print('对称矩阵')
print('矩阵乘法满足结合律,矩阵乘法对假发满足分配律')
print('矩阵的F范数和2范数')
print('向量的2范数')
print('矩阵的逆,秩和行列式')
print('定理28.1 一个方阵满秩当且仅当它为非奇异矩阵')
print('定理28.2 当且仅当A无空向量,矩阵A列满秩')
print('定理28.3 当且仅当A具有空向量时,方阵A是奇异的')
print('定理28.4 (行列式的性质) 方阵A的行列式具有如下的性质')
print(' 如果A的任何行或者列的元素为0,则det(A)=0')
print(' 用常数l乘A的行列式任意一行(或任意一列)的各元素,等于用l乘A的行列式')
print(' A的行列式的值与其转置矩阵A^T的行列式的值相等')
print(' 行列式的任意两行(或者两列)互换,则其值异号')
print('定理28.5 当且仅当det(A)=0,一个n*n方阵A是奇异的')
print('正定矩阵')
print("定理28.6 对任意列满秩矩阵A,矩阵A'A是正定的")
print('练习28.1-1 证明:如果A和B是n*n对称矩阵,则A+B和A-B也是对称的')
print("练习28.1-2 证明:(AB)'=B'A',而且AA'总是一个对称矩阵")
print('练习28.1-3 证明:矩阵的逆是唯一的,即如果B和C都是A的逆矩阵,则B=C')
print('练习28.1-4 证明:两个下三角矩阵的乘积仍然是一个下三角矩阵.', '证明:一个下三角(或者上三角矩阵)矩阵的行列式的值是其对角线上的元素之积', '证明:一个下三角矩阵如果存在逆矩阵,则逆矩阵也是一个下三角矩阵')
print('练习28.1-5 证明:如果P是一个n*n置换矩阵,A是一个n*n矩阵,则可以把A的各行进行置换得到PA', '而把A的各列进行置换可得到AP。证明:两个置换矩阵的乘积仍然是一个置换矩阵', '证明:如果P是一个置换矩阵,则P是可逆矩阵,其逆矩阵是P^T,且P^T也是一个置换矩阵')
print('练习28.1-6 设A和B是n*n矩阵,且有AB=I.证明:如果把A的第j行加到第i行而得到A‘', '则可以通过把B的第j列减去第i列而获得A’的逆矩阵B‘')
print('练习28.1-7 设A是一个非奇异的n*n复数矩阵.证明:当且仅当A的每个元素都是实数时,', 'A-1的每个元素都是实数')
print('练习28.1-8 证明:如果A是一个n*n阶非奇异的对称矩阵,则A-1也是一个对称矩阵.', '证明:如果B是一个任意的m*n矩阵,则由乘积BAB^T给出的m*m矩阵是对称的')
print('练习28.1-9 证明定理28.2。亦即,证明如果矩阵A为列满秩当且仅当若Ax=0,则说明x=0')
print('练习28.1-10 证明:对任意两个相容矩阵A和B,rank(AB)<=min(rank(A),rank(B))', '其中等号仅当A或B是非奇异方阵时成立.(利用矩阵秩的另一种等价定义)')
print('练习28.1-11 已知数x0,x1,...,xn-1,证明范德蒙德(Vandermonde)矩阵的行列式表达式') | -1,073,519,064,665,087,400 | Summary
====
Print chapter28.1 note
Example
====
```python
Chapter28_1().note()
``` | src/chapter28/chapter28note.py | note | HideLakitu/IntroductionToAlgorithm.Python | python | def note(self):
'\n Summary\n ====\n Print chapter28.1 note\n\n Example\n ====\n ```python\n Chapter28_1().note()\n ```\n '
print('chapter28.1 note as follow')
print('28.1 矩阵的性质')
print('矩阵运算在科学计算中非常重要')
print('矩阵是数字的一个矩阵阵列,在python中使用np.matrix[[1,2],[3,4]]')
print('矩阵和向量')
print('单位矩阵')
print('零矩阵')
print('对角矩阵')
print('三对角矩阵')
print('上三角矩阵')
print('下三角矩阵')
print('置换矩阵')
print('对称矩阵')
print('矩阵乘法满足结合律,矩阵乘法对假发满足分配律')
print('矩阵的F范数和2范数')
print('向量的2范数')
print('矩阵的逆,秩和行列式')
print('定理28.1 一个方阵满秩当且仅当它为非奇异矩阵')
print('定理28.2 当且仅当A无空向量,矩阵A列满秩')
print('定理28.3 当且仅当A具有空向量时,方阵A是奇异的')
print('定理28.4 (行列式的性质) 方阵A的行列式具有如下的性质')
print(' 如果A的任何行或者列的元素为0,则det(A)=0')
print(' 用常数l乘A的行列式任意一行(或任意一列)的各元素,等于用l乘A的行列式')
print(' A的行列式的值与其转置矩阵A^T的行列式的值相等')
print(' 行列式的任意两行(或者两列)互换,则其值异号')
print('定理28.5 当且仅当det(A)=0,一个n*n方阵A是奇异的')
print('正定矩阵')
print("定理28.6 对任意列满秩矩阵A,矩阵A'A是正定的")
print('练习28.1-1 证明:如果A和B是n*n对称矩阵,则A+B和A-B也是对称的')
print("练习28.1-2 证明:(AB)'=B'A',而且AA'总是一个对称矩阵")
print('练习28.1-3 证明:矩阵的逆是唯一的,即如果B和C都是A的逆矩阵,则B=C')
print('练习28.1-4 证明:两个下三角矩阵的乘积仍然是一个下三角矩阵.', '证明:一个下三角(或者上三角矩阵)矩阵的行列式的值是其对角线上的元素之积', '证明:一个下三角矩阵如果存在逆矩阵,则逆矩阵也是一个下三角矩阵')
print('练习28.1-5 证明:如果P是一个n*n置换矩阵,A是一个n*n矩阵,则可以把A的各行进行置换得到PA', '而把A的各列进行置换可得到AP。证明:两个置换矩阵的乘积仍然是一个置换矩阵', '证明:如果P是一个置换矩阵,则P是可逆矩阵,其逆矩阵是P^T,且P^T也是一个置换矩阵')
print('练习28.1-6 设A和B是n*n矩阵,且有AB=I.证明:如果把A的第j行加到第i行而得到A‘', '则可以通过把B的第j列减去第i列而获得A’的逆矩阵B‘')
print('练习28.1-7 设A是一个非奇异的n*n复数矩阵.证明:当且仅当A的每个元素都是实数时,', 'A-1的每个元素都是实数')
print('练习28.1-8 证明:如果A是一个n*n阶非奇异的对称矩阵,则A-1也是一个对称矩阵.', '证明:如果B是一个任意的m*n矩阵,则由乘积BAB^T给出的m*m矩阵是对称的')
print('练习28.1-9 证明定理28.2。亦即,证明如果矩阵A为列满秩当且仅当若Ax=0,则说明x=0')
print('练习28.1-10 证明:对任意两个相容矩阵A和B,rank(AB)<=min(rank(A),rank(B))', '其中等号仅当A或B是非奇异方阵时成立.(利用矩阵秩的另一种等价定义)')
print('练习28.1-11 已知数x0,x1,...,xn-1,证明范德蒙德(Vandermonde)矩阵的行列式表达式') |
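A short numpy check (an illustrative sketch, not part of the original chapter code) of three properties quoted in the 28.1 notes above: det(A) equals det(A^T), a square matrix is singular exactly when its determinant is 0, and A^T A is positive definite when A has full column rank.

```python
import numpy as np

A = np.array([[2.0, 1.0], [1.0, 3.0]])
print(np.isclose(np.linalg.det(A), np.linalg.det(A.T)))   # True: det(A) == det(A^T)

S = np.array([[1.0, 2.0], [2.0, 4.0]])                     # rank 1, hence singular
print(np.isclose(np.linalg.det(S), 0.0))                   # True

B = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])         # 3x2 with full column rank
print(np.all(np.linalg.eigvalsh(B.T @ B) > 0))             # True: B^T B is positive definite
```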
def note(self):
'\n Summary\n ====\n Print chapter28.2 note\n\n Example\n ====\n ```python\n Chapter28_2().note()\n ```\n '
print('chapter28.2 note as follow')
print('28.2 矩阵乘法的Strassen算法')
print('两个n*n矩阵乘积的著名的Strassen递归算法,其运行时间为Θ(n^lg7)=Θ(n^2.81)')
print('对足够大的n,该算法在性能上超过了在25.1节中介绍的运行时间为Θ(n^3)的简易矩阵乘法算法MATRIX-MULTIPLY')
print('算法概述')
print(' Strassen算法可以看作是熟知的一种设计技巧--分治法的一种应用')
print(' 假设希望计算乘积C=AB,其中A、B和C都是n*n方阵.假定n是2的幂,把A、B和C都划分为四个n/2*n/2矩阵')
print(' 然后作分块矩阵乘法,可以得到递归式T(n)=8T(n/2)+Θ(n^2),但是T(n)=Θ(n^3)')
print('Strassen发现了另外一种不同的递归方法,该方法只需要执行7次递归的n/2*n/2的矩阵乘法运算和Θ(n^2)次标量加法与减法运算')
print('从而可以得到递归式T(n)=7T(n/2)+Θ(n^2),但是T(n)=Θ(n^2.81)')
print('Strassen方法分为以下四个步骤')
print(' 1) 把输入矩阵A和B划分为n/2*n/2的子矩阵')
print(' 2) 运用Θ(n^2)次标量加法与减法运算,计算出14个n/2*n/2的矩阵A1,B1,A2,B2,...,A7,B7')
print(' 3) 递归计算出7个矩阵的乘积Pi=AiBi,i=1,2,...,7')
print(' 4) 仅使用Θ(n^2)次标量加法与减法运算,对Pi矩阵的各种组合进行求和或求差运算,', '从而获得结果矩阵C的四个子矩阵r,s,t,u')
print('从实用的观点看,Strassen方法通常不是矩阵乘法所选择的方法')
print(' 1) 在Strassen算法的运行时间中,隐含的常数因子比简单的Θ(n^3)方法中的常数因子要大')
print(' 2) 当矩阵是稀疏的时候,为系数矩阵设计的方法更快')
print(' 3) Strassen算法不像简单方法那样具有数值稳定性')
print(' 4) 在递归层次中生成的子矩阵要消耗空间')
print('练习28.2-1 运用Strassen算法计算矩阵的乘积')
print('矩阵的乘积为:')
print((np.matrix([[1, 3], [5, 7]]) * np.matrix([[8, 4], [6, 2]])))
print('练习28.2-2 如果n不是2的整数幂,应该如何修改Strassen算法,求出两个n*n矩阵的乘积', '证明修改后的算法的运行时间为Θ(n^lg7)')
print('练习28.2-3 如果使用k次乘法(假定乘法不满足交换律)就能计算出两个3*3矩阵的乘积', '就能在o(n^lg7)时间内计算出两个n*n矩阵的乘积,满足上述条件的最大的k值是多少')
print('练习28.2-4 V.Pan发现了一种使用132464次乘法的求68*68矩阵乘积的方法', '一种使用143640次乘法的求70*70矩阵乘积的方法', '一种使用155424次乘法的求72*72矩阵乘积的方法')
print('练习28.2-5 用Strassen算法算法作为子程序,能在多长时间内计算出一个kn*n矩阵与一个n*kn矩阵的乘积')
print('练习28.2-6 说明如何仅用三次实数乘法运算,就可以计复数a+bi与c+di的乘积.该算法应该把a,b,c和d作为输入,', '并分别生成实部ac-bd和虚部ad+bc的值') | -924,092,662,678,552,300 | Summary
====
Print chapter28.2 note
Example
====
```python
Chapter28_2().note()
``` | src/chapter28/chapter28note.py | note | HideLakitu/IntroductionToAlgorithm.Python | python | def note(self):
'\n Summary\n ====\n Print chapter28.2 note\n\n Example\n ====\n ```python\n Chapter28_2().note()\n ```\n '
print('chapter28.2 note as follow')
print('28.2 矩阵乘法的Strassen算法')
print('两个n*n矩阵乘积的著名的Strassen递归算法,其运行时间为Θ(n^lg7)=Θ(n^2.81)')
print('对足够大的n,该算法在性能上超过了在25.1节中介绍的运行时间为Θ(n^3)的简易矩阵乘法算法MATRIX-MULTIPLY')
print('算法概述')
print(' Strassen算法可以看作是熟知的一种设计技巧--分治法的一种应用')
print(' 假设希望计算乘积C=AB,其中A、B和C都是n*n方阵.假定n是2的幂,把A、B和C都划分为四个n/2*n/2矩阵')
print(' 然后作分块矩阵乘法,可以得到递归式T(n)=8T(n/2)+Θ(n^2),但是T(n)=Θ(n^3)')
print('Strassen发现了另外一种不同的递归方法,该方法只需要执行7次递归的n/2*n/2的矩阵乘法运算和Θ(n^2)次标量加法与减法运算')
print('从而可以得到递归式T(n)=7T(n/2)+Θ(n^2),但是T(n)=Θ(n^2.81)')
print('Strassen方法分为以下四个步骤')
print(' 1) 把输入矩阵A和B划分为n/2*n/2的子矩阵')
print(' 2) 运用Θ(n^2)次标量加法与减法运算,计算出14个n/2*n/2的矩阵A1,B1,A2,B2,...,A7,B7')
print(' 3) 递归计算出7个矩阵的乘积Pi=AiBi,i=1,2,...,7')
print(' 4) 仅使用Θ(n^2)次标量加法与减法运算,对Pi矩阵的各种组合进行求和或求差运算,', '从而获得结果矩阵C的四个子矩阵r,s,t,u')
print('从实用的观点看,Strassen方法通常不是矩阵乘法所选择的方法')
print(' 1) 在Strassen算法的运行时间中,隐含的常数因子比简单的Θ(n^3)方法中的常数因子要大')
print(' 2) 当矩阵是稀疏的时候,为系数矩阵设计的方法更快')
print(' 3) Strassen算法不像简单方法那样具有数值稳定性')
print(' 4) 在递归层次中生成的子矩阵要消耗空间')
print('练习28.2-1 运用Strassen算法计算矩阵的乘积')
print('矩阵的乘积为:')
print((np.matrix([[1, 3], [5, 7]]) * np.matrix([[8, 4], [6, 2]])))
print('练习28.2-2 如果n不是2的整数幂,应该如何修改Strassen算法,求出两个n*n矩阵的乘积', '证明修改后的算法的运行时间为Θ(n^lg7)')
print('练习28.2-3 如果使用k次乘法(假定乘法不满足交换律)就能计算出两个3*3矩阵的乘积', '就能在o(n^lg7)时间内计算出两个n*n矩阵的乘积,满足上述条件的最大的k值是多少')
print('练习28.2-4 V.Pan发现了一种使用132464次乘法的求68*68矩阵乘积的方法', '一种使用143640次乘法的求70*70矩阵乘积的方法', '一种使用155424次乘法的求72*72矩阵乘积的方法')
print('练习28.2-5 用Strassen算法算法作为子程序,能在多长时间内计算出一个kn*n矩阵与一个n*kn矩阵的乘积')
print('练习28.2-6 说明如何仅用三次实数乘法运算,就可以计复数a+bi与c+di的乘积.该算法应该把a,b,c和d作为输入,', '并分别生成实部ac-bd和虚部ad+bc的值') |
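An illustrative Strassen sketch (not from the original repository), assuming n is a power of two as in the notes above: it recurses on n/2 x n/2 blocks with seven multiplications and falls back to ordinary numpy multiplication below a cutoff.

```python
import numpy as np

def strassen(A, B, cutoff=64):
    n = A.shape[0]
    if n <= cutoff:
        return A @ B                                   # base case
    h = n // 2
    A11, A12, A21, A22 = A[:h, :h], A[:h, h:], A[h:, :h], A[h:, h:]
    B11, B12, B21, B22 = B[:h, :h], B[:h, h:], B[h:, :h], B[h:, h:]
    # the 7 recursive products P1..P7
    P1 = strassen(A11 + A22, B11 + B22, cutoff)
    P2 = strassen(A21 + A22, B11, cutoff)
    P3 = strassen(A11, B12 - B22, cutoff)
    P4 = strassen(A22, B21 - B11, cutoff)
    P5 = strassen(A11 + A12, B22, cutoff)
    P6 = strassen(A21 - A11, B11 + B12, cutoff)
    P7 = strassen(A12 - A22, B21 + B22, cutoff)
    # combine with Theta(n^2) additions/subtractions
    return np.block([[P1 + P4 - P5 + P7, P3 + P5],
                     [P2 + P4, P1 - P2 + P3 + P6]])

A = np.random.rand(128, 128)
B = np.random.rand(128, 128)
print(np.allclose(strassen(A, B), A @ B))              # True
```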
def note(self):
'\n Summary\n ====\n Print chapter28.3 note\n\n Example\n ====\n ```python\n Chapter28_3().note()\n ```\n '
print('chapter28.3 note as follow')
print('28.3 求解线性方程组')
print('对一组同时成立的线性方程组Ax=b求解时很多应用中都会出现的基本问题。一个线性系统可以表述为一个矩阵方程', '其中每个矩阵或者向量元素都属于一个域,如果实数域R')
print('LUP分解求解线性方程组')
print('LUP分解的思想就是找出三个n*n矩阵L,U和P,满足PA=LU')
print(' 其中L是一个单位下三角矩阵,U是一个上三角矩阵,P是一个置换矩阵')
print('每一个非奇异矩阵A都有这样一种分解')
print('对矩阵A进行LUP分解的优点是当相应矩阵为三角矩阵(如矩阵L和U),更容易求解线性系统')
print('在计算出A的LUP分解后,就可以用如下方式对三角线性系统进行求解,也就获得了Ax=b的解')
print('对Ax=b的两边同时乘以P,就得到等价的方程组PAx=Pb,得到LUx=Pb')
print('正向替换与逆向替换')
print(' 如果已知L,P和b,用正向替换可以在Θ(n^2)的时间内求解下三角线性系统', '用一个数组pi[1..n]来表示置换P')
print('LU分解的计算')
print(' 把执行LU分解的过程称为高斯消元法.先从其他方程中减去第一个方程的倍数', '以便把那些方程中的第一个变量消去')
print(' 继续上述过程,直至系统变为一个上三角矩阵形式,这个矩阵都是U.矩阵L是由使得变量被消去的行的乘数所组成')
print('LUP分解的计算')
print(' 一般情况下,为了求线性方程组Ax=b的解,必须在A的非对角线元素中选主元以避免除数为0', '除数不仅不能为0,也不能很小(即使A是非奇异的),否则就会在计算中导致数值不稳定.因此,所选的主元必须是一个较大的值')
print(' LUP分解的数学基础与LU分解相似。已知一个n*n非奇异矩阵A,并希望计算出一个置换矩阵P,一个单位下三角矩阵L和一个上三角矩阵U,并满足条件PA=LU')
print('练习28.3-1 运用正向替换法求解下列方程组')
print('练习28.3-2 求出下列矩阵的LU分解')
print('练习28.3-3 运用LUP分解来求解下列方程组')
print('练习28.3-4 试描述一个对角矩阵的LUP分解')
print('练习28.3-5 试描述一个置换矩阵A的LUP分解,并证明它是唯一的')
print('练习28.3-6 证明:对所有n>=1,存在具有LU分解的奇异的n*n矩阵')
print('练习28.3-7 在LU-DECOMPOSITION中,当k=n时是否有必要执行最外层的for循环迭代?', '在LUP-DECOMPOSITION中的情况又是怎样?') | -2,181,509,471,979,686,400 | Summary
====
Print chapter28.3 note
Example
====
```python
Chapter28_3().note()
``` | src/chapter28/chapter28note.py | note | HideLakitu/IntroductionToAlgorithm.Python | python | def note(self):
'\n Summary\n ====\n Print chapter28.3 note\n\n Example\n ====\n ```python\n Chapter28_3().note()\n ```\n '
print('chapter28.3 note as follow')
print('28.3 求解线性方程组')
print('对一组同时成立的线性方程组Ax=b求解时很多应用中都会出现的基本问题。一个线性系统可以表述为一个矩阵方程', '其中每个矩阵或者向量元素都属于一个域,如果实数域R')
print('LUP分解求解线性方程组')
print('LUP分解的思想就是找出三个n*n矩阵L,U和P,满足PA=LU')
print(' 其中L是一个单位下三角矩阵,U是一个上三角矩阵,P是一个置换矩阵')
print('每一个非奇异矩阵A都有这样一种分解')
print('对矩阵A进行LUP分解的优点是当相应矩阵为三角矩阵(如矩阵L和U),更容易求解线性系统')
print('在计算出A的LUP分解后,就可以用如下方式对三角线性系统进行求解,也就获得了Ax=b的解')
print('对Ax=b的两边同时乘以P,就得到等价的方程组PAx=Pb,得到LUx=Pb')
print('正向替换与逆向替换')
print(' 如果已知L,P和b,用正向替换可以在Θ(n^2)的时间内求解下三角线性系统', '用一个数组pi[1..n]来表示置换P')
print('LU分解的计算')
print(' 把执行LU分解的过程称为高斯消元法.先从其他方程中减去第一个方程的倍数', '以便把那些方程中的第一个变量消去')
print(' 继续上述过程,直至系统变为一个上三角矩阵形式,这个矩阵都是U.矩阵L是由使得变量被消去的行的乘数所组成')
print('LUP分解的计算')
print(' 一般情况下,为了求线性方程组Ax=b的解,必须在A的非对角线元素中选主元以避免除数为0', '除数不仅不能为0,也不能很小(即使A是非奇异的),否则就会在计算中导致数值不稳定.因此,所选的主元必须是一个较大的值')
print(' LUP分解的数学基础与LU分解相似。已知一个n*n非奇异矩阵A,并希望计算出一个置换矩阵P,一个单位下三角矩阵L和一个上三角矩阵U,并满足条件PA=LU')
print('练习28.3-1 运用正向替换法求解下列方程组')
print('练习28.3-2 求出下列矩阵的LU分解')
print('练习28.3-3 运用LUP分解来求解下列方程组')
print('练习28.3-4 试描述一个对角矩阵的LUP分解')
print('练习28.3-5 试描述一个置换矩阵A的LUP分解,并证明它是唯一的')
print('练习28.3-6 证明:对所有n>=1,存在具有LU分解的奇异的n*n矩阵')
print('练习28.3-7 在LU-DECOMPOSITION中,当k=n时是否有必要执行最外层的for循环迭代?', '在LUP-DECOMPOSITION中的情况又是怎样?') |
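An illustrative LUP-SOLVE sketch in numpy (not from the original repository), mirroring the forward/back substitution described in the 28.3 notes above: given PA = LU with the permutation stored as an index array pi, solve Ax = b in Theta(n^2).

```python
import numpy as np

def lup_solve(L, U, pi, b):
    n = len(b)
    y = np.zeros(n)
    x = np.zeros(n)
    for i in range(n):                                  # forward substitution: L y = P b
        y[i] = b[pi[i]] - L[i, :i] @ y[:i]
    for i in reversed(range(n)):                        # back substitution: U x = y
        x[i] = (y[i] - U[i, i + 1:] @ x[i + 1:]) / U[i, i]
    return x

# tiny check with a hand-built LU factorization and the identity permutation
A = np.array([[4.0, 3.0], [6.0, 3.0]])
L = np.array([[1.0, 0.0], [1.5, 1.0]])
U = np.array([[4.0, 3.0], [0.0, -1.5]])
b = np.array([10.0, 12.0])
x = lup_solve(L, U, [0, 1], b)
print(np.allclose(A @ x, b))                            # True
```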
def note(self):
'\n Summary\n ====\n Print chapter28.4 note\n\n Example\n ====\n ```python\n Chapter28_4().note()\n ```\n '
print('chapter28.4 note as follow')
print('28.4 矩阵求逆')
print('在实际应用中,一般并不使用逆矩阵来求解线性方程组的解,而是运用一些更具数值稳定性的技术,如LUP分解求解线性方程组')
print('但是,有时仍然需要计算一个矩阵的逆矩阵.可以利用LUP分解来计算逆矩阵')
print('此外,还将证明矩阵乘法和计算逆矩阵问题是具有相同难度的两个问题,即(在技术条件限制下)可以使用一个算法在相同渐进时间内解决另外一个问题')
print('可以使用Strassen矩阵乘法算法来求一个矩阵的逆')
print('确实,正是由于要证明可以用比通常的办法更快的算法来求解线性方程组,才推动了最初的Strassen算法的产生')
print('根据LUP分解计算逆矩阵')
print(' 假设有一个矩阵A的LUP分解,包括三个矩阵L,U,P,并满足PA=LU')
print(' 如果运用LU-SOLVE,则可以在Θ(n^2)的运行时间内,求出形如Ax=b的线性系统的解')
print(" 由于LUP分解仅取决于A而不取决于b,所以就能够再用Θ(n^2)的运行时间,求出形如Ax=b'的另一个线性方程组的解")
print(' 一般地,一旦得到了A的LUP分解,就可以在Θ(kn^2)的运行时间内,求出k个形如Ax=b的线性方程组的解,这k个方程组只有b不相同')
print('矩阵乘法与逆矩阵')
print(' 对矩阵乘法可以获得理论上的加速,可以相应地加速求逆矩阵的运算')
print(' 从下面的意义上说,求逆矩阵运算等价于矩阵乘法运算', '如果M(n)表示求两个n*n矩阵乘积所需要的时间,则有在O(M(n))时间内对一个n*n矩阵求逆的方法', '如果I(n)表示对一个非奇异的n*n矩阵求逆所需的时间,则有在O(I(n))时间内对两个n*n矩阵相乘的方法')
print('定理28.7 (矩阵乘法不比求逆矩阵困难) 如果能在I(n)时间内求出一个n*n矩阵的逆矩阵', '其中I(n)=Ω(n^2)且满足正则条件I(3n)=O(I(n))时间内求出两个n*n矩阵的乘积')
print('定理28.8 (求逆矩阵运算并不比矩阵乘法运算更困难) 如果能在M(n)的时间内计算出两个n*n实矩阵的乘积', '其中M(n)=Ω(n^2)且M(n)满足两个正则条件:对任意的0<=k<=n有M(n+k)=O(M(n)),以及对某个常数c<1/2有M(n/2)<=cM(n)', '则可以在O(M(n))时间内求出任何一个n*n非奇异实矩阵的逆矩阵')
print('练习28.4-1 设M(n)是求n*n矩阵的乘积所需的时间,S(n)表示求n*n矩阵的平方所需时间', '证明:求矩阵乘积运算与求矩阵平方运算实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的矩阵平方算法,', '一个S(n)时间的矩阵平方算法蕴含着一个O(S(n))时间的矩阵相乘算法')
print('练习28.4-2 设M(n)是求n*n矩阵乘积所需的时间,L(n)为计算一个n*n矩阵的LUP分解所需要的时间', '证明:求矩阵乘积运算与计算矩阵LUP分解实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的矩阵LUP分解算法', '一个L(n)时间的矩阵LUP分解算法蕴含着一个O(L(n))时间的矩阵相乘算法')
print('练习28.4-3 设M(n)是求n*n矩阵的乘积所需的时间,D(n)表示求n*n矩阵的行列式的值所需要的时间', '证明:求矩阵乘积运算与求行列式的值实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的行列式算法', '一个D(n)时间的行列式算法蕴含着一个O(D(n)时间的矩阵相乘算法')
print('练习28.4-4 设M(n)是求n*n布尔矩阵的乘积所需的时间,T(n)为找出n*n布尔矩阵的传递闭包所需要的时间', '证明:一个M(n)时间的布尔矩阵相乘算法蕴含着一个O(M(n)lgn)时间的传递闭包算法,一个T(n)时间的传递闭包算法蕴含着一个O(T(n))时间的布尔矩阵相乘算法')
print('练习28.4-5 当矩阵元素属于整数模2所构成的域时,基于定理28.8的求逆矩阵算法的是否能够运行?')
print('练习28.4-6 推广基于定理28.8的求逆矩阵算法,使之能处理复矩阵的情形,并证明所给出的推广方法是正确的')
print(' 提示:用A的共轭转置矩阵A*来代替A的转置矩阵A^T,把A^T中的每个元素用其共轭复数代替就得到A*,也就是Hermitian转置') | -5,879,448,951,198,253,000 | Summary
====
Print chapter28.4 note
Example
====
```python
Chapter28_4().note()
``` | src/chapter28/chapter28note.py | note | HideLakitu/IntroductionToAlgorithm.Python | python | def note(self):
'\n Summary\n ====\n Print chapter28.4 note\n\n Example\n ====\n ```python\n Chapter28_4().note()\n ```\n '
print('chapter28.4 note as follow')
print('28.4 矩阵求逆')
print('在实际应用中,一般并不使用逆矩阵来求解线性方程组的解,而是运用一些更具数值稳定性的技术,如LUP分解求解线性方程组')
print('但是,有时仍然需要计算一个矩阵的逆矩阵.可以利用LUP分解来计算逆矩阵')
print('此外,还将证明矩阵乘法和计算逆矩阵问题是具有相同难度的两个问题,即(在技术条件限制下)可以使用一个算法在相同渐进时间内解决另外一个问题')
print('可以使用Strassen矩阵乘法算法来求一个矩阵的逆')
print('确实,正是由于要证明可以用比通常的办法更快的算法来求解线性方程组,才推动了最初的Strassen算法的产生')
print('根据LUP分解计算逆矩阵')
print(' 假设有一个矩阵A的LUP分解,包括三个矩阵L,U,P,并满足PA=LU')
print(' 如果运用LU-SOLVE,则可以在Θ(n^2)的运行时间内,求出形如Ax=b的线性系统的解')
print(" 由于LUP分解仅取决于A而不取决于b,所以就能够再用Θ(n^2)的运行时间,求出形如Ax=b'的另一个线性方程组的解")
print(' 一般地,一旦得到了A的LUP分解,就可以在Θ(kn^2)的运行时间内,求出k个形如Ax=b的线性方程组的解,这k个方程组只有b不相同')
print('矩阵乘法与逆矩阵')
print(' 对矩阵乘法可以获得理论上的加速,可以相应地加速求逆矩阵的运算')
print(' 从下面的意义上说,求逆矩阵运算等价于矩阵乘法运算', '如果M(n)表示求两个n*n矩阵乘积所需要的时间,则有在O(M(n))时间内对一个n*n矩阵求逆的方法', '如果I(n)表示对一个非奇异的n*n矩阵求逆所需的时间,则有在O(I(n))时间内对两个n*n矩阵相乘的方法')
print('定理28.7 (矩阵乘法不比求逆矩阵困难) 如果能在I(n)时间内求出一个n*n矩阵的逆矩阵', '其中I(n)=Ω(n^2)且满足正则条件I(3n)=O(I(n))时间内求出两个n*n矩阵的乘积')
print('定理28.8 (求逆矩阵运算并不比矩阵乘法运算更困难) 如果能在M(n)的时间内计算出两个n*n实矩阵的乘积', '其中M(n)=Ω(n^2)且M(n)满足两个正则条件:对任意的0<=k<=n有M(n+k)=O(M(n)),以及对某个常数c<1/2有M(n/2)<=cM(n)', '则可以在O(M(n))时间内求出任何一个n*n非奇异实矩阵的逆矩阵')
print('练习28.4-1 设M(n)是求n*n矩阵的乘积所需的时间,S(n)表示求n*n矩阵的平方所需时间', '证明:求矩阵乘积运算与求矩阵平方运算实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的矩阵平方算法,', '一个S(n)时间的矩阵平方算法蕴含着一个O(S(n))时间的矩阵相乘算法')
print('练习28.4-2 设M(n)是求n*n矩阵乘积所需的时间,L(n)为计算一个n*n矩阵的LUP分解所需要的时间', '证明:求矩阵乘积运算与计算矩阵LUP分解实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的矩阵LUP分解算法', '一个L(n)时间的矩阵LUP分解算法蕴含着一个O(L(n))时间的矩阵相乘算法')
print('练习28.4-3 设M(n)是求n*n矩阵的乘积所需的时间,D(n)表示求n*n矩阵的行列式的值所需要的时间', '证明:求矩阵乘积运算与求行列式的值实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的行列式算法', '一个D(n)时间的行列式算法蕴含着一个O(D(n)时间的矩阵相乘算法')
print('练习28.4-4 设M(n)是求n*n布尔矩阵的乘积所需的时间,T(n)为找出n*n布尔矩阵的传递闭包所需要的时间', '证明:一个M(n)时间的布尔矩阵相乘算法蕴含着一个O(M(n)lgn)时间的传递闭包算法,一个T(n)时间的传递闭包算法蕴含着一个O(T(n))时间的布尔矩阵相乘算法')
print('练习28.4-5 当矩阵元素属于整数模2所构成的域时,基于定理28.8的求逆矩阵算法的是否能够运行?')
print('练习28.4-6 推广基于定理28.8的求逆矩阵算法,使之能处理复矩阵的情形,并证明所给出的推广方法是正确的')
print(' 提示:用A的共轭转置矩阵A*来代替A的转置矩阵A^T,把A^T中的每个元素用其共轭复数代替就得到A*,也就是Hermitian转置') |
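A sketch of the "inverse via LUP" idea from the 28.4 notes above (illustrative, not from the original repository): factor A once, then solve Ax = e_i for each unit vector e_i, and the solutions are the columns of A^-1. SciPy's LU helpers are used here as an assumed stand-in for the chapter's LUP routines.

```python
import numpy as np
from scipy.linalg import lu_factor, lu_solve

def inverse_via_lup(A):
    n = A.shape[0]
    lu_piv = lu_factor(A)                     # one O(n^3) LUP factorization
    inv = np.empty((n, n))
    for i in range(n):
        e = np.zeros(n)
        e[i] = 1.0                            # i-th unit vector
        inv[:, i] = lu_solve(lu_piv, e)       # each solve costs Theta(n^2)
    return inv

A = np.array([[4.0, 7.0], [2.0, 6.0]])
print(np.allclose(inverse_via_lup(A) @ A, np.eye(2)))   # True
```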
def note(self):
'\n Summary\n ====\n Print chapter28.5 note\n\n Example\n ====\n ```python\n Chapter28_5().note()\n ```\n '
print('chapter28.5 note as follow')
print('28.5 对称正定矩阵与最小二乘逼近')
print('对称正定矩阵有许多有趣而很理想的性质。例如,它们都是非奇异矩阵,并且可以对其进行LU分解而无需担心出现除数为0的情况')
print('引理28.9 任意对称矩阵都是非奇异矩阵')
print('引理28.10 如果A是一个对称正定矩阵,则A的每一个主子式都是对称正定的')
print('设A是一个对称正定矩阵,Ak是A的k*k主子式,矩阵A关于Ak的Schur补定义为S=C-BAk^-1B^T')
print('引理28.11 (Schur补定理) 如果A是一个对称正定矩阵,Ak是A的k*k主子式.则A关于Ak的Schur补也是对称正定的')
print('推论28.12 对一个对称正定矩阵进行LU分解不会出现除数为0的情形')
print('最小二乘逼近')
print('对给定一组数据的点进行曲线拟合是对称正定矩阵的一个重要应用,假定给定m个数据点(x1,y1),(x2,y2),...,(xm,ym)', '其中已知yi受到测量误差的影响。希望找出一个函数F(x),满足对i=1,2,...,m,有yi=F(xi)+qi')
print('其中近似误差qi是很小的,函数F(x)的形式依赖于所遇到的问题,在此,假定它的形式为线性加权和F(x)=∑cifi(x)')
print('其中和项的个数和特定的基函数fi取决于对问题的了解,一种选择是fi(x)=x^j-1,这说明F(x)是一个x的n-1次多项式')
print('这样一个高次函数F尽管容易处理数据,但也容易对数据产生干扰,并且一般在对未预见到的x预测其相应的y值时,其精确性也是很差的')
print('为了使逼近误差最小,选定使误差向量q的范数最小,就得到一个最小二乘解')
print('统计学中正态方程A^TAc=A^Ty')
print('伪逆矩阵A+=(A^TA)^-1A^T')
print('练习28.5-1 证明:对称正定矩阵的对角线上每一个元素都是正值')
print('练习28.5-2 设A=[[a,b],[b,c]]是一个2*2对称正定矩阵,证明其行列式的值ac-b^2是正的')
print('练习28.5-3 证明:一个对称正定矩阵中值最大的元素处于其对角线上')
print('练习28.5-4 证明:一个对称正定矩阵的每一个主子式的行列式的值都是正的')
print('练习28.5-5 设Ak表示对称正定矩阵A的第k个主子式。证明在LU分解中,det(Ak)/det(Ak-1)是第k个主元,为方便起见,设det(A0)=1')
print('练习28.5-6 最小二乘法求')
print('练习28.5-7 证明:伪逆矩阵A+满足下列四个等式:')
print(' AA^+A=A')
print(' A^+AA^+=A^+')
print(' (AA^+)^T=AA^+')
print(' (A^+A)^T=A^+A')
print('思考题28-1 三对角线性方程组')
print(' 1) 证明:对任意的n*n对称正定的三对角矩阵和任意n维向量b,通过进行LU分解可以在O(n)的时间内求出方程Ax=b的解', '论证在最坏情况下,从渐进意义上看,基于求出A^-1的任何方法都要花费更多的时间')
print(' 2) 证明:对任意的n*n对称正定的三对角矩阵和任意n维向量b,通过进行LUP分解,', '可以在O(n)的时间内求出方程Ax=b的解')
print('思考题28-2 三次样条插值')
print(' 将一个曲线拟合为n个三次多项式组成')
print(' 用自然三次样条可以在O(n)时间内对一组n+1个点-值对进行插值') | 1,030,241,814,436,185,900 | Summary
====
Print chapter28.5 note
Example
====
```python
Chapter28_5().note()
``` | src/chapter28/chapter28note.py | note | HideLakitu/IntroductionToAlgorithm.Python | python | def note(self):
'\n Summary\n ====\n Print chapter28.5 note\n\n Example\n ====\n ```python\n Chapter28_5().note()\n ```\n '
print('chapter28.5 note as follow')
print('28.5 对称正定矩阵与最小二乘逼近')
print('对称正定矩阵有许多有趣而很理想的性质。例如,它们都是非奇异矩阵,并且可以对其进行LU分解而无需担心出现除数为0的情况')
print('引理28.9 任意对称矩阵都是非奇异矩阵')
print('引理28.10 如果A是一个对称正定矩阵,则A的每一个主子式都是对称正定的')
print('设A是一个对称正定矩阵,Ak是A的k*k主子式,矩阵A关于Ak的Schur补定义为S=C-BAk^-1B^T')
print('引理28.11 (Schur补定理) 如果A是一个对称正定矩阵,Ak是A的k*k主子式.则A关于Ak的Schur补也是对称正定的')
print('推论28.12 对一个对称正定矩阵进行LU分解不会出现除数为0的情形')
print('最小二乘逼近')
print('对给定一组数据的点进行曲线拟合是对称正定矩阵的一个重要应用,假定给定m个数据点(x1,y1),(x2,y2),...,(xm,ym)', '其中已知yi受到测量误差的影响。希望找出一个函数F(x),满足对i=1,2,...,m,有yi=F(xi)+qi')
print('其中近似误差qi是很小的,函数F(x)的形式依赖于所遇到的问题,在此,假定它的形式为线性加权和F(x)=∑cifi(x)')
print('其中和项的个数和特定的基函数fi取决于对问题的了解,一种选择是fi(x)=x^j-1,这说明F(x)是一个x的n-1次多项式')
print('这样一个高次函数F尽管容易处理数据,但也容易对数据产生干扰,并且一般在对未预见到的x预测其相应的y值时,其精确性也是很差的')
print('为了使逼近误差最小,选定使误差向量q的范数最小,就得到一个最小二乘解')
print('统计学中正态方程A^TAc=A^Ty')
print('伪逆矩阵A+=(A^TA)^-1A^T')
print('练习28.5-1 证明:对称正定矩阵的对角线上每一个元素都是正值')
print('练习28.5-2 设A=[[a,b],[b,c]]是一个2*2对称正定矩阵,证明其行列式的值ac-b^2是正的')
print('练习28.5-3 证明:一个对称正定矩阵中值最大的元素处于其对角线上')
print('练习28.5-4 证明:一个对称正定矩阵的每一个主子式的行列式的值都是正的')
print('练习28.5-5 设Ak表示对称正定矩阵A的第k个主子式。证明在LU分解中,det(Ak)/det(Ak-1)是第k个主元,为方便起见,设det(A0)=1')
print('练习28.5-6 最小二乘法求')
print('练习28.5-7 证明:伪逆矩阵A+满足下列四个等式:')
print(' AA^+A=A')
print(' A^+AA^+=A^+')
print(' (AA^+)^T=AA^+')
print(' (A^+A)^T=A^+A')
print('思考题28-1 三对角线性方程组')
print(' 1) 证明:对任意的n*n对称正定的三对角矩阵和任意n维向量b,通过进行LU分解可以在O(n)的时间内求出方程Ax=b的解', '论证在最坏情况下,从渐进意义上看,基于求出A^-1的任何方法都要花费更多的时间')
print(' 2) 证明:对任意的n*n对称正定的三对角矩阵和任意n维向量b,通过进行LUP分解,', '可以在O(n)的时间内求出方程Ax=b的解')
print('思考题28-2 三次样条插值')
print(' 将一个曲线拟合为n个三次多项式组成')
print(' 用自然三次样条可以在O(n)时间内对一组n+1个点-值对进行插值') |
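An illustrative least-squares sketch (not from the original repository) for the 28.5 notes above: fit F(x) = c1 + c2*x to invented noisy points by solving the normal equations A^T A c = A^T y, then compare with numpy's built-in least-squares routine.

```python
import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = np.array([0.1, 1.9, 4.2, 5.8, 8.1])          # roughly y = 2x plus noise

A = np.vstack([np.ones_like(x), x]).T             # basis functions f_j(x) = x^(j-1)
c = np.linalg.solve(A.T @ A, A.T @ y)             # normal equations A^T A c = A^T y
print(c)                                          # approx [intercept, slope]

c_lstsq, *_ = np.linalg.lstsq(A, y, rcond=None)   # same fit via the pseudoinverse route
print(np.allclose(c, c_lstsq))                    # True
```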
def readin():
'Reading from stdin and displaying menu'
global positionrequest, ptz
selection = sys.stdin.readline().strip('\n')
lov = [x for x in selection.split(' ') if (x != '')]
if lov:
if (lov[0].lower() in ['u', 'up']):
move_up(ptz, positionrequest)
elif (lov[0].lower() in ['d', 'do', 'dow', 'down']):
move_down(ptz, positionrequest)
elif (lov[0].lower() in ['l', 'le', 'lef', 'left']):
move_left(ptz, positionrequest)
elif (lov[0].lower() in ['r', 'ri', 'rig', 'righ', 'right']):
move_right(ptz, positionrequest)
elif (lov[0].lower() in ['ul']):
move_upleft(ptz, positionrequest)
elif (lov[0].lower() in ['ur']):
move_upright(ptz, positionrequest)
elif (lov[0].lower() in ['dl']):
move_downleft(ptz, positionrequest)
elif (lov[0].lower() in ['dr']):
move_downright(ptz, positionrequest)
elif (lov[0].lower() in ['s', 'st', 'sto', 'stop']):
ptz.Stop({'ProfileToken': positionrequest.ProfileToken})
active = False
else:
print("What are you asking?\tI only know, 'up','down','left','right', 'ul' (up left), \n\t\t\t'ur' (up right), 'dl' (down left), 'dr' (down right) and 'stop'")
print('')
print('Your command: ', end='', flush=True) | -3,910,166,043,833,411,000 | Reading from stdin and displaying menu | examples/AbsoluteMove.py | readin | intflow/python-onvif-zeep | python | def readin():
global positionrequest, ptz
selection = sys.stdin.readline().strip('\n')
lov = [x for x in selection.split(' ') if (x != '')]
if lov:
if (lov[0].lower() in ['u', 'up']):
move_up(ptz, positionrequest)
elif (lov[0].lower() in ['d', 'do', 'dow', 'down']):
move_down(ptz, positionrequest)
elif (lov[0].lower() in ['l', 'le', 'lef', 'left']):
move_left(ptz, positionrequest)
elif (lov[0].lower() in ['r', 'ri', 'rig', 'righ', 'right']):
move_right(ptz, positionrequest)
elif (lov[0].lower() in ['ul']):
move_upleft(ptz, positionrequest)
elif (lov[0].lower() in ['ur']):
move_upright(ptz, positionrequest)
elif (lov[0].lower() in ['dl']):
move_downleft(ptz, positionrequest)
elif (lov[0].lower() in ['dr']):
move_downright(ptz, positionrequest)
elif (lov[0].lower() in ['s', 'st', 'sto', 'stop']):
ptz.Stop({'ProfileToken': positionrequest.ProfileToken})
active = False
else:
print("What are you asking?\tI only know, 'up','down','left','right', 'ul' (up left), \n\t\t\t'ur' (up right), 'dl' (down left), 'dr' (down right) and 'stop'")
print()
print('Your command: ', end=, flush=True) |
def create_inactive_user(self, form):
'\n Create the inactive user account and send an email containing\n activation instructions.\n\n '
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
self.send_activation_email(new_user)
return new_user | 3,215,324,053,063,257,000 | Create the inactive user account and send an email containing
activation instructions. | polyaxon/api/users/views.py | create_inactive_user | AntoineToubhans/polyaxon | python | def create_inactive_user(self, form):
'\n Create the inactive user account and send an email containing\n activation instructions.\n\n '
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
self.send_activation_email(new_user)
return new_user |
def get_activation_key(self, user):
'\n Generate the activation key which will be emailed to the user.\n\n '
return signing.dumps(obj=getattr(user, user.USERNAME_FIELD), salt=self.key_salt) | 6,403,821,274,535,462,000 | Generate the activation key which will be emailed to the user. | polyaxon/api/users/views.py | get_activation_key | AntoineToubhans/polyaxon | python | def get_activation_key(self, user):
'\n \n\n '
return signing.dumps(obj=getattr(user, user.USERNAME_FIELD), salt=self.key_salt) |
def get_email_context(self, activation_key):
'\n Build the template context used for the activation email.\n\n '
return {'activation_key': activation_key, 'expiration_days': conf.get('ACCOUNT_ACTIVATION_DAYS'), 'site': get_current_site(self.request)} | -3,627,736,984,849,235,500 | Build the template context used for the activation email. | polyaxon/api/users/views.py | get_email_context | AntoineToubhans/polyaxon | python | def get_email_context(self, activation_key):
'\n \n\n '
return {'activation_key': activation_key, 'expiration_days': conf.get('ACCOUNT_ACTIVATION_DAYS'), 'site': get_current_site(self.request)} |
def send_activation_email(self, user):
'\n Send the activation email. The activation key is the username,\n signed using TimestampSigner.\n\n '
activation_key = self.get_activation_key(user)
context = self.get_email_context(activation_key)
context.update({'user': user})
subject = render_to_string(self.email_subject_template, context)
subject = ''.join(subject.splitlines())
message = render_to_string(self.email_body_template, context)
user.email_user(subject, message, conf.get('DEFAULT_FROM_EMAIL')) | -7,646,028,755,603,774,000 | Send the activation email. The activation key is the username,
signed using TimestampSigner. | polyaxon/api/users/views.py | send_activation_email | AntoineToubhans/polyaxon | python | def send_activation_email(self, user):
'\n Send the activation email. The activation key is the username,\n signed using TimestampSigner.\n\n '
activation_key = self.get_activation_key(user)
context = self.get_email_context(activation_key)
context.update({'user': user})
subject = render_to_string(self.email_subject_template, context)
subject = .join(subject.splitlines())
message = render_to_string(self.email_body_template, context)
user.email_user(subject, message, conf.get('DEFAULT_FROM_EMAIL')) |
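A hedged sketch (not part of the original project) of how the activation key produced by get_activation_key above could be verified on the activation side: django's signing.loads re-checks the signature and the timestamp. The salt value and the 7-day window are assumptions for illustration and must match what was used when signing.

```python
from django.core import signing

REGISTRATION_SALT = "registration"       # assumption: must equal the view's key_salt

def validate_activation_key(activation_key, max_age_days=7):
    # returns the signed username, or None if the key is tampered with or expired
    try:
        return signing.loads(
            activation_key,
            salt=REGISTRATION_SALT,
            max_age=max_age_days * 86400,
        )
    except signing.BadSignature:          # SignatureExpired subclasses BadSignature
        return None
```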
def create_inactive_user(self, form):
'Create the inactive user account and wait for validation from superuser'
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
return new_user | 5,030,324,730,589,924,000 | Create the inactive user account and wait for validation from superuser | polyaxon/api/users/views.py | create_inactive_user | AntoineToubhans/polyaxon | python | def create_inactive_user(self, form):
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
return new_user |