Code
stringlengths 103
85.9k
| Summary
listlengths 0
94
|
---|---|
def build_and_submit_jobs(root_dir, jobs, sgeargs=None):
    """Submit the passed Job object(s) to SGE, placing SGE output in root_dir.

    - root_dir   Root directory for SGE and job output
    - jobs       A Job/JobGroup, or a list of them, to be submitted
    - sgeargs    Additional arguments passed through to qsub
    """
    # Accept a single JobGroup/Job as well as a list, for convenience.
    jobs = jobs if isinstance(jobs, list) else [jobs]
    build_directories(root_dir)        # create the required directory tree
    build_job_scripts(root_dir, jobs)  # write one script per job
    submit_jobs(root_dir, jobs, sgeargs)
"Submits the passed iterable of Job objects to SGE, placing SGE's\n output in the passed root directory\n\n - root_dir Root directory for SGE and job output\n - jobs List of Job objects, describing each job to be submitted\n - sgeargs Additional arguments to qsub\n "
] |
def params_mpl(df):
    """Return a dict of matplotlib heatmap parameters, keyed by output name.

    Each value is a (colormap name, vmin, vmax) tuple.  Absolute measures
    (alignment lengths, similarity errors) scale to the dataframe's own
    min/max; fractional measures are fixed to the [0, 1] range.
    """
    data_range = ('afmhot', df.values.min(), df.values.max())
    identity = ('spbnd_BuRd', 0, 1)
    coverage = ('BuRd', 0, 1)
    hadamard = ('hadamard_BuRd', 0, 1)
    params = {'TETRA_correlations': identity}
    for method in ('ANIb', 'ANIm', 'ANIblastall'):
        params[method + '_alignment_lengths'] = data_range
        params[method + '_percentage_identity'] = identity
        params[method + '_alignment_coverage'] = coverage
        params[method + '_hadamard'] = hadamard
        params[method + '_similarity_errors'] = data_range
    return params
"Returns dict of matplotlib parameters, dependent on dataframe."
] |
def download_file(fname, target_dir=None, force=False):
    """Download fname from datasets_url into target_dir and return its path.

    The download is skipped when the file already exists, unless ``force``
    is true.  ``target_dir`` defaults to a temporary directory.
    """
    directory = target_dir or temporary_dir()
    destination = os.path.join(directory, fname)
    if not force and os.path.isfile(destination):
        return destination
    urlretrieve(urljoin(datasets_url, fname), destination)
    return destination
"Download fname from the datasets_url, and save it to target_dir,\n unless the file already exists, and force is False.\n\n Parameters\n ----------\n fname : str\n Name of the file to download\n\n target_dir : str\n Directory where to store the file\n\n force : bool\n Force downloading the file, if it already exists\n\n Returns\n -------\n fname : str\n Full path of the downloaded file\n "
] |
def parse_idx(fd):
    """Parse an IDX file from open file object ``fd`` into a numpy array.

    Raises IdxDecodeError when the header, type byte, or item count is
    inconsistent with the IDX format.
    """
    # IDX type byte -> matching array.array type code.
    type_codes = {0x08: 'B',   # unsigned byte
                  0x09: 'b',   # signed byte
                  0x0b: 'h',   # short (2 bytes)
                  0x0c: 'i',   # int (4 bytes)
                  0x0d: 'f',   # float (4 bytes)
                  0x0e: 'd'}   # double (8 bytes)
    header = fd.read(4)
    if len(header) != 4:
        raise IdxDecodeError('Invalid IDX file, '
                             'file empty or does not contain a full header.')
    zeros, type_byte, ndim = struct.unpack('>HBB', header)
    if zeros != 0:
        raise IdxDecodeError('Invalid IDX file, '
                             'file must start with two zero bytes. '
                             'Found 0x%02x' % zeros)
    if type_byte not in type_codes:
        raise IdxDecodeError('Unknown data type '
                             '0x%02x in IDX file' % type_byte)
    shape = struct.unpack('>' + 'I' * ndim, fd.read(4 * ndim))
    payload = array.array(type_codes[type_byte], fd.read())
    # IDX data is big-endian; array.array read it as little endian.
    payload.byteswap()
    expected = functools.reduce(operator.mul, shape)
    if len(payload) != expected:
        raise IdxDecodeError('IDX file has wrong number of items. '
                             'Expected: %d. Found: %d' % (expected,
                                                          len(payload)))
    return numpy.array(payload).reshape(shape)
"Parse an IDX file, and return it as a numpy array.\n\n Parameters\n ----------\n fd : file\n File descriptor of the IDX file to parse\n\n endian : str\n Byte order of the IDX file. See [1] for available options\n\n Returns\n -------\n data : numpy.ndarray\n Numpy array with the dimensions and the data in the IDX file\n\n 1. https://docs.python.org/3/library/struct.html\n #byte-order-size-and-alignment\n "
] |
def download_and_parse_mnist_file(fname, target_dir=None, force=False):
    """Download the IDX file ``fname`` and return it parsed as a numpy array.

    Gzipped files (``.gz`` extension) are decompressed transparently.
    """
    local_path = download_file(fname, target_dir=target_dir, force=force)
    _, extension = os.path.splitext(local_path)
    opener = gzip.open if extension == '.gz' else open
    with opener(local_path, 'rb') as handle:
        return parse_idx(handle)
"Download the IDX file named fname from the URL specified in dataset_url\n and return it as a numpy array.\n\n Parameters\n ----------\n fname : str\n File name to download and parse\n\n target_dir : str\n Directory where to store the file\n\n force : bool\n Force downloading the file, if it already exists\n\n Returns\n -------\n data : numpy.ndarray\n Numpy array with the dimensions and the data in the IDX file\n "
] |
def _transactions(self):
    "list[Transaction]: The current stack of Transactions."
    # Lazily create the stack on the state object the first time through.
    stack = getattr(self._state, "transactions", None)
    if stack is None:
        stack = self._state.transactions = []
    return stack
def transactional(*, adapter=None, retries=3, propagation=Transaction.Propagation.Nested):
    """Decorate a function so its operations run inside a Datastore transaction.

    Parameters:
      adapter(Adapter, optional): Adapter used to run the transaction.
        Defaults to the current adapter at call time.
      retries(int, optional): Number of retries after the first attempt
        when the transaction cannot be committed.
      propagation(Transaction.Propagation, optional): Propagation strategy.

    Raises:
      anom.RetriesExceeded: When the retries are exhausted.

    Returns:
      callable: The decorated function.
    """
    def decorator(fn):
        @wraps(fn)
        def inner(*args, **kwargs):
            # Resolve the adapter on every call.  The original rebound the
            # closure variable via ``nonlocal adapter``, which cached the
            # first resolved adapter forever and ignored any later
            # get_adapter() changes.
            current_adapter = adapter or get_adapter()
            attempts, cause = 0, None
            while attempts <= retries:  # 1 initial attempt + `retries` retries
                attempts += 1
                transaction = current_adapter.transaction(propagation)
                try:
                    transaction.begin()
                    res = fn(*args, **kwargs)
                    transaction.commit()
                    return res
                except TransactionFailed as e:
                    cause = e
                    continue
                except Exception:
                    transaction.rollback()
                    raise  # bare raise preserves the original traceback
                finally:
                    transaction.end()
            raise RetriesExceeded(cause)
        return inner
    return decorator
"Decorates functions so that all of their operations (except for\n queries) run inside a Datastore transaction.\n\n Parameters:\n adapter(Adapter, optional): The Adapter to use when running the\n transaction. Defaults to the current adapter.\n retries(int, optional): The number of times to retry the\n transaction if it couldn't be committed.\n propagation(Transaction.Propagation, optional): The propagation\n strategy to use. By default, transactions are nested, but you\n can force certain transactions to always run independently.\n\n Raises:\n anom.RetriesExceeded: When the decorator runbs out of retries\n while trying to commit the transaction.\n\n Returns:\n callable: The decorated function.\n "
] |
def batch_size(self):
    """int: The number of results fetched per batch, clamped to ``limit``
    when a smaller limit is set."""
    size = self.get("batch_size", DEFAULT_BATCH_SIZE)
    limit = self.limit
    return size if limit is None else min(limit, size)
"int: The number of results to fetch per batch. Clamped to\n limit if limit is set and is smaller than the given batch\n size.\n "
] |
def fetch_next_page(self):
    """Fetch the next Page of results.

    Returns:
        Page: The next page, or an empty Page when exhausted.
    """
    pages = iter(self)
    try:
        return next(pages)
    except StopIteration:
        return Page(self._resultset.cursor, iter(()))
"Fetch the next Page of results.\n\n Returns:\n Page: The next page of results.\n "
] |
def count(self, *, page_size=DEFAULT_BATCH_SIZE, **options):
    """Count the entities matching this query.

    Datastore has no native count-by-query, so this pages over a
    keys-only projection and counts the keys.

    Returns:
        int: The number of entities.
    """
    # NOTE(review): incoming **options are replaced by a fresh keys-only
    # QueryOptions built from the query -- mirrors delete(); confirm intended.
    opts = QueryOptions(self).replace(keys_only=True)
    return sum(len(list(page))
               for page in self.paginate(page_size=page_size, **opts))
"Counts the number of entities that match this query.\n\n Note:\n Since Datastore doesn't provide a native way to count\n entities by query, this method paginates through all the\n entities' keys and counts them.\n\n Parameters:\n \\**options(QueryOptions, optional)\n\n Returns:\n int: The number of entities.\n "
] |
def delete(self, *, page_size=DEFAULT_BATCH_SIZE, **options):
    """Delete all entities matching this query.

    Pages over a keys-only projection and issues one delete_multi call
    per page (Datastore has no native delete-by-query).

    Returns:
        int: The number of deleted entities.
    """
    from .model import delete_multi
    opts = QueryOptions(self).replace(keys_only=True)
    total = 0
    for page in self.paginate(page_size=page_size, **opts):
        batch = list(page)
        delete_multi(batch)
        total += len(batch)
    return total
"Deletes all the entities that match this query.\n\n Note:\n Since Datasotre doesn't provide a native way to delete\n entities by query, this method paginates through all the\n entities' keys and issues a single delete_multi call per\n page.\n\n Parameters:\n \\**options(QueryOptions, optional)\n\n Returns:\n int: The number of deleted entities.\n "
] |
def get(self, **options):
    """Run this query and return the first result, or None.

    Returns:
        Model: An entity or None if there were no results.
    """
    sub_query = self.with_limit(1)
    opts = QueryOptions(sub_query).replace(batch_size=1)
    return next(iter(sub_query.run(**opts)), None)
"Run this query and get the first result.\n\n Parameters:\n \\**options(QueryOptions, optional)\n\n Returns:\n Model: An entity or None if there were no results.\n "
] |
def paginate(self, *, page_size, **options):
    """Run this query and return a page iterator.

    Parameters:
        page_size(int): Entities fetched per page.

    Returns:
        Pages: An iterator over this query's pages of results.
    """
    prepared = self._prepare()
    opts = QueryOptions(self, **options)
    return Pages(prepared, page_size, opts)
"Run this query and return a page iterator.\n\n Parameters:\n page_size(int): The number of entities to fetch per page.\n \\**options(QueryOptions, optional)\n\n Returns:\n Pages: An iterator for this query's pages of results.\n "
] |
def is_none(entity, prop, name):
    "bool: True if the value of a property is None."
    if not is_not_empty(entity, prop, name):
        return False
    return getattr(entity, name) is None
def is_not_none(entity, prop, name):
    "bool: True if the value of a property is not None."
    if not is_not_empty(entity, prop, name):
        return False
    if name not in entity._data:
        return False
    return getattr(entity, name) is not None
def is_true(entity, prop, name):
    "bool: True if the value of a property is True."
    if not is_not_empty(entity, prop, name):
        return False
    if name not in entity._data:
        return False
    return bool(getattr(entity, name))
def is_false(entity, prop, name):
    "bool: True if the value of a property is False."
    if not is_not_empty(entity, prop, name):
        return False
    if name not in entity._data:
        return False
    return not bool(getattr(entity, name))
def namespace(namespace):
    """Context manager that stacks the thread-local default namespace.

    On exit the previous namespace is restored; if none was set, the
    thread-local namespace is cleared (set back to None).

    Parameters:
        namespace(str): Namespace to set as the thread-local default.
    """
    previous = getattr(_namespace, "current", None)
    set_namespace(namespace)
    try:
        yield
    finally:
        set_namespace(previous)
"Context manager for stacking the current thread-local default\n namespace. Exiting the context sets the thread-local default\n namespace back to the previously-set namespace. If there is no\n previous namespace, then the thread-local namespace is cleared.\n\n Example:\n >>> with namespace(\"foo\"):\n ... with namespace(\"bar\"):\n ... assert get_namespace() == \"bar\"\n ... assert get_namespace() == \"foo\"\n\n >>> assert get_namespace() == \"\"\n\n Parameters:\n namespace(str): namespace to set as the current thread-local\n default.\n\n Returns:\n None\n "
] |
def lookup_model_by_kind(kind):
    """Look up the model class registered for a Datastore kind.

    Raises:
        RuntimeError: If no model for the given kind has been defined.

    Returns:
        model: The model class.
    """
    registered = _known_models.get(kind)
    if registered is None:
        raise RuntimeError(f"Model for kind {kind!r} not found.")
    return registered
"Look up the model instance for a given Datastore kind.\n\n Parameters:\n kind(str)\n\n Raises:\n RuntimeError: If a model for the given kind has not been\n defined.\n\n Returns:\n model: The model class.\n "
] |
def delete_multi(keys):
    """Delete the entities for the given keys from Datastore.

    Uses the adapter of the first key's model; a real list is required
    because the keys are iterated more than once.

    Raises:
        RuntimeError: If any key is partial or its kind has no model.
    """
    if not keys:
        return
    adapter = None
    for key in keys:
        if key.is_partial:
            raise RuntimeError(f"Key {key!r} is partial.")
        model = lookup_model_by_kind(key.kind)
        if adapter is None:
            adapter = model._adapter
        model.pre_delete_hook(key)
    adapter.delete_multi(keys)
    for key in keys:
        # All kinds were proven registered above, so hit the registry
        # directly instead of going through lookup_model_by_kind again.
        _known_models[key.kind].post_delete_hook(key)
"Delete a set of entitites from Datastore by their\n respective keys.\n\n Note:\n This uses the adapter that is tied to the first model in the list.\n If the keys have disparate adapters this function may behave in\n unexpected ways.\n\n Warning:\n You must pass a **list** and not a generator or some other kind\n of iterable to this function as it has to iterate over the list\n of keys multiple times.\n\n Parameters:\n keys(list[anom.Key]): The list of keys whose entities to delete.\n\n Raises:\n RuntimeError: If the given set of keys have models that use\n a disparate set of adapters or if any of the keys are\n partial.\n "
] |
def get_multi(keys):
    """Fetch the entities for the given keys from Datastore.

    Uses the adapter of the first key's model; a real list is required
    because the keys are iterated more than once.

    Raises:
        RuntimeError: If any key is partial or its kind has no model.

    Returns:
        list[Model]: Entities in input-key order; missing entities are None.
    """
    if not keys:
        return []
    adapter = None
    for key in keys:
        if key.is_partial:
            raise RuntimeError(f"Key {key!r} is partial.")
        model = lookup_model_by_kind(key.kind)
        if adapter is None:
            adapter = model._adapter
        model.pre_get_hook(key)
    entities = []
    for key, entity_data in zip(keys, adapter.get_multi(keys)):
        if entity_data is None:
            entities.append(None)
            continue
        # Kinds were proven registered above; use the registry directly.
        entity = _known_models[key.kind]._load(key, entity_data)
        entities.append(entity)
        entity.post_get_hook()
    return entities
"Get a set of entities from Datastore by their respective keys.\n\n Note:\n This uses the adapter that is tied to the first model in the\n list. If the keys have disparate adapters this function may\n behave in unexpected ways.\n\n Warning:\n You must pass a **list** and not a generator or some other kind\n of iterable to this function as it has to iterate over the list\n of keys multiple times.\n\n Parameters:\n keys(list[anom.Key]): The list of keys whose entities to get.\n\n Raises:\n RuntimeError: If the given set of keys have models that use\n a disparate set of adapters or if any of the keys are\n partial.\n\n Returns:\n list[Model]: Entities that do not exist are going to be None\n in the result list. The order of results matches the order\n of the input keys.\n "
] |
def put_multi(entities):
    """Persist the given entities to Datastore.

    Uses the adapter of the first entity; a real list is required because
    the entities are iterated more than once.

    Returns:
        list[Model]: The persisted entities, with keys filled in.
    """
    if not entities:
        return []
    adapter = None
    requests = []
    for entity in entities:
        if adapter is None:
            adapter = entity._adapter
        entity.pre_put_hook()
        requests.append(PutRequest(entity.key, entity.unindexed_properties, entity))
    for key, entity in zip(adapter.put_multi(requests), entities):
        entity.key = key
        entity.post_put_hook()
    return entities
"Persist a set of entities to Datastore.\n\n Note:\n This uses the adapter that is tied to the first Entity in the\n list. If the entities have disparate adapters this function may\n behave in unexpected ways.\n\n Warning:\n You must pass a **list** and not a generator or some other kind\n of iterable to this function as it has to iterate over the list\n of entities multiple times.\n\n Parameters:\n entities(list[Model]): The list of entities to persist.\n\n Raises:\n RuntimeError: If the given set of models use a disparate set of\n adapters.\n\n Returns:\n list[Model]: The list of persisted entitites.\n "
] |
def from_path(cls, *path, namespace=None):
    """Build a Datastore key from (kind, id, kind, id, ...) path segments.

    The namespace is applied to every key in the resulting ancestor chain.

    Returns:
        anom.Key: The key represented by the given path.
    """
    key = None
    for offset in range(0, len(path), 2):
        segment = path[offset:offset + 2]
        key = cls(*segment, parent=key, namespace=namespace)
    return key
"Build up a Datastore key from a path.\n\n Parameters:\n \\*path(tuple[str or int]): The path segments.\n namespace(str): An optional namespace for the key. This is\n applied to each key in the tree.\n\n Returns:\n anom.Key: The Datastore represented by the given path.\n "
] |
def path(self):
    "tuple: The full Datastore path represented by this key."
    base = self.parent.path if self.parent else ()
    if self.id_or_name:
        return base + (self.kind, self.id_or_name)
    return base + (self.kind,)
def int_id(self):
    "int: This key's numeric id, or None when the id is not an int."
    value = self.id_or_name
    return value if isinstance(value, int) else None
def str_id(self):
    "str: This key's string id, or None when the id is not a string."
    value = self.id_or_name
    return value if isinstance(value, str) else None
def validate(self, value):
    """Validate that ``value`` can be assigned to this Property.

    Raises:
        TypeError: If the type of the assigned value is invalid.

    Returns:
        The value that should be assigned to the entity.
    """
    if isinstance(value, self._types):
        return value
    if self.optional and value is None:
        # Repeated optional properties default to an empty list.
        return [] if self.repeated else None
    if (self.repeated and isinstance(value, (tuple, list))
            and all(isinstance(item, self._types) for item in value)):
        return value
    raise TypeError(f"Value of type {classname(value)} assigned to {classname(self)} property.")
"Validates that `value` can be assigned to this Property.\n\n Parameters:\n value: The value to validate.\n\n Raises:\n TypeError: If the type of the assigned value is invalid.\n\n Returns:\n The value that should be assigned to the entity.\n "
] |
def prepare_to_store(self, entity, value):
    """Prepare ``value`` for storage before handing it to an adapter.

    Raises:
        RuntimeError: If this property is required but has no value.

    Returns:
        The value that should be persisted.
    """
    if not self.optional and value is None:
        raise RuntimeError(f"Property {self.name_on_model} requires a value.")
    return value
"Prepare `value` for storage. Called by the Model for each\n Property, value pair it contains before handing the data off\n to an adapter.\n\n Parameters:\n entity(Model): The entity to which the value belongs.\n value: The value being stored.\n\n Raises:\n RuntimeError: If this property is required but no value was\n assigned to it.\n\n Returns:\n The value that should be persisted.\n "
] |
def unindexed_properties(self):
    "tuple[str]: The names of all the unindexed properties on this entity."
    properties = ()
    for name, prop in self._properties.items():
        if isinstance(prop, EmbedLike):
            # Embedded entities contribute their own unindexed names.
            embedded_entity = getattr(self, name, None)
            if embedded_entity:
                properties += prop.get_unindexed_properties(embedded_entity)
        # Precedence note: this reads as `(not prop.indexed) or
        # (prop.indexed_if and not prop.indexed_if(self, prop, name))`,
        # i.e. unindexed when indexing is off, or conditionally off.
        elif not prop.indexed or prop.indexed_if and not prop.indexed_if(self, prop, name):
            properties += (prop.name_on_entity,)
    return properties
def get(cls, id_or_name, *, parent=None, namespace=None):
    """Get an entity of this model by id.

    Returns:
        Model: The entity, or None if it doesn't exist in Datastore.
    """
    key = Key(cls, id_or_name, parent=parent, namespace=namespace)
    return key.get()
"Get an entity by id.\n\n Parameters:\n id_or_name(int or str): The entity's id.\n parent(anom.Key, optional): The entity's parent Key.\n namespace(str, optional): The entity's namespace.\n\n Returns:\n Model: An entity or ``None`` if the entity doesn't exist in\n Datastore.\n "
] |
def init_app(self, app, env_file=None, verbose_mode=False):
    """Import a .env file into the Flask app's config.

    Defaults to ``.env`` in the current working directory; warns when
    the file does not exist.
    """
    if self.app is None:
        self.app = app
    self.verbose_mode = verbose_mode
    path = env_file if env_file is not None else os.path.join(os.getcwd(), ".env")
    if os.path.exists(path):
        self.__import_vars(path)
    else:
        warnings.warn("can't read {0} - it doesn't exist".format(path))
"Imports .env file."
] |
def __import_vars(self, env_file):
    """Parse ``env_file`` line by line and load KEY=value pairs into
    ``self.app.config``, stripping one pair of surrounding quotes."""
    with open(env_file, "r") as f:  # pylint: disable=invalid-name
        for line in f:
            try:
                line = line.lstrip()
                # Allow shell-style "export KEY=value" lines.
                if line.startswith('export'):
                    line = line.replace('export', '', 1)
                key, val = line.strip().split('=', 1)
            except ValueError:  # Take care of blank or comment lines
                pass
            else:
                # NOTE(review): val is always a str here, so callable()
                # is never true -- presumably defensive; confirm.
                if not callable(val):
                    if self.verbose_mode:
                        if key in self.app.config:
                            print(
                                " * Overwriting an existing config var:"
                                " {0}".format(key))
                        else:
                            print(
                                " * Setting an entirely new config var:"
                                " {0}".format(key))
                    # Strip a single leading/trailing quote character.
                    self.app.config[key] = re.sub(
                        r"\A[\"']|[\"']\Z", "", val)
"Actual importing function."
] |
def eval(self, keys):
    """Cast config vars to literal types.

    Examples:
        Specify the expected literal type per key:

        >>> env.eval({MAIL_PORT: int})
    """
    # NOTE: method intentionally named `eval` (public API); it shadows
    # the builtin only inside this class.
    for key, expected_type in keys.items():
        if key not in self.app.config:
            continue
        try:
            parsed = ast.literal_eval(self.app.config[key])
        except (ValueError, SyntaxError):
            print(" ! Could not evaluate as literal type:"
                  " {0} => {1}".format(key, expected_type))
            continue
        if isinstance(parsed, expected_type):
            if self.verbose_mode:
                print(
                    " * Casting a specified var as literal:"
                    " {0} => {1}".format(key, expected_type)
                )
            self.app.config[key] = parsed
        else:
            print(
                " ! Does not match with specified type:"
                " {0} => {1}".format(key, expected_type))
"\n Examples:\n Specify type literal for key.\n\n >>> env.eval({MAIL_PORT: int})\n "
] |
def alias(self, maps):
    """Create alias config vars from existing ones.

    Examples:
        >>> env.alias(maps={
            'TEST_DATABASE_URL': 'SQLALCHEMY_DATABASE_URI',
            'TEST_HOST': 'HOST'
        })
    """
    for source_key, alias_key in maps.items():
        if self.verbose_mode:
            print(
                " * Mapping a specified var as a alias:"
                " {0} -> {1}".format(alias_key, source_key))
        self.app.config[alias_key] = self.app.config[source_key]
"\n Examples:\n Make alias var -> as.\n\n >>> env.alias(maps={\n 'TEST_DATABASE_URL': 'SQLALCHEMY_DATABASE_URI',\n 'TEST_HOST': 'HOST'\n })\n "
] |
def parse_django_adminopt_node(env, sig, signode):
    """A copy of sphinx.directives.CmdoptionDesc.parse_signature().

    Appends option name/argument nodes to ``signode`` and returns the
    first option name found; raises ValueError when the signature
    matches no option at all.
    """
    from sphinx.domains.std import option_desc_re

    def _consume(pattern, count, firstname):
        # Append one desc_name/desc_addname pair per matched option,
        # comma-separating consecutive options.
        for m in pattern.finditer(sig):
            optname, args = m.groups()
            if count:
                signode += addnodes.desc_addname(', ', ', ')
            signode += addnodes.desc_name(optname, optname)
            signode += addnodes.desc_addname(args, args)
            if not count:
                firstname = optname
            count += 1
        return count, firstname

    count, firstname = _consume(option_desc_re, 0, '')
    if not count:
        # Fall back to the simpler option syntax.
        count, firstname = _consume(simple_option_desc_re, count, firstname)
    if not firstname:
        raise ValueError
    return firstname
"A copy of sphinx.directives.CmdoptionDesc.parse_signature()"
] |
def get_stop_words(language, cache=True):
    """Return the list of stop words for ``language``.

    :type language: basestring
    :rtype: list
    :raises StopWordError: for unknown languages or unreadable files.
    """
    # Map common aliases (e.g. "en") onto canonical language names.
    try:
        language = LANGUAGE_MAPPING[language]
    except KeyError:
        if language not in AVAILABLE_LANGUAGES:
            # Fixed message: the original was missing the opening quote
            # ('{0}" language...').
            raise StopWordError('"{0}" language is unavailable.'.format(
                language
            ))
    if cache and language in STOP_WORDS_CACHE:
        return STOP_WORDS_CACHE[language]
    language_filename = os.path.join(STOP_WORDS_DIR, language + '.txt')
    try:
        with open(language_filename, 'rb') as language_file:
            stop_words = [line.decode('utf-8').strip()
                          for line in language_file.readlines()]
        stop_words = apply_filters(stop_words, language)
    except IOError:
        raise StopWordError(
            '"{0}" file is unreadable, check your installation.'.format(
                language_filename
            )
        )
    if cache:
        STOP_WORDS_CACHE[language] = stop_words
    return stop_words
"\n :type language: basestring\n\n :rtype: list\n "
] |
def apply_filters(stopwords, language):
    """Apply registered filters to a stop-word list.

    :param stopwords: list
    :param language: string
    :return: filtered stopwords
    """
    # Language-specific filters take (stopwords); global (None-keyed)
    # filters take (stopwords, language).
    for language_filter in _filters.get(language, []):
        stopwords = language_filter(stopwords)
    for global_filter in _filters[None]:
        stopwords = global_filter(stopwords, language)
    return stopwords
"\n Apply registered filters to stopwords\n :param stopwords: list\n :param language: string\n :return: filtered stopwords\n "
] |
def add_filter(func, language=None):
    """Register a stop-word filter for a specific language.

    With ``language=None`` the filter applies to all languages.  Filters
    do not affect stop words already in the cache.

    :param func: callable
    :param language: string|None
    """
    _filters.setdefault(language, []).append(func)
"\n Register filters for specific language.\n If language == None the filter applies for all languages.\n Filter will not apply for stop words in cache.\n :param func: callable\n :param language: string|None\n :return:\n "
] |
def remove_filter(func, language=None):
    """Unregister a previously added stop-word filter.

    :return: True when the filter was found and removed, else False.
    """
    registered = _filters.get(language, [])
    if func not in registered:
        return False
    registered.remove(func)
    return True
"\n :param func:\n :param language:\n :return:\n "
] |
def process_response(self, request, response):
    """Persist ``request.COUNTRY_CODE`` in a cookie on the response.

    Shares its configuration with the language cookie, as both serve a
    similar purpose.
    """
    missing = object()
    country = getattr(request, 'COUNTRY_CODE', missing)
    if country is not missing:
        response.set_cookie(
            key=constants.COUNTRY_COOKIE_NAME,
            value=country,
            max_age=settings.LANGUAGE_COOKIE_AGE,
            path=settings.LANGUAGE_COOKIE_PATH,
            domain=settings.LANGUAGE_COOKIE_DOMAIN
        )
    return response
"\n Shares config with the language cookie as they serve a similar purpose\n "
] |
def create_option(
        self, name, value, label, selected, index,
        subindex=None, attrs=None):
    """Patch of Django's widget ``create_option`` to use nicer ids."""
    # Flatten (index, subindex) into a single id segment.
    index = str(index) if subindex is None else "%s%s%s" % (
        index, self.id_separator, subindex)
    if attrs is None:
        attrs = {}
    option_attrs = self.build_attrs(
        self.attrs, attrs) if self.option_inherits_attrs else {}
    if selected:
        option_attrs.update(self.checked_attribute)
    if 'id' in option_attrs:
        if self.use_nice_ids:
            # Suffix the id with a slug of the label instead of the
            # numeric index, e.g. "id_field-my-label".
            option_attrs['id'] = "%s%s%s" % (
                option_attrs['id'],
                self.id_separator,
                slugify(label.lower())
            )
        else:
            option_attrs['id'] = self.id_for_label(
                option_attrs['id'], index)
    return {
        'name': name,
        'value': value,
        'label': label,
        'selected': selected,
        'index': index,
        'attrs': option_attrs,
        'type': self.input_type,
        'template_name': self.option_template_name,
        'wrap_label': True,
    }
"Patch to use nicer ids."
] |
def current_version():
    """Print the current directory-components version and update info."""
    filepath = os.path.abspath(
        project_root / "directory_components" / "version.py")
    version_py = get_file_string(filepath)
    # Search once and reuse the match object (the original called
    # ``regex.search`` twice and shadowed this function's name).
    match = re.compile(Utils.get_version).search(version_py)
    if match is not None:
        print(color(
            "Current directory-components version: {}".format(match.group(0)),
            fg='blue', style='bold'))
        get_update_info()
    else:
        print(color(
            'Error finding directory-components version.',
            fg='red', style='bold'))
"Get current version of directory-components."
] |
def get_file_string(filepath):
    """Return the full contents of ``filepath`` as a string."""
    with open(os.path.abspath(filepath)) as handle:
        return handle.read()
"Get string from file."
] |
def replace_in_dirs(version):
    """Upgrade the directory-components pin in every repo in Utils.dirs."""
    print(color(
        "Upgrading directory-components dependency in all repos...",
        fg='blue', style='bold'))
    # The replacement string is loop-invariant, so build it once.
    pin = "directory-components=={}".format(version)
    for dirname in Utils.dirs:
        replace_in_files(dirname, pin)
    done(version)
done(version) | [
"Look through dirs and run replace_in_files in each."
] |
def replace_in_files(dirname, replace):
    """Replace the current directory-components pin in dirname/requirements.in."""
    filepath = os.path.abspath(dirname / "requirements.in")
    if not (os.path.isfile(filepath) and header_footer_exists(filepath)):
        return
    updated = re.sub(Utils.exp, replace, get_file_string(filepath))
    with open(filepath, "w") as f:
        f.write(updated)
    print(color(
        "Written to file: {}".format(filepath),
        fg='magenta', style='bold'))
"Replace current version with new version in requirements files."
] |
def header_footer_exists(filepath):
    """Check whether directory-components is listed in a requirements file."""
    with open(filepath) as handle:
        contents = handle.read()
    return re.search(Utils.exp, contents)
"Check if directory-components is listed in requirements files."
] |
def abut(source, *args):
    """Like the |Stat abut command: concatenate lists side-by-side.

    '2D' lists are accommodated for either argument.  CAUTION: the
    shorter list is repeated until it matches the longer one; use
    pstat.simpleabut() if that is not desired.

    Usage:   abut(source, *args) where args = any number of lists
    Returns: a list of lists as long as the LONGEST list passed, source
             on the 'left', the args attached consecutively on the 'right'

    Ported off Python-2-only idioms: isinstance() replaces the removed
    types.ListType/TupleType, and // keeps the repeat counts integral
    under Python 3's true division.
    """
    if not isinstance(source, (list, tuple)):
        source = [source]
    for addon in args:
        if not isinstance(addon, (list, tuple)):
            addon = [addon]
        if len(addon) < len(source):            # source list is longer
            if len(source) % len(addon) == 0:   # integer multiples?
                repeats = len(source) // len(addon)
                origadd = copy.deepcopy(addon)
                for _ in range(repeats - 1):
                    addon = addon + origadd
            else:
                repeats = len(source) // len(addon) + 1  # non-integer ratio
                origadd = copy.deepcopy(addon)
                for _ in range(repeats - 1):
                    addon = addon + origadd
                addon = addon[0:len(source)]    # trim the overshoot
        elif len(source) < len(addon):          # addon list is longer
            if len(addon) % len(source) == 0:   # integer multiples?
                repeats = len(addon) // len(source)
                origsour = copy.deepcopy(source)
                for _ in range(repeats - 1):
                    source = source + origsour
            else:
                repeats = len(addon) // len(source) + 1  # non-integer ratio
                origsour = copy.deepcopy(source)
                for _ in range(repeats - 1):
                    source = source + origsour
                source = source[0:len(addon)]   # trim the overshoot
        source = simpleabut(source, addon)
    return source
"\nLike the |Stat abut command. It concatenates two lists side-by-side\nand returns the result. '2D' lists are also accomodated for either argument\n(source or addon). CAUTION: If one list is shorter, it will be repeated\nuntil it is as long as the longest list. If this behavior is not desired,\nuse pstat.simpleabut().\n\nUsage: abut(source, args) where args=any # of lists\nReturns: a list of lists as long as the LONGEST list past, source on the\n 'left', lists in <args> attached consecutively on the 'right'\n"
] |
def simpleabut(source, addon):
    """Concatenate two lists as columns and return the result.

    '2D' lists are accommodated for either argument.  Neither list is
    repeated to equalize lengths; the result is as long as ``source``
    (pairs beyond the shorter list keep source's original rows).

    Usage:   simpleabut(source, addon)
    Returns: a list of lists as long as source, with source on the
             'left' and addon on the 'right'

    Ported off Python-2-only type checks (types.ListType/TupleType) and
    renamed the local that shadowed the builtin ``list``.
    """
    if not isinstance(source, (list, tuple)):
        source = [source]
    if not isinstance(addon, (list, tuple)):
        addon = [addon]
    minlen = min(len(source), len(addon))
    rows = copy.deepcopy(source)  # start from a copy of source
    # Scalars are wrapped into single-element columns; list rows are
    # concatenated as-is.
    source_is_2d = isinstance(source[0], (list, tuple))
    addon_is_2d = isinstance(addon[0], (list, tuple))
    for i in range(minlen):
        left = source[i] if source_is_2d else [source[i]]
        right = addon[i] if addon_is_2d else [addon[i]]
        rows[i] = left + right
    return rows
"\nConcatenates two lists as columns and returns the result. '2D' lists\nare also accomodated for either argument (source or addon). This DOES NOT\nrepeat either list to make the 2 lists of equal length. Beware of list pairs\nwith different lengths ... the resulting list will be the length of the\nFIRST list passed.\n\nUsage: simpleabut(source,addon) where source, addon=list (or list-of-lists)\nReturns: a list of lists as long as source, with source on the 'left' and\n addon on the 'right'\n"
] |
def colex(listoflists, cnums):
    """Extract the columns specified by ``cnums`` from a list-of-lists.

    ``cnums`` can be an integer, a sequence of integers, or a string
    slice expression on x (e.g. '[3:]' extracts columns 3 onward).

    Usage:   colex(listoflists, cnums)
    Returns: a list (or list-of-lists) of the requested columns, in the
             order the column numbers appear in cnums

    Python 3 fix: the string case previously eval'd ``map(...)``, which
    returns a lazy iterator on Python 3; it now builds a real list.
    """
    global index  # preserved: the original exported this module global
    column = 0
    if isinstance(cnums, (list, tuple)):  # multiple columns requested
        index = cnums[0]
        column = [row[index] for row in listoflists]
        for col in cnums[1:]:
            index = col
            column = abut(column, [row[index] for row in listoflists])
    elif isinstance(cnums, str):  # a slice expression such as '[3:]'
        column = eval('[x' + cnums + ' for x in listoflists]')
    else:  # a single column index
        index = cnums
        column = [row[index] for row in listoflists]
    return column
"\nExtracts from listoflists the columns specified in the list 'cnums'\n(cnums can be an integer, a sequence of integers, or a string-expression that\ncorresponds to a slice operation on the variable x ... e.g., 'x[3:]' will colex\ncolumns 3 onward from the listoflists).\n\nUsage: colex (listoflists,cnums)\nReturns: a list-of-lists corresponding to the columns from listoflists\n specified by cnums, in the order the column numbers appear in cnums\n"
] |
def collapse(listoflists, keepcols, collapsecols, fcn1=None, fcn2=None, cfcn=None):
    """Collapse ``collapsecols``, grouped by unique values in ``keepcols``.

    ``cfcn`` is the collapse function and defaults to the mean (defined
    locally to avoid a circular import with stats.py).  ``fcn1``/``fcn2``
    (e.g. stats.sterr, len), when given, append extra statistics after
    each collapsed value.

    Usage:   collapse(listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)
    Returns: a list of lists with the unique permutations of keepcols,
             abutted with the collapsed value(s) of each collapsecol.
    """
    def collmean(inlist):
        # Default collapse function: arithmetic mean.
        s = 0
        for item in inlist:
            s = s + item
        return s / float(len(inlist))

    # Accept bare column indices as well as sequences of them.
    if not isinstance(keepcols, (list, tuple)):
        keepcols = [keepcols]
    if not isinstance(collapsecols, (list, tuple)):
        collapsecols = [collapsecols]
    if cfcn is None:
        cfcn = collmean
    if keepcols == []:
        # No grouping: collapse each requested column over all rows.
        means = [0] * len(collapsecols)
        for i in range(len(collapsecols)):
            avgcol = colex(listoflists, collapsecols[i])
            means[i] = cfcn(avgcol)
            if fcn1:
                try:
                    test = fcn1(avgcol)
                except Exception:
                    test = 'N/A'
                means[i] = [means[i], test]
            if fcn2:
                # NOTE(review): ``test`` is computed but only len(avgcol)
                # is appended below, matching the original code -- confirm.
                try:
                    test = fcn2(avgcol)
                except Exception:
                    test = 'N/A'
                try:
                    means[i] = means[i] + [len(avgcol)]
                except TypeError:
                    means[i] = [means[i], len(avgcol)]
        return means
    # Grouped collapse: one output row per unique keepcols combination.
    values = colex(listoflists, keepcols)
    uniques = unique(values)
    uniques.sort()
    newlist = []
    for item in uniques:
        if not isinstance(item, (list, tuple)):
            item = [item]
        tmprows = linexand(listoflists, keepcols, item)
        for col in collapsecols:
            avgcol = colex(tmprows, col)
            item.append(cfcn(avgcol))
            if fcn1 is not None:
                try:
                    test = fcn1(avgcol)
                except Exception:
                    test = 'N/A'
                item.append(test)
            if fcn2 is not None:
                try:
                    test = fcn2(avgcol)
                except Exception:
                    test = 'N/A'
                item.append(test)
        newlist.append(item)
    return newlist
"\nAverages data in collapsecol, keeping all unique items in keepcols\n(using unique, which keeps unique LISTS of column numbers), retaining the\nunique sets of values in keepcols, the mean for each. Setting fcn1\nand/or fcn2 to point to a function rather than None (e.g., stats.sterr, len)\nwill append those results (e.g., the sterr, N) after each calculated mean.\ncfcn is the collapse function to apply (defaults to mean, defined here in the\npstat module to avoid circular imports with stats.py, but harmonicmean or\nothers could be passed).\n\nUsage: collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)\nReturns: a list of lists with all unique permutations of entries appearing in\n columns (\"conditions\") specified by keepcols, abutted with the result of\n cfcn (if cfcn=None, defaults to the mean) of each column specified by\n collapsecols.\n"
] |
def flat(l):
    """Return the flattened version of a '2D' list; list-equivalent of
    the NumPy ``.flat`` attribute.

    Usage: flat(l)
    """
    return [item for row in l for item in row]
"\nReturns the flattened version of a '2D' list. List-correlate to the a.flat()\nmethod of NumPy arrays.\n\nUsage: flat(l)\n"
] |
def linexand(listoflists, columnlist, valuelist):
    """Return the rows where every columnlist[i] equals valuelist[i].

    len(columnlist) must equal len(valuelist).

    Usage:   linexand(listoflists, columnlist, valuelist)
    Returns: rows of listoflists where columnlist[i]==valuelist[i] for ALL i

    Python 3 fix: the original eval'd ``filter(...)``, which is lazy on
    Python 3; the predicate is now applied via a list comprehension.
    """
    if not isinstance(columnlist, (list, tuple)):
        columnlist = [columnlist]
    if not isinstance(valuelist, (list, tuple)):
        valuelist = [valuelist]
    criterion = ''
    for i in range(len(columnlist)):
        # Quote string criteria so the eval'd expression compares strings.
        if isinstance(valuelist[i], str):
            critval = '\'' + valuelist[i] + '\''
        else:
            critval = str(valuelist[i])
        criterion = criterion + ' x[' + str(columnlist[i]) + ']==' + critval + ' and'
    criterion = criterion[0:-3]  # strip the trailing "and"
    predicate = eval('lambda x:' + criterion)
    return [row for row in listoflists if predicate(row)]
"\nReturns the rows of a list of lists where col (from columnlist) = val\n(from valuelist) for EVERY pair of values (columnlist[i],valuelists[i]).\nlen(columnlist) must equal len(valuelist).\n\nUsage: linexand (listoflists,columnlist,valuelist)\nReturns: the rows of listoflists where columnlist[i]=valuelist[i] for ALL i\n"
] |
Please provide a description of the function:def linedelimited (inlist,delimiter):
outstr = ''
for item in inlist:
if type(item) != StringType:
item = str(item)
outstr = outstr + item + delimiter
outstr = outstr[0:-1]
return outstr | [
"\nReturns a string composed of elements in inlist, with each element\nseparated by 'delimiter.' Used by function writedelimited. Use '\\t'\nfor tab-delimiting.\n\nUsage: linedelimited (inlist,delimiter)\n"
] |
Please provide a description of the function:def lineincols (inlist,colsize):
outstr = ''
for item in inlist:
if type(item) != StringType:
item = str(item)
size = len(item)
if size <= colsize:
for i in range(colsize-size):
outstr = outstr + ' '
outstr = outstr + item
else:
outstr = outstr + item[0:colsize+1]
return outstr | [
"\nReturns a string composed of elements in inlist, with each element\nright-aligned in columns of (fixed) colsize.\n\nUsage: lineincols (inlist,colsize) where colsize is an integer\n"
] |
Please provide a description of the function:def lineincustcols (inlist,colsizes):
outstr = ''
for i in range(len(inlist)):
if type(inlist[i]) != StringType:
item = str(inlist[i])
else:
item = inlist[i]
size = len(item)
if size <= colsizes[i]:
for j in range(colsizes[i]-size):
outstr = outstr + ' '
outstr = outstr + item
else:
outstr = outstr + item[0:colsizes[i]+1]
return outstr | [
"\nReturns a string composed of elements in inlist, with each element\nright-aligned in a column of width specified by a sequence colsizes. The\nlength of colsizes must be greater than or equal to the number of columns\nin inlist.\n\nUsage: lineincustcols (inlist,colsizes)\nReturns: formatted string created from inlist\n"
] |
Please provide a description of the function:def list2string (inlist,delimit=' '):
stringlist = [makestr(_) for _ in inlist]
return string.join(stringlist,delimit) | [
"\nConverts a 1D list to a single long string for file output, using\nthe string.join function.\n\nUsage: list2string (inlist,delimit=' ')\nReturns: the string created from inlist\n"
] |
Please provide a description of the function:def printcc (lst,extra=2):
if type(lst[0]) not in [ListType,TupleType]:
lst = [lst]
rowstokill = []
list2print = copy.deepcopy(lst)
for i in range(len(lst)):
if lst[i] == ['\n'] or lst[i]=='\n' or lst[i]=='dashes' or lst[i]=='' or lst[i]==['']:
rowstokill = rowstokill + [i]
rowstokill.reverse() # delete blank rows from the end
for row in rowstokill:
del list2print[row]
maxsize = [0]*len(list2print[0])
for col in range(len(list2print[0])):
items = colex(list2print,col)
items = [makestr(_) for _ in items]
maxsize[col] = max(map(len, items)) + extra
for row in lst:
if row == ['\n'] or row == '\n' or row == '' or row == ['']:
print()
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '-'*(maxsize[j]-2)
print(lineincustcols(dashes,maxsize))
else:
print(lineincustcols(row,maxsize))
return None | [
"\nPrints a list of lists in columns, customized by the max size of items\nwithin the columns (max size of items in col, plus 'extra' number of spaces).\nUse 'dashes' or '\\\\n' in the list-of-lists to print dashes or blank lines,\nrespectively.\n\nUsage: printcc (lst,extra=2)\nReturns: None\n"
] |
Please provide a description of the function:def pl (listoflists):
for row in listoflists:
if row[-1] == '\n':
print(row, end=' ')
else:
print(row)
return None | [
"\nPrints a list of lists, 1 list (row) at a time.\n\nUsage: pl(listoflists)\nReturns: None\n"
] |
Please provide a description of the function:def replace (inlst,oldval,newval):
lst = inlst*1
for i in range(len(lst)):
if type(lst[i]) not in [ListType,TupleType]:
if lst[i]==oldval: lst[i]=newval
else:
lst[i] = replace(lst[i],oldval,newval)
return lst | [
"\nReplaces all occurrences of 'oldval' with 'newval', recursively.\n\nUsage: replace (inlst,oldval,newval)\n"
] |
Please provide a description of the function:def recode (inlist,listmap,cols=None):
lst = copy.deepcopy(inlist)
if cols != None:
if type(cols) not in [ListType,TupleType]:
cols = [cols]
for col in cols:
for row in range(len(lst)):
try:
idx = colex(listmap,0).index(lst[row][col])
lst[row][col] = listmap[idx][1]
except ValueError:
pass
else:
for row in range(len(lst)):
for col in range(len(lst)):
try:
idx = colex(listmap,0).index(lst[row][col])
lst[row][col] = listmap[idx][1]
except ValueError:
pass
return lst | [
"\nChanges the values in a list to a new set of values (useful when\nyou need to recode data from (e.g.) strings to numbers. cols defaults\nto None (meaning all columns are recoded).\n\nUsage: recode (inlist,listmap,cols=None) cols=recode cols, listmap=2D list\nReturns: inlist with the appropriate values replaced with new ones\n"
] |
Please provide a description of the function:def roundlist (inlist,digits):
if type(inlist[0]) in [IntType, FloatType]:
inlist = [inlist]
l = inlist*1
for i in range(len(l)):
for j in range(len(l[i])):
if type(l[i][j])==FloatType:
l[i][j] = round(l[i][j],digits)
return l | [
"\nGoes through each element in a 1D or 2D inlist, and applies the following\nfunction to all elements of FloatType ... round(element,digits).\n\nUsage: roundlist(inlist,digits)\nReturns: list with rounded floats\n"
] |
Please provide a description of the function:def sortby(listoflists,sortcols):
newlist = abut(colex(listoflists,sortcols),listoflists)
newlist.sort()
try:
numcols = len(sortcols)
except TypeError:
numcols = 1
crit = '[' + str(numcols) + ':]'
newlist = colex(newlist,crit)
return newlist | [
"\nSorts a list of lists on the column(s) specified in the sequence\nsortcols.\n\nUsage: sortby(listoflists,sortcols)\nReturns: sorted list, unchanged column ordering\n"
] |
Please provide a description of the function:def unique (inlist):
uniques = []
for item in inlist:
if item not in uniques:
uniques.append(item)
return uniques | [
"\nReturns all unique items in the passed list. If the a list-of-lists\nis passed, unique LISTS are found (i.e., items in the first dimension are\ncompared).\n\nUsage: unique (inlist)\nReturns: the unique elements (or rows) in inlist\n"
] |
Please provide a description of the function:def duplicates(inlist):
dups = []
for i in range(len(inlist)):
if inlist[i] in inlist[i+1:]:
dups.append(inlist[i])
return dups | [
"\nReturns duplicate items in the FIRST dimension of the passed list.\n\nUsage: duplicates (inlist)\n"
] |
Please provide a description of the function:def nonrepeats(inlist):
nonrepeats = []
for i in range(len(inlist)):
if inlist.count(inlist[i]) == 1:
nonrepeats.append(inlist[i])
return nonrepeats | [
"\nReturns items that are NOT duplicated in the first dim of the passed list.\n\nUsage: nonrepeats (inlist)\n"
] |
Please provide a description of the function:def lgeometricmean (inlist):
mult = 1.0
one_over_n = 1.0/len(inlist)
for item in inlist:
mult = mult * pow(item,one_over_n)
return mult | [
"\nCalculates the geometric mean of the values in the passed list.\nThat is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list.\n\nUsage: lgeometricmean(inlist)\n"
] |
Please provide a description of the function:def lharmonicmean (inlist):
sum = 0
for item in inlist:
sum = sum + 1.0/item
return len(inlist) / sum | [
"\nCalculates the harmonic mean of the values in the passed list.\nThat is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.\n\nUsage: lharmonicmean(inlist)\n"
] |
Please provide a description of the function:def lmean (inlist):
sum = 0
for item in inlist:
sum = sum + item
return sum/float(len(inlist)) | [
"\nReturns the arithematic mean of the values in the passed list.\nAssumes a '1D' list, but will function on the 1st dim of an array(!).\n\nUsage: lmean(inlist)\n"
] |
Please provide a description of the function:def lmedian (inlist,numbins=1000):
(hist, smallest, binsize, extras) = histogram(inlist,numbins) # make histog
cumhist = cumsum(hist) # make cumulative histogram
for i in range(len(cumhist)): # get 1st(!) index holding 50%ile score
if cumhist[i]>=len(inlist)/2.0:
cfbin = i
break
LRL = smallest + binsize*cfbin # get lower read limit of that bin
cfbelow = cumhist[cfbin-1]
freq = float(hist[cfbin]) # frequency IN the 50%ile bin
median = LRL + ((len(inlist)/2.0 - cfbelow)/float(freq))*binsize # median formula
return median | [
"\nReturns the computed median value of a list of numbers, given the\nnumber of bins to use for the histogram (more bins brings the computed value\ncloser to the median score, default number of bins = 1000). See G.W.\nHeiman's Basic Stats (1st Edition), or CRC Probability & Statistics.\n\nUsage: lmedian (inlist, numbins=1000)\n"
] |
Please provide a description of the function:def litemfreq(inlist):
scores = pstat.unique(inlist)
scores.sort()
freq = []
for item in scores:
freq.append(inlist.count(item))
return pstat.abut(scores, freq) | [
"\nReturns a list of pairs. Each pair consists of one of the scores in inlist\nand it's frequency count. Assumes a 1D list is passed.\n\nUsage: litemfreq(inlist)\nReturns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)\n"
] |
Please provide a description of the function:def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0):
if (defaultreallimits != None):
if type(defaultreallimits) not in [ListType,TupleType] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd
lowerreallimit = defaultreallimits
upperreallimit = 1.0001 * max(inlist)
else: # assume both limits given
lowerreallimit = defaultreallimits[0]
upperreallimit = defaultreallimits[1]
binsize = (upperreallimit-lowerreallimit)/float(numbins)
else: # no limits given for histogram, both must be calc'd
estbinwidth=(max(inlist)-min(inlist))/float(numbins) + 1 # 1=>cover all
binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)
lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin
bins = [0]*(numbins)
extrapoints = 0
for num in inlist:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit)/float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except:
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =',extrapoints)
return (bins, lowerreallimit, binsize, extrapoints) | [
"\nReturns (i) a list of histogram bin counts, (ii) the smallest value\nof the histogram binning, and (iii) the bin width (the last 2 are not\nnecessarily integers). Default number of bins is 10. If no sequence object\nis given for defaultreallimits, the routine picks (usually non-pretty) bins\nspanning all the numbers in the inlist.\n\nUsage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)\nReturns: list of bin values, lowerreallimit, binsize, extrapoints\n"
] |
Please provide a description of the function:def lsem (inlist):
sd = stdev(inlist)
n = len(inlist)
return sd/math.sqrt(n) | [
"\nReturns the estimated standard error of the mean (sx-bar) of the\nvalues in the passed list. sem = stdev / sqrt(n)\n\nUsage: lsem(inlist)\n"
] |
Please provide a description of the function:def lz (inlist, score):
z = (score-mean(inlist))/samplestdev(inlist)
return z | [
"\nReturns the z-score for a given input score, given that score and the\nlist from which that score came. Not appropriate for population calculations.\n\nUsage: lz(inlist, score)\n"
] |
Please provide a description of the function:def lzs (inlist):
zscores = []
for item in inlist:
zscores.append(z(inlist,item))
return zscores | [
"\nReturns a list of z-scores, one for each score in the passed list.\n\nUsage: lzs(inlist)\n"
] |
Please provide a description of the function:def ltrimboth (l,proportiontocut):
lowercut = int(proportiontocut*len(l))
uppercut = len(l) - lowercut
return l[lowercut:uppercut] | [
"\nSlices off the passed proportion of items from BOTH ends of the passed\nlist (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'\n10% of scores. Assumes list is sorted by magnitude. Slices off LESS if\nproportion results in a non-integer slice index (i.e., conservatively\nslices off proportiontocut).\n\nUsage: ltrimboth (l,proportiontocut)\nReturns: trimmed version of list l\n"
] |
Please provide a description of the function:def ltrim1 (l,proportiontocut,tail='right'):
if tail == 'right':
lowercut = 0
uppercut = len(l) - int(proportiontocut*len(l))
elif tail == 'left':
lowercut = int(proportiontocut*len(l))
uppercut = len(l)
return l[lowercut:uppercut] | [
"\nSlices off the passed proportion of items from ONE end of the passed\nlist (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'\n10% of scores). Slices off LESS if proportion results in a non-integer\nslice index (i.e., conservatively slices off proportiontocut).\n\nUsage: ltrim1 (l,proportiontocut,tail='right') or set tail='left'\nReturns: trimmed version of list l\n"
] |
Please provide a description of the function:def lpearsonr(x,y):
TINY = 1.0e-30
if len(x) != len(y):
raise ValueError('Input values not paired in pearsonr. Aborting.')
n = len(x)
x = [float(_) for _ in x]
y = [float(_) for _ in y]
xmean = mean(x)
ymean = mean(y)
r_num = n*(summult(x,y)) - sum(x)*sum(y)
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = (r_num / r_den) # denominator already a float
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df,0.5,df/float(df+t*t))
return r, prob | [
"\nCalculates a Pearson correlation coefficient and the associated\nprobability value. Taken from Heiman's Basic Statistics for the Behav.\nSci (2nd), p.195.\n\nUsage: lpearsonr(x,y) where x and y are equal-length lists\nReturns: Pearson's r value, two-tailed p-value\n"
] |
Please provide a description of the function:def lspearmanr(x,y):
TINY = 1e-30
if len(x) != len(y):
raise ValueError('Input values not paired in spearmanr. Aborting.')
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = sumdiffsquared(rankx,ranky)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = betai(0.5*df,0.5,df/(df+t*t)) # t already a float
# probability values for rs are from part 2 of the spearman function in
# Numerical Recipies, p.510. They are close to tables, but not exact. (?)
return rs, probrs | [
"\nCalculates a Spearman rank-order correlation coefficient. Taken\nfrom Heiman's Basic Statistics for the Behav. Sci (1st), p.192.\n\nUsage: lspearmanr(x,y) where x and y are equal-length lists\nReturns: Spearman's r, two-tailed p-value\n"
] |
Please provide a description of the function:def lpointbiserialr(x,y):
TINY = 1e-30
if len(x) != len(y):
raise ValueError('INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.')
data = pstat.abut(x,y)
categories = pstat.unique(x)
if len(categories) != 2:
raise ValueError("Exactly 2 categories required for pointbiserialr().")
else: # there are 2 categories, continue
codemap = pstat.abut(categories,range(2))
recoded = pstat.recode(data,codemap,0)
x = pstat.linexand(data,0,categories[0])
y = pstat.linexand(data,0,categories[1])
xmean = mean(pstat.colex(x,1))
ymean = mean(pstat.colex(y,1))
n = len(data)
adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
rpb = (ymean - xmean)/samplestdev(pstat.colex(data,1))*adjust
df = n-2
t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
prob = betai(0.5*df,0.5,df/(df+t*t)) # t already a float
return rpb, prob | [
"\nCalculates a point-biserial correlation coefficient and the associated\nprobability value. Taken from Heiman's Basic Statistics for the Behav.\nSci (1st), p.194.\n\nUsage: lpointbiserialr(x,y) where x,y are equal-length lists\nReturns: Point-biserial r, two-tailed p-value\n"
] |
Please provide a description of the function:def lkendalltau(x,y):
n1 = 0
n2 = 0
iss = 0
for j in range(len(x)-1):
for k in range(j,len(y)):
a1 = x[j] - x[k]
a2 = y[j] - y[k]
aa = a1 * a2
if (aa): # neither list has a tie
n1 = n1 + 1
n2 = n2 + 1
if aa > 0:
iss = iss + 1
else:
iss = iss -1
else:
if (a1):
n1 = n1 + 1
else:
n2 = n2 + 1
tau = iss / math.sqrt(n1*n2)
svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
z = tau / math.sqrt(svar)
prob = erfcc(abs(z)/1.4142136)
return tau, prob | [
"\nCalculates Kendall's tau ... correlation of ordinal data. Adapted\nfrom function kendl1 in Numerical Recipies. Needs good test-routine.@@@\n\nUsage: lkendalltau(x,y)\nReturns: Kendall's tau, two-tailed p-value\n"
] |
Please provide a description of the function:def llinregress(x,y):
TINY = 1.0e-20
if len(x) != len(y):
raise ValueError('Input values not paired in linregress. Aborting.')
n = len(x)
x = [float(_) for _ in x]
y = [float(_) for _ in y]
xmean = mean(x)
ymean = mean(y)
r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = r_num / r_den
z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df,0.5,df/(df+t*t))
slope = r_num / float(n*ss(x) - square_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*samplestdev(y)
return slope, intercept, r, prob, sterrest | [
"\nCalculates a regression line on x,y pairs. \n\nUsage: llinregress(x,y) x,y are equal-length lists of x-y coordinates\nReturns: slope, intercept, r, two-tailed prob, sterr-of-estimate\n"
] |
Please provide a description of the function:def lchisquare(f_obs,f_exp=None):
k = len(f_obs) # number of groups
if f_exp == None:
f_exp = [sum(f_obs)/float(k)] * len(f_obs) # create k bins with = freq.
chisq = 0
for i in range(len(f_obs)):
chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
return chisq, chisqprob(chisq, k-1) | [
"\nCalculates a one-way chi square for list of observed frequencies and returns\nthe result. If no expected frequencies are given, the total N is assumed to\nbe equally distributed across all groups.\n\nUsage: lchisquare(f_obs, f_exp=None) f_obs = list of observed cell freq.\nReturns: chisquare-statistic, associated p-value\n"
] |
Please provide a description of the function:def lks_2samp (data1,data2):
j1 = 0
j2 = 0
fn1 = 0.0
fn2 = 0.0
n1 = len(data1)
n2 = len(data2)
en1 = n1
en2 = n2
d = 0.0
data1.sort()
data2.sort()
while j1 < n1 and j2 < n2:
d1=data1[j1]
d2=data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if math.fabs(dt) > math.fabs(d):
d = dt
try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = ksprob((en+0.12+0.11/en)*abs(d))
except:
prob = 1.0
return d, prob | [
"\nComputes the Kolmogorov-Smirnof statistic on 2 samples. From\nNumerical Recipies in C, page 493.\n\nUsage: lks_2samp(data1,data2) data1&2 are lists of values for 2 conditions\nReturns: KS D-value, associated p-value\n"
] |
Please provide a description of the function:def lmannwhitneyu(x,y):
n1 = len(x)
n2 = len(y)
ranked = rankdata(x+y)
rankx = ranked[0:n1] # get the x-ranks
ranky = ranked[n1:] # the rest are y-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1,u2)
smallu = min(u1,u2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
raise ValueError('All numbers are identical in lmannwhitneyu')
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - zprob(z) | [
"\nCalculates a Mann-Whitney U statistic on the provided scores and\nreturns the result. Use only when the n in each condition is < 20 and\nyou have 2 independent samples of ranks. NOTE: Mann-Whitney U is\nsignificant if the u-obtained is LESS THAN or equal to the critical\nvalue of U found in the tables. Equivalent to Kruskal-Wallis H with\njust 2 groups.\n\nUsage: lmannwhitneyu(data)\nReturns: u-statistic, one-tailed p-value (i.e., p(z(U)))\n"
] |
Please provide a description of the function:def ltiecorrect(rankvals):
sorted,posn = shellsort(rankvals)
n = len(sorted)
T = 0.0
i = 0
while (i<n-1):
if sorted[i] == sorted[i+1]:
nties = 1
while (i<n-1) and (sorted[i] == sorted[i+1]):
nties = nties +1
i = i +1
T = T + nties**3 - nties
i = i+1
T = T / float(n**3-n)
return 1.0 - T | [
"\nCorrects for ties in Mann Whitney U and Kruskal Wallis H tests. See\nSiegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.\nNew York: McGraw-Hill. Code adapted from |Stat rankind.c code.\n\nUsage: ltiecorrect(rankvals)\nReturns: T correction factor for U or H\n"
] |
Please provide a description of the function:def lranksums(x,y):
n1 = len(x)
n2 = len(y)
alldata = x+y
ranked = rankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 -zprob(abs(z)))
return z, prob | [
"\nCalculates the rank sums statistic on the provided scores and\nreturns the result. Use only when the n in each condition is > 20 and you\nhave 2 independent samples of ranks.\n\nUsage: lranksums(x,y)\nReturns: a z-statistic, two-tailed p-value\n"
] |
Please provide a description of the function:def lkruskalwallish(*args):
args = list(args)
n = [0]*len(args)
all = []
n = [len(_) for _ in args]
for i in range(len(args)):
all = all + args[i]
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in lkruskalwallish')
h = h / float(T)
return h, chisqprob(h,df) | [
"\nThe Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more\ngroups, requiring at least 5 subjects in each group. This function\ncalculates the Kruskal-Wallis H-test for 3 or more independent samples\nand returns the result. \n\nUsage: lkruskalwallish(*args)\nReturns: H-statistic (corrected for ties), associated p-value\n"
] |
Please provide a description of the function:def lfriedmanchisquare(*args):
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
data = pstat.abut(*tuple(args))
for i in range(len(data)):
data[i] = rankdata(data[i])
ssbn = 0
for i in range(k):
ssbn = ssbn + sum(args[i])**2
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, chisqprob(chisq,k-1) | [
"\nFriedman Chi-Square is a non-parametric, one-way within-subjects\nANOVA. This function calculates the Friedman Chi-square test for repeated\nmeasures and returns the result, along with the associated probability\nvalue. It assumes 3 or more repeated measures. Only 3 levels requires a\nminimum of 10 subjects in the study. Four levels requires 5 subjects per\nlevel(??).\n\nUsage: lfriedmanchisquare(*args)\nReturns: chi-square statistic, associated p-value\n"
] |
Please provide a description of the function:def lchisqprob(chisq,df):
BIG = 20.0
def ex(x):
BIG = 20.0
if x < -BIG:
return 0.0
else:
return math.exp(x)
if chisq <=0 or df < 1:
return 1.0
a = 0.5 * chisq
if df%2 == 0:
even = 1
else:
even = 0
if df > 1:
y = ex(-a)
if even:
s = y
else:
s = 2.0 * zprob(-math.sqrt(chisq))
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = 1.0
else:
z = 0.5
if a > BIG:
if even:
e = 0.0
else:
e = math.log(math.sqrt(math.pi))
c = math.log(a)
while (z <= chisq):
e = math.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
return s
else:
if even:
e = 1.0
else:
e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
c = 0.0
while (z <= chisq):
e = e * (a/float(z))
c = c + e
z = z + 1.0
return (c*y+s)
else:
return s | [
"\nReturns the (1-tailed) probability value associated with the provided\nchi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.\n\nUsage: lchisqprob(chisq,df)\n"
] |
Please provide a description of the function:def lerfcc(x):
z = abs(x)
t = 1.0 / (1.0+0.5*z)
ans = t * math.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
if x >= 0:
return ans
else:
return 2.0 - ans | [
"\nReturns the complementary error function erfc(x) with fractional\nerror everywhere less than 1.2e-7. Adapted from Numerical Recipies.\n\nUsage: lerfcc(x)\n"
] |
Please provide a description of the function:def lzprob(z):
Z_MAX = 6.0 # maximum meaningful z-value
if z == 0.0:
x = 0.0
else:
y = 0.5 * math.fabs(z)
if y >= (Z_MAX*0.5):
x = 1.0
elif (y < 1.0):
w = y*y
x = ((((((((0.000124818987 * w
-0.001075204047) * w +0.005198775019) * w
-0.019198292004) * w +0.059054035642) * w
-0.151968751364) * w +0.319152932694) * w
-0.531923007300) * w +0.797884560593) * y * 2.0
else:
y = y - 2.0
x = (((((((((((((-0.000045255659 * y
+0.000152529290) * y -0.000019538132) * y
-0.000676904986) * y +0.001390604284) * y
-0.000794620820) * y -0.002034254874) * y
+0.006549791214) * y -0.010557625006) * y
+0.011630447319) * y -0.009279453341) * y
+0.005353579108) * y -0.002141268741) * y
+0.000535310849) * y +0.999936657524
if z > 0.0:
prob = ((x+1.0)*0.5)
else:
prob = ((1.0-x)*0.5)
return prob | [
"\nReturns the area under the normal curve 'to the left of' the given z value.\nThus, \n for z<0, zprob(z) = 1-tail probability\n for z>0, 1.0-zprob(z) = 1-tail probability\n for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability\nAdapted from z.c in Gary Perlman's |Stat.\n\nUsage: lzprob(z)\n"
] |
Please provide a description of the function:def lksprob(alam):
fac = 2.0
sum = 0.0
termbf = 0.0
a2 = -2.0*alam*alam
for j in range(1,201):
term = fac*math.exp(a2*j*j)
sum = sum + term
if math.fabs(term) <= (0.001*termbf) or math.fabs(term) < (1.0e-8*sum):
return sum
fac = -fac
termbf = math.fabs(term)
return 1.0 | [
"\nComputes a Kolmolgorov-Smirnov t-test significance level. Adapted from\nNumerical Recipies.\n\nUsage: lksprob(alam)\n"
] |
Please provide a description of the function:def lfprob (dfnum, dfden, F):
p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
return p | [
"\nReturns the (1-tailed) significance level (p-value) of an F\nstatistic given the degrees of freedom for the numerator (dfR-dfF) and\nthe degrees of freedom for the denominator (dfF).\n\nUsage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn\n"
] |
Please provide a description of the function:def lgammln(xx):
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*math.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + math.log(2.50662827465*ser) | [
"\nReturns the gamma function of xx.\n Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.\n(Adapted from: Numerical Recipies in C.)\n\nUsage: lgammln(xx)\n"
] |
Please provide a description of the function:def lbetai(a,b,x):
if (x<0.0 or x>1.0):
raise ValueError('Bad x in lbetai')
if (x==0.0 or x==1.0):
bt = 0.0
else:
bt = math.exp(gammln(a+b)-gammln(a)-gammln(b)+a*math.log(x)+b*
math.log(1.0-x))
if (x<(a+1.0)/(a+b+2.0)):
return bt*betacf(a,b,x)/float(a)
else:
return 1.0-bt*betacf(b,a,1.0-x)/float(b) | [
"\nReturns the incomplete beta function:\n\n I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)\n\nwhere a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma\nfunction of a. The continued fraction formulation is implemented here,\nusing the betacf function. (Adapted from: Numerical Recipies in C.)\n\nUsage: lbetai(a,b,x)\n"
] |
Please provide a description of the function:def lF_oneway(*lists):
a = len(lists) # ANOVA on 'a' groups, each in it's own list
means = [0]*a
vars = [0]*a
ns = [0]*a
alldata = []
tmp = [N.array(_) for _ in lists]
means = [amean(_) for _ in tmp]
vars = [avar(_) for _ in tmp]
ns = [len(_) for _ in lists]
for i in range(len(lists)):
alldata = alldata + lists[i]
alldata = N.array(alldata)
bign = len(alldata)
sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
ssbn = 0
for list in lists:
ssbn = ssbn + asquare_of_sums(N.array(list))/float(len(list))
ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
sswn = sstot-ssbn
dfbn = a-1
dfwn = bign - a
msb = ssbn/float(dfbn)
msw = sswn/float(dfwn)
f = msb/msw
prob = fprob(dfbn,dfwn,f)
return f, prob | [
"\nPerforms a 1-way ANOVA, returning an F-value and probability given\nany number of groups. From Heiman, pp.394-7.\n\nUsage: F_oneway(*lists) where *lists is any number of lists, one per\n treatment group\nReturns: F value, one-tailed p-value\n"
] |
Please provide a description of the function:def lF_value (ER,EF,dfnum,dfden):
return ((ER-EF)/float(dfnum) / (EF/float(dfden))) | [
"\nReturns an F-statistic given the following:\n ER = error associated with the null hypothesis (the Restricted model)\n EF = error associated with the alternate hypothesis (the Full model)\n dfR-dfF = degrees of freedom of the numerator\n dfF = degrees of freedom associated with the denominator/Full model\n\nUsage: lF_value(ER,EF,dfnum,dfden)\n"
] |
Please provide a description of the function:def writecc (listoflists,file,writetype='w',extra=2):
if type(listoflists[0]) not in [ListType,TupleType]:
listoflists = [listoflists]
outfile = open(file,writetype)
rowstokill = []
list2print = copy.deepcopy(listoflists)
for i in range(len(listoflists)):
if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
rowstokill = rowstokill + [i]
rowstokill.reverse()
for row in rowstokill:
del list2print[row]
maxsize = [0]*len(list2print[0])
for col in range(len(list2print[0])):
items = pstat.colex(list2print,col)
items = [pstat.makestr(_) for _ in items]
maxsize[col] = max(map(len,items)) + extra
for row in listoflists:
if row == ['\n'] or row == '\n':
outfile.write('\n')
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '-'*(maxsize[j]-2)
outfile.write(pstat.lineincustcols(dashes,maxsize))
else:
outfile.write(pstat.lineincustcols(row,maxsize))
outfile.write('\n')
outfile.close()
return None | [
"\nWrites a list of lists to a file in columns, customized by the max\nsize of items within the columns (max size of items in col, +2 characters)\nto specified file. File-overwrite is the default.\n\nUsage: writecc (listoflists,file,writetype='w',extra=2)\nReturns: None\n"
] |
Please provide a description of the function:def lincr(l,cap): # to increment a list up to a max-list of 'cap'
l[0] = l[0] + 1 # e.g., [0,0,0] --> [2,4,3] (=cap)
for i in range(len(l)):
if l[i] > cap[i] and i < len(l)-1: # if carryover AND not done
l[i] = 0
l[i+1] = l[i+1] + 1
elif l[i] > cap[i] and i == len(l)-1: # overflow past last column, must be finished
l = -1
return l | [
"\nSimulate a counting system from an n-dimensional list.\n\nUsage: lincr(l,cap) l=list to increment, cap=max values for each list pos'n\nReturns: next set of values for list l, OR -1 (if overflow)\n"
] |
Please provide a description of the function:def lcumsum (inlist):
newlist = copy.deepcopy(inlist)
for i in range(1,len(newlist)):
newlist[i] = newlist[i] + newlist[i-1]
return newlist | [
"\nReturns a list consisting of the cumulative sum of the items in the\npassed list.\n\nUsage: lcumsum(inlist)\n"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.