repository_name (string, 5-67 chars) | func_path_in_repository (string, 4-234 chars) | func_name (string, 0-314 chars) | whole_func_string (string, 52-3.87M chars) | language (6 classes) | func_code_string (string, 52-3.87M chars) | func_documentation_string (string, 1-47.2k chars) | func_code_url (string, 85-339 chars) |
---|---|---|---|---|---|---|---|
mansam/validator.py | validator/__init__.py | validate | def validate(validation, dictionary):
"""
Validate that a dictionary passes a set of
key-based validators. If all of the keys
in the dictionary are within the parameters
specified by the validation mapping, then
the validation passes.
:param validation: a mapping of keys to validators
:type validation: dict
:param dictionary: dictionary to be validated
:type dictionary: dict
:return: a tuple containing a bool indicating
success or failure and a mapping of fields
to error messages.
"""
errors = defaultdict(list)
for key in validation:
if isinstance(validation[key], (list, tuple)):
if Required in validation[key]:
if not Required(key, dictionary):
errors[key] = ["must be present"]
continue
_validate_list_helper(validation, dictionary, key, errors)
else:
v = validation[key]
if v == Required:
if not Required(key, dictionary):
errors[key] = ["must be present"]
else:
_validate_and_store_errs(v, dictionary, key, errors)
if len(errors) > 0:
# `errors` gets downgraded from defaultdict to dict
# because it makes for prettier output
return ValidationResult(valid=False, errors=dict(errors))
else:
return ValidationResult(valid=True, errors={}) | python | def validate(validation, dictionary):
"""
Validate that a dictionary passes a set of
key-based validators. If all of the keys
in the dictionary are within the parameters
specified by the validation mapping, then
the validation passes.
:param validation: a mapping of keys to validators
:type validation: dict
:param dictionary: dictionary to be validated
:type dictionary: dict
:return: a tuple containing a bool indicating
success or failure and a mapping of fields
to error messages.
"""
errors = defaultdict(list)
for key in validation:
if isinstance(validation[key], (list, tuple)):
if Required in validation[key]:
if not Required(key, dictionary):
errors[key] = ["must be present"]
continue
_validate_list_helper(validation, dictionary, key, errors)
else:
v = validation[key]
if v == Required:
if not Required(key, dictionary):
errors[key] = ["must be present"]
else:
_validate_and_store_errs(v, dictionary, key, errors)
if len(errors) > 0:
# `errors` gets downgraded from defaultdict to dict
# because it makes for prettier output
return ValidationResult(valid=False, errors=dict(errors))
else:
return ValidationResult(valid=True, errors={}) | Validate that a dictionary passes a set of
key-based validators. If all of the keys
in the dictionary are within the parameters
specified by the validation mapping, then
the validation passes.
:param validation: a mapping of keys to validators
:type validation: dict
:param dictionary: dictionary to be validated
:type dictionary: dict
:return: a tuple containing a bool indicating
success or failure and a mapping of fields
to error messages. | https://github.com/mansam/validator.py/blob/247f99c539c5c9aef3e5a6063026c687b8499090/validator/__init__.py#L635-L675 |
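A minimal usage sketch of `validate` may help; it assumes the `Required` and `Range` validators that validator.py exports alongside it, and unpacks the result as the tuple the docstring describes (the field names here are hypothetical):

```python
# Hedged sketch: Required checks presence, Range checks bounds.
from validator import validate, Required, Range

rules = {
    "name": [Required],                # key must be present
    "age": [Required, Range(0, 120)],  # present and within bounds
}

valid, errors = validate(rules, {"name": "Ada", "age": 36})
print(valid)   # True
print(errors)  # {}
```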
mansam/validator.py | validator/ext/__init__.py | ArgSpec | def ArgSpec(*args, **kwargs):
"""
Validate a function based on the given argspec.
# Example:
validations = {
"foo": [ArgSpec("a", "b", c", bar="baz")]
}
def pass_func(a, b, c, bar="baz"):
pass
def fail_func(b, c, a, baz="bar"):
pass
passes = {"foo": pass_func}
fails = {"foo": fail_func}
"""
def argspec_lambda(value):
argspec = getargspec(value)
argspec_kw_vals = ()
if argspec.defaults is not None:
argspec_kw_vals = argspec.defaults
kw_vals = {}
arg_offset = 0
arg_len = len(argspec.args) - 1
for val in argspec_kw_vals[::-1]:
kw_vals[argspec.args[arg_len - arg_offset]] = val
arg_offset += 1
if kwargs == kw_vals:
if len(args) != arg_len - arg_offset + 1:
return False
index = 0
for arg in args:
if argspec.args[index] != arg:
return False
index += 1
return True
return False
argspec_lambda.err_message = "must match argspec ({0}) {{{1}}}".format(args, kwargs)
# as little sense as negating this makes, best to just be consistent.
argspec_lambda.not_message = "must not match argspec ({0}) {{{1}}}".format(args, kwargs)
return argspec_lambda | python | def ArgSpec(*args, **kwargs):
"""
Validate a function based on the given argspec.
# Example:
validations = {
"foo": [ArgSpec("a", "b", c", bar="baz")]
}
def pass_func(a, b, c, bar="baz"):
pass
def fail_func(b, c, a, baz="bar"):
pass
passes = {"foo": pass_func}
fails = {"foo": fail_func}
"""
def argspec_lambda(value):
argspec = getargspec(value)
argspec_kw_vals = ()
if argspec.defaults is not None:
argspec_kw_vals = argspec.defaults
kw_vals = {}
arg_offset = 0
arg_len = len(argspec.args) - 1
for val in argspec_kw_vals[::-1]:
kw_vals[argspec.args[arg_len - arg_offset]] = val
arg_offset += 1
if kwargs == kw_vals:
if len(args) != arg_len - arg_offset + 1:
return False
index = 0
for arg in args:
if argspec.args[index] != arg:
return False
index += 1
return True
return False
argspec_lambda.err_message = "must match argspec ({0}) {{{1}}}".format(args, kwargs)
# as little sense as negating this makes, best to just be consistent.
argspec_lambda.not_message = "must not match argspec ({0}) {{{1}}}".format(args, kwargs)
return argspec_lambda | Validate a function based on the given argspec.
# Example:
validations = {
"foo": [ArgSpec("a", "b", c", bar="baz")]
}
def pass_func(a, b, c, bar="baz"):
pass
def fail_func(b, c, a, baz="bar"):
pass
passes = {"foo": pass_func}
fails = {"foo": fail_func} | https://github.com/mansam/validator.py/blob/247f99c539c5c9aef3e5a6063026c687b8499090/validator/ext/__init__.py#L35-L76 |
quantifiedcode/checkmate | checkmate/contrib/plugins/git/lib/repository_pygit2.py | get_first_date_for_group | def get_first_date_for_group(start_date,group_type,n):
"""
:param start_date: the start date
:param n: how many groups we want to get
:param group_type: daily, weekly or monthly
"""
current_date = start_date
if group_type == 'monthly':
current_year = start_date.year
current_month = start_date.month
for i in range(n-1):
current_month-=1
if current_month == 0:
current_month = 12
current_year -= 1
first_date = datetime.datetime(current_year,current_month,1)
elif group_type == 'weekly':
first_date=start_date-datetime.timedelta(days = start_date.weekday()+(n-1)*7)
elif group_type == 'daily':
first_date = start_date-datetime.timedelta(days = n-1)
first_date = datetime.datetime(first_date.year,first_date.month,first_date.day,0,0,0)
return first_date | python | def get_first_date_for_group(start_date,group_type,n):
"""
:param start_date: the start date
:param n: how many groups we want to get
:param group_type: daily, weekly or monthly
"""
current_date = start_date
if group_type == 'monthly':
current_year = start_date.year
current_month = start_date.month
for i in range(n-1):
current_month-=1
if current_month == 0:
current_month = 12
current_year -= 1
first_date = datetime.datetime(current_year,current_month,1)
elif group_type == 'weekly':
first_date=start_date-datetime.timedelta(days = start_date.weekday()+(n-1)*7)
elif group_type == 'daily':
first_date = start_date-datetime.timedelta(days = n-1)
first_date = datetime.datetime(first_date.year,first_date.month,first_date.day,0,0,0)
return first_date | :param start_date: the start date
:param n: how many groups we want to get
:param group_type: daily, weekly or monthly | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/contrib/plugins/git/lib/repository_pygit2.py#L36-L57 |
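A quick worked example, assuming the function above is in scope: asking for three monthly groups from mid-March steps the month counter back twice and pins the day to the first, while the daily branch subtracts `n-1` days and truncates to midnight.

```python
import datetime

start = datetime.datetime(2024, 3, 15)
# monthly: March -> February -> January, day pinned to 1
print(get_first_date_for_group(start, 'monthly', 3))  # 2024-01-01 00:00:00
# daily: 15 - (7 - 1) = 9, time zeroed out
print(get_first_date_for_group(start, 'daily', 7))    # 2024-03-09 00:00:00
```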
quantifiedcode/checkmate | checkmate/contrib/plugins/git/models.py | GitRepository.get_snapshots | def get_snapshots(self,**kwargs):
"""
Returns a list of snapshots in a given repository.
"""
commits = self.repository.get_commits(**kwargs)
snapshots = []
for commit in commits:
for key in ('committer_date','author_date'):
commit[key] = datetime.datetime.fromtimestamp(commit[key+'_ts'])
snapshot = GitSnapshot(commit)
hasher = Hasher()
hasher.add(snapshot.sha)
snapshot.hash = hasher.digest.hexdigest()
snapshot.project = self.project
snapshot.pk = uuid.uuid4().hex
snapshots.append(snapshot)
return snapshots | python | def get_snapshots(self,**kwargs):
"""
Returns a list of snapshots in a given repository.
"""
commits = self.repository.get_commits(**kwargs)
snapshots = []
for commit in commits:
for key in ('committer_date','author_date'):
commit[key] = datetime.datetime.fromtimestamp(commit[key+'_ts'])
snapshot = GitSnapshot(commit)
hasher = Hasher()
hasher.add(snapshot.sha)
snapshot.hash = hasher.digest.hexdigest()
snapshot.project = self.project
snapshot.pk = uuid.uuid4().hex
snapshots.append(snapshot)
return snapshots | Returns a list of snapshots in a given repository. | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/contrib/plugins/git/models.py#L70-L86 |
quantifiedcode/checkmate | checkmate/lib/code/environment.py | diff_objects | def diff_objects(objects_a,objects_b,key,comparator = None,with_unchanged = False):
"""
Returns a "diff" between two lists of objects.
:param key: The key that identifies objects with identical location in each set,
such as files with the same path or code objects with the same URL.
:param comparator: Comparison function that decides if two objects are identical.
"""
objects_by_key = {'a' :defaultdict(list),
'b' : defaultdict(list)}
for name,objects in ('a',objects_a),('b',objects_b):
d = objects_by_key[name]
for obj in objects:
d[key(obj)].append(obj)
added_objects = [obj for key,objs in objects_by_key['b'].items()
if key not in objects_by_key['a'] for obj in objs]
deleted_objects = [obj for key,objs in objects_by_key['a'].items()
if key not in objects_by_key['b'] for obj in objs]
joint_keys = [key for key in objects_by_key['a']
if key in objects_by_key['b']]
modified_objects = []
#we go through the keys that exist in both object sets
for key in joint_keys:
objects_a = objects_by_key['a'][key]
objects_b = objects_by_key['b'][key]
if len(objects_a) > 1 or len(objects_b) > 1:
#this is an ambiguous situation: we have more than one object for the same
#key, so we have to decide which ones have been added or not
#we try to remove identical objects from the set
objects_a_copy = objects_a[:]
objects_b_copy = objects_b[:]
#for the next step, we need a comparator
if comparator:
#we iterate through the list and try to find different objects...
for obj_a in objects_a:
for obj_b in objects_b_copy:
if comparator(obj_a,obj_b) == 0:
#these objects are identical, we remove them from both sets...
objects_a_copy.remove(obj_a)
objects_b_copy.remove(obj_b)
break
#here we cannot distinguish objects...
if len(objects_b_copy) > len(objects_a_copy):
#we arbitrarily mark the last objects in objects_b as added
added_objects.extend(objects_b_copy[len(objects_a_copy):])
elif len(objects_a_copy) > len(objects_b_copy):
#we arbitrarily mark the last objects in objects_a as deleted
deleted_objects.extend(objects_a_copy[len(objects_b_copy):])
else:
if comparator and comparator(objects_a[0],objects_b[0]) != 0:
#these objects are different
modified_objects.append(objects_a[0])
result = {
'added' : added_objects,
'deleted' : deleted_objects,
'modified' : modified_objects,
}
if with_unchanged:
#keep the objects from b whose key exists in both sets and that
#were not flagged as modified
unchanged_objects = [obj for key in joint_keys
for obj in objects_by_key['b'][key]
if not obj in modified_objects]
result['unchanged'] = unchanged_objects
return result | python | def diff_objects(objects_a,objects_b,key,comparator = None,with_unchanged = False):
"""
Returns a "diff" between two lists of objects.
:param key: The key that identifies objects with identical location in each set,
such as files with the same path or code objects with the same URL.
:param comparator: Comparison function that decides if two objects are identical.
"""
objects_by_key = {'a' :defaultdict(list),
'b' : defaultdict(list)}
for name,objects in ('a',objects_a),('b',objects_b):
d = objects_by_key[name]
for obj in objects:
d[key(obj)].append(obj)
added_objects = [obj for key,objs in objects_by_key['b'].items()
if key not in objects_by_key['a'] for obj in objs]
deleted_objects = [obj for key,objs in objects_by_key['a'].items()
if key not in objects_by_key['b'] for obj in objs]
joint_keys = [key for key in objects_by_key['a']
if key in objects_by_key['b']]
modified_objects = []
#we go through the keys that exist in both object sets
for key in joint_keys:
objects_a = objects_by_key['a'][key]
objects_b = objects_by_key['b'][key]
if len(objects_a) > 1 or len(objects_b) > 1:
#this is an ambiguous situation: we have more than one object for the same
#key, so we have to decide which ones have been added or not
#we try to remove identical objects from the set
objects_a_copy = objects_a[:]
objects_b_copy = objects_b[:]
#for the next step, we need a comparator
if comparator:
#we iterate through the list and try to find different objects...
for obj_a in objects_a:
for obj_b in objects_b_copy:
if comparator(obj_a,obj_b) == 0:
#these objects are identical, we remove them from both sets...
objects_a_copy.remove(obj_a)
objects_b_copy.remove(obj_b)
break
#here we cannot distinguish objects...
if len(objects_b_copy) > len(objects_a_copy):
#we arbitrarily mark the last objects in objects_b as added
added_objects.extend(objects_b_copy[len(objects_a_copy):])
elif len(objects_a_copy) > len(objects_b_copy):
#we arbitrarily mark the last objects in objects_a as deleted
deleted_objects.extend(objects_a_copy[len(objects_b_copy):])
else:
if comparator and comparator(objects_a[0],objects_b[0]) != 0:
#these objects are different
modified_objects.append(objects_a[0])
result = {
'added' : added_objects,
'deleted' : deleted_objects,
'modified' : modified_objects,
}
if with_unchanged:
#keep the objects from b whose key exists in both sets and that
#were not flagged as modified
unchanged_objects = [obj for key in joint_keys
for obj in objects_by_key['b'][key]
if not obj in modified_objects]
result['unchanged'] = unchanged_objects
return result | Returns a "diff" between two lists of objects.
:param key: The key that identifies objects with identical location in each set,
such as files with the same path or code objects with the same URL.
:param comparator: Comparison function that decides if two objects are identical. | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/lib/code/environment.py#L46-L123 |
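For illustration, a minimal sketch that diffs plain dicts keyed by path and compared on a hypothetical `sha` field:

```python
# 'c.py' only exists in b (added), 'b.py' only in a (deleted),
# and 'a.py' is identical in both, so nothing is modified.
a = [{'path': 'a.py', 'sha': '111'}, {'path': 'b.py', 'sha': '222'}]
b = [{'path': 'a.py', 'sha': '111'}, {'path': 'c.py', 'sha': '333'}]

result = diff_objects(
    a, b,
    key=lambda obj: obj['path'],
    comparator=lambda x, y: 0 if x['sha'] == y['sha'] else 1,
)
print([o['path'] for o in result['added']])    # ['c.py']
print([o['path'] for o in result['deleted']])  # ['b.py']
print(result['modified'])                      # []
```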
quantifiedcode/checkmate | checkmate/lib/code/environment.py | CodeEnvironment.diff_snapshots | def diff_snapshots(self,snapshot_a,snapshot_b,save = True, diff=None):
"""
Diffs two snapshots and returns a tuple of (diff, diff_file_revisions, diff_issue_occurrences).
"""
file_revisions_a = snapshot_a.file_revisions
file_revisions_b = snapshot_b.file_revisions
file_revisions_diff = diff_objects(file_revisions_a,
file_revisions_b,
file_revision_key,
file_revision_comparator)
#We just generate code objects and issues
#for the modified file revisions, to save time when diffing.
logger.debug("Generating list of modified file revisions...")
modified_file_revisions_by_path = {}
for fr_type in ('modified','added','deleted'):
for fr in file_revisions_diff[fr_type]:
if not fr.path in modified_file_revisions_by_path:
modified_file_revisions_by_path[fr.path] = fr
logger.debug("Generating list of modified issues...")
modified_file_revisions_a = [fr for fr in file_revisions_a
if fr.path in modified_file_revisions_by_path]
modified_file_revisions_b = [fr for fr in file_revisions_b
if fr.path in modified_file_revisions_by_path]
if modified_file_revisions_a:
#to do: check the file revisions chunk-wise to avoid DB query errors
issue_occurrences_a = self.project.backend.filter(IssueOccurrence,
{
'file_revision' : {'$in' : modified_file_revisions_a}
},
include = ('file_revision','issue'))
else:
issue_occurrences_a = []
if modified_file_revisions_b:
#to do: check the file revisions chunk-wise to avoid DB query errors
issue_occurrences_b = self.project.backend.filter(IssueOccurrence,
{
'file_revision' : {'$in' : modified_file_revisions_b}
},
include = ('file_revision','issue'))
else:
issue_occurrences_b = []
logger.debug("Diffing issues (%d in A, %d in B)" % (len(issue_occurrences_a),
len(issue_occurrences_b)))
issue_occurrences_diff = diff_objects(issue_occurrences_a,
issue_occurrences_b,
issue_occurrence_key,
issue_occurrence_comparator)
logger.debug("Diffing summary...")
summary_diff = self.diff_summaries(snapshot_a,snapshot_b)
if diff is None:
diff = Diff({'summary' : summary_diff,
'snapshot_a' : snapshot_a,
'project' : self.project,
'configuration' : self.project.configuration,
'snapshot_b' : snapshot_b})
#we generate the hash value for this diff
hasher = Hasher()
hasher.add(diff.snapshot_a.hash)
hasher.add(diff.snapshot_b.hash)
diff.hash = hasher.digest.hexdigest()
elif save:
with self.project.backend.transaction():
self.project.backend.filter(DiffFileRevision,{'diff' : diff}).delete()
self.project.backend.filter(DiffIssueOccurrence,{'diff' : diff}).delete()
if save:
with self.project.backend.transaction():
self.project.backend.save(diff)
diff_file_revisions = []
with self.project.backend.transaction():
for key,file_revisions in file_revisions_diff.items():
for file_revision in file_revisions:
hasher = Hasher()
hasher.add(file_revision.hash)
hasher.add(diff.hash)
hasher.add(key)
diff_file_revision = DiffFileRevision({
'diff' : diff,
'file_revision' : file_revision,
'hash' : hasher.digest.hexdigest(),
'key' : key})
if save:
self.project.backend.save(diff_file_revision)
diff_file_revisions.append(diff_file_revision)
diff_issue_occurrences = []
mapping = {'deleted' : 'fixed','added' : 'added'}
with self.project.backend.transaction():
for key,issue_occurrences in issue_occurrences_diff.items():
if not key in mapping:
continue
for issue_occurrence in issue_occurrences:
hasher = Hasher()
hasher.add(issue_occurrence.hash)
hasher.add(diff.hash)
hasher.add(mapping[key])
diff_issue_occurrence = DiffIssueOccurrence({
'diff' : diff,
'hash' : hasher.digest.hexdigest(),
'issue_occurrence' : issue_occurrence,
'key' : mapping[key]
})
if save:
self.project.backend.save(diff_issue_occurrence)
diff_issue_occurrences.append(diff_issue_occurrence)
return diff,diff_file_revisions,diff_issue_occurrences | python | def diff_snapshots(self,snapshot_a,snapshot_b,save = True, diff=None):
"""
Diffs two snapshots and returns a tuple of (diff, diff_file_revisions, diff_issue_occurrences).
"""
file_revisions_a = snapshot_a.file_revisions
file_revisions_b = snapshot_b.file_revisions
file_revisions_diff = diff_objects(file_revisions_a,
file_revisions_b,
file_revision_key,
file_revision_comparator)
#We just generate code objects and issues
#for the modified file revisions, to save time when diffing.
logger.debug("Generating list of modified file revisions...")
modified_file_revisions_by_path = {}
for fr_type in ('modified','added','deleted'):
for fr in file_revisions_diff[fr_type]:
if not fr.path in modified_file_revisions_by_path:
modified_file_revisions_by_path[fr.path] = fr
logger.debug("Generating list of modified issues...")
modified_file_revisions_a = [fr for fr in file_revisions_a
if fr.path in modified_file_revisions_by_path]
modified_file_revisions_b = [fr for fr in file_revisions_b
if fr.path in modified_file_revisions_by_path]
if modified_file_revisions_a:
#to do: check the file revisions chunk-wise to avoid DB query errors
issue_occurrences_a = self.project.backend.filter(IssueOccurrence,
{
'file_revision' : {'$in' : modified_file_revisions_a}
},
include = ('file_revision','issue'))
else:
issue_occurrences_a = []
if modified_file_revisions_b:
#to do: check the file revisions chunk-wise to avoid DB query errors
issue_occurrences_b = self.project.backend.filter(IssueOccurrence,
{
'file_revision' : {'$in' : modified_file_revisions_b}
},
include = ('file_revision','issue'))
else:
issue_occurrences_b = []
logger.debug("Diffing issues (%d in A, %d in B)" % (len(issue_occurrences_a),
len(issue_occurrences_b)))
issue_occurrences_diff = diff_objects(issue_occurrences_a,
issue_occurrences_b,
issue_occurrence_key,
issue_occurrence_comparator)
logger.debug("Diffing summary...")
summary_diff = self.diff_summaries(snapshot_a,snapshot_b)
if diff is None:
diff = Diff({'summary' : summary_diff,
'snapshot_a' : snapshot_a,
'project' : self.project,
'configuration' : self.project.configuration,
'snapshot_b' : snapshot_b})
#we generate the hash value for this diff
hasher = Hasher()
hasher.add(diff.snapshot_a.hash)
hasher.add(diff.snapshot_b.hash)
diff.hash = hasher.digest.hexdigest()
elif save:
with self.project.backend.transaction():
self.project.backend.filter(DiffFileRevision,{'diff' : diff}).delete()
self.project.backend.filter(DiffIssueOccurrence,{'diff' : diff}).delete()
if save:
with self.project.backend.transaction():
self.project.backend.save(diff)
diff_file_revisions = []
with self.project.backend.transaction():
for key,file_revisions in file_revisions_diff.items():
for file_revision in file_revisions:
hasher = Hasher()
hasher.add(file_revision.hash)
hasher.add(diff.hash)
hasher.add(key)
diff_file_revision = DiffFileRevision({
'diff' : diff,
'file_revision' : file_revision,
'hash' : hasher.digest.hexdigest(),
'key' : key})
if save:
self.project.backend.save(diff_file_revision)
diff_file_revisions.append(diff_file_revision)
diff_issue_occurrences = []
mapping = {'deleted' : 'fixed','added' : 'added'}
with self.project.backend.transaction():
for key,issue_occurrences in issue_occurrences_diff.items():
if not key in mapping:
continue
for issue_occurrence in issue_occurrences:
hasher = Hasher()
hasher.add(issue_occurrence.hash)
hasher.add(diff.hash)
hasher.add(mapping[key])
diff_issue_occurrence = DiffIssueOccurrence({
'diff' : diff,
'hash' : hasher.digest.hexdigest(),
'issue_occurrence' : issue_occurrence,
'key' : mapping[key]
})
if save:
self.project.backend.save(diff_issue_occurrence)
diff_issue_occurrences.append(diff_issue_occurrence)
return diff,diff_file_revisions,diff_issue_occurrences | Diffs two snapshots and returns a tuple of (diff, diff_file_revisions, diff_issue_occurrences). | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/lib/code/environment.py#L299-L420 |
quantifiedcode/checkmate | checkmate/lib/code/environment.py | CodeEnvironment.analyze | def analyze(self,file_revisions, save_if_empty = False, snapshot=None):
"""
Handling dependencies:
* First, generate a list of file revisions for this snapshot
* Then, check which of them already exist
* For the existing ones, check their dependencies
* If any of the dependencies are outdated, add the dependent file revision to the analyze list
How to handle hashes? Dependencies should be included in the hash sum.
* Just load the files based on their SHA values
* Check if dependencies match with the current set based on SHA values
* If not, re-analyze the file revision
* After analysis, calculate the hash value based on path, SHA and dependencies
"""
logger.debug("Analyzing code environment...")
if snapshot is None:
snapshot = Snapshot()
snapshot.configuration = self.project.configuration
file_revisions_by_pk = dict([(fr.hash,fr) for fr in file_revisions])
filtered_file_revisions = self.filter_file_revisions(file_revisions)
filtered_file_revisions_by_pk = dict([(fr.hash,fr) for fr in filtered_file_revisions])
excluded_file_revisions = [file_revisions_by_pk[pk]
for pk in file_revisions_by_pk.keys()
if not pk in filtered_file_revisions_by_pk
]
logger.info("Excluding %d file revisions" % len(excluded_file_revisions))
file_revisions = filtered_file_revisions
file_revisions_by_pk = filtered_file_revisions_by_pk
max_file_revisions = 10000
if len(file_revisions) > max_file_revisions:
logger.warning("Too many file revisions (%d) in snapshot, truncating at %d" %
(len(file_revisions),max_file_revisions))
file_revisions_by_pk = dict(sorted(file_revisions_by_pk.items(),
key = lambda x:x[0])[:max_file_revisions])
file_revisions = file_revisions_by_pk.values()
i = 0
chunk_size = 50
existing_file_revisions = []
file_revisions_by_pk_keys = file_revisions_by_pk.keys()
#we only check 50 keys at a time and then incrementally save them
while i < len(file_revisions_by_pk_keys):
file_revisions_by_pk_chunk = file_revisions_by_pk_keys[i:i+chunk_size]
if not file_revisions_by_pk_chunk:
break
existing_file_revisions.extend(list(self.project.backend.filter(FileRevision,{
'project' : self.project,
'hash' : {'$in' : file_revisions_by_pk_chunk}
})))
i+=chunk_size
existing_file_revisions_by_pk = dict([(fr.hash,fr) for fr in existing_file_revisions])
#collect only revisions that are new or whose configuration has
#changed; the list is built incrementally in the loop below
new_file_revisions = []
for file_revision in file_revisions:
if not file_revision.hash in existing_file_revisions_by_pk:
file_revision.configuration = self.project.configuration
new_file_revisions.append(file_revision)
elif existing_file_revisions_by_pk[file_revision.hash].configuration != self.project.configuration:
#we replace the pk and configuration values of the new file_revision object, so that
#it will overwrite the old version...
file_revision.pk = existing_file_revisions_by_pk[file_revision.hash].pk
file_revision.configuration = self.project.configuration
new_file_revisions.append(file_revision)
file_revisions_dict = {}
for file_revision in existing_file_revisions+new_file_revisions:
file_revisions_dict[file_revision.path] = file_revision
logger.info("Analyzing %d new file revisions (%d are already analyzed)" % (
len(new_file_revisions),
len(existing_file_revisions)
))
i = 0
#We set the project information in the snapshot.
snapshot.project = self.project
snapshot.file_revisions = file_revisions_dict.values()
self.env['snapshot'] = snapshot
try:
while i < len(new_file_revisions):
j = i+10 if i+10 < len(new_file_revisions) else len(new_file_revisions)
logger.info("Analyzing and saving: %d - %d (%d remaining)" %
(i, j, len(new_file_revisions) - i ))
file_revisions_slice = new_file_revisions[i:j]
analyzed_file_revisions = self.analyze_file_revisions(file_revisions_slice)
logger.info("Annotating and saving file revisions...")
self.save_file_revisions(snapshot,analyzed_file_revisions)
i+=10
logger.info("Summarizing file revisions...")
snapshot.summary = self.summarize(file_revisions_dict.values())
finally:
del self.env['snapshot']
snapshot.analyzed = True
logger.info("Saving snapshot...")
with self.project.backend.transaction():
self.project.backend.save(snapshot)
logger.info("Done analyzing snapshot %s" % snapshot.pk)
return snapshot | python | def analyze(self,file_revisions, save_if_empty = False, snapshot=None):
"""
Handling dependencies:
* First, generate a list of file revisions for this snapshot
* Then, check which of them already exist
* For the existing ones, check their dependencies
* If any of the dependencies are outdated, add the dependent file revision to the analyze list
How to handle hashes? Dependencies should be included in the hash sum.
* Just load the files based on their SHA values
* Check if dependencies match with the current set based on SHA values
* If not, re-analyze the file revision
* After analysis, calculate the hash value based on path, SHA and dependencies
"""
logger.debug("Analyzing code environment...")
if snapshot is None:
snapshot = Snapshot()
snapshot.configuration = self.project.configuration
file_revisions_by_pk = dict([(fr.hash,fr) for fr in file_revisions])
filtered_file_revisions = self.filter_file_revisions(file_revisions)
filtered_file_revisions_by_pk = dict([(fr.hash,fr) for fr in filtered_file_revisions])
excluded_file_revisions = [file_revisions_by_pk[pk]
for pk in file_revisions_by_pk.keys()
if not pk in filtered_file_revisions_by_pk
]
logger.info("Excluding %d file revisions" % len(excluded_file_revisions))
file_revisions = filtered_file_revisions
file_revisions_by_pk = filtered_file_revisions_by_pk
max_file_revisions = 10000
if len(file_revisions) > max_file_revisions:
logger.warning("Too many file revisions (%d) in snapshot, truncating at %d" %
(len(file_revisions),max_file_revisions))
file_revisions_by_pk = dict(sorted(file_revisions_by_pk.items(),
key = lambda x:x[0])[:max_file_revisions])
file_revisions = file_revisions_by_pk.values()
i = 0
chunk_size = 50
existing_file_revisions = []
file_revisions_by_pk_keys = file_revisions_by_pk.keys()
#we only check 50 keys at a time and then incrementally save them
while i < len(file_revisions_by_pk_keys):
file_revisions_by_pk_chunk = file_revisions_by_pk_keys[i:i+chunk_size]
if not file_revisions_by_pk_chunk:
break
existing_file_revisions.extend(list(self.project.backend.filter(FileRevision,{
'project' : self.project,
'hash' : {'$in' : file_revisions_by_pk_chunk}
})))
i+=chunk_size
existing_file_revisions_by_pk = dict([(fr.hash,fr) for fr in existing_file_revisions])
#collect only revisions that are new or whose configuration has
#changed; the list is built incrementally in the loop below
new_file_revisions = []
for file_revision in file_revisions:
if not file_revision.hash in existing_file_revisions_by_pk:
file_revision.configuration = self.project.configuration
new_file_revisions.append(file_revision)
elif existing_file_revisions_by_pk[file_revision.hash].configuration != self.project.configuration:
#we replace the pk and configuration values of the new file_revision object, so that
#it will overwrite the old version...
file_revision.pk = existing_file_revisions_by_pk[file_revision.hash].pk
file_revision.configuration = self.project.configuration
new_file_revisions.append(file_revision)
file_revisions_dict = {}
for file_revision in existing_file_revisions+new_file_revisions:
file_revisions_dict[file_revision.path] = file_revision
logger.info("Analyzing %d new file revisions (%d are already analyzed)" % (
len(new_file_revisions),
len(existing_file_revisions)
))
i = 0
#We set the project information in the snapshot.
snapshot.project = self.project
snapshot.file_revisions = file_revisions_dict.values()
self.env['snapshot'] = snapshot
try:
while i < len(new_file_revisions):
j = i+10 if i+10 < len(new_file_revisions) else len(new_file_revisions)
logger.info("Analyzing and saving: %d - %d (%d remaining)" %
(i, j, len(new_file_revisions) - i ))
file_revisions_slice = new_file_revisions[i:j]
analyzed_file_revisions = self.analyze_file_revisions(file_revisions_slice)
logger.info("Annotating and saving file revisions...")
self.save_file_revisions(snapshot,analyzed_file_revisions)
i+=10
logger.info("Summarizing file revisions...")
snapshot.summary = self.summarize(file_revisions_dict.values())
finally:
del self.env['snapshot']
snapshot.analyzed = True
logger.info("Saving snapshot...")
with self.project.backend.transaction():
self.project.backend.save(snapshot)
logger.info("Done analyzing snapshot %s" % snapshot.pk)
return snapshot | Handling dependencies:
* First, generate a list of file revisions for this snapshot
* Then, check which of them already exist
* For the existing ones, check their dependencies
* If any of the dependencies are outdated, add the dependent file revision to the analyze list
How to handle hashes? Dependencies should be included in the hash sum.
* Just load the files based on their SHA values
* Check if dependencies match with the current set based on SHA values
* If not, re-analyze the file revision
* After analysis, calculate the hash value based on path, SHA and dependencies | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/lib/code/environment.py#L567-L689 |
quantifiedcode/checkmate | checkmate/lib/code/environment.py | CodeEnvironment.save_file_revisions | def save_file_revisions(self,snapshot,file_revisions):
"""
We convert various items in the file revision to documents,
so that we can easily search and retrieve them...
"""
annotations = defaultdict(list)
for file_revision in file_revisions:
issues_results = {}
for analyzer_name,results in file_revision.results.items():
if 'issues' in results:
issues_results[analyzer_name] = results['issues']
del results['issues']
#guard against analyzers that report an excessive number of issues
if len(issues_results[analyzer_name]) > 1000:
issues_results[analyzer_name] = [{
'code' : 'TooManyIssues',
'analyzer' : analyzer_name,
}]
with self.project.backend.transaction():
self.project.backend.save(file_revision)
def location_sorter(issue):
if issue['location'] and issue['location'][0] and issue['location'][0][0]:
return issue['location'][0][0][0]
return 0
with self.project.backend.transaction():
for analyzer_name,issues in issues_results.items():
grouped_issues = group_issues_by_fingerprint(issues)
for issue_dict in grouped_issues:
hasher = Hasher()
hasher.add(analyzer_name)
hasher.add(issue_dict['code'])
hasher.add(issue_dict['fingerprint'])
issue_dict['hash'] = hasher.digest.hexdigest()
try:
#we check if the issue already exists
issue = self.project.backend.get(Issue,{'hash' : issue_dict['hash'],
'project' : self.project
})
except Issue.DoesNotExist:
#if not, we create it
d = issue_dict.copy()
d['analyzer'] = analyzer_name
if 'location' in d:
del d['location']
if 'occurrences' in d:
del d['occurrences']
issue = Issue(d)
issue.project = self.project
self.project.backend.save(issue)
for occurrence in issue_dict['occurrences']:
hasher = Hasher()
hasher.add(file_revision.hash)
hasher.add(issue.hash)
hasher.add(occurrence.get('from_row'))
hasher.add(occurrence.get('from_column'))
hasher.add(occurrence.get('to_row'))
hasher.add(occurrence.get('to_column'))
hasher.add(occurrence.get('sequence'))
occurrence['hash'] = hasher.digest.hexdigest()
try:
#we check if the occurrence already exists
occurrence = self.project.backend.get(IssueOccurrence,{'hash' : occurrence['hash'],
'issue' : issue
})
except IssueOccurrence.DoesNotExist:
#if not, we create it
occurrence = IssueOccurrence(occurrence)
occurrence.issue = issue
occurrence.file_revision = file_revision
self.project.backend.save(occurrence)
annotations['occurrences'].append(occurrence)
annotations['issues'].append(issue)
return annotations | python | def save_file_revisions(self,snapshot,file_revisions):
"""
We convert various items in the file revision to documents,
so that we can easily search and retrieve them...
"""
annotations = defaultdict(list)
for file_revision in file_revisions:
issues_results = {}
for analyzer_name,results in file_revision.results.items():
if 'issues' in results:
issues_results[analyzer_name] = results['issues']
del results['issues']
#guard against analyzers that report an excessive number of issues
if len(issues_results[analyzer_name]) > 1000:
issues_results[analyzer_name] = [{
'code' : 'TooManyIssues',
'analyzer' : analyzer_name,
}]
with self.project.backend.transaction():
self.project.backend.save(file_revision)
def location_sorter(issue):
if issue['location'] and issue['location'][0] and issue['location'][0][0]:
return issue['location'][0][0][0]
return 0
with self.project.backend.transaction():
for analyzer_name,issues in issues_results.items():
grouped_issues = group_issues_by_fingerprint(issues)
for issue_dict in grouped_issues:
hasher = Hasher()
hasher.add(analyzer_name)
hasher.add(issue_dict['code'])
hasher.add(issue_dict['fingerprint'])
issue_dict['hash'] = hasher.digest.hexdigest()
try:
#we check if the issue already exists
issue = self.project.backend.get(Issue,{'hash' : issue_dict['hash'],
'project' : self.project
})
except Issue.DoesNotExist:
#if not, we create it
d = issue_dict.copy()
d['analyzer'] = analyzer_name
if 'location' in d:
del d['location']
if 'occurrences' in d:
del d['occurrences']
issue = Issue(d)
issue.project = self.project
self.project.backend.save(issue)
for occurrence in issue_dict['occurrences']:
hasher = Hasher()
hasher.add(file_revision.hash)
hasher.add(issue.hash)
hasher.add(occurrence.get('from_row'))
hasher.add(occurrence.get('from_column'))
hasher.add(occurrence.get('to_row'))
hasher.add(occurrence.get('to_column'))
hasher.add(occurrence.get('sequence'))
occurrence['hash'] = hasher.digest.hexdigest()
try:
#we check if the occurrence already exists
occurrence = self.project.backend.get(IssueOccurrence,{'hash' : occurrence['hash'],
'issue' : issue
})
except IssueOccurrence.DoesNotExist:
#if not, we create it
occurrence = IssueOccurrence(occurrence)
occurrence.issue = issue
occurrence.file_revision = file_revision
self.project.backend.save(occurrence)
annotations['occurrences'].append(occurrence)
annotations['issues'].append(issue)
return annotations | We convert various items in the file revision to documents,
so that we can easily search and retrieve them... | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/lib/code/environment.py#L691-L774 |
quantifiedcode/checkmate | checkmate/helpers/settings.py | update | def update(d,ud):
"""
Recursively merge the values of ud into d.
"""
if ud is None:
return
for key,value in ud.items():
if not key in d:
d[key] = value
elif isinstance(value,dict):
update(d[key],value)
else:
d[key] = value | python | def update(d,ud):
"""
Recursively merge the values of ud into d.
"""
if ud is None:
return
for key,value in ud.items():
if not key in d:
d[key] = value
elif isinstance(value,dict):
update(d[key],value)
else:
d[key] = value | Recursively merge the values of ud into d. | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/helpers/settings.py#L2-L14 |
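A short demonstration, assuming `update` is in scope: nested dicts are merged recursively, while scalar values in `ud` overwrite those in `d`.

```python
d = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
ud = {'db': {'port': 5433}, 'debug': True}

update(d, ud)  # mutates d in place
print(d)
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}
```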
quantifiedcode/checkmate | checkmate/contrib/plugins/git/commands/init.py | Command.find_git_repository | def find_git_repository(self, path):
"""
Tries to find a directory with a .git repository
"""
while path:
git_path = os.path.join(path,'.git')
if os.path.exists(git_path) and os.path.isdir(git_path):
return path
parent = os.path.dirname(path)
if parent == path:
#stop at the filesystem root, where dirname() returns its input unchanged
return None
path = parent
return None | python | def find_git_repository(self, path):
"""
Tries to find a directory with a .git repository
"""
while path:
git_path = os.path.join(path,'.git')
if os.path.exists(git_path) and os.path.isdir(git_path):
return path
parent = os.path.dirname(path)
if parent == path:
#stop at the filesystem root, where dirname() returns its input unchanged
return None
path = parent
return None | Tries to find a directory with a .git repository | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/contrib/plugins/git/commands/init.py#L26-L35 |
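A hypothetical call, assuming `command` is an instance of this `Command` class:

```python
# Walks upward from a nested source directory and returns the first
# ancestor containing a .git directory, or None if there is none.
root = command.find_git_repository("/home/alice/projects/app/src/utils")
# e.g. "/home/alice/projects/app" if that directory holds ".git"
```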
quantifiedcode/checkmate | checkmate/migrations/env.py | run_migrations_offline | def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
project_path = get_project_path()
project_config = get_project_config(project_path)
backend = get_backend(project_path,project_config,initialize_db = False)
url = str(backend.engine.url)
with backend.transaction():
context.configure(
connection=backend.connection,
url=url, target_metadata=backend.metadata,
literal_binds=True)
with context.begin_transaction():
context.run_migrations() | python | def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
project_path = get_project_path()
project_config = get_project_config(project_path)
backend = get_backend(project_path,project_config,initialize_db = False)
url = str(backend.engine.url)
with backend.transaction():
context.configure(
connection=backend.connection,
url=url, target_metadata=backend.metadata,
literal_binds=True)
with context.begin_transaction():
context.run_migrations() | Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output. | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/migrations/env.py#L12-L36 |
quantifiedcode/checkmate | checkmate/migrations/env.py | run_migrations_online | def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
print("Running migrations online")
project_path = get_project_path()
project_config = get_project_config(project_path)
backend = get_backend(project_path,project_config,initialize_db = False)
context.configure(
connection=backend.connection,
target_metadata=backend.metadata,
)
with context.begin_transaction():
context.run_migrations() | python | def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
print("Running migrations online")
project_path = get_project_path()
project_config = get_project_config(project_path)
backend = get_backend(project_path,project_config,initialize_db = False)
context.configure(
connection=backend.connection,
target_metadata=backend.metadata,
)
with context.begin_transaction():
context.run_migrations() | Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context. | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/migrations/env.py#L39-L59 |
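In a standard Alembic `env.py`, these two functions are typically dispatched at module level; a sketch of the usual wiring:

```python
# Offline mode emits SQL to the script output; online mode runs the
# migrations against a live connection.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
```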
quantifiedcode/checkmate | checkmate/helpers/hashing.py | get_hash | def get_hash(node,fields = None,exclude = ['pk','_id'],target = 'pk'):
"""
Here we generate a unique hash for a given node in the syntax tree.
"""
hasher = Hasher()
def add_to_hash(value):
if isinstance(value,dict):
if target in value:
add_to_hash(value[target])
else:
attribute_list = []
for key,v in sorted(value.items(),key = lambda x: x[0]):
if (fields is not None and key not in fields) \
or (exclude is not None and key in exclude):
continue
add_to_hash(key)
add_to_hash(v)
elif isinstance(value,(tuple,list)) and value and isinstance(value[0],(dict,node_class)):
for i,v in enumerate(value):
hasher.add(i)
add_to_hash(v)
else:
hasher.add(value)
add_to_hash(node)
return hasher.digest.hexdigest() | python | def get_hash(node,fields = None,exclude = ['pk','_id'],target = 'pk'):
"""
Here we generate a unique hash for a given node in the syntax tree.
"""
hasher = Hasher()
def add_to_hash(value):
if isinstance(value,dict):
if target in value:
add_to_hash(value[target])
else:
attribute_list = []
for key,v in sorted(value.items(),key = lambda x: x[0]):
if (fields is not None and key not in fields) \
or (exclude is not None and key in exclude):
continue
add_to_hash(key)
add_to_hash(v)
elif isinstance(value,(tuple,list)) and value and isinstance(value[0],(dict,node_class)):
for i,v in enumerate(value):
hasher.add(i)
add_to_hash(v)
else:
hasher.add(value)
add_to_hash(node)
return hasher.digest.hexdigest() | Here we generate a unique hash for a given node in the syntax tree. | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/helpers/hashing.py#L35-L65 |
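A small sketch of the `target` short-circuit, assuming `get_hash` (and its module-level `Hasher`) is importable: a dict that carries the target key is hashed by that key's value alone.

```python
# add_to_hash() short-circuits on 'pk' (the default target), so two
# nodes with the same pk hash identically even if other fields differ.
x = {'pk': 42, 'name': 'foo'}
y = {'pk': 42, 'name': 'bar'}
assert get_hash(x) == get_hash(y)
```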
quantifiedcode/checkmate | checkmate/contrib/plugins/python/pylint/analyzer.py | Reporter.add_message | def add_message(self, msg_id, location, msg):
"""Client API to send a message"""
self._messages.append((msg_id,location,msg)) | python | def add_message(self, msg_id, location, msg):
"""Client API to send a message"""
self._messages.append((msg_id,location,msg)) | Client API to send a message | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/contrib/plugins/python/pylint/analyzer.py#L99-L102 |
quantifiedcode/checkmate | checkmate/lib/analysis/base.py | BaseAnalyzer.get_fingerprint_from_code | def get_fingerprint_from_code(self,file_revision,location, extra_data=None):
"""
This function generates a fingerprint from a series of code snippets.
Can be used by derived analyzers to generate fingerprints based on code
if nothing better is available.
"""
code = file_revision.get_file_content()
if not isinstance(code,unicode):
code = unicode(code,errors = 'ignore')
lines = code.split(u"\n")
s = ""
for l in location:
((from_row,from_column),(to_row,to_column)) = l
if from_column is None:
continue
if from_row == to_row:
s+=lines[from_row-1][from_column:to_column]
else:
if to_row < from_row:
raise ValueError("from_row must be smaller than to_row")
s+=lines[from_row-1][from_column:]
current_row = from_row+1
while current_row < to_row:
s+=lines[current_row-1]
current_row+=1
s+=lines[current_row-1][:to_column]
hasher = Hasher()
hasher.add(s)
if extra_data is not None:
hasher.add(extra_data)
return hasher.digest.hexdigest() | python | def get_fingerprint_from_code(self,file_revision,location, extra_data=None):
"""
This function generates a fingerprint from a series of code snippets.
Can be used by derived analyzers to generate fingerprints based on code
if nothing better is available.
"""
code = file_revision.get_file_content()
if not isinstance(code,unicode):
code = unicode(code,errors = 'ignore')
lines = code.split(u"\n")
s = ""
for l in location:
((from_row,from_column),(to_row,to_column)) = l
if from_column is None:
continue
if from_row == to_row:
s+=lines[from_row-1][from_column:to_column]
else:
if to_row < from_row:
raise ValueError("from_row must be smaller than to_row")
s+=lines[from_row-1][from_column:]
current_row = from_row+1
while current_row < to_row:
s+=lines[current_row-1]
current_row+=1
s+=lines[current_row-1][:to_column]
hasher = Hasher()
hasher.add(s)
if extra_data is not None:
hasher.add(extra_data)
return hasher.digest.hexdigest() | This function generates a fingerprint from a series of code snippets.
Can be used by derived analyzers to generate fingerprints based on code
if nothing better is available. | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/lib/analysis/base.py#L34-L68 |
quantifiedcode/checkmate | checkmate/helpers/issue.py | group_issues_by_fingerprint | def group_issues_by_fingerprint(issues):
"""
Groups issues by fingerprint. Grouping is done by issue code in addition.
IMPORTANT: It is assumed that all issues come from the SAME analyzer.
"""
issues_by_fingerprint = defaultdict(list)
for issue in issues:
if not 'fingerprint' in issue:
raise AttributeError("No fingerprint defined for issue with analyzer %s and code %s!" %
(issue.get('analyzer','(undefined)'),issue['code']))
fp_code = "%s:%s" % (issue['fingerprint'],issue['code'])
if fp_code in issues_by_fingerprint:
grouped_issue = issues_by_fingerprint[fp_code]
else:
grouped_issue = issue.copy()
grouped_issue['occurrences'] = []
if 'location' in grouped_issue:
del grouped_issue['location']
issues_by_fingerprint[fp_code] = grouped_issue
locations = issue.get('location',[])
if locations:
for i,start_stop in enumerate(locations):
occurrence = {
'from_row' : None,
'to_row' : None,
'from_column' : None,
'to_column' : None,
'sequence' : i
}
grouped_issue['occurrences'].append(occurrence)
if not isinstance(start_stop,(list,tuple)) or not len(start_stop) == 2:
continue
start,stop = start_stop
if isinstance(start,(list,tuple)) and len(start) == 2:
occurrence['from_row'] = start[0]
occurrence['from_column'] = start[1]
if isinstance(stop,(list,tuple)) and len(stop) == 2:
occurrence['to_row'] = stop[0]
occurrence['to_column'] = stop[1]
grouped_issue['occurrences'] = sorted(grouped_issue['occurrences'],key = lambda x: (x['from_row'],x['from_column']))
return issues_by_fingerprint.values() | python | def group_issues_by_fingerprint(issues):
"""
Groups issues by fingerprint. Grouping is done by issue code in addition.
IMPORTANT: It is assumed that all issues come from the SAME analyzer.
"""
issues_by_fingerprint = defaultdict(list)
for issue in issues:
if not 'fingerprint' in issue:
raise AttributeError("No fingerprint defined for issue with analyzer %s and code %s!" %
(issue.get('analyzer','(undefined)'),issue['code']))
fp_code = "%s:%s" % (issue['fingerprint'],issue['code'])
if fp_code in issues_by_fingerprint:
grouped_issue = issues_by_fingerprint[fp_code]
else:
grouped_issue = issue.copy()
grouped_issue['occurrences'] = []
if 'location' in grouped_issue:
del grouped_issue['location']
issues_by_fingerprint[fp_code] = grouped_issue
locations = issue.get('location',[])
if locations:
for i,start_stop in enumerate(locations):
occurrence = {
'from_row' : None,
'to_row' : None,
'from_column' : None,
'to_column' : None,
'sequence' : i
}
grouped_issue['occurrences'].append(occurrence)
if not isinstance(start_stop,(list,tuple)) or not len(start_stop) == 2:
continue
start,stop = start_stop
if isinstance(start,(list,tuple)) and len(start) == 2:
occurrence['from_row'] = start[0]
occurrence['from_column'] = start[1]
if isinstance(stop,(list,tuple)) and len(stop) == 2:
occurrence['to_row'] = stop[0]
occurrence['to_column'] = stop[1]
grouped_issue['occurrences'] = sorted(grouped_issue['occurrences'],key = lambda x: (x['from_row'],x['from_column']))
return issues_by_fingerprint.values() | Groups issues by fingerprint. Grouping is done by issue code in addition.
IMPORTANT: It is assumed that all issues come from the SAME analyzer. | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/helpers/issue.py#L49-L98 |
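A minimal sketch with two issues sharing a fingerprint, assuming the function above is in scope:

```python
# Same fingerprint and code: the two issues collapse into one grouped
# issue whose occurrences are sorted by (from_row, from_column).
issues = [
    {'code': 'W0612', 'fingerprint': 'abc', 'location': [((10, 4), (10, 12))]},
    {'code': 'W0612', 'fingerprint': 'abc', 'location': [((3, 0), (3, 8))]},
]
grouped = list(group_issues_by_fingerprint(issues))
print(len(grouped))  # 1
print([(o['from_row'], o['from_column']) for o in grouped[0]['occurrences']])
# [(3, 0), (10, 4)]
```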
quantifiedcode/checkmate | checkmate/lib/models.py | Project.get_issue_classes | def get_issue_classes(self,backend = None,enabled = True,sort = None,**kwargs):
"""
Retrieves the issue classes for a given backend
:param backend: A backend to use. If None, the default backend will be used
:param enabled: Whether to retrieve enabled or disabled issue classes.
Passing `None` will retrieve all issue classes.
"""
if backend is None:
backend = self.backend
query = {'project_issue_classes.project' : self}
if enabled is not None:
query['project_issue_classes.enabled'] = enabled
issue_classes = backend.filter(self.IssueClass,query,
**kwargs)
if sort is not None:
issue_classes = issue_classes.sort(sort)
return issue_classes | python | def get_issue_classes(self,backend = None,enabled = True,sort = None,**kwargs):
"""
Retrieves the issue classes for a given backend
:param backend: A backend to use. If None, the default backend will be used
:param enabled: Whether to retrieve enabled or disabled issue classes.
Passing `None` will retrieve all issue classes.
"""
if backend is None:
backend = self.backend
query = {'project_issue_classes.project' : self}
if enabled is not None:
query['project_issue_classes.enabled'] = enabled
issue_classes = backend.filter(self.IssueClass,query,
**kwargs)
if sort is not None:
issue_classes = issue_classes.sort(sort)
return issue_classes | Retrieves the issue classes for a given backend
:param backend: A backend to use. If None, the default backend will be used
:param enabled: Whether to retrieve enabled or disabled issue classes.
Passing `None` will retrieve all issue classes. | https://github.com/quantifiedcode/checkmate/blob/1a4d010c8ef25c678d8d14dc8e37a9bed1883ca2/checkmate/lib/models.py#L413-L435 |
thombashi/pathvalidate | pathvalidate/_symbol.py | validate_symbol | def validate_symbol(text):
"""
Verify whether the ``text`` includes any symbols.
:param str text: Input text.
:raises pathvalidate.InvalidCharError:
If symbol(s) are included in the ``text``.
"""
match_list = __RE_SYMBOL.findall(preprocess(text))
if match_list:
raise InvalidCharError("invalid symbols found: {}".format(match_list)) | python | def validate_symbol(text):
"""
Verify whether the ``text`` includes any symbols.
:param str text: Input text.
:raises pathvalidate.InvalidCharError:
If symbol(s) are included in the ``text``.
"""
match_list = __RE_SYMBOL.findall(preprocess(text))
if match_list:
raise InvalidCharError("invalid symbols found: {}".format(match_list)) | Verifying whether symbol(s) included in the ``text`` or not.
:param str text: Input text.
:raises pathvalidate.InvalidCharError:
If symbol(s) are included in the ``text``. | https://github.com/thombashi/pathvalidate/blob/22d64038fb08c04aa9d0e8dd9fd1a955c1a9bfef/pathvalidate/_symbol.py#L36-L47 |
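Usage sketch, assuming `validate_symbol` and `InvalidCharError` are exported at package level (as the `:raises:` reference above suggests):

```python
from pathvalidate import validate_symbol, InvalidCharError

try:
    validate_symbol("user@example")
except InvalidCharError as e:
    print(e)  # e.g. invalid symbols found: ['@']
```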
thombashi/pathvalidate | pathvalidate/_symbol.py | replace_symbol | def replace_symbol(text, replacement_text="", is_replace_consecutive_chars=False, is_strip=False):
"""
Replace all of the symbols in the ``text``.
:param str text: Input text.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
:Examples:
:ref:`example-sanitize-symbol`
"""
try:
new_text = __RE_SYMBOL.sub(replacement_text, preprocess(text))
except (TypeError, AttributeError):
raise TypeError("text must be a string")
if not replacement_text:
return new_text
if is_replace_consecutive_chars:
new_text = re.sub("{}+".format(re.escape(replacement_text)), replacement_text, new_text)
if is_strip:
new_text = new_text.strip(replacement_text)
return new_text | python | def replace_symbol(text, replacement_text="", is_replace_consecutive_chars=False, is_strip=False):
"""
Replace all of the symbols in the ``text``.
:param str text: Input text.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
:Examples:
:ref:`example-sanitize-symbol`
"""
try:
new_text = __RE_SYMBOL.sub(replacement_text, preprocess(text))
except (TypeError, AttributeError):
raise TypeError("text must be a string")
if not replacement_text:
return new_text
if is_replace_consecutive_chars:
new_text = re.sub("{}+".format(re.escape(replacement_text)), replacement_text, new_text)
if is_strip:
new_text = new_text.strip(replacement_text)
return new_text | Replace all of the symbols in the ``text``.
:param str text: Input text.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
:Examples:
:ref:`example-sanitize-symbol` | https://github.com/thombashi/pathvalidate/blob/22d64038fb08c04aa9d0e8dd9fd1a955c1a9bfef/pathvalidate/_symbol.py#L50-L78 |
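And the sanitizing counterpart under the same import assumption, with run-collapsing and stripping enabled (this sketch assumes underscores do not count as symbols):

```python
from pathvalidate import replace_symbol

print(replace_symbol("_a@@b_", replacement_text="_",
                     is_replace_consecutive_chars=True, is_strip=True))
# a_b  (the '@@' run collapses to one '_', then edges are stripped)
```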
thombashi/pathvalidate | pathvalidate/_file.py | validate_filename | def validate_filename(filename, platform=None, min_len=1, max_len=_DEFAULT_MAX_FILENAME_LEN):
"""Verifying whether the ``filename`` is a valid file name or not.
Args:
filename (str):
Filename to validate.
platform (str, optional):
.. include:: platform.txt
min_len (int, optional):
Minimum length of the ``filename``. The value must be greater or equal to one.
Defaults to ``1``.
max_len (int, optional):
Maximum length of the ``filename``. The value must be lower than:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
- ``Universal``: 260
Defaults to ``255``.
Raises:
InvalidLengthError:
If the ``filename`` is longer than ``max_len`` characters.
InvalidCharError:
If the ``filename`` includes invalid character(s) for a filename:
|invalid_filename_chars|.
The following characters are also invalid for Windows platform:
|invalid_win_filename_chars|.
ReservedNameError:
If the ``filename`` equals reserved name by OS.
Windows reserved name is as follows:
``"CON"``, ``"PRN"``, ``"AUX"``, ``"NUL"``, ``"COM[1-9]"``, ``"LPT[1-9]"``.
Example:
:ref:`example-validate-filename`
See Also:
`Naming Files, Paths, and Namespaces (Windows)
<https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__
"""
FileNameSanitizer(platform=platform, min_len=min_len, max_len=max_len).validate(filename) | python | def validate_filename(filename, platform=None, min_len=1, max_len=_DEFAULT_MAX_FILENAME_LEN):
"""Verifying whether the ``filename`` is a valid file name or not.
Args:
filename (str):
Filename to validate.
platform (str, optional):
.. include:: platform.txt
min_len (int, optional):
Minimum length of the ``filename``. The value must be greater or equal to one.
Defaults to ``1``.
max_len (int, optional):
Maximum length of the ``filename``. The value must be lower than:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
- ``Universal``: 260
Defaults to ``255``.
Raises:
InvalidLengthError:
If the ``filename`` is longer than ``max_len`` characters.
InvalidCharError:
If the ``filename`` includes invalid character(s) for a filename:
|invalid_filename_chars|.
The following characters are also invalid for Windows platform:
|invalid_win_filename_chars|.
ReservedNameError:
If the ``filename`` equals reserved name by OS.
Windows reserved name is as follows:
``"CON"``, ``"PRN"``, ``"AUX"``, ``"NUL"``, ``"COM[1-9]"``, ``"LPT[1-9]"``.
Example:
:ref:`example-validate-filename`
See Also:
`Naming Files, Paths, and Namespaces (Windows)
<https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__
"""
FileNameSanitizer(platform=platform, min_len=min_len, max_len=max_len).validate(filename) | Verifying whether the ``filename`` is a valid file name or not.
Args:
filename (str):
Filename to validate.
platform (str, optional):
.. include:: platform.txt
min_len (int, optional):
Minimum length of the ``filename``. The value must be greater or equal to one.
Defaults to ``1``.
max_len (int, optional):
Maximum length of the ``filename``. The value must be lower than:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
- ``Universal``: 260
Defaults to ``255``.
Raises:
InvalidLengthError:
If the ``filename`` is longer than ``max_len`` characters.
InvalidCharError:
If the ``filename`` includes invalid character(s) for a filename:
|invalid_filename_chars|.
The following characters are also invalid for Windows platform:
|invalid_win_filename_chars|.
ReservedNameError:
If the ``filename`` equals a name reserved by the OS.
Windows reserved names are as follows:
``"CON"``, ``"PRN"``, ``"AUX"``, ``"NUL"``, ``"COM[1-9]"``, ``"LPT[1-9]"``.
Example:
:ref:`example-validate-filename`
See Also:
`Naming Files, Paths, and Namespaces (Windows)
<https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__ | https://github.com/thombashi/pathvalidate/blob/22d64038fb08c04aa9d0e8dd9fd1a955c1a9bfef/pathvalidate/_file.py#L432-L474 |
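A minimal usage sketch for validate_filename. The platform string and the assumption that the error classes listed above subclass ValueError follow from the docstrings in this file, not from a check against the installed package:

from pathvalidate import validate_filename

for name in ["report.txt", "fi:le?.txt", "COM1"]:
    try:
        validate_filename(name, platform="Windows")
        print("{} is a valid filename".format(name))
    except ValueError as exc:  # InvalidCharError / ReservedNameError / InvalidLengthError
        print("{} rejected: {}".format(name, exc))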
thombashi/pathvalidate | pathvalidate/_file.py | validate_filepath | def validate_filepath(file_path, platform=None, min_len=1, max_len=None):
"""Verifying whether the ``file_path`` is a valid file path or not.
Args:
file_path (str):
File path to validate.
platform (str, optional):
.. include:: platform.txt
min_len (int, optional):
Minimum length of the ``file_path``. The value must be greater or equal to one.
Defaults to ``1``.
max_len (int, optional):
Maximum length of the ``file_path``. If the value is |None|,
the value is automatically determined by the ``platform``:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
Raises:
NullNameError:
If the ``file_path`` is empty.
InvalidCharError:
If the ``file_path`` includes invalid char(s):
|invalid_file_path_chars|.
The following characters are also invalid for Windows platform:
|invalid_win_file_path_chars|
InvalidLengthError:
If the ``file_path`` is longer than ``max_len`` characters.
Example:
:ref:`example-validate-file-path`
See Also:
`Naming Files, Paths, and Namespaces (Windows)
<https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__
"""
FilePathSanitizer(platform=platform, min_len=min_len, max_len=max_len).validate(file_path) | python | def validate_filepath(file_path, platform=None, min_len=1, max_len=None):
"""Verifying whether the ``file_path`` is a valid file path or not.
Args:
file_path (str):
File path to validate.
platform (str, optional):
.. include:: platform.txt
min_len (int, optional):
Minimum length of the ``file_path``. The value must be greater or equal to one.
Defaults to ``1``.
max_len (int, optional):
Maximum length of the ``file_path``. If the value is |None|,
the value is automatically determined by the ``platform``:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
Raises:
NullNameError:
If the ``file_path`` is empty.
InvalidCharError:
If the ``file_path`` includes invalid char(s):
|invalid_file_path_chars|.
The following characters are also invalid for Windows platform:
|invalid_win_file_path_chars|
InvalidLengthError:
If the ``file_path`` is longer than ``max_len`` characters.
Example:
:ref:`example-validate-file-path`
See Also:
`Naming Files, Paths, and Namespaces (Windows)
<https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__
"""
FilePathSanitizer(platform=platform, min_len=min_len, max_len=max_len).validate(file_path) | Verifying whether the ``file_path`` is a valid file path or not.
Args:
file_path (str):
File path to validate.
platform (str, optional):
.. include:: platform.txt
min_len (int, optional):
Minimum length of the ``file_path``. The value must be greater or equal to one.
Defaults to ``1``.
max_len (int, optional):
Maximum length of the ``file_path``. If the value is |None|,
the value is automatically determined by the ``platform``:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
Raises:
NullNameError:
If the ``file_path`` is empty.
InvalidCharError:
If the ``file_path`` includes invalid char(s):
|invalid_file_path_chars|.
The following characters are also invalid for Windows platform:
|invalid_win_file_path_chars|
InvalidLengthError:
If the ``file_path`` is longer than ``max_len`` characters.
Example:
:ref:`example-validate-file-path`
See Also:
`Naming Files, Paths, and Namespaces (Windows)
<https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx>`__ | https://github.com/thombashi/pathvalidate/blob/22d64038fb08c04aa9d0e8dd9fd1a955c1a9bfef/pathvalidate/_file.py#L477-L515 |
thombashi/pathvalidate | pathvalidate/_file.py | sanitize_filename | def sanitize_filename(
filename, replacement_text="", platform=None, max_len=_DEFAULT_MAX_FILENAME_LEN
):
"""Make a valid filename from a string.
To make a valid filename the function does:
- Replace characters included in the ``filename`` that are invalid as file names
with the ``replacement_text``. Invalid characters are:
- unprintable characters
- |invalid_filename_chars|
- for Windows only: |invalid_win_filename_chars|
- Append underscore (``"_"``) at the tail of the name if sanitized name
is one of the reserved names by the operating system.
Args:
filename (str or PathLike object): Filename to sanitize.
replacement_text (str, optional):
Replacement text for invalid characters. Defaults to ``""``.
platform (str, optional):
.. include:: platform.txt
max_len (int, optional):
The upper limit of the ``filename`` length. Truncate the name length if
the ``filename`` length exceeds this value.
Defaults to ``255``.
Returns:
Same type as the ``filename`` (str or PathLike object):
Sanitized filename.
Raises:
ValueError:
If the ``filename`` is an invalid filename.
Example:
:ref:`example-sanitize-filename`
"""
return FileNameSanitizer(platform=platform, max_len=max_len).sanitize(
filename, replacement_text
) | python | def sanitize_filename(
filename, replacement_text="", platform=None, max_len=_DEFAULT_MAX_FILENAME_LEN
):
"""Make a valid filename from a string.
To make a valid filename the function does:
- Replace characters included in the ``filename`` that are invalid as file names
with the ``replacement_text``. Invalid characters are:
- unprintable characters
- |invalid_filename_chars|
- for Windows only: |invalid_win_filename_chars|
- Append underscore (``"_"``) at the tail of the name if sanitized name
is one of the reserved names by the operating system.
Args:
filename (str or PathLike object): Filename to sanitize.
replacement_text (str, optional):
Replacement text for invalid characters. Defaults to ``""``.
platform (str, optional):
.. include:: platform.txt
max_len (int, optional):
The upper limit of the ``filename`` length. Truncate the name length if
the ``filename`` length exceeds this value.
Defaults to ``255``.
Returns:
Same type as the ``filename`` (str or PathLike object):
Sanitized filename.
Raises:
ValueError:
If the ``filename`` is an invalid filename.
Example:
:ref:`example-sanitize-filename`
"""
return FileNameSanitizer(platform=platform, max_len=max_len).sanitize(
filename, replacement_text
) | Make a valid filename from a string.
To make a valid filename the function does:
- Replace characters included in the ``filename`` that are invalid as file names
with the ``replacement_text``. Invalid characters are:
- unprintable characters
- |invalid_filename_chars|
- for Windows only: |invalid_win_filename_chars|
- Append underscore (``"_"``) at the tail of the name if sanitized name
is one of the reserved names by the operating system.
Args:
filename (str or PathLike object): Filename to sanitize.
replacement_text (str, optional):
Replacement text for invalid characters. Defaults to ``""``.
platform (str, optional):
.. include:: platform.txt
max_len (int, optional):
The upper limit of the ``filename`` length. Truncate the name length if
the ``filename`` length exceeds this value.
Defaults to ``255``.
Returns:
Same type as the ``filename`` (str or PathLike object):
Sanitized filename.
Raises:
ValueError:
If the ``filename`` is an invalid filename.
Example:
:ref:`example-sanitize-filename` | https://github.com/thombashi/pathvalidate/blob/22d64038fb08c04aa9d0e8dd9fd1a955c1a9bfef/pathvalidate/_file.py#L533-L575 |
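A usage sketch for sanitize_filename under the same assumptions; the expected behavior noted in the comments follows from the rules in the docstring above:

from pathvalidate import sanitize_filename

raw = "fi:le*na?me.txt"
print(sanitize_filename(raw, platform="Windows"))                        # invalid characters removed
print(sanitize_filename(raw, replacement_text="_", platform="Windows"))  # invalid characters replaced with "_"
print(sanitize_filename("CON", platform="Windows"))                      # reserved name gets a trailing "_"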
thombashi/pathvalidate | pathvalidate/_file.py | sanitize_filepath | def sanitize_filepath(file_path, replacement_text="", platform=None, max_len=None):
"""Make a valid file path from a string.
Replace invalid characters for a file path within the ``file_path``
with the ``replacement_text``.
Invalid characters are as follows:
|invalid_file_path_chars|, |invalid_win_file_path_chars| (and non printable characters).
Args:
file_path (str or PathLike object):
File path to sanitize.
replacement_text (str, optional):
Replacement text for invalid characters.
Defaults to ``""``.
platform (str, optional):
.. include:: platform.txt
max_len (int, optional):
The upper limit of the ``file_path`` length. Truncate the name if the ``file_path``
length exceeds this value. If the value is |None|, the default value is automatically
determined by the execution platform:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
Returns:
Same type as the argument (str or PathLike object):
Sanitized filepath.
Raises:
ValueError:
If the ``file_path`` is an invalid file path.
Example:
:ref:`example-sanitize-file-path`
"""
return FilePathSanitizer(platform=platform, max_len=max_len).sanitize(
file_path, replacement_text
) | python | def sanitize_filepath(file_path, replacement_text="", platform=None, max_len=None):
"""Make a valid file path from a string.
Replace invalid characters for a file path within the ``file_path``
with the ``replacement_text``.
Invalid characters are as follows:
|invalid_file_path_chars|, |invalid_win_file_path_chars| (and non printable characters).
Args:
file_path (str or PathLike object):
File path to sanitize.
replacement_text (str, optional):
Replacement text for invalid characters.
Defaults to ``""``.
platform (str, optional):
.. include:: platform.txt
max_len (int, optional):
The upper limit of the ``file_path`` length. Truncate the name if the ``file_path``
length exceeds this value. If the value is |None|, the default value is automatically
determined by the execution platform:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
Returns:
Same type as the argument (str or PathLike object):
Sanitized filepath.
Raises:
ValueError:
If the ``file_path`` is an invalid file path.
Example:
:ref:`example-sanitize-file-path`
"""
return FilePathSanitizer(platform=platform, max_len=max_len).sanitize(
file_path, replacement_text
) | Make a valid file path from a string.
Replace invalid characters for a file path within the ``file_path``
with the ``replacement_text``.
Invalid characters are as follows:
|invalid_file_path_chars|, |invalid_win_file_path_chars| (and non printable characters).
Args:
file_path (str or PathLike object):
File path to sanitize.
replacement_text (str, optional):
Replacement text for invalid characters.
Defaults to ``""``.
platform (str, optional):
.. include:: platform.txt
max_len (int, optional):
The upper limit of the ``file_path`` length. Truncate the name if the ``file_path``
length exceeds this value. If the value is |None|, the default value is automatically
determined by the execution platform:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
Returns:
Same type as the argument (str or PathLike object):
Sanitized filepath.
Raises:
ValueError:
If the ``file_path`` is an invalid file path.
Example:
:ref:`example-sanitize-file-path` | https://github.com/thombashi/pathvalidate/blob/22d64038fb08c04aa9d0e8dd9fd1a955c1a9bfef/pathvalidate/_file.py#L578-L617 |
thombashi/pathvalidate | pathvalidate/_ltsv.py | validate_ltsv_label | def validate_ltsv_label(label):
"""
Verifying whether ``label`` is a valid
`Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__ label or not.
:param str label: Label to validate.
:raises pathvalidate.NullNameError: If the ``label`` is empty.
:raises pathvalidate.InvalidCharError:
If invalid character(s) found in the ``label`` for a LTSV format label.
"""
validate_null_string(label, error_msg="label is empty")
match_list = __RE_INVALID_LTSV_LABEL.findall(preprocess(label))
if match_list:
raise InvalidCharError(
"invalid character found for a LTSV format label: {}".format(match_list)
) | python | def validate_ltsv_label(label):
"""
Verifying whether ``label`` is a valid
`Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__ label or not.
:param str label: Label to validate.
:raises pathvalidate.NullNameError: If the ``label`` is empty.
:raises pathvalidate.InvalidCharError:
If invalid character(s) found in the ``label`` for a LTSV format label.
"""
validate_null_string(label, error_msg="label is empty")
match_list = __RE_INVALID_LTSV_LABEL.findall(preprocess(label))
if match_list:
raise InvalidCharError(
"invalid character found for a LTSV format label: {}".format(match_list)
) | Verifying whether ``label`` is a valid
`Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__ label or not.
:param str label: Label to validate.
:raises pathvalidate.NullNameError: If the ``label`` is empty.
:raises pathvalidate.InvalidCharError:
If invalid character(s) found in the ``label`` for a LTSV format label. | https://github.com/thombashi/pathvalidate/blob/22d64038fb08c04aa9d0e8dd9fd1a955c1a9bfef/pathvalidate/_ltsv.py#L18-L35 |
thombashi/pathvalidate | pathvalidate/_ltsv.py | sanitize_ltsv_label | def sanitize_ltsv_label(label, replacement_text=""):
"""
Replace all characters that are invalid in an LTSV label within the ``label``.
:param str label: Input text.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
"""
validate_null_string(label, error_msg="label is empty")
return __RE_INVALID_LTSV_LABEL.sub(replacement_text, preprocess(label)) | python | def sanitize_ltsv_label(label, replacement_text=""):
"""
Replace all characters that are invalid in an LTSV label within the ``label``.
:param str label: Input text.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
"""
validate_null_string(label, error_msg="label is empty")
return __RE_INVALID_LTSV_LABEL.sub(replacement_text, preprocess(label)) | Replace all characters that are invalid in an LTSV label within the ``label``.
:param str label: Input text.
:param str replacement_text: Replacement text.
:return: A replacement string.
:rtype: str | https://github.com/thombashi/pathvalidate/blob/22d64038fb08c04aa9d0e8dd9fd1a955c1a9bfef/pathvalidate/_ltsv.py#L38-L50 |
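A sketch covering both LTSV helpers, assuming they are re-exported at the package top level (otherwise they would be imported from pathvalidate._ltsv). LTSV labels only permit [0-9A-Za-z_.-], so the space below is invalid:

from pathvalidate import sanitize_ltsv_label, validate_ltsv_label

label = "http status"
try:
    validate_ltsv_label(label)
except Exception as exc:  # InvalidCharError per the docstring above
    print("invalid label:", exc)
print(sanitize_ltsv_label(label, replacement_text="_"))  # -> "http_status"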
labtocat/beautifier | beautifier/__init__.py | Url.param | def param(self):
"""
Returns params
"""
try:
self.parameters = self._main_url.split('?')[1]
return self.parameters.split('&')
except IndexError:  # no '?' present, so there is no query string to split
return self.parameters | python | def param(self):
"""
Returns params
"""
try:
self.parameters = self._main_url.split('?')[1]
return self.parameters.split('&')
except IndexError:  # no '?' present, so there is no query string to split
return self.parameters | Returns params | https://github.com/labtocat/beautifier/blob/5827edc2d6dc057e5f1f57596037fc94201ca8e7/beautifier/__init__.py#L40-L48 |
labtocat/beautifier | beautifier/__init__.py | Url.domain | def domain(self):
"""
Return domain from the url
"""
remove_pac = self.cleanup.replace(
"https://", "").replace("http://", "").replace("www.", "")
try:
return remove_pac.split('/')[0]
except IndexError:
return None | python | def domain(self):
"""
Return domain from the url
"""
remove_pac = self.cleanup.replace(
"https://", "").replace("http://", "").replace("www.", "")
try:
return remove_pac.split('/')[0]
except IndexError:
return None | Return domain from the url | https://github.com/labtocat/beautifier/blob/5827edc2d6dc057e5f1f57596037fc94201ca8e7/beautifier/__init__.py#L58-L67 |
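A usage sketch for the two accessors above. That Url is constructed from the raw URL string, and that param and domain are exposed as properties, is inferred from the code, not verified against the package:

from beautifier import Url

url = Url("https://www.example.com/path?src=newsletter&id=42")
print(url.domain)  # "example.com"
print(url.param)   # ["src=newsletter", "id=42"]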
ccubed/PyMoe | Pymoe/Mal/Objects.py | Anime.to_xml | def to_xml(self):
"""
Convert data to XML String.
:return: Str of valid XML data
"""
root = ET.Element("entry")
for x in self.xml_tags:
if getattr(self, x):
if x in ['episodes', 'scores', 'status', 'dates', 'storage', 'rewatched', 'flags', 'tags']:
if x == 'episodes':
if self.episodes.current:
temp = ET.SubElement(root, 'episode')
temp.text = str(self.episodes.current)
elif x == 'scores':
if self.scores.user:
temp = ET.SubElement(root, 'score')
temp.text = str(self.scores.user)
elif x == 'status':
if self.status.user:
temp = ET.SubElement(root, 'status')
temp.text = str(self.status.user)
elif x == 'dates':
if self.dates.user.start:
start = ET.SubElement(root, 'date_start')
start.text = format_date(self.dates.user.start)
if self.dates.user.end:
end = ET.SubElement(root, 'date_finish')
end.text = format_date(self.dates.user.end)
elif x == 'storage':
if self.storage.type:
stype = ET.SubElement(root, 'storage_type')
stype.text = str(self.storage.type)
if self.storage.value:
sval = ET.SubElement(root, 'storage_value')
sval.text = str(self.storage.value)
elif x == 'rewatched':
if self.rewatched.times:
rt = ET.SubElement(root, 'times_rewatched')
rt.text = str(self.rewatched.times)
if self.rewatched.value:
rv = ET.SubElement(root, 'rewatch_value')
rv.text = str(self.rewatched.value)
elif x == 'flags':
if self.flags.discussion:
df = ET.SubElement(root, 'enable_discussion')
df.text = '1' if self.flags.discussion else '0'
if self.flags.rewatching:
rf = ET.SubElement(root, 'enable_rewatching')
rf.text = '1' if self.flags.rewatching else '0'
else:
if self.tags:
temp = ET.SubElement(root, 'tags')
temp.text = ','.join(self.tags)
else:
temp = ET.SubElement(root, x)
temp.text = str(getattr(self, x))
return '<?xml version="1.0" encoding="UTF-8"?>{}'.format(ET.tostring(root, encoding="unicode")) | python | def to_xml(self):
"""
Convert data to XML String.
:return: Str of valid XML data
"""
root = ET.Element("entry")
for x in self.xml_tags:
if getattr(self, x):
if x in ['episodes', 'scores', 'status', 'dates', 'storage', 'rewatched', 'flags', 'tags']:
if x == 'episodes':
if self.episodes.current:
temp = ET.SubElement(root, 'episode')
temp.text = str(self.episodes.current)
elif x == 'scores':
if self.scores.user:
temp = ET.SubElement(root, 'score')
temp.text = str(self.scores.user)
elif x == 'status':
if self.status.user:
temp = ET.SubElement(root, 'status')
temp.text = str(self.status.user)
elif x == 'dates':
if self.dates.user.start:
start = ET.SubElement(root, 'date_start')
start.text = format_date(self.dates.user.start)
if self.dates.user.end:
end = ET.SubElement(root, 'date_finish')
end.text = format_date(self.dates.user.end)
elif x == 'storage':
if self.storage.type:
stype = ET.SubElement(root, 'storage_type')
stype.text = str(self.storage.type)
if self.storage.value:
sval = ET.SubElement(root, 'storage_value')
sval.text = str(self.storage.value)
elif x == 'rewatched':
if self.rewatched.times:
rt = ET.SubElement(root, 'times_rewatched')
rt.text = str(self.rewatched.times)
if self.rewatched.value:
rv = ET.SubElement(root, 'rewatch_value')
rv.text = str(self.rewatched.value)
elif x == 'flags':
if self.flags.discussion:
df = ET.SubElement(root, 'enable_discussion')
df.text = '1' if self.flags.discussion else '0'
if self.flags.rewatching:
rf = ET.SubElement(root, 'enable_rewatching')
rf.text = '1' if self.flags.rewatching else '0'
else:
if self.tags:
temp = ET.SubElement(root, 'tags')
temp.text = ','.join(self.tags)
else:
temp = ET.SubElement(root, x)
temp.text = str(getattr(self, x))
return '<?xml version="1.0" encoding="UTF-8"?>{}'.format(ET.tostring(root, encoding="unicode")) | Convert data to XML String.
:return: Str of valid XML data | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Mal/Objects.py#L76-L132 |
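The serialization pattern in to_xml (build an entry root, attach SubElements only for fields that are actually set, then render the tree) is easier to see stripped of PyMoe's object model. A self-contained ElementTree sketch of the same idiom:

import xml.etree.ElementTree as ET

fields = {"episode": 12, "score": 8, "tags": "action,drama", "status": None}

root = ET.Element("entry")
for tag, value in fields.items():
    if value:  # skip unset fields, as to_xml does
        child = ET.SubElement(root, tag)
        child.text = str(value)

# same final framing as to_xml: prepend the XML declaration by hand
print('<?xml version="1.0" encoding="UTF-8"?>{}'.format(
    ET.tostring(root, encoding="unicode")))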
ccubed/PyMoe | Pymoe/Kitsu/user.py | KitsuUser.search | def search(self, term):
"""
Search for a user by name.
:param str term: What to search for.
:return: The results as a SearchWrapper iterator or None if no results.
:rtype: SearchWrapper or None
"""
r = requests.get(self.apiurl + "/users", params={"filter[name]": term}, headers=self.header)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if jsd['meta']['count']:
return SearchWrapper(jsd['data'], jsd['links']['next'] if 'next' in jsd['links'] else None, self.header)
else:
return None | python | def search(self, term):
"""
Search for a user by name.
:param str term: What to search for.
:return: The results as a SearchWrapper iterator or None if no results.
:rtype: SearchWrapper or None
"""
r = requests.get(self.apiurl + "/users", params={"filter[name]": term}, headers=self.header)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if jsd['meta']['count']:
return SearchWrapper(jsd['data'], jsd['links']['next'] if 'next' in jsd['links'] else None, self.header)
else:
return None | Search for a user by name.
:param str term: What to search for.
:return: The results as a SearchWrapper iterator or None if no results.
:rtype: SearchWrapper or None | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/user.py#L11-L29 |
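A usage sketch; how the KitsuUser handle is constructed is not shown in this record, so the Kitsu(...) entry point below is hypothetical:

from Pymoe import Kitsu

kitsu = Kitsu("client-id", "client-secret")  # hypothetical constructor arguments
results = kitsu.user.search("josh")
if results:
    for user in results:  # SearchWrapper yields result dictionaries
        print(user['id'], user['attributes']['name'])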
ccubed/PyMoe | Pymoe/Kitsu/user.py | KitsuUser.create | def create(self, data):
"""
Create a user. Please review the attributes required. You need only provide the attributes.
:param data: A dictionary of the required attributes
:return: Dictionary returned by server or a ServerError exception
:rtype: Dictionary or Exception
"""
final_dict = {"data": {"type": "users", "attributes": data}}
r = requests.post(self.apiurl + "/users", json=final_dict, headers=self.header)
if r.status_code != 200:
raise ServerError
return r.json() | python | def create(self, data):
"""
Create a user. Please review the attributes required. You need only provide the attributes.
:param data: A dictionary of the required attributes
:return: Dictionary returned by server or a ServerError exception
:rtype: Dictionary or Exception
"""
final_dict = {"data": {"type": "users", "attributes": data}}
r = requests.post(self.apiurl + "/users", json=final_dict, headers=self.header)
if r.status_code != 200:
raise ServerError
return r.json() | Create a user. Please review the attributes required. You need only provide the attributes.
:param data: A dictionary of the required attributes
:return: Dictionary returned by server or a ServerError exception
:rtype: Dictionary or Exception | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/user.py#L31-L45 |
ccubed/PyMoe | Pymoe/Kitsu/user.py | KitsuUser.get | def get(self, uid):
"""
Get a user's information by their id.
:param uid str: User ID
:return: The user's information or None
:rtype: Dictionary or None
"""
r = requests.get(self.apiurl + "/users/{}".format(uid), headers=self.header)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if jsd['data']:
return jsd['data']
else:
return None | python | def get(self, uid):
"""
Get a user's information by their id.
:param uid str: User ID
:return: The user's information or None
:rtype: Dictionary or None
"""
r = requests.get(self.apiurl + "/users/{}".format(uid), headers=self.header)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if jsd['data']:
return jsd['data']
else:
return None | Get a user's information by their id.
:param uid str: User ID
:return: The user's information or None
:rtype: Dictionary or None | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/user.py#L47-L65 |
ccubed/PyMoe | Pymoe/Kitsu/user.py | KitsuUser.update | def update(self, uid, data, token):
"""
Update a user's data. Requires an auth token.
:param uid str: User ID to update
:param data dict: The dictionary of data attributes to change. Just the attributes.
:param token str: The authorization token for this user
:return: True or Exception
:rtype: Bool or ServerError
"""
final_dict = {"data": {"id": uid, "type": "users", "attributes": data}}
final_headers = self.header.copy()  # copy so the shared header dict is not mutated
final_headers['Authorization'] = "Bearer {}".format(token)
r = requests.patch(self.apiurl + "/users/{}".format(uid), json=final_dict, headers=final_headers)
if r.status_code != 200:
raise ServerError
return True | python | def update(self, uid, data, token):
"""
Update a user's data. Requires an auth token.
:param uid str: User ID to update
:param data dict: The dictionary of data attributes to change. Just the attributes.
:param token str: The authorization token for this user
:return: True or Exception
:rtype: Bool or ServerError
"""
final_dict = {"data": {"id": uid, "type": "users", "attributes": data}}
final_headers = self.header.copy()  # copy so the shared header dict is not mutated
final_headers['Authorization'] = "Bearer {}".format(token)
r = requests.patch(self.apiurl + "/users/{}".format(uid), json=final_dict, headers=final_headers)
if r.status_code != 200:
raise ServerError
return True | Update a user's data. Requires an auth token.
:param uid str: User ID to update
:param data dict: The dictionary of data attributes to change. Just the attributes.
:param token str: The authorization token for this user
:return: True or Exception
:rtype: Bool or ServerError | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/user.py#L67-L85 |
ccubed/PyMoe | Pymoe/Anilist/search.py | ASearch.character | def character(self, term, page = 1, perpage = 3):
"""
Search for a character by term.
Results are paginated by default. Page specifies which page we're on.
Perpage specifies how many per page to request. 3 is just the example from the API docs.
:param term str: Name to search by
:param page int: Which page are we requesting? Starts at 1.
:param perpage int: How many results per page are we requesting?
:return: JSON object with the returned results, or None if the response could not be parsed.
:rtype: dict or NoneType
"""
query_string = """\
query ($query: String, $page: Int, $perpage: Int) {
Page (page: $page, perPage: $perpage) {
pageInfo {
total
currentPage
lastPage
hasNextPage
}
characters (search: $query) {
id
name {
first
last
}
image {
large
}
}
}
}
"""
vars = {"query": term, "page": page, "perpage": perpage}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd | python | def character(self, term, page = 1, perpage = 3):
"""
Search for a character by term.
Results are paginated by default. Page specifies which page we're on.
Perpage specifies how many per page to request. 3 is just the example from the API docs.
:param term str: Name to search by
:param page int: Which page are we requesting? Starts at 1.
:param perpage int: How many results per page are we requesting?
:return: JSON object with the returned results, or None if the response could not be parsed.
:rtype: dict or NoneType
"""
query_string = """\
query ($query: String, $page: Int, $perpage: Int) {
Page (page: $page, perPage: $perpage) {
pageInfo {
total
currentPage
lastPage
hasNextPage
}
characters (search: $query) {
id
name {
first
last
}
image {
large
}
}
}
}
"""
vars = {"query": term, "page": page, "perpage": perpage}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd | Search for a character by term.
Results are paginated by default. Page specifies which page we're on.
Perpage specifies how many per page to request. 3 is just the example from the API docs.
:param term str: Name to search by
:param page int: Which page are we requesting? Starts at 1.
:param perpage int: How many results per page are we requesting?
:return: JSON object with the returned results, or None if the response could not be parsed.
:rtype: dict or NoneType | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Anilist/search.py#L9-L54 |
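A sketch of walking the paginated results. The construction of the search handle is hypothetical, but the response shape follows directly from the GraphQL query above (data -> Page -> characters/pageInfo):

search = ASearch(...)  # hypothetical: construction details are not in this record
page = 1
while True:
    jsd = search.character("Haruhi", page=page, perpage=25)
    if jsd is None:
        break
    for char in jsd['data']['Page']['characters']:
        print(char['id'], char['name']['first'], char['name']['last'])
    if not jsd['data']['Page']['pageInfo']['hasNextPage']:
        break
    page += 1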
ccubed/PyMoe | Pymoe/Anidb/aid.py | Aid.search | def search(term, lang=None):
"""
As a convenient alternative to downloading and parsing a dump,
This function will instead query the AID search provided by Eloyard.
This is the same information available at http://anisearch.outrance.pl/.
:param str term: Search Term
:param list lang: A list of language codes which determines what titles are returned
"""
r = requests.get(
"http://anisearch.outrance.pl/index.php",
params={
"task": "search",
"query": term,
"langs": "ja,x-jat,en" if lang is None else ','.join(lang)
}
)
if r.status_code != 200:
raise ServerError
results = {}
root = ET.fromstring(r.text)  # fromstring() already returns the root Element
for item in root.iter("anime"):
# Parse XML http://wiki.anidb.net/w/User:Eloyard/anititles_dump
aid = item.attrib['aid']
results[aid] = {}
for title in item.iter('title'):
if title.attrib['type'] in ['official', 'main']:
# ElementTree expands the xml: prefix to its namespace URI form
results[aid][title.attrib['{http://www.w3.org/XML/1998/namespace}lang']] = title.text
return results | python | def search(term, lang=None):
"""
As a convenient alternative to downloading and parsing a dump,
This function will instead query the AID search provided by Eloyard.
This is the same information available at http://anisearch.outrance.pl/.
:param str term: Search Term
:param list lang: A list of language codes which determines what titles are returned
"""
r = requests.get(
"http://anisearch.outrance.pl/index.php",
params={
"task": "search",
"query": term,
"langs": "ja,x-jat,en" if lang is None else ','.join(lang)
}
)
if r.status_code != 200:
raise ServerError
results = {}
root = ET.fromstring(r.text)  # fromstring() already returns the root Element
for item in root.iter("anime"):
# Parse XML http://wiki.anidb.net/w/User:Eloyard/anititles_dump
aid = item.attrib['aid']
results[aid] = {}
for title in item.iter('title'):
if title.attrib['type'] in ['official', 'main']:
# ElementTree expands the xml: prefix to its namespace URI form
results[aid][title.attrib['{http://www.w3.org/XML/1998/namespace}lang']] = title.text
return results | As a convenient alternative to downloading and parsing a dump,
This function will instead query the AID search provided by Eloyard.
This is the same information available at http://anisearch.outrance.pl/.
:param str term: Search Term
:param list lang: A list of language codes which determines what titles are returned | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Anidb/aid.py#L9-L41 |
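A usage sketch, assuming the Eloyard endpoint queried above is still reachable. search takes no self, so it can be called straight off the class; the inner dict is keyed by the language codes taken from each title's xml:lang attribute:

from Pymoe.Anidb.aid import Aid  # module path per the record header

titles = Aid.search("cowboy bebop", lang=["en", "ja"])
for aid, names in titles.items():
    print(aid, names.get('en'), "/", names.get('ja'))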
ccubed/PyMoe | Pymoe/VNDB/__init__.py | VNDB.get | def get(self, stype, flags, filters, options=None):
"""
Send a request to the API to return results related to Visual Novels.
:param str stype: What are we searching for? One of: vn, release, producer, character, votelist, vnlist, wishlist
:param flags: See the D11 docs. A comma separated list of flags for what data to return. Can be list or str.
:param str filters: A string with the one filter to search by (apparently you only get one).
This is kind of special. You need to pass them in the form <filter><op>"<term>"
for strings or <filter><op><number> for numbers. This is counter intuitive.
Also, per the docs, <filter>=<number> doesn't do what we think, use >, >= or < and <=.
I will attempt to properly format this if not done so when called.
:param dict options: A dictionary of options to customize the search by. Optional, defaults to None.
:return dict: A dictionary containing a pages and data key. data contains a list of dictionaries with data on your results. If pages is true, you can call this command again with the same parameters and pass a page option to get more data. Otherwise no further results exist for this query.
:raises ServerError: Raises a ServerError if an error is returned.
"""
if not isinstance(flags, str):
if isinstance(flags, list):
finflags = ",".join(flags)
else:
raise SyntaxError("Flags should be a list or comma separated string")
else:
finflags = flags
if not isinstance(filters, str):
raise SyntaxError("Filters needs to be a string in the format Filter<op>Value. The simplest form is search=\"<Term>\".")
if stype not in self.stypes:
raise SyntaxError("{} not a valid Search type.".format(stype))
if '"' not in filters or "'" not in filters:
newfilters = self.helperpat.split(filters)
newfilters = [x.strip() for x in newfilters]
newfilters[1] = '"' + newfilters[1] + '"'
op = self.helperpat.search(filters)
newfilters = op.group(0).join(newfilters)
command = '{} {} ({}){}'.format(stype, finflags, newfilters,
' ' + ujson.dumps(options) if options is not None else '')
else:
command = '{} {} ({}){}'.format(stype, finflags, filters,
' ' + ujson.dumps(options) if options is not None else '')
data = self.connection.send_command('get', command)
if 'id' in data:
raise ServerError(data['msg'], data['id'])
else:
return {'pages': data.get('more', False), 'data': data['items']}
"""
Send a request to the API to return results related to Visual Novels.
:param str stype: What are we searching for? One of: vn, release, producer, character, votelist, vnlist, wishlist
:param flags: See the D11 docs. A comma separated list of flags for what data to return. Can be list or str.
:param str filters: A string with the one filter to search by (apparently you only get one).
This is kind of special. You need to pass them in the form <filter><op>"<term>"
for strings or <filter><op><number> for numbers. This is counter intuitive.
Also, per the docs, <filter>=<number> doesn't do what we think, use >, >= or < and <=.
I will attempt to properly format this if not done so when called.
:param dict options: A dictionary of options to customize the search by. Optional, defaults to None.
:return dict: A dictionary containing a pages and data key. data contains a list of dictionaries with data on your results. If pages is true, you can call this command again with the same parameters and pass a page option to get more data. Otherwise no further results exist for this query.
:raises ServerError: Raises a ServerError if an error is returned.
"""
if not isinstance(flags, str):
if isinstance(flags, list):
finflags = ",".join(flags)
else:
raise SyntaxError("Flags should be a list or comma separated string")
else:
finflags = flags
if not isinstance(filters, str):
raise SyntaxError("Filters needs to be a string in the format Filter<op>Value. The simplest form is search=\"<Term>\".")
if stype not in self.stypes:
raise SyntaxError("{} not a valid Search type.".format(stype))
if '"' not in filters or "'" not in filters:
newfilters = self.helperpat.split(filters)
newfilters = [x.strip() for x in newfilters]
newfilters[1] = '"' + newfilters[1] + '"'
op = self.helperpat.search(filters)
newfilters = op.group(0).join(newfilters)
command = '{} {} ({}){}'.format(stype, finflags, newfilters,
' ' + ujson.dumps(options) if options is not None else '')
else:
command = '{} {} ({}){}'.format(stype, finflags, filters,
' ' + ujson.dumps(options) if options is not None else '')
data = self.connection.send_command('get', command)
if 'id' in data:
raise ServerError(data['msg'], data['id'])
else:
return {'pages': data.get('more', False), 'data': data['items']} | Send a request to the API to return results related to Visual Novels.
:param str stype: What are we searching for? One of: vn, release, producer, character, votelist, vnlist, wishlist
:param flags: See the D11 docs. A comma separated list of flags for what data to return. Can be list or str.
:param str filters: A string with the one filter to search by (apparently you only get one).
This is kind of special. You need to pass them in the form <filter><op>"<term>"
for strings or <filter><op><number> for numbers. This is counter intuitive.
Also, per the docs, <filter>=<number> doesn't do what we think, use >, >= or < and <=.
I will attempt to properly format this if not done so when called.
:param dict options: A dictionary of options to customize the search by. Optional, defaults to None.
:return dict: A dictionary containing a pages and data key. data contains a list of dictionaries with data on your results. If pages is true, you can call this command again with the same parameters and pass a page option to get more data. Otherwise no further results exist for this query.
:raises ServerError: Raises a ServerError if an error is returned. | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/VNDB/__init__.py#L34-L80 |
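A usage sketch for the filter formatting described above. The bare VNDB() construction is hypothetical (connection and login details live elsewhere in the package), while 'basic,details' and search="..." are standard VNDB API flags and filters:

vndb = VNDB()  # hypothetical construction
res = vndb.get('vn', 'basic,details', 'search="Clannad"', options={"page": 1})
for vn in res['data']:
    print(vn['id'], vn['title'])
if res['pages']:
    print("more results: repeat the call with a higher 'page' option")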
ccubed/PyMoe | Pymoe/VNDB/__init__.py | VNDB.set | def set(self, stype, sid, fields):
"""
Send a request to the API to modify something in the database if logged in.
:param str stype: What are we modifying? One of: votelist, vnlist, wishlist
:param int sid: The ID that we're modifying.
:param dict fields: A dictionary of the fields and their values
:raises ServerError: Raises a ServerError if an error is returned
:return bool: True if successful, error otherwise
"""
if stype not in ['votelist', 'vnlist', 'wishlist']:
raise SyntaxError("{} is not a valid type for set. Should be one of: votelist, vnlist or wishlist.".format(stype))
command = "{} {} {}".format(stype, sid, ujson.dumps(fields))  # use the sid argument, not the id builtin
data = self.connection.send_command('set', command)
if 'id' in data:
raise ServerError(data['msg'], data['id'])
else:
return True | python | def set(self, stype, sid, fields):
"""
Send a request to the API to modify something in the database if logged in.
:param str stype: What are we modifying? One of: votelist, vnlist, wishlist
:param int sid: The ID that we're modifying.
:param dict fields: A dictionary of the fields and their values
:raises ServerError: Raises a ServerError if an error is returned
:return bool: True if successful, error otherwise
"""
if stype not in ['votelist', 'vnlist', 'wishlist']:
raise SyntaxError("{} is not a valid type for set. Should be one of: votelist, vnlist or wishlist.".format(stype))
command = "{} {} {}".format(stype, sid, ujson.dumps(fields))  # use the sid argument, not the id builtin
data = self.connection.send_command('set', command)
if 'id' in data:
raise ServerError(data['msg'], data['id'])
else:
return True | Send a request to the API to modify something in the database if logged in.
:param str stype: What are we modifying? One of: votelist, vnlist, wishlist
:param int sid: The ID that we're modifying.
:param dict fields: A dictionary of the fields and their values
:raises ServerError: Raises a ServerError if an error is returned
:return bool: True if successful, error otherwise | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/VNDB/__init__.py#L82-L101 |
ccubed/PyMoe | Pymoe/Anilist/get.py | AGet.anime | def anime(self, item_id):
"""
The function to retrieve an anime's details.
:param int item_id: the anime's ID
:return: dict or None
:rtype: dict or NoneType
"""
query_string = """\
query ($id: Int) {
Media(id: $id, type: ANIME) {
title {
romaji
english
}
startDate {
year
month
day
}
endDate {
year
month
day
}
coverImage {
large
}
bannerImage
format
status
episodes
season
description
averageScore
meanScore
genres
synonyms
nextAiringEpisode {
airingAt
timeUntilAiring
episode
}
}
}
"""
vars = {"id": item_id}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd | python | def anime(self, item_id):
"""
The function to retrieve an anime's details.
:param int item_id: the anime's ID
:return: dict or None
:rtype: dict or NoneType
"""
query_string = """\
query ($id: Int) {
Media(id: $id, type: ANIME) {
title {
romaji
english
}
startDate {
year
month
day
}
endDate {
year
month
day
}
coverImage {
large
}
bannerImage
format
status
episodes
season
description
averageScore
meanScore
genres
synonyms
nextAiringEpisode {
airingAt
timeUntilAiring
episode
}
}
}
"""
vars = {"id": item_id}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd | The function to retrieve an anime's details.
:param int item_id: the anime's ID
:return: dict or None
:rtype: dict or NoneType | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Anilist/get.py#L8-L65 |
ccubed/PyMoe | Pymoe/Anilist/get.py | AGet.review | def review(self, item_id, html = True):
"""
With the change to v2 of the api, reviews have their own IDs. This accepts the ID of the review.
You can set html to False if you want the review body returned without html formatting.
The API Default is true.
:param item_id: the Id of the review
:param html: do you want the body returned with html formatting?
:return: JSON object containing the review information, or None if the response could not be parsed.
:rtype: dict or NoneType
"""
query_string = """\
query ($id: Int, $html: Boolean) {
Review (id: $id) {
summary
body(asHtml: $html)
score
rating
ratingAmount
createdAt
updatedAt
private
media {
id
}
user {
id
name
avatar {
large
}
}
}
}
"""
vars = {"id": item_id, "html": html}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd | python | def review(self, item_id, html = True):
"""
With the change to v2 of the api, reviews have their own IDs. This accepts the ID of the review.
You can set html to False if you want the review body returned without html formatting.
The API Default is true.
:param item_id: the Id of the review
:param html: do you want the body returned with html formatting?
:return: JSON object containing the review information, or None if the response could not be parsed.
:rtype: dict or NoneType
"""
query_string = """\
query ($id: Int, $html: Boolean) {
Review (id: $id) {
summary
body(asHtml: $html)
score
rating
ratingAmount
createdAt
updatedAt
private
media {
id
}
user {
id
name
avatar {
large
}
}
}
}
"""
vars = {"id": item_id, "html": html}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd | With the change to v2 of the api, reviews have their own IDs. This accepts the ID of the review.
You can set html to False if you want the review body returned without html formatting.
The API Default is true.
:param item_id: the Id of the review
:param html: do you want the body returned with html formatting?
:return: JSON object containing the review information, or None if the response could not be parsed.
:rtype: dict or NoneType | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Anilist/get.py#L219-L265 |
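Both getters above boil down to one mechanic: POST a GraphQL document plus a variables dict to AniList's public v2 endpoint at https://graphql.anilist.co. Stripped of the wrapper, the same request is just:

import json
import requests

query = """\
query ($id: Int) {
    Media(id: $id, type: ANIME) { title { romaji } episodes }
}
"""
r = requests.post("https://graphql.anilist.co",
                  json={"query": query, "variables": {"id": 1}})
print(json.loads(r.text)['data']['Media']['title']['romaji'])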
ccubed/PyMoe | Pymoe/Kitsu/mappings.py | KitsuMappings.get | def get(self, external_site: str, external_id: int):
"""
Get a kitsu mapping by external site ID
:param str external_site: string representing the external site
:param int external_id: ID of the entry in the external site.
:return: Dictionary or None (for not found)
:rtype: Dictionary or None
:raises: :class:`Pymoe.errors.ServerError`
"""
r = requests.get(self.apiurl + "/mappings", params={"filter[externalSite]": external_site, "filter[externalId]": external_id}, headers=self.header)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if len(jsd['data']) < 1:
return None
r = requests.get(jsd['data'][0]['relationships']['item']['links']['related'], headers=self.header)
if r.status_code != 200:
return jsd
else:
return r.json() | python | def get(self, external_site: str, external_id: int):
"""
Get a kitsu mapping by external site ID
:param str external_site: string representing the external site
:param int external_id: ID of the entry in the external site.
:return: Dictionary or None (for not found)
:rtype: Dictionary or None
:raises: :class:`Pymoe.errors.ServerError`
"""
r = requests.get(self.apiurl + "/mappings", params={"filter[externalSite]": external_site, "filter[externalId]": external_id}, headers=self.header)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if len(jsd['data']) < 1:
return None
r = requests.get(jsd['data'][0]['relationships']['item']['links']['related'], headers=self.header)
if r.status_code != 200:
return jsd
else:
return r.json() | Get a kitsu mapping by external site ID
:param str external_site: string representing the external site
:param int external_id: ID of the entry in the external site.
:return: Dictionary or None (for not found)
:rtype: Dictionary or None
:raises: :class:`Pymoe.errors.ServerError` | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/mappings.py#L11-L36 |
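A usage sketch; the bare KitsuMappings() construction is hypothetical (it is normally reached through the Kitsu wrapper), and "myanimelist/anime" is assumed, not verified, as a valid externalSite value:

from Pymoe.Kitsu.mappings import KitsuMappings

mappings = KitsuMappings()  # hypothetical construction
entry = mappings.get("myanimelist/anime", 1)
if entry:
    # on success this is the related Kitsu entry, already resolved by get()
    print(entry['data']['id'], entry['data']['type'])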
ccubed/PyMoe | Pymoe/Kitsu/auth.py | KitsuAuth.authenticate | def authenticate(self, username, password):
"""
Obtain an oauth token. Pass username and password. Get a token back. If KitsuAuth is set to remember your tokens
for this session, it will store the token under the username given.
:param username: username
:param password: password
:return: A tuple of (token, expiration time in unix time stamp, refresh_token) or ServerError
"""
r = requests.post(self.apiurl + "/token",
params={"grant_type": "password", "username": username, "password": password,
"client_id": self.cid, "client_secret": self.csecret})
if r.status_code != 200:
raise ServerError
jsd = r.json()
if self.remember:
self.token_storage[username] = {'token': jsd['access_token'], 'refresh': jsd['refresh_token'],
'expiration': int(jsd['created_at']) + int(jsd['expires_in'])}
return jsd['access_token'], int(jsd['expires_in']) + int(jsd['created_at']), jsd['refresh_token'] | python | def authenticate(self, username, password):
"""
Obtain an oauth token. Pass username and password. Get a token back. If KitsuAuth is set to remember your tokens
for this session, it will store the token under the username given.
:param username: username
:param password: password
:return: A tuple of (token, expiration time in unix time stamp, refresh_token) or ServerError
"""
r = requests.post(self.apiurl + "/token",
params={"grant_type": "password", "username": username, "password": password,
"client_id": self.cid, "client_secret": self.csecret})
if r.status_code != 200:
raise ServerError
jsd = r.json()
if self.remember:
self.token_storage[username] = {'token': jsd['access_token'], 'refresh': jsd['refresh_token'],
'expiration': int(jsd['created_at']) + int(jsd['expires_in'])}
return jsd['access_token'], int(jsd['expires_in']) + int(jsd['created_at']), jsd['refresh_token'] | Obtain an oauth token. Pass username and password. Get a token back. If KitsuAuth is set to remember your tokens
for this session, it will store the token under the username given.
:param username: username
:param password: password
:return: A tuple of (token, expiration time in unix time stamp, refresh_token) or ServerError | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/auth.py#L25-L48 |
ccubed/PyMoe | Pymoe/Kitsu/auth.py | KitsuAuth.refresh | def refresh(self, refresh_token):
"""
Renew an oauth token given an appropriate refresh token.
:param refresh_token: The Refresh Token
:return: A tuple of (token, expiration time in unix time stamp)
"""
r = requests.post(self.apiurl + "/token", params={"grant_type": "refresh_token", "client_id": self.cid,
"client_secret": self.csecret,
"refresh_token": refresh_token})
if r.status_code != 200:
raise ServerError
jsd = r.json()
return jsd['access_token'], int(jsd['expires_in']) + int(jsd['created_at']) | python | def refresh(self, refresh_token):
"""
Renew an oauth token given an appropriate refresh token.
:param refresh_token: The Refresh Token
:return: A tuple of (token, expiration time in unix time stamp)
"""
r = requests.post(self.apiurl + "/token", params={"grant_type": "refresh_token", "client_id": self.cid,
"client_secret": self.csecret,
"refresh_token": refresh_token})
if r.status_code != 200:
raise ServerError
jsd = r.json()
return jsd['access_token'], int(jsd['expires_in']) + int(jsd['created_at']) | Renew an oauth token given an appropriate refresh token.
:param refresh_token: The Refresh Token
:return: A tuple of (token, expiration time in unix time stamp) | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/auth.py#L50-L66 |
ccubed/PyMoe | Pymoe/Kitsu/auth.py | KitsuAuth.get | def get(self, username):
"""
If using the remember option and KitsuAuth is storing your tokens, this function will retrieve one.
:param username: The username whose token we are retrieving
:return: A token, NotFound or NotSaving error
"""
if not self.remember:
raise NotSaving
if username not in self.token_storage:
raise UserNotFound
if self.token_storage[username]['expiration'] < time.time():
new_token = self.refresh(self.token_storage[username]['refresh'])
self.token_storage[username]['token'] = new_token[0]
self.token_storage[username]['expiration'] = new_token[1]
return new_token[0]
else:
return self.token_storage[username]['token'] | python | def get(self, username):
"""
If using the remember option and KitsuAuth is storing your tokens, this function will retrieve one.
:param username: The username whose token we are retrieving
:return: A token, NotFound or NotSaving error
"""
if not self.remember:
raise NotSaving
if username not in self.token_storage:
raise UserNotFound
if self.token_storage[username]['expiration'] < time.time():
new_token = self.refresh(self.token_storage[username]['refresh'])
self.token_storage[username]['token'] = new_token[0]
self.token_storage[username]['expiration'] = new_token[1]
return new_token[0]
else:
return self.token_storage[username]['token'] | If using the remember option and KitsuAuth is storing your tokens, this function will retrieve one.
:param username: The username whose token we are retrieving
:return: A token, NotFound or NotSaving error | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/auth.py#L69-L88 |
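A sketch of the whole token flow, under the assumption that KitsuAuth is constructed with the client id/secret plus a remember flag; the constructor itself is not shown in these records:

from Pymoe.Kitsu.auth import KitsuAuth

auth = KitsuAuth("client-id", "client-secret", remember=True)  # hypothetical signature
token, expires_at, refresh_token = auth.authenticate("username", "password")
# later: get() returns the cached token, refreshing it transparently once expired
bearer = auth.get("username")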
ccubed/PyMoe | Pymoe/Anidb/dump.py | save | def save(url, destination):
"""
This is just the thread target.
It's actually responsible for downloading and saving.
:param str url: which dump to download
:param str destination: a file path to save to
"""
r = requests.get(url, stream=True)
with open(destination, 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk) | python | def save(url, destination):
"""
This is just the thread target.
It's actually responsible for downloading and saving.
:param str url: which dump to download
:param str destination: a file path to save to
"""
r = requests.get(url, stream=True)
with open(destination, 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk) | This is just the thread target.
It's actually responsible for downloading and saving.
:param str url: which dump to download
:param str destination: a file path to save to | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Anidb/dump.py#L40-L52 |
ccubed/PyMoe | Pymoe/Anidb/dump.py | Dump.download | def download(self, which, destination=None):
"""
I realize that the download for the dumps is going to take awhile.
Given that, I've decided to approach this using threads.
When you call this method, it will launch a thread to download the data.
By default, the dump is dropped into the current working directory.
If the directory given doesn't exist, we'll try to make it.
Don't use '..' in the path as this confuses makedirs.
:param int which: 0 for dat (txt), 1 for xml
:param str destination: a file path to save to, defaults to cwd
"""
if destination:
if not os.path.exists(destination):
os.makedirs(destination)
else:
destination = os.getcwd()  # default to cwd, as the docstring promises
pthread = threading.Thread(
target=save,
args=(
self.urls[which],
os.path.join(destination, os.path.basename(self.urls[which]))  # save under the dump's own filename
)
)
pthread.start()
return pthread | python | def download(self, which, destination=None):
"""
I realize that the download for the dumps is going to take awhile.
Given that, I've decided to approach this using threads.
When you call this method, it will launch a thread to download the data.
By default, the dump is dropped into the current working directory.
If the directory given doesn't exist, we'll try to make it.
Don't use '..' in the path as this confuses makedirs.
:param int which: 0 for dat (txt), 1 for xml
:param str destination: a file path to save to, defaults to cwd
"""
if destination:
if not os.path.exists(destination):
os.makedirs(destination)
else:
destination = os.getcwd()  # default to cwd, as the docstring promises
pthread = threading.Thread(
target=save,
args=(
self.urls[which],
os.path.join(destination, os.path.basename(self.urls[which]))  # save under the dump's own filename
)
)
pthread.start()
return pthread | I realize that the download for the dumps is going to take awhile.
Given that, I've decided to approach this using threads.
When you call this method, it will launch a thread to download the data.
By default, the dump is dropped into the current working directory.
If the directory given doesn't exist, we'll try to make it.
Don't use '..' in the path as this confuses makedirs.
:param int which: 0 for dat (txt), 1 for xml
:param str destination: a file path to save to, defaults to cwd | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Anidb/dump.py#L13-L37 |
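A usage sketch; Dump() construction is hypothetical, and the index follows the docstring (0 for the dat/txt dump, 1 for xml). download() returns the worker thread, so a caller can block on it:

from Pymoe.Anidb.dump import Dump

dump = Dump()  # hypothetical construction
worker = dump.download(1, destination="dumps")  # start the xml dump download
worker.join()  # block until the background download finishes
print("dump saved under ./dumps")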
ccubed/PyMoe | Pymoe/Kitsu/manga.py | KitsuManga.get | def get(self, aid):
"""
Get manga information by id.
:param int aid: ID of the manga.
:return: Dictionary or None (for not found)
:rtype: Dictionary or None
:raises: :class:`Pymoe.errors.ServerError`
"""
r = requests.get(self.apiurl + "/manga/{}".format(aid), headers=self.header)
if r.status_code != 200:
if r.status_code == 404:
return None
else:
raise ServerError
return r.json() | python | def get(self, aid):
"""
Get manga information by id.
:param int aid: ID of the manga.
:return: Dictionary or None (for not found)
:rtype: Dictionary or None
:raises: :class:`Pymoe.errors.ServerError`
"""
r = requests.get(self.apiurl + "/manga/{}".format(aid), headers=self.header)
if r.status_code != 200:
if r.status_code == 404:
return None
else:
raise ServerError
return r.json() | Get manga information by id.
:param int aid: ID of the manga.
:return: Dictionary or None (for not found)
:rtype: Dictionary or None
:raises: :class:`Pymoe.errors.ServerError` | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/manga.py#L11-L28 |
ccubed/PyMoe | Pymoe/Bakatsuki/__init__.py | Bakatsuki.active | def active(self):
"""
Get a list of active projects.
:return list: A list of tuples containing a title and pageid in that order.
"""
projects = []
r = requests.get(self.api,
params={'action': 'query', 'list': 'categorymembers', 'cmpageid': self.active_id,
'cmtype': 'page', 'cmlimit': '500', 'format': 'json'},
headers=self.header)
if r.status_code == 200:
jsd = r.json()
projects.extend([(x['title'], x['pageid']) for x in jsd['query']['categorymembers']])  # extend keeps one flat list across pages
if 'query-continue' in jsd:
while True:
r = requests.get(self.api,
params={'action': 'query', 'list': 'categorymembers',
'cmpageid': self.active_id, 'cmtype': 'page', 'cmlimit': '500',
'cmcontinue': jsd['query-continue']['categorymembers']['cmcontinue'],
'format': 'json'},
headers=self.header)
if r.status_code == 200:
jsd = r.json()
projects.extend([(x['title'], x['pageid']) for x in jsd['query']['categorymembers']])
if 'query-continue' not in jsd:
break
else:
break
return projects | python | def active(self):
"""
Get a list of active projects.
:return list: A list of tuples containing a title and pageid in that order.
"""
projects = []
r = requests.get(self.api,
params={'action': 'query', 'list': 'categorymembers', 'cmpageid': self.active_id,
'cmtype': 'page', 'cmlimit': '500', 'format': 'json'},
headers=self.header)
if r.status_code == 200:
jsd = r.json()
projects.extend([(x['title'], x['pageid']) for x in jsd['query']['categorymembers']])  # extend keeps one flat list across pages
if 'query-continue' in jsd:
while True:
r = requests.get(self.api,
params={'action': 'query', 'list': 'categorymembers',
'cmpageid': self.active_id, 'cmtype': 'page', 'cmlimit': '500',
'cmcontinue': jsd['query-continue']['categorymembers']['cmcontinue'],
'format': 'json'},
headers=self.header)
if r.status_code == 200:
jsd = r.json()
projects.extend([(x['title'], x['pageid']) for x in jsd['query']['categorymembers']])
if 'query-continue' not in jsd:
break
else:
break
return projects | Get a list of active projects.
:return list: A list of tuples containing a title and pageid in that order. | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Bakatsuki/__init__.py#L26-L55 |
ccubed/PyMoe | Pymoe/Bakatsuki/__init__.py | Bakatsuki.light_novels | def light_novels(self, language="English"):
"""
Get a list of light novels under a certain language.
:param str language: Defaults to English. Replace with whatever language you want to query.
:return list: A list of tuples containing a title and pageid element in that order.
"""
projects = []
r = requests.get(self.api,
params={'action': 'query', 'list': 'categorymembers',
'cmtitle': 'Category:Light_novel_({})'.format(language.replace(" ", "_")),
'cmtype': 'page', 'cmlimit': '500', 'format': 'json'},
headers=self.header)
if r.status_code == 200:
jsd = r.json()
projects.extend([(x['title'], x['pageid']) for x in jsd['query']['categorymembers']])  # extend keeps one flat list across pages
if 'query-continue' in jsd:
while True:
r = requests.get(self.api,
params={'action': 'query', 'list': 'categorymembers',
'cmtitle': 'Category:Light_novel_({})'.format(language.replace(" ", "_")),
'cmtype': 'page', 'cmlimit': '500',
'cmcontinue': jsd['query-continue']['categorymembers']['cmcontinue'],
'format': 'json'},
headers=self.header)
if r.status_code == 200:
jsd = r.json()
projects.extend([(x['title'], x['pageid']) for x in jsd['query']['categorymembers']])
if 'query-continue' not in jsd:
break
else:
break
return projects | python | def light_novels(self, language="English"):
"""
Get a list of light novels under a certain language.
:param str language: Defaults to English. Replace with whatever language you want to query.
:return list: A list of tuples containing a title and pageid element in that order.
"""
projects = []
r = requests.get(self.api,
params={'action': 'query', 'list': 'categorymembers',
'cmtitle': 'Category:Light_novel_({})'.format(language.replace(" ", "_")),
'cmtype': 'page', 'cmlimit': '500', 'format': 'json'},
headers=self.header)
if r.status_code == 200:
jsd = r.json()
projects.extend([(x['title'], x['pageid']) for x in jsd['query']['categorymembers']])  # extend, not append: keep one flat list across pages
if 'query-continue' in jsd:
while True:
r = requests.get(self.api,
params={'action': 'query', 'list': 'categorymembers',
'cmtitle': 'Category:Light_novel_({})'.format(language.replace(" ", "_")),
'cmtype': 'page', 'cmlimit': '500',
'cmcontinue': jsd['query-continue']['categorymembers']['cmcontinue'],
'format': 'json'},
headers=self.header)
if r.status_code == 200:
jsd = r.json()
projects.extend([(x['title'], x['pageid']) for x in jsd['query']['categorymembers']])  # extend, not append: keep one flat list across pages
if 'query-continue' not in jsd:
break
else:
break
return projects | Get a list of light novels under a certain language.
:param str language: Defaults to English. Replace with whatever language you want to query.
:return list: A list of tuples containing a title and pageid element in that order. | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Bakatsuki/__init__.py#L57-L89 |
ccubed/PyMoe | Pymoe/Bakatsuki/__init__.py | Bakatsuki.chapters | def chapters(self, title):
"""
Get a list of chapters for a light novel. Keep in mind, this can be slow. I've certainly tried to make it as fast as possible, but it's still pulling text out of a webpage.
:param str title: The title of the novel you want chapters from
:return OrderedDict: An OrderedDict which contains the chapters found for the light novel supplied
"""
r = requests.get("https://www.baka-tsuki.org/project/index.php?title={}".format(title.replace(" ", "_")),
headers=self.header)
if r.status_code != 200:
raise requests.HTTPError("Not Found")
else:
parsed = soup(r.text, 'html.parser')
dd = parsed.find_all("a")
volumes = []
for link in dd:
if 'class' in link.attrs:
if 'image' in link.get('class'):
continue
if 'href' in link.attrs:
if re.search(self.chapter_regex, link.get('href')) is not None and not link.get('href').startswith('#'):
volumes.append(link)
seplist = OrderedDict()
for item in volumes:
if 'title' in item.attrs:
result = re.search(self.separate_regex, item.get('title').lower())
else:
result = re.search(self.separate_regex, item.text.lower())
if result and result.groups():
if result.group('chapter').lstrip('0') in seplist:
seplist[result.group('chapter').lstrip('0')].append([item.get('href'),
item.get('title') if 'title' in item.attrs else item.text])
else:
seplist[result.group('chapter').lstrip('0')] = [[item.get('href'),
item.get('title') if 'title' in item.attrs else item.text]]
return seplist | python | def chapters(self, title):
"""
Get a list of chapters for a light novel. Keep in mind, this can be slow. I've certainly tried to make it as fast as possible, but it's still pulling text out of a webpage.
:param str title: The title of the novel you want chapters from
:return OrderedDict: An OrderedDict which contains the chapters found for the light novel supplied
"""
r = requests.get("https://www.baka-tsuki.org/project/index.php?title={}".format(title.replace(" ", "_")),
headers=self.header)
if r.status_code != 200:
raise requests.HTTPError("Not Found")
else:
parsed = soup(r.text, 'html.parser')
dd = parsed.find_all("a")
volumes = []
for link in dd:
if 'class' in link.attrs:
if 'image' in link.get('class'):
continue
if 'href' in link.attrs:
if re.search(self.chapter_regex, link.get('href')) is not None and not link.get('href').startswith('#'):
volumes.append(link)
seplist = OrderedDict()
for item in volumes:
if 'title' in item.attrs:
result = re.search(self.separate_regex, item.get('title').lower())
else:
result = re.search(self.separate_regex, item.text.lower())
if result and result.groups():
if result.group('chapter').lstrip('0') in seplist:
seplist[result.group('chapter').lstrip('0')].append([item.get('href'),
item.get('title') if 'title' in item.attrs else item.text])
else:
seplist[result.group('chapter').lstrip('0')] = [[item.get('href'),
item.get('title') if 'title' in item.attrs else item.text]]
return seplist | Get a list of chapters for a light novel. Keep in mind, this can be slow. I've certainly tried to make it as fast as possible, but it's still pulling text out of a webpage.
:param str title: The title of the novel you want chapters from
:return OrderedDict: An OrderedDict which contains the chapters found for the light novel supplied | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Bakatsuki/__init__.py#L159-L195
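The chapter-grouping step above hinges on a regex with a named 'chapter' group. The class's chapter_regex/separate_regex are not shown in this excerpt, so this sketch substitutes a made-up pattern and made-up link data purely to illustrate the bucketing:

import re
from collections import OrderedDict

# Assumption: a stand-in pattern with a named 'chapter' group; the real
# separate_regex attribute is defined elsewhere in the class.
separate_regex = re.compile(r'volume\s*(?P<chapter>\d+)')

links = [('Novel:Vol1_Ch1', 'volume 01 chapter 1'),
         ('Novel:Vol1_Ch2', 'volume 1 chapter 2'),
         ('Novel:Vol2_Ch1', 'volume 02 chapter 1')]

grouped = OrderedDict()
for href, title in links:
    m = separate_regex.search(title.lower())
    if m:
        key = m.group('chapter').lstrip('0')   # '01' and '1' share one bucket
        grouped.setdefault(key, []).append([href, title])
print(list(grouped.items()))  # [('1', [...two entries...]), ('2', [...])]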
ccubed/PyMoe | Pymoe/Bakatsuki/__init__.py | Bakatsuki.cover | def cover(self, pageid):
"""
Get a cover image given a page id.
:param str pageid: The pageid for the light novel you want a cover image for
:return str: the image url
"""
r = requests.get(self.api,
params={'action': 'query', 'prop': 'pageimages', 'pageids': pageid, 'format': 'json'},
headers=self.header)
jsd = r.json()
image = "File:" + jsd['query']['pages'][str(pageid)]['pageimage']
r = requests.get(self.api,
params={'action': 'query', 'prop': 'imageinfo', 'iiprop': 'url', 'titles': image,
'format': 'json'},
headers=self.header)
jsd = r.json()
return jsd['query']['pages'][list(jsd['query']['pages'].keys())[0]]['imageinfo'][0]['url'] | python | def cover(self, pageid):
"""
Get a cover image given a page id.
:param str pageid: The pageid for the light novel you want a cover image for
:return str: the image url
"""
r = requests.get(self.api,
params={'action': 'query', 'prop': 'pageimages', 'pageids': pageid, 'format': 'json'},
headers=self.header)
jsd = r.json()
image = "File:" + jsd['query']['pages'][str(pageid)]['pageimage']
r = requests.get(self.api,
params={'action': 'query', 'prop': 'imageinfo', 'iiprop': 'url', 'titles': image,
'format': 'json'},
headers=self.header)
jsd = r.json()
return jsd['query']['pages'][list(jsd['query']['pages'].keys())[0]]['imageinfo'][0]['url'] | Get a cover image given a page id.
:param str pageid: The pageid for the light novel you want a cover image for
:return str: the image url | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Bakatsuki/__init__.py#L197-L214 |
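The cover lookup is a two-step MediaWiki query: pageimages to learn the lead image's file name, then imageinfo to resolve it to a URL. A condensed sketch of that flow (the function name is made up; api/header come from the class):

import requests

def cover_url(api, pageid, header):
    # Step 1: 'pageimages' yields the file name of the page's lead image.
    r = requests.get(api, params={'action': 'query', 'prop': 'pageimages',
                                  'pageids': pageid, 'format': 'json'},
                     headers=header)
    name = 'File:' + r.json()['query']['pages'][str(pageid)]['pageimage']
    # Step 2: 'imageinfo' resolves that file name to a direct URL.
    r = requests.get(api, params={'action': 'query', 'prop': 'imageinfo',
                                  'iiprop': 'url', 'titles': name,
                                  'format': 'json'},
                     headers=header)
    pages = r.json()['query']['pages']
    return next(iter(pages.values()))['imageinfo'][0]['url']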
ccubed/PyMoe | Pymoe/Bakatsuki/__init__.py | Bakatsuki.get_text | def get_text(self, title):
"""
This will grab the html content of the chapter given by url. Technically you can use this to get the content of other pages too.
:param title: Title for the page you want the content of
:return: a string containing the html content
"""
r = requests.get(self.api,
params={'action': 'parse', 'page': title, 'format': 'json'},
headers=self.header)
jsd = r.json()
return jsd['parse']['text']['*'] | python | def get_text(self, title):
"""
This will grab the html content of the chapter given by url. Technically you can use this to get the content of other pages too.
:param title: Title for the page you want the content of
:return: a string containing the html content
"""
r = requests.get(self.api,
params={'action': 'parse', 'page': title, 'format': 'json'},
headers=self.header)
jsd = r.json()
return jsd['parse']['text']['*'] | This will grab the html content of the chapter given by url. Technically you can use this to get the content of other pages too.
:param title: Title for the page you want the content of
:return: a string containing the html content | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Bakatsuki/__init__.py#L216-L227 |
ccubed/PyMoe | Pymoe/Mal/__init__.py | Mal._verify_credentials | def _verify_credentials(self):
"""
An internal method that verifies the credentials given at instantiation.
:raises: :class:`Pymoe.errors.UserLoginFailed`
"""
r = requests.get(self.apiurl + "account/verify_credentials.xml",
auth=HTTPBasicAuth(self._username, self._password),
headers=self.header)
if r.status_code != 200:
raise UserLoginFailed("Username or Password incorrect.") | python | def _verify_credentials(self):
"""
An internal method that verifies the credentials given at instantiation.
:raises: :class:`Pymoe.errors.UserLoginFailed`
"""
r = requests.get(self.apiurl + "account/verify_credentials.xml",
auth=HTTPBasicAuth(self._username, self._password),
headers=self.header)
if r.status_code != 200:
raise UserLoginFailed("Username or Password incorrect.") | An internal method that verifies the credentials given at instantiation.
:raises: :class:`Pymoe.errors.UserLoginFailed` | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Mal/__init__.py#L37-L47 |
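The verification above is plain HTTP Basic Auth against an endpoint that returns 401 for bad credentials. A minimal, library-agnostic sketch (the helper name is an assumption; pass the verify_credentials URL used above):

import requests
from requests.auth import HTTPBasicAuth

def credentials_ok(url, username, password):
    # Any endpoint that rejects bad credentials with a non-200 status
    # can be checked this way.
    r = requests.get(url, auth=HTTPBasicAuth(username, password))
    return r.status_code == 200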
ccubed/PyMoe | Pymoe/Mal/__init__.py | Mal._search | def _search(self, which, term):
"""
The real search method.
:param which: 1 for anime, 2 for manga
:param term: What to search for
:rtype: namedtuple or list
:return: a namedtuple grouping :class:`Pymoe.Mal.Objects.Anime` or :class:`Pymoe.Mal.Objects.Manga` objects by status as per the type param, or an empty list on failure.
"""
url = self.apiurl + "{}/search.xml".format('anime' if which == 1 else 'manga')
r = requests.get(url, params={'q': term},
auth=HTTPBasicAuth(self._username, self._password),
headers=self.header)
if r.status_code != 200:
return []
data = ET.fromstring(r.text)
final_list = []
if which == 1:
for item in data.findall('entry'):
syn = item.find('synonyms').text.split(';') if item.find('synonyms').text else []
final_list.append(Anime(
item.find('id').text,
title=item.find('title').text,
synonyms=syn + [item.find('english').text],  # list.append returns None; concatenate instead
episodes=item.find('episodes').text,
average=item.find('score').text,
anime_start=item.find('start_date').text,
anime_end=item.find('end_date').text,
synopsis=html.unescape(item.find('synopsis').text.replace('<br />', '')) if item.find(
'synopsis').text else None,
image=item.find('image').text,
status_anime=item.find('status').text,
type=item.find('type').text
))
return NT_SEARCH_ANIME(
airing=[x for x in final_list if x.status.series == "Currently Airing"],
finished=[x for x in final_list if x.status.series == "Finished Airing"],
unaired=[x for x in final_list if x.status.series == "Not Yet Aired"],
dropped=[x for x in final_list if x.status.series == "Dropped"],
planned=[x for x in final_list if x.status.series == "Plan to Watch"]
)
else:
for item in data.findall('entry'):
syn = item.find('synonyms').text.split(';') if item.find('synonyms').text else []
final_list.append(Manga(
item.find('id').text,
title=item.find('title').text,
synonyms=syn + [item.find('english').text],  # list.append returns None; concatenate instead
chapters=item.find('chapters').text,
volumes=item.find('volumes').text,
average=item.find('score').text,
manga_start=item.find('start_date').text,
manga_end=item.find('end_date').text,
synopsis=html.unescape(item.find('synopsis').text.replace('<br />', '')) if item.find(
'synopsis').text else None,
image=item.find('image').text,
status_manga=item.find('status').text,
type=item.find('type').text
))
return NT_SEARCH_MANGA(
publishing=[x for x in final_list if x.status.series == "Publishing"],
finished=[x for x in final_list if x.status.series == "Finished"],
unpublished=[x for x in final_list if x.status.series == "Not Yet Published"],
dropped=[x for x in final_list if x.status.series == "Dropped"],
planned=[x for x in final_list if x.status.series == "Plan to Read"]
) | python | def _search(self, which, term):
"""
The real search method.
:param which: 1 for anime, 2 for manga
:param term: What to search for
:rtype: namedtuple or list
:return: a namedtuple grouping :class:`Pymoe.Mal.Objects.Anime` or :class:`Pymoe.Mal.Objects.Manga` objects by status as per the type param, or an empty list on failure.
"""
url = self.apiurl + "{}/search.xml".format('anime' if which == 1 else 'manga')
r = requests.get(url, params={'q': term},
auth=HTTPBasicAuth(self._username, self._password),
headers=self.header)
if r.status_code != 200:
return []
data = ET.fromstring(r.text)
final_list = []
if which == 1:
for item in data.findall('entry'):
syn = item.find('synonyms').text.split(';') if item.find('synonyms').text else []
final_list.append(Anime(
item.find('id').text,
title=item.find('title').text,
synonyms=syn + [item.find('english').text],  # list.append returns None; concatenate instead
episodes=item.find('episodes').text,
average=item.find('score').text,
anime_start=item.find('start_date').text,
anime_end=item.find('end_date').text,
synopsis=html.unescape(item.find('synopsis').text.replace('<br />', '')) if item.find(
'synopsis').text else None,
image=item.find('image').text,
status_anime=item.find('status').text,
type=item.find('type').text
))
return NT_SEARCH_ANIME(
airing=[x for x in final_list if x.status.series == "Currently Airing"],
finished=[x for x in final_list if x.status.series == "Finished Airing"],
unaired=[x for x in final_list if x.status.series == "Not Yet Aired"],
dropped=[x for x in final_list if x.status.series == "Dropped"],
planned=[x for x in final_list if x.status.series == "Plan to Watch"]
)
else:
for item in data.findall('entry'):
syn = item.find('synonyms').text.split(';') if item.find('synonyms').text else []
final_list.append(Manga(
item.find('id').text,
title=item.find('title').text,
synonyms=syn + [item.find('english').text],  # list.append returns None; concatenate instead
chapters=item.find('chapters').text,
volumes=item.find('volumes').text,
average=item.find('score').text,
manga_start=item.find('start_date').text,
manga_end=item.find('end_date').text,
synopsis=html.unescape(item.find('synopsis').text.replace('<br />', '')) if item.find(
'synopsis').text else None,
image=item.find('image').text,
status_manga=item.find('status').text,
type=item.find('type').text
))
return NT_SEARCH_MANGA(
publishing=[x for x in final_list if x.status.series == "Publishing"],
finished=[x for x in final_list if x.status.series == "Finished"],
unpublished=[x for x in final_list if x.status.series == "Not Yet Published"],
dropped=[x for x in final_list if x.status.series == "Dropped"],
planned=[x for x in final_list if x.status.series == "Plan to Read"]
) | The real search method.
:param which: 1 for anime, 2 for manga
:param term: What to search for
:rtype: namedtuple or list
:return: a namedtuple grouping :class:`Pymoe.Mal.Objects.Anime` or :class:`Pymoe.Mal.Objects.Manga` objects by status as per the type param, or an empty list on failure. | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Mal/__init__.py#L69-L134
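A self-contained sketch of the ElementTree parsing pattern used above, against an inline XML snippet shaped like MAL's legacy search response (the sample data is made up):

import xml.etree.ElementTree as ET

xml = """<anime><entry>
  <id>1</id><title>Example</title>
  <synonyms>alpha; beta</synonyms><english>Example EN</english>
  <status>Finished Airing</status>
</entry></anime>"""

for entry in ET.fromstring(xml).findall('entry'):
    syn = entry.find('synonyms').text.split(';') if entry.find('synonyms').text else []
    # list.append returns None, which is why the call sites above
    # concatenate (syn + [...]) instead of passing syn.append(...).
    syn = syn + [entry.find('english').text]
    print(entry.find('id').text, entry.find('title').text, syn)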
ccubed/PyMoe | Pymoe/Mal/__init__.py | Mal._anime_add | def _anime_add(self, data):
"""
Adds an anime to a user's list.
:param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
:raises: SyntaxError on invalid data type
:raises: ServerError on failure to add
:rtype: Bool
:return: True on success
"""
if isinstance(data, Anime):
xmlstr = data.to_xml()
r = requests.get(self.apiurl + "animelist/add/{}.xml".format(data.id),
params={'data': xmlstr},
auth=HTTPBasicAuth(self._username, self._password),
headers=self.header)
if r.status_code != 201:
raise ServerError(r.text, r.status_code)
return True
else:
raise SyntaxError(
"Invalid type: data should be a Pymoe.Mal.Objects.Anime object. Got a {}".format(type(data))) | python | def _anime_add(self, data):
"""
Adds an anime to a user's list.
:param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
:raises: SyntaxError on invalid data type
:raises: ServerError on failure to add
:rtype: Bool
:return: True on success
"""
if isinstance(data, Anime):
xmlstr = data.to_xml()
r = requests.get(self.apiurl + "animelist/add/{}.xml".format(data.id),
params={'data': xmlstr},
auth=HTTPBasicAuth(self._username, self._password),
headers=self.header)
if r.status_code != 201:
raise ServerError(r.text, r.status_code)
return True
else:
raise SyntaxError(
"Invalid type: data should be a Pymoe.Mal.Objects.Anime object. Got a {}".format(type(data))) | Adds an anime to a user's list.
:param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
:raises: SyntaxError on invalid data type
:raises: ServerError on failure to add
:rtype: Bool
:return: True on success | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Mal/__init__.py#L136-L157 |
ccubed/PyMoe | Pymoe/Mal/__init__.py | Mal.user | def user(self, name):
"""
Get a user's anime list and details. This returns an encapsulated data type.
:param str name: The username to query
:rtype: :class:`Pymoe.Mal.Objects.User`
:return: A :class:`Pymoe.Mal.Objects.User` Object
"""
anime_data = requests.get(self.apiusers, params={'u': name, 'status': 'all', 'type': 'anime'},
headers=self.header)
if anime_data.status_code != 200:
raise ConnectionError(
"Anime Data Request failed. Please Open a bug on https://github.com/ccubed/Pymoe and include the following data.\nStatus Code: {}\n\nText:{}".format(
anime_data.status_code, anime_data.text))
manga_data = requests.get(self.apiusers, params={'u': name, 'status': 'all', 'type': 'manga'},
headers=self.header)
if manga_data.status_code != 200:
raise ConnectionError(
"Manga Data Request failed. Please Open a bug on https://github.com/ccubed/Pymoe and include the following data.\nStatus Code: {}\n\nText:{}".format(
manga_data.status_code, manga_data.text))
root = ET.fromstring(anime_data.text)
uid = root.find('myinfo').find('user_id').text
uname = root.find('myinfo').find('user_name').text
anime_object_list = self.parse_anime_data(anime_data.text)
manga_object_list = self.parse_manga_data(manga_data.text)
return User(uid=uid,
name=uname,
anime_list=NT_USER_ANIME(
watching=[x for x in anime_object_list['data'] if x.status.user == "Currently Watching"],
completed=[x for x in anime_object_list['data'] if x.status.user == "Completed"],
held=[x for x in anime_object_list['data'] if x.status.user == "On Hold"],
dropped=[x for x in anime_object_list['data'] if x.status.user == "Dropped"],
planned=[x for x in anime_object_list['data'] if x.status.user == "Plan to Watch"]
),
anime_days=anime_object_list['days'],
manga_list=NT_USER_MANGA(
reading=[x for x in manga_object_list['data'] if x.status.user == "Currently Reading"],
completed=[x for x in manga_object_list['data'] if x.status.user == "Completed"],
held=[x for x in manga_object_list['data'] if x.status.user == "On Hold"],
dropped=[x for x in manga_object_list['data'] if x.status.user == "Dropped"],
planned=[x for x in manga_object_list['data'] if x.status.user == "Plan to Read"]
),
manga_days=manga_object_list['days']) | python | def user(self, name):
"""
Get a user's anime list and details. This returns an encapsulated data type.
:param str name: The username to query
:rtype: :class:`Pymoe.Mal.Objects.User`
:return: A :class:`Pymoe.Mal.Objects.User` Object
"""
anime_data = requests.get(self.apiusers, params={'u': name, 'status': 'all', 'type': 'anime'},
headers=self.header)
if anime_data.status_code != 200:
raise ConnectionError(
"Anime Data Request failed. Please Open a bug on https://github.com/ccubed/Pymoe and include the following data.\nStatus Code: {}\n\nText:{}".format(
anime_data.status_code, anime_data.text))
manga_data = requests.get(self.apiusers, params={'u': name, 'status': 'all', 'type': 'manga'},
headers=self.header)
if manga_data.status_code != 200:
raise ConnectionError(
"Manga Data Request failed. Please Open a bug on https://github.com/ccubed/Pymoe and include the following data.\nStatus Code: {}\n\nText:{}".format(
manga_data.status_code, manga_data.text))
root = ET.fromstring(anime_data.text)
uid = root.find('myinfo').find('user_id').text
uname = root.find('myinfo').find('user_name').text
anime_object_list = self.parse_anime_data(anime_data.text)
manga_object_list = self.parse_manga_data(manga_data.text)
return User(uid=uid,
name=uname,
anime_list=NT_USER_ANIME(
watching=[x for x in anime_object_list['data'] if x.status.user == "Currently Watching"],
completed=[x for x in anime_object_list['data'] if x.status.user == "Completed"],
held=[x for x in anime_object_list['data'] if x.status.user == "On Hold"],
dropped=[x for x in anime_object_list['data'] if x.status.user == "Dropped"],
planned=[x for x in anime_object_list['data'] if x.status.user == "Plan to Watch"]
),
anime_days=anime_object_list['days'],
manga_list=NT_USER_MANGA(
reading=[x for x in manga_object_list['data'] if x.status.user == "Currently Reading"],
completed=[x for x in manga_object_list['data'] if x.status.user == "Completed"],
held=[x for x in manga_object_list['data'] if x.status.user == "On Hold"],
dropped=[x for x in manga_object_list['data'] if x.status.user == "Dropped"],
planned=[x for x in manga_object_list['data'] if x.status.user == "Plan to Read"]
),
manga_days=manga_object_list['days']) | Get a user's anime list and details. This returns an encapsulated data type.
:param str name: The username to query
:rtype: :class:`Pymoe.Mal.Objects.User`
:return: A :class:`Pymoe.Mal.Objects.User` Object | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Mal/__init__.py#L270-L316 |
ccubed/PyMoe | Pymoe/VNDB/connection.py | VNDBConnection.login | def login(self, username, password):
"""
This handles login logic instead of stuffing all that in the __init__.
:param username: The username to log in as or None
:param password: The password for that user or None
:return: Nothing
:raises: :class:`Pymoe.errors.UserLoginFailed` - Didn't respond with Ok
:raises: :class:`Pymoe.errors.GeneralLoginError` - For some reason, we were already logged in, tried to log in again and it failed. This probably isn't bad.
"""
finvars = self.clientvars
if username and password:
finvars['username'] = username
finvars['password'] = password
self.loggedin = True
ret = self.send_command('login', ujson.dumps(finvars))
if not isinstance(ret, str): # should just be 'Ok'
if self.loggedin:
self.loggedin = False
raise UserLoginFailed(ret['msg'])
else:
raise GeneralLoginError(ret['msg']) | python | def login(self, username, password):
"""
This handles login logic instead of stuffing all that in the __init__.
:param username: The username to log in as or None
:param password: The password for that user or None
:return: Nothing
:raises: :class:`Pymoe.errors.UserLoginFailed` - Didn't respond with Ok
:raises: :class:`Pymoe.errors.GeneralLoginError` - For some reason, we were already logged in, tried to log in again and it failed. This probably isn't bad.
"""
finvars = self.clientvars
if username and password:
finvars['username'] = username
finvars['password'] = password
self.loggedin = True
ret = self.send_command('login', ujson.dumps(finvars))
if not isinstance(ret, str): # should just be 'Ok'
if self.loggedin:
self.loggedin = False
raise UserLoginFailed(ret['msg'])
else:
raise GeneralLoginError(ret['msg']) | This handles login logic instead of stuffing all that in the __init__.
:param username: The username to log in as or None
:param password: The password for that user or None
:return: Nothing
:raises: :class:`Pymoe.errors.UserLoginFailed` - Didn't respond with Ok
:raises: :class:`Pymoe.errors.GeneralLoginError` - For some reason, we were already logged in, tried to log in again and it failed. This probably isn't bad. | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/VNDB/connection.py#L47-L68
ccubed/PyMoe | Pymoe/VNDB/connection.py | VNDBConnection.send_command | def send_command(self, command, args=None):
"""
Send a command to VNDB and then get the result.
:param command: What command are we sending
:param args: What are the json args for this command
:return: Servers Response
:rtype: Dictionary (See D11 docs on VNDB)
"""
if args:
if isinstance(args, str):
final_command = command + ' ' + args + '\x04'
else:
# We just let ujson propagate the error here if it can't parse the arguments
final_command = command + ' ' + ujson.dumps(args) + '\x04'
else:
final_command = command + '\x04'
self.sslwrap.sendall(final_command.encode('utf-8'))
return self._recv_data() | python | def send_command(self, command, args=None):
"""
Send a command to VNDB and then get the result.
:param command: What command are we sending
:param args: What are the json args for this command
:return: Servers Response
:rtype: Dictionary (See D11 docs on VNDB)
"""
if args:
if isinstance(args, str):
final_command = command + ' ' + args + '\x04'
else:
# We just let ujson propagate the error here if it can't parse the arguments
final_command = command + ' ' + ujson.dumps(args) + '\x04'
else:
final_command = command + '\x04'
self.sslwrap.sendall(final_command.encode('utf-8'))
return self._recv_data() | Send a command to VNDB and then get the result.
:param command: What command are we sending
:param args: What are the json args for this command
:return: Server's response
:rtype: Dictionary (See D11 docs on VNDB) | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/VNDB/connection.py#L70-L88 |
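The wire format send_command() produces is simply '<command> <json-args>' terminated by an EOT byte (\x04). A small sketch of that framing (the frame() helper is made up; ujson's compact output has no spaces):

import ujson

def frame(command, args=None):
    # Build one EOT-terminated protocol message as bytes.
    if args is None:
        return (command + '\x04').encode('utf-8')
    payload = args if isinstance(args, str) else ujson.dumps(args)
    return (command + ' ' + payload + '\x04').encode('utf-8')

assert frame('dbstats') == b'dbstats\x04'
assert frame('get', {'id': 17}) == b'get {"id":17}\x04'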
ccubed/PyMoe | Pymoe/VNDB/connection.py | VNDBConnection._recv_data | def _recv_data(self):
"""
Receives data until we reach the \x04 and then returns it.
:return: The data received
"""
temp = ""
while True:
self.data_buffer = self.sslwrap.recv(1024)
if '\x04' in self.data_buffer.decode('utf-8', 'ignore'):
temp += self.data_buffer.decode('utf-8', 'ignore')
break
else:
temp += self.data_buffer.decode('utf-8', 'ignore')
self.data_buffer = bytes(1024)
temp = temp.replace('\x04', '')
if 'Ok' in temp: # Because login
return temp
else:
return ujson.loads(temp.split(' ', 1)[1]) | python | def _recv_data(self):
"""
Receives data until we reach the \x04 and then returns it.
:return: The data received
"""
temp = ""
while True:
self.data_buffer = self.sslwrap.recv(1024)
if '\x04' in self.data_buffer.decode('utf-8', 'ignore'):
temp += self.data_buffer.decode('utf-8', 'ignore')
break
else:
temp += self.data_buffer.decode('utf-8', 'ignore')
self.data_buffer = bytes(1024)
temp = temp.replace('\x04', '')
if 'Ok' in temp: # Because login
return temp
else:
return ujson.loads(temp.split(' ', 1)[1]) | Receives data until we reach the \x04 and then returns it.
:return: The data received | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/VNDB/connection.py#L90-L109 |
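The receive side of the same framing, reduced to a sketch: accumulate fixed-size chunks from a socket-like object until the \x04 terminator arrives (the helper name is an assumption):

def recv_until_eot(sock, bufsize=1024):
    # Read one EOT-terminated response; stop early if the peer closes.
    buf = b''
    while b'\x04' not in buf:
        chunk = sock.recv(bufsize)
        if not chunk:               # connection closed before the terminator
            break
        buf += chunk
    return buf.decode('utf-8', 'ignore').replace('\x04', '')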
ccubed/PyMoe | Pymoe/Kitsu/library.py | KitsuLib.get | def get(self, uid, filters=None):
"""
Get a user's list of library entries. The entries themselves don't
indicate their media type, but you can use the filters provided
by the Kitsu API to select only the ones you want.
:param uid: str: User ID to get library entries for
:param filters: dict: Dictionary of filters for the library
:return: Results or ServerError
:rtype: SearchWrapper or Exception
"""
filters = self.__format_filters(filters)
r = requests.get(self.apiurl + "/users/{}/library-entries".format(uid), headers=self.header, params=filters)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if jsd['meta']['count']:
return SearchWrapper(jsd['data'], jsd['links']['next'] if 'next' in jsd['links'] else None, self.header)
else:
return None | python | def get(self, uid, filters=None):
"""
Get a user's list of library entries. The entries themselves don't
indicate their media type, but you can use the filters provided
by the Kitsu API to select only the ones you want.
:param uid: str: User ID to get library entries for
:param filters: dict: Dictionary of filters for the library
:return: Results or ServerError
:rtype: SearchWrapper or Exception
"""
filters = self.__format_filters(filters)
r = requests.get(self.apiurl + "/users/{}/library-entries".format(uid), headers=self.header, params=filters)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if jsd['meta']['count']:
return SearchWrapper(jsd['data'], jsd['links']['next'] if 'next' in jsd['links'] else None, self.header)
else:
return None | Get a user's list of library entries. The entries themselves don't
indicate their media type, but you can use the filters provided
by the Kitsu API to select only the ones you want.
:param uid: str: User ID to get library entries for
:param filters: dict: Dictionary of filters for the library
:return: Results or ServerError
:rtype: SearchWrapper or Exception | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/library.py#L10-L33 |
ccubed/PyMoe | Pymoe/Kitsu/library.py | KitsuLib.create | def create(self, user_id, media_id, item_type, token, data):
"""
Create a library entry for a user. data should be just the attributes.
Data at least needs a status and progress.
:param user_id str: User ID that this Library Entry is for
:param media_id str: ID for the media this entry relates to
:param item_type str: anime, drama or manga depending
:param token str: OAuth token for user
:param data dict: Dictionary of attributes for the entry
:return: New Entry ID or ConnectionError
:rtype: Str or Exception
"""
final_dict = {
"data": {
"type": "libraryEntries",
"attributes": data,
"relationships":{
"user":{
"data":{
"id": user_id,
"type": "users"
}
},
"media":{
"data":{
"id": media_id,
"type": item_type
}
}
}
}
}
final_headers = self.header.copy()  # copy so the token doesn't leak into the shared header
final_headers['Authorization'] = "Bearer {}".format(token)
r = requests.post(self.apiurl + "/library-entries", json=final_dict, headers=final_headers)
if r.status_code != 201:
raise ConnectionError(r.text)
jsd = r.json()
return jsd['data']['id'] | python | def create(self, user_id, media_id, item_type, token, data):
"""
Create a library entry for a user. data should be just the attributes.
Data at least needs a status and progress.
:param user_id str: User ID that this Library Entry is for
:param media_id str: ID for the media this entry relates to
:param item_type str: anime, drama or manga depending
:param token str: OAuth token for user
:param data dict: Dictionary of attributes for the entry
:return: New Entry ID or ConnectionError
:rtype: Str or Exception
"""
final_dict = {
"data": {
"type": "libraryEntries",
"attributes": data,
"relationships":{
"user":{
"data":{
"id": user_id,
"type": "users"
}
},
"media":{
"data":{
"id": media_id,
"type": item_type
}
}
}
}
}
final_headers = self.header.copy()  # copy so the token doesn't leak into the shared header
final_headers['Authorization'] = "Bearer {}".format(token)
r = requests.post(self.apiurl + "/library-entries", json=final_dict, headers=final_headers)
if r.status_code != 201:
raise ConnectionError(r.text)
jsd = r.json()
return jsd['data']['id'] | Create a library entry for a user. data should be just the attributes.
Data at least needs a status and progress.
:param user_id str: User ID that this Library Entry is for
:param media_id str: ID for the media this entry relates to
:param item_type str: anime, drama or manga depending
:param token str: OAuth token for user
:param data dict: Dictionary of attributes for the entry
:return: New Entry ID or ConnectionError
:rtype: Str or Exception | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/library.py#L35-L78 |
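What create() actually posts is a JSON:API document: the entry's attributes plus 'user' and 'media' relationships. A runnable sketch of that shape (all IDs and attribute values here are made up):

import json

payload = {
    "data": {
        "type": "libraryEntries",
        "attributes": {"status": "current", "progress": 3},
        "relationships": {
            "user":  {"data": {"id": "42",  "type": "users"}},
            "media": {"data": {"id": "100", "type": "anime"}},
        },
    }
}
print(json.dumps(payload, indent=2))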
ccubed/PyMoe | Pymoe/Kitsu/library.py | KitsuLib.update | def update(self, eid, data, token):
"""
Update a given Library Entry.
:param eid str: Entry ID
:param data dict: Attributes
:param token str: OAuth token
:return: True or ConnectionError
:rtype: Bool or Exception
"""
final_dict = {"data": {"id": eid, "type": "libraryEntries", "attributes": data}}
final_headers = self.header.copy()  # copy so the token doesn't leak into the shared header
final_headers['Authorization'] = "Bearer {}".format(token)
r = requests.patch(self.apiurl + "/library-entries/{}".format(eid), json=final_dict, headers=final_headers)
if r.status_code != 200:
raise ConnectionError(r.text)
return True | python | def update(self, eid, data, token):
"""
Update a given Library Entry.
:param eid str: Entry ID
:param data dict: Attributes
:param token str: OAuth token
:return: True or ConnectionError
:rtype: Bool or Exception
"""
final_dict = {"data": {"id": eid, "type": "libraryEntries", "attributes": data}}
final_headers = self.header.copy()  # copy so the token doesn't leak into the shared header
final_headers['Authorization'] = "Bearer {}".format(token)
r = requests.patch(self.apiurl + "/library-entries/{}".format(eid), json=final_dict, headers=final_headers)
if r.status_code != 200:
raise ConnectionError(r.text)
return True | Update a given Library Entry.
:param eid str: Entry ID
:param data dict: Attributes
:param token str: OAuth token
:return: True or ConnectionError
:rtype: Bool or Exception | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/library.py#L80-L99 |
ccubed/PyMoe | Pymoe/Kitsu/library.py | KitsuLib.delete | def delete(self, eid, token):
"""
Delete a library entry.
:param eid str: Entry ID
:param token str: OAuth Token
:return: True or ConnectionError
:rtype: Bool or Exception
"""
final_headers = self.header.copy()  # copy so the token doesn't leak into the shared header
final_headers['Authorization'] = "Bearer {}".format(token)
r = requests.delete(self.apiurl + "/library-entries/{}".format(eid), headers=final_headers)
if r.status_code != 204:
raise ConnectionError(r.text)
return True | python | def delete(self, eid, token):
"""
Delete a library entry.
:param eid str: Entry ID
:param token str: OAuth Token
:return: True or ConnectionError
:rtype: Bool or Exception
"""
final_headers = self.header.copy()  # copy so the token doesn't leak into the shared header
final_headers['Authorization'] = "Bearer {}".format(token)
r = requests.delete(self.apiurl + "/library-entries/{}".format(eid), headers=final_headers)
if r.status_code != 204:
raise ConnectionError(r.text)
return True | Delete a library entry.
:param eid str: Entry ID
:param token str: OAuth Token
:return: True or ConnectionError
:rtype: Bool or Exception | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/library.py#L101-L119 |
ccubed/PyMoe | Pymoe/Kitsu/library.py | KitsuLib.__format_filters | def __format_filters(filters):
"""
Format filters for the api query (to filter[<filter-name>])
:param filters: dict: can be None, filters for the query
:return: the formatted filters, or None
"""
if filters is not None:
for k in list(filters):  # snapshot the keys; the loop pops from the dict
if 'filter[' not in k:
filters['filter[{}]'.format(k)] = filters.pop(k)
return filters | python | def __format_filters(filters):
"""
Format filters for the api query (to filter[<filter-name>])
:param filters: dict: can be None, filters for the query
:return: the formatted filters, or None
"""
if filters is not None:
for k in list(filters):  # snapshot the keys; the loop pops from the dict
if 'filter[' not in k:
filters['filter[{}]'.format(k)] = filters.pop(k)
return filters | Format filters for the api query (to filter[<filter-name>])
:param filters: dict: can be None, filters for the query
:return: the formatted filters, or None | https://github.com/ccubed/PyMoe/blob/5b2a2591bb113bd80d838e65aaa06f3a97ff3670/Pymoe/Kitsu/library.py#L122-L133 |
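A runnable demonstration of the key rewrite performed above; keys already wrapped in filter[...] pass through untouched, and iterating over list(filters) snapshots the keys so popping during the loop is safe:

filters = {'kind': 'anime', 'filter[status]': 'completed'}
for k in list(filters):
    if 'filter[' not in k:
        filters['filter[{}]'.format(k)] = filters.pop(k)
print(filters)  # {'filter[status]': 'completed', 'filter[kind]': 'anime'}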
wecatch/app-turbo | turbo/mongo_model.py | convert_to_record | def convert_to_record(func):
"""Wrap mongodb record to a dict record with default value None
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if result is not None:
if isinstance(result, dict):
return _record(result)
return (_record(i) for i in result)
return result
return wrapper | python | def convert_to_record(func):
"""Wrap mongodb record to a dict record with default value None
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if result is not None:
if isinstance(result, dict):
return _record(result)
return (_record(i) for i in result)
return result
return wrapper | Wrap mongodb record to a dict record with default value None | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/mongo_model.py#L23-L36 |
wecatch/app-turbo | turbo/mongo_model.py | MixinModel.to_one_str | def to_one_str(cls, value, *args, **kwargs):
"""Convert single record's values to str
"""
if kwargs.get('wrapper'):
return cls._wrapper_to_one_str(value)
return _es.to_dict_str(value) | python | def to_one_str(cls, value, *args, **kwargs):
"""Convert single record's values to str
"""
if kwargs.get('wrapper'):
return cls._wrapper_to_one_str(value)
return _es.to_dict_str(value) | Convert single record's values to str | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/mongo_model.py#L67-L73 |
wecatch/app-turbo | turbo/mongo_model.py | MixinModel.to_str | def to_str(cls, values, callback=None):
"""Convert many records's values to str
"""
if callback and callable(callback):
if isinstance(values, dict):
return callback(_es.to_str(values))
return [callback(_es.to_str(i)) for i in values]
return _es.to_str(values) | python | def to_str(cls, values, callback=None):
"""Convert many records's values to str
"""
if callback and callable(callback):
if isinstance(values, dict):
return callback(_es.to_str(values))
return [callback(_es.to_str(i)) for i in values]
return _es.to_str(values) | Convert many records's values to str | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/mongo_model.py#L76-L84 |
wecatch/app-turbo | turbo/mongo_model.py | MixinModel.instance | def instance(cls, name):
"""Instantiate a model class according to import path
args:
name: class import path like `user.User`
return:
model instance
"""
if not cls._instance.get(name):
model_name = name.split('.')
ins_name = '.'.join(
['models', model_name[0], 'model', model_name[1]])
cls._instance[name] = cls.import_model(ins_name)()
return cls._instance[name] | python | def instance(cls, name):
"""Instantiate a model class according to import path
args:
name: class import path like `user.User`
return:
model instance
"""
if not cls._instance.get(name):
model_name = name.split('.')
ins_name = '.'.join(
['models', model_name[0], 'model', model_name[1]])
cls._instance[name] = cls.import_model(ins_name)()
return cls._instance[name] | Instantiate a model class according to import path
args:
name: class import path like `user.User`
return:
model instance | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/mongo_model.py#L116-L129 |
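The caching pattern instance() relies on, reduced to its core: resolve a name once, instantiate once, then reuse the same object (helper and cache names here are made up):

_cache = {}

def cached_instance(name, factory):
    # Memoize one instance per import-path-like name.
    if name not in _cache:
        _cache[name] = factory()
    return _cache[name]

first = cached_instance('user.User', dict)
assert cached_instance('user.User', dict) is first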
wecatch/app-turbo | turbo/mongo_model.py | MixinModel.import_model | def import_model(cls, ins_name):
"""Import model class in models package
"""
try:
package_space = getattr(cls, 'package_space')
except AttributeError:
raise ValueError('package_space not exist')
else:
return import_object(ins_name, package_space) | python | def import_model(cls, ins_name):
"""Import model class in models package
"""
try:
package_space = getattr(cls, 'package_space')
except AttributeError:
raise ValueError('package_space not exist')
else:
return import_object(ins_name, package_space) | Import model class in models package | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/mongo_model.py#L132-L140 |
wecatch/app-turbo | turbo/register.py | register_app | def register_app(app_name, app_setting, web_application_setting, mainfile, package_space):
"""insert current project root path into sys path
"""
from turbo import log
app_config.app_name = app_name
app_config.app_setting = app_setting
app_config.project_name = os.path.basename(get_base_dir(mainfile, 2))
app_config.web_application_setting.update(web_application_setting)
if app_setting.get('session_config'):
app_config.session_config.update(app_setting['session_config'])
log.getLogger(**app_setting.log)
_install_app(package_space) | python | def register_app(app_name, app_setting, web_application_setting, mainfile, package_space):
"""insert current project root path into sys path
"""
from turbo import log
app_config.app_name = app_name
app_config.app_setting = app_setting
app_config.project_name = os.path.basename(get_base_dir(mainfile, 2))
app_config.web_application_setting.update(web_application_setting)
if app_setting.get('session_config'):
app_config.session_config.update(app_setting['session_config'])
log.getLogger(**app_setting.log)
_install_app(package_space) | insert current project root path into sys path | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/register.py#L14-L25 |
wecatch/app-turbo | turbo/register.py | register_url | def register_url(url, handler, name=None, kwargs=None):
"""insert url into tornado application handlers group
:arg str url: url
:handler object handler: url mapping handler
:name reverse url name
:kwargs dict tornado handler initialize args
"""
if name is None and kwargs is None:
app_config.urls.append((url, handler))
return
if name is None:
app_config.urls.append((url, handler, kwargs))
return
app_config.urls.append((url, handler, kwargs, name)) | python | def register_url(url, handler, name=None, kwargs=None):
"""insert url into tornado application handlers group
:arg str url: url
:handler object handler: url mapping handler
:name reverse url name
:kwargs dict tornado handler initialize args
"""
if name is None and kwargs is None:
app_config.urls.append((url, handler))
return
if name is None:
app_config.urls.append((url, handler, kwargs))
return
app_config.urls.append((url, handler, kwargs, name)) | insert url into tornado application handlers group
:arg str url: url
:handler object handler: url mapping handler
:name reverse url name
:kwargs dict tornado handler initialize args | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/register.py#L28-L44
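The three tuple shapes register_url() appends to app_config.urls, shown with a placeholder handler class (HomeHandler stands in for a tornado.web.RequestHandler subclass):

class HomeHandler:  # stand-in for a real request handler
    pass

urls = []
urls.append(('/', HomeHandler))                              # url + handler
urls.append(('/doc', HomeHandler, {'db': 'demo'}))           # + init kwargs
urls.append(('/user', HomeHandler, {'db': 'demo'}, 'user'))  # + reverse-url name
print(urls)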
wecatch/app-turbo | demos/db-server/apps/base.py | BaseHandler.write_error | def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
http://tornado.readthedocs.org/en/stable/_modules/tornado/web.html#RequestHandler.write_error
"""
super(BaseHandler, self).write_error(status_code, **kwargs) | python | def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
http://tornado.readthedocs.org/en/stable/_modules/tornado/web.html#RequestHandler.write_error
"""
super(BaseHandler, self).write_error(status_code, **kwargs) | Override to implement custom error pages.
http://tornado.readthedocs.org/en/stable/_modules/tornado/web.html#RequestHandler.write_error | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/demos/db-server/apps/base.py#L55-L59 |
wecatch/app-turbo | turbo/app.py | BaseBaseHandler.parameter | def parameter(self):
'''
according to the request-method config, filter all request parameters;
if a value is invalid it is set to None
'''
method = self.request.method.lower()
arguments = self.request.arguments
files = self.request.files
rpd = {} # request parameter dict
def filter_parameter(key, tp, default=None):
if tp not in self._types:
raise ValueError(
'%s parameter expected types %s' % (key, self._types))
if not isinstance(tp, file_types):
if key not in arguments:
rpd[key] = default
return
if tp in [ObjectId, int, float, bool]:
rpd[key] = getattr(self, 'to_%s' % getattr(
tp, '__name__').lower())(self.get_argument(key))
return
if tp == basestring_type or issubclass(tp, basestring_type):
rpd[key] = self.get_argument(key, strip=False)
return
if tp == list:
rpd[key] = self.get_arguments(key)
return
if tp == file:
if key not in files:
rpd[key] = []
return
rpd[key] = self.request.files[key]
required_params = getattr(self, '_required_params', None)
if isinstance(required_params, list):
for key, tp, default in required_params:
filter_parameter(key, tp, default)
# extract method required params
method_required_params = getattr(
self, '_%s_required_params' % method, None)
if isinstance(method_required_params, list):
for key, tp, default in method_required_params:
filter_parameter(key, tp, default)
params = getattr(self, '_%s_params' % method, None)
if params is None:
return rpd
# need arguments
try:
for key, tp in params.get('need', []):
if tp == list:
filter_parameter(key, tp, [])
else:
filter_parameter(key, tp)
except ValueError as e:
app_log.error(
'%s request need arguments parse error: %s' % (method, e))
raise ValueError(e)
except Exception as e:
app_log.error(
'%s request need arguments parse error: %s' % (method, e))
raise e
# option arguments
for key, tp, default in params.get('option', []):
filter_parameter(key, tp, default)
return rpd | python | def parameter(self):
'''
according to the request-method config, filter all request parameters;
if a value is invalid it is set to None
'''
method = self.request.method.lower()
arguments = self.request.arguments
files = self.request.files
rpd = {} # request parameter dict
def filter_parameter(key, tp, default=None):
if tp not in self._types:
raise ValueError(
'%s parameter expected types %s' % (key, self._types))
if not isinstance(tp, file_types):
if key not in arguments:
rpd[key] = default
return
if tp in [ObjectId, int, float, bool]:
rpd[key] = getattr(self, 'to_%s' % getattr(
tp, '__name__').lower())(self.get_argument(key))
return
if tp == basestring_type or issubclass(tp, basestring_type):
rpd[key] = self.get_argument(key, strip=False)
return
if tp == list:
rpd[key] = self.get_arguments(key)
return
if tp == file:
if key not in files:
rpd[key] = []
return
rpd[key] = self.request.files[key]
required_params = getattr(self, '_required_params', None)
if isinstance(required_params, list):
for key, tp, default in required_params:
filter_parameter(key, tp, default)
# extract method required params
method_required_params = getattr(
self, '_%s_required_params' % method, None)
if isinstance(method_required_params, list):
for key, tp, default in method_required_params:
filter_parameter(key, tp, default)
params = getattr(self, '_%s_params' % method, None)
if params is None:
return rpd
# need arguments
try:
for key, tp in params.get('need', []):
if tp == list:
filter_parameter(key, tp, [])
else:
filter_parameter(key, tp)
except ValueError as e:
app_log.error(
'%s request need arguments parse error: %s' % (method, e))
raise ValueError(e)
except Exception as e:
app_log.error(
'%s request need arguments parse error: %s' % (method, e))
raise e
# option arguments
for key, tp, default in params.get('option', []):
filter_parameter(key, tp, default)
return rpd | according to the request-method config, filter all request parameters;
if a value is invalid it is set to None | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/app.py#L173-L250
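A hypothetical per-handler configuration consumed by parameter() above: 'need' entries are (key, type) pairs, 'option' entries are (key, type, default) triples, here for a GET request (the keys and defaults are made up):

_get_params = {
    'need': [('keyword', str)],
    'option': [('page', int, 1), ('tags', list, [])],
}
# A request like GET /search?keyword=x&page=2 would then yield
# {'keyword': 'x', 'page': 2, 'tags': []}.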
wecatch/app-turbo | turbo/app.py | BaseBaseHandler.wo_resp | def wo_resp(self, resp):
"""
Can be overridden to produce a different response style.
"""
if self._data is not None:
resp['res'] = self.to_str(self._data)
return self.wo_json(resp) | python | def wo_resp(self, resp):
"""
Can be overridden to produce a different response style.
"""
if self._data is not None:
resp['res'] = self.to_str(self._data)
return self.wo_json(resp) | Can be overridden to produce a different response style. | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/app.py#L307-L314
wecatch/app-turbo | turbo/model.py | BaseBaseModel.insert | def insert(self, doc_or_docs, **kwargs):
"""Insert method
"""
check = kwargs.pop('check', True)
if isinstance(doc_or_docs, dict):
if check is True:
doc_or_docs = self._valid_record(doc_or_docs)
result = self.__collect.insert_one(doc_or_docs, **kwargs)
return result.inserted_id
else:
if check is True:
for d in doc_or_docs:
d = self._valid_record(d)
result = self.__collect.insert_many(doc_or_docs, **kwargs)
return result.inserted_ids | python | def insert(self, doc_or_docs, **kwargs):
"""Insert method
"""
check = kwargs.pop('check', True)
if isinstance(doc_or_docs, dict):
if check is True:
doc_or_docs = self._valid_record(doc_or_docs)
result = self.__collect.insert_one(doc_or_docs, **kwargs)
return result.inserted_id
else:
if check is True:
for d in doc_or_docs:
d = self._valid_record(d)
result = self.__collect.insert_many(doc_or_docs, **kwargs)
return result.inserted_ids | Insert method | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L35-L49 |
wecatch/app-turbo | turbo/model.py | BaseBaseModel.save | def save(self, to_save, **kwargs):
"""save method
"""
check = kwargs.pop('check', True)
if check:
self._valid_record(to_save)
if '_id' in to_save:
self.__collect.replace_one(
{'_id': to_save['_id']}, to_save, **kwargs)
return to_save['_id']
else:
result = self.__collect.insert_one(to_save, **kwargs)
return result.inserted_id | python | def save(self, to_save, **kwargs):
"""save method
"""
check = kwargs.pop('check', True)
if check:
self._valid_record(to_save)
if '_id' in to_save:
self.__collect.replace_one(
{'_id': to_save['_id']}, to_save, **kwargs)
return to_save['_id']
else:
result = self.__collect.insert_one(to_save, **kwargs)
return result.inserted_id | save method | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L51-L63 |
wecatch/app-turbo | turbo/model.py | BaseBaseModel.update | def update(self, filter_, document, multi=False, **kwargs):
"""update method
"""
self._valide_update_document(document)
if multi:
return self.__collect.update_many(filter_, document, **kwargs)
else:
return self.__collect.update_one(filter_, document, **kwargs) | python | def update(self, filter_, document, multi=False, **kwargs):
"""update method
"""
self._valide_update_document(document)
if multi:
return self.__collect.update_many(filter_, document, **kwargs)
else:
return self.__collect.update_one(filter_, document, **kwargs) | update method | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L65-L72 |
wecatch/app-turbo | turbo/model.py | BaseBaseModel.remove | def remove(self, filter_=None, **kwargs):
"""collection remove method
warning:
if you want to remove all documents,
you must override the _remove_all method, to make sure
you understand the consequences of what you do
"""
if isinstance(filter_, dict) and filter_ == {}:
raise ValueError('not allowed remove all documents')
if filter_ is None:
raise ValueError('not allowed remove all documents')
if kwargs.pop('multi', False) is True:
return self.__collect.delete_many(filter_, **kwargs)
else:
return self.__collect.delete_one(filter_, **kwargs) | python | def remove(self, filter_=None, **kwargs):
"""collection remove method
warning:
if you want to remove all documents,
you must override the _remove_all method, to make sure
you understand the consequences of what you do
"""
if isinstance(filter_, dict) and filter_ == {}:
raise ValueError('not allowed remove all documents')
if filter_ is None:
raise ValueError('not allowed remove all documents')
if kwargs.pop('multi', False) is True:
return self.__collect.delete_many(filter_, **kwargs)
else:
return self.__collect.delete_one(filter_, **kwargs) | collection remove method
warning:
if you want to remove all documents,
you must override the _remove_all method, to make sure
you understand the consequences of what you do | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L74-L90
wecatch/app-turbo | turbo/model.py | BaseBaseModel.insert_one | def insert_one(self, doc_or_docs, **kwargs):
"""Insert method
"""
check = kwargs.pop('check', True)
if check is True:
self._valid_record(doc_or_docs)
return self.__collect.insert_one(doc_or_docs, **kwargs) | python | def insert_one(self, doc_or_docs, **kwargs):
"""Insert method
"""
check = kwargs.pop('check', True)
if check is True:
self._valid_record(doc_or_docs)
return self.__collect.insert_one(doc_or_docs, **kwargs) | Insert method | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L92-L99 |
wecatch/app-turbo | turbo/model.py | BaseBaseModel.insert_many | def insert_many(self, doc_or_docs, **kwargs):
"""Insert method
"""
check = kwargs.pop('check', True)
if check is True:
for i in doc_or_docs:
i = self._valid_record(i)
return self.__collect.insert_many(doc_or_docs, **kwargs) | python | def insert_many(self, doc_or_docs, **kwargs):
"""Insert method
"""
check = kwargs.pop('check', True)
if check is True:
for i in doc_or_docs:
i = self._valid_record(i)
return self.__collect.insert_many(doc_or_docs, **kwargs) | Insert method | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L101-L109 |
wecatch/app-turbo | turbo/model.py | BaseBaseModel.find_one | def find_one(self, filter_=None, *args, **kwargs):
"""find_one method
"""
wrapper = kwargs.pop('wrapper', False)
if wrapper is True:
return self._wrapper_find_one(filter_, *args, **kwargs)
return self.__collect.find_one(filter_, *args, **kwargs) | python | def find_one(self, filter_=None, *args, **kwargs):
"""find_one method
"""
wrapper = kwargs.pop('wrapper', False)
if wrapper is True:
return self._wrapper_find_one(filter_, *args, **kwargs)
return self.__collect.find_one(filter_, *args, **kwargs) | find_one method | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L111-L118 |
wecatch/app-turbo | turbo/model.py | BaseBaseModel.find | def find(self, *args, **kwargs):
"""collection find method
"""
wrapper = kwargs.pop('wrapper', False)
if wrapper is True:
return self._wrapper_find(*args, **kwargs)
return self.__collect.find(*args, **kwargs) | python | def find(self, *args, **kwargs):
"""collection find method
"""
wrapper = kwargs.pop('wrapper', False)
if wrapper is True:
return self._wrapper_find(*args, **kwargs)
return self.__collect.find(*args, **kwargs) | collection find method | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L120-L128 |
wecatch/app-turbo | turbo/model.py | BaseBaseModel._wrapper_find_one | def _wrapper_find_one(self, filter_=None, *args, **kwargs):
"""Convert record to a dict that has no key error
"""
return self.__collect.find_one(filter_, *args, **kwargs) | python | def _wrapper_find_one(self, filter_=None, *args, **kwargs):
"""Convert record to a dict that has no key error
"""
return self.__collect.find_one(filter_, *args, **kwargs) | Convert record to a dict that has no key error | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L131-L134 |
wecatch/app-turbo | turbo/model.py | BaseBaseModel.update_one | def update_one(self, filter_, document, **kwargs):
"""update method
"""
self._valide_update_document(document)
return self.__collect.update_one(filter_, document, **kwargs) | python | def update_one(self, filter_, document, **kwargs):
"""update method
"""
self._valide_update_document(document)
return self.__collect.update_one(filter_, document, **kwargs) | update method | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L142-L146 |
wecatch/app-turbo | turbo/model.py | BaseBaseModel.find_by_id | def find_by_id(self, _id, projection=None):
"""find record by _id
"""
if isinstance(_id, list) or isinstance(_id, tuple):
return list(self.__collect.find(
{'_id': {'$in': [self._to_primary_key(i) for i in _id]}}, projection))
document_id = self._to_primary_key(_id)
if document_id is None:
return None
return self.__collect.find_one({'_id': document_id}, projection) | python | def find_by_id(self, _id, projection=None):
"""find record by _id
"""
if isinstance(_id, list) or isinstance(_id, tuple):
return list(self.__collect.find(
{'_id': {'$in': [self._to_primary_key(i) for i in _id]}}, projection))
document_id = self._to_primary_key(_id)
if document_id is None:
return None
return self.__collect.find_one({'_id': document_id}, projection) | find record by _id | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L161-L173 |
wecatch/app-turbo | turbo/model.py | BaseModel.create_model | def create_model(cls, name, field=None):
"""dynamic create new model
:arg field: table field; if field is None or {}, this model cannot use the create method
"""
if field:
attrs = {'name': name, 'field': field}
else:
attrs = {'name': name, 'field': {'_id': ObjectId()}}
return type(str(name), (cls, ), attrs)() | python | def create_model(cls, name, field=None):
"""dynamic create new model
:arg field: table field; if field is None or {}, this model cannot use the create method
"""
if field:
attrs = {'name': name, 'field': field}
else:
attrs = {'name': name, 'field': {'_id': ObjectId()}}
return type(str(name), (cls, ), attrs)() | dynamic create new model
:arg field: table field; if field is None or {}, this model cannot use the create method | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/model.py#L232-L241
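A hypothetical usage sketch of the factory above (BaseModel is the class shown here; the collection name and field dict are made up):

# Build a model class for a 'tag' collection at runtime; the field dict
# doubles as the schema that _valid_record() checks against.
tag_model = BaseModel.create_model('tag', field={'name': '', 'count': 0})
# Omitting field falls back to a placeholder {'_id': ObjectId()} schema,
# so such a model cannot meaningfully create records.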
wecatch/app-turbo | turbo/util.py | to_list_str | def to_list_str(value, encode=None):
"""recursively convert list content into string
:arg list value: The list that need to be converted.
:arg function encode: Function used to encode object.
"""
result = []
for v in value:
if isinstance(v, dict):
result.append(to_dict_str(v, encode))
continue
if isinstance(v, list):
result.append(to_list_str(v, encode))
continue
if encode:
result.append(encode(v))
else:
result.append(default_encode(v))
return result | python | def to_list_str(value, encode=None):
"""recursively convert list content into string
:arg list value: The list that needs to be converted.
:arg function encode: Function used to encode object.
"""
result = []
for index, v in enumerate(value):
if isinstance(v, dict):
result.append(to_dict_str(v, encode))
continue
if isinstance(v, list):
result.append(to_list_str(v, encode))
continue
if encode:
result.append(encode(v))
else:
result.append(default_encode(v))
return result | recursively convert list content into string
:arg list value: The list that needs to be converted.
:arg function encode: Function used to encode object. | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/util.py#L29-L50 |
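
A small, hedged example of to_list_str; it assumes turbo (and pymongo/bson) is installed, and the exact datetime rendering comes from format_time, which is not shown in these rows:

from datetime import datetime
from bson.objectid import ObjectId
from turbo.util import to_list_str

rows = [{'_id': ObjectId(), 'created': datetime(2020, 1, 1)}, [ObjectId()], 'plain', 42]
print(to_list_str(rows))
# nested dicts go through to_dict_str and nested lists recurse; ObjectId
# values become hex strings, datetimes go through format_time, and other
# primitives ('plain', 42) pass through default_encode unchanged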
wecatch/app-turbo | turbo/util.py | to_dict_str | def to_dict_str(origin_value, encode=None):
"""recursively convert dict content into string
"""
value = copy.deepcopy(origin_value)
for k, v in value.items():
if isinstance(v, dict):
value[k] = to_dict_str(v, encode)
continue
if isinstance(v, list):
value[k] = to_list_str(v, encode)
continue
if encode:
value[k] = encode(v)
else:
value[k] = default_encode(v)
return value | python | def to_dict_str(origin_value, encode=None):
"""recursively convert dict content into string
"""
value = copy.deepcopy(origin_value)
for k, v in value.items():
if isinstance(v, dict):
value[k] = to_dict_str(v, encode)
continue
if isinstance(v, list):
value[k] = to_list_str(v, encode)
continue
if encode:
value[k] = encode(v)
else:
value[k] = default_encode(v)
return value | recursively convert dict content into string | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/util.py#L53-L71 |
wecatch/app-turbo | turbo/util.py | default_encode | def default_encode(v):
"""convert ObjectId, datetime, date into string
"""
if isinstance(v, ObjectId):
return unicode_type(v)
if isinstance(v, datetime):
return format_time(v)
if isinstance(v, date):
return format_time(v)
return v | python | def default_encode(v):
"""convert ObjectId, datetime, date into string
"""
if isinstance(v, ObjectId):
return unicode_type(v)
if isinstance(v, datetime):
return format_time(v)
if isinstance(v, date):
return format_time(v)
return v | convert ObjectId, datetime, date into string | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/util.py#L74-L86 |
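
A hedged example of default_encode on each branch; the date output depends on format_time, which is not shown here:

from datetime import date
from bson.objectid import ObjectId
from turbo.util import default_encode

print(default_encode(ObjectId('ffffffffffffffffffffffff')))  # 'ffffffffffffffffffffffff'
print(default_encode(date(2020, 1, 1)))  # rendered by format_time
print(default_encode(42))                # non-matching types pass through untouched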
wecatch/app-turbo | turbo/util.py | to_str | def to_str(v, encode=None):
"""convert any list, dict, iterable and primitives object to string
"""
if isinstance(v, basestring_type):
return v
if isinstance(v, dict):
return to_dict_str(v, encode)
if isinstance(v, Iterable):
return to_list_str(v, encode)
if encode:
return encode(v)
else:
return default_encode(v) | python | def to_str(v, encode=None):
"""convert any list, dict, iterable and primitives object to string
"""
if isinstance(v, basestring_type):
return v
if isinstance(v, dict):
return to_dict_str(v, encode)
if isinstance(v, Iterable):
return to_list_str(v, encode)
if encode:
return encode(v)
else:
return default_encode(v) | convert any list, dict, iterable, or primitive object to a string | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/util.py#L89-L104 |
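
A hedged example of to_str's dispatch order. One caveat worth noting: the Iterable branch also catches tuples, sets, and generators, all of which come back as lists:

from turbo.util import to_str

print(to_str('already a string'))       # basestring check wins: returned as-is
print(to_str({'n': 1, 'tags': ['a']}))  # dicts are dispatched to to_dict_str
print(to_str((1, 2, 3)))                # any other Iterable goes to to_list_str -> a list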
wecatch/app-turbo | turbo/util.py | to_objectid | def to_objectid(objid):
"""字符对象转换成objectid
"""
if objid is None:
return objid
try:
objid = ObjectId(objid)
except Exception:
util_log.error('%s is invalid objectid' % objid)
return None
return objid | python | def to_objectid(objid):
"""字符对象转换成objectid
"""
if objid is None:
return objid
try:
objid = ObjectId(objid)
except Exception:
util_log.error('%s is invalid objectid' % objid)
return None
return objid | convert a string object into an ObjectId | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/util.py#L113-L125 |
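
A short example of to_objectid's three outcomes:

from turbo.util import to_objectid

print(to_objectid('ffffffffffffffffffffffff'))  # 24 hex chars -> ObjectId
print(to_objectid(None))                        # None is passed straight back
print(to_objectid('nope'))                      # logs '... is invalid objectid', returns None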
wecatch/app-turbo | turbo/util.py | get_base_dir | def get_base_dir(currfile, dir_level_num=3):
"""
find the base path dir_level_num levels above currfile
"""
root_path = os.path.abspath(currfile)
for i in range(0, dir_level_num):
root_path = os.path.dirname(root_path)
return root_path | python | def get_base_dir(currfile, dir_level_num=3):
"""
find the base path dir_level_num levels above currfile
"""
root_path = os.path.abspath(currfile)
for i in range(0, dir_level_num):
root_path = os.path.dirname(root_path)
return root_path | find the base path dir_level_num levels above currfile | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/util.py#L188-L196 |
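
A worked example of get_base_dir: os.path.dirname is applied dir_level_num times to the absolute path.

from turbo.util import get_base_dir

print(get_base_dir('/srv/app/apps/user/views.py', dir_level_num=3))  # '/srv/app'
print(get_base_dir('/srv/app/apps/user/views.py', dir_level_num=1))  # '/srv/app/apps/user'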
wecatch/app-turbo | turbo/util.py | join_sys_path | def join_sys_path(currfile, dir_level_num=3):
"""
find the base path and append it to sys.path
"""
if os.path.isdir(currfile):
root_path = currfile
else:
root_path = get_base_dir(currfile, dir_level_num)
sys.path.append(root_path) | python | def join_sys_path(currfile, dir_level_num=3):
"""
find the base path and append it to sys.path
"""
if os.path.isdir(currfile):
root_path = currfile
else:
root_path = get_base_dir(currfile, dir_level_num)
sys.path.append(root_path) | find the base path and append it to sys.path | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/util.py#L199-L208 |
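
A hedged sketch of join_sys_path with illustrative paths; an existing directory takes the first branch, anything else is resolved through get_base_dir first:

import sys
from turbo.util import join_sys_path

join_sys_path('/srv/app')                     # an existing directory is appended directly
join_sys_path('/srv/app/apps/user/views.py')  # a file path goes through get_base_dir
assert '/srv/app' in sys.path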
wecatch/app-turbo | turbo/util.py | camel_to_underscore | def camel_to_underscore(name):
"""
convert CamelCase style to under_score_case
"""
as_list = []
length = len(name)
for index, i in enumerate(name):
if index != 0 and index != length - 1 and i.isupper():
as_list.append('_%s' % i.lower())
else:
as_list.append(i.lower())
return ''.join(as_list) | python | def camel_to_underscore(name):
"""
convert CamelCase style to under_score_case
"""
as_list = []
length = len(name)
for index, i in enumerate(name):
if index != 0 and index != length - 1 and i.isupper():
as_list.append('_%s' % i.lower())
else:
as_list.append(i.lower())
return ''.join(as_list) | convert CamelCase style to under_score_case | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/util.py#L224-L236 |
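
Two quick examples of camel_to_underscore; note that consecutive capitals each receive their own underscore, and the first and last characters are only lowercased, never prefixed:

from turbo.util import camel_to_underscore

print(camel_to_underscore('CamelCase'))   # 'camel_case'
print(camel_to_underscore('HTTPServer'))  # 'h_t_t_p_server'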
wecatch/app-turbo | turbo/util.py | to_basestring | def to_basestring(value):
"""Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use either and should return the type
the user supplied. In python3, the two types are not interchangeable,
so this method is needed to convert byte strings to unicode.
"""
if isinstance(value, _BASESTRING_TYPES):
return value
if not isinstance(value, bytes):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.decode("utf-8") | python | def to_basestring(value):
"""Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use either and should return the type
the user supplied. In python3, the two types are not interchangeable,
so this method is needed to convert byte strings to unicode.
"""
if isinstance(value, _BASESTRING_TYPES):
return value
if not isinstance(value, bytes):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value)
)
return value.decode("utf-8") | Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use either and should return the type
the user supplied. In python3, the two types are not interchangeable,
so this method is needed to convert byte strings to unicode. | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/util.py#L364-L379 |
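
A short example of to_basestring's contract as implemented above:

from turbo.util import to_basestring

assert to_basestring(b'hello') == 'hello'   # bytes are decoded as UTF-8
assert to_basestring('hello') == 'hello'    # str (and None) pass straight through
try:
    to_basestring(3.14)
except TypeError as e:
    print(e)  # Expected bytes, unicode, or None; got <class 'float'>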
wecatch/app-turbo | turbo/httputil.py | encode_http_params | def encode_http_params(**kw):
'''
url parameter encoding
'''
try:
_fo = lambda k, v: '{name}={value}'.format(
name=k, value=to_basestring(quote(v)))
except Exception:
_fo = lambda k, v: '%s=%s' % (k, to_basestring(quote(v)))
_en = utf8
return '&'.join([_fo(k, _en(v)) for k, v in kw.items() if not is_empty(v)]) | python | def encode_http_params(**kw):
'''
url parameter encoding
'''
try:
_fo = lambda k, v: '{name}={value}'.format(
name=k, value=to_basestring(quote(v)))
except Exception:
_fo = lambda k, v: '%s=%s' % (k, to_basestring(quote(v)))
_en = utf8
return '&'.join([_fo(k, _en(v)) for k, v in kw.items() if not is_empty(v)]) | url paremeter encode | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/httputil.py#L25-L37 |
wecatch/app-turbo | turbo/log.py | _init_file_logger | def _init_file_logger(logger, level, log_path, log_size, log_count):
"""
each logger gets only one RotatingFileHandler per level
"""
if level not in [logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]:
level = logging.DEBUG
for h in logger.handlers:
if isinstance(h, logging.handlers.RotatingFileHandler):
if h.level == level:
return
fh = logging.handlers.RotatingFileHandler(
log_path, maxBytes=log_size, backupCount=log_count)
fh.setLevel(level)
fh.setFormatter(_formatter)
logger.addHandler(fh) | python | def _init_file_logger(logger, level, log_path, log_size, log_count):
"""
each logger gets only one RotatingFileHandler per level
"""
if level not in [logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]:
level = logging.DEBUG
for h in logger.handlers:
if isinstance(h, logging.handlers.RotatingFileHandler):
if h.level == level:
return
fh = logging.handlers.RotatingFileHandler(
log_path, maxBytes=log_size, backupCount=log_count)
fh.setLevel(level)
fh.setFormatter(_formatter)
logger.addHandler(fh) | each logger gets only one RotatingFileHandler per level | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/log.py#L21-L37 |
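
A hedged demonstration of the dedup guard; _init_file_logger is private, and _formatter is module state not shown in these rows:

import logging
import logging.handlers
from turbo.log import _init_file_logger  # private helper, used here for illustration

logger = logging.getLogger('demo')
_init_file_logger(logger, logging.INFO, '/tmp/demo.log', 1024 * 1024, 3)
_init_file_logger(logger, logging.INFO, '/tmp/demo.log', 1024 * 1024, 3)
assert len(logger.handlers) == 1  # the second call hit the early return: one handler per level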
wecatch/app-turbo | turbo/session.py | Session._processor | def _processor(self):
"""Application processor to setup session for every request"""
self.store.cleanup(self._config.timeout)
self._load() | python | def _processor(self):
"""Application processor to setup session for every request"""
self.store.cleanup(self._config.timeout)
self._load() | Application processor to setup session for every request | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/session.py#L94-L97 |
wecatch/app-turbo | turbo/session.py | Session._load | def _load(self):
"""Load the session from the store, by the id from cookie"""
self.session_id = self._session_object.get_session_id()
# protection against session_id tampering
if self.session_id and not self._valid_session_id(self.session_id):
self.session_id = None
if self.session_id:
d = self.store[self.session_id]
if isinstance(d, dict) and d:
self.update(d)
if not self.session_id:
self.session_id = self._session_object.generate_session_id()
if not self._data:
if self._initializer and isinstance(self._initializer, dict):
self.update(deepcopy(self._initializer))
self._session_object.set_session_id(self.session_id) | python | def _load(self):
"""Load the session from the store, by the id from cookie"""
self.session_id = self._session_object.get_session_id()
# protection against session_id tampering
if self.session_id and not self._valid_session_id(self.session_id):
self.session_id = None
if self.session_id:
d = self.store[self.session_id]
if isinstance(d, dict) and d:
self.update(d)
if not self.session_id:
self.session_id = self._session_object.generate_session_id()
if not self._data:
if self._initializer and isinstance(self._initializer, dict):
self.update(deepcopy(self._initializer))
self._session_object.set_session_id(self.session_id) | Load the session from the store by the id from the cookie | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/session.py#L99-L120 |
wecatch/app-turbo | turbo/session.py | SessionObject.generate_session_id | def generate_session_id(self):
"""Generate a random id for session"""
secret_key = self._config.secret_key
while True:
rand = os.urandom(16)
now = time.time()
session_id = sha1(utf8("%s%s%s%s" % (
rand, now, self.handler.request.remote_ip, secret_key)))
session_id = session_id.hexdigest()
if session_id not in self.store:
break
return session_id | python | def generate_session_id(self):
"""Generate a random id for session"""
secret_key = self._config.secret_key
while True:
rand = os.urandom(16)
now = time.time()
session_id = sha1(utf8("%s%s%s%s" % (
rand, now, self.handler.request.remote_ip, secret_key)))
session_id = session_id.hexdigest()
if session_id not in self.store:
break
return session_id | Generate a random id for session | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/session.py#L159-L171 |
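
The same collision-checked pattern as a standalone sketch, with the handler and store dependencies replaced by plain arguments (make_session_id is a name introduced here):

import os
import time
from hashlib import sha1

def make_session_id(store, remote_ip, secret_key):
    while True:
        raw = '%s%s%s%s' % (os.urandom(16), time.time(), remote_ip, secret_key)
        session_id = sha1(raw.encode('utf-8')).hexdigest()
        if session_id not in store:  # retry on the (vanishingly unlikely) collision
            return session_id

print(make_session_id(set(), '127.0.0.1', 's3cret'))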
wecatch/app-turbo | turbo/session.py | Store.encode | def encode(self, session_data):
"""encodes session dict as a string"""
pickled = pickle.dumps(session_data)
return to_basestring(encodebytes(pickled)) | python | def encode(self, session_data):
"""encodes session dict as a string"""
pickled = pickle.dumps(session_data)
return to_basestring(encodebytes(pickled)) | encodes session dict as a string | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/session.py#L231-L234 |
wecatch/app-turbo | turbo/session.py | Store.decode | def decode(self, session_data):
"""decodes the data to get back the session dict """
pickled = decodebytes(utf8(session_data))
return pickle.loads(pickled) | python | def decode(self, session_data):
"""decodes the data to get back the session dict """
pickled = decodebytes(utf8(session_data))
return pickle.loads(pickled) | decodes the data to get back the session dict | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/session.py#L236-L239 |
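
A standalone illustration of the encode/decode round trip used by the two methods above. One caution the source leaves implicit: pickle.loads on untrusted data can execute arbitrary code, so the token must only ever come from the trusted store.

import pickle
from base64 import decodebytes, encodebytes

session_data = {'uid': 42, 'name': 'alice'}
token = encodebytes(pickle.dumps(session_data)).decode('ascii')  # text-safe token
assert pickle.loads(decodebytes(token.encode('ascii'))) == session_data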