repository_name (string, 5-67 chars) | func_path_in_repository (string, 4-234 chars) | func_name (string, 0-314 chars) | whole_func_string (string, 52-3.87M chars) | language (6 classes) | func_code_string (string, 52-3.87M chars) | func_documentation_string (string, 1-47.2k chars) | func_code_url (string, 85-339 chars)
---|---|---|---|---|---|---|---|
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/source.py | AtlasChangeSource.build_api_struct | def build_api_struct(self):
"""
Calls the parent's method and just adds the additional field 'action', which
is required to form the structure that the Atlas API accepts.
"""
data = super(AtlasChangeSource, self).build_api_struct()
data.update({"action": self._action})
return data | python | def build_api_struct(self):
"""
Calls the parent's method and just adds the additional field 'action', which
is required to form the structure that the Atlas API accepts.
"""
data = super(AtlasChangeSource, self).build_api_struct()
data.update({"action": self._action})
return data | Calls the parent's method and just adds the additional field 'action', which
is required to form the structure that the Atlas API accepts. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/source.py#L226-L233 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/measurement.py | AtlasMeasurement.add_option | def add_option(self, **options):
"""
Adds an option and its value to the class as an attribute and stores it
to the used options set.
"""
for option, value in options.items():
setattr(self, option, value)
self._store_option(option) | python | def add_option(self, **options):
"""
Adds an option and its value to the class as an attribute and stores it
to the used options set.
"""
for option, value in options.items():
setattr(self, option, value)
self._store_option(option) | Adds an option and its value to the class as an attribute and stores it
to the used options set. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/measurement.py#L60-L67 |
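A minimal usage sketch of `add_option()`, assuming the `Ping` measurement class exported by `ripe.atlas.cousteau` (a subclass of `AtlasMeasurement`); the target and option values are illustrative:

```python
from ripe.atlas.cousteau import Ping

# Options passed to the constructor and to add_option() are stored as attributes
# and remembered in the used-options set.
ping = Ping(af=4, target="www.ripe.net", description="ping example")
ping.add_option(interval=300, packets=3)

print(ping.interval)  # 300
print(ping.packets)   # 3
```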
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/measurement.py | AtlasMeasurement._init_required_options | def _init_required_options(self, **kwargs):
"""
Initialize the required options as class members. The value will be
either None or the specified value in the kwargs or __init__. The logic
here is to make the required options accessible for editing after a class
instance has been created.
"""
for field in self.required_options:
setattr(self, field, kwargs.get(field))
self._store_option(field) | python | def _init_required_options(self, **kwargs):
"""
Initialize the required options as class members. The value will be
either None or the specified value in the kwargs or __init__. The logic
here is to make the required options accessible for editing after a class
instance has been created.
"""
for field in self.required_options:
setattr(self, field, kwargs.get(field))
self._store_option(field) | Initialize the required options as class members. The value will be
either None or the specified value in the kwargs or __init__. The logic
here is to make the required options accessible for editing after a class
instance has been created. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/measurement.py#L69-L78 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/measurement.py | AtlasMeasurement.clean | def clean(self):
"""
Cleans/checks user entered data making sure required options are at
least present. This might save some queries from being sent if
they are totally wrong.
"""
# make sure the correct measurement type is set.
if not self.measurement_type:
log = "Please define a valid measurement type."
raise MalFormattedMeasurement(log)
# make sure the required fields are set.
for roption in self.required_options:
if getattr(self, roption, None) is None:
log = "%s Measurement field: <%s> is required" % (
self.__class__.__name__, roption
)
raise MalFormattedMeasurement(log) | python | def clean(self):
"""
Cleans/checks user entered data making sure required options are at
least present. This might save some queries from being sent if
they are totally wrong.
"""
# make sure the correct measurement type is set.
if not self.measurement_type:
log = "Please define a valid measurement type."
raise MalFormattedMeasurement(log)
# make sure the required fields are set.
for roption in self.required_options:
if getattr(self, roption, None) is None:
log = "%s Measurement field: <%s> is required" % (
self.__class__.__name__, roption
)
raise MalFormattedMeasurement(log) | Cleans/checks user entered data making sure required options are at
least present. This might save some queries from being sent if
they are totally wrong. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/measurement.py#L80-L98 |
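A short sketch of the validation behaviour in `clean()`: building the API structure for a measurement that is missing required options raises the library's `MalFormattedMeasurement` error (caught generically below to avoid assuming its import path); the `Ping` class is assumed from the same package:

```python
from ripe.atlas.cousteau import Ping

incomplete = Ping(af=4)  # no target or description supplied

try:
    incomplete.build_api_struct()  # calls clean() internally
except Exception as exc:  # MalFormattedMeasurement in the library
    print("Validation failed:", exc)
```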
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/measurement.py | AtlasMeasurement.v2_translator | def v2_translator(self, option):
"""
This is a temporary function that helps move from the v1 API to v2 without
breaking already-running scripts, keeping backwards compatibility.
Translates an option name from API v1 to its renamed equivalent in the v2 API.
"""
new_option = option
new_value = getattr(self, option)
renaming_pairs = {
"dontfrag": "dont_fragment",
"maxhops": "max_hops",
"firsthop": "first_hop",
"use_NSID": "set_nsid_bit",
"cd": "set_cd_bit",
"do": "set_do_bit",
"qbuf": "include_qbuf",
"recursion_desired": "set_rd_bit",
"noabuf": "include_abuf"
}
if option in renaming_pairs.keys():
warninglog = (
"DeprecationWarning: {0} option has been deprecated and "
"renamed to {1}."
).format(option, renaming_pairs[option])
print(warninglog)
new_option = renaming_pairs[option]
# noabuf was changed to include_abuf so we need a double-negative
if option == "noabuf":
new_value = not new_value
return new_option, new_value | python | def v2_translator(self, option):
"""
This is a temporary function that helps move from the v1 API to v2 without
breaking already-running scripts, keeping backwards compatibility.
Translates an option name from API v1 to its renamed equivalent in the v2 API.
"""
new_option = option
new_value = getattr(self, option)
renaming_pairs = {
"dontfrag": "dont_fragment",
"maxhops": "max_hops",
"firsthop": "first_hop",
"use_NSID": "set_nsid_bit",
"cd": "set_cd_bit",
"do": "set_do_bit",
"qbuf": "include_qbuf",
"recursion_desired": "set_rd_bit",
"noabuf": "include_abuf"
}
if option in renaming_pairs.keys():
warninglog = (
"DeprecationWarning: {0} option has been deprecated and "
"renamed to {1}."
).format(option, renaming_pairs[option])
print(warninglog)
new_option = renaming_pairs[option]
# noabuf was changed to include_abuf so we need a double-negative
if option == "noabuf":
new_value = not new_value
return new_option, new_value | This is a temporary function that helps move from the v1 API to v2 without
breaking already-running scripts, keeping backwards compatibility.
Translates an option name from API v1 to its renamed equivalent in the v2 API. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/measurement.py#L100-L132 |
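A standalone sketch of the renaming logic above, useful for seeing how a v1 option maps to its v2 name (note the inverted boolean for `noabuf`):

```python
RENAMING_PAIRS = {
    "dontfrag": "dont_fragment",
    "maxhops": "max_hops",
    "firsthop": "first_hop",
    "use_NSID": "set_nsid_bit",
    "cd": "set_cd_bit",
    "do": "set_do_bit",
    "qbuf": "include_qbuf",
    "recursion_desired": "set_rd_bit",
    "noabuf": "include_abuf",
}

def translate(option, value):
    """Return the v2 option name and value for a v1 option/value pair."""
    new_option = RENAMING_PAIRS.get(option, option)
    # 'noabuf' became 'include_abuf', so the boolean meaning flips.
    if option == "noabuf":
        value = not value
    return new_option, value

print(translate("maxhops", 16))   # ('max_hops', 16)
print(translate("noabuf", True))  # ('include_abuf', False)
```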
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/measurement.py | AtlasMeasurement.build_api_struct | def build_api_struct(self):
"""
Calls the clean method of the class and returns the info in a
structure that the Atlas API accepts.
"""
self.clean()
data = {"type": self.measurement_type}
# add all options
for option in self.used_options:
option_key, option_value = self.v2_translator(option)
data.update({option_key: option_value})
return data | python | def build_api_struct(self):
"""
Calls the clean method of the class and returns the info in a
structure that the Atlas API accepts.
"""
self.clean()
data = {"type": self.measurement_type}
# add all options
for option in self.used_options:
option_key, option_value = self.v2_translator(option)
data.update({option_key: option_value})
return data | Calls the clean method of the class and returns the info in a
structure that the Atlas API accepts. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/measurement.py#L134-L147 |
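A sketch of the structure `build_api_struct()` returns for a fully specified measurement, again assuming the `Ping` class; the exact keys depend on the options that were set:

```python
from ripe.atlas.cousteau import Ping

ping = Ping(af=4, target="www.ripe.net", description="ping example")
data = ping.build_api_struct()

print(data)
# Roughly: {'type': 'ping', 'af': 4, 'target': 'www.ripe.net',
#           'description': 'ping example'}
```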
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/stream.py | AtlasStream.connect | def connect(self):
"""Initiate the channel we want to start streams from."""
self.socketIO = SocketIO(
host=self.iosocket_server,
port=80,
resource=self.iosocket_resource,
proxies=self.proxies,
headers=self.headers,
transports=["websocket"],
Namespace=AtlasNamespace,
)
self.socketIO.on(self.EVENT_NAME_ERROR, self.handle_error) | python | def connect(self):
"""Initiate the channel we want to start streams from."""
self.socketIO = SocketIO(
host=self.iosocket_server,
port=80,
resource=self.iosocket_resource,
proxies=self.proxies,
headers=self.headers,
transports=["websocket"],
Namespace=AtlasNamespace,
)
self.socketIO.on(self.EVENT_NAME_ERROR, self.handle_error) | Initiate the channel we want to start streams from. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/stream.py#L104-L116 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/stream.py | AtlasStream.bind_channel | def bind_channel(self, channel, callback):
"""Bind the given channel to the given callback."""
# Remove the following list when deprecation time expires
if channel in self.CHANNELS:
warning = (
"The event name '{}' will soon be deprecated. Use "
"the real event name '{}' instead."
).format(channel, self.CHANNELS[channel])
self.handle_error(warning)
channel = self.CHANNELS[channel]
# -------------------------------------------------------
if channel == self.EVENT_NAME_ERROR:
self.error_callback = callback
elif channel == self.EVENT_NAME_RESULTS:
self.socketIO.on(channel, partial(self.unpack_results, callback))
else:
self.socketIO.on(channel, callback) | python | def bind_channel(self, channel, callback):
"""Bind the given channel to the given callback."""
# Remove the following list when deprecation time expires
if channel in self.CHANNELS:
warning = (
"The event name '{}' will soon be deprecated. Use "
"the real event name '{}' instead."
).format(channel, self.CHANNELS[channel])
self.handle_error(warning)
channel = self.CHANNELS[channel]
# -------------------------------------------------------
if channel == self.EVENT_NAME_ERROR:
self.error_callback = callback
elif channel == self.EVENT_NAME_RESULTS:
self.socketIO.on(channel, partial(self.unpack_results, callback))
else:
self.socketIO.on(channel, callback) | Bind the given channel to the given callback. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/stream.py#L130-L149 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/stream.py | AtlasStream.start_stream | def start_stream(self, stream_type, **stream_parameters):
"""Starts a new stream of the given type with the given parameters."""
if stream_type:
self.subscribe(stream_type, **stream_parameters)
else:
self.handle_error("You need to set a stream type") | python | def start_stream(self, stream_type, **stream_parameters):
"""Starts a new stream of the given type with the given parameters."""
if stream_type:
self.subscribe(stream_type, **stream_parameters)
else:
self.handle_error("You need to set a stream type") | Starts a new stream of the given type with the given parameters. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/stream.py#L151-L156 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/stream.py | AtlasStream.subscribe | def subscribe(self, stream_type, **parameters):
"""Subscribe to a stream with the given parameters."""
parameters["stream_type"] = stream_type
if (stream_type == "result") and ("buffering" not in parameters):
parameters["buffering"] = True
self.socketIO.emit(self.EVENT_NAME_SUBSCRIBE, parameters) | python | def subscribe(self, stream_type, **parameters):
"""Subscribe to a stream with the given parameters."""
parameters["stream_type"] = stream_type
if (stream_type == "result") and ("buffering" not in parameters):
parameters["buffering"] = True
self.socketIO.emit(self.EVENT_NAME_SUBSCRIBE, parameters) | Subscribe to a stream with the given parameters. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/stream.py#L158-L165 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/stream.py | AtlasStream.timeout | def timeout(self, seconds=None):
"""
Times out all streams after n seconds, or waits forever if seconds is
None.
"""
if seconds is None:
self.socketIO.wait()
else:
self.socketIO.wait(seconds=seconds) | python | def timeout(self, seconds=None):
"""
Times out all streams after n seconds, or waits forever if seconds is
None.
"""
if seconds is None:
self.socketIO.wait()
else:
self.socketIO.wait(seconds=seconds) | Times out all streams after n seconds, or waits forever if seconds is
None. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/stream.py#L167-L175 |
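An end-to-end streaming sketch that ties together `connect()`, `bind_channel()`, `start_stream()` and `timeout()`; the channel name and stream parameters follow the library's documented usage and may differ between versions:

```python
from ripe.atlas.cousteau import AtlasStream

def on_result_response(*args):
    # Each streamed measurement result arrives as the first positional argument.
    print(args[0])

stream = AtlasStream()
stream.connect()
stream.bind_channel("atlas_result", on_result_response)
stream.start_stream(stream_type="result", msm=1001)
stream.timeout(seconds=30)  # wait up to 30 seconds for results
stream.disconnect()
```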
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/api_listing.py | RequestGenerator.build_url | def build_url(self):
"""Build the url path based on the filter options."""
if not self.api_filters:
return self.url
# Reduce complex objects to simpler strings
for k, v in self.api_filters.items():
if isinstance(v, datetime): # datetime > UNIX timestamp
self.api_filters[k] = int(calendar.timegm(v.timetuple()))
if isinstance(v, (tuple, list)): # tuples & lists > x,y,z
self.api_filters[k] = ",".join([str(_) for _ in v])
if (
self.id_filter in self.api_filters and
len(str(self.api_filters[self.id_filter])) > self.URL_LENGTH_LIMIT
):
self.build_url_chunks()
return self.split_urls.pop(0)
filters = '&'.join("%s=%s" % (k, v) for (k, v) in self.api_filters.items())
return "%s?%s" % (self.url, filters) | python | def build_url(self):
"""Build the url path based on the filter options."""
if not self.api_filters:
return self.url
# Reduce complex objects to simpler strings
for k, v in self.api_filters.items():
if isinstance(v, datetime): # datetime > UNIX timestamp
self.api_filters[k] = int(calendar.timegm(v.timetuple()))
if isinstance(v, (tuple, list)): # tuples & lists > x,y,z
self.api_filters[k] = ",".join([str(_) for _ in v])
if (
self.id_filter in self.api_filters and
len(str(self.api_filters[self.id_filter])) > self.URL_LENGTH_LIMIT
):
self.build_url_chunks()
return self.split_urls.pop(0)
filters = '&'.join("%s=%s" % (k, v) for (k, v) in self.api_filters.items())
return "%s?%s" % (self.url, filters) | Build the url path based on the filter options. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/api_listing.py#L55-L77 |
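A standalone illustration of the filter serialisation performed in `build_url()`: datetimes become UNIX timestamps and lists become comma-separated strings (the filter names and URL path below are illustrative):

```python
import calendar
from datetime import datetime

api_filters = {
    "start_time__gte": datetime(2020, 1, 1),
    "id__in": [1001, 1002, 1003],
    "status": 2,
}

serialised = {}
for key, value in api_filters.items():
    if isinstance(value, datetime):
        # datetime -> UNIX timestamp
        value = int(calendar.timegm(value.timetuple()))
    if isinstance(value, (tuple, list)):
        # tuples & lists -> x,y,z
        value = ",".join(str(item) for item in value)
    serialised[key] = value

query = "&".join("%s=%s" % (k, v) for k, v in serialised.items())
print("/api/v2/measurements/?" + query)
```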
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/api_listing.py | RequestGenerator.build_url_chunks | def build_url_chunks(self):
"""
If the URL is too long because the id filter is huge, break the ids into chunks and construct
several URLs to call, in order to abstract this complexity from the user.
"""
CHUNK_SIZE = 500
id_filter = str(self.api_filters.pop(self.id_filter)).split(',')
chunks = list(self.chunks(id_filter, CHUNK_SIZE))
filters = '&'.join("%s=%s" % (k, v) for (k, v) in self.api_filters.items())
for chunk in chunks:
if filters:
url = "{0}?{1}&{2}={3}".format(self.url, filters, self.id_filter, ','.join(chunk))
else:
url = "{0}?{1}={2}".format(self.url, self.id_filter, ','.join(chunk))
self.split_urls.append(url) | python | def build_url_chunks(self):
"""
If the URL is too long because the id filter is huge, break the ids into chunks and construct
several URLs to call, in order to abstract this complexity from the user.
"""
CHUNK_SIZE = 500
id_filter = str(self.api_filters.pop(self.id_filter)).split(',')
chunks = list(self.chunks(id_filter, CHUNK_SIZE))
filters = '&'.join("%s=%s" % (k, v) for (k, v) in self.api_filters.items())
for chunk in chunks:
if filters:
url = "{0}?{1}&{2}={3}".format(self.url, filters, self.id_filter, ','.join(chunk))
else:
url = "{0}?{1}={2}".format(self.url, self.id_filter, ','.join(chunk))
self.split_urls.append(url) | If the URL is too long because the id filter is huge, break the ids into chunks and construct
several URLs to call, in order to abstract this complexity from the user. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/api_listing.py#L79-L95 |
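A standalone sketch of the chunking idea: a long list of ids is split into fixed-size groups so each generated URL stays under the length limit (the `id__in` parameter name is illustrative):

```python
CHUNK_SIZE = 500

def chunks(items, size):
    """Yield successive fixed-size slices of a list."""
    for i in range(0, len(items), size):
        yield items[i:i + size]

ids = [str(n) for n in range(1, 1201)]  # 1200 measurement ids
urls = [
    "/api/v2/measurements/?id__in=" + ",".join(group)
    for group in chunks(ids, CHUNK_SIZE)
]
print(len(urls))  # 3 requests instead of one oversized URL
```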
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/api_listing.py | RequestGenerator.next_batch | def next_batch(self):
"""
Query the API for the next batch of objects, and store the next URL and
the batch of objects.
"""
is_success, results = AtlasRequest(
url_path=self.atlas_url,
user_agent=self._user_agent,
server=self.server,
verify=self.verify,
).get()
if not is_success:
raise APIResponseError(results)
self.total_count = results.get("count")
self.atlas_url = self.build_next_url(results.get("next"))
self.current_batch = results.get("results", []) | python | def next_batch(self):
"""
Query the API for the next batch of objects, and store the next URL and
the batch of objects.
"""
is_success, results = AtlasRequest(
url_path=self.atlas_url,
user_agent=self._user_agent,
server=self.server,
verify=self.verify,
).get()
if not is_success:
raise APIResponseError(results)
self.total_count = results.get("count")
self.atlas_url = self.build_next_url(results.get("next"))
self.current_batch = results.get("results", []) | Query the API for the next batch of objects, and store the next URL and
the batch of objects. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/api_listing.py#L123-L140 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/api_listing.py | RequestGenerator.build_next_url | def build_next_url(self, url):
"""Builds the next URL in a format compatible with cousteau (path + query)."""
if not url:
if self.split_urls: # If we had a long request give the next part
self.total_count_flag = False # Reset flag for count
return self.split_urls.pop(0)
else:
return None
parsed_url = urlparse(url)
return "{0}?{1}".format(parsed_url.path, parsed_url.query) | python | def build_next_url(self, url):
"""Builds the next URL in a format compatible with cousteau (path + query)."""
if not url:
if self.split_urls: # If we had a long request give the next part
self.total_count_flag = False # Reset flag for count
return self.split_urls.pop(0)
else:
return None
parsed_url = urlparse(url)
return "{0}?{1}".format(parsed_url.path, parsed_url.query) | Builds the next URL in a format compatible with cousteau (path + query). | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/api_listing.py#L142-L152 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/api_listing.py | RequestGenerator.set_total_count | def set_total_count(self, value):
"""Setter for the count attribute. Should append only one count per split URL."""
if not self.total_count_flag and value:
self._count.append(int(value))
self.total_count_flag = True | python | def set_total_count(self, value):
"""Setter for the count attribute. Should append only one count per split URL."""
if not self.total_count_flag and value:
self._count.append(int(value))
self.total_count_flag = True | Setter for the count attribute. Should append only one count per split URL. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/api_listing.py#L162-L166 |
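The request generators built on this class are normally used through the package's high-level classes; a sketch using `ProbeRequest` from `ripe.atlas.cousteau` (the filter value is illustrative):

```python
from ripe.atlas.cousteau import ProbeRequest

probes = ProbeRequest(country_code="NL")

# Iterating fetches batches lazily via next_batch()/build_next_url().
for probe in probes:
    print(probe["id"])

# Total number of matching objects reported by the API.
print(probes.total_count)
```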
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/request.py | AtlasRequest.get_headers | def get_headers(self):
"""Return the headers for the HTTP request."""
headers = {
"User-Agent": self.http_agent,
"Content-Type": "application/json",
"Accept": "application/json"
}
if self.headers:
headers.update(self.headers)
return headers | python | def get_headers(self):
"""Return the headers for the HTTP request."""
headers = {
"User-Agent": self.http_agent,
"Content-Type": "application/json",
"Accept": "application/json"
}
if self.headers:
headers.update(self.headers)
return headers | Return the headers for the HTTP request. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/request.py#L63-L73 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/request.py | AtlasRequest.http_method | def http_method(self, method):
"""
Executes the given HTTP method and returns whether it succeeded, along
with the response: decoded from JSON into a Python object on success,
or as a plain string otherwise.
"""
self.build_url()
try:
response = self.get_http_method(method)
is_success = response.ok
try:
response_message = response.json()
except ValueError:
response_message = response.text
except requests.exceptions.RequestException as exc:
is_success = False
response_message = exc.args
return is_success, response_message | python | def http_method(self, method):
"""
Executes the given HTTP method and returns whether it succeeded, along
with the response: decoded from JSON into a Python object on success,
or as a plain string otherwise.
"""
self.build_url()
try:
response = self.get_http_method(method)
is_success = response.ok
try:
response_message = response.json()
except ValueError:
response_message = response.text
except requests.exceptions.RequestException as exc:
is_success = False
response_message = exc.args
return is_success, response_message | Executes the given HTTP method and returns whether it succeeded, along
with the response: decoded from JSON into a Python object on success,
or as a plain string otherwise. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/request.py#L75-L96 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/request.py | AtlasRequest.get_http_method | def get_http_method(self, method):
"""Gets the http method that will be called from the requests library"""
return self.http_methods[method](self.url, **self.http_method_args) | python | def get_http_method(self, method):
"""Gets the http method that will be called from the requests library"""
return self.http_methods[method](self.url, **self.http_method_args) | Gets the http method that will be called from the requests library | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/request.py#L98-L100 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/request.py | AtlasRequest.get | def get(self, **url_params):
"""
Makes the HTTP GET to the url.
"""
if url_params:
self.http_method_args["params"].update(url_params)
return self.http_method("GET") | python | def get(self, **url_params):
"""
Makes the HTTP GET to the url.
"""
if url_params:
self.http_method_args["params"].update(url_params)
return self.http_method("GET") | Makes the HTTP GET to the url. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/request.py#L109-L115 |
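A minimal sketch of issuing a GET through `AtlasRequest` directly, mirroring the library's documented usage; the URL path and response key are illustrative:

```python
from ripe.atlas.cousteau import AtlasRequest

request = AtlasRequest(url_path="/api/v2/anchors/")
is_success, response = request.get()

if is_success:
    print(response["count"])
else:
    print("Request failed:", response)
```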
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/request.py | AtlasRequest.post | def post(self):
"""
Makes the HTTP POST to the url sending post_data.
"""
self._construct_post_data()
post_args = {"json": self.post_data}
self.http_method_args.update(post_args)
return self.http_method("POST") | python | def post(self):
"""
Makes the HTTP POST to the url sending post_data.
"""
self._construct_post_data()
post_args = {"json": self.post_data}
self.http_method_args.update(post_args)
return self.http_method("POST") | Makes the HTTP POST to the url sending post_data. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/request.py#L117-L126 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/request.py | AtlasRequest.clean_time | def clean_time(self, time):
"""
Transform the time field into a datetime object, if one is present.
"""
if isinstance(time, int):
time = datetime.utcfromtimestamp(time)
elif isinstance(time, str):
time = parser.parse(time)
return time | python | def clean_time(self, time):
"""
Transform the time field into a datetime object, if one is present.
"""
if isinstance(time, int):
time = datetime.utcfromtimestamp(time)
elif isinstance(time, str):
time = parser.parse(time)
return time | Transform the time field into a datetime object, if one is present. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/request.py#L131-L140 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/request.py | AtlasCreateRequest._construct_post_data | def _construct_post_data(self):
"""
Constructs the data structure that is required by the Atlas API, based
on the measurements, sources and times the user has specified.
"""
definitions = [msm.build_api_struct() for msm in self.measurements]
probes = [source.build_api_struct() for source in self.sources]
self.post_data = {
"definitions": definitions,
"probes": probes,
"is_oneoff": self.is_oneoff
}
if self.is_oneoff:
self.post_data.update({"is_oneoff": self.is_oneoff})
if self.start_time:
self.post_data.update(
{"start_time": int(calendar.timegm(self.start_time.timetuple()))}
)
if self.stop_time:
self.post_data.update(
{"stop_time": int(calendar.timegm(self.stop_time.timetuple()))}
)
if self.bill_to:
self.post_data.update({"bill_to": self.bill_to}) | python | def _construct_post_data(self):
"""
Constructs the data structure that is required by the Atlas API, based
on the measurements, sources and times the user has specified.
"""
definitions = [msm.build_api_struct() for msm in self.measurements]
probes = [source.build_api_struct() for source in self.sources]
self.post_data = {
"definitions": definitions,
"probes": probes,
"is_oneoff": self.is_oneoff
}
if self.is_oneoff:
self.post_data.update({"is_oneoff": self.is_oneoff})
if self.start_time:
self.post_data.update(
{"start_time": int(calendar.timegm(self.start_time.timetuple()))}
)
if self.stop_time:
self.post_data.update(
{"stop_time": int(calendar.timegm(self.stop_time.timetuple()))}
)
if self.bill_to:
self.post_data.update({"bill_to": self.bill_to}) | Constructs the data structure that is required by the Atlas API, based
on the measurements, sources and times the user has specified. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/request.py#L180-L206 |
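The canonical create-request flow that feeds `_construct_post_data()`, following the library's documented usage; the API key is a placeholder:

```python
from datetime import datetime
from ripe.atlas.cousteau import Ping, AtlasSource, AtlasCreateRequest

ping = Ping(af=4, target="www.ripe.net", description="ping example")
source = AtlasSource(type="area", value="WW", requested=5)

request = AtlasCreateRequest(
    start_time=datetime.utcnow(),
    key="YOUR_API_KEY",
    measurements=[ping],
    sources=[source],
    is_oneoff=True,
)
is_success, response = request.create()
```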
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/request.py | AtlasResultsRequest.clean_probes | def clean_probes(self, probe_ids):
"""
Checks the format of the probe ids and transforms them into something the API
understands.
"""
if isinstance(probe_ids, (tuple, list)): # tuples & lists > x,y,z
probe_ids = ",".join([str(_) for _ in probe_ids])
return probe_ids | python | def clean_probes(self, probe_ids):
"""
Checks the format of the probe ids and transforms them into something the API
understands.
"""
if isinstance(probe_ids, (tuple, list)): # tuples & lists > x,y,z
probe_ids = ",".join([str(_) for _ in probe_ids])
return probe_ids | Checks the format of the probe ids and transforms them into something the API
understands. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/request.py#L319-L327 |
RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/request.py | AtlasResultsRequest.update_http_method_params | def update_http_method_params(self):
"""
Update HTTP url parameters based on msm_id and query filters if
there are any.
"""
url_params = {}
if self.start:
url_params.update(
{"start": int(calendar.timegm(self.start.timetuple()))}
)
if self.stop:
url_params.update(
{"stop": int(calendar.timegm(self.stop.timetuple()))}
)
if self.probe_ids:
url_params.update({"probe_ids": self.probe_ids})
self.http_method_args["params"].update(url_params) | python | def update_http_method_params(self):
"""
Update HTTP url parameters based on msm_id and query filters if
there are any.
"""
url_params = {}
if self.start:
url_params.update(
{"start": int(calendar.timegm(self.start.timetuple()))}
)
if self.stop:
url_params.update(
{"stop": int(calendar.timegm(self.stop.timetuple()))}
)
if self.probe_ids:
url_params.update({"probe_ids": self.probe_ids})
self.http_method_args["params"].update(url_params) | Update HTTP url parameters based on msm_id and query filters if
there are any. | https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/request.py#L329-L349 |
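Fetching results with `AtlasResultsRequest`, which populates the `start`, `stop` and `probe_ids` URL parameters shown above; the measurement id and dates are placeholders:

```python
from datetime import datetime
from ripe.atlas.cousteau import AtlasResultsRequest

kwargs = {
    "msm_id": 2016892,
    "start": datetime(2015, 5, 19),
    "stop": datetime(2015, 5, 20),
    "probe_ids": [1, 2, 3, 4],
}
is_success, results = AtlasResultsRequest(**kwargs).create()

if is_success:
    print(len(results))
```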
fuzeman/trakt.py | trakt/interfaces/search.py | SearchInterface.lookup | def lookup(self, id, service=None, media=None, extended=None, **kwargs):
"""Lookup items by their Trakt, IMDB, TMDB, TVDB, or TVRage ID.
**Note:** If you look up an identifier without a :code:`media` type specified, it
might return multiple items if the :code:`service` is not globally unique.
:param id: Identifier value to lookup
:type id: :class:`~python:str` or :class:`~python:int`
:param service: Identifier service
**Possible values:**
- :code:`trakt`
- :code:`imdb`
- :code:`tmdb`
- :code:`tvdb`
- :code:`tvrage`
:type service: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`trakt.objects.media.Media` or :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
# Expand tuple `id`
if type(id) is tuple:
if len(id) != 2:
raise ValueError()
id, service = id
# Validate parameters
if not service:
raise ValueError('Invalid value provided for the "service" parameter')
# Build query
query = {}
if isinstance(media, six.string_types):
query['type'] = media
elif isinstance(media, list):
query['type'] = ','.join(media)
if extended:
query['extended'] = extended
# Send request
response = self.http.get(
params=[service, id],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if not items:
return None
count = len(items)
if count > 1:
return SearchMapper.process_many(self.client, items)
elif count == 1:
return SearchMapper.process(self.client, items[0])
return None | python | def lookup(self, id, service=None, media=None, extended=None, **kwargs):
"""Lookup items by their Trakt, IMDB, TMDB, TVDB, or TVRage ID.
**Note:** If you look up an identifier without a :code:`media` type specified, it
might return multiple items if the :code:`service` is not globally unique.
:param id: Identifier value to lookup
:type id: :class:`~python:str` or :class:`~python:int`
:param service: Identifier service
**Possible values:**
- :code:`trakt`
- :code:`imdb`
- :code:`tmdb`
- :code:`tvdb`
- :code:`tvrage`
:type service: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`trakt.objects.media.Media` or :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
# Expand tuple `id`
if type(id) is tuple:
if len(id) != 2:
raise ValueError()
id, service = id
# Validate parameters
if not service:
raise ValueError('Invalid value provided for the "service" parameter')
# Build query
query = {}
if isinstance(media, six.string_types):
query['type'] = media
elif isinstance(media, list):
query['type'] = ','.join(media)
if extended:
query['extended'] = extended
# Send request
response = self.http.get(
params=[service, id],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if not items:
return None
count = len(items)
if count > 1:
return SearchMapper.process_many(self.client, items)
elif count == 1:
return SearchMapper.process(self.client, items[0])
return None | Lookup items by their Trakt, IMDB, TMDB, TVDB, or TVRage ID.
**Note:** If you look up an identifier without a :code:`media` type specified, it
might return multiple items if the :code:`service` is not globally unique.
:param id: Identifier value to lookup
:type id: :class:`~python:str` or :class:`~python:int`
:param service: Identifier service
**Possible values:**
- :code:`trakt`
- :code:`imdb`
- :code:`tmdb`
- :code:`tvdb`
- :code:`tvrage`
:type service: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`trakt.objects.media.Media` or :class:`~python:list` of :class:`trakt.objects.media.Media` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/search.py#L14-L103 |
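A minimal lookup sketch; the client id/secret are placeholders and the IMDB id is the one used in the docstring examples elsewhere in this file:

```python
from trakt import Trakt

Trakt.configuration.defaults.client(
    id="YOUR_CLIENT_ID",
    secret="YOUR_CLIENT_SECRET",
)

# Look up a movie by its IMDB identifier.
movie = Trakt['search'].lookup('tt2015381', service='imdb', media='movie')

if movie:
    print(movie.title, movie.year)
```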
fuzeman/trakt.py | trakt/interfaces/search.py | SearchInterface.query | def query(self, query, media=None, year=None, fields=None, extended=None, **kwargs):
"""Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`
:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
# Validate parameters
if not media:
warnings.warn(
"\"media\" parameter is now required on the Trakt['search'].query() method",
DeprecationWarning, stacklevel=2
)
if fields and not media:
raise ValueError('"fields" can only be used when the "media" parameter is defined')
# Build query
query = {
'query': query
}
if year:
query['year'] = year
if fields:
query['fields'] = fields
if extended:
query['extended'] = extended
# Serialize media items
if isinstance(media, list):
media = ','.join(media)
# Send request
response = self.http.get(
params=[media],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if items is not None:
return SearchMapper.process_many(self.client, items)
return None | python | def query(self, query, media=None, year=None, fields=None, extended=None, **kwargs):
"""Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`
:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
# Validate parameters
if not media:
warnings.warn(
"\"media\" parameter is now required on the Trakt['search'].query() method",
DeprecationWarning, stacklevel=2
)
if fields and not media:
raise ValueError('"fields" can only be used when the "media" parameter is defined')
# Build query
query = {
'query': query
}
if year:
query['year'] = year
if fields:
query['fields'] = fields
if extended:
query['extended'] = extended
# Serialize media items
if isinstance(media, list):
media = ','.join(media)
# Send request
response = self.http.get(
params=[media],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if items is not None:
return SearchMapper.process_many(self.client, items)
return None | Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`
:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/search.py#L105-L187 |
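A minimal text-search sketch, assuming the client has been configured as in the lookup example above; the title and year are illustrative:

```python
from trakt import Trakt

results = Trakt['search'].query('The Avengers', 'movie', year=2012)

for item in results or []:
    print(item.title, item.year)
```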
fuzeman/trakt.py | trakt/objects/season.py | Season.to_identifier | def to_identifier(self):
"""Return the season identifier which is compatible with requests that require season definitions.
:return: Season identifier/definition
:rtype: :class:`~python:dict`
"""
return {
'number': self.pk,
'episodes': [
episode.to_dict()
for episode in self.episodes.values()
]
} | python | def to_identifier(self):
"""Return the season identifier which is compatible with requests that require season definitions.
:return: Season identifier/definition
:rtype: :class:`~python:dict`
"""
return {
'number': self.pk,
'episodes': [
episode.to_dict()
for episode in self.episodes.values()
]
} | Return the season identifier which is compatible with requests that require season definitions.
:return: Season identifier/definition
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/season.py#L49-L62 |
fuzeman/trakt.py | trakt/objects/season.py | Season.to_dict | def to_dict(self):
"""Dump season to a dictionary.
:return: Season dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the season identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.episode_count:
result['episode_count'] = self.episode_count
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result | python | def to_dict(self):
"""Dump season to a dictionary.
:return: Season dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the season identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.episode_count:
result['episode_count'] = self.episode_count
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result | Dump season to a dictionary.
:return: Season dictionary
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/season.py#L69-L100 |
fuzeman/trakt.py | trakt/objects/episode.py | Episode.to_dict | def to_dict(self):
"""Dump episode to a dictionary.
:return: Episode dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'title': self.title,
'watched': 1 if self.is_watched else 0,
'collected': 1 if self.is_collected else 0,
'plays': self.plays if self.plays is not None else 0,
'in_watchlist': self.in_watchlist if self.in_watchlist is not None else 0,
'progress': self.progress,
'last_watched_at': to_iso8601_datetime(self.last_watched_at),
'collected_at': to_iso8601_datetime(self.collected_at),
'paused_at': to_iso8601_datetime(self.paused_at),
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the (<season>, <episode>) identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.available_translations:
result['available_translations'] = self.available_translations
return result | python | def to_dict(self):
"""Dump episode to a dictionary.
:return: Episode dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'title': self.title,
'watched': 1 if self.is_watched else 0,
'collected': 1 if self.is_collected else 0,
'plays': self.plays if self.plays is not None else 0,
'in_watchlist': self.in_watchlist if self.in_watchlist is not None else 0,
'progress': self.progress,
'last_watched_at': to_iso8601_datetime(self.last_watched_at),
'collected_at': to_iso8601_datetime(self.collected_at),
'paused_at': to_iso8601_datetime(self.paused_at),
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the (<season>, <episode>) identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.available_translations:
result['available_translations'] = self.available_translations
return result | Dump episode to a dictionary.
:return: Episode dictionary
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/episode.py#L72-L117 |
fuzeman/trakt.py | examples/authentication/device.py | Application.on_aborted | def on_aborted(self):
"""Device authentication aborted.
Triggered when device authentication was aborted (either with `DeviceOAuthPoller.stop()`
or via the "poll" event)
"""
print('Authentication aborted')
# Authentication aborted
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release() | python | def on_aborted(self):
"""Device authentication aborted.
Triggered when device authentication was aborted (either with `DeviceOAuthPoller.stop()`
or via the "poll" event)
"""
print('Authentication aborted')
# Authentication aborted
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release() | Device authentication aborted.
Triggered when device authentication was aborted (either with `DeviceOAuthPoller.stop()`
or via the "poll" event) | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/examples/authentication/device.py#L70-L82 |
fuzeman/trakt.py | examples/authentication/device.py | Application.on_authenticated | def on_authenticated(self, authorization):
"""Device authenticated.
:param authorization: Authentication token details
:type authorization: dict
"""
# Acquire condition
self.is_authenticating.acquire()
# Store authorization for future calls
self.authorization = authorization
print('Authentication successful - authorization: %r' % self.authorization)
# Authentication complete
self.is_authenticating.notify_all()
self.is_authenticating.release() | python | def on_authenticated(self, authorization):
"""Device authenticated.
:param authorization: Authentication token details
:type authorization: dict
"""
# Acquire condition
self.is_authenticating.acquire()
# Store authorization for future calls
self.authorization = authorization
print('Authentication successful - authorization: %r' % self.authorization)
# Authentication complete
self.is_authenticating.notify_all()
self.is_authenticating.release() | Device authenticated.
:param authorization: Authentication token details
:type authorization: dict | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/examples/authentication/device.py#L84-L101 |
fuzeman/trakt.py | examples/authentication/device.py | Application.on_expired | def on_expired(self):
"""Device authentication expired."""
print('Authentication expired')
# Authentication expired
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release() | python | def on_expired(self):
"""Device authentication expired."""
print('Authentication expired')
# Authentication expired
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release() | Device authentication expired. | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/examples/authentication/device.py#L103-L111 |
fuzeman/trakt.py | trakt/interfaces/oauth/device.py | DeviceOAuthInterface.poll | def poll(self, device_code, expires_in, interval, **kwargs):
"""Construct the device authentication poller.
:param device_code: Device authentication code
:type device_code: str
:param expires_in: Device authentication code expiry (in seconds)
:type expires_in: int
:param interval: Device authentication poll interval
:type interval: int
:rtype: DeviceOAuthPoller
"""
return DeviceOAuthPoller(self.client, device_code, expires_in, interval) | python | def poll(self, device_code, expires_in, interval, **kwargs):
"""Construct the device authentication poller.
:param device_code: Device authentication code
:type device_code: str
:param expires_in: Device authentication code expiry (in seconds)
:type expires_in: int
:param interval: Device authentication poll interval
:type interval: int
:rtype: DeviceOAuthPoller
"""
return DeviceOAuthPoller(self.client, device_code, expires_in, interval) | Construct the device authentication poller.
:param device_code: Device authentication code
:type device_code: str
:param expires_in: Device authentication code expiry (in seconds)
:type expires_in: int
:param interval: Device authentication poll interval
:type interval: int
:rtype: DeviceOAuthPoller | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/oauth/device.py#L42-L56 |
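A condensed device-authentication flow matching the `Application` callbacks shown above; the client credentials are placeholders and the response keys follow the Trakt device-code API:

```python
from threading import Condition

from trakt import Trakt

Trakt.configuration.defaults.client(
    id="YOUR_CLIENT_ID",
    secret="YOUR_CLIENT_SECRET",
)

is_authenticating = Condition()

def on_authenticated(authorization):
    print('Token received: %r' % authorization)
    with is_authenticating:
        is_authenticating.notify_all()

# Request a device code, show it to the user, then poll until they approve it.
code = Trakt['oauth/device'].code()
print('Visit %s and enter the code %s' % (code['verification_url'], code['user_code']))

poller = Trakt['oauth/device'].poll(**code)
poller.on('authenticated', on_authenticated)

with is_authenticating:
    poller.start(daemon=False)
    is_authenticating.wait()
```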
fuzeman/trakt.py | trakt/objects/movie.py | Movie.to_dict | def to_dict(self):
"""Dump movie to a dictionary.
:return: Movie dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'watched': 1 if self.is_watched else 0,
'collected': 1 if self.is_collected else 0,
'plays': self.plays if self.plays is not None else 0,
'in_watchlist': self.in_watchlist if self.in_watchlist is not None else 0,
'progress': self.progress,
'last_watched_at': to_iso8601_datetime(self.last_watched_at),
'collected_at': to_iso8601_datetime(self.collected_at),
'paused_at': to_iso8601_datetime(self.paused_at)
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.released:
result['released'] = to_iso8601_date(self.released)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.tagline:
result['tagline'] = self.tagline
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.homepage:
result['homepage'] = self.homepage
if self.trailer:
result['trailer'] = self.trailer
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
return result | python | def to_dict(self):
"""Dump movie to a dictionary.
:return: Movie dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'watched': 1 if self.is_watched else 0,
'collected': 1 if self.is_collected else 0,
'plays': self.plays if self.plays is not None else 0,
'in_watchlist': self.in_watchlist if self.in_watchlist is not None else 0,
'progress': self.progress,
'last_watched_at': to_iso8601_datetime(self.last_watched_at),
'collected_at': to_iso8601_datetime(self.collected_at),
'paused_at': to_iso8601_datetime(self.paused_at)
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.released:
result['released'] = to_iso8601_date(self.released)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.tagline:
result['tagline'] = self.tagline
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.homepage:
result['homepage'] = self.homepage
if self.trailer:
result['trailer'] = self.trailer
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
return result | Dump movie to a dictionary.
:return: Movie dictionary
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/movie.py#L123-L183 |
fuzeman/trakt.py | trakt/objects/progress.py | Progress.to_dict | def to_dict(self):
"""Dump progress to a dictionary.
:return: Progress dictionary
:rtype: :class:`~python:dict`
"""
result = super(Progress, self).to_dict()
label = LABELS['last_progress_change'][self.progress_type]
result[label] = to_iso8601_datetime(self.last_progress_change)
if self.progress_type == 'watched':
result['reset_at'] = self.reset_at
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
if self.hidden_seasons:
result['hidden_seasons'] = [
popitems(season.to_dict(), ['number', 'ids'])
for season in self.hidden_seasons.values()
]
if self.next_episode:
result['next_episode'] = popitems(self.next_episode.to_dict(), ['season', 'number', 'title', 'ids'])
result['next_episode']['season'] = self.next_episode.keys[0][0]
if self.last_episode:
result['last_episode'] = popitems(self.last_episode.to_dict(), ['season', 'number', 'title', 'ids'])
result['last_episode']['season'] = self.last_episode.keys[0][0]
return result | python | def to_dict(self):
"""Dump progress to a dictionary.
:return: Progress dictionary
:rtype: :class:`~python:dict`
"""
result = super(Progress, self).to_dict()
label = LABELS['last_progress_change'][self.progress_type]
result[label] = to_iso8601_datetime(self.last_progress_change)
if self.progress_type == 'watched':
result['reset_at'] = self.reset_at
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
if self.hidden_seasons:
result['hidden_seasons'] = [
popitems(season.to_dict(), ['number', 'ids'])
for season in self.hidden_seasons.values()
]
if self.next_episode:
result['next_episode'] = popitems(self.next_episode.to_dict(), ['season', 'number', 'title', 'ids'])
result['next_episode']['season'] = self.next_episode.keys[0][0]
if self.last_episode:
result['last_episode'] = popitems(self.last_episode.to_dict(), ['season', 'number', 'title', 'ids'])
result['last_episode']['season'] = self.last_episode.keys[0][0]
return result | Dump progress to a dictionary.
:return: Progress dictionary
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/progress.py#L108-L142 |
fuzeman/trakt.py | trakt/interfaces/scrobble.py | ScrobbleInterface.action | def action(self, action, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Perform scrobble action.
:param action: Action to perform (either :code:`start`, :code:`pause` or :code:`stop`)
:type action: :class:`~python:str`
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
if movie and (show or episode):
raise ValueError('Only one media type should be provided')
if not movie and not episode:
raise ValueError('Missing media item')
data = {
'progress': progress,
'app_version': kwargs.pop('app_version', self.client.version),
'app_date': kwargs.pop('app_date', None)
}
if movie:
# TODO validate
data['movie'] = movie
elif episode:
if show:
data['show'] = show
# TODO validate
data['episode'] = episode
response = self.http.post(
action,
data=data,
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
return self.get_data(response, **kwargs) | python | def action(self, action, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Perform scrobble action.
:param action: Action to perform (either :code:`start`, :code:`pause` or :code:`stop`)
:type action: :class:`~python:str`
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
if movie and (show or episode):
raise ValueError('Only one media type should be provided')
if not movie and not episode:
raise ValueError('Missing media item')
data = {
'progress': progress,
'app_version': kwargs.pop('app_version', self.client.version),
'app_date': kwargs.pop('app_date', None)
}
if movie:
# TODO validate
data['movie'] = movie
elif episode:
if show:
data['show'] = show
# TODO validate
data['episode'] = episode
response = self.http.post(
action,
data=data,
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
return self.get_data(response, **kwargs) | Perform scrobble action.
:param action: Action to perform (either :code:`start`, :code:`pause` or :code:`stop`)
:type action: :class:`~python:str`
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/scrobble.py#L12-L134 |
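Editor's note: a minimal usage sketch of the scrobble action above. The configuration call and credential values are assumptions about typical trakt.py setup (scrobble endpoints also require an authenticated OAuth configuration); only the Trakt['scrobble'] call itself is taken from the docstrings in this row.
from trakt import Trakt
# Assumed configuration step; replace the placeholders with real credentials and
# configure OAuth before scrobbling (the scrobble endpoints are authenticated).
Trakt.configuration.defaults.client(
    id='YOUR_CLIENT_ID',          # placeholder
    secret='YOUR_CLIENT_SECRET'   # placeholder
)
movie = {
    'title': 'Guardians of the Galaxy',
    'year': 2014,
    'ids': {'tmdb': 118340}
}
# Report that playback has started at 1.25% progress
response = Trakt['scrobble'].start(movie=movie, progress=1.25)
print(response)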
fuzeman/trakt.py | trakt/interfaces/scrobble.py | ScrobbleInterface.start | def start(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "start" action.
Use this method when the video initially starts playing or is un-paused. This will
remove any playback progress if it exists.
**Note:** A watching status will auto expire after the remaining runtime has elapsed.
There is no need to re-send every 15 minutes.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'start',
movie, show, episode,
progress,
**kwargs
) | python | def start(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "start" action.
Use this method when the video initially starts playing or is un-paused. This will
remove any playback progress if it exists.
**Note:** A watching status will auto expire after the remaining runtime has elapsed.
There is no need to re-send every 15 minutes.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'start',
movie, show, episode,
progress,
**kwargs
) | Send the scrobble "start" action.
Use this method when the video initially starts playing or is un-paused. This will
remove any playback progress if it exists.
**Note:** A watching status will auto expire after the remaining runtime has elapsed.
There is no need to re-send every 15 minutes.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/scrobble.py#L138-L238 |
fuzeman/trakt.py | trakt/interfaces/scrobble.py | ScrobbleInterface.pause | def pause(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "pause' action.
Use this method when the video is paused. The playback progress will be saved and
:code:`Trakt['sync/playback'].get()` can be used to resume the video from this exact
position. Un-pause a video by calling the :code:`Trakt['scrobble'].start()` method again.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'pause',
'progress': 75,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'pause',
movie, show, episode,
progress,
**kwargs
) | python | def pause(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "pause' action.
Use this method when the video is paused. The playback progress will be saved and
:code:`Trakt['sync/playback'].get()` can be used to resume the video from this exact
position. Un-pause a video by calling the :code:`Trakt['scrobble'].start()` method again.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'pause',
'progress': 75,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'pause',
movie, show, episode,
progress,
**kwargs
) | Send the scrobble "pause" action.
Use this method when the video is paused. The playback progress will be saved and
:code:`Trakt['sync/playback'].get()` can be used to resume the video from this exact
position. Un-pause a video by calling the :code:`Trakt['scrobble'].start()` method again.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'pause',
'progress': 75,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/scrobble.py#L242-L340 |
fuzeman/trakt.py | trakt/interfaces/scrobble.py | ScrobbleInterface.stop | def stop(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "stop" action.
Use this method when the video is stopped or finishes playing on its own. If the
progress is above 80%, the video will be scrobbled and the :code:`action` will be set
to **scrobble**.
If the progress is less than 80%, it will be treated as a *pause* and the :code:`action`
will be set to **pause**. The playback progress will be saved and :code:`Trakt['sync/playback'].get()`
can be used to resume the video from this exact position.
**Note:** If you prefer to use a threshold higher than 80%, you should use :code:`Trakt['scrobble'].pause()`
yourself so it doesn't create duplicate scrobbles.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'scrobble',
'progress': 99.9,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'stop',
movie, show, episode,
progress,
**kwargs
) | python | def stop(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "stop" action.
Use this method when the video is stopped or finishes playing on its own. If the
progress is above 80%, the video will be scrobbled and the :code:`action` will be set
to **scrobble**.
If the progress is less than 80%, it will be treated as a *pause* and the :code:`action`
will be set to **pause**. The playback progress will be saved and :code:`Trakt['sync/playback'].get()`
can be used to resume the video from this exact position.
**Note:** If you prefer to use a threshold higher than 80%, you should use :code:`Trakt['scrobble'].pause()`
yourself so it doesn't create duplicate scrobbles.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'scrobble',
'progress': 99.9,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'stop',
movie, show, episode,
progress,
**kwargs
) | Send the scrobble "stop" action.
Use this method when the video is stopped or finishes playing on its own. If the
progress is above 80%, the video will be scrobbled and the :code:`action` will be set
to **scrobble**.
If the progress is less than 80%, it will be treated as a *pause* and the :code:`action`
will be set to **pause**. The playback progress will be saved and :code:`Trakt['sync/playback'].get()`
can be used to resume the video from this exact position.
**Note:** If you prefer to use a threshold higher than 80%, you should use :code:`Trakt['scrobble'].pause()`
yourself so it doesn't create duplicate scrobbles.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'scrobble',
'progress': 99.9,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/scrobble.py#L344-L449 |
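Editor's note: a short sketch of the stop behaviour described above; below the 80% threshold the call is stored as a pause, at or above it the item is scrobbled. The show/episode dictionaries reuse the shapes from the docstring, and client configuration is assumed to have been done as in the earlier sketch.
from trakt import Trakt
show = {'title': 'Breaking Bad', 'year': 2008, 'ids': {'tvdb': 81189}}
episode = {'season': 3, 'number': 11}
# Finished watching at 99.9% progress, so the response action should be 'scrobble'
response = Trakt['scrobble'].stop(show=show, episode=episode, progress=99.9)
if response and response.get('action') == 'scrobble':
    print('Episode scrobbled')
else:
    print('Progress saved as a pause')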
fuzeman/trakt.py | trakt/objects/list/custom.py | CustomList.delete | def delete(self, **kwargs):
"""Delete the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].delete(self.username, self.id, **kwargs) | python | def delete(self, **kwargs):
"""Delete the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].delete(self.username, self.id, **kwargs) | Delete the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L86-L96 |
fuzeman/trakt.py | trakt/objects/list/custom.py | CustomList.update | def update(self, **kwargs):
"""Update the list with the current object attributes.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
item = self._client['users/*/lists/*'].update(self.username, self.id, return_type='data', **kwargs)
if not item:
return False
self._update(item)
return True | python | def update(self, **kwargs):
"""Update the list with the current object attributes.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
item = self._client['users/*/lists/*'].update(self.username, self.id, return_type='data', **kwargs)
if not item:
return False
self._update(item)
return True | Update the list with the current object attributes.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L98-L114 |
fuzeman/trakt.py | trakt/objects/list/custom.py | CustomList.remove | def remove(self, items, **kwargs):
"""Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].remove(self.username, self.id, items, **kwargs) | python | def remove(self, items, **kwargs):
"""Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].remove(self.username, self.id, items, **kwargs) | Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L116-L129 |
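Editor's note: a sketch that exercises the CustomList helpers above on an already-fetched list object; the item payload shape is an assumption based on the movie definitions used elsewhere in this file.
def prune_and_drop(custom_list):
    """Remove one movie, toggle the like, then delete the list."""
    items = [{'movies': [{'ids': {'tmdb': 118340}}]}]  # assumed payload shape
    custom_list.remove(items)      # drop the movie from the list
    if custom_list.like():         # like the list...
        custom_list.unlike()       # ...then un-like it again
    return custom_list.delete()    # True if the list was deleted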
fuzeman/trakt.py | trakt/objects/list/custom.py | CustomList.like | def like(self, **kwargs):
"""Like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].like(self.username, self.id, **kwargs) | python | def like(self, **kwargs):
"""Like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].like(self.username, self.id, **kwargs) | Like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L135-L145 |
fuzeman/trakt.py | trakt/objects/list/custom.py | CustomList.unlike | def unlike(self, **kwargs):
"""Un-like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].unlike(self.username, self.id, **kwargs) | python | def unlike(self, **kwargs):
"""Un-like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].unlike(self.username, self.id, **kwargs) | Un-like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L147-L157 |
fuzeman/trakt.py | trakt/interfaces/calendars.py | Base.get | def get(self, source, media, collection=None, start_date=None, days=None, query=None, years=None, genres=None,
languages=None, countries=None, runtimes=None, ratings=None, certifications=None, networks=None,
status=None, **kwargs):
"""Retrieve calendar items.
The `all` calendar displays info for all shows airing during the specified period. The `my` calendar displays
episodes for all shows that have been watched, collected, or watchlisted.
:param source: Calendar source (`all` or `my`)
:type source: str
:param media: Media type (`dvd`, `movies` or `shows`)
:type media: str
:param collection: Collection type (`new`, `premieres`)
:type collection: str or None
:param start_date: Start date (defaults to today)
:type start_date: datetime or None
:param days: Number of days to display (defaults to `7`)
:type days: int or None
:param query: Search title or description.
:type query: str or None
:param years: Year or range of years (e.g. `2014`, or `2014-2016`)
:type years: int or str or tuple or None
:param genres: Genre slugs (e.g. `action`)
:type genres: str or list of str or None
:param languages: Language codes (e.g. `en`)
:type languages: str or list of str or None
:param countries: Country codes (e.g. `us`)
:type countries: str or list of str or None
:param runtimes: Runtime range in minutes (e.g. `30-90`)
:type runtimes: str or tuple or None
:param ratings: Rating range between `0` and `100` (e.g. `75-100`)
:type ratings: str or tuple or None
:param certifications: US Content Certification (e.g. `pg-13`, `tv-pg`)
:type certifications: str or list of str or None
:param networks: (TV) Network name (e.g. `HBO`)
:type networks: str or list of str or None
:param status: (TV) Show status (e.g. `returning series`, `in production`, ended`)
:type status: str or list of str or None
:return: Items
:rtype: list of trakt.objects.video.Video
"""
if source not in ['all', 'my']:
raise ValueError('Unknown collection type: %s' % (source,))
if media not in ['dvd', 'movies', 'shows']:
raise ValueError('Unknown media type: %s' % (media,))
# Default `start_date` to today when only `days` is provided
if start_date is None and days:
start_date = datetime.utcnow()
# Request calendar collection
response = self.http.get(
'/calendars/%s/%s%s' % (
source, media,
('/' + collection) if collection else ''
),
params=[
start_date.strftime('%Y-%m-%d') if start_date else None,
days
],
query={
'query': query,
'years': years,
'genres': genres,
'languages': languages,
'countries': countries,
'runtimes': runtimes,
'ratings': ratings,
'certifications': certifications,
# TV
'networks': networks,
'status': status
},
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
# Map items
if media == 'shows':
return SummaryMapper.episodes(
self.client, items,
parse_show=True
)
return SummaryMapper.movies(self.client, items) | python | def get(self, source, media, collection=None, start_date=None, days=None, query=None, years=None, genres=None,
languages=None, countries=None, runtimes=None, ratings=None, certifications=None, networks=None,
status=None, **kwargs):
"""Retrieve calendar items.
The `all` calendar displays info for all shows airing during the specified period. The `my` calendar displays
episodes for all shows that have been watched, collected, or watchlisted.
:param source: Calendar source (`all` or `my`)
:type source: str
:param media: Media type (`dvd`, `movies` or `shows`)
:type media: str
:param collection: Collection type (`new`, `premieres`)
:type collection: str or None
:param start_date: Start date (defaults to today)
:type start_date: datetime or None
:param days: Number of days to display (defaults to `7`)
:type days: int or None
:param query: Search title or description.
:type query: str or None
:param years: Year or range of years (e.g. `2014`, or `2014-2016`)
:type years: int or str or tuple or None
:param genres: Genre slugs (e.g. `action`)
:type genres: str or list of str or None
:param languages: Language codes (e.g. `en`)
:type languages: str or list of str or None
:param countries: Country codes (e.g. `us`)
:type countries: str or list of str or None
:param runtimes: Runtime range in minutes (e.g. `30-90`)
:type runtimes: str or tuple or None
:param ratings: Rating range between `0` and `100` (e.g. `75-100`)
:type ratings: str or tuple or None
:param certifications: US Content Certification (e.g. `pg-13`, `tv-pg`)
:type certifications: str or list of str or None
:param networks: (TV) Network name (e.g. `HBO`)
:type networks: str or list of str or None
:param status: (TV) Show status (e.g. `returning series`, `in production`, `ended`)
:type status: str or list of str or None
:return: Items
:rtype: list of trakt.objects.video.Video
"""
if source not in ['all', 'my']:
raise ValueError('Unknown collection type: %s' % (source,))
if media not in ['dvd', 'movies', 'shows']:
raise ValueError('Unknown media type: %s' % (media,))
# Default `start_date` to today when only `days` is provided
if start_date is None and days:
start_date = datetime.utcnow()
# Request calendar collection
response = self.http.get(
'/calendars/%s/%s%s' % (
source, media,
('/' + collection) if collection else ''
),
params=[
start_date.strftime('%Y-%m-%d') if start_date else None,
days
],
query={
'query': query,
'years': years,
'genres': genres,
'languages': languages,
'countries': countries,
'runtimes': runtimes,
'ratings': ratings,
'certifications': certifications,
# TV
'networks': networks,
'status': status
},
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
# Map items
if media == 'shows':
return SummaryMapper.episodes(
self.client, items,
parse_show=True
)
return SummaryMapper.movies(self.client, items) | Retrieve calendar items.
The `all` calendar displays info for all shows airing during the specified period. The `my` calendar displays
episodes for all shows that have been watched, collected, or watchlisted.
:param source: Calendar source (`all` or `my`)
:type source: str
:param media: Media type (`dvd`, `movies` or `shows`)
:type media: str
:param collection: Collection type (`new`, `premieres`)
:type collection: str or None
:param start_date: Start date (defaults to today)
:type start_date: datetime or None
:param days: Number of days to display (defaults to `7`)
:type days: int or None
:param query: Search title or description.
:type query: str or None
:param years: Year or range of years (e.g. `2014`, or `2014-2016`)
:type years: int or str or tuple or None
:param genres: Genre slugs (e.g. `action`)
:type genres: str or list of str or None
:param languages: Language codes (e.g. `en`)
:type languages: str or list of str or None
:param countries: Country codes (e.g. `us`)
:type countries: str or list of str or None
:param runtimes: Runtime range in minutes (e.g. `30-90`)
:type runtimes: str or tuple or None
:param ratings: Rating range between `0` and `100` (e.g. `75-100`)
:type ratings: str or tuple or None
:param certifications: US Content Certification (e.g. `pg-13`, `tv-pg`)
:type certifications: str or list of str or None
:param networks: (TV) Network name (e.g. `HBO`)
:type networks: str or list of str or None
:param status: (TV) Show status (e.g. `returning series`, `in production`, ended`)
:type status: str or list of str or None
:return: Items
:rtype: list of trakt.objects.video.Video | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/calendars.py#L24-L135 |
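Editor's note: a sketch of requesting calendar items through the interface above. The 'calendars/*/*' key is an assumption about how this interface is exposed on the client; the filters mirror the documented parameters.
from datetime import datetime
from trakt import Trakt
episodes = Trakt['calendars/*/*'].get(
    'my', 'shows',
    start_date=datetime(2016, 6, 1),   # defaults to today when omitted
    days=14,
    networks='HBO'                     # optional (TV) filter
)
for episode in episodes or []:
    print(episode)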
fuzeman/trakt.py | trakt/objects/show.py | Show.episodes | def episodes(self):
"""Return a flat episode iterator.
:returns: Iterator :code:`((season_num, episode_num), Episode)`
:rtype: iterator
"""
for sk, season in iteritems(self.seasons):
# Yield each episode in season
for ek, episode in iteritems(season.episodes):
yield (sk, ek), episode | python | def episodes(self):
"""Return a flat episode iterator.
:returns: Iterator :code:`((season_num, episode_num), Episode)`
:rtype: iterator
"""
for sk, season in iteritems(self.seasons):
# Yield each episode in season
for ek, episode in iteritems(season.episodes):
yield (sk, ek), episode | Return a flat episode iterator.
:returns: Iterator :code:`((season_num, episode_num), Episode)`
:rtype: iterator | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/show.py#L138-L148 |
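Editor's note: an iteration sketch for the flat episode iterator above; `show` is assumed to be a Show object returned elsewhere by the library.
def print_episodes(show):
    """Walk every episode of a show via the (season, episode) keyed iterator."""
    for (season_num, episode_num), episode in show.episodes():
        print('S%02dE%02d -> %r' % (season_num, episode_num, episode))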
fuzeman/trakt.py | trakt/objects/show.py | Show.to_dict | def to_dict(self):
"""Dump show to a dictionary.
:return: Show dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.airs:
result['airs'] = self.airs
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.network:
result['network'] = self.network
if self.country:
result['country'] = self.country
if self.status:
result['status'] = self.status
if self.homepage:
result['homepage'] = self.homepage
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result | python | def to_dict(self):
"""Dump show to a dictionary.
:return: Show dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.airs:
result['airs'] = self.airs
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.network:
result['network'] = self.network
if self.country:
result['country'] = self.country
if self.status:
result['status'] = self.status
if self.homepage:
result['homepage'] = self.homepage
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result | Dump show to a dictionary.
:return: Show dictionary
:rtype: :class:`~python:dict` | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/show.py#L168-L231 |
fuzeman/trakt.py | trakt/core/request.py | TraktRequest.construct_url | def construct_url(self):
"""Construct a full trakt request URI, with `params` and `query`."""
path = [self.path]
path.extend(self.params)
# Build URL
url = self.client.base_url + '/'.join(
str(value) for value in path
if value
)
# Append query parameters (if defined)
query = self.encode_query(self.query)
if query:
url += '?' + query
return url | python | def construct_url(self):
"""Construct a full trakt request URI, with `params` and `query`."""
path = [self.path]
path.extend(self.params)
# Build URL
url = self.client.base_url + '/'.join(
str(value) for value in path
if value
)
# Append query parameters (if defined)
query = self.encode_query(self.query)
if query:
url += '?' + query
return url | Construct a full trakt request URI, with `params` and `query`. | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/core/request.py#L101-L118 |
JamesGardiner/chwrapper | chwrapper/services/search.py | Search.search_officers | def search_officers(self, term, disqualified=False, **kwargs):
"""Search for officers by name.
Args:
term (str): Officer name to search on.
disqualified (Optional[bool]): True to search for disqualified
officers
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
"""
search_type = ('officers' if not disqualified else
'disqualified-officers')
params = kwargs
params['q'] = term
baseuri = self._BASE_URI + 'search/{}'.format(search_type)
res = self.session.get(baseuri, params=params)
self.handle_http_error(res)
return res | python | def search_officers(self, term, disqualified=False, **kwargs):
"""Search for officers by name.
Args:
term (str): Officer name to search on.
disqualified (Optional[bool]): True to search for disqualified
officers
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
"""
search_type = ('officers' if not disqualified else
'disqualified-officers')
params = kwargs
params['q'] = term
baseuri = self._BASE_URI + 'search/{}'.format(search_type)
res = self.session.get(baseuri, params=params)
self.handle_http_error(res)
return res | Search for officers by name.
Args:
term (str): Officer name to search on.
disqualified (Optional[bool]): True to search for disqualified
officers
kwargs (dict): additional keywords passed into
requests.session.get params keyword. | https://github.com/JamesGardiner/chwrapper/blob/50f9cb2f5264c59505e8cc4e45ee6dc5d5669134/chwrapper/services/search.py#L68-L85 |
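Editor's note: a minimal chwrapper sketch for the officer search above. The access_token keyword is an assumption about the Search constructor, the key value is a placeholder, and items_per_page is passed straight through to the request params.
import chwrapper
search = chwrapper.Search(access_token='YOUR_COMPANIES_HOUSE_KEY')  # placeholder key
response = search.search_officers('John Smith', items_per_page=10)
for officer in response.json().get('items', []):
    print(officer.get('title'))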
JamesGardiner/chwrapper | chwrapper/services/search.py | Search.address | def address(self, num):
"""Search for company addresses by company number.
Args:
num (str): Company number to search on.
"""
url_root = "company/{}/registered-office-address"
baseuri = self._BASE_URI + url_root.format(num)
res = self.session.get(baseuri)
self.handle_http_error(res)
return res | python | def address(self, num):
"""Search for company addresses by company number.
Args:
num (str): Company number to search on.
"""
url_root = "company/{}/registered-office-address"
baseuri = self._BASE_URI + url_root.format(num)
res = self.session.get(baseuri)
self.handle_http_error(res)
return res | Search for company addresses by company number.
Args:
num (str): Company number to search on. | https://github.com/JamesGardiner/chwrapper/blob/50f9cb2f5264c59505e8cc4e45ee6dc5d5669134/chwrapper/services/search.py#L100-L110 |
JamesGardiner/chwrapper | chwrapper/services/search.py | Search.profile | def profile(self, num):
"""Search for company profile by company number.
Args:
num (str): Company number to search on.
"""
baseuri = self._BASE_URI + "company/{}".format(num)
res = self.session.get(baseuri)
self.handle_http_error(res)
return res | python | def profile(self, num):
"""Search for company profile by company number.
Args:
num (str): Company number to search on.
"""
baseuri = self._BASE_URI + "company/{}".format(num)
res = self.session.get(baseuri)
self.handle_http_error(res)
return res | Search for company profile by company number.
Args:
num (str): Company number to search on. | https://github.com/JamesGardiner/chwrapper/blob/50f9cb2f5264c59505e8cc4e45ee6dc5d5669134/chwrapper/services/search.py#L112-L121 |
JamesGardiner/chwrapper | chwrapper/services/search.py | Search.filing_history | def filing_history(self, num, transaction=None, **kwargs):
"""Search for a company's filling history by company number.
Args:
num (str): Company number to search on.
transaction (Optional[str]): Filing record number.
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
"""
baseuri = self._BASE_URI + "company/{}/filing-history".format(num)
if transaction is not None:
baseuri += "/{}".format(transaction)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | python | def filing_history(self, num, transaction=None, **kwargs):
"""Search for a company's filling history by company number.
Args:
num (str): Company number to search on.
transaction (Optional[str]): Filing record number.
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
"""
baseuri = self._BASE_URI + "company/{}/filing-history".format(num)
if transaction is not None:
baseuri += "/{}".format(transaction)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | Search for a company's filing history by company number.
Args:
num (str): Company number to search on.
transaction (Optional[str]): Filing record number.
kwargs (dict): additional keywords passed into
requests.session.get params keyword. | https://github.com/JamesGardiner/chwrapper/blob/50f9cb2f5264c59505e8cc4e45ee6dc5d5669134/chwrapper/services/search.py#L134-L149 |
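Editor's note: a small helper built on the filing history call above; `search` is assumed to be a configured chwrapper.Search instance as in the previous sketch, and the 'items' key reflects the usual Companies House response shape.
def latest_filings(search, company_number, count=5):
    """Return the descriptions of the most recent filings for a company."""
    response = search.filing_history(company_number, items_per_page=count)
    return [item.get('description') for item in response.json().get('items', [])]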
JamesGardiner/chwrapper | chwrapper/services/search.py | Search.charges | def charges(self, num, charge_id=None, **kwargs):
"""Search for charges against a company by company number.
Args:
num (str): Company number to search on.
charge_id (Optional[str]): Charge id for a specific charge record.
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
"""
baseuri = self._BASE_URI + "company/{}/charges".format(num)
if charge_id is not None:
baseuri += "/{}".format(charge_id)
res = self.session.get(baseuri, params=kwargs)
else:
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | python | def charges(self, num, charge_id=None, **kwargs):
"""Search for charges against a company by company number.
Args:
num (str): Company number to search on.
charge_id (Optional[str]): Charge id for a specific charge record.
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
"""
baseuri = self._BASE_URI + "company/{}/charges".format(num)
if charge_id is not None:
baseuri += "/{}".format(charge_id)
res = self.session.get(baseuri, params=kwargs)
else:
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | Search for charges against a company by company number.
Args:
num (str): Company number to search on.
charge_id (Optional[str]): Charge id for a specific charge record.
kwargs (dict): additional keywords passed into
requests.session.get params keyword. | https://github.com/JamesGardiner/chwrapper/blob/50f9cb2f5264c59505e8cc4e45ee6dc5d5669134/chwrapper/services/search.py#L151-L167 |
JamesGardiner/chwrapper | chwrapper/services/search.py | Search.officers | def officers(self, num, **kwargs):
"""Search for a company's registered officers by company number.
Args:
num (str): Company number to search on.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
baseuri = self._BASE_URI + "company/{}/officers".format(num)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | python | def officers(self, num, **kwargs):
"""Search for a company's registered officers by company number.
Args:
num (str): Company number to search on.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
baseuri = self._BASE_URI + "company/{}/officers".format(num)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | Search for a company's registered officers by company number.
Args:
num (str): Company number to search on.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword. | https://github.com/JamesGardiner/chwrapper/blob/50f9cb2f5264c59505e8cc4e45ee6dc5d5669134/chwrapper/services/search.py#L169-L180 |
JamesGardiner/chwrapper | chwrapper/services/search.py | Search.disqualified | def disqualified(self, num, natural=True, **kwargs):
"""Search for disqualified officers by officer ID.
Searches for natural disqualifications by default. Specify
natural=False to search for corporate disqualifications.
Args:
num (str): Officer ID to search on.
natural (Optional[bool]): Natural or corporate search
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
search_type = 'natural' if natural else 'corporate'
baseuri = (self._BASE_URI +
'disqualified-officers/{}/{}'.format(search_type, num))
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | python | def disqualified(self, num, natural=True, **kwargs):
"""Search for disqualified officers by officer ID.
Searches for natural disqualifications by default. Specify
natural=False to search for corporate disqualifications.
Args:
num (str): Officer ID to search on.
natural (Optional[bool]): Natural or corporate search
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
search_type = 'natural' if natural else 'corporate'
baseuri = (self._BASE_URI +
'disqualified-officers/{}/{}'.format(search_type, num))
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | Search for disqualified officers by officer ID.
Searches for natural disqualifications by default. Specify
natural=False to search for corporate disqualifications.
Args:
num (str): Officer ID to search on.
natural (Optional[bool]): Natural or corporate search
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword. | https://github.com/JamesGardiner/chwrapper/blob/50f9cb2f5264c59505e8cc4e45ee6dc5d5669134/chwrapper/services/search.py#L182-L199 |
JamesGardiner/chwrapper | chwrapper/services/search.py | Search.persons_significant_control | def persons_significant_control(self, num, statements=False, **kwargs):
"""Search for a list of persons with significant control.
Searches for persons of significant control based on company number for
a specified company. Specify statements=True to only search for
officers with statements.
Args:
num (str, int): Company number to search on.
statements (Optional[bool]): Search only for persons with
statements. Default is False.
kwargs (dict): additional keywords passed into requests.session.get
*params* keyword.
"""
baseuri = (self._BASE_URI +
'company/{}/persons-with-significant-control'.format(num))
# Only append statements to the URL if statements is True
if statements is True:
baseuri += '-statements'
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | python | def persons_significant_control(self, num, statements=False, **kwargs):
"""Search for a list of persons with significant control.
Searches for persons of significant control based on company number for
a specified company. Specify statements=True to only search for
officers with statements.
Args:
num (str, int): Company number to search on.
statements (Optional[bool]): Search only for persons with
statements. Default is False.
kwargs (dict): additional keywords passed into requests.session.get
*params* keyword.
"""
baseuri = (self._BASE_URI +
'company/{}/persons-with-significant-control'.format(num))
# Only append statements to the URL if statements is True
if statements is True:
baseuri += '-statements'
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | Search for a list of persons with significant control.
Searches for persons of significant control based on company number for
a specified company. Specify statements=True to only search for
officers with statements.
Args:
num (str, int): Company number to search on.
statements (Optional[bool]): Search only for persons with
statements. Default is False.
kwargs (dict): additional keywords passed into requests.session.get
*params* keyword. | https://github.com/JamesGardiner/chwrapper/blob/50f9cb2f5264c59505e8cc4e45ee6dc5d5669134/chwrapper/services/search.py#L201-L224 |
JamesGardiner/chwrapper | chwrapper/services/search.py | Search.significant_control | def significant_control(self,
num,
entity_id,
entity_type='individual',
**kwargs):
"""Get details of a specific entity with significant control.
Args:
num (str, int): Company number to search on.
entity_id (str, int): Entity id to request details for
entity_type (str, int): What type of entity to search for. Defaults
to 'individual'. Other possible options are
'corporate' (for corporate entities), 'legal' (for legal
persons), 'statements' (for a person with significant control
statement) and 'secure' (for a super secure person).
kwargs (dict): additional keywords passed into requests.session.get
*params* keyword.
"""
# Dict mapping entity_type strings to url strings
entities = {'individual': 'individual',
'corporate': 'corporate-entity',
'legal': 'legal-person',
'statements': 'persons-with-significant-control-statements',
'secure': 'super-secure'}
# Make sure correct entity_type supplied
try:
entity = entities[entity_type]
except KeyError as e:
msg = ("Wrong entity_type supplied. Please choose from " +
"individual, corporate, legal, statements or secure")
raise Exception(msg) from e
# Construct the request and return the result
baseuri = (self._BASE_URI +
'company/{}/persons-with-significant-control/'.format(num) +
'{}/{}'.format(entity, entity_id))
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | python | def significant_control(self,
num,
entity_id,
entity_type='individual',
**kwargs):
"""Get details of a specific entity with significant control.
Args:
num (str, int): Company number to search on.
entity_id (str, int): Entity id to request details for
entity_type (str, int): What type of entity to search for. Defaults
to 'individual'. Other possible options are
'corporate' (for corporate entities), 'legal' (for legal
persons), 'statements' (for a person with significant control
statement) and 'secure' (for a super secure person).
kwargs (dict): additional keywords passed into requests.session.get
*params* keyword.
"""
# Dict mapping entity_type strings to url strings
entities = {'individual': 'individual',
'corporate': 'corporate-entity',
'legal': 'legal-person',
'statements': 'persons-with-significant-control-statements',
'secure': 'super-secure'}
# Make sure correct entity_type supplied
try:
entity = entities[entity_type]
except KeyError as e:
msg = ("Wrong entity_type supplied. Please choose from " +
"individual, corporate, legal, statements or secure")
raise Exception(msg) from e
# Construct the request and return the result
baseuri = (self._BASE_URI +
'company/{}/persons-with-significant-control/'.format(num) +
'{}/{}'.format(entity, entity_id))
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | Get details of a specific entity with significant control.
Args:
num (str, int): Company number to search on.
entity_id (str, int): Entity id to request details for
entity_type (str, int): What type of entity to search for. Defaults
to 'individual'. Other possible options are
'corporate' (for corporate entities), 'legal' (for legal
persons), 'statements' (for a person with significant control
statement) and 'secure' (for a super secure person).
kwargs (dict): additional keywords passed into requests.session.get
*params* keyword. | https://github.com/JamesGardiner/chwrapper/blob/50f9cb2f5264c59505e8cc4e45ee6dc5d5669134/chwrapper/services/search.py#L226-L265 |
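Editor's note: a sketch of looking up a single person with significant control via the method above; entity_type must be one of the mapped keys ('individual', 'corporate', 'legal', 'statements', 'secure'), the ids are placeholders, and the 'name' field is an assumption about the response payload.
def psc_name(search, company_number, psc_id):
    """Fetch one PSC record and return its registered name, if any."""
    response = search.significant_control(company_number, psc_id,
                                          entity_type='individual')
    return response.json().get('name')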
JamesGardiner/chwrapper | chwrapper/services/search.py | Search.document | def document(self, document_id, **kwargs):
"""Requests for a document by the document id.
Normally the response.content can be saved as a pdf file
Args:
document_id (str): The id of the document retrieved.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
baseuri = '{}document/{}/content'.format(self._DOCUMENT_URI,
document_id)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | python | def document(self, document_id, **kwargs):
"""Requests for a document by the document id.
Normally the response.content can be saved as a pdf file
Args:
document_id (str): The id of the document retrieved.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
baseuri = '{}document/{}/content'.format(self._DOCUMENT_URI,
document_id)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | Request a document by its document id.
The response content can normally be saved as a PDF file.
Args:
document_id (str): The id of the document retrieved.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword. | https://github.com/JamesGardiner/chwrapper/blob/50f9cb2f5264c59505e8cc4e45ee6dc5d5669134/chwrapper/services/search.py#L267-L280 |
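Editor's note: a sketch of downloading a filing document with the method above and writing the (usually PDF) content to disk; the document id is a placeholder and `search` is an authenticated chwrapper.Search instance.
def save_document(search, document_id, path='filing.pdf'):
    """Download a document and write its content to disk."""
    response = search.document(document_id)
    with open(path, 'wb') as handle:
        handle.write(response.content)
    return path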
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordWebhook.add_file | def add_file(self, file, filename):
"""
add file to webhook
:param file: file content
:param filename: filename
:return:
"""
self.files['_{}'.format(filename)] = (filename, file) | python | def add_file(self, file, filename):
"""
add file to webhook
:param file: file content
:param filename: filename
:return:
"""
self.files['_{}'.format(filename)] = (filename, file) | add file to webhook
:param file: file content
:param filename: filename
:return: | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L36-L43 |
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordWebhook.add_embed | def add_embed(self, embed):
"""
add embedded rich content
:param embed: embed object or dict
"""
self.embeds.append(embed.__dict__ if isinstance(embed, DiscordEmbed) else embed) | python | def add_embed(self, embed):
"""
add embedded rich content
:param embed: embed object or dict
"""
self.embeds.append(embed.__dict__ if isinstance(embed, DiscordEmbed) else embed) | add embedded rich content
:param embed: embed object or dict | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L45-L50 |
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordWebhook.json | def json(self):
"""
convert webhook data to json
:return webhook data as json:
"""
data = dict()
embeds = self.embeds
self.embeds = list()
# convert DiscordEmbed to dict
for embed in embeds:
self.add_embed(embed)
for key, value in self.__dict__.items():
if value and key not in ['url', 'files', 'filename']:
data[key] = value
embeds_empty = all(not embed for embed in data["embeds"]) if 'embeds' in data else True
if embeds_empty and 'content' not in data and bool(self.files) is False:
logger.error('webhook message is empty! set content or embed data')
return data | python | def json(self):
"""
convert webhook data to json
:return webhook data as json:
"""
data = dict()
embeds = self.embeds
self.embeds = list()
# convert DiscordEmbed to dict
for embed in embeds:
self.add_embed(embed)
for key, value in self.__dict__.items():
if value and key not in ['url', 'files', 'filename']:
data[key] = value
embeds_empty = all(not embed for embed in data["embeds"]) if 'embeds' in data else True
if embeds_empty and 'content' not in data and bool(self.files) is False:
logger.error('webhook message is empty! set content or embed data')
return data | convert webhook data to json
:return webhook data as json: | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L74-L91 |
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordWebhook.execute | def execute(self):
"""
execute Webhook
:return:
"""
if bool(self.files) is False:
response = requests.post(self.url, json=self.json, proxies=self.proxies)
else:
self.files['payload_json'] = (None, json.dumps(self.json))
response = requests.post(self.url, files=self.files, proxies=self.proxies)
if response.status_code in [200, 204]:
logger.debug("Webhook executed")
else:
logger.error('status code %s: %s' % (response.status_code, response.content.decode("utf-8"))) | python | def execute(self):
"""
execute Webhook
:return:
"""
if bool(self.files) is False:
response = requests.post(self.url, json=self.json, proxies=self.proxies)
else:
self.files['payload_json'] = (None, json.dumps(self.json))
response = requests.post(self.url, files=self.files, proxies=self.proxies)
if response.status_code in [200, 204]:
logger.debug("Webhook executed")
else:
logger.error('status code %s: %s' % (response.status_code, response.content.decode("utf-8"))) | execute Webhook
:return: | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L93-L106 |
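Editor's note: an end-to-end sketch of the webhook methods above; the webhook URL is a placeholder and the constructor keywords are assumptions that mirror the attributes serialized by json().
from discord_webhook import DiscordWebhook, DiscordEmbed
webhook = DiscordWebhook(url='https://discord.com/api/webhooks/XXX/YYY',  # placeholder
                         content='Build finished')
embed = DiscordEmbed(title='CI status', description='All tests passed', color=242424)
webhook.add_embed(embed)
with open('report.txt', 'rb') as handle:              # attach a file alongside the embed
    webhook.add_file(file=handle.read(), filename='report.txt')
webhook.execute()   # sent as multipart because a file is attached (see above)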
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordEmbed.set_timestamp | def set_timestamp(self, timestamp=None):
"""
set timestamp of embed content
:param timestamp: (optional) timestamp of embed content
"""
self.timestamp = timestamp or str(datetime.datetime.utcfromtimestamp(time.time())) | python | def set_timestamp(self, timestamp=None):
"""
set timestamp of embed content
:param timestamp: (optional) timestamp of embed content
"""
self.timestamp = timestamp or str(datetime.datetime.utcfromtimestamp(time.time())) | set timestamp of embed content
:param timestamp: (optional) timestamp of embed content | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L163-L168 |
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordEmbed.set_footer | def set_footer(self, **kwargs):
"""
set footer information of embed
:keyword text: footer text
:keyword icon_url: url of footer icon (only supports http(s) and attachments)
:keyword proxy_icon_url: a proxied url of footer icon
"""
self.footer = {
'text': kwargs.get('text'),
'icon_url': kwargs.get('icon_url'),
'proxy_icon_url': kwargs.get('proxy_icon_url')
} | python | def set_footer(self, **kwargs):
"""
set footer information of embed
:keyword text: footer text
:keyword icon_url: url of footer icon (only supports http(s) and attachments)
:keyword proxy_icon_url: a proxied url of footer icon
"""
self.footer = {
'text': kwargs.get('text'),
'icon_url': kwargs.get('icon_url'),
'proxy_icon_url': kwargs.get('proxy_icon_url')
} | set footer information of embed
:keyword text: footer text
:keyword icon_url: url of footer icon (only supports http(s) and attachments)
:keyword proxy_icon_url: a proxied url of footer icon | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L177-L188 |
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordEmbed.set_image | def set_image(self, **kwargs):
"""
set image of embed
:keyword url: source url of image (only supports http(s) and attachments)
:keyword proxy_url: a proxied url of the image
:keyword height: height of image
:keyword width: width of image
"""
self.image = {
'url': kwargs.get('url'),
'proxy_url': kwargs.get('proxy_url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} | python | def set_image(self, **kwargs):
"""
set image of embed
:keyword url: source url of image (only supports http(s) and attachments)
:keyword proxy_url: a proxied url of the image
:keyword height: height of image
:keyword width: width of image
"""
self.image = {
'url': kwargs.get('url'),
'proxy_url': kwargs.get('proxy_url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} | set image of embed
:keyword url: source url of image (only supports http(s) and attachments)
:keyword proxy_url: a proxied url of the image
:keyword height: height of image
:keyword width: width of image | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L190-L203 |
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordEmbed.set_thumbnail | def set_thumbnail(self, **kwargs):
"""
set thumbnail of embed
:keyword url: source url of thumbnail (only supports http(s) and attachments)
:keyword proxy_url: a proxied thumbnail of the image
:keyword height: height of thumbnail
:keyword width: width of thumbnail
"""
self.thumbnail = {
'url': kwargs.get('url'),
'proxy_url': kwargs.get('proxy_url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} | python | def set_thumbnail(self, **kwargs):
"""
set thumbnail of embed
:keyword url: source url of thumbnail (only supports http(s) and attachments)
:keyword proxy_url: a proxied thumbnail of the image
:keyword height: height of thumbnail
:keyword width: width of thumbnail
"""
self.thumbnail = {
'url': kwargs.get('url'),
'proxy_url': kwargs.get('proxy_url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} | set thumbnail of embed
:keyword url: source url of thumbnail (only supports http(s) and attachments)
:keyword proxy_url: a proxied thumbnail of the image
:keyword height: height of thumbnail
:keyword width: width of thumbnail | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L205-L218 |
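set_image() and set_thumbnail() share the same keyword shape; a short sketch follows (the DiscordEmbed constructor keyword and the URLs are placeholders/assumptions).

from discord_webhook import DiscordEmbed

embed = DiscordEmbed(title='Nightly screenshot')   # assumed constructor keyword
embed.set_image(url='https://example.com/full.png', width=1920, height=1080)
embed.set_thumbnail(url='https://example.com/thumb.png')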
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordEmbed.set_video | def set_video(self, **kwargs):
"""
set video of embed
:keyword url: source url of video
:keyword height: height of video
:keyword width: width of video
"""
self.video = {
'url': kwargs.get('url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} | python | def set_video(self, **kwargs):
"""
set video of embed
:keyword url: source url of video
:keyword height: height of video
:keyword width: width of video
"""
self.video = {
'url': kwargs.get('url'),
'height': kwargs.get('height'),
'width': kwargs.get('width'),
} | set video of embed
:keyword url: source url of video
:keyword height: height of video
:keyword width: width of video | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L220-L231 |
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordEmbed.set_author | def set_author(self, **kwargs):
"""
set author of embed
:keyword name: name of author
:keyword url: url of author
:keyword icon_url: url of author icon (only supports http(s) and attachments)
:keyword proxy_icon_url: a proxied url of author icon
"""
self.author = {
'name': kwargs.get('name'),
'url': kwargs.get('url'),
'icon_url': kwargs.get('icon_url'),
'proxy_icon_url': kwargs.get('proxy_icon_url'),
} | python | def set_author(self, **kwargs):
"""
set author of embed
:keyword name: name of author
:keyword url: url of author
:keyword icon_url: url of author icon (only supports http(s) and attachments)
:keyword proxy_icon_url: a proxied url of author icon
"""
self.author = {
'name': kwargs.get('name'),
'url': kwargs.get('url'),
'icon_url': kwargs.get('icon_url'),
'proxy_icon_url': kwargs.get('proxy_icon_url'),
} | set author of embed
:keyword name: name of author
:keyword url: url of author
:keyword icon_url: url of author icon (only supports http(s) and attachments)
:keyword proxy_icon_url: a proxied url of author icon | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L244-L257 |
lovvskillz/python-discord-webhook | discord_webhook/webhook.py | DiscordEmbed.add_embed_field | def add_embed_field(self, **kwargs):
"""
set field of embed
:keyword name: name of the field
:keyword value: value of the field
:keyword inline: (optional) whether or not this field should display inline
"""
self.fields.append({
'name': kwargs.get('name'),
'value': kwargs.get('value'),
'inline': kwargs.get('inline', True)
}) | python | def add_embed_field(self, **kwargs):
"""
set field of embed
:keyword name: name of the field
:keyword value: value of the field
:keyword inline: (optional) whether or not this field should display inline
"""
self.fields.append({
'name': kwargs.get('name'),
'value': kwargs.get('value'),
'inline': kwargs.get('inline', True)
}) | set field of embed
:keyword name: name of the field
:keyword value: value of the field
:keyword inline: (optional) whether or not this field should display inline | https://github.com/lovvskillz/python-discord-webhook/blob/5278184078c9da9362b6343c478a92e0904a7f83/discord_webhook/webhook.py#L259-L270 |
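Putting the embed setters together with add_embed() and execute() from the webhook class above; the constructors' keyword arguments and the webhook URL are assumptions taken from the project README.

from discord_webhook import DiscordWebhook, DiscordEmbed

webhook = DiscordWebhook(url='https://discord.com/api/webhooks/<id>/<token>')   # placeholder
embed = DiscordEmbed(title='Nightly report', description='Test run summary', color=242424)
embed.set_author(name='report-bot', icon_url='https://example.com/bot.png')
embed.set_footer(text='generated automatically')
embed.set_timestamp()
embed.add_embed_field(name='passed', value='128')              # inline by default
embed.add_embed_field(name='failed', value='3', inline=False)
webhook.add_embed(embed)
webhook.execute()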
LegoStormtroopr/django-spaghetti-and-meatballs | django_spaghetti/views.py | Plate.plate | def plate(self):
"""
Serves up a delicious plate with your models
"""
request = self.request
if self.settings is None:
graph_settings = deepcopy(getattr(settings, 'SPAGHETTI_SAUCE', {}))
graph_settings.update(self.override_settings)
else:
graph_settings = self.settings
apps = graph_settings.get('apps', [])
excludes = [
"%s__%s" % (app, model)
for app, models in graph_settings.get('exclude', {}).items()
for model in models
]
models = ContentType.objects.filter(app_label__in=apps)
nodes = []
edges = []
for model in models:
if (model.model_class() is None):
continue
model.is_proxy = model.model_class()._meta.proxy
if (model.is_proxy and not graph_settings.get('show_proxy', False)):
continue
model.doc = model.model_class().__doc__
_id = "%s__%s" % (model.app_label, model.model)
if _id in excludes:
continue
label = self.get_node_label(model)
fields = [f for f in model.model_class()._meta.fields]
many = [f for f in model.model_class()._meta.many_to_many]
if graph_settings.get('show_fields', True):
label += "\n%s\n" % ("-" * len(model.model))
label += "\n".join([str(f.name) for f in fields])
edge_color = {'inherit': 'from'}
for f in fields + many:
if f.remote_field is not None:
m = f.remote_field.model._meta
to_id = "%s__%s" % (m.app_label, m.model_name)
if to_id in excludes:
pass
elif _id == to_id and graph_settings.get('ignore_self_referential', False):
pass
else:
if m.app_label != model.app_label:
edge_color = {'inherit': 'both'}
edge = {'from': _id, 'to': to_id, 'color': edge_color}
if str(f.name).endswith('_ptr'):
# fields that end in _ptr are pointing to a parent object
edge.update({
'arrows': {'to': {'scaleFactor': 0.75}}, # needed to draw from-to
'font': {'align': 'middle'},
'label': 'is a',
'dashes': True
})
elif type(f) == related.ForeignKey:
edge.update({
'arrows': {'to': {'scaleFactor': 0.75}}
})
elif type(f) == related.OneToOneField:
edge.update({
'font': {'align': 'middle'},
'label': '|'
})
elif type(f) == related.ManyToManyField:
edge.update({
'color': {'color': 'gray'},
'arrows': {'to': {'scaleFactor': 1}, 'from': {'scaleFactor': 1}},
})
edges.append(edge)
if model.is_proxy:
proxy = model.model_class()._meta.proxy_for_model._meta
model.proxy = proxy
edge = {
'to': _id,
'from': "%s__%s" % (proxy.app_label, proxy.model_name),
'color': edge_color,
}
edges.append(edge)
all_node_fields = fields
if graph_settings.get('show_m2m_field_detail', False):
all_node_fields = fields + many
nodes.append(
{
'id': _id,
'label': label,
'shape': 'box',
'group': model.app_label,
'title': get_template(self.meatball_template_name).render(
{'model': model, 'fields': all_node_fields}
)
}
)
data = {
'meatballs': json.dumps(nodes),
'spaghetti': json.dumps(edges)
}
return render(request, self.plate_template_name, data) | python | def plate(self):
"""
Serves up a delicious plate with your models
"""
request = self.request
if self.settings is None:
graph_settings = deepcopy(getattr(settings, 'SPAGHETTI_SAUCE', {}))
graph_settings.update(self.override_settings)
else:
graph_settings = self.settings
apps = graph_settings.get('apps', [])
excludes = [
"%s__%s" % (app, model)
for app, models in graph_settings.get('exclude', {}).items()
for model in models
]
models = ContentType.objects.filter(app_label__in=apps)
nodes = []
edges = []
for model in models:
if (model.model_class() is None):
continue
model.is_proxy = model.model_class()._meta.proxy
if (model.is_proxy and not graph_settings.get('show_proxy', False)):
continue
model.doc = model.model_class().__doc__
_id = "%s__%s" % (model.app_label, model.model)
if _id in excludes:
continue
label = self.get_node_label(model)
fields = [f for f in model.model_class()._meta.fields]
many = [f for f in model.model_class()._meta.many_to_many]
if graph_settings.get('show_fields', True):
label += "\n%s\n" % ("-" * len(model.model))
label += "\n".join([str(f.name) for f in fields])
edge_color = {'inherit': 'from'}
for f in fields + many:
if f.remote_field is not None:
m = f.remote_field.model._meta
to_id = "%s__%s" % (m.app_label, m.model_name)
if to_id in excludes:
pass
elif _id == to_id and graph_settings.get('ignore_self_referential', False):
pass
else:
if m.app_label != model.app_label:
edge_color = {'inherit': 'both'}
edge = {'from': _id, 'to': to_id, 'color': edge_color}
if str(f.name).endswith('_ptr'):
# fields that end in _ptr are pointing to a parent object
edge.update({
'arrows': {'to': {'scaleFactor': 0.75}}, # needed to draw from-to
'font': {'align': 'middle'},
'label': 'is a',
'dashes': True
})
elif type(f) == related.ForeignKey:
edge.update({
'arrows': {'to': {'scaleFactor': 0.75}}
})
elif type(f) == related.OneToOneField:
edge.update({
'font': {'align': 'middle'},
'label': '|'
})
elif type(f) == related.ManyToManyField:
edge.update({
'color': {'color': 'gray'},
'arrows': {'to': {'scaleFactor': 1}, 'from': {'scaleFactor': 1}},
})
edges.append(edge)
if model.is_proxy:
proxy = model.model_class()._meta.proxy_for_model._meta
model.proxy = proxy
edge = {
'to': _id,
'from': "%s__%s" % (proxy.app_label, proxy.model_name),
'color': edge_color,
}
edges.append(edge)
all_node_fields = fields
if graph_settings.get('show_m2m_field_detail', False):
all_node_fields = fields + many
nodes.append(
{
'id': _id,
'label': label,
'shape': 'box',
'group': model.app_label,
'title': get_template(self.meatball_template_name).render(
{'model': model, 'fields': all_node_fields}
)
}
)
data = {
'meatballs': json.dumps(nodes),
'spaghetti': json.dumps(edges)
}
return render(request, self.plate_template_name, data) | Serves up a delicious plate with your models | https://github.com/LegoStormtroopr/django-spaghetti-and-meatballs/blob/19240f0faeddb0e6fdd9e657cb1565d78bf43f10/django_spaghetti/views.py#L44-L153 |
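plate() is driven entirely by a settings dict; the keys below are exactly the ones the method reads, while the app and model names are placeholders. Wiring the view into urls.py is not shown in this file.

# settings.py (sketch)
SPAGHETTI_SAUCE = {
    'apps': ['auth', 'polls'],              # app labels to graph
    'show_fields': False,                   # append field names to each node label
    'exclude': {'auth': ['permission']},    # app_label -> model names to hide
    'show_proxy': False,                    # draw proxy models as their own nodes
    'ignore_self_referential': True,        # drop edges from a model to itself
    'show_m2m_field_detail': False,         # include M2M fields in the hover template
}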
LegoStormtroopr/django-spaghetti-and-meatballs | django_spaghetti/views.py | Plate.get_node_label | def get_node_label(self, model):
"""
Defines how labels are constructed from models.
Default - uses verbose name, lines breaks where sensible
"""
if model.is_proxy:
label = "(P) %s" % (model.name.title())
else:
label = "%s" % (model.name.title())
line = ""
new_label = []
for w in label.split(" "):
if len(line + w) > 15:
new_label.append(line)
line = w
else:
line += " "
line += w
new_label.append(line)
return "\n".join(new_label) | python | def get_node_label(self, model):
"""
Defines how labels are constructed from models.
Default - uses verbose name, lines breaks where sensible
"""
if model.is_proxy:
label = "(P) %s" % (model.name.title())
else:
label = "%s" % (model.name.title())
line = ""
new_label = []
for w in label.split(" "):
if len(line + w) > 15:
new_label.append(line)
line = w
else:
line += " "
line += w
new_label.append(line)
return "\n".join(new_label) | Defines how labels are constructed from models.
Default - uses verbose name, lines breaks where sensible | https://github.com/LegoStormtroopr/django-spaghetti-and-meatballs/blob/19240f0faeddb0e6fdd9e657cb1565d78bf43f10/django_spaghetti/views.py#L155-L176 |
LegoStormtroopr/django-spaghetti-and-meatballs | django_spaghetti/__init__.py | get_version | def get_version(release_level=True):
"""
Return the formatted version information
"""
vers = ["%(major)i.%(minor)i.%(micro)i" % __version_info__]
if release_level and __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s%(serial)i' % __version_info__)
return ''.join(vers) | python | def get_version(release_level=True):
"""
Return the formatted version information
"""
vers = ["%(major)i.%(minor)i.%(micro)i" % __version_info__]
if release_level and __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s%(serial)i' % __version_info__)
return ''.join(vers) | Return the formatted version information | https://github.com/LegoStormtroopr/django-spaghetti-and-meatballs/blob/19240f0faeddb0e6fdd9e657cb1565d78bf43f10/django_spaghetti/__init__.py#L10-L17 |
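The format strings above imply a module-level __version_info__ mapping with these keys; the concrete values here are hypothetical.

__version_info__ = {
    'major': 0, 'minor': 2, 'micro': 4,
    'releaselevel': 'beta', 'serial': 1,
}
# get_version()                    -> '0.2.4beta1'
# get_version(release_level=False) -> '0.2.4'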
drbild/sslpsk | sslpsk/sslpsk.py | _python_psk_client_callback | def _python_psk_client_callback(ssl_id, hint):
"""Called by _sslpsk.c to return the (psk, identity) tuple for the socket with
the specified ssl socket.
"""
if ssl_id not in _callbacks:
return ("", "")
else:
res = _callbacks[ssl_id](hint)
return res if isinstance(res, tuple) else (res, "") | python | def _python_psk_client_callback(ssl_id, hint):
"""Called by _sslpsk.c to return the (psk, identity) tuple for the socket with
the specified ssl socket.
"""
if ssl_id not in _callbacks:
return ("", "")
else:
res = _callbacks[ssl_id](hint)
return res if isinstance(res, tuple) else (res, "") | Called by _sslpsk.c to return the (psk, identity) tuple for the socket with
the specified ssl socket. | https://github.com/drbild/sslpsk/blob/583f7b1f775c33ddc1196a400188005c50cfeb0f/sslpsk/sslpsk.py#L38-L47 |
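A sketch of the callback contract enforced by _python_psk_client_callback(): the per-socket callback receives the server's identity hint and may return either a bare PSK or a (psk, identity) tuple. How the callback gets registered in _callbacks (e.g. by a wrap_socket-style helper) is not shown in this file.

def my_psk_callback(hint):
    # A bare return value is padded to (psk, "") by _python_psk_client_callback above.
    known = {'example-server': ('my-shared-secret', 'client-1')}
    return known.get(hint, '')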
drbild/sslpsk | sslpsk/sslpsk.py | _sslobj | def _sslobj(sock):
"""Returns the underlying PySLLSocket object with which the C extension
functions interface.
"""
if isinstance(sock._sslobj, _ssl._SSLSocket):
return sock._sslobj
else:
return sock._sslobj._sslobj | python | def _sslobj(sock):
"""Returns the underlying PySLLSocket object with which the C extension
functions interface.
"""
if isinstance(sock._sslobj, _ssl._SSLSocket):
return sock._sslobj
else:
return sock._sslobj._sslobj | Returns the underlying PySLLSocket object with which the C extension
functions interface. | https://github.com/drbild/sslpsk/blob/583f7b1f775c33ddc1196a400188005c50cfeb0f/sslpsk/sslpsk.py#L49-L58 |
mahmoudimus/nose-timer | nosetimer/plugin.py | _colorize | def _colorize(val, color):
"""Colorize a string using termcolor or colorama.
If any of them are available.
"""
if termcolor is not None:
val = termcolor.colored(val, color)
elif colorama is not None:
val = TERMCOLOR2COLORAMA[color] + val + colorama.Style.RESET_ALL
return val | python | def _colorize(val, color):
"""Colorize a string using termcolor or colorama.
If any of them are available.
"""
if termcolor is not None:
val = termcolor.colored(val, color)
elif colorama is not None:
val = TERMCOLOR2COLORAMA[color] + val + colorama.Style.RESET_ALL
return val | Colorize a string using termcolor or colorama.
If any of them are available. | https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L83-L93 |
mahmoudimus/nose-timer | nosetimer/plugin.py | TimerPlugin._parse_time | def _parse_time(self, value):
"""Parse string time representation to get number of milliseconds.
Raises the ``ValueError`` for invalid format.
"""
try:
# Default time unit is a second, we should convert it to milliseconds.
return int(value) * 1000
except ValueError:
# Try to parse if we are unlucky to cast value into int.
m = self.time_format.match(value)
if not m:
raise ValueError("Could not parse time represented by '{t}'".format(t=value))
time = int(m.group('time'))
if m.group('units') != 'ms':
time *= 1000
return time | python | def _parse_time(self, value):
"""Parse string time representation to get number of milliseconds.
Raises the ``ValueError`` for invalid format.
"""
try:
# Default time unit is a second, we should convert it to milliseconds.
return int(value) * 1000
except ValueError:
# Try to parse if we are unlucky to cast value into int.
m = self.time_format.match(value)
if not m:
raise ValueError("Could not parse time represented by '{t}'".format(t=value))
time = int(m.group('time'))
if m.group('units') != 'ms':
time *= 1000
return time | Parse string time representation to get number of milliseconds.
Raises the ``ValueError`` for invalid format. | https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L125-L140 |
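A standalone sketch of the same parsing rule. The regex stands in for TimerPlugin.time_format, which is defined elsewhere in the plugin, so its exact pattern is an assumption.

import re

time_format = re.compile(r'^(?P<time>\d+)(?P<units>ms|s)?$')   # assumed shape of time_format

def parse_time(value):
    try:
        return int(value) * 1000                  # bare numbers are treated as seconds
    except ValueError:
        m = time_format.match(value)
        if not m:
            raise ValueError("Could not parse time represented by '{t}'".format(t=value))
        millis = int(m.group('time'))
        if m.group('units') != 'ms':
            millis *= 1000
        return millis

# parse_time('3') == parse_time('3s') == 3000; parse_time('500ms') == 500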
mahmoudimus/nose-timer | nosetimer/plugin.py | TimerPlugin.configure | def configure(self, options, config):
"""Configures the test timer plugin."""
super(TimerPlugin, self).configure(options, config)
self.config = config
if self.enabled:
self.timer_top_n = int(options.timer_top_n)
self.timer_ok = self._parse_time(options.timer_ok)
self.timer_warning = self._parse_time(options.timer_warning)
self.timer_filter = self._parse_filter(options.timer_filter)
self.timer_fail = options.timer_fail
self.timer_no_color = True
self.json_file = options.json_file
# Windows + nosetests does not support colors (even with colorama).
if not IS_NT:
self.timer_no_color = options.timer_no_color
# determine if multiprocessing plugin enabled
self.multiprocessing_enabled = bool(getattr(options, 'multiprocess_workers', False)) | python | def configure(self, options, config):
"""Configures the test timer plugin."""
super(TimerPlugin, self).configure(options, config)
self.config = config
if self.enabled:
self.timer_top_n = int(options.timer_top_n)
self.timer_ok = self._parse_time(options.timer_ok)
self.timer_warning = self._parse_time(options.timer_warning)
self.timer_filter = self._parse_filter(options.timer_filter)
self.timer_fail = options.timer_fail
self.timer_no_color = True
self.json_file = options.json_file
# Windows + nosetests does not support colors (even with colorama).
if not IS_NT:
self.timer_no_color = options.timer_no_color
# determine if multiprocessing plugin enabled
self.multiprocessing_enabled = bool(getattr(options, 'multiprocess_workers', False)) | Configures the test timer plugin. | https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L147-L165 |
mahmoudimus/nose-timer | nosetimer/plugin.py | TimerPlugin.report | def report(self, stream):
"""Report the test times."""
if not self.enabled:
return
# if multiprocessing plugin enabled - get items from results queue
if self.multiprocessing_enabled:
for i in range(_results_queue.qsize()):
try:
k, v, s = _results_queue.get(False)
self._timed_tests[k] = {
'time': v,
'status': s,
}
except Queue.Empty:
pass
d = sorted(self._timed_tests.items(), key=lambda item: item[1]['time'], reverse=True)
if self.json_file:
dict_type = OrderedDict if self.timer_top_n else dict
with open(self.json_file, 'w') as f:
json.dump({'tests': dict_type((k, v) for k, v in d)}, f)
total_time = sum([vv['time'] for kk, vv in d])
for i, (test, time_and_status) in enumerate(d):
time_taken = time_and_status['time']
status = time_and_status['status']
if i < self.timer_top_n or self.timer_top_n == -1:
color = self._get_result_color(time_taken)
percent = 0 if total_time == 0 else time_taken / total_time * 100
line = self._format_report_line(
test=test,
time_taken=time_taken,
color=color,
status=status,
percent=percent,
)
_filter = self._COLOR_TO_FILTER.get(color)
if self.timer_filter is None or _filter is None or _filter in self.timer_filter:
stream.writeln(line) | python | def report(self, stream):
"""Report the test times."""
if not self.enabled:
return
# if multiprocessing plugin enabled - get items from results queue
if self.multiprocessing_enabled:
for i in range(_results_queue.qsize()):
try:
k, v, s = _results_queue.get(False)
self._timed_tests[k] = {
'time': v,
'status': s,
}
except Queue.Empty:
pass
d = sorted(self._timed_tests.items(), key=lambda item: item[1]['time'], reverse=True)
if self.json_file:
dict_type = OrderedDict if self.timer_top_n else dict
with open(self.json_file, 'w') as f:
json.dump({'tests': dict_type((k, v) for k, v in d)}, f)
total_time = sum([vv['time'] for kk, vv in d])
for i, (test, time_and_status) in enumerate(d):
time_taken = time_and_status['time']
status = time_and_status['status']
if i < self.timer_top_n or self.timer_top_n == -1:
color = self._get_result_color(time_taken)
percent = 0 if total_time == 0 else time_taken / total_time * 100
line = self._format_report_line(
test=test,
time_taken=time_taken,
color=color,
status=status,
percent=percent,
)
_filter = self._COLOR_TO_FILTER.get(color)
if self.timer_filter is None or _filter is None or _filter in self.timer_filter:
stream.writeln(line) | Report the test times. | https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L171-L212 |
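When --timer-json-file is set, report() writes self._timed_tests in the shape below; test names and numbers are illustrative, and status values other than 'success' come from result hooks that are not shown here.

{
    "tests": {
        "tests.test_api.TestCreate.test_ok": {"time": 0.0421, "status": "success"},
        "tests.test_api.TestCreate.test_slow_path": {"time": 2.7310, "status": "success"}
    }
}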
mahmoudimus/nose-timer | nosetimer/plugin.py | TimerPlugin._get_result_color | def _get_result_color(self, time_taken):
"""Get time taken result color."""
time_taken_ms = time_taken * 1000
if time_taken_ms <= self.timer_ok:
color = 'green'
elif time_taken_ms <= self.timer_warning:
color = 'yellow'
else:
color = 'red'
return color | python | def _get_result_color(self, time_taken):
"""Get time taken result color."""
time_taken_ms = time_taken * 1000
if time_taken_ms <= self.timer_ok:
color = 'green'
elif time_taken_ms <= self.timer_warning:
color = 'yellow'
else:
color = 'red'
return color | Get time taken result color. | https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L214-L224 |
mahmoudimus/nose-timer | nosetimer/plugin.py | TimerPlugin.threshold | def threshold(self):
"""Get maximum test time allowed when --timer-fail option is used."""
if self._threshold is None:
self._threshold = {
'error': self.timer_warning,
'warning': self.timer_ok,
}[self.timer_fail]
return self._threshold | python | def threshold(self):
"""Get maximum test time allowed when --timer-fail option is used."""
if self._threshold is None:
self._threshold = {
'error': self.timer_warning,
'warning': self.timer_ok,
}[self.timer_fail]
return self._threshold | Get maximum test time allowed when --timer-fail option is used. | https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L227-L234 |
mahmoudimus/nose-timer | nosetimer/plugin.py | TimerPlugin._colored_time | def _colored_time(self, time_taken, color=None):
"""Get formatted and colored string for a given time taken."""
if self.timer_no_color:
return "{0:0.4f}s".format(time_taken)
return _colorize("{0:0.4f}s".format(time_taken), color) | python | def _colored_time(self, time_taken, color=None):
"""Get formatted and colored string for a given time taken."""
if self.timer_no_color:
return "{0:0.4f}s".format(time_taken)
return _colorize("{0:0.4f}s".format(time_taken), color) | Get formatted and colored string for a given time taken. | https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L236-L241 |
mahmoudimus/nose-timer | nosetimer/plugin.py | TimerPlugin._format_report_line | def _format_report_line(self, test, time_taken, color, status, percent):
"""Format a single report line."""
return "[{0}] {3:04.2f}% {1}: {2}".format(
status, test, self._colored_time(time_taken, color), percent
) | python | def _format_report_line(self, test, time_taken, color, status, percent):
"""Format a single report line."""
return "[{0}] {3:04.2f}% {1}: {2}".format(
status, test, self._colored_time(time_taken, color), percent
) | Format a single report line. | https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L243-L247 |
mahmoudimus/nose-timer | nosetimer/plugin.py | TimerPlugin.addSuccess | def addSuccess(self, test, capt=None):
"""Called when a test passes."""
time_taken = self._register_time(test, 'success')
if self.timer_fail is not None and time_taken * 1000.0 > self.threshold:
test.fail('Test was too slow (took {0:0.4f}s, threshold was '
'{1:0.4f}s)'.format(time_taken, self.threshold / 1000.0)) | python | def addSuccess(self, test, capt=None):
"""Called when a test passes."""
time_taken = self._register_time(test, 'success')
if self.timer_fail is not None and time_taken * 1000.0 > self.threshold:
test.fail('Test was too slow (took {0:0.4f}s, threshold was '
'{1:0.4f}s)'.format(time_taken, self.threshold / 1000.0)) | Called when a test passes. | https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L268-L273 |
mahmoudimus/nose-timer | nosetimer/plugin.py | TimerPlugin.options | def options(self, parser, env=os.environ):
"""Register commandline options."""
super(TimerPlugin, self).options(parser, env)
# timer top n
parser.add_option(
"--timer-top-n",
action="store",
default="-1",
dest="timer_top_n",
help=(
"When the timer plugin is enabled, only show the N tests that "
"consume more time. The default, -1, shows all tests."
),
)
parser.add_option(
"--timer-json-file",
action="store",
default=None,
dest="json_file",
help=(
"Save the results of the timing and status of each tests in "
"said Json file."
),
)
_time_units_help = ("Default time unit is a second, but you can set "
"it explicitly (e.g. 1s, 500ms)")
# timer ok
parser.add_option(
"--timer-ok",
action="store",
default=1,
dest="timer_ok",
help=(
"Normal execution time. Such tests will be highlighted in "
"green. {units_help}.".format(units_help=_time_units_help)
),
)
# time warning
parser.add_option(
"--timer-warning",
action="store",
default=3,
dest="timer_warning",
help=(
"Warning about execution time to highlight slow tests in "
"yellow. Tests which take more time will be highlighted in "
"red. {units_help}.".format(units_help=_time_units_help)
),
)
# Windows + nosetests does not support colors (even with colorama).
if not IS_NT:
parser.add_option(
"--timer-no-color",
action="store_true",
default=False,
dest="timer_no_color",
help="Don't colorize output (useful for non-tty output).",
)
# timer filter
parser.add_option(
"--timer-filter",
action="store",
default=None,
dest="timer_filter",
help="Show filtered results only (ok,warning,error).",
)
# timer fail
parser.add_option(
"--timer-fail",
action="store",
default=None,
dest="timer_fail",
choices=('warning', 'error'),
help="Fail tests that exceed a threshold (warning,error)",
) | python | def options(self, parser, env=os.environ):
"""Register commandline options."""
super(TimerPlugin, self).options(parser, env)
# timer top n
parser.add_option(
"--timer-top-n",
action="store",
default="-1",
dest="timer_top_n",
help=(
"When the timer plugin is enabled, only show the N tests that "
"consume more time. The default, -1, shows all tests."
),
)
parser.add_option(
"--timer-json-file",
action="store",
default=None,
dest="json_file",
help=(
"Save the results of the timing and status of each tests in "
"said Json file."
),
)
_time_units_help = ("Default time unit is a second, but you can set "
"it explicitly (e.g. 1s, 500ms)")
# timer ok
parser.add_option(
"--timer-ok",
action="store",
default=1,
dest="timer_ok",
help=(
"Normal execution time. Such tests will be highlighted in "
"green. {units_help}.".format(units_help=_time_units_help)
),
)
# time warning
parser.add_option(
"--timer-warning",
action="store",
default=3,
dest="timer_warning",
help=(
"Warning about execution time to highlight slow tests in "
"yellow. Tests which take more time will be highlighted in "
"red. {units_help}.".format(units_help=_time_units_help)
),
)
# Windows + nosetests does not support colors (even with colorama).
if not IS_NT:
parser.add_option(
"--timer-no-color",
action="store_true",
default=False,
dest="timer_no_color",
help="Don't colorize output (useful for non-tty output).",
)
# timer filter
parser.add_option(
"--timer-filter",
action="store",
default=None,
dest="timer_filter",
help="Show filtered results only (ok,warning,error).",
)
# timer fail
parser.add_option(
"--timer-fail",
action="store",
default=None,
dest="timer_fail",
choices=('warning', 'error'),
help="Fail tests that exceed a threshold (warning,error)",
) | Register commandline options. | https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L294-L376 |
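A sketch of enabling these options programmatically. The '--with-timer' switch follows nose's usual --with-<plugin name> convention and, like the bare TimerPlugin() construction, is an assumption here.

import nose
from nosetimer.plugin import TimerPlugin

nose.run(argv=[
    'nosetests', '--with-timer',
    '--timer-ok', '500ms',
    '--timer-warning', '2s',
    '--timer-top-n', '10',
    '--timer-json-file', 'timing.json',
], addplugins=[TimerPlugin()])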
jaijuneja/PyTLDR | pytldr/summarize/textrank.py | TextRankSummarizer.summarize | def summarize(self, text, length=5, weighting='frequency', norm=None):
"""
        Implements the TextRank summarization algorithm, which closely follows the PageRank algorithm for ranking
web pages.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param weighting: 'frequency', 'binary' or 'tfidf' weighting of sentence terms ('frequency' by default)
:param norm: if 'l1' or 'l2', normalizes words by the length of their associated sentence to "down-weight"
the voting power of long sentences (None by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
# Compute the word frequency matrix. If norm is set to 'l1' or 'l2' then words are normalized
# by the length of their associated sentences (such that each vector of sentence terms sums to 1).
word_matrix = self._compute_matrix(sentences, weighting=weighting, norm=norm)
# Build the similarity graph by calculating the number of overlapping words between all
# combinations of sentences.
similarity_matrix = (word_matrix * word_matrix.T)
similarity_graph = networkx.from_scipy_sparse_matrix(similarity_matrix)
scores = networkx.pagerank(similarity_graph)
ranked_sentences = sorted(
((score, ndx) for ndx, score in scores.items()), reverse=True
)
top_sentences = [ranked_sentences[i][1] for i in range(length)]
top_sentences.sort()
return [unprocessed_sentences[i] for i in top_sentences] | python | def summarize(self, text, length=5, weighting='frequency', norm=None):
"""
        Implements the TextRank summarization algorithm, which closely follows the PageRank algorithm for ranking
web pages.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param weighting: 'frequency', 'binary' or 'tfidf' weighting of sentence terms ('frequency' by default)
:param norm: if 'l1' or 'l2', normalizes words by the length of their associated sentence to "down-weight"
the voting power of long sentences (None by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
# Compute the word frequency matrix. If norm is set to 'l1' or 'l2' then words are normalized
# by the length of their associated sentences (such that each vector of sentence terms sums to 1).
word_matrix = self._compute_matrix(sentences, weighting=weighting, norm=norm)
# Build the similarity graph by calculating the number of overlapping words between all
# combinations of sentences.
similarity_matrix = (word_matrix * word_matrix.T)
similarity_graph = networkx.from_scipy_sparse_matrix(similarity_matrix)
scores = networkx.pagerank(similarity_graph)
ranked_sentences = sorted(
((score, ndx) for ndx, score in scores.items()), reverse=True
)
top_sentences = [ranked_sentences[i][1] for i in range(length)]
top_sentences.sort()
        return [unprocessed_sentences[i] for i in top_sentences] | Implements the TextRank summarization algorithm, which closely follows the PageRank algorithm for ranking
web pages.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param weighting: 'frequency', 'binary' or 'tfidf' weighting of sentence terms ('frequency' by default)
:param norm: if 'l1' or 'l2', normalizes words by the length of their associated sentence to "down-weight"
the voting power of long sentences (None by default)
:return: list of sentences for the summary | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/summarize/textrank.py#L9-L49 |
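Typical usage of the summarizer above; constructing it with no arguments follows the project README and is an assumption, and the input can equally be raw text, a file path, or a URL.

from pytldr.summarize.textrank import TextRankSummarizer

summarizer = TextRankSummarizer()          # assumed no-argument construction
summary = summarizer.summarize('https://en.wikipedia.org/wiki/Automatic_summarization',
                               length=4, weighting='tfidf', norm='l2')
for sentence in summary:
    print(sentence)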
jaijuneja/PyTLDR | pytldr/nlp/tokenizer.py | Tokenizer.remove_stopwords | def remove_stopwords(self, tokens):
"""Remove all stopwords from a list of word tokens or a string of text."""
if isinstance(tokens, (list, tuple)):
return [word for word in tokens if word.lower() not in self._stopwords]
else:
return ' '.join(
[word for word in tokens.split(' ') if word.lower() not in self._stopwords]
) | python | def remove_stopwords(self, tokens):
"""Remove all stopwords from a list of word tokens or a string of text."""
if isinstance(tokens, (list, tuple)):
return [word for word in tokens if word.lower() not in self._stopwords]
else:
return ' '.join(
[word for word in tokens.split(' ') if word.lower() not in self._stopwords]
) | Remove all stopwords from a list of word tokens or a string of text. | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/nlp/tokenizer.py#L52-L59 |
jaijuneja/PyTLDR | pytldr/nlp/tokenizer.py | Tokenizer.stem | def stem(self, word):
"""Perform stemming on an input word."""
if self.stemmer:
return unicode_to_ascii(self._stemmer.stem(word))
else:
return word | python | def stem(self, word):
"""Perform stemming on an input word."""
if self.stemmer:
return unicode_to_ascii(self._stemmer.stem(word))
else:
return word | Perform stemming on an input word. | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/nlp/tokenizer.py#L61-L66 |
jaijuneja/PyTLDR | pytldr/nlp/tokenizer.py | Tokenizer.strip_punctuation | def strip_punctuation(text, exclude='', include=''):
"""Strip leading and trailing punctuation from an input string."""
chars_to_strip = ''.join(
set(list(punctuation)).union(set(list(include))) - set(list(exclude))
)
return text.strip(chars_to_strip) | python | def strip_punctuation(text, exclude='', include=''):
"""Strip leading and trailing punctuation from an input string."""
chars_to_strip = ''.join(
set(list(punctuation)).union(set(list(include))) - set(list(exclude))
)
return text.strip(chars_to_strip) | Strip leading and trailing punctuation from an input string. | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/nlp/tokenizer.py#L73-L78 |
jaijuneja/PyTLDR | pytldr/nlp/tokenizer.py | Tokenizer.tokenize_words | def tokenize_words(self, text):
"""Tokenize an input string into a list of words (with punctuation removed)."""
return [
self.strip_punctuation(word) for word in text.split(' ')
if self.strip_punctuation(word)
] | python | def tokenize_words(self, text):
"""Tokenize an input string into a list of words (with punctuation removed)."""
return [
self.strip_punctuation(word) for word in text.split(' ')
if self.strip_punctuation(word)
] | Tokenize an input string into a list of words (with punctuation removed). | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/nlp/tokenizer.py#L85-L90 |
jaijuneja/PyTLDR | pytldr/nlp/tokenizer.py | Tokenizer._remove_whitespace | def _remove_whitespace(text):
"""Remove excess whitespace from the ends of a given input string."""
# while True:
# old_text = text
# text = text.replace(' ', ' ')
# if text == old_text:
# return text
non_spaces = re.finditer(r'[^ ]', text)
        # re.finditer() returns an iterator (always truthy), so guard via next() instead
        first_non_space = next(non_spaces, None)
        if first_non_space is None:
            return text
        first_non_space = first_non_space.start()
last_non_space = None
for item in non_spaces:
last_non_space = item
if not last_non_space:
return text[first_non_space:]
else:
last_non_space = last_non_space.end()
return text[first_non_space:last_non_space] | python | def _remove_whitespace(text):
"""Remove excess whitespace from the ends of a given input string."""
# while True:
# old_text = text
# text = text.replace(' ', ' ')
# if text == old_text:
# return text
non_spaces = re.finditer(r'[^ ]', text)
        # re.finditer() returns an iterator (always truthy), so guard via next() instead
        first_non_space = next(non_spaces, None)
        if first_non_space is None:
            return text
        first_non_space = first_non_space.start()
last_non_space = None
for item in non_spaces:
last_non_space = item
if not last_non_space:
return text[first_non_space:]
else:
last_non_space = last_non_space.end()
return text[first_non_space:last_non_space] | Remove excess whitespace from the ends of a given input string. | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/nlp/tokenizer.py#L100-L123 |
jaijuneja/PyTLDR | pytldr/nlp/tokenizer.py | Tokenizer.tokenize_sentences | def tokenize_sentences(self, text, word_threshold=5):
"""
Returns a list of sentences given an input string of text.
:param text: input string
:param word_threshold: number of significant words that a sentence must contain to be counted
(to count all sentences set equal to 1; 5 by default)
:return: list of sentences
"""
punkt_params = PunktParameters()
# Not using set literal to allow compatibility with Python 2.6
punkt_params.abbrev_types = set([
'dr', 'vs', 'mr', 'mrs', 'ms', 'prof', 'mt', 'inc', 'i.e', 'e.g'
])
sentence_splitter = PunktSentenceTokenizer(punkt_params)
# 1. TOKENIZE "UNPROCESSED" SENTENCES FOR DISPLAY
# Need to adjust quotations for correct sentence splitting
text_unprocessed = text.replace('?"', '? "').replace('!"', '! "').replace('."', '. "')
# Treat line breaks as end of sentence (needed in cases where titles don't have a full stop)
text_unprocessed = text_unprocessed.replace('\n', ' . ')
# Perform sentence splitting
unprocessed_sentences = sentence_splitter.tokenize(text_unprocessed)
# Now that sentences have been split we can return them back to their normal formatting
for ndx, sentence in enumerate(unprocessed_sentences):
sentence = unicode_to_ascii(sentence) # Sentence splitter returns unicode strings
sentence = sentence.replace('? " ', '?" ').replace('! " ', '!" ').replace('. " ', '." ')
sentence = self._remove_whitespace(sentence) # Remove excess whitespace
sentence = sentence[:-2] if (sentence.endswith(' .') or sentence.endswith(' . ')) else sentence
unprocessed_sentences[ndx] = sentence
# 2. PROCESS THE SENTENCES TO PERFORM STEMMING, STOPWORDS REMOVAL ETC. FOR MATRIX COMPUTATION
processed_sentences = [self.sanitize_text(sen) for sen in unprocessed_sentences]
# Sentences should contain at least 'word_threshold' significant terms
filter_sentences = [i for i in range(len(processed_sentences))
if len(processed_sentences[i].replace('.', '').split(' ')) > word_threshold]
processed_sentences = [processed_sentences[i] for i in filter_sentences]
unprocessed_sentences = [unprocessed_sentences[i] for i in filter_sentences]
return processed_sentences, unprocessed_sentences | python | def tokenize_sentences(self, text, word_threshold=5):
"""
Returns a list of sentences given an input string of text.
:param text: input string
:param word_threshold: number of significant words that a sentence must contain to be counted
(to count all sentences set equal to 1; 5 by default)
:return: list of sentences
"""
punkt_params = PunktParameters()
# Not using set literal to allow compatibility with Python 2.6
punkt_params.abbrev_types = set([
'dr', 'vs', 'mr', 'mrs', 'ms', 'prof', 'mt', 'inc', 'i.e', 'e.g'
])
sentence_splitter = PunktSentenceTokenizer(punkt_params)
# 1. TOKENIZE "UNPROCESSED" SENTENCES FOR DISPLAY
# Need to adjust quotations for correct sentence splitting
text_unprocessed = text.replace('?"', '? "').replace('!"', '! "').replace('."', '. "')
# Treat line breaks as end of sentence (needed in cases where titles don't have a full stop)
text_unprocessed = text_unprocessed.replace('\n', ' . ')
# Perform sentence splitting
unprocessed_sentences = sentence_splitter.tokenize(text_unprocessed)
# Now that sentences have been split we can return them back to their normal formatting
for ndx, sentence in enumerate(unprocessed_sentences):
sentence = unicode_to_ascii(sentence) # Sentence splitter returns unicode strings
sentence = sentence.replace('? " ', '?" ').replace('! " ', '!" ').replace('. " ', '." ')
sentence = self._remove_whitespace(sentence) # Remove excess whitespace
sentence = sentence[:-2] if (sentence.endswith(' .') or sentence.endswith(' . ')) else sentence
unprocessed_sentences[ndx] = sentence
# 2. PROCESS THE SENTENCES TO PERFORM STEMMING, STOPWORDS REMOVAL ETC. FOR MATRIX COMPUTATION
processed_sentences = [self.sanitize_text(sen) for sen in unprocessed_sentences]
# Sentences should contain at least 'word_threshold' significant terms
filter_sentences = [i for i in range(len(processed_sentences))
if len(processed_sentences[i].replace('.', '').split(' ')) > word_threshold]
processed_sentences = [processed_sentences[i] for i in filter_sentences]
unprocessed_sentences = [unprocessed_sentences[i] for i in filter_sentences]
return processed_sentences, unprocessed_sentences | Returns a list of sentences given an input string of text.
:param text: input string
:param word_threshold: number of significant words that a sentence must contain to be counted
(to count all sentences set equal to 1; 5 by default)
:return: list of sentences | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/nlp/tokenizer.py#L125-L169 |
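A sketch of the tokenizer in isolation; the constructor argument (a language name) is an assumption, while the two return values mirror the docstring above.

from pytldr.nlp.tokenizer import Tokenizer

tokenizer = Tokenizer('english')           # assumed constructor argument
text = ('Dr. Smith launched the prototype in March. '
        'It processed roughly ten thousand documents per hour during the first trial run.')
processed, unprocessed = tokenizer.tokenize_sentences(text, word_threshold=3)
print(unprocessed)   # sentences as they appear in the input
print(processed)     # the same sentences after sanitize_text() (stemming, stopword removal)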
jaijuneja/PyTLDR | pytldr/nlp/tokenizer.py | Tokenizer.tokenize_paragraphs | def tokenize_paragraphs(cls, text):
"""Convert an input string into a list of paragraphs."""
paragraphs = []
paragraphs_first_pass = text.split('\n')
for p in paragraphs_first_pass:
            paragraphs_second_pass = re.split(r'\s{4,}', p)
paragraphs += paragraphs_second_pass
# Remove empty strings from list
paragraphs = [p for p in paragraphs if p]
return paragraphs | python | def tokenize_paragraphs(cls, text):
"""Convert an input string into a list of paragraphs."""
paragraphs = []
paragraphs_first_pass = text.split('\n')
for p in paragraphs_first_pass:
            paragraphs_second_pass = re.split(r'\s{4,}', p)
paragraphs += paragraphs_second_pass
# Remove empty strings from list
paragraphs = [p for p in paragraphs if p]
return paragraphs | Convert an input string into a list of paragraphs. | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/nlp/tokenizer.py#L172-L182 |
jaijuneja/PyTLDR | pytldr/summarize/lsa.py | BaseLsaSummarizer._svd | def _svd(cls, matrix, num_concepts=5):
"""
Perform singular value decomposition for dimensionality reduction of the input matrix.
"""
u, s, v = svds(matrix, k=num_concepts)
return u, s, v | python | def _svd(cls, matrix, num_concepts=5):
"""
Perform singular value decomposition for dimensionality reduction of the input matrix.
"""
u, s, v = svds(matrix, k=num_concepts)
return u, s, v | Perform singular value decomposition for dimensionality reduction of the input matrix. | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/summarize/lsa.py#L14-L19 |
jaijuneja/PyTLDR | pytldr/summarize/lsa.py | LsaSteinberger.summarize | def summarize(self, text, topics=4, length=5, binary_matrix=True, topic_sigma_threshold=0.5):
"""
Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper:
J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation.
Proc. ISIM ’04, pp. 93–100.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param topics: the number of topics/concepts covered in the input text (defines the degree of
dimensionality reduction in the SVD step)
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:param topic_sigma_threshold: filters out topics/concepts with a singular value less than this
percentage of the largest singular value (must be between 0 and 1, 0.5 by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
topics = self._validate_num_topics(topics, sentences)
# Generate a matrix of terms that appear in each sentence
weighting = 'binary' if binary_matrix else 'frequency'
sentence_matrix = self._compute_matrix(sentences, weighting=weighting)
sentence_matrix = sentence_matrix.transpose()
# Filter out negatives in the sparse matrix (need to do this on Vt for LSA method):
sentence_matrix = sentence_matrix.multiply(sentence_matrix > 0)
s, u, v = self._svd(sentence_matrix, num_concepts=topics)
# Only consider topics/concepts whose singular values are half of the largest singular value
        if not 0 <= topic_sigma_threshold <= 1:
raise ValueError('Parameter topic_sigma_threshold must take a value between 0 and 1')
sigma_threshold = max(u) * topic_sigma_threshold
u[u < sigma_threshold] = 0 # Set all other singular values to zero
# Build a "length vector" containing the length (i.e. saliency) of each sentence
saliency_vec = np.dot(np.square(u), np.square(v))
top_sentences = saliency_vec.argsort()[-length:][::-1]
# Return the sentences in the order in which they appear in the document
top_sentences.sort()
return [unprocessed_sentences[i] for i in top_sentences] | python | def summarize(self, text, topics=4, length=5, binary_matrix=True, topic_sigma_threshold=0.5):
"""
Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper:
J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation.
Proc. ISIM ’04, pp. 93–100.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param topics: the number of topics/concepts covered in the input text (defines the degree of
dimensionality reduction in the SVD step)
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:param topic_sigma_threshold: filters out topics/concepts with a singular value less than this
percentage of the largest singular value (must be between 0 and 1, 0.5 by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
topics = self._validate_num_topics(topics, sentences)
# Generate a matrix of terms that appear in each sentence
weighting = 'binary' if binary_matrix else 'frequency'
sentence_matrix = self._compute_matrix(sentences, weighting=weighting)
sentence_matrix = sentence_matrix.transpose()
# Filter out negatives in the sparse matrix (need to do this on Vt for LSA method):
sentence_matrix = sentence_matrix.multiply(sentence_matrix > 0)
s, u, v = self._svd(sentence_matrix, num_concepts=topics)
# Only consider topics/concepts whose singular values are half of the largest singular value
        if not 0 <= topic_sigma_threshold <= 1:
raise ValueError('Parameter topic_sigma_threshold must take a value between 0 and 1')
sigma_threshold = max(u) * topic_sigma_threshold
u[u < sigma_threshold] = 0 # Set all other singular values to zero
# Build a "length vector" containing the length (i.e. saliency) of each sentence
saliency_vec = np.dot(np.square(u), np.square(v))
top_sentences = saliency_vec.argsort()[-length:][::-1]
# Return the sentences in the order in which they appear in the document
top_sentences.sort()
return [unprocessed_sentences[i] for i in top_sentences] | Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper:
J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation.
Proc. ISIM ’04, pp. 93–100.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param topics: the number of topics/concepts covered in the input text (defines the degree of
dimensionality reduction in the SVD step)
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary
(True by default)
:param topic_sigma_threshold: filters out topics/concepts with a singular value less than this
percentage of the largest singular value (must be between 0 and 1, 0.5 by default)
:return: list of sentences for the summary | https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/summarize/lsa.py#L49-L102 |
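Usage mirrors the TextRank summarizer; the no-argument constructor is again an assumption, and 'article.txt' is a placeholder path.

from pytldr.summarize.lsa import LsaSteinberger

summarizer = LsaSteinberger()              # assumed no-argument construction
summary = summarizer.summarize('article.txt', topics=4, length=0.2,
                               binary_matrix=True, topic_sigma_threshold=0.5)
print('\n'.join(summary))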
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD.check_payment | def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
        @return: True if valid, error string if invalid parameters were
encountered.
"""
validation = ""
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if not isinstance(payment['mandate_date'], datetime.date):
validation += "MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['mandate_date'] = str(payment['mandate_date'])
if not isinstance(payment['collection_date'], datetime.date):
validation += "COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['collection_date'] = str(payment['collection_date'])
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation) | python | def check_payment(self, payment):
"""
Check the payment for required fields and validity.
@param payment: The payment dict
        @return: True if valid, error string if invalid parameters were
encountered.
"""
validation = ""
if not isinstance(payment['amount'], int):
validation += "AMOUNT_NOT_INTEGER "
if not isinstance(payment['mandate_date'], datetime.date):
validation += "MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['mandate_date'] = str(payment['mandate_date'])
if not isinstance(payment['collection_date'], datetime.date):
validation += "COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE"
payment['collection_date'] = str(payment['collection_date'])
if validation == "":
return True
else:
raise Exception('Payment did not validate: ' + validation) | Check the payment for required fields and validity.
@param payment: The payment dict
        @return: True if valid, error string if invalid parameters were
encountered. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L38-L61 |
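A payment dict that satisfies check_payment(): the amount is an integer number of cents and both dates are datetime.date instances. The remaining keys are the ones consumed by add_payment() in the next record; all values are illustrative.

import datetime

payment = {
    'name': 'Test von Testenstein',
    'IBAN': 'NL50BANK1234567890',
    'BIC': 'BANKNL2A',
    'amount': 1012,                                    # 10.12 in the configured currency
    'type': 'RCUR',                                    # direct debit sequence type
    'collection_date': datetime.date.today() + datetime.timedelta(days=7),
    'mandate_id': 'MANDATE-1234',
    'mandate_date': datetime.date(2018, 7, 1),
    'description': 'Invoice 2018-0042',
}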
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD.add_payment | def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Validate the payment
self.check_payment(payment)
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount'])
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = payment['type']
PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['MndtIdNode'].text = payment['mandate_id']
TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
if bic:
TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Dbtr_Node'].text = payment['name']
TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if not payment.get('endtoend_id', ''):
payment['endtoend_id'] = make_id(self._config['name'])
TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes) | python | def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Validate the payment
self.check_payment(payment)
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount'])
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = payment['type']
PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['MndtIdNode'].text = payment['mandate_id']
TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
if bic:
TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Dbtr_Node'].text = payment['name']
TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if not payment.get('endtoend_id', ''):
payment['endtoend_id'] = make_id(self._config['name'])
TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes) | Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L63-L127 |
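The add_payment row above is the public entry point for queuing a direct-debit transaction: it optionally transliterates and truncates name/description, validates the payment, and fills the PmtInf/TX element dicts. A minimal usage sketch follows; the import path, constructor signature, schema value and export() call are assumptions not shown in this row, and all IBAN/BIC/date values are placeholders that may not pass the library's own validation.

# Hedged sketch of driving add_payment(); constructor, schema and export()
# are assumptions -- only add_payment() itself appears in the row above.
from sepaxml import SepaDD  # assumed import path for this repository

config = {
    "name": "Example Creditor",        # read as self._config['name']
    "IBAN": "NL50BANK1234567890",      # placeholder creditor IBAN
    "BIC": "BANKNL2A",                 # optional; enables BIC_CdtrAgt_Node
    "creditor_id": "NL08ZZZ502057730000",
    "currency": "EUR",
    "instrument": "CORE",              # read as self._config['instrument']
    "batch": True,                     # chooses _add_batch over _add_non_batch
}
sepa = SepaDD(config, schema="pain.008.001.02", clean=True)  # assumed signature

payment = {
    "name": "Example Debtor",
    "IBAN": "NL13TEST0123456789",      # placeholder debtor IBAN
    "BIC": "BANKNL2A",                 # optional; toggles the BIC_DbtrAgt node
    "amount": 1012,                    # integer cents, per int_to_decimal_str()
    "type": "RCUR",                    # sequence type -> SeqTpNode
    "collection_date": "2024-01-15",   # assigned directly to ReqdColltnDtNode here
    "mandate_id": "MANDATE-1",
    "mandate_date": "2023-01-01",
    "description": "Invoice 2024-001",
}
sepa.add_payment(payment)              # raises if check_payment() rejects it
xml_bytes = sepa.export()              # assumed export method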
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD._create_header | def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrDrctDbtInit Node
"""
# Retrieve the node to which we will append the group header.
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
SupId_node = ET.Element("Id")
OrgId_node = ET.Element("OrgId")
Othr_node = ET.Element("Othr")
Id_node = ET.Element("Id")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
Id_node.text = self._config['creditor_id']
# Append the nodes
Othr_node.append(Id_node)
OrgId_node.append(Othr_node)
SupId_node.append(OrgId_node)
InitgPty_node.append(Nm_node)
InitgPty_node.append(SupId_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrDrctDbtInitn_node.append(GrpHdr_node) | python | def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrDrctDbtInit Node
"""
# Retrieve the node to which we will append the group header.
CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
SupId_node = ET.Element("Id")
OrgId_node = ET.Element("OrgId")
Othr_node = ET.Element("Othr")
Id_node = ET.Element("Id")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
Id_node.text = self._config['creditor_id']
# Append the nodes
Othr_node.append(Id_node)
OrgId_node.append(Othr_node)
SupId_node.append(OrgId_node)
InitgPty_node.append(Nm_node)
InitgPty_node.append(SupId_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrDrctDbtInitn_node.append(GrpHdr_node) | Function to create the GroupHeader (GrpHdr) in the
CstmrDrctDbtInit Node | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L129-L169 |
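_create_header, shown above, builds the GrpHdr block under CstmrDrctDbtInitn but leaves NbOfTxs and CtrlSum empty, so the totals are presumably filled in at export time. A short inspection sketch, assuming access to the private _xml attribute and _create_header method purely for illustration:

# Hedged sketch: peeking at the GrpHdr produced by _create_header().
# sepa is the SepaDD instance from the earlier sketch; _xml/_create_header
# are internal names, so treat this as illustrative only.
sepa._create_header()                                   # normally called internally
grp_hdr = sepa._xml.find('CstmrDrctDbtInitn/GrpHdr')
print(grp_hdr.find('MsgId').text)                       # set from self.msg_id
print(grp_hdr.find('InitgPty/Nm').text)                 # creditor name from config
print(grp_hdr.find('InitgPty/Id/OrgId/Othr/Id').text)   # creditor_id
# NbOfTxs and CtrlSum are created empty here and totalled elsewhere.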
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD._create_PmtInf_node | def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['LclInstrmNode'] = ET.Element("LclInstrm")
ED['Cd_LclInstrm_Node'] = ET.Element("Cd")
ED['SeqTpNode'] = ET.Element("SeqTp")
ED['ReqdColltnDtNode'] = ET.Element("ReqdColltnDt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
ED['CdtrSchmeIdNode'] = ET.Element("CdtrSchmeId")
ED['Nm_CdtrSchmeId_Node'] = ET.Element("Nm")
ED['Id_CdtrSchmeId_Node'] = ET.Element("Id")
ED['PrvtIdNode'] = ET.Element("PrvtId")
ED['OthrNode'] = ET.Element("Othr")
ED['Id_Othr_Node'] = ET.Element("Id")
ED['SchmeNmNode'] = ET.Element("SchmeNm")
ED['PrtryNode'] = ET.Element("Prtry")
return ED | python | def _create_PmtInf_node(self):
"""
Method to create the blank payment information nodes as a dict.
"""
ED = dict() # ED is element dict
ED['PmtInfNode'] = ET.Element("PmtInf")
ED['PmtInfIdNode'] = ET.Element("PmtInfId")
ED['PmtMtdNode'] = ET.Element("PmtMtd")
ED['BtchBookgNode'] = ET.Element("BtchBookg")
ED['NbOfTxsNode'] = ET.Element("NbOfTxs")
ED['CtrlSumNode'] = ET.Element("CtrlSum")
ED['PmtTpInfNode'] = ET.Element("PmtTpInf")
ED['SvcLvlNode'] = ET.Element("SvcLvl")
ED['Cd_SvcLvl_Node'] = ET.Element("Cd")
ED['LclInstrmNode'] = ET.Element("LclInstrm")
ED['Cd_LclInstrm_Node'] = ET.Element("Cd")
ED['SeqTpNode'] = ET.Element("SeqTp")
ED['ReqdColltnDtNode'] = ET.Element("ReqdColltnDt")
ED['CdtrNode'] = ET.Element("Cdtr")
ED['Nm_Cdtr_Node'] = ET.Element("Nm")
ED['CdtrAcctNode'] = ET.Element("CdtrAcct")
ED['Id_CdtrAcct_Node'] = ET.Element("Id")
ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN")
ED['CdtrAgtNode'] = ET.Element("CdtrAgt")
ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId")
if 'BIC' in self._config:
ED['BIC_CdtrAgt_Node'] = ET.Element("BIC")
ED['ChrgBrNode'] = ET.Element("ChrgBr")
ED['CdtrSchmeIdNode'] = ET.Element("CdtrSchmeId")
ED['Nm_CdtrSchmeId_Node'] = ET.Element("Nm")
ED['Id_CdtrSchmeId_Node'] = ET.Element("Id")
ED['PrvtIdNode'] = ET.Element("PrvtId")
ED['OthrNode'] = ET.Element("Othr")
ED['Id_Othr_Node'] = ET.Element("Id")
ED['SchmeNmNode'] = ET.Element("SchmeNm")
ED['PrtryNode'] = ET.Element("Prtry")
return ED | Method to create the blank payment information nodes as a dict. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L171-L207 |
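_create_PmtInf_node returns a dict of still-detached, empty elements keyed by a flat naming scheme; add_payment (above) fills their .text and the batch/non-batch helpers attach them to the tree. Note that BIC_CdtrAgt_Node is only created when 'BIC' is present in the config. A brief illustration, again assuming access to the private method:

# Hedged sketch: the dict of blank PmtInf elements. Private-method access
# is for illustration; in normal use add_payment() drives this.
nodes = sepa._create_PmtInf_node()
nodes['PmtMtdNode'].text = "DD"                   # as done in add_payment()
nodes['Cd_SvcLvl_Node'].text = "SEPA"
has_creditor_bic = 'BIC_CdtrAgt_Node' in nodes    # only if config has 'BIC'
# Elements are not yet wired together; _add_batch/_add_non_batch do that.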
raphaelm/python-sepaxml | sepaxml/debit.py | SepaDD._create_TX_node | def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['DrctDbtTxInfNode'] = ET.Element("DrctDbtTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEndIdNode'] = ET.Element("EndToEndId")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['DrctDbtTxNode'] = ET.Element("DrctDbtTx")
ED['MndtRltdInfNode'] = ET.Element("MndtRltdInf")
ED['MndtIdNode'] = ET.Element("MndtId")
ED['DtOfSgntrNode'] = ET.Element("DtOfSgntr")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED | python | def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['DrctDbtTxInfNode'] = ET.Element("DrctDbtTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEndIdNode'] = ET.Element("EndToEndId")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['DrctDbtTxNode'] = ET.Element("DrctDbtTx")
ED['MndtRltdInfNode'] = ET.Element("MndtRltdInf")
ED['MndtIdNode'] = ET.Element("MndtId")
ED['DtOfSgntrNode'] = ET.Element("DtOfSgntr")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED | Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created. | https://github.com/raphaelm/python-sepaxml/blob/187b699b1673c862002b2bae7e1bd62fe8623aec/sepaxml/debit.py#L209-L234 |
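_create_TX_node mirrors the per-payment branch of add_payment: the bic flag decides whether a BIC_DbtrAgt_Node key exists at all, which is why add_payment checks 'BIC' in payment before writing to it. A small check, with the same caveat about touching a private method:

# Hedged sketch: presence of the debtor-BIC node depends on the bic flag.
tx = sepa._create_TX_node(bic=False)
assert 'BIC_DbtrAgt_Node' not in tx
tx = sepa._create_TX_node(bic=True)
tx['BIC_DbtrAgt_Node'].text = "BANKNL2A"    # placeholder BIC, as in add_payment()
tx['EndToEndIdNode'].text = "EXAMPLE-E2E"   # add_payment() normally generates this via make_id()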