repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
lekhakpadmanabh/Summarizer | smrzr/core.py | _title_similarity_score | def _title_similarity_score(full_text, title):
    """Similarity scores for sentences with
    title in descending order"""
    sentences = sentence_tokenizer(full_text)
    norm = _normalize([title] + sentences)
    similarity_matrix = pairwise_kernels(norm, metric='cosine')
    return sorted(zip(similarity_matrix[0, 1:],
                      range(len(similarity_matrix)),
                      sentences),
                  key=lambda tup: tup[0],
                  reverse=True) | python | def _title_similarity_score(full_text, title):
    """Similarity scores for sentences with
    title in descending order"""
    sentences = sentence_tokenizer(full_text)
    norm = _normalize([title] + sentences)
    similarity_matrix = pairwise_kernels(norm, metric='cosine')
    return sorted(zip(similarity_matrix[0, 1:],
                      range(len(similarity_matrix)),
                      sentences),
                  key=lambda tup: tup[0],
                  reverse=True) | [
"def",
"_title_similarity_score",
"(",
"full_text",
",",
"title",
")",
":",
"sentences",
"=",
"sentence_tokenizer",
"(",
"full_text",
")",
"norm",
"=",
"_normalize",
"(",
"[",
"title",
"]",
"+",
"sentences",
")",
"similarity_matrix",
"=",
"pairwise_kernels",
"(",
"norm",
",",
"metric",
"=",
"'cosine'",
")",
"return",
"sorted",
"(",
"zip",
"(",
"similarity_matrix",
"[",
"0",
",",
"1",
":",
"]",
",",
"range",
"(",
"len",
"(",
"similarity_matrix",
")",
")",
",",
"sentences",
")",
",",
"key",
"=",
"lambda",
"tup",
":",
"tup",
"[",
"0",
"]",
",",
"reverse",
"=",
"True",
")"
] | Similarity scores for sentences with
title in descending order | [
"Similarity",
"scores",
"for",
"sentences",
"with",
"title",
"in",
"descending",
"order"
] | 143456a48217905c720d87331f410e5c8b4e24aa | https://github.com/lekhakpadmanabh/Summarizer/blob/143456a48217905c720d87331f410e5c8b4e24aa/smrzr/core.py#L77-L91 | train |
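
A minimal sketch of the ranking pattern `_title_similarity_score` implements, with scikit-learn's `TfidfVectorizer` standing in for smrzr's internal `sentence_tokenizer`/`_normalize` helpers (an assumption about what `_normalize` produces, not the package's actual pipeline):

```python
# Rank sentences by cosine similarity to the title; row 0 of the kernel
# matrix holds title-vs-sentence similarities (column 0 is title-vs-title).
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import pairwise_kernels

title = "Solar power adoption"
sentences = ["Solar power adoption is accelerating worldwide.",
             "The committee met on Tuesday."]
norm = TfidfVectorizer().fit_transform([title] + sentences)
sim = pairwise_kernels(norm, metric='cosine')
ranked = sorted(zip(sim[0, 1:], range(len(sentences)), sentences),
                key=lambda tup: tup[0], reverse=True)
print(ranked[0][2])  # the most title-like sentence
```
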
lekhakpadmanabh/Summarizer | smrzr/core.py | _aggregrate_scores | def _aggregrate_scores(its, tss, num_sentences):
    """rerank the two vectors by
    min aggregrate rank, reorder"""
    final = []
    for i, el in enumerate(its):
        for j, le in enumerate(tss):
            if el[2] == le[2]:
                assert el[1] == le[1]
                final.append((el[1], i + j, el[2]))
    _final = sorted(final, key=lambda tup: tup[1])[:num_sentences]
    return sorted(_final, key=lambda tup: tup[0]) | python | def _aggregrate_scores(its, tss, num_sentences):
    """rerank the two vectors by
    min aggregrate rank, reorder"""
    final = []
    for i, el in enumerate(its):
        for j, le in enumerate(tss):
            if el[2] == le[2]:
                assert el[1] == le[1]
                final.append((el[1], i + j, el[2]))
    _final = sorted(final, key=lambda tup: tup[1])[:num_sentences]
    return sorted(_final, key=lambda tup: tup[0]) | [
"def",
"_aggregrate_scores",
"(",
"its",
",",
"tss",
",",
"num_sentences",
")",
":",
"final",
"=",
"[",
"]",
"for",
"i",
",",
"el",
"in",
"enumerate",
"(",
"its",
")",
":",
"for",
"j",
",",
"le",
"in",
"enumerate",
"(",
"tss",
")",
":",
"if",
"el",
"[",
"2",
"]",
"==",
"le",
"[",
"2",
"]",
":",
"assert",
"el",
"[",
"1",
"]",
"==",
"le",
"[",
"1",
"]",
"final",
".",
"append",
"(",
"(",
"el",
"[",
"1",
"]",
",",
"i",
"+",
"j",
",",
"el",
"[",
"2",
"]",
")",
")",
"_final",
"=",
"sorted",
"(",
"final",
",",
"key",
"=",
"lambda",
"tup",
":",
"tup",
"[",
"1",
"]",
")",
"[",
":",
"num_sentences",
"]",
"return",
"sorted",
"(",
"_final",
",",
"key",
"=",
"lambda",
"tup",
":",
"tup",
"[",
"0",
"]",
")"
] | rerank the two vectors by
min aggregrate rank, reorder | [
"rerank",
"the",
"two",
"vectors",
"by",
"min",
"aggregrate",
"rank",
"reorder"
] | 143456a48217905c720d87331f410e5c8b4e24aa | https://github.com/lekhakpadmanabh/Summarizer/blob/143456a48217905c720d87331f410e5c8b4e24aa/smrzr/core.py#L102-L112 | train |
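
The merge step in `_aggregrate_scores` is easier to see on toy data: the same `(score, index, sentence)` tuples appear in two rankings, each sentence is re-scored by the sum of its positions, and the survivors are restored to document order. A self-contained demo:

```python
its = [(0.9, 0, "A"), (0.7, 2, "C"), (0.1, 1, "B")]  # ranking one
tss = [(0.8, 2, "C"), (0.6, 0, "A"), (0.2, 1, "B")]  # ranking two

final = []
for i, el in enumerate(its):
    for j, le in enumerate(tss):
        if el[2] == le[2]:
            final.append((el[1], i + j, el[2]))  # (doc index, rank sum, text)

top = sorted(final, key=lambda t: t[1])[:2]   # keep the best aggregate ranks
print(sorted(top, key=lambda t: t[0]))        # [(0, 1, 'A'), (2, 1, 'C')]
```
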
lekhakpadmanabh/Summarizer | smrzr/core.py | _eval_meta_as_summary | def _eval_meta_as_summary(meta):
    """some crude heuristics for now
    most are implemented on bot-side
    with domain whitelists"""
    if meta == '':
        return False
    if len(meta) > 500:
        return False
    if 'login' in meta.lower():
        return False
    return True | python | def _eval_meta_as_summary(meta):
    """some crude heuristics for now
    most are implemented on bot-side
    with domain whitelists"""
    if meta == '':
        return False
    if len(meta) > 500:
        return False
    if 'login' in meta.lower():
        return False
    return True | [
"def",
"_eval_meta_as_summary",
"(",
"meta",
")",
":",
"if",
"meta",
"==",
"''",
":",
"return",
"False",
"if",
"len",
"(",
"meta",
")",
">",
"500",
":",
"return",
"False",
"if",
"'login'",
"in",
"meta",
".",
"lower",
"(",
")",
":",
"return",
"False",
"return",
"True"
] | some crude heuristics for now
most are implemented on bot-side
with domain whitelists | [
"some",
"crude",
"heuristics",
"for",
"now",
"most",
"are",
"implemented",
"on",
"bot",
"-",
"side",
"with",
"domain",
"whitelists"
] | 143456a48217905c720d87331f410e5c8b4e24aa | https://github.com/lekhakpadmanabh/Summarizer/blob/143456a48217905c720d87331f410e5c8b4e24aa/smrzr/core.py#L115-L126 | train |
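
The heuristics above are simple enough to exercise directly; assuming the private helper is imported from `smrzr.core`:

```python
print(_eval_meta_as_summary(""))                    # False: empty
print(_eval_meta_as_summary("x" * 501))             # False: too long
print(_eval_meta_as_summary("Login to continue"))   # False: login wall
print(_eval_meta_as_summary("A concise summary."))  # True
```
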
digidotcom/python-wvalib | wva/core.py | WVA.get_subscriptions | def get_subscriptions(self):
    """Return a list of subscriptions currently active for this WVA device

    :raises WVAError: if there is a problem getting the subscription list from the WVA
    :returns: A list of :class:`WVASubscription` instances
    """
    # Example: {'subscriptions': ['subscriptions/TripDistance~sub', 'subscriptions/FuelRate~sub', ]}
    subscriptions = []
    for uri in self.get_http_client().get("subscriptions").get('subscriptions'):
        subscriptions.append(self.get_subscription(uri.split("/")[-1]))
    return subscriptions | python | def get_subscriptions(self):
    """Return a list of subscriptions currently active for this WVA device

    :raises WVAError: if there is a problem getting the subscription list from the WVA
    :returns: A list of :class:`WVASubscription` instances
    """
    # Example: {'subscriptions': ['subscriptions/TripDistance~sub', 'subscriptions/FuelRate~sub', ]}
    subscriptions = []
    for uri in self.get_http_client().get("subscriptions").get('subscriptions'):
        subscriptions.append(self.get_subscription(uri.split("/")[-1]))
    return subscriptions | [
"def",
"get_subscriptions",
"(",
"self",
")",
":",
"# Example: {'subscriptions': ['subscriptions/TripDistance~sub', 'subscriptions/FuelRate~sub', ]}",
"subscriptions",
"=",
"[",
"]",
"for",
"uri",
"in",
"self",
".",
"get_http_client",
"(",
")",
".",
"get",
"(",
"\"subscriptions\"",
")",
".",
"get",
"(",
"'subscriptions'",
")",
":",
"subscriptions",
".",
"append",
"(",
"self",
".",
"get_subscription",
"(",
"uri",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
")",
")",
"return",
"subscriptions"
] | Return a list of subscriptions currently active for this WVA device
:raises WVAError: if there is a problem getting the subscription list from the WVA
:returns: A list of :class:`WVASubscription` instances | [
"Return",
"a",
"list",
"of",
"subscriptions",
"currently",
"active",
"for",
"this",
"WVA",
"device"
] | 4252735e2775f80ebaffd813fbe84046d26906b3 | https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/core.py#L90-L100 | train |
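
A hypothetical usage sketch; the `WVA(...)` constructor arguments shown here (host plus credentials) are an assumption, not a verified signature:

```python
from wva import WVA

device = WVA("192.168.100.1", "user", "password")  # assumed constructor
for sub in device.get_subscriptions():
    # Each item is built from the trailing URI segment, e.g.
    # "subscriptions/TripDistance~sub" -> "TripDistance~sub".
    print(sub)
```
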
digidotcom/python-wvalib | wva/core.py | WVA.get_event_stream | def get_event_stream(self):
    """Get the event stream associated with this WVA

    Note that this event stream is shared across all users of this WVA device
    as the WVA only supports a single event stream.

    :return: a new :class:`WVAEventStream` instance
    """
    if self._event_stream is None:
        self._event_stream = WVAEventStream(self._http_client)
    return self._event_stream | python | def get_event_stream(self):
    """Get the event stream associated with this WVA

    Note that this event stream is shared across all users of this WVA device
    as the WVA only supports a single event stream.

    :return: a new :class:`WVAEventStream` instance
    """
    if self._event_stream is None:
        self._event_stream = WVAEventStream(self._http_client)
    return self._event_stream | [
"def",
"get_event_stream",
"(",
"self",
")",
":",
"if",
"self",
".",
"_event_stream",
"is",
"None",
":",
"self",
".",
"_event_stream",
"=",
"WVAEventStream",
"(",
"self",
".",
"_http_client",
")",
"return",
"self",
".",
"_event_stream"
] | Get the event stream associated with this WVA
Note that this event stream is shared across all users of this WVA device
as the WVA only supports a single event stream.
:return: a new :class:`WVAEventStream` instance | [
"Get",
"the",
"event",
"stream",
"associated",
"with",
"this",
"WVA"
] | 4252735e2775f80ebaffd813fbe84046d26906b3 | https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/core.py#L102-L112 | train |
spacetelescope/stsci.imagestats | stsci/imagestats/histogram1d.py | histogram1d._populateHistogram | def _populateHistogram(self):
    """Call the C-code that actually populates the histogram"""
    try:
        buildHistogram.populate1DHist(self._data, self.histogram,
                                      self.minValue, self.maxValue, self.binWidth)
    except:
        if (self._data.max() - self._data.min()) < self.binWidth:
            raise ValueError("In histogram1d class, the binWidth is "
                             "greater than the data range of the array "
                             "object.")
        else:
            raise SystemError("An error processing the array object "
                              "information occured in the buildHistogram "
                              "module of histogram1d.") | python | def _populateHistogram(self):
    """Call the C-code that actually populates the histogram"""
    try:
        buildHistogram.populate1DHist(self._data, self.histogram,
                                      self.minValue, self.maxValue, self.binWidth)
    except:
        if (self._data.max() - self._data.min()) < self.binWidth:
            raise ValueError("In histogram1d class, the binWidth is "
                             "greater than the data range of the array "
                             "object.")
        else:
            raise SystemError("An error processing the array object "
                              "information occured in the buildHistogram "
                              "module of histogram1d.") | [
"def",
"_populateHistogram",
"(",
"self",
")",
":",
"try",
":",
"buildHistogram",
".",
"populate1DHist",
"(",
"self",
".",
"_data",
",",
"self",
".",
"histogram",
",",
"self",
".",
"minValue",
",",
"self",
".",
"maxValue",
",",
"self",
".",
"binWidth",
")",
"except",
":",
"if",
"(",
"(",
"self",
".",
"_data",
".",
"max",
"(",
")",
"-",
"self",
".",
"_data",
".",
"min",
"(",
")",
")",
"<",
"self",
".",
"binWidth",
")",
":",
"raise",
"ValueError",
"(",
"\"In histogram1d class, the binWidth is \"",
"\"greater than the data range of the array \"",
"\"object.\"",
")",
"else",
":",
"raise",
"SystemError",
"(",
"\"An error processing the array object \"",
"\"information occured in the buildHistogram \"",
"\"module of histogram1d.\"",
")"
] | Call the C-code that actually populates the histogram | [
"Call",
"the",
"C",
"-",
"code",
"that",
"actually",
"populates",
"the",
"histogram"
] | d7fc9fe9783f7ed3dc9e4af47acd357a5ccd68e3 | https://github.com/spacetelescope/stsci.imagestats/blob/d7fc9fe9783f7ed3dc9e4af47acd357a5ccd68e3/stsci/imagestats/histogram1d.py#L55-L68 | train |
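
What the C `populate1DHist` step computes can be sketched in pure NumPy: counts of values falling into fixed-width bins over `[minValue, maxValue)`:

```python
import numpy as np

data = np.array([0.1, 0.4, 0.45, 0.9], dtype=np.float32)
min_value, max_value, bin_width = 0.0, 1.0, 0.25
nbins = int((max_value - min_value) / bin_width + 0.5)

idx = ((data - min_value) / bin_width).astype(int)
idx = np.clip(idx, 0, nbins - 1)              # guard the max_value edge
histogram = np.bincount(idx, minlength=nbins)
print(histogram)                               # [1 2 0 1]
```
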
spacetelescope/stsci.imagestats | stsci/imagestats/histogram1d.py | histogram1d.getCenters | def getCenters(self):
    """ Returns histogram's centers. """
    return np.arange(self.histogram.size) * self.binWidth + self.minValue | python | def getCenters(self):
    """ Returns histogram's centers. """
    return np.arange(self.histogram.size) * self.binWidth + self.minValue | [
"def",
"getCenters",
"(",
"self",
")",
":",
"return",
"np",
".",
"arange",
"(",
"self",
".",
"histogram",
".",
"size",
")",
"*",
"self",
".",
"binWidth",
"+",
"self",
".",
"minValue"
] | Returns histogram's centers. | [
"Returns",
"histogram",
"s",
"centers",
"."
] | d7fc9fe9783f7ed3dc9e4af47acd357a5ccd68e3 | https://github.com/spacetelescope/stsci.imagestats/blob/d7fc9fe9783f7ed3dc9e4af47acd357a5ccd68e3/stsci/imagestats/histogram1d.py#L70-L72 | train |
pennlabs/penn-sdk-python | penn/wharton.py | Wharton.book_reservation | def book_reservation(self, sessionid, roomid, start, end):
    """ Book a reservation given the session id, the room id as an integer, and the start and end time as datetimes. """
    duration = int((end - start).seconds / 60)
    format = "%Y-%m-%dT%H:%M:%S-{}".format(self.get_dst_gmt_timezone())
    booking_url = "{}/reserve/{}/{}/?d={}".format(BASE_URL, roomid, start.strftime(format), duration)
    resp = requests.get(booking_url, cookies={"sessionid": sessionid})
    if resp.status_code == 403:
        return {"success": False, "error": "Your account does not have permission to book Wharton GSRs!"}
    resp.raise_for_status()
    csrfheader = re.search(r"csrftoken=(.*?);", resp.headers["Set-Cookie"]).group(1)
    csrftoken = re.search(r"<input name=\"csrfmiddlewaretoken\" type=\"hidden\" value=\"(.*?)\"/>", resp.content.decode("utf8")).group(1)
    start_string = start.strftime("%I:%M %p")
    if start_string[0] == "0":
        start_string = start_string[1:]
    resp = requests.post(
        booking_url,
        cookies={"sessionid": sessionid, "csrftoken": csrfheader},
        headers={"Referer": booking_url},
        data={
            "csrfmiddlewaretoken": csrftoken,
            "room": roomid,
            "start_time": start_string,
            "end_time": end.strftime("%a %b %d %H:%M:%S %Y"),
            "date": start.strftime("%B %d, %Y")
        }
    )
    resp.raise_for_status()
    content = resp.content.decode("utf8")
    if "errorlist" in content:
        error_msg = re.search(r"class=\"errorlist\"><li>(.*?)</li>", content).group(1)
        return {"success": False, "error": error_msg}
    return {"success": True} | python | def book_reservation(self, sessionid, roomid, start, end):
    """ Book a reservation given the session id, the room id as an integer, and the start and end time as datetimes. """
    duration = int((end - start).seconds / 60)
    format = "%Y-%m-%dT%H:%M:%S-{}".format(self.get_dst_gmt_timezone())
    booking_url = "{}/reserve/{}/{}/?d={}".format(BASE_URL, roomid, start.strftime(format), duration)
    resp = requests.get(booking_url, cookies={"sessionid": sessionid})
    if resp.status_code == 403:
        return {"success": False, "error": "Your account does not have permission to book Wharton GSRs!"}
    resp.raise_for_status()
    csrfheader = re.search(r"csrftoken=(.*?);", resp.headers["Set-Cookie"]).group(1)
    csrftoken = re.search(r"<input name=\"csrfmiddlewaretoken\" type=\"hidden\" value=\"(.*?)\"/>", resp.content.decode("utf8")).group(1)
    start_string = start.strftime("%I:%M %p")
    if start_string[0] == "0":
        start_string = start_string[1:]
    resp = requests.post(
        booking_url,
        cookies={"sessionid": sessionid, "csrftoken": csrfheader},
        headers={"Referer": booking_url},
        data={
            "csrfmiddlewaretoken": csrftoken,
            "room": roomid,
            "start_time": start_string,
            "end_time": end.strftime("%a %b %d %H:%M:%S %Y"),
            "date": start.strftime("%B %d, %Y")
        }
    )
    resp.raise_for_status()
    content = resp.content.decode("utf8")
    if "errorlist" in content:
        error_msg = re.search(r"class=\"errorlist\"><li>(.*?)</li>", content).group(1)
        return {"success": False, "error": error_msg}
    return {"success": True} | [
"def",
"book_reservation",
"(",
"self",
",",
"sessionid",
",",
"roomid",
",",
"start",
",",
"end",
")",
":",
"duration",
"=",
"int",
"(",
"(",
"end",
"-",
"start",
")",
".",
"seconds",
"/",
"60",
")",
"format",
"=",
"\"%Y-%m-%dT%H:%M:%S-{}\"",
".",
"format",
"(",
"self",
".",
"get_dst_gmt_timezone",
"(",
")",
")",
"booking_url",
"=",
"\"{}/reserve/{}/{}/?d={}\"",
".",
"format",
"(",
"BASE_URL",
",",
"roomid",
",",
"start",
".",
"strftime",
"(",
"format",
")",
",",
"duration",
")",
"resp",
"=",
"requests",
".",
"get",
"(",
"booking_url",
",",
"cookies",
"=",
"{",
"\"sessionid\"",
":",
"sessionid",
"}",
")",
"if",
"resp",
".",
"status_code",
"==",
"403",
":",
"return",
"{",
"\"success\"",
":",
"False",
",",
"\"error\"",
":",
"\"Your account does not have permission to book Wharton GSRs!\"",
"}",
"resp",
".",
"raise_for_status",
"(",
")",
"csrfheader",
"=",
"re",
".",
"search",
"(",
"r\"csrftoken=(.*?);\"",
",",
"resp",
".",
"headers",
"[",
"\"Set-Cookie\"",
"]",
")",
".",
"group",
"(",
"1",
")",
"csrftoken",
"=",
"re",
".",
"search",
"(",
"r\"<input name=\\\"csrfmiddlewaretoken\\\" type=\\\"hidden\\\" value=\\\"(.*?)\\\"/>\"",
",",
"resp",
".",
"content",
".",
"decode",
"(",
"\"utf8\"",
")",
")",
".",
"group",
"(",
"1",
")",
"start_string",
"=",
"start",
".",
"strftime",
"(",
"\"%I:%M %p\"",
")",
"if",
"start_string",
"[",
"0",
"]",
"==",
"\"0\"",
":",
"start_string",
"=",
"start_string",
"[",
"1",
":",
"]",
"resp",
"=",
"requests",
".",
"post",
"(",
"booking_url",
",",
"cookies",
"=",
"{",
"\"sessionid\"",
":",
"sessionid",
",",
"\"csrftoken\"",
":",
"csrfheader",
"}",
",",
"headers",
"=",
"{",
"\"Referer\"",
":",
"booking_url",
"}",
",",
"data",
"=",
"{",
"\"csrfmiddlewaretoken\"",
":",
"csrftoken",
",",
"\"room\"",
":",
"roomid",
",",
"\"start_time\"",
":",
"start_string",
",",
"\"end_time\"",
":",
"end",
".",
"strftime",
"(",
"\"%a %b %d %H:%M:%S %Y\"",
")",
",",
"\"date\"",
":",
"start",
".",
"strftime",
"(",
"\"%B %d, %Y\"",
")",
"}",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"content",
"=",
"resp",
".",
"content",
".",
"decode",
"(",
"\"utf8\"",
")",
"if",
"\"errorlist\"",
"in",
"content",
":",
"error_msg",
"=",
"re",
".",
"search",
"(",
"r\"class=\\\"errorlist\\\"><li>(.*?)</li>\"",
",",
"content",
")",
".",
"group",
"(",
"1",
")",
"return",
"{",
"\"success\"",
":",
"False",
",",
"\"error\"",
":",
"error_msg",
"}",
"return",
"{",
"\"success\"",
":",
"True",
"}"
] | Book a reservation given the session id, the room id as an integer, and the start and end time as datetimes. | [
"Book",
"a",
"reservation",
"given",
"the",
"session",
"id",
"the",
"room",
"id",
"as",
"an",
"integer",
"and",
"the",
"start",
"and",
"end",
"time",
"as",
"datetimes",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/wharton.py#L53-L90 | train |
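
A hypothetical call; `wharton` is an assumed `Wharton()` instance, the session id and room id are placeholders, and the datetimes are naive local times as the method's `strftime` calls expect:

```python
import datetime

start = datetime.datetime(2019, 4, 1, 14, 0)
end = start + datetime.timedelta(minutes=30)
result = wharton.book_reservation(sessionid="abc123", roomid=241,
                                  start=start, end=end)
if not result["success"]:
    print(result["error"])
```
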
pennlabs/penn-sdk-python | penn/wharton.py | Wharton.delete_booking | def delete_booking(self, sessionid, booking_id):
    """ Deletes a Wharton GSR Booking for a given booking and session id. """
    url = "{}{}{}/".format(BASE_URL, "/delete/", booking_id)
    cookies = dict(sessionid=sessionid)
    try:
        resp = requests.get(url, cookies=cookies, headers={'Referer': '{}{}'.format(BASE_URL, "/reservations/")})
    except requests.exceptions.HTTPError as error:  # fixed: `resp` is unbound if the request itself fails
        raise APIError("Server Error: {}".format(error))
    if resp.status_code == 404:
        raise APIError("Booking could not be found on server.")
    html = resp.content.decode("utf8")
    if "https://weblogin.pennkey.upenn.edu" in html:
        raise APIError("Wharton Auth Failed. Session ID is not valid.")
    resp.raise_for_status()
    soup = BeautifulSoup(html, "html5lib")
    middleware_token = soup.find("input", {'name': "csrfmiddlewaretoken"}).get('value')
    csrftoken = resp.cookies['csrftoken']
    cookies2 = {'sessionid': sessionid, 'csrftoken': csrftoken}
    headers = {'Referer': url}
    payload = {'csrfmiddlewaretoken': middleware_token}
    try:
        resp2 = requests.post(url, cookies=cookies2, data=payload, headers=headers)
    except requests.exceptions.HTTPError as error:  # fixed: `resp2` is unbound in its own except clause
        raise APIError("Server Error: {}".format(error))
    return {"success": True} | python | def delete_booking(self, sessionid, booking_id):
    """ Deletes a Wharton GSR Booking for a given booking and session id. """
    url = "{}{}{}/".format(BASE_URL, "/delete/", booking_id)
    cookies = dict(sessionid=sessionid)
    try:
        resp = requests.get(url, cookies=cookies, headers={'Referer': '{}{}'.format(BASE_URL, "/reservations/")})
    except requests.exceptions.HTTPError as error:  # fixed: `resp` is unbound if the request itself fails
        raise APIError("Server Error: {}".format(error))
    if resp.status_code == 404:
        raise APIError("Booking could not be found on server.")
    html = resp.content.decode("utf8")
    if "https://weblogin.pennkey.upenn.edu" in html:
        raise APIError("Wharton Auth Failed. Session ID is not valid.")
    resp.raise_for_status()
    soup = BeautifulSoup(html, "html5lib")
    middleware_token = soup.find("input", {'name': "csrfmiddlewaretoken"}).get('value')
    csrftoken = resp.cookies['csrftoken']
    cookies2 = {'sessionid': sessionid, 'csrftoken': csrftoken}
    headers = {'Referer': url}
    payload = {'csrfmiddlewaretoken': middleware_token}
    try:
        resp2 = requests.post(url, cookies=cookies2, data=payload, headers=headers)
    except requests.exceptions.HTTPError as error:  # fixed: `resp2` is unbound in its own except clause
        raise APIError("Server Error: {}".format(error))
    return {"success": True} | [
"def",
"delete_booking",
"(",
"self",
",",
"sessionid",
",",
"booking_id",
")",
":",
"url",
"=",
"\"{}{}{}/\"",
".",
"format",
"(",
"BASE_URL",
",",
"\"/delete/\"",
",",
"booking_id",
")",
"cookies",
"=",
"dict",
"(",
"sessionid",
"=",
"sessionid",
")",
"try",
":",
"resp",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"cookies",
"=",
"cookies",
",",
"headers",
"=",
"{",
"'Referer'",
":",
"'{}{}'",
".",
"format",
"(",
"BASE_URL",
",",
"\"/reservations/\"",
")",
"}",
")",
"except",
"resp",
".",
"exceptions",
".",
"HTTPError",
"as",
"error",
":",
"raise",
"APIError",
"(",
"\"Server Error: {}\"",
".",
"format",
"(",
"error",
")",
")",
"if",
"resp",
".",
"status_code",
"==",
"404",
":",
"raise",
"APIError",
"(",
"\"Booking could not be found on server.\"",
")",
"html",
"=",
"resp",
".",
"content",
".",
"decode",
"(",
"\"utf8\"",
")",
"if",
"\"https://weblogin.pennkey.upenn.edu\"",
"in",
"html",
":",
"raise",
"APIError",
"(",
"\"Wharton Auth Failed. Session ID is not valid.\"",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"soup",
"=",
"BeautifulSoup",
"(",
"html",
",",
"\"html5lib\"",
")",
"middleware_token",
"=",
"soup",
".",
"find",
"(",
"\"input\"",
",",
"{",
"'name'",
":",
"\"csrfmiddlewaretoken\"",
"}",
")",
".",
"get",
"(",
"'value'",
")",
"csrftoken",
"=",
"resp",
".",
"cookies",
"[",
"'csrftoken'",
"]",
"cookies2",
"=",
"{",
"'sessionid'",
":",
"sessionid",
",",
"'csrftoken'",
":",
"csrftoken",
"}",
"headers",
"=",
"{",
"'Referer'",
":",
"url",
"}",
"payload",
"=",
"{",
"'csrfmiddlewaretoken'",
":",
"middleware_token",
"}",
"try",
":",
"resp2",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"cookies",
"=",
"cookies2",
",",
"data",
"=",
"payload",
",",
"headers",
"=",
"headers",
")",
"except",
"resp2",
".",
"exceptions",
".",
"HTTPError",
"as",
"error",
":",
"raise",
"APIError",
"(",
"\"Server Error: {}\"",
".",
"format",
"(",
"error",
")",
")",
"return",
"{",
"\"success\"",
":",
"True",
"}"
] | Deletes a Wharton GSR Booking for a given booking and session id. | [
"Deletes",
"a",
"Wharton",
"GSR",
"Booking",
"for",
"a",
"given",
"booking",
"and",
"session",
"id",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/wharton.py#L92-L124 | train |
pennlabs/penn-sdk-python | penn/wharton.py | Wharton.get_wharton_gsrs | def get_wharton_gsrs(self, sessionid, date=None):
    """ Make a request to retrieve Wharton GSR listings. """
    if date:
        date += " {}".format(self.get_dst_gmt_timezone())
    else:
        date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%S")
    resp = requests.get('https://apps.wharton.upenn.edu/gsr/api/app/grid_view/', params={
        'search_time': date
    }, cookies={
        'sessionid': sessionid
    })
    if resp.status_code == 200:
        return resp.json()
    else:
        raise APIError('Remote server returned status code {}.'.format(resp.status_code)) | python | def get_wharton_gsrs(self, sessionid, date=None):
    """ Make a request to retrieve Wharton GSR listings. """
    if date:
        date += " {}".format(self.get_dst_gmt_timezone())
    else:
        date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%S")
    resp = requests.get('https://apps.wharton.upenn.edu/gsr/api/app/grid_view/', params={
        'search_time': date
    }, cookies={
        'sessionid': sessionid
    })
    if resp.status_code == 200:
        return resp.json()
    else:
        raise APIError('Remote server returned status code {}.'.format(resp.status_code)) | [
"def",
"get_wharton_gsrs",
"(",
"self",
",",
"sessionid",
",",
"date",
"=",
"None",
")",
":",
"if",
"date",
":",
"date",
"+=",
"\" {}\"",
".",
"format",
"(",
"self",
".",
"get_dst_gmt_timezone",
"(",
")",
")",
"else",
":",
"date",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%S\"",
")",
"resp",
"=",
"requests",
".",
"get",
"(",
"'https://apps.wharton.upenn.edu/gsr/api/app/grid_view/'",
",",
"params",
"=",
"{",
"'search_time'",
":",
"date",
"}",
",",
"cookies",
"=",
"{",
"'sessionid'",
":",
"sessionid",
"}",
")",
"if",
"resp",
".",
"status_code",
"==",
"200",
":",
"return",
"resp",
".",
"json",
"(",
")",
"else",
":",
"raise",
"APIError",
"(",
"'Remote server returned status code {}.'",
".",
"format",
"(",
"resp",
".",
"status_code",
")",
")"
] | Make a request to retrieve Wharton GSR listings. | [
"Make",
"a",
"request",
"to",
"retrieve",
"Wharton",
"GSR",
"listings",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/wharton.py#L126-L140 | train |
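
Fetching the grid view for a specific local date (when `date` is omitted the method falls back to the current UTC time). `wharton` is again an assumed instance with a valid Penn session id; the `times` key is the one `switch_format` consumes below:

```python
listings = wharton.get_wharton_gsrs(sessionid="abc123",
                                    date="2019-04-01 14:00")
print(len(listings["times"]))  # number of time slots in the grid
```
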
pennlabs/penn-sdk-python | penn/wharton.py | Wharton.switch_format | def switch_format(self, gsr):
    """ Convert the Wharton GSR format into the studyspaces API format. """
    if "error" in gsr:
        return gsr
    categories = {
        "cid": 1,
        "name": "Huntsman Hall",
        "rooms": []
    }
    for time in gsr["times"]:
        for entry in time:
            entry["name"] = entry["room_number"]
            del entry["room_number"]
            start_time_str = entry["start_time"]
            end_time = datetime.datetime.strptime(start_time_str[:-6], '%Y-%m-%dT%H:%M:%S') + datetime.timedelta(minutes=30)
            end_time_str = end_time.strftime("%Y-%m-%dT%H:%M:%S") + "-{}".format(self.get_dst_gmt_timezone())
            time = {
                "available": not entry["reserved"],
                "start": entry["start_time"],
                "end": end_time_str,
            }
            exists = False
            for room in categories["rooms"]:
                if room["name"] == entry["name"]:
                    room["times"].append(time)
                    exists = True
            if not exists:
                del entry["booked_by_user"]
                del entry["building"]
                if "reservation_id" in entry:
                    del entry["reservation_id"]
                entry["lid"] = 1
                entry["gid"] = 1
                entry["capacity"] = 5
                entry["room_id"] = int(entry["id"])
                del entry["id"]
                entry["times"] = [time]
                del entry["reserved"]
                del entry["end_time"]
                del entry["start_time"]
                categories["rooms"].append(entry)
    return {"categories": [categories], "rooms": categories["rooms"]} | python | def switch_format(self, gsr):
    """ Convert the Wharton GSR format into the studyspaces API format. """
    if "error" in gsr:
        return gsr
    categories = {
        "cid": 1,
        "name": "Huntsman Hall",
        "rooms": []
    }
    for time in gsr["times"]:
        for entry in time:
            entry["name"] = entry["room_number"]
            del entry["room_number"]
            start_time_str = entry["start_time"]
            end_time = datetime.datetime.strptime(start_time_str[:-6], '%Y-%m-%dT%H:%M:%S') + datetime.timedelta(minutes=30)
            end_time_str = end_time.strftime("%Y-%m-%dT%H:%M:%S") + "-{}".format(self.get_dst_gmt_timezone())
            time = {
                "available": not entry["reserved"],
                "start": entry["start_time"],
                "end": end_time_str,
            }
            exists = False
            for room in categories["rooms"]:
                if room["name"] == entry["name"]:
                    room["times"].append(time)
                    exists = True
            if not exists:
                del entry["booked_by_user"]
                del entry["building"]
                if "reservation_id" in entry:
                    del entry["reservation_id"]
                entry["lid"] = 1
                entry["gid"] = 1
                entry["capacity"] = 5
                entry["room_id"] = int(entry["id"])
                del entry["id"]
                entry["times"] = [time]
                del entry["reserved"]
                del entry["end_time"]
                del entry["start_time"]
                categories["rooms"].append(entry)
    return {"categories": [categories], "rooms": categories["rooms"]} | [
"def",
"switch_format",
"(",
"self",
",",
"gsr",
")",
":",
"if",
"\"error\"",
"in",
"gsr",
":",
"return",
"gsr",
"categories",
"=",
"{",
"\"cid\"",
":",
"1",
",",
"\"name\"",
":",
"\"Huntsman Hall\"",
",",
"\"rooms\"",
":",
"[",
"]",
"}",
"for",
"time",
"in",
"gsr",
"[",
"\"times\"",
"]",
":",
"for",
"entry",
"in",
"time",
":",
"entry",
"[",
"\"name\"",
"]",
"=",
"entry",
"[",
"\"room_number\"",
"]",
"del",
"entry",
"[",
"\"room_number\"",
"]",
"start_time_str",
"=",
"entry",
"[",
"\"start_time\"",
"]",
"end_time",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"start_time_str",
"[",
":",
"-",
"6",
"]",
",",
"'%Y-%m-%dT%H:%M:%S'",
")",
"+",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"30",
")",
"end_time_str",
"=",
"end_time",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"+",
"\"-{}\"",
".",
"format",
"(",
"self",
".",
"get_dst_gmt_timezone",
"(",
")",
")",
"time",
"=",
"{",
"\"available\"",
":",
"not",
"entry",
"[",
"\"reserved\"",
"]",
",",
"\"start\"",
":",
"entry",
"[",
"\"start_time\"",
"]",
",",
"\"end\"",
":",
"end_time_str",
",",
"}",
"exists",
"=",
"False",
"for",
"room",
"in",
"categories",
"[",
"\"rooms\"",
"]",
":",
"if",
"room",
"[",
"\"name\"",
"]",
"==",
"entry",
"[",
"\"name\"",
"]",
":",
"room",
"[",
"\"times\"",
"]",
".",
"append",
"(",
"time",
")",
"exists",
"=",
"True",
"if",
"not",
"exists",
":",
"del",
"entry",
"[",
"\"booked_by_user\"",
"]",
"del",
"entry",
"[",
"\"building\"",
"]",
"if",
"\"reservation_id\"",
"in",
"entry",
":",
"del",
"entry",
"[",
"\"reservation_id\"",
"]",
"entry",
"[",
"\"lid\"",
"]",
"=",
"1",
"entry",
"[",
"\"gid\"",
"]",
"=",
"1",
"entry",
"[",
"\"capacity\"",
"]",
"=",
"5",
"entry",
"[",
"\"room_id\"",
"]",
"=",
"int",
"(",
"entry",
"[",
"\"id\"",
"]",
")",
"del",
"entry",
"[",
"\"id\"",
"]",
"entry",
"[",
"\"times\"",
"]",
"=",
"[",
"time",
"]",
"del",
"entry",
"[",
"\"reserved\"",
"]",
"del",
"entry",
"[",
"\"end_time\"",
"]",
"del",
"entry",
"[",
"\"start_time\"",
"]",
"categories",
"[",
"\"rooms\"",
"]",
".",
"append",
"(",
"entry",
")",
"return",
"{",
"\"categories\"",
":",
"[",
"categories",
"]",
",",
"\"rooms\"",
":",
"categories",
"[",
"\"rooms\"",
"]",
"}"
] | Convert the Wharton GSR format into the studyspaces API format. | [
"Convert",
"the",
"Wharton",
"GSR",
"format",
"into",
"the",
"studyspaces",
"API",
"format",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/wharton.py#L142-L184 | train |
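
The conversion is easiest to follow on a single grid entry; under the assumption that one slot looks roughly like the dict below, it becomes one room with a 30-minute availability window:

```python
gsr = {"times": [[{
    "id": "241", "room_number": "F38", "reserved": False,
    "start_time": "2019-04-01T14:00:00-04:00",
    "end_time": "2019-04-01T14:30:00-04:00",
    "booked_by_user": False, "building": "Huntsman Hall",
}]]}
out = wharton.switch_format(gsr)               # assumed Wharton() instance
print(out["rooms"][0]["name"])                 # 'F38'
print(out["rooms"][0]["times"][0]["start"])    # '2019-04-01T14:00:00-04:00'
```
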
pennlabs/penn-sdk-python | penn/wharton.py | Wharton.get_wharton_gsrs_formatted | def get_wharton_gsrs_formatted(self, sessionid, date=None):
    """ Return the wharton GSR listing formatted in studyspaces format. """
    gsrs = self.get_wharton_gsrs(sessionid, date)
    return self.switch_format(gsrs) | python | def get_wharton_gsrs_formatted(self, sessionid, date=None):
    """ Return the wharton GSR listing formatted in studyspaces format. """
    gsrs = self.get_wharton_gsrs(sessionid, date)
    return self.switch_format(gsrs) | [
"def",
"get_wharton_gsrs_formatted",
"(",
"self",
",",
"sessionid",
",",
"date",
"=",
"None",
")",
":",
"gsrs",
"=",
"self",
".",
"get_wharton_gsrs",
"(",
"sessionid",
",",
"date",
")",
"return",
"self",
".",
"switch_format",
"(",
"gsrs",
")"
] | Return the wharton GSR listing formatted in studyspaces format. | [
"Return",
"the",
"wharton",
"GSR",
"listing",
"formatted",
"in",
"studyspaces",
"format",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/wharton.py#L186-L189 | train |
cloudbase/python-hnvclient | hnv/config/options.py | get_options | def get_options():
    """Collect all the options info from the other modules."""
    options = collections.defaultdict(list)
    for opt_class in config_factory.get_options():
        if not issubclass(opt_class, config_base.Options):
            continue
        config_options = opt_class(None)
        options[config_options.group_name].extend(config_options.list())
    return [(key, value) for key, value in options.items()] | python | def get_options():
    """Collect all the options info from the other modules."""
    options = collections.defaultdict(list)
    for opt_class in config_factory.get_options():
        if not issubclass(opt_class, config_base.Options):
            continue
        config_options = opt_class(None)
        options[config_options.group_name].extend(config_options.list())
    return [(key, value) for key, value in options.items()] | [
"def",
"get_options",
"(",
")",
":",
"options",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"opt_class",
"in",
"config_factory",
".",
"get_options",
"(",
")",
":",
"if",
"not",
"issubclass",
"(",
"opt_class",
",",
"config_base",
".",
"Options",
")",
":",
"continue",
"config_options",
"=",
"opt_class",
"(",
"None",
")",
"options",
"[",
"config_options",
".",
"group_name",
"]",
".",
"extend",
"(",
"config_options",
".",
"list",
"(",
")",
")",
"return",
"[",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"options",
".",
"items",
"(",
")",
"]"
] | Collect all the options info from the other modules. | [
"Collect",
"all",
"the",
"options",
"info",
"from",
"the",
"other",
"modules",
"."
] | b019452af01db22629809b8930357a2ebf6494be | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/config/options.py#L26-L34 | train |
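
The grouping pattern used above, shown standalone: options accumulate per group name in a `defaultdict` and come back out as `(group, [options])` pairs:

```python
import collections

options = collections.defaultdict(list)
options["hnv"].extend(["url", "username"])
options["hnv"].append("password")
options["agent"].append("retry_count")
print([(key, value) for key, value in options.items()])
# [('hnv', ['url', 'username', 'password']), ('agent', ['retry_count'])]
```
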
pennlabs/penn-sdk-python | penn/laundry.py | Laundry.check_is_working | def check_is_working(self):
    """ Returns True if the wash alert web interface seems to be
    working properly, or False otherwise.

    >>> l.check_is_working()
    """
    try:
        r = requests.post("http://{}/".format(LAUNDRY_DOMAIN), timeout=60, data={
            "locationid": "5faec7e9-a4aa-47c2-a514-950c03fac460",
            "email": "[email protected]",
            "washers": 0,
            "dryers": 0,
            "locationalert": "OK"
        })
        r.raise_for_status()
        return "The transaction log for database 'QuantumCoin' is full due to 'LOG_BACKUP'." not in r.text
    except requests.exceptions.HTTPError:
        return False | python | def check_is_working(self):
    """ Returns True if the wash alert web interface seems to be
    working properly, or False otherwise.

    >>> l.check_is_working()
    """
    try:
        r = requests.post("http://{}/".format(LAUNDRY_DOMAIN), timeout=60, data={
            "locationid": "5faec7e9-a4aa-47c2-a514-950c03fac460",
            "email": "[email protected]",
            "washers": 0,
            "dryers": 0,
            "locationalert": "OK"
        })
        r.raise_for_status()
        return "The transaction log for database 'QuantumCoin' is full due to 'LOG_BACKUP'." not in r.text
    except requests.exceptions.HTTPError:
        return False | [
"def",
"check_is_working",
"(",
"self",
")",
":",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"\"http://{}/\"",
".",
"format",
"(",
"LAUNDRY_DOMAIN",
")",
",",
"timeout",
"=",
"60",
",",
"data",
"=",
"{",
"\"locationid\"",
":",
"\"5faec7e9-a4aa-47c2-a514-950c03fac460\"",
",",
"\"email\"",
":",
"\"[email protected]\"",
",",
"\"washers\"",
":",
"0",
",",
"\"dryers\"",
":",
"0",
",",
"\"locationalert\"",
":",
"\"OK\"",
"}",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"\"The transaction log for database 'QuantumCoin' is full due to 'LOG_BACKUP'.\"",
"not",
"in",
"r",
".",
"text",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
":",
"return",
"False"
] | Returns True if the wash alert web interface seems to be
working properly, or False otherwise.
>>> l.check_is_working() | [
"Returns",
"True",
"if",
"the",
"wash",
"alert",
"web",
"interface",
"seems",
"to",
"be",
"working",
"properly",
"or",
"False",
"otherwise",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/laundry.py#L153-L170 | train |
pennlabs/penn-sdk-python | penn/laundry.py | Laundry.machine_usage | def machine_usage(self, hall_no):
    """Returns the average usage of laundry machines every hour
    for a given hall.

    The usages are returned in a dictionary, with the key being
    the day of the week, and the value being an array listing the usages
    per hour.

    :param hall_no:
        integer corresponding to the id number for the hall. This number
        is returned as part of the all_status call.

    >>> english_house = l.machine_usage(2)
    """
    try:
        num = int(hall_no)
    except ValueError:
        raise ValueError("Room Number must be integer")
    r = requests.get(USAGE_BASE_URL + str(num), timeout=60)
    parsed = BeautifulSoup(r.text, 'html5lib')
    usage_table = parsed.find_all('table', width='504px')[0]
    rows = usage_table.find_all('tr')
    usages = {}
    for i, row in enumerate(rows):
        day = []
        hours = row.find_all('td')
        for hour in hours:
            day.append(self.busy_dict[str(hour['class'][0])])
        usages[self.days[i]] = day
    return usages | python | def machine_usage(self, hall_no):
    """Returns the average usage of laundry machines every hour
    for a given hall.

    The usages are returned in a dictionary, with the key being
    the day of the week, and the value being an array listing the usages
    per hour.

    :param hall_no:
        integer corresponding to the id number for the hall. This number
        is returned as part of the all_status call.

    >>> english_house = l.machine_usage(2)
    """
    try:
        num = int(hall_no)
    except ValueError:
        raise ValueError("Room Number must be integer")
    r = requests.get(USAGE_BASE_URL + str(num), timeout=60)
    parsed = BeautifulSoup(r.text, 'html5lib')
    usage_table = parsed.find_all('table', width='504px')[0]
    rows = usage_table.find_all('tr')
    usages = {}
    for i, row in enumerate(rows):
        day = []
        hours = row.find_all('td')
        for hour in hours:
            day.append(self.busy_dict[str(hour['class'][0])])
        usages[self.days[i]] = day
    return usages | [
"def",
"machine_usage",
"(",
"self",
",",
"hall_no",
")",
":",
"try",
":",
"num",
"=",
"int",
"(",
"hall_no",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Room Number must be integer\"",
")",
"r",
"=",
"requests",
".",
"get",
"(",
"USAGE_BASE_URL",
"+",
"str",
"(",
"num",
")",
",",
"timeout",
"=",
"60",
")",
"parsed",
"=",
"BeautifulSoup",
"(",
"r",
".",
"text",
",",
"'html5lib'",
")",
"usage_table",
"=",
"parsed",
".",
"find_all",
"(",
"'table'",
",",
"width",
"=",
"'504px'",
")",
"[",
"0",
"]",
"rows",
"=",
"usage_table",
".",
"find_all",
"(",
"'tr'",
")",
"usages",
"=",
"{",
"}",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"rows",
")",
":",
"day",
"=",
"[",
"]",
"hours",
"=",
"row",
".",
"find_all",
"(",
"'td'",
")",
"for",
"hour",
"in",
"hours",
":",
"day",
".",
"append",
"(",
"self",
".",
"busy_dict",
"[",
"str",
"(",
"hour",
"[",
"'class'",
"]",
"[",
"0",
"]",
")",
"]",
")",
"usages",
"[",
"self",
".",
"days",
"[",
"i",
"]",
"]",
"=",
"day",
"return",
"usages"
] | Returns the average usage of laundry machines every hour
for a given hall.
The usages are returned in a dictionary, with the key being
the day of the week, and the value being an array listing the usages
per hour.
:param hall_no:
integer corresponding to the id number for the hall. This number
is returned as part of the all_status call.
>>> english_house = l.machine_usage(2) | [
"Returns",
"the",
"average",
"usage",
"of",
"laundry",
"machines",
"every",
"hour",
"for",
"a",
"given",
"hall",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/laundry.py#L172-L202 | train |
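
The scraping pattern in miniature: each `<td>` class encodes a busyness level that is mapped through a dict like `Laundry.busy_dict` (the class names and labels below are assumptions for illustration; `html5lib` must be installed):

```python
from bs4 import BeautifulSoup

busy_dict = {"asi": "Low", "hsi": "High"}  # assumed class-name mapping
html = "<table><tr><td class='asi'></td><td class='hsi'></td></tr></table>"
for row in BeautifulSoup(html, "html5lib").find_all("tr"):
    print([busy_dict[td["class"][0]] for td in row.find_all("td")])
# ['Low', 'High']
```
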
lambdalisue/notify | src/notify/mailer.py | create_message | def create_message(from_addr, to_addr, subject, body, encoding=None):
    """
    Create message object for sending email

    Parameters
    ----------
    from_addr : string
        An email address used for 'From' attribute
    to_addr : string
        An email address used for 'To' attribute
    subject : string
        An email subject string
    body : string
        An email body string
    encoding : string
        An email encoding string (Default: utf8)

    Returns
    -------
    object
        An instance of email.mime.text.MIMEText
    """
    if encoding == "None":
        encoding = None
    if not encoding:
        encoding = 'utf-8'
    msg = MIMEText(body.encode(encoding), 'plain', encoding)
    msg['Subject'] = Header(subject.encode(encoding), encoding)
    msg['From'] = from_addr
    msg['To'] = to_addr
    msg['Date'] = formatdate()
    return msg | python | def create_message(from_addr, to_addr, subject, body, encoding=None):
    """
    Create message object for sending email

    Parameters
    ----------
    from_addr : string
        An email address used for 'From' attribute
    to_addr : string
        An email address used for 'To' attribute
    subject : string
        An email subject string
    body : string
        An email body string
    encoding : string
        An email encoding string (Default: utf8)

    Returns
    -------
    object
        An instance of email.mime.text.MIMEText
    """
    if encoding == "None":
        encoding = None
    if not encoding:
        encoding = 'utf-8'
    msg = MIMEText(body.encode(encoding), 'plain', encoding)
    msg['Subject'] = Header(subject.encode(encoding), encoding)
    msg['From'] = from_addr
    msg['To'] = to_addr
    msg['Date'] = formatdate()
    return msg | [
"def",
"create_message",
"(",
"from_addr",
",",
"to_addr",
",",
"subject",
",",
"body",
",",
"encoding",
"=",
"None",
")",
":",
"if",
"encoding",
"==",
"\"None\"",
":",
"encoding",
"=",
"None",
"if",
"not",
"encoding",
":",
"encoding",
"=",
"'utf-8'",
"msg",
"=",
"MIMEText",
"(",
"body",
".",
"encode",
"(",
"encoding",
")",
",",
"'plain'",
",",
"encoding",
")",
"msg",
"[",
"'Subject'",
"]",
"=",
"Header",
"(",
"subject",
".",
"encode",
"(",
"encoding",
")",
",",
"encoding",
")",
"msg",
"[",
"'From'",
"]",
"=",
"from_addr",
"msg",
"[",
"'To'",
"]",
"=",
"to_addr",
"msg",
"[",
"'Date'",
"]",
"=",
"formatdate",
"(",
")",
"return",
"msg"
] | Create message object for sending email
Parameters
----------
from_addr : string
An email address used for 'From' attribute
to_addr : string
An email address used for 'To' attribute
subject : string
An email subject string
body : string
An email body string
encoding : string
An email encoding string (Default: utf8)
Returns
-------
object
An instance of email.mime.text.MIMEText | [
"Create",
"message",
"object",
"for",
"sending",
"email"
] | 1b6d7d1faa2cea13bfaa1f35130f279a0115e686 | https://github.com/lambdalisue/notify/blob/1b6d7d1faa2cea13bfaa1f35130f279a0115e686/src/notify/mailer.py#L11-L42 | train |
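
Sending the message built above; the SMTP host/port and the addresses are placeholders:

```python
import smtplib

msg = create_message("[email protected]", "[email protected]",
                     "Job finished", "Training completed without errors.")
with smtplib.SMTP("localhost", 25) as smtp:
    smtp.sendmail(msg["From"], [msg["To"]], msg.as_string())
```
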
pennlabs/penn-sdk-python | penn/studyspaces.py | StudySpaces._obtain_token | def _obtain_token(self):
    """Obtain an auth token from client id and client secret."""
    # don't renew token if hasn't expired yet
    if self.expiration and self.expiration > datetime.datetime.now():
        return
    resp = requests.post("{}/1.1/oauth/token".format(API_URL), data={
        "client_id": self.client_id,
        "client_secret": self.client_secret,
        "grant_type": "client_credentials"
    }).json()
    if "error" in resp:
        raise APIError("LibCal Auth Failed: {}, {}".format(resp["error"], resp.get("error_description")))
    self.expiration = datetime.datetime.now() + datetime.timedelta(seconds=resp["expires_in"])
    self.token = resp["access_token"]
    print(self.token) | python | def _obtain_token(self):
    """Obtain an auth token from client id and client secret."""
    # don't renew token if hasn't expired yet
    if self.expiration and self.expiration > datetime.datetime.now():
        return
    resp = requests.post("{}/1.1/oauth/token".format(API_URL), data={
        "client_id": self.client_id,
        "client_secret": self.client_secret,
        "grant_type": "client_credentials"
    }).json()
    if "error" in resp:
        raise APIError("LibCal Auth Failed: {}, {}".format(resp["error"], resp.get("error_description")))
    self.expiration = datetime.datetime.now() + datetime.timedelta(seconds=resp["expires_in"])
    self.token = resp["access_token"]
    print(self.token) | [
"def",
"_obtain_token",
"(",
"self",
")",
":",
"# don't renew token if hasn't expired yet",
"if",
"self",
".",
"expiration",
"and",
"self",
".",
"expiration",
">",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
":",
"return",
"resp",
"=",
"requests",
".",
"post",
"(",
"\"{}/1.1/oauth/token\"",
".",
"format",
"(",
"API_URL",
")",
",",
"data",
"=",
"{",
"\"client_id\"",
":",
"self",
".",
"client_id",
",",
"\"client_secret\"",
":",
"self",
".",
"client_secret",
",",
"\"grant_type\"",
":",
"\"client_credentials\"",
"}",
")",
".",
"json",
"(",
")",
"if",
"\"error\"",
"in",
"resp",
":",
"raise",
"APIError",
"(",
"\"LibCal Auth Failed: {}, {}\"",
".",
"format",
"(",
"resp",
"[",
"\"error\"",
"]",
",",
"resp",
".",
"get",
"(",
"\"error_description\"",
")",
")",
")",
"self",
".",
"expiration",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"resp",
"[",
"\"expires_in\"",
"]",
")",
"self",
".",
"token",
"=",
"resp",
"[",
"\"access_token\"",
"]",
"print",
"(",
"self",
".",
"token",
")"
] | Obtain an auth token from client id and client secret. | [
"Obtain",
"an",
"auth",
"token",
"from",
"client",
"id",
"and",
"client",
"secret",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/studyspaces.py#L30-L48 | train |
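
The token-caching rule in isolation: a cached token is reused until `expiration` passes, so back-to-back API calls hit the OAuth endpoint only once. A minimal sketch of the same check:

```python
import datetime

class TokenCache:
    def __init__(self):
        self.token, self.expiration = None, None

    def needs_refresh(self):
        return not (self.expiration
                    and self.expiration > datetime.datetime.now())

cache = TokenCache()
print(cache.needs_refresh())   # True: nothing cached yet
cache.expiration = datetime.datetime.now() + datetime.timedelta(hours=1)
print(cache.needs_refresh())   # False: token still valid
```
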
pennlabs/penn-sdk-python | penn/studyspaces.py | StudySpaces._request | def _request(self, *args, **kwargs):
    """Make a signed request to the libcal API."""
    if not self.token:
        self._obtain_token()
    headers = {
        "Authorization": "Bearer {}".format(self.token)
    }
    # add authorization headers
    if "headers" in kwargs:
        kwargs["headers"].update(headers)
    else:
        kwargs["headers"] = headers
    # add api site to url
    args = list(args)
    if not args[1].startswith("http"):
        args[1] = "{}{}".format(API_URL, args[1])
    has_no_token = kwargs.get("no_token")
    if has_no_token:
        del kwargs["no_token"]
    resp = requests.request(*args, **kwargs)
    if resp.status_code == 401 and not has_no_token:
        self._obtain_token()
        kwargs["no_token"] = True
        # fixed: return the retried response instead of discarding it
        return self._request(*args, **kwargs)
    return resp | python | def _request(self, *args, **kwargs):
    """Make a signed request to the libcal API."""
    if not self.token:
        self._obtain_token()
    headers = {
        "Authorization": "Bearer {}".format(self.token)
    }
    # add authorization headers
    if "headers" in kwargs:
        kwargs["headers"].update(headers)
    else:
        kwargs["headers"] = headers
    # add api site to url
    args = list(args)
    if not args[1].startswith("http"):
        args[1] = "{}{}".format(API_URL, args[1])
    has_no_token = kwargs.get("no_token")
    if has_no_token:
        del kwargs["no_token"]
    resp = requests.request(*args, **kwargs)
    if resp.status_code == 401 and not has_no_token:
        self._obtain_token()
        kwargs["no_token"] = True
        # fixed: return the retried response instead of discarding it
        return self._request(*args, **kwargs)
    return resp | [
"def",
"_request",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"token",
":",
"self",
".",
"_obtain_token",
"(",
")",
"headers",
"=",
"{",
"\"Authorization\"",
":",
"\"Bearer {}\"",
".",
"format",
"(",
"self",
".",
"token",
")",
"}",
"# add authorization headers",
"if",
"\"headers\"",
"in",
"kwargs",
":",
"kwargs",
"[",
"\"headers\"",
"]",
".",
"update",
"(",
"headers",
")",
"else",
":",
"kwargs",
"[",
"\"headers\"",
"]",
"=",
"headers",
"# add api site to url",
"args",
"=",
"list",
"(",
"args",
")",
"if",
"not",
"args",
"[",
"1",
"]",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"args",
"[",
"1",
"]",
"=",
"\"{}{}\"",
".",
"format",
"(",
"API_URL",
",",
"args",
"[",
"1",
"]",
")",
"has_no_token",
"=",
"kwargs",
".",
"get",
"(",
"\"no_token\"",
")",
"if",
"has_no_token",
":",
"del",
"kwargs",
"[",
"\"no_token\"",
"]",
"resp",
"=",
"requests",
".",
"request",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"resp",
".",
"status_code",
"==",
"401",
"and",
"not",
"has_no_token",
":",
"self",
".",
"_obtain_token",
"(",
")",
"kwargs",
"[",
"\"no_token\"",
"]",
"=",
"True",
"self",
".",
"_request",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"resp"
] | Make a signed request to the libcal API. | [
"Make",
"a",
"signed",
"request",
"to",
"the",
"libcal",
"API",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/studyspaces.py#L50-L78 | train |
pennlabs/penn-sdk-python | penn/studyspaces.py | StudySpaces.get_rooms | def get_rooms(self, lid, start=None, end=None):
    """Returns a list of rooms and their availabilities, grouped by category.

    :param lid: The ID of the location to retrieve rooms for.
    :type lid: int
    :param start: The start range for the availabilities to retrieve, in YYYY-MM-DD format.
    :type start: str
    :param end: The end range for the availabilities to retrieve, in YYYY-MM-DD format.
    :type end: str
    """
    range_str = "availability"
    if start:
        start_datetime = datetime.datetime.combine(datetime.datetime.strptime(start, "%Y-%m-%d").date(), datetime.datetime.min.time())
        range_str += "=" + start
        if end and not start == end:
            range_str += "," + end
    else:
        start_datetime = None
    resp = self._request("GET", "/1.1/space/categories/{}".format(lid)).json()
    if "error" in resp:
        raise APIError(resp["error"])
    output = {"id": lid, "categories": []}
    # if there aren't any rooms associated with this location, return
    if len(resp) < 1:
        return output
    if "error" in resp[0]:
        raise APIError(resp[0]["error"])
    if "categories" not in resp[0]:
        return output
    categories = resp[0]["categories"]
    id_to_category = {i["cid"]: i["name"] for i in categories}
    categories = ",".join([str(x["cid"]) for x in categories])
    resp = self._request("GET", "/1.1/space/category/{}".format(categories))
    for category in resp.json():
        cat_out = {"cid": category["cid"], "name": id_to_category[category["cid"]], "rooms": []}
        # ignore equipment categories
        if cat_out["name"].endswith("Equipment"):
            continue
        items = category["items"]
        items = ",".join([str(x) for x in items])
        resp = self._request("GET", "/1.1/space/item/{}?{}".format(items, range_str))
        for room in resp.json():
            if room["id"] in ROOM_BLACKLIST:
                continue
            # prepend protocol to urls
            if "image" in room and room["image"]:
                if not room["image"].startswith("http"):
                    room["image"] = "https:" + room["image"]
            # convert html descriptions to text
            if "description" in room:
                description = room["description"].replace(u'\xa0', u' ')
                room["description"] = BeautifulSoup(description, "html.parser").text.strip()
            # remove extra fields
            if "formid" in room:
                del room["formid"]
            # enforce date filter
            # API returns dates outside of the range, fix this manually
            if start_datetime:
                out_times = []
                for time in room["availability"]:
                    parsed_start = datetime.datetime.strptime(time["from"][:-6], "%Y-%m-%dT%H:%M:%S")
                    if parsed_start >= start_datetime:
                        out_times.append(time)
                room["availability"] = out_times
            cat_out["rooms"].append(room)
        if cat_out["rooms"]:
            output["categories"].append(cat_out)
    return output | python | def get_rooms(self, lid, start=None, end=None):
    """Returns a list of rooms and their availabilities, grouped by category.

    :param lid: The ID of the location to retrieve rooms for.
    :type lid: int
    :param start: The start range for the availabilities to retrieve, in YYYY-MM-DD format.
    :type start: str
    :param end: The end range for the availabilities to retrieve, in YYYY-MM-DD format.
    :type end: str
    """
    range_str = "availability"
    if start:
        start_datetime = datetime.datetime.combine(datetime.datetime.strptime(start, "%Y-%m-%d").date(), datetime.datetime.min.time())
        range_str += "=" + start
        if end and not start == end:
            range_str += "," + end
    else:
        start_datetime = None
    resp = self._request("GET", "/1.1/space/categories/{}".format(lid)).json()
    if "error" in resp:
        raise APIError(resp["error"])
    output = {"id": lid, "categories": []}
    # if there aren't any rooms associated with this location, return
    if len(resp) < 1:
        return output
    if "error" in resp[0]:
        raise APIError(resp[0]["error"])
    if "categories" not in resp[0]:
        return output
    categories = resp[0]["categories"]
    id_to_category = {i["cid"]: i["name"] for i in categories}
    categories = ",".join([str(x["cid"]) for x in categories])
    resp = self._request("GET", "/1.1/space/category/{}".format(categories))
    for category in resp.json():
        cat_out = {"cid": category["cid"], "name": id_to_category[category["cid"]], "rooms": []}
        # ignore equipment categories
        if cat_out["name"].endswith("Equipment"):
            continue
        items = category["items"]
        items = ",".join([str(x) for x in items])
        resp = self._request("GET", "/1.1/space/item/{}?{}".format(items, range_str))
        for room in resp.json():
            if room["id"] in ROOM_BLACKLIST:
                continue
            # prepend protocol to urls
            if "image" in room and room["image"]:
                if not room["image"].startswith("http"):
                    room["image"] = "https:" + room["image"]
            # convert html descriptions to text
            if "description" in room:
                description = room["description"].replace(u'\xa0', u' ')
                room["description"] = BeautifulSoup(description, "html.parser").text.strip()
            # remove extra fields
            if "formid" in room:
                del room["formid"]
            # enforce date filter
            # API returns dates outside of the range, fix this manually
            if start_datetime:
                out_times = []
                for time in room["availability"]:
                    parsed_start = datetime.datetime.strptime(time["from"][:-6], "%Y-%m-%dT%H:%M:%S")
                    if parsed_start >= start_datetime:
                        out_times.append(time)
                room["availability"] = out_times
            cat_out["rooms"].append(room)
        if cat_out["rooms"]:
            output["categories"].append(cat_out)
    return output | [
"def",
"get_rooms",
"(",
"self",
",",
"lid",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"range_str",
"=",
"\"availability\"",
"if",
"start",
":",
"start_datetime",
"=",
"datetime",
".",
"datetime",
".",
"combine",
"(",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"start",
",",
"\"%Y-%m-%d\"",
")",
".",
"date",
"(",
")",
",",
"datetime",
".",
"datetime",
".",
"min",
".",
"time",
"(",
")",
")",
"range_str",
"+=",
"\"=\"",
"+",
"start",
"if",
"end",
"and",
"not",
"start",
"==",
"end",
":",
"range_str",
"+=",
"\",\"",
"+",
"end",
"else",
":",
"start_datetime",
"=",
"None",
"resp",
"=",
"self",
".",
"_request",
"(",
"\"GET\"",
",",
"\"/1.1/space/categories/{}\"",
".",
"format",
"(",
"lid",
")",
")",
".",
"json",
"(",
")",
"if",
"\"error\"",
"in",
"resp",
":",
"raise",
"APIError",
"(",
"resp",
"[",
"\"error\"",
"]",
")",
"output",
"=",
"{",
"\"id\"",
":",
"lid",
",",
"\"categories\"",
":",
"[",
"]",
"}",
"# if there aren't any rooms associated with this location, return",
"if",
"len",
"(",
"resp",
")",
"<",
"1",
":",
"return",
"output",
"if",
"\"error\"",
"in",
"resp",
"[",
"0",
"]",
":",
"raise",
"APIError",
"(",
"resp",
"[",
"0",
"]",
"[",
"\"error\"",
"]",
")",
"if",
"\"categories\"",
"not",
"in",
"resp",
"[",
"0",
"]",
":",
"return",
"output",
"categories",
"=",
"resp",
"[",
"0",
"]",
"[",
"\"categories\"",
"]",
"id_to_category",
"=",
"{",
"i",
"[",
"\"cid\"",
"]",
":",
"i",
"[",
"\"name\"",
"]",
"for",
"i",
"in",
"categories",
"}",
"categories",
"=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
"[",
"\"cid\"",
"]",
")",
"for",
"x",
"in",
"categories",
"]",
")",
"resp",
"=",
"self",
".",
"_request",
"(",
"\"GET\"",
",",
"\"/1.1/space/category/{}\"",
".",
"format",
"(",
"categories",
")",
")",
"for",
"category",
"in",
"resp",
".",
"json",
"(",
")",
":",
"cat_out",
"=",
"{",
"\"cid\"",
":",
"category",
"[",
"\"cid\"",
"]",
",",
"\"name\"",
":",
"id_to_category",
"[",
"category",
"[",
"\"cid\"",
"]",
"]",
",",
"\"rooms\"",
":",
"[",
"]",
"}",
"# ignore equipment categories",
"if",
"cat_out",
"[",
"\"name\"",
"]",
".",
"endswith",
"(",
"\"Equipment\"",
")",
":",
"continue",
"items",
"=",
"category",
"[",
"\"items\"",
"]",
"items",
"=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"items",
"]",
")",
"resp",
"=",
"self",
".",
"_request",
"(",
"\"GET\"",
",",
"\"/1.1/space/item/{}?{}\"",
".",
"format",
"(",
"items",
",",
"range_str",
")",
")",
"for",
"room",
"in",
"resp",
".",
"json",
"(",
")",
":",
"if",
"room",
"[",
"\"id\"",
"]",
"in",
"ROOM_BLACKLIST",
":",
"continue",
"# prepend protocol to urls",
"if",
"\"image\"",
"in",
"room",
"and",
"room",
"[",
"\"image\"",
"]",
":",
"if",
"not",
"room",
"[",
"\"image\"",
"]",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"room",
"[",
"\"image\"",
"]",
"=",
"\"https:\"",
"+",
"room",
"[",
"\"image\"",
"]",
"# convert html descriptions to text",
"if",
"\"description\"",
"in",
"room",
":",
"description",
"=",
"room",
"[",
"\"description\"",
"]",
".",
"replace",
"(",
"u'\\xa0'",
",",
"u' '",
")",
"room",
"[",
"\"description\"",
"]",
"=",
"BeautifulSoup",
"(",
"description",
",",
"\"html.parser\"",
")",
".",
"text",
".",
"strip",
"(",
")",
"# remove extra fields",
"if",
"\"formid\"",
"in",
"room",
":",
"del",
"room",
"[",
"\"formid\"",
"]",
"# enforce date filter",
"# API returns dates outside of the range, fix this manually",
"if",
"start_datetime",
":",
"out_times",
"=",
"[",
"]",
"for",
"time",
"in",
"room",
"[",
"\"availability\"",
"]",
":",
"parsed_start",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"time",
"[",
"\"from\"",
"]",
"[",
":",
"-",
"6",
"]",
",",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"if",
"parsed_start",
">=",
"start_datetime",
":",
"out_times",
".",
"append",
"(",
"time",
")",
"room",
"[",
"\"availability\"",
"]",
"=",
"out_times",
"cat_out",
"[",
"\"rooms\"",
"]",
".",
"append",
"(",
"room",
")",
"if",
"cat_out",
"[",
"\"rooms\"",
"]",
":",
"output",
"[",
"\"categories\"",
"]",
".",
"append",
"(",
"cat_out",
")",
"return",
"output"
] | Returns a list of rooms and their availabilities, grouped by category.
:param lid: The ID of the location to retrieve rooms for.
:type lid: int
:param start: The start range for the availabilities to retrieve, in YYYY-MM-DD format.
:type start: str
:param end: The end range for the availabilities to retrieve, in YYYY-MM-DD format.
:type end: str | [
"Returns",
"a",
"list",
"of",
"rooms",
"and",
"their",
"availabilities",
"grouped",
"by",
"category",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/studyspaces.py#L93-L167 | train |
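
A hypothetical call pattern; `StudySpaces(client_id, client_secret)` is an assumed constructor, the location id is a placeholder, and the room fields printed are the ones the method is seen populating above:

```python
ss = StudySpaces("my-client-id", "my-client-secret")  # assumed constructor
rooms = ss.get_rooms(lid=1799, start="2019-04-01", end="2019-04-02")
for category in rooms["categories"]:
    for room in category["rooms"]:
        print(room["id"], len(room["availability"]), "open slots")
```
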
pennlabs/penn-sdk-python | penn/studyspaces.py | StudySpaces.book_room | def book_room(self, item, start, end, fname, lname, email, nickname, custom={}, test=False):
    """Books a room given the required information.

    :param item:
        The ID of the room to book.
    :type item: int
    :param start:
        The start time range of when to book the room, in the format returned by the LibCal API.
    :type start: str
    :param end:
        The end time range of when to book the room, in the format returned by the LibCal API.
    :type end: str
    :param fname:
        The first name of the user booking the room.
    :type fname: str
    :param lname:
        The last name of the user booking the room.
    :type lname: str
    :param email:
        The email of the user booking the room.
    :type email: str
    :param nickname:
        The name of the reservation.
    :type nickname: str
    :param custom:
        Any other custom fields required to book the room.
    :type custom: dict
    :param test:
        If this is set to true, don't actually book the room. Default is false.
    :type test: bool
    :returns:
        Dictionary containing a success and error field.
    """
    data = {
        "start": start,
        "fname": fname,
        "lname": lname,
        "email": email,
        "nickname": nickname,
        "bookings": [
            {
                "id": item,
                "to": end
            }
        ],
        "test": test
    }
    data.update(custom)
    resp = self._request("POST", "/1.1/space/reserve", json=data)
    out = resp.json()
    if "errors" in out and "error" not in out:
        errors = out["errors"]
        if isinstance(errors, list):
            errors = " ".join(errors)
        out["error"] = BeautifulSoup(errors.replace("\n", " "), "html.parser").text.strip()
        del out["errors"]
    if "results" not in out:
        if "error" not in out:
            out["error"] = None
            out["results"] = True
        else:
            out["results"] = False
    return out | python | def book_room(self, item, start, end, fname, lname, email, nickname, custom={}, test=False):
    """Books a room given the required information.

    :param item:
        The ID of the room to book.
    :type item: int
    :param start:
        The start time range of when to book the room, in the format returned by the LibCal API.
    :type start: str
    :param end:
        The end time range of when to book the room, in the format returned by the LibCal API.
    :type end: str
    :param fname:
        The first name of the user booking the room.
    :type fname: str
    :param lname:
        The last name of the user booking the room.
    :type lname: str
    :param email:
        The email of the user booking the room.
    :type email: str
    :param nickname:
        The name of the reservation.
    :type nickname: str
    :param custom:
        Any other custom fields required to book the room.
    :type custom: dict
    :param test:
        If this is set to true, don't actually book the room. Default is false.
    :type test: bool
    :returns:
        Dictionary containing a success and error field.
    """
    data = {
        "start": start,
        "fname": fname,
        "lname": lname,
        "email": email,
        "nickname": nickname,
        "bookings": [
            {
                "id": item,
                "to": end
            }
        ],
        "test": test
    }
    data.update(custom)
    resp = self._request("POST", "/1.1/space/reserve", json=data)
    out = resp.json()
    if "errors" in out and "error" not in out:
        errors = out["errors"]
        if isinstance(errors, list):
            errors = " ".join(errors)
        out["error"] = BeautifulSoup(errors.replace("\n", " "), "html.parser").text.strip()
        del out["errors"]
    if "results" not in out:
        if "error" not in out:
            out["error"] = None
            out["results"] = True
        else:
            out["results"] = False
    return out | [
"def",
"book_room",
"(",
"self",
",",
"item",
",",
"start",
",",
"end",
",",
"fname",
",",
"lname",
",",
"email",
",",
"nickname",
",",
"custom",
"=",
"{",
"}",
",",
"test",
"=",
"False",
")",
":",
"data",
"=",
"{",
"\"start\"",
":",
"start",
",",
"\"fname\"",
":",
"fname",
",",
"\"lname\"",
":",
"lname",
",",
"\"email\"",
":",
"email",
",",
"\"nickname\"",
":",
"nickname",
",",
"\"bookings\"",
":",
"[",
"{",
"\"id\"",
":",
"item",
",",
"\"to\"",
":",
"end",
"}",
"]",
",",
"\"test\"",
":",
"test",
"}",
"data",
".",
"update",
"(",
"custom",
")",
"resp",
"=",
"self",
".",
"_request",
"(",
"\"POST\"",
",",
"\"/1.1/space/reserve\"",
",",
"json",
"=",
"data",
")",
"out",
"=",
"resp",
".",
"json",
"(",
")",
"if",
"\"errors\"",
"in",
"out",
"and",
"\"error\"",
"not",
"in",
"out",
":",
"errors",
"=",
"out",
"[",
"\"errors\"",
"]",
"if",
"isinstance",
"(",
"errors",
",",
"list",
")",
":",
"errors",
"=",
"\" \"",
".",
"join",
"(",
"errors",
")",
"out",
"[",
"\"error\"",
"]",
"=",
"BeautifulSoup",
"(",
"errors",
".",
"replace",
"(",
"\"\\n\"",
",",
"\" \"",
")",
",",
"\"html.parser\"",
")",
".",
"text",
".",
"strip",
"(",
")",
"del",
"out",
"[",
"\"errors\"",
"]",
"if",
"\"results\"",
"not",
"in",
"out",
":",
"if",
"\"error\"",
"not",
"in",
"out",
":",
"out",
"[",
"\"error\"",
"]",
"=",
"None",
"out",
"[",
"\"results\"",
"]",
"=",
"True",
"else",
":",
"out",
"[",
"\"results\"",
"]",
"=",
"False",
"return",
"out"
] | Books a room given the required information.
:param item:
The ID of the room to book.
:type item: int
:param start:
The start time range of when to book the room, in the format returned by the LibCal API.
:type start: str
:param end:
The end time range of when to book the room, in the format returned by the LibCal API.
:type end: str
:param fname:
The first name of the user booking the room.
:type fname: str
:param lname:
The last name of the user booking the room.
:type lname: str
:param email:
The email of the user booking the room.
:type email: str
:param nickname:
The name of the reservation.
:type nickname: str
:param custom:
Any other custom fields required to book the room.
:type custom: dict
:param test:
If this is set to true, don't actually book the room. Default is false.
:type test: bool
:returns:
Dictionary containing a success and error field. | [
"Books",
"a",
"room",
"given",
"the",
"required",
"information",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/studyspaces.py#L169-L231 | train |
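Building on the sketch above, a hedged call to book_room. The room id and time strings are illustrative (the docstring only requires the LibCal API time format), and `test=True` makes it a dry run:

```python
result = ss.book_room(
    item=12345,                        # hypothetical room id
    start="2019-10-01T10:00:00-0400",  # assumed LibCal time format
    end="2019-10-01T11:00:00-0400",
    fname="Ada",
    lname="Lovelace",
    email="ada@example.com",
    nickname="Group study",
    test=True,                         # don't actually reserve the room
)
if not result["results"]:
    print(result["error"])
```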
pennlabs/penn-sdk-python | penn/studyspaces.py | StudySpaces.cancel_room | def cancel_room(self, booking_id):
"""Cancel a room given a booking id.
:param booking_id: A booking id or a list of booking ids (separated by commas) to cancel.
:type booking_id: str
"""
resp = self._request("POST", "/1.1/space/cancel/{}".format(booking_id))
return resp.json() | python | def cancel_room(self, booking_id):
"""Cancel a room given a booking id.
:param booking_id: A booking id or a list of booking ids (separated by commas) to cancel.
:type booking_id: str
"""
resp = self._request("POST", "/1.1/space/cancel/{}".format(booking_id))
return resp.json() | [
"def",
"cancel_room",
"(",
"self",
",",
"booking_id",
")",
":",
"resp",
"=",
"self",
".",
"_request",
"(",
"\"POST\"",
",",
"\"/1.1/space/cancel/{}\"",
".",
"format",
"(",
"booking_id",
")",
")",
"return",
"resp",
".",
"json",
"(",
")"
] | Cancel a room given a booking id.
:param booking_id: A booking id or a list of booking ids (separated by commas) to cancel.
:type booking_id: str | [
"Cancel",
"a",
"room",
"given",
"a",
"booking",
"id",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/studyspaces.py#L233-L240 | train |
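cancel_room takes one booking id or several joined by commas; the ids below are made up:

```python
ss.cancel_room("cs_AbC123")            # cancel a single booking
ss.cancel_room("cs_AbC123,cs_DeF456")  # cancel several at once
```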
pennlabs/penn-sdk-python | penn/studyspaces.py | StudySpaces.get_reservations | def get_reservations(self, email, date, timeout=None):
"""Gets reservations for a given email.
:param email: the email of the user whose reservations are to be fetched
:type email: str
"""
try:
resp = self._request("GET", "/1.1/space/bookings?email={}&date={}&limit=100".format(email, date), timeout=timeout)
except requests.exceptions.HTTPError as error:
raise APIError("Server Error: {}".format(error))
except requests.exceptions.ConnectTimeout:
raise APIError("Timeout Error")
return resp.json() | python | def get_reservations(self, email, date, timeout=None):
"""Gets reservations for a given email.
:param email: the email of the user whose reservations are to be fetched
:type email: str
"""
try:
resp = self._request("GET", "/1.1/space/bookings?email={}&date={}&limit=100".format(email, date), timeout=timeout)
except requests.exceptions.HTTPError as error:
raise APIError("Server Error: {}".format(error))
except requests.exceptions.ConnectTimeout:
raise APIError("Timeout Error")
return resp.json() | [
"def",
"get_reservations",
"(",
"self",
",",
"email",
",",
"date",
",",
"timeout",
"=",
"None",
")",
":",
"try",
":",
"resp",
"=",
"self",
".",
"_request",
"(",
"\"GET\"",
",",
"\"/1.1/space/bookings?email={}&date={}&limit=100\"",
".",
"format",
"(",
"email",
",",
"date",
")",
",",
"timeout",
"=",
"timeout",
")",
"except",
"resp",
".",
"exceptions",
".",
"HTTPError",
"as",
"error",
":",
"raise",
"APIError",
"(",
"\"Server Error: {}\"",
".",
"format",
"(",
"error",
")",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectTimeout",
":",
"raise",
"APIError",
"(",
"\"Timeout Error\"",
")",
"return",
"resp",
".",
"json",
"(",
")"
] | Gets reservations for a given email.
:param email: the email of the user whose reservations are to be fetched
:type email: str | [
"Gets",
"reservations",
"for",
"a",
"given",
"email",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/studyspaces.py#L242-L254 | train |
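A sketch of get_reservations; the YYYY-MM-DD date format is an assumption carried over from the rest of the module, and timeout is passed through to the underlying HTTP request:

```python
bookings = ss.get_reservations("ada@example.com", "2019-10-01", timeout=10)
```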
pennlabs/penn-sdk-python | penn/studyspaces.py | StudySpaces.get_reservations_for_booking_ids | def get_reservations_for_booking_ids(self, booking_ids):
"""Gets booking information for a given list of booking ids.
:param booking_ids: a booking id or a list of room ids (comma separated).
:type booking_ids: string
"""
try:
resp = self._request("GET", "/1.1/space/booking/{}".format(booking_ids))
except requests.exceptions.HTTPError as error:
raise APIError("Server Error: {}".format(error))
return resp.json() | python | def get_reservations_for_booking_ids(self, booking_ids):
"""Gets booking information for a given list of booking ids.
:param booking_ids: a booking id or a list of room ids (comma separated).
:type booking_ids: string
"""
try:
resp = self._request("GET", "/1.1/space/booking/{}".format(booking_ids))
except requests.exceptions.HTTPError as error:
raise APIError("Server Error: {}".format(error))
return resp.json() | [
"def",
"get_reservations_for_booking_ids",
"(",
"self",
",",
"booking_ids",
")",
":",
"try",
":",
"resp",
"=",
"self",
".",
"_request",
"(",
"\"GET\"",
",",
"\"/1.1/space/booking/{}\"",
".",
"format",
"(",
"booking_ids",
")",
")",
"except",
"resp",
".",
"exceptions",
".",
"HTTPError",
"as",
"error",
":",
"raise",
"APIError",
"(",
"\"Server Error: {}\"",
".",
"format",
"(",
"error",
")",
")",
"return",
"resp",
".",
"json",
"(",
")"
] | Gets booking information for a given list of booking ids.
:param booking_ids: a booking id or a list of room ids (comma separated).
:type booking_ids: string | [
"Gets",
"booking",
"information",
"for",
"a",
"given",
"list",
"of",
"booking",
"ids",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/studyspaces.py#L256-L266 | train |
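The companion lookup by booking id accepts the same comma-separated form (ids below are hypothetical):

```python
details = ss.get_reservations_for_booking_ids("cs_AbC123,cs_DeF456")
```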
pennlabs/penn-sdk-python | penn/studyspaces.py | StudySpaces.get_room_info | def get_room_info(self, room_ids):
"""Gets room information for a given list of ids.
:param room_ids: a room id or a list of room ids (comma separated).
:type room_ids: string
"""
try:
resp = self._request("GET", "/1.1/space/item/{}".format(room_ids))
rooms = resp.json()
for room in rooms:
if not room["image"].startswith("http"):
room["image"] = "https:" + room["image"]
if "description" in room:
description = room["description"].replace(u'\xa0', u' ')
room["description"] = BeautifulSoup(description, "html.parser").text.strip()
except requests.exceptions.HTTPError as error:
raise APIError("Server Error: {}".format(error))
return rooms | python | def get_room_info(self, room_ids):
"""Gets room information for a given list of ids.
:param room_ids: a room id or a list of room ids (comma separated).
:type room_ids: string
"""
try:
resp = self._request("GET", "/1.1/space/item/{}".format(room_ids))
rooms = resp.json()
for room in rooms:
if not room["image"].startswith("http"):
room["image"] = "https:" + room["image"]
if "description" in room:
description = room["description"].replace(u'\xa0', u' ')
room["description"] = BeautifulSoup(description, "html.parser").text.strip()
except requests.exceptions.HTTPError as error:
raise APIError("Server Error: {}".format(error))
return rooms | [
"def",
"get_room_info",
"(",
"self",
",",
"room_ids",
")",
":",
"try",
":",
"resp",
"=",
"self",
".",
"_request",
"(",
"\"GET\"",
",",
"\"/1.1/space/item/{}\"",
".",
"format",
"(",
"room_ids",
")",
")",
"rooms",
"=",
"resp",
".",
"json",
"(",
")",
"for",
"room",
"in",
"rooms",
":",
"if",
"not",
"room",
"[",
"\"image\"",
"]",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"room",
"[",
"\"image\"",
"]",
"=",
"\"https:\"",
"+",
"room",
"[",
"\"image\"",
"]",
"if",
"\"description\"",
"in",
"room",
":",
"description",
"=",
"room",
"[",
"\"description\"",
"]",
".",
"replace",
"(",
"u'\\xa0'",
",",
"u' '",
")",
"room",
"[",
"\"description\"",
"]",
"=",
"BeautifulSoup",
"(",
"description",
",",
"\"html.parser\"",
")",
".",
"text",
".",
"strip",
"(",
")",
"except",
"resp",
".",
"exceptions",
".",
"HTTPError",
"as",
"error",
":",
"raise",
"APIError",
"(",
"\"Server Error: {}\"",
".",
"format",
"(",
"error",
")",
")",
"return",
"rooms"
] | Gets room information for a given list of ids.
:param room_ids: a room id or a list of room ids (comma separated).
:type room_ids: string | [
"Gets",
"room",
"information",
"for",
"a",
"given",
"list",
"of",
"ids",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/studyspaces.py#L268-L286 | train |
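get_room_info normalizes image URLs and strips HTML from descriptions, so the returned dictionaries can be printed directly; the room ids are placeholders:

```python
rooms = ss.get_room_info("12345,67890")
for room in rooms:
    print(room["image"])                # scheme-prefixed after normalization
    print(room.get("description", ""))  # plain text, HTML removed
```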
evolbioinfo/pastml | pastml/acr.py | reconstruct_ancestral_states | def reconstruct_ancestral_states(tree, character, states, prediction_method=MPPA, model=F81,
params=None, avg_br_len=None, num_nodes=None, num_tips=None,
force_joint=True):
"""
Reconstructs ancestral states for the given character on the given tree.
:param character: character whose ancestral states are to be reconstructed.
:type character: str
:param tree: tree whose ancestral states are to be reconstructed,
annotated with the feature specified as `character` containing node states when known.
:type tree: ete3.Tree
:param states: possible character states.
:type states: numpy.array
:param avg_br_len: (optional) average non-zero branch length for this tree. If not specified, will be calculated.
:type avg_br_len: float
:param model: (optional, default is F81) state evolution model to be used by PASTML.
:type model: str
:param prediction_method: (optional, default is MPPA) ancestral state prediction method to be used by PASTML.
:type prediction_method: str
:param num_nodes: (optional) total number of nodes in the given tree (including tips).
If not specified, will be calculated.
:type num_nodes: int
:param num_tips: (optional) total number of tips in the given tree.
If not specified, will be calculated.
:type num_tips: int
:param params: an optional way to fix some parameters,
must be in a form {param: value},
where param can be a state (then the value should specify its frequency between 0 and 1),
or "scaling factor" (then the value should be the scaling factor for three branches,
e.g. set to 1 to keep the original branches). Could also be in a form path_to_param_file.
Only makes sense for ML methods.
:type params: dict or str
:return: ACR result dictionary whose values depend on the prediction method.
:rtype: dict
"""
logging.getLogger('pastml').debug('ACR settings for {}:\n\tMethod:\t{}{}.'
.format(character, prediction_method,
'\n\tModel:\t{}'.format(model)
if model and is_ml(prediction_method) else ''))
if COPY == prediction_method:
return {CHARACTER: character, STATES: states, METHOD: prediction_method}
if not num_nodes:
num_nodes = sum(1 for _ in tree.traverse())
if not num_tips:
num_tips = len(tree)
if is_ml(prediction_method):
if avg_br_len is None:
avg_br_len = np.mean([n.dist for n in tree.traverse() if n.dist])
freqs, sf, kappa = None, None, None
if params is not None:
freqs, sf, kappa = _parse_pastml_parameters(params, states)
return ml_acr(tree=tree, character=character, prediction_method=prediction_method, model=model, states=states,
avg_br_len=avg_br_len, num_nodes=num_nodes, num_tips=num_tips, freqs=freqs, sf=sf, kappa=kappa,
force_joint=force_joint)
if is_parsimonious(prediction_method):
return parsimonious_acr(tree, character, prediction_method, states, num_nodes, num_tips)
raise ValueError('Method {} is unknown, should be one of ML ({}), one of MP ({}) or {}'
.format(prediction_method, ', '.join(ML_METHODS), ', '.join(MP_METHODS), COPY)) | python | def reconstruct_ancestral_states(tree, character, states, prediction_method=MPPA, model=F81,
params=None, avg_br_len=None, num_nodes=None, num_tips=None,
force_joint=True):
"""
Reconstructs ancestral states for the given character on the given tree.
:param character: character whose ancestral states are to be reconstructed.
:type character: str
:param tree: tree whose ancestral states are to be reconstructed,
annotated with the feature specified as `character` containing node states when known.
:type tree: ete3.Tree
:param states: possible character states.
:type states: numpy.array
:param avg_br_len: (optional) average non-zero branch length for this tree. If not specified, will be calculated.
:type avg_br_len: float
:param model: (optional, default is F81) state evolution model to be used by PASTML.
:type model: str
:param prediction_method: (optional, default is MPPA) ancestral state prediction method to be used by PASTML.
:type prediction_method: str
:param num_nodes: (optional) total number of nodes in the given tree (including tips).
If not specified, will be calculated.
:type num_nodes: int
:param num_tips: (optional) total number of tips in the given tree.
If not specified, will be calculated.
:type num_tips: int
:param params: an optional way to fix some parameters,
must be in a form {param: value},
where param can be a state (then the value should specify its frequency between 0 and 1),
or "scaling factor" (then the value should be the scaling factor for three branches,
e.g. set to 1 to keep the original branches). Could also be in a form path_to_param_file.
Only makes sense for ML methods.
:type params: dict or str
:return: ACR result dictionary whose values depend on the prediction method.
:rtype: dict
"""
logging.getLogger('pastml').debug('ACR settings for {}:\n\tMethod:\t{}{}.'
.format(character, prediction_method,
'\n\tModel:\t{}'.format(model)
if model and is_ml(prediction_method) else ''))
if COPY == prediction_method:
return {CHARACTER: character, STATES: states, METHOD: prediction_method}
if not num_nodes:
num_nodes = sum(1 for _ in tree.traverse())
if not num_tips:
num_tips = len(tree)
if is_ml(prediction_method):
if avg_br_len is None:
avg_br_len = np.mean([n.dist for n in tree.traverse() if n.dist])
freqs, sf, kappa = None, None, None
if params is not None:
freqs, sf, kappa = _parse_pastml_parameters(params, states)
return ml_acr(tree=tree, character=character, prediction_method=prediction_method, model=model, states=states,
avg_br_len=avg_br_len, num_nodes=num_nodes, num_tips=num_tips, freqs=freqs, sf=sf, kappa=kappa,
force_joint=force_joint)
if is_parsimonious(prediction_method):
return parsimonious_acr(tree, character, prediction_method, states, num_nodes, num_tips)
raise ValueError('Method {} is unknown, should be one of ML ({}), one of MP ({}) or {}'
.format(prediction_method, ', '.join(ML_METHODS), ', '.join(MP_METHODS), COPY)) | [
"def",
"reconstruct_ancestral_states",
"(",
"tree",
",",
"character",
",",
"states",
",",
"prediction_method",
"=",
"MPPA",
",",
"model",
"=",
"F81",
",",
"params",
"=",
"None",
",",
"avg_br_len",
"=",
"None",
",",
"num_nodes",
"=",
"None",
",",
"num_tips",
"=",
"None",
",",
"force_joint",
"=",
"True",
")",
":",
"logging",
".",
"getLogger",
"(",
"'pastml'",
")",
".",
"debug",
"(",
"'ACR settings for {}:\\n\\tMethod:\\t{}{}.'",
".",
"format",
"(",
"character",
",",
"prediction_method",
",",
"'\\n\\tModel:\\t{}'",
".",
"format",
"(",
"model",
")",
"if",
"model",
"and",
"is_ml",
"(",
"prediction_method",
")",
"else",
"''",
")",
")",
"if",
"COPY",
"==",
"prediction_method",
":",
"return",
"{",
"CHARACTER",
":",
"character",
",",
"STATES",
":",
"states",
",",
"METHOD",
":",
"prediction_method",
"}",
"if",
"not",
"num_nodes",
":",
"num_nodes",
"=",
"sum",
"(",
"1",
"for",
"_",
"in",
"tree",
".",
"traverse",
"(",
")",
")",
"if",
"not",
"num_tips",
":",
"num_tips",
"=",
"len",
"(",
"tree",
")",
"if",
"is_ml",
"(",
"prediction_method",
")",
":",
"if",
"avg_br_len",
"is",
"None",
":",
"avg_br_len",
"=",
"np",
".",
"mean",
"(",
"n",
".",
"dist",
"for",
"n",
"in",
"tree",
".",
"traverse",
"(",
")",
"if",
"n",
".",
"dist",
")",
"freqs",
",",
"sf",
",",
"kappa",
"=",
"None",
",",
"None",
",",
"None",
"if",
"params",
"is",
"not",
"None",
":",
"freqs",
",",
"sf",
",",
"kappa",
"=",
"_parse_pastml_parameters",
"(",
"params",
",",
"states",
")",
"return",
"ml_acr",
"(",
"tree",
"=",
"tree",
",",
"character",
"=",
"character",
",",
"prediction_method",
"=",
"prediction_method",
",",
"model",
"=",
"model",
",",
"states",
"=",
"states",
",",
"avg_br_len",
"=",
"avg_br_len",
",",
"num_nodes",
"=",
"num_nodes",
",",
"num_tips",
"=",
"num_tips",
",",
"freqs",
"=",
"freqs",
",",
"sf",
"=",
"sf",
",",
"kappa",
"=",
"kappa",
",",
"force_joint",
"=",
"force_joint",
")",
"if",
"is_parsimonious",
"(",
"prediction_method",
")",
":",
"return",
"parsimonious_acr",
"(",
"tree",
",",
"character",
",",
"prediction_method",
",",
"states",
",",
"num_nodes",
",",
"num_tips",
")",
"raise",
"ValueError",
"(",
"'Method {} is unknown, should be one of ML ({}), one of MP ({}) or {}'",
".",
"format",
"(",
"prediction_method",
",",
"', '",
".",
"join",
"(",
"ML_METHODS",
")",
",",
"', '",
".",
"join",
"(",
"MP_METHODS",
")",
",",
"COPY",
")",
")"
] | Reconstructs ancestral states for the given character on the given tree.
:param character: character whose ancestral states are to be reconstructed.
:type character: str
:param tree: tree whose ancestral states are to be reconstructed,
annotated with the feature specified as `character` containing node states when known.
:type tree: ete3.Tree
:param states: possible character states.
:type states: numpy.array
:param avg_br_len: (optional) average non-zero branch length for this tree. If not specified, will be calculated.
:type avg_br_len: float
:param model: (optional, default is F81) state evolution model to be used by PASTML.
:type model: str
:param prediction_method: (optional, default is MPPA) ancestral state prediction method to be used by PASTML.
:type prediction_method: str
:param num_nodes: (optional) total number of nodes in the given tree (including tips).
If not specified, will be calculated.
:type num_nodes: int
:param num_tips: (optional) total number of tips in the given tree.
If not specified, will be calculated.
:type num_tips: int
:param params: an optional way to fix some parameters,
must be in a form {param: value},
where param can be a state (then the value should specify its frequency between 0 and 1),
or "scaling factor" (then the value should be the scaling factor for three branches,
e.g. set to 1 to keep the original branches). Could also be in a form path_to_param_file.
Only makes sense for ML methods.
:type params: dict or str
:return: ACR result dictionary whose values depend on the prediction method.
:rtype: dict | [
"Reconstructs",
"ancestral",
"states",
"for",
"the",
"given",
"character",
"on",
"the",
"given",
"tree",
"."
] | df8a375841525738383e59548eed3441b07dbd3e | https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/acr.py#L137-L197 | train |
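A minimal single-character sketch, assuming an ete3 tree whose tips already carry a `Country` feature; the file name and state values are placeholders, and MPPA/F81 are the module-level constants used as the signature defaults:

```python
import numpy as np
from ete3 import Tree

tree = Tree("tree.nwk")  # hypothetical newick file with annotated tips
states = np.array(["Africa", "Asia", "Europe"])
result = reconstruct_ancestral_states(tree, "Country", states,
                                      prediction_method=MPPA, model=F81)
```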
evolbioinfo/pastml | pastml/acr.py | acr | def acr(tree, df, prediction_method=MPPA, model=F81, column2parameters=None, force_joint=True):
"""
Reconstructs ancestral states for the given tree and
all the characters specified as columns of the given annotation dataframe.
:param df: dataframe indexed with tree node names
and containing characters for which ACR should be performed as columns.
:type df: pandas.DataFrame
:param tree: tree whose ancestral states are to be reconstructed.
:type tree: ete3.Tree
:param model: (optional, default is F81) model(s) to be used by PASTML,
can be either one model to be used for all the characters,
or a list of different models (in the same order as the annotation dataframe columns)
:type model: str or list(str)
:param prediction_method: (optional, default is MPPA) ancestral state prediction method(s) to be used by PASTML,
can be either one method to be used for all the characters,
or a list of different methods (in the same order as the annotation dataframe columns)
:type prediction_method: str or list(str)
:param column2parameters: an optional way to fix some parameters,
must be in a form {column: {param: value}},
where param can be a character state (then the value should specify its frequency between 0 and 1),
or pastml.ml.SCALING_FACTOR (then the value should be the scaling factor for tree branches,
e.g. set to 1 to keep the original branches). Could also be in a form {column: path_to_param_file}.
:type column2parameters: dict
:param force_joint: (optional, default is True) whether the JOINT state should be added to the MPPA prediction
even when not selected by the Brier score
:type force_joint: bool
:return: list of ACR result dictionaries, one per character.
:rtype: list(dict)
"""
for c in df.columns:
df[c] = df[c].apply(lambda _: '' if pd.isna(_) else _.encode('ASCII', 'replace').decode())
columns = preannotate_tree(df, tree)
name_tree(tree)
collapse_zero_branches(tree, features_to_be_merged=df.columns)
avg_br_len, num_nodes, num_tips = get_tree_stats(tree)
logging.getLogger('pastml').debug('\n=============ACR===============================')
column2parameters = column2parameters if column2parameters else {}
def _work(args):
return reconstruct_ancestral_states(*args, avg_br_len=avg_br_len, num_nodes=num_nodes, num_tips=num_tips,
force_joint=force_joint)
prediction_methods = value2list(len(columns), prediction_method, MPPA)
models = value2list(len(columns), model, F81)
def get_states(method, model, column):
df_states = [_ for _ in df[column].unique() if pd.notnull(_) and _ != '']
if not is_ml(method) or model not in {HKY, JTT}:
return np.sort(df_states)
states = HKY_STATES if HKY == model else JTT_STATES
if not set(df_states) & set(states):
raise ValueError('The allowed states for model {} are {}, '
'but your annotation file specifies {} as states in column {}.'
.format(model, ', '.join(states), ', '.join(df_states), column))
state_set = set(states)
df[column] = df[column].apply(lambda _: _ if _ in state_set else '')
return states
with ThreadPool() as pool:
acr_results = \
pool.map(func=_work, iterable=((tree, column, get_states(method, model, column), method, model,
column2parameters[column] if column in column2parameters else None)
for (column, method, model) in zip(columns, prediction_methods, models)))
result = []
for acr_res in acr_results:
if isinstance(acr_res, list):
result.extend(acr_res)
else:
result.append(acr_res)
return result | python | def acr(tree, df, prediction_method=MPPA, model=F81, column2parameters=None, force_joint=True):
"""
Reconstructs ancestral states for the given tree and
all the characters specified as columns of the given annotation dataframe.
:param df: dataframe indexed with tree node names
and containing characters for which ACR should be performed as columns.
:type df: pandas.DataFrame
:param tree: tree whose ancestral states are to be reconstructed.
:type tree: ete3.Tree
:param model: (optional, default is F81) model(s) to be used by PASTML,
can be either one model to be used for all the characters,
or a list of different models (in the same order as the annotation dataframe columns)
:type model: str or list(str)
:param prediction_method: (optional, default is MPPA) ancestral state prediction method(s) to be used by PASTML,
can be either one method to be used for all the characters,
or a list of different methods (in the same order as the annotation dataframe columns)
:type prediction_method: str or list(str)
:param column2parameters: an optional way to fix some parameters,
must be in a form {column: {param: value}},
where param can be a character state (then the value should specify its frequency between 0 and 1),
or pastml.ml.SCALING_FACTOR (then the value should be the scaling factor for tree branches,
e.g. set to 1 to keep the original branches). Could also be in a form {column: path_to_param_file}.
:type column2parameters: dict
:param force_joint: (optional, default is True) whether the JOINT state should be added to the MPPA prediction
even when not selected by the Brier score
:type force_joint: bool
:return: list of ACR result dictionaries, one per character.
:rtype: list(dict)
"""
for c in df.columns:
df[c] = df[c].apply(lambda _: '' if pd.isna(_) else _.encode('ASCII', 'replace').decode())
columns = preannotate_tree(df, tree)
name_tree(tree)
collapse_zero_branches(tree, features_to_be_merged=df.columns)
avg_br_len, num_nodes, num_tips = get_tree_stats(tree)
logging.getLogger('pastml').debug('\n=============ACR===============================')
column2parameters = column2parameters if column2parameters else {}
def _work(args):
return reconstruct_ancestral_states(*args, avg_br_len=avg_br_len, num_nodes=num_nodes, num_tips=num_tips,
force_joint=force_joint)
prediction_methods = value2list(len(columns), prediction_method, MPPA)
models = value2list(len(columns), model, F81)
def get_states(method, model, column):
df_states = [_ for _ in df[column].unique() if pd.notnull(_) and _ != '']
if not is_ml(method) or model not in {HKY, JTT}:
return np.sort(df_states)
states = HKY_STATES if HKY == model else JTT_STATES
if not set(df_states) & set(states):
raise ValueError('The allowed states for model {} are {}, '
'but your annotation file specifies {} as states in column {}.'
.format(model, ', '.join(states), ', '.join(df_states), column))
state_set = set(states)
df[column] = df[column].apply(lambda _: _ if _ in state_set else '')
return states
with ThreadPool() as pool:
acr_results = \
pool.map(func=_work, iterable=((tree, column, get_states(method, model, column), method, model,
column2parameters[column] if column in column2parameters else None)
for (column, method, model) in zip(columns, prediction_methods, models)))
result = []
for acr_res in acr_results:
if isinstance(acr_res, list):
result.extend(acr_res)
else:
result.append(acr_res)
return result | [
"def",
"acr",
"(",
"tree",
",",
"df",
",",
"prediction_method",
"=",
"MPPA",
",",
"model",
"=",
"F81",
",",
"column2parameters",
"=",
"None",
",",
"force_joint",
"=",
"True",
")",
":",
"for",
"c",
"in",
"df",
".",
"columns",
":",
"df",
"[",
"c",
"]",
"=",
"df",
"[",
"c",
"]",
".",
"apply",
"(",
"lambda",
"_",
":",
"''",
"if",
"pd",
".",
"isna",
"(",
"_",
")",
"else",
"_",
".",
"encode",
"(",
"'ASCII'",
",",
"'replace'",
")",
".",
"decode",
"(",
")",
")",
"columns",
"=",
"preannotate_tree",
"(",
"df",
",",
"tree",
")",
"name_tree",
"(",
"tree",
")",
"collapse_zero_branches",
"(",
"tree",
",",
"features_to_be_merged",
"=",
"df",
".",
"columns",
")",
"avg_br_len",
",",
"num_nodes",
",",
"num_tips",
"=",
"get_tree_stats",
"(",
"tree",
")",
"logging",
".",
"getLogger",
"(",
"'pastml'",
")",
".",
"debug",
"(",
"'\\n=============ACR==============================='",
")",
"column2parameters",
"=",
"column2parameters",
"if",
"column2parameters",
"else",
"{",
"}",
"def",
"_work",
"(",
"args",
")",
":",
"return",
"reconstruct_ancestral_states",
"(",
"*",
"args",
",",
"avg_br_len",
"=",
"avg_br_len",
",",
"num_nodes",
"=",
"num_nodes",
",",
"num_tips",
"=",
"num_tips",
",",
"force_joint",
"=",
"force_joint",
")",
"prediction_methods",
"=",
"value2list",
"(",
"len",
"(",
"columns",
")",
",",
"prediction_method",
",",
"MPPA",
")",
"models",
"=",
"value2list",
"(",
"len",
"(",
"columns",
")",
",",
"model",
",",
"F81",
")",
"def",
"get_states",
"(",
"method",
",",
"model",
",",
"column",
")",
":",
"df_states",
"=",
"[",
"_",
"for",
"_",
"in",
"df",
"[",
"column",
"]",
".",
"unique",
"(",
")",
"if",
"pd",
".",
"notnull",
"(",
"_",
")",
"and",
"_",
"!=",
"''",
"]",
"if",
"not",
"is_ml",
"(",
"method",
")",
"or",
"model",
"not",
"in",
"{",
"HKY",
",",
"JTT",
"}",
":",
"return",
"np",
".",
"sort",
"(",
"df_states",
")",
"states",
"=",
"HKY_STATES",
"if",
"HKY",
"==",
"model",
"else",
"JTT_STATES",
"if",
"not",
"set",
"(",
"df_states",
")",
"&",
"set",
"(",
"states",
")",
":",
"raise",
"ValueError",
"(",
"'The allowed states for model {} are {}, '",
"'but your annotation file specifies {} as states in column {}.'",
".",
"format",
"(",
"model",
",",
"', '",
".",
"join",
"(",
"states",
")",
",",
"', '",
".",
"join",
"(",
"df_states",
")",
",",
"column",
")",
")",
"state_set",
"=",
"set",
"(",
"states",
")",
"df",
"[",
"column",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"apply",
"(",
"lambda",
"_",
":",
"_",
"if",
"_",
"in",
"state_set",
"else",
"''",
")",
"return",
"states",
"with",
"ThreadPool",
"(",
")",
"as",
"pool",
":",
"acr_results",
"=",
"pool",
".",
"map",
"(",
"func",
"=",
"_work",
",",
"iterable",
"=",
"(",
"(",
"tree",
",",
"column",
",",
"get_states",
"(",
"method",
",",
"model",
",",
"column",
")",
",",
"method",
",",
"model",
",",
"column2parameters",
"[",
"column",
"]",
"if",
"column",
"in",
"column2parameters",
"else",
"None",
")",
"for",
"(",
"column",
",",
"method",
",",
"model",
")",
"in",
"zip",
"(",
"columns",
",",
"prediction_methods",
",",
"models",
")",
")",
")",
"result",
"=",
"[",
"]",
"for",
"acr_res",
"in",
"acr_results",
":",
"if",
"isinstance",
"(",
"acr_res",
",",
"list",
")",
":",
"result",
".",
"extend",
"(",
"acr_res",
")",
"else",
":",
"result",
".",
"append",
"(",
"acr_res",
")",
"return",
"result"
] | Reconstructs ancestral states for the given tree and
all the characters specified as columns of the given annotation dataframe.
:param df: dataframe indexed with tree node names
and containing characters for which ACR should be performed as columns.
:type df: pandas.DataFrame
:param tree: tree whose ancestral states are to be reconstructed.
:type tree: ete3.Tree
:param model: (optional, default is F81) model(s) to be used by PASTML,
can be either one model to be used for all the characters,
or a list of different models (in the same order as the annotation dataframe columns)
:type model: str or list(str)
:param prediction_method: (optional, default is MPPA) ancestral state prediction method(s) to be used by PASTML,
can be either one method to be used for all the characters,
or a list of different methods (in the same order as the annotation dataframe columns)
:type prediction_method: str or list(str)
:param column2parameters: an optional way to fix some parameters,
must be in a form {column: {param: value}},
where param can be a character state (then the value should specify its frequency between 0 and 1),
or pastml.ml.SCALING_FACTOR (then the value should be the scaling factor for tree branches,
e.g. set to 1 to keep the original branches). Could also be in a form {column: path_to_param_file}.
:type column2parameters: dict
:param force_joint: (optional, default is True) whether the JOINT state should be added to the MPPA prediction
even when not selected by the Brier score
:type force_joint: bool
:return: list of ACR result dictionaries, one per character.
:rtype: list(dict) | [
"Reconstructs",
"ancestral",
"states",
"for",
"the",
"given",
"tree",
"and",
"all",
"the",
"characters",
"specified",
"as",
"columns",
"of",
"the",
"given",
"annotation",
"dataframe",
"."
] | df8a375841525738383e59548eed3441b07dbd3e | https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/acr.py#L200-L276 | train |
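The batch entry point runs the same reconstruction over every column of an annotation table; the paths below are placeholders, and the table must be indexed by tree node names as the docstring requires:

```python
import pandas as pd
from ete3 import Tree

tree = Tree("tree.nwk")
df = pd.read_csv("annotations.csv", index_col=0)  # one character per column
acr_results = acr(tree, df, prediction_method=MPPA, model=F81)
```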
geophysics-ubonn/reda | lib/reda/utils/eit_fzj_utils.py | compute_correction_factors | def compute_correction_factors(data, true_conductivity, elem_file, elec_file):
"""Compute correction factors for 2D rhizotron geometries, following
Weigand and Kemna, 2017, Biogeosciences
https://doi.org/10.5194/bg-14-921-2017
Parameters
----------
data : :py:class:`pandas.DataFrame`
measured data
true_conductivity : float
Conductivity in S/m
elem_file : string
path to CRTomo FE mesh file (elem.dat)
elec_file : string
path to CRTomo FE electrode file (elec.dat)
Returns
-------
correction_factors : Nx5 :py:class:`numpy.ndarray`
measurement configurations and correction factors
(a,b,m,n,correction_factor)
"""
settings = {
'rho': 100,
'pha': 0,
'elem': elem_file,
'elec': elec_file,
'2D': True,
'sink_node': 100,
}
K = geometric_factors.compute_K_numerical(data, settings=settings)
data = geometric_factors.apply_K(data, K)
data = fixK.fix_sign_with_K(data)
frequency = 100
data_onef = data.query('frequency == {}'.format(frequency))
rho_measured = data_onef['r'] * data_onef['k']
rho_true = 1 / true_conductivity * 1e4
correction_factors = rho_true / rho_measured
collection = np.hstack((
data_onef[['a', 'b', 'm', 'n']].values,
np.abs(correction_factors)[:, np.newaxis]
))
return collection | python | def compute_correction_factors(data, true_conductivity, elem_file, elec_file):
"""Compute correction factors for 2D rhizotron geometries, following
Weigand and Kemna, 2017, Biogeosciences
https://doi.org/10.5194/bg-14-921-2017
Parameters
----------
data : :py:class:`pandas.DataFrame`
measured data
true_conductivity : float
Conductivity in S/m
elem_file : string
path to CRTomo FE mesh file (elem.dat)
elec_file : string
path to CRTomo FE electrode file (elec.dat)
Returns
-------
correction_factors : Nx5 :py:class:`numpy.ndarray`
measurement configurations and correction factors
(a,b,m,n,correction_factor)
"""
settings = {
'rho': 100,
'pha': 0,
'elem': elem_file,
'elec': elec_file,
'2D': True,
'sink_node': 100,
}
K = geometric_factors.compute_K_numerical(data, settings=settings)
data = geometric_factors.apply_K(data, K)
data = fixK.fix_sign_with_K(data)
frequency = 100
data_onef = data.query('frequency == {}'.format(frequency))
rho_measured = data_onef['r'] * data_onef['k']
rho_true = 1 / true_conductivity * 1e4
correction_factors = rho_true / rho_measured
collection = np.hstack((
data_onef[['a', 'b', 'm', 'n']].values,
np.abs(correction_factors)[:, np.newaxis]
))
return collection | [
"def",
"compute_correction_factors",
"(",
"data",
",",
"true_conductivity",
",",
"elem_file",
",",
"elec_file",
")",
":",
"settings",
"=",
"{",
"'rho'",
":",
"100",
",",
"'pha'",
":",
"0",
",",
"'elem'",
":",
"'elem.dat'",
",",
"'elec'",
":",
"'elec.dat'",
",",
"'2D'",
":",
"True",
",",
"'sink_node'",
":",
"100",
",",
"}",
"K",
"=",
"geometric_factors",
".",
"compute_K_numerical",
"(",
"data",
",",
"settings",
"=",
"settings",
")",
"data",
"=",
"geometric_factors",
".",
"apply_K",
"(",
"data",
",",
"K",
")",
"data",
"=",
"fixK",
".",
"fix_sign_with_K",
"(",
"data",
")",
"frequency",
"=",
"100",
"data_onef",
"=",
"data",
".",
"query",
"(",
"'frequency == {}'",
".",
"format",
"(",
"frequency",
")",
")",
"rho_measured",
"=",
"data_onef",
"[",
"'r'",
"]",
"*",
"data_onef",
"[",
"'k'",
"]",
"rho_true",
"=",
"1",
"/",
"true_conductivity",
"*",
"1e4",
"correction_factors",
"=",
"rho_true",
"/",
"rho_measured",
"collection",
"=",
"np",
".",
"hstack",
"(",
"(",
"data_onef",
"[",
"[",
"'a'",
",",
"'b'",
",",
"'m'",
",",
"'n'",
"]",
"]",
".",
"values",
",",
"np",
".",
"abs",
"(",
"correction_factors",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
")",
"return",
"collection"
] | Compute correction factors for 2D rhizotron geometries, following
Weigand and Kemna, 2017, Biogeosciences
https://doi.org/10.5194/bg-14-921-2017
Parameters
----------
data : :py:class:`pandas.DataFrame`
measured data
true_conductivity : float
Conductivity in S/m
elem_file : string
path to CRTomo FE mesh file (elem.dat)
elec_file : string
path to CRTomo FE electrode file (elec.dat)
Returns
-------
correction_factors : Nx5 :py:class:`numpy.ndarray`
measurement configurations and correction factors
(a,b,m,n,correction_factor) | [
"Compute",
"correction",
"factors",
"for",
"2D",
"rhizotron",
"geometries",
"following",
"Weigand",
"and",
"Kemna",
"2017",
"Biogeosciences"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/eit_fzj_utils.py#L12-L62 | train |
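A hedged call sketch; `data` is assumed to be a reda-style frame with a/b/m/n, r and a frequency column containing 100 Hz (the subset the function actually uses), and the conductivity is an illustrative value:

```python
factors = compute_correction_factors(
    data,
    true_conductivity=0.043,  # S/m, illustrative
    elem_file="elem.dat",
    elec_file="elec.dat",
)
# each returned row: a, b, m, n, correction_factor
```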
shexSpec/grammar | parsers/python/pyshexc/parser_impl/generate_shexj.py | rdf_suffix | def rdf_suffix(fmt: str) -> str:
""" Map the RDF format to the approproate suffix """
for k, v in SUFFIX_FORMAT_MAP.items():
if fmt == v:
return k
return 'rdf' | python | def rdf_suffix(fmt: str) -> str:
""" Map the RDF format to the approproate suffix """
for k, v in SUFFIX_FORMAT_MAP.items():
if fmt == v:
return k
return 'rdf' | [
"def",
"rdf_suffix",
"(",
"fmt",
":",
"str",
")",
"->",
"str",
":",
"for",
"k",
",",
"v",
"in",
"SUFFIX_FORMAT_MAP",
".",
"items",
"(",
")",
":",
"if",
"fmt",
"==",
"v",
":",
"return",
"k",
"return",
"'rdf'"
] | Map the RDF format to the appropriate suffix | [
"Map",
"the",
"RDF",
"format",
"to",
"the",
"approproate",
"suffix"
] | 4497cd1f73fa6703bca6e2cb53ba9c120f22e48c | https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/generate_shexj.py#L143-L148 | train |
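Assuming SUFFIX_FORMAT_MAP is rdflib's suffix-to-format table (e.g. 'ttl' maps to 'turtle'), the function inverts the lookup and falls back to 'rdf':

```python
print(rdf_suffix("turtle"))          # 'ttl' under the assumed mapping
print(rdf_suffix("no-such-format"))  # 'rdf' fallback
```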
geophysics-ubonn/reda | lib/reda/exporters/bert.py | export_bert | def export_bert(data, electrodes, filename):
"""Export to unified data format used in pyGIMLi & BERT.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame with at least a, b, m, n and r.
electrodes : :py:class:`pandas.DataFrame`
DataFrame with electrode positions.
filename : str
String of the output filename.
"""
# Check for multiple timesteps
if has_multiple_timesteps(data):
for i, timestep in enumerate(split_timesteps(data)):
export_bert(timestep, electrodes,
filename.replace(".", "_%.3d." % i))
# TODO: Make ABMN consistent
# index_full = ert.data.groupby(list("abmn")).groups.keys()
# g = ert.data.groupby('timestep')
# q = ert.data.pivot_table(values='r', index=list("abmn"), columns="timestep", dropna=True)
# ert.data.reset_index(list("abmn"))
f = open(filename, 'w')
f.write("%d\n" % len(electrodes))
f.write("# ")
# Make temporary copies for renaming
electrodes = electrodes.copy()
data = data.copy()
electrodes.columns = electrodes.columns.str.lower()
data.columns = data.columns.str.lower()
# Remove unnecessary columns and rename according to bert conventions
# https://gitlab.com/resistivity-net/bert#the-unified-data-format
cols_to_export = ["a", "b", "m", "n", "u", "i", "r", "rho_a", "error"]
data.drop(data.columns.difference(cols_to_export), 1, inplace=True)
data.rename(columns={"rho_a": "rhoa", "error": "err"}, inplace=True)
for key in electrodes.keys():
f.write("%s " % key)
f.write("\n")
for row in electrodes.itertuples(index=False):
for val in row:
f.write("%5.3f " % val)
f.write("\n")
f.write("%d\n" % len(data))
f.write("# ")
# Make sure that a, b, m, n are the first 4 columns
columns = data.columns.tolist()
for c in "abmn":
columns.remove(c)
columns = list("abmn") + columns
data = data[columns]
for key in data.keys():
f.write("%s " % key)
f.write("\n")
for row in data.itertuples(index=False):
for i, val in enumerate(row):
if i < 4:
f.write("%d " % val)
else:
f.write("%E " % val)
f.write("\n")
f.close() | python | def export_bert(data, electrodes, filename):
"""Export to unified data format used in pyGIMLi & BERT.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame with at least a, b, m, n and r.
electrodes : :py:class:`pandas.DataFrame`
DataFrame with electrode positions.
filename : str
String of the output filename.
"""
# Check for multiple timesteps
if has_multiple_timesteps(data):
for i, timestep in enumerate(split_timesteps(data)):
export_bert(timestep, electrodes,
filename.replace(".", "_%.3d." % i))
# TODO: Make ABMN consistent
# index_full = ert.data.groupby(list("abmn")).groups.keys()
# g = ert.data.groupby('timestep')
# q = ert.data.pivot_table(values='r', index=list("abmn"), columns="timestep", dropna=True)
# ert.data.reset_index(list("abmn"))
f = open(filename, 'w')
f.write("%d\n" % len(electrodes))
f.write("# ")
# Make temporary copies for renaming
electrodes = electrodes.copy()
data = data.copy()
electrodes.columns = electrodes.columns.str.lower()
data.columns = data.columns.str.lower()
# Remove unnecessary columns and rename according to bert conventions
# https://gitlab.com/resistivity-net/bert#the-unified-data-format
cols_to_export = ["a", "b", "m", "n", "u", "i", "r", "rho_a", "error"]
data.drop(data.columns.difference(cols_to_export), 1, inplace=True)
data.rename(columns={"rho_a": "rhoa", "error": "err"}, inplace=True)
for key in electrodes.keys():
f.write("%s " % key)
f.write("\n")
for row in electrodes.itertuples(index=False):
for val in row:
f.write("%5.3f " % val)
f.write("\n")
f.write("%d\n" % len(data))
f.write("# ")
# Make sure that a, b, m, n are the first 4 columns
columns = data.columns.tolist()
for c in "abmn":
columns.remove(c)
columns = list("abmn") + columns
data = data[columns]
for key in data.keys():
f.write("%s " % key)
f.write("\n")
for row in data.itertuples(index=False):
for i, val in enumerate(row):
if i < 4:
f.write("%d " % val)
else:
f.write("%E " % val)
f.write("\n")
f.close() | [
"def",
"export_bert",
"(",
"data",
",",
"electrodes",
",",
"filename",
")",
":",
"# Check for multiple timesteps",
"if",
"has_multiple_timesteps",
"(",
"data",
")",
":",
"for",
"i",
",",
"timestep",
"in",
"enumerate",
"(",
"split_timesteps",
"(",
"data",
")",
")",
":",
"export_bert",
"(",
"timestep",
",",
"electrodes",
",",
"filename",
".",
"replace",
"(",
"\".\"",
",",
"\"_%.3d.\"",
"%",
"i",
")",
")",
"# TODO: Make ABMN consistent",
"# index_full = ert.data.groupby(list(\"abmn\")).groups.keys()",
"# g = ert.data.groupby('timestep')",
"# q = ert.data.pivot_table(values='r', index=list(\"abmn\"), columns=\"timestep\", dropna=True)",
"# ert.data.reset_index(list(\"abmn\"))",
"f",
"=",
"open",
"(",
"filename",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"\"%d\\n\"",
"%",
"len",
"(",
"electrodes",
")",
")",
"f",
".",
"write",
"(",
"\"# \"",
")",
"# Make temporary copies for renaming",
"electrodes",
"=",
"electrodes",
".",
"copy",
"(",
")",
"data",
"=",
"data",
".",
"copy",
"(",
")",
"electrodes",
".",
"columns",
"=",
"electrodes",
".",
"columns",
".",
"str",
".",
"lower",
"(",
")",
"data",
".",
"columns",
"=",
"data",
".",
"columns",
".",
"str",
".",
"lower",
"(",
")",
"# Remove unnecessary columns and rename according to bert conventions",
"# https://gitlab.com/resistivity-net/bert#the-unified-data-format",
"cols_to_export",
"=",
"[",
"\"a\"",
",",
"\"b\"",
",",
"\"m\"",
",",
"\"n\"",
",",
"\"u\"",
",",
"\"i\"",
",",
"\"r\"",
",",
"\"rho_a\"",
",",
"\"error\"",
"]",
"data",
".",
"drop",
"(",
"data",
".",
"columns",
".",
"difference",
"(",
"cols_to_export",
")",
",",
"1",
",",
"inplace",
"=",
"True",
")",
"data",
".",
"rename",
"(",
"columns",
"=",
"{",
"\"rho_a\"",
":",
"\"rhoa\"",
",",
"\"error\"",
":",
"\"err\"",
"}",
",",
"inplace",
"=",
"True",
")",
"for",
"key",
"in",
"electrodes",
".",
"keys",
"(",
")",
":",
"f",
".",
"write",
"(",
"\"%s \"",
"%",
"key",
")",
"f",
".",
"write",
"(",
"\"\\n\"",
")",
"for",
"row",
"in",
"electrodes",
".",
"itertuples",
"(",
"index",
"=",
"False",
")",
":",
"for",
"val",
"in",
"row",
":",
"f",
".",
"write",
"(",
"\"%5.3f \"",
"%",
"val",
")",
"f",
".",
"write",
"(",
"\"\\n\"",
")",
"f",
".",
"write",
"(",
"\"%d\\n\"",
"%",
"len",
"(",
"data",
")",
")",
"f",
".",
"write",
"(",
"\"# \"",
")",
"# Make sure that a, b, m, n are the first 4 columns",
"columns",
"=",
"data",
".",
"columns",
".",
"tolist",
"(",
")",
"for",
"c",
"in",
"\"abmn\"",
":",
"columns",
".",
"remove",
"(",
"c",
")",
"columns",
"=",
"list",
"(",
"\"abmn\"",
")",
"+",
"columns",
"data",
"=",
"data",
"[",
"columns",
"]",
"for",
"key",
"in",
"data",
".",
"keys",
"(",
")",
":",
"f",
".",
"write",
"(",
"\"%s \"",
"%",
"key",
")",
"f",
".",
"write",
"(",
"\"\\n\"",
")",
"for",
"row",
"in",
"data",
".",
"itertuples",
"(",
"index",
"=",
"False",
")",
":",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"row",
")",
":",
"if",
"i",
"<",
"4",
":",
"f",
".",
"write",
"(",
"\"%d \"",
"%",
"val",
")",
"else",
":",
"f",
".",
"write",
"(",
"\"%E \"",
"%",
"val",
")",
"f",
".",
"write",
"(",
"\"\\n\"",
")",
"f",
".",
"close",
"(",
")"
] | Export to unified data format used in pyGIMLi & BERT.
Parameters
----------
data : :py:class:`pandas.DataFrame`
DataFrame with at least a, b, m, n and r.
electrodes : :py:class:`pandas.DataFrame`
DataFrame with electrode positions.
filename : str
String of the output filename. | [
"Export",
"to",
"unified",
"data",
"format",
"used",
"in",
"pyGIMLi",
"&",
"BERT",
"."
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/exporters/bert.py#L4-L73 | train |
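A self-contained sketch of a single-timestep export with synthetic positions and one quadrupole; the timestep column is included on the assumption that has_multiple_timesteps expects it:

```python
import pandas as pd

electrodes = pd.DataFrame({"x": [0.0, 1.0, 2.0, 3.0], "z": [0.0] * 4})
data = pd.DataFrame({"a": [1], "b": [2], "m": [3], "n": [4],
                     "r": [12.3], "timestep": [0]})
export_bert(data, electrodes, "profile.ohm")
```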
cfobel/webcam-recorder | webcam_recorder/view.py | VideoSelectorView.reset | def reset(self, index=None):
'''
Reset the points for the specified index position. If no index is
specified, reset points for all point handlers.
'''
points_handler_count = len(self.registration_view.points)
if index is None:
indexes = range(points_handler_count)
else:
indexes = [index]
indexes = [i for i in indexes if i < points_handler_count]
for i in indexes:
self.registration_view.points[i].reset()
if indexes:
self.registration_view.update_transform() | python | def reset(self, index=None):
'''
Reset the points for the specified index position. If no index is
specified, reset points for all point handlers.
'''
points_handler_count = len(self.registration_view.points)
if index is None:
indexes = range(points_handler_count)
else:
indexes = [index]
indexes = [i for i in indexes if i < points_handler_count]
for i in indexes:
self.registration_view.points[i].reset()
if indexes:
self.registration_view.update_transform() | [
"def",
"reset",
"(",
"self",
",",
"index",
"=",
"None",
")",
":",
"points_handler_count",
"=",
"len",
"(",
"self",
".",
"registration_view",
".",
"points",
")",
"if",
"index",
"is",
"None",
":",
"indexes",
"=",
"range",
"(",
"points_handler_count",
")",
"else",
":",
"indexes",
"=",
"[",
"index",
"]",
"indexes",
"=",
"[",
"i",
"for",
"i",
"in",
"indexes",
"if",
"i",
"<",
"points_handler_count",
"]",
"for",
"i",
"in",
"indexes",
":",
"self",
".",
"registration_view",
".",
"points",
"[",
"i",
"]",
".",
"reset",
"(",
")",
"if",
"indexes",
":",
"self",
".",
"registration_view",
".",
"update_transform",
"(",
")"
] | Reset the points for the specified index position. If no index is
specified, reset points for all point handlers. | [
"Reset",
"the",
"points",
"for",
"the",
"specified",
"index",
"position",
".",
"If",
"no",
"index",
"is",
"specified",
"reset",
"points",
"for",
"all",
"point",
"handlers",
"."
] | ffeb57c9044033fbea6372b3e642b83fd42dea87 | https://github.com/cfobel/webcam-recorder/blob/ffeb57c9044033fbea6372b3e642b83fd42dea87/webcam_recorder/view.py#L102-L118 | train |
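Assuming `view` is an instantiated VideoSelectorView with registered point handlers:

```python
view.reset(0)  # reset only the first point handler
view.reset()   # reset every handler; the transform is updated once
```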
geophysics-ubonn/reda | lib/reda/importers/res2dinv.py | _read_file | def _read_file(filename):
"""
Read a res2dinv-file and return the header
Parameters
----------
filename : string
Data filename
Returns
-------
file_type : int
type of array extracted from header
file_data : :py:class:`StringIO.StringIO`
content of file in a StringIO object
"""
# read data
with open(filename, 'r') as fid2:
abem_data_orig = fid2.read()
fid = StringIO()
fid.write(abem_data_orig)
fid.seek(0)
# determine type of array
fid.readline()
fid.readline()
file_type = int(fid.readline().strip())
# reset file pointer
fid.seek(0)
return file_type, fid | python | def _read_file(filename):
"""
Read a res2dinv-file and return the header
Parameters
----------
filename : string
Data filename
Returns
-------
file_type : int
type of array extracted from header
file_data : :py:class:`StringIO.StringIO`
content of file in a StringIO object
"""
# read data
with open(filename, 'r') as fid2:
abem_data_orig = fid2.read()
fid = StringIO()
fid.write(abem_data_orig)
fid.seek(0)
# determine type of array
fid.readline()
fid.readline()
file_type = int(fid.readline().strip())
# reset file pointer
fid.seek(0)
return file_type, fid | [
"def",
"_read_file",
"(",
"filename",
")",
":",
"# read data",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"fid2",
":",
"abem_data_orig",
"=",
"fid2",
".",
"read",
"(",
")",
"fid",
"=",
"StringIO",
"(",
")",
"fid",
".",
"write",
"(",
"abem_data_orig",
")",
"fid",
".",
"seek",
"(",
"0",
")",
"# determine type of array",
"fid",
".",
"readline",
"(",
")",
"fid",
".",
"readline",
"(",
")",
"file_type",
"=",
"int",
"(",
"fid",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
")",
"# reset file pointer",
"fid",
".",
"seek",
"(",
"0",
")",
"return",
"file_type",
",",
"fid"
] | Read a res2dinv-file and return the header
Parameters
----------
filename : string
Data filename
Returns
-------
file_type : int
type of array extracted from header
file_data : :py:class:`StringIO.StringIO`
content of file in a StringIO object | [
"Read",
"a",
"res2dinv",
"-",
"file",
"and",
"return",
"the",
"header"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/res2dinv.py#L15-L46 | train |
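The helper is private, but its contract is simple to exercise; the file name is a placeholder:

```python
file_type, fid = _read_file("profile.dat")  # hypothetical res2dinv export
print(file_type)  # e.g. 11 for the general array type handled below
```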
geophysics-ubonn/reda | lib/reda/importers/res2dinv.py | add_dat_file | def add_dat_file(filename, settings, container=None, **kwargs):
""" Read a RES2DINV-style file produced by the ABEM export program.
"""
# each type is read by a different function
importers = {
# general array type
11: _read_general_type,
}
file_type, content = _read_file(filename)
if file_type not in importers:
raise Exception(
'type of RES2DINV data file not recognized: {0}'.format(file_type)
)
header, data = importers[file_type](content, settings)
timestep = settings.get('timestep', 0)
# add timestep column
data['timestep'] = timestep
if container is None:
container = ERT(data)
else:
container.data = pd.concat((container.data, data))
return container | python | def add_dat_file(filename, settings, container=None, **kwargs):
""" Read a RES2DINV-style file produced by the ABEM export program.
"""
# each type is read by a different function
importers = {
# general array type
11: _read_general_type,
}
file_type, content = _read_file(filename)
if file_type not in importers:
raise Exception(
'type of RES2DINV data file not recognized: {0}'.format(file_type)
)
header, data = importers[file_type](content, settings)
timestep = settings.get('timestep', 0)
# add timestep column
data['timestep'] = timestep
if container is None:
container = ERT(data)
else:
container.data = pd.concat((container.data, data))
return container | [
"def",
"add_dat_file",
"(",
"filename",
",",
"settings",
",",
"container",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# each type is read by a different function",
"importers",
"=",
"{",
"# general array type",
"11",
":",
"_read_general_type",
",",
"}",
"file_type",
",",
"content",
"=",
"_read_file",
"(",
"filename",
")",
"if",
"file_type",
"not",
"in",
"importers",
":",
"raise",
"Exception",
"(",
"'type of RES2DINV data file not recognized: {0}'",
".",
"format",
"(",
"file_type",
")",
")",
"header",
",",
"data",
"=",
"importers",
"[",
"file_type",
"]",
"(",
"content",
",",
"settings",
")",
"timestep",
"=",
"settings",
".",
"get",
"(",
"'timestep'",
",",
"0",
")",
"# add timestep column",
"data",
"[",
"'timestep'",
"]",
"=",
"timestep",
"if",
"container",
"is",
"None",
":",
"container",
"=",
"ERT",
"(",
"data",
")",
"else",
":",
"container",
".",
"data",
"=",
"pd",
".",
"concat",
"(",
"(",
"container",
".",
"data",
",",
"data",
")",
")",
"return",
"container"
] | Read a RES2DINV-style file produced by the ABEM export program. | [
"Read",
"a",
"RES2DINV",
"-",
"style",
"file",
"produced",
"by",
"the",
"ABEM",
"export",
"program",
"."
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/res2dinv.py#L142-L170 | train |
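Stacking two measurement files into one ERT container; the file names are placeholders, and both files must be of the general array type (11), the only importer wired up here:

```python
ert = add_dat_file("t0.dat", {"timestep": 0})                 # new container
ert = add_dat_file("t1.dat", {"timestep": 1}, container=ert)  # append
```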
lambdalisue/notify | src/notify/wizard.py | console_input | def console_input(default, validation=None, allow_empty=False):
"""
Get user input value from stdin
Parameters
----------
default : string
A default value. It will be used when user input nothing.
validation : callable
A validation function. The validation function must raise an error
when validation has failed.
Returns
-------
string or any
A user input string or validated value
"""
value = raw_input("> ") or default
if value == "" and not allow_empty:
print "Invalid: Empty value is not permitted."
return console_input(default, validation)
if validation:
try:
return validation(value)
except ValidationError, e:
print "Invalid: ", e
return console_input(default, validation)
return value | python | def console_input(default, validation=None, allow_empty=False):
"""
Get user input value from stdin
Parameters
----------
default : string
A default value. It will be used when user input nothing.
validation : callable
A validation function. The validation function must raise an error
when validation has failed.
Returns
-------
string or any
A user input string or validated value
"""
value = raw_input("> ") or default
if value == "" and not allow_empty:
print "Invalid: Empty value is not permitted."
return console_input(default, validation)
if validation:
try:
return validation(value)
except ValidationError, e:
print "Invalid: ", e
return console_input(default, validation)
return value | [
"def",
"console_input",
"(",
"default",
",",
"validation",
"=",
"None",
",",
"allow_empty",
"=",
"False",
")",
":",
"value",
"=",
"raw_input",
"(",
"\"> \"",
")",
"or",
"default",
"if",
"value",
"==",
"\"\"",
"and",
"not",
"allow_empty",
":",
"print",
"\"Invalid: Empty value is not permitted.\"",
"return",
"console_input",
"(",
"default",
",",
"validation",
")",
"if",
"validation",
":",
"try",
":",
"return",
"validation",
"(",
"value",
")",
"except",
"ValidationError",
",",
"e",
":",
"print",
"\"Invalid: \"",
",",
"e",
"return",
"console_input",
"(",
"default",
",",
"validation",
")",
"return",
"value"
] | Get user input value from stdin
Parameters
----------
default : string
A default value. It will be used when the user inputs nothing.
validation : callable
A validation function. The validation function must raise an error
when validation has failed.
Returns
-------
string or any
A user input string or validated value | [
"Get",
"user",
"input",
"value",
"from",
"stdin"
] | 1b6d7d1faa2cea13bfaa1f35130f279a0115e686 | https://github.com/lambdalisue/notify/blob/1b6d7d1faa2cea13bfaa1f35130f279a0115e686/src/notify/wizard.py#L22-L49 | train |
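A short usage sketch (written as Python 2 to match the function above). `ValidationError` is assumed to be defined in the same module; a validator must raise it on bad input and may return a converted value:

def positive_int(value):
    number = int(value)  # note: a non-numeric string raises ValueError here, not ValidationError
    if number <= 0:
        raise ValidationError("value must be positive")
    return number

print "Port number (default 8000):"
port = console_input("8000", validation=positive_int)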
south-coast-science/scs_core | src/scs_core/gas/a4_temp_comp.py | A4TempComp.correct | def correct(self, calib, temp, we_t, ae_t):
"""
Compute weC from weT, aeT
"""
if not A4TempComp.in_range(temp):
return None
if self.__algorithm == 1:
return self.__eq1(temp, we_t, ae_t)
if self.__algorithm == 2:
return self.__eq2(temp, we_t, ae_t, calib.we_cal_mv, calib.ae_cal_mv)
if self.__algorithm == 3:
return self.__eq3(temp, we_t, ae_t, calib.we_cal_mv, calib.ae_cal_mv)
if self.__algorithm == 4:
return self.__eq4(temp, we_t, calib.we_cal_mv)
raise ValueError("A4TempComp.conv: unrecognised algorithm: %d." % self.__algorithm) | python | def correct(self, calib, temp, we_t, ae_t):
"""
Compute weC from weT, aeT
"""
if not A4TempComp.in_range(temp):
return None
if self.__algorithm == 1:
return self.__eq1(temp, we_t, ae_t)
if self.__algorithm == 2:
return self.__eq2(temp, we_t, ae_t, calib.we_cal_mv, calib.ae_cal_mv)
if self.__algorithm == 3:
return self.__eq3(temp, we_t, ae_t, calib.we_cal_mv, calib.ae_cal_mv)
if self.__algorithm == 4:
return self.__eq4(temp, we_t, calib.we_cal_mv)
raise ValueError("A4TempComp.conv: unrecognised algorithm: %d." % self.__algorithm) | [
"def",
"correct",
"(",
"self",
",",
"calib",
",",
"temp",
",",
"we_t",
",",
"ae_t",
")",
":",
"if",
"not",
"A4TempComp",
".",
"in_range",
"(",
"temp",
")",
":",
"return",
"None",
"if",
"self",
".",
"__algorithm",
"==",
"1",
":",
"return",
"self",
".",
"__eq1",
"(",
"temp",
",",
"we_t",
",",
"ae_t",
")",
"if",
"self",
".",
"__algorithm",
"==",
"2",
":",
"return",
"self",
".",
"__eq2",
"(",
"temp",
",",
"we_t",
",",
"ae_t",
",",
"calib",
".",
"we_cal_mv",
",",
"calib",
".",
"ae_cal_mv",
")",
"if",
"self",
".",
"__algorithm",
"==",
"3",
":",
"return",
"self",
".",
"__eq3",
"(",
"temp",
",",
"we_t",
",",
"ae_t",
",",
"calib",
".",
"we_cal_mv",
",",
"calib",
".",
"ae_cal_mv",
")",
"if",
"self",
".",
"__algorithm",
"==",
"4",
":",
"return",
"self",
".",
"__eq4",
"(",
"temp",
",",
"we_t",
",",
"calib",
".",
"we_cal_mv",
")",
"raise",
"ValueError",
"(",
"\"A4TempComp.conv: unrecognised algorithm: %d.\"",
"%",
"self",
".",
"__algorithm",
")"
] | Compute weC from weT, aeT | [
"Compute",
"weC",
"from",
"weT",
"aeT"
] | a4152b0bbed6acbbf257e1bba6a912f6ebe578e5 | https://github.com/south-coast-science/scs_core/blob/a4152b0bbed6acbbf257e1bba6a912f6ebe578e5/src/scs_core/gas/a4_temp_comp.py#L82-L101 | train |
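A hypothetical call sketch for the dispatcher above: `tc` is an A4TempComp instance whose algorithm number was set elsewhere, and `calib` is any object exposing the we_cal_mv/ae_cal_mv attributes named in the code; the mV readings are illustrative:

we_c = tc.correct(calib, temp=23.5, we_t=312.0, ae_t=298.0)
if we_c is None:
    print("temperature outside the compensated range")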
south-coast-science/scs_core | src/scs_core/gas/a4_temp_comp.py | A4TempComp.cf_t | def cf_t(self, temp):
"""
Compute the linear-interpolated temperature compensation factor.
"""
index = int((temp - A4TempComp.__MIN_TEMP) // A4TempComp.__INTERVAL) # index of start of interval
# on boundary...
if temp % A4TempComp.__INTERVAL == 0:
return self.__values[index]
# all others...
y1 = self.__values[index] # y value at start of interval
y2 = self.__values[index + 1] # y value at end of interval
delta_y = y2 - y1
delta_x = float(temp % A4TempComp.__INTERVAL) / A4TempComp.__INTERVAL # proportion of interval
cf_t = y1 + (delta_y * delta_x)
# print("A4TempComp.cf_t: alg:%d, temp:%f y1:%f y2:%f delta_y:%f delta_x:%f cf_t:%f " %
# (self.__algorithm, temp, y1, y2, delta_y, delta_x, cf_t), file=sys.stderr)
return cf_t | python | def cf_t(self, temp):
"""
Compute the linear-interpolated temperature compensation factor.
"""
index = int((temp - A4TempComp.__MIN_TEMP) // A4TempComp.__INTERVAL) # index of start of interval
# on boundary...
if temp % A4TempComp.__INTERVAL == 0:
return self.__values[index]
# all others...
y1 = self.__values[index] # y value at start of interval
y2 = self.__values[index + 1] # y value at end of interval
delta_y = y2 - y1
delta_x = float(temp % A4TempComp.__INTERVAL) / A4TempComp.__INTERVAL # proportion of interval
cf_t = y1 + (delta_y * delta_x)
# print("A4TempComp.cf_t: alg:%d, temp:%f y1:%f y2:%f delta_y:%f delta_x:%f cf_t:%f " %
# (self.__algorithm, temp, y1, y2, delta_y, delta_x, cf_t), file=sys.stderr)
return cf_t | [
"def",
"cf_t",
"(",
"self",
",",
"temp",
")",
":",
"index",
"=",
"int",
"(",
"(",
"temp",
"-",
"A4TempComp",
".",
"__MIN_TEMP",
")",
"//",
"A4TempComp",
".",
"__INTERVAL",
")",
"# index of start of interval",
"# on boundary...",
"if",
"temp",
"%",
"A4TempComp",
".",
"__INTERVAL",
"==",
"0",
":",
"return",
"self",
".",
"__values",
"[",
"index",
"]",
"# all others...",
"y1",
"=",
"self",
".",
"__values",
"[",
"index",
"]",
"# y value at start of interval",
"y2",
"=",
"self",
".",
"__values",
"[",
"index",
"+",
"1",
"]",
"# y value at end of interval",
"delta_y",
"=",
"y2",
"-",
"y1",
"delta_x",
"=",
"float",
"(",
"temp",
"%",
"A4TempComp",
".",
"__INTERVAL",
")",
"/",
"A4TempComp",
".",
"__INTERVAL",
"# proportion of interval",
"cf_t",
"=",
"y1",
"+",
"(",
"delta_y",
"*",
"delta_x",
")",
"# print(\"A4TempComp.cf_t: alg:%d, temp:%f y1:%f y2:%f delta_y:%f delta_x:%f cf_t:%f \" %",
"# (self.__algorithm, temp, y1, y2, delta_y, delta_x, cf_t), file=sys.stderr)",
"return",
"cf_t"
] | Compute the linear-interpolated temperature compensation factor. | [
"Compute",
"the",
"linear",
"-",
"interpolated",
"temperature",
"compensation",
"factor",
"."
] | a4152b0bbed6acbbf257e1bba6a912f6ebe578e5 | https://github.com/south-coast-science/scs_core/blob/a4152b0bbed6acbbf257e1bba6a912f6ebe578e5/src/scs_core/gas/a4_temp_comp.py#L152-L175 | train |
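The interpolation arithmetic above, worked through with illustrative numbers (the real __MIN_TEMP and __INTERVAL values are private class attributes and are not shown in the record):

# assume a 10-degree interval with factors 1.0 and 1.4 at its endpoints,
# and a temperature 3 degrees past the interval start:
y1, y2 = 1.0, 1.4
delta_x = 3.0 / 10.0            # proportion of the interval covered
cf = y1 + (y2 - y1) * delta_x   # 1.0 + 0.4 * 0.3 = 1.12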
cloudbase/python-hnvclient | hnv/common/utils.py | run_once | def run_once(function, state={}, errors={}):
"""A memoization decorator, whose purpose is to cache calls."""
@six.wraps(function)
def _wrapper(*args, **kwargs):
if function in errors:
# Deliberate use of LBYL.
six.reraise(*errors[function])
try:
return state[function]
except KeyError:
try:
state[function] = result = function(*args, **kwargs)
return result
except Exception:
errors[function] = sys.exc_info()
raise
return _wrapper | python | def run_once(function, state={}, errors={}):
"""A memoization decorator, whose purpose is to cache calls."""
@six.wraps(function)
def _wrapper(*args, **kwargs):
if function in errors:
# Deliberate use of LBYL.
six.reraise(*errors[function])
try:
return state[function]
except KeyError:
try:
state[function] = result = function(*args, **kwargs)
return result
except Exception:
errors[function] = sys.exc_info()
raise
return _wrapper | [
"def",
"run_once",
"(",
"function",
",",
"state",
"=",
"{",
"}",
",",
"errors",
"=",
"{",
"}",
")",
":",
"@",
"six",
".",
"wraps",
"(",
"function",
")",
"def",
"_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"function",
"in",
"errors",
":",
"# Deliberate use of LBYL.",
"six",
".",
"reraise",
"(",
"*",
"errors",
"[",
"function",
"]",
")",
"try",
":",
"return",
"state",
"[",
"function",
"]",
"except",
"KeyError",
":",
"try",
":",
"state",
"[",
"function",
"]",
"=",
"result",
"=",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"result",
"except",
"Exception",
":",
"errors",
"[",
"function",
"]",
"=",
"sys",
".",
"exc_info",
"(",
")",
"raise",
"return",
"_wrapper"
] | A memoization decorator, whose purpose is to cache calls. | [
"A",
"memoization",
"decorator",
"whose",
"purpose",
"is",
"to",
"cache",
"calls",
"."
] | b019452af01db22629809b8930357a2ebf6494be | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/common/utils.py#L187-L204 | train |
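A usage sketch for the decorator above. The cache is keyed on the function object alone, so arguments are ignored after the first call, and a raised exception is cached and re-raised on every later call:

@run_once
def load_config():
    print("loading ...")
    return {"debug": True}

cfg1 = load_config()   # prints "loading ...", computes and caches the result
cfg2 = load_config()   # silent; returns the cached dict (cfg1 is cfg2)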
cloudbase/python-hnvclient | hnv/common/utils.py | _HNVClient._session | def _session(self):
"""The current session used by the client.
The Session object allows you to persist certain parameters across
requests. It also persists cookies across all requests made from
the Session instance, and will use urllib3's connection pooling.
So if you're making several requests to the same host, the underlying
TCP connection will be reused, which can result in a significant
performance increase.
"""
if self._http_session is None:
self._http_session = requests.Session()
self._http_session.headers.update(self._get_headers())
self._http_session.verify = self._verify_https_request()
if all(self._credentials):
username, password = self._credentials
self._http_session.auth = requests_ntlm.HttpNtlmAuth(
username=username, password=password)
return self._http_session | python | def _session(self):
"""The current session used by the client.
The Session object allows you to persist certain parameters across
requests. It also persists cookies across all requests made from
the Session instance, and will use urllib3's connection pooling.
So if you're making several requests to the same host, the underlying
TCP connection will be reused, which can result in a significant
performance increase.
"""
if self._http_session is None:
self._http_session = requests.Session()
self._http_session.headers.update(self._get_headers())
self._http_session.verify = self._verify_https_request()
if all(self._credentials):
username, password = self._credentials
self._http_session.auth = requests_ntlm.HttpNtlmAuth(
username=username, password=password)
return self._http_session | [
"def",
"_session",
"(",
"self",
")",
":",
"if",
"self",
".",
"_http_session",
"is",
"None",
":",
"self",
".",
"_http_session",
"=",
"requests",
".",
"Session",
"(",
")",
"self",
".",
"_http_session",
".",
"headers",
".",
"update",
"(",
"self",
".",
"_get_headers",
"(",
")",
")",
"self",
".",
"_http_session",
".",
"verify",
"=",
"self",
".",
"_verify_https_request",
"(",
")",
"if",
"all",
"(",
"self",
".",
"_credentials",
")",
":",
"username",
",",
"password",
"=",
"self",
".",
"_credentials",
"self",
".",
"_http_session",
".",
"auth",
"=",
"requests_ntlm",
".",
"HttpNtlmAuth",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
"return",
"self",
".",
"_http_session"
] | The current session used by the client.
The Session object allows you to persist certain parameters across
requests. It also persists cookies across all requests made from
the Session instance, and will use urllib3's connection pooling.
So if you're making several requests to the same host, the underlying
TCP connection will be reused, which can result in a significant
performance increase. | [
"The",
"current",
"session",
"used",
"by",
"the",
"client",
"."
] | b019452af01db22629809b8930357a2ebf6494be | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/common/utils.py#L59-L79 | train |
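The lazy-initialization pattern used by the property above, reduced to its core (a sketch; the header, TLS-verification and NTLM wiring are omitted):

import requests

class Client(object):
    def __init__(self):
        self._http_session = None

    @property
    def session(self):
        if self._http_session is None:  # built once, then reused for connection pooling
            self._http_session = requests.Session()
        return self._http_session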
cloudbase/python-hnvclient | hnv/common/utils.py | _HNVClient.get_resource | def get_resource(self, path):
"""Getting the required information from the API."""
response = self._http_request(path)
try:
return response.json()
except ValueError:
raise exception.ServiceException("Invalid service response.") | python | def get_resource(self, path):
"""Getting the required information from the API."""
response = self._http_request(path)
try:
return response.json()
except ValueError:
raise exception.ServiceException("Invalid service response.") | [
"def",
"get_resource",
"(",
"self",
",",
"path",
")",
":",
"response",
"=",
"self",
".",
"_http_request",
"(",
"path",
")",
"try",
":",
"return",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"raise",
"exception",
".",
"ServiceException",
"(",
"\"Invalid service response.\"",
")"
] | Getting the required information from the API. | [
"Getting",
"the",
"required",
"information",
"from",
"the",
"API",
"."
] | b019452af01db22629809b8930357a2ebf6494be | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/common/utils.py#L164-L170 | train |
cloudbase/python-hnvclient | hnv/common/utils.py | _HNVClient.update_resource | def update_resource(self, path, data, if_match=None):
"""Update the required resource."""
response = self._http_request(resource=path, method="PUT", body=data,
if_match=if_match)
try:
return response.json()
except ValueError:
raise exception.ServiceException("Invalid service response.") | python | def update_resource(self, path, data, if_match=None):
"""Update the required resource."""
response = self._http_request(resource=path, method="PUT", body=data,
if_match=if_match)
try:
return response.json()
except ValueError:
raise exception.ServiceException("Invalid service response.") | [
"def",
"update_resource",
"(",
"self",
",",
"path",
",",
"data",
",",
"if_match",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"_http_request",
"(",
"resource",
"=",
"path",
",",
"method",
"=",
"\"PUT\"",
",",
"body",
"=",
"data",
",",
"if_match",
"=",
"if_match",
")",
"try",
":",
"return",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"raise",
"exception",
".",
"ServiceException",
"(",
"\"Invalid service response.\"",
")"
] | Update the required resource. | [
"Update",
"the",
"required",
"resource",
"."
] | b019452af01db22629809b8930357a2ebf6494be | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/common/utils.py#L172-L179 | train |
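A hypothetical round-trip using get_resource and update_resource from the two records above. The client construction and the resource path are assumptions; only the two method signatures are taken from the code:

client = _HNVClient(url="https://nc-host", credentials=("user", "pass"))  # hypothetical ctor
resource = client.get_resource("/networking/v1/logicalNetworks/ln1")
resource["properties"]["friendlyName"] = "backend"
client.update_resource("/networking/v1/logicalNetworks/ln1", data=resource,
                       if_match=resource.get("etag"))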
Metatab/geoid | geoid/civick.py | GVid.summarize | def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
s = str(self.allval())
return self.parse(s[:2]+ ''.join(['Z']*len(s[2:]))) | python | def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
s = str(self.allval())
return self.parse(s[:2]+ ''.join(['Z']*len(s[2:]))) | [
"def",
"summarize",
"(",
"self",
")",
":",
"s",
"=",
"str",
"(",
"self",
".",
"allval",
"(",
")",
")",
"return",
"self",
".",
"parse",
"(",
"s",
"[",
":",
"2",
"]",
"+",
"''",
".",
"join",
"(",
"[",
"'Z'",
"]",
"*",
"len",
"(",
"s",
"[",
"2",
":",
"]",
")",
")",
")"
] | Convert all of the values to their max values. This form is used to represent the summary level | [
"Convert",
"all",
"of",
"the",
"values",
"to",
"their",
"max",
"values",
".",
"This",
"form",
"is",
"used",
"to",
"represent",
"the",
"summary",
"level"
] | 4b7769406b00e59376fb6046b42a2f8ed706b33b | https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/civick.py#L41-L46 | train |
geophysics-ubonn/reda | lib/reda/utils/filter_config_types.py | _filter_schlumberger | def _filter_schlumberger(configs):
"""Filter Schlumberger configurations
Schlumberger configurations are selected using the following criteria:
* For a given voltage dipole, there need to be at least two current
injections with electrodes located on the left and the right of the
voltage dipole.
* The distance between the current electrodes and the next voltage
electrode is the same on both sides.
Parameters
----------
configs: numpy.ndarray
Nx4 array with N measurement configurations
Returns
-------
configs: numpy.ndarray
Remaining configurations, all Schlumberger configurations are set to
numpy.nan
schl_indices: dict with one entry: numpy.ndarray
indices of Schlumberger configurations
"""
# sort configs
configs_sorted = np.hstack((
np.sort(configs[:, 0:2], axis=1),
np.sort(configs[:, 2:4], axis=1),
)).astype(int)
# determine unique voltage dipoles
MN = configs_sorted[:, 2:4].copy()
MN_unique = np.unique(
MN.view(
MN.dtype.descr * 2
)
)
MN_unique_reshape = MN_unique.view(
MN.dtype
).reshape(-1, 2)
schl_indices_list = []
for mn in MN_unique_reshape:
# check if there is more than one associated current injection
nr_current_binary = (
(configs_sorted[:, 2] == mn[0]) &
(configs_sorted[:, 3] == mn[1])
)
if len(np.where(nr_current_binary)[0]) < 2:
continue
# now which of these configurations have current electrodes on both
# sides of the voltage dipole
nr_left_right = (
(configs_sorted[:, 0] < mn[0]) &
(configs_sorted[:, 1] > mn[0]) &
nr_current_binary
)
# now check that the left/right distances are equal
distance_left = np.abs(
configs_sorted[nr_left_right, 0] - mn[0]
).squeeze()
distance_right = np.abs(
configs_sorted[nr_left_right, 1] - mn[1]
).squeeze()
nr_equal_distances = np.where(distance_left == distance_right)[0]
indices = np.where(nr_left_right)[0][nr_equal_distances]
if indices.size > 2:
schl_indices_list.append(indices)
# set Schlumberger configs to nan
if len(schl_indices_list) == 0:
return configs, {0: np.array([])}
else:
schl_indices = np.hstack(schl_indices_list).squeeze()
configs[schl_indices, :] = np.nan
return configs, {0: schl_indices} | python | def _filter_schlumberger(configs):
"""Filter Schlumberger configurations
Schlumberger configurations are selected using the following criteria:
* For a given voltage dipole, there need to be at least two current
injections with electrodes located on the left and the right of the
voltage dipole.
* The distance between the current electrodes and the next voltage
electrode is the same on both sides.
Parameters
----------
configs: numpy.ndarray
Nx4 array with N measurement configurations
Returns
-------
configs: numpy.ndarray
Remaining configurations, all Schlumberger configurations are set to
numpy.nan
schl_indices: dict with one entry: numpy.ndarray
indices of Schlumberger configurations
"""
# sort configs
configs_sorted = np.hstack((
np.sort(configs[:, 0:2], axis=1),
np.sort(configs[:, 2:4], axis=1),
)).astype(int)
# determine unique voltage dipoles
MN = configs_sorted[:, 2:4].copy()
MN_unique = np.unique(
MN.view(
MN.dtype.descr * 2
)
)
MN_unique_reshape = MN_unique.view(
MN.dtype
).reshape(-1, 2)
schl_indices_list = []
for mn in MN_unique_reshape:
# check if there is more than one associated current injection
nr_current_binary = (
(configs_sorted[:, 2] == mn[0]) &
(configs_sorted[:, 3] == mn[1])
)
if len(np.where(nr_current_binary)[0]) < 2:
continue
# now which of these configurations have current electrodes on both
# sides of the voltage dipole
nr_left_right = (
(configs_sorted[:, 0] < mn[0]) &
(configs_sorted[:, 1] > mn[0]) &
nr_current_binary
)
# now check that the left/right distances are equal
distance_left = np.abs(
configs_sorted[nr_left_right, 0] - mn[0]
).squeeze()
distance_right = np.abs(
configs_sorted[nr_left_right, 1] - mn[1]
).squeeze()
nr_equal_distances = np.where(distance_left == distance_right)[0]
indices = np.where(nr_left_right)[0][nr_equal_distances]
if indices.size > 2:
schl_indices_list.append(indices)
# set Schlumberger configs to nan
if len(schl_indices_list) == 0:
return configs, {0: np.array([])}
else:
schl_indices = np.hstack(schl_indices_list).squeeze()
configs[schl_indices, :] = np.nan
return configs, {0: schl_indices} | [
"def",
"_filter_schlumberger",
"(",
"configs",
")",
":",
"# sort configs",
"configs_sorted",
"=",
"np",
".",
"hstack",
"(",
"(",
"np",
".",
"sort",
"(",
"configs",
"[",
":",
",",
"0",
":",
"2",
"]",
",",
"axis",
"=",
"1",
")",
",",
"np",
".",
"sort",
"(",
"configs",
"[",
":",
",",
"2",
":",
"4",
"]",
",",
"axis",
"=",
"1",
")",
",",
")",
")",
".",
"astype",
"(",
"int",
")",
"# determine unique voltage dipoles",
"MN",
"=",
"configs_sorted",
"[",
":",
",",
"2",
":",
"4",
"]",
".",
"copy",
"(",
")",
"MN_unique",
"=",
"np",
".",
"unique",
"(",
"MN",
".",
"view",
"(",
"MN",
".",
"dtype",
".",
"descr",
"*",
"2",
")",
")",
"MN_unique_reshape",
"=",
"MN_unique",
".",
"view",
"(",
"MN",
".",
"dtype",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"2",
")",
"schl_indices_list",
"=",
"[",
"]",
"for",
"mn",
"in",
"MN_unique_reshape",
":",
"# check if there are more than one associated current injections",
"nr_current_binary",
"=",
"(",
"(",
"configs_sorted",
"[",
":",
",",
"2",
"]",
"==",
"mn",
"[",
"0",
"]",
")",
"&",
"(",
"configs_sorted",
"[",
":",
",",
"3",
"]",
"==",
"mn",
"[",
"1",
"]",
")",
")",
"if",
"len",
"(",
"np",
".",
"where",
"(",
"nr_current_binary",
")",
"[",
"0",
"]",
")",
"<",
"2",
":",
"continue",
"# now which of these configurations have current electrodes on both",
"# sides of the voltage dipole",
"nr_left_right",
"=",
"(",
"(",
"configs_sorted",
"[",
":",
",",
"0",
"]",
"<",
"mn",
"[",
"0",
"]",
")",
"&",
"(",
"configs_sorted",
"[",
":",
",",
"1",
"]",
">",
"mn",
"[",
"0",
"]",
")",
"&",
"nr_current_binary",
")",
"# now check that the left/right distances are equal",
"distance_left",
"=",
"np",
".",
"abs",
"(",
"configs_sorted",
"[",
"nr_left_right",
",",
"0",
"]",
"-",
"mn",
"[",
"0",
"]",
")",
".",
"squeeze",
"(",
")",
"distance_right",
"=",
"np",
".",
"abs",
"(",
"configs_sorted",
"[",
"nr_left_right",
",",
"1",
"]",
"-",
"mn",
"[",
"1",
"]",
")",
".",
"squeeze",
"(",
")",
"nr_equal_distances",
"=",
"np",
".",
"where",
"(",
"distance_left",
"==",
"distance_right",
")",
"[",
"0",
"]",
"indices",
"=",
"np",
".",
"where",
"(",
"nr_left_right",
")",
"[",
"0",
"]",
"[",
"nr_equal_distances",
"]",
"if",
"indices",
".",
"size",
">",
"2",
":",
"schl_indices_list",
".",
"append",
"(",
"indices",
")",
"# set Schlumberger configs to nan",
"if",
"len",
"(",
"schl_indices_list",
")",
"==",
"0",
":",
"return",
"configs",
",",
"{",
"0",
":",
"np",
".",
"array",
"(",
"[",
"]",
")",
"}",
"else",
":",
"schl_indices",
"=",
"np",
".",
"hstack",
"(",
"schl_indices_list",
")",
".",
"squeeze",
"(",
")",
"configs",
"[",
"schl_indices",
",",
":",
"]",
"=",
"np",
".",
"nan",
"return",
"configs",
",",
"{",
"0",
":",
"schl_indices",
"}"
] | Filter Schlumberger configurations
Schlumberger configurations are selected using the following criteria:
* For a given voltage dipole, there need to be at least two current
injections with electrodes located on the left and the right of the
voltage dipole.
* The distance between the current electrodes and the next voltage
electrode is the same on both sides.
Parameters
----------
configs: numpy.ndarray
Nx4 array with N measurement configurations
Returns
-------
configs: numpy.ndarray
Remaining configurations, all Schlumberger configurations are set to
numpy.nan
schl_indices: dict with one entry: numpy.ndarray
indices of Schlumberger configurations | [
"Filter",
"Schlumberger",
"configurations"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/filter_config_types.py#L17-L97 | train |
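An illustrative call for the filter above. Rows are A-B-M-N quadrupoles; a float array is needed because matched rows are overwritten with NaN, and a voltage dipole is only accepted with more than two symmetric injections (`indices.size > 2`):

import numpy as np

configs = np.array([
    [3, 6, 4, 5],   # current dipole brackets MN=(4,5) at distance 1 on each side
    [2, 7, 4, 5],   # ... at distance 2
    [1, 8, 4, 5],   # ... at distance 3
    [1, 2, 4, 5],   # both current electrodes left of MN -> not Schlumberger
], dtype=float)
remaining, schl = _filter_schlumberger(configs)
# schl -> {0: array([0, 1, 2])}; those rows of `remaining` are now NaN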
geophysics-ubonn/reda | lib/reda/utils/filter_config_types.py | _filter_dipole_dipole | def _filter_dipole_dipole(configs):
"""Filter dipole-dipole configurations
A dipole-dipole configuration is defined using the following criteria:
* equal distance between the two current electrodes and between the two
voltage electrodes
* no overlap of dipoles
Parameters
----------
configs: numpy.ndarray
Nx4 array with N measurement configurations
Returns
-------
configs: numpy.ndarray
Remaining configurations, all dipole-dipole configurations are set to
numpy.nan
dd_indices: numpy.ndarray
indices of dipole-dipole configurations
"""
# check that dipoles have equal size
dist_ab = np.abs(configs[:, 0] - configs[:, 1])
dist_mn = np.abs(configs[:, 2] - configs[:, 3])
distances_equal = (dist_ab == dist_mn)
# check that they are not overlapping
not_overlapping = (
# either a,b < m,n
(
(configs[:, 0] < configs[:, 2]) &
(configs[:, 1] < configs[:, 2]) &
(configs[:, 0] < configs[:, 3]) &
(configs[:, 1] < configs[:, 3])
) |
# or m,n < a,b
(
(configs[:, 2] < configs[:, 0]) &
(configs[:, 3] < configs[:, 0]) &
(configs[:, 2] < configs[:, 1]) &
(configs[:, 3] < configs[:, 1])
)
)
is_dipole_dipole = (distances_equal & not_overlapping)
dd_indices = np.where(is_dipole_dipole)[0]
dd_indices_sorted = _sort_dd_skips(configs[dd_indices, :], dd_indices)
# set all dd configs to nan
configs[dd_indices, :] = np.nan
return configs, dd_indices_sorted | python | def _filter_dipole_dipole(configs):
"""Filter dipole-dipole configurations
A dipole-dipole configuration is defined using the following criteria:
* equal distance between the two current electrodes and between the two
voltage electrodes
* no overlap of dipoles
Parameters
----------
configs: numpy.ndarray
Nx4 array with N measurement configurations
Returns
-------
configs: numpy.ndarray
Remaining configurations, all dipole-dipole configurations are set to
numpy.nan
dd_indices: numpy.ndarray
indices of dipole-dipole configurations
"""
# check that dipoles have equal size
dist_ab = np.abs(configs[:, 0] - configs[:, 1])
dist_mn = np.abs(configs[:, 2] - configs[:, 3])
distances_equal = (dist_ab == dist_mn)
# check that they are not overlapping
not_overlapping = (
# either a,b < m,n
(
(configs[:, 0] < configs[:, 2]) &
(configs[:, 1] < configs[:, 2]) &
(configs[:, 0] < configs[:, 3]) &
(configs[:, 1] < configs[:, 3])
) |
# or m,n < a,b
(
(configs[:, 2] < configs[:, 0]) &
(configs[:, 3] < configs[:, 0]) &
(configs[:, 2] < configs[:, 1]) &
(configs[:, 3] < configs[:, 1])
)
)
is_dipole_dipole = (distances_equal & not_overlapping)
dd_indices = np.where(is_dipole_dipole)[0]
dd_indices_sorted = _sort_dd_skips(configs[dd_indices, :], dd_indices)
# set all dd configs to nan
configs[dd_indices, :] = np.nan
return configs, dd_indices_sorted | [
"def",
"_filter_dipole_dipole",
"(",
"configs",
")",
":",
"# check that dipoles have equal size",
"dist_ab",
"=",
"np",
".",
"abs",
"(",
"configs",
"[",
":",
",",
"0",
"]",
"-",
"configs",
"[",
":",
",",
"1",
"]",
")",
"dist_mn",
"=",
"np",
".",
"abs",
"(",
"configs",
"[",
":",
",",
"2",
"]",
"-",
"configs",
"[",
":",
",",
"3",
"]",
")",
"distances_equal",
"=",
"(",
"dist_ab",
"==",
"dist_mn",
")",
"# check that they are not overlapping",
"not_overlapping",
"=",
"(",
"# either a,b < m,n",
"(",
"(",
"configs",
"[",
":",
",",
"0",
"]",
"<",
"configs",
"[",
":",
",",
"2",
"]",
")",
"&",
"(",
"configs",
"[",
":",
",",
"1",
"]",
"<",
"configs",
"[",
":",
",",
"2",
"]",
")",
"&",
"(",
"configs",
"[",
":",
",",
"0",
"]",
"<",
"configs",
"[",
":",
",",
"3",
"]",
")",
"&",
"(",
"configs",
"[",
":",
",",
"1",
"]",
"<",
"configs",
"[",
":",
",",
"3",
"]",
")",
")",
"|",
"# or m,n < a,b",
"(",
"(",
"configs",
"[",
":",
",",
"2",
"]",
"<",
"configs",
"[",
":",
",",
"0",
"]",
")",
"&",
"(",
"configs",
"[",
":",
",",
"3",
"]",
"<",
"configs",
"[",
":",
",",
"0",
"]",
")",
"&",
"(",
"configs",
"[",
":",
",",
"2",
"]",
"<",
"configs",
"[",
":",
",",
"1",
"]",
")",
"&",
"(",
"configs",
"[",
":",
",",
"3",
"]",
"<",
"configs",
"[",
":",
",",
"1",
"]",
")",
")",
")",
"is_dipole_dipole",
"=",
"(",
"distances_equal",
"&",
"not_overlapping",
")",
"dd_indices",
"=",
"np",
".",
"where",
"(",
"is_dipole_dipole",
")",
"[",
"0",
"]",
"dd_indices_sorted",
"=",
"_sort_dd_skips",
"(",
"configs",
"[",
"dd_indices",
",",
":",
"]",
",",
"dd_indices",
")",
"# set all dd configs to nan",
"configs",
"[",
"dd_indices",
",",
":",
"]",
"=",
"np",
".",
"nan",
"return",
"configs",
",",
"dd_indices_sorted"
] | Filter dipole-dipole configurations
A dipole-dipole configuration is defined using the following criteria:
* equal distance between the two current electrodes and between the two
voltage electrodes
* no overlap of dipoles
Parameters
----------
configs: numpy.ndarray
Nx4 array with N measurement configurations
Returns
-------
configs: numpy.ndarray
Remaining configurations, all dipole-dipole configurations are set to
numpy.nan
dd_indices: numpy.ndarray
indices of dipole-dipole configurations | [
"Filter",
"dipole",
"-",
"dipole",
"configurations"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/filter_config_types.py#L100-L155 | train |
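An illustrative call for the dipole-dipole filter above (float input for the same NaN reason). The returned dict is keyed by skip, i.e. the current-electrode spacing minus one:

import numpy as np

configs = np.array([
    [1, 2, 3, 4],   # skip-0: equal dipole lengths, no overlap
    [1, 3, 5, 7],   # skip-1
    [1, 2, 2, 3],   # dipoles share electrode 2 -> overlapping, rejected
], dtype=float)
remaining, dd = _filter_dipole_dipole(configs)
# dd -> {0: array([0]), 1: array([1])}; rows 0 and 1 of `remaining` are NaN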
geophysics-ubonn/reda | lib/reda/utils/filter_config_types.py | _sort_dd_skips | def _sort_dd_skips(configs, dd_indices_all):
"""Given a set of dipole-dipole configurations, sort them according to
their current skip.
Parameters
----------
configs: Nx4 numpy.ndarray
Dipole-Dipole configurations
Returns
-------
dd_configs_sorted: dict
dictionary with the skip as keys, and arrays/lists with indices to
these skips.
"""
config_current_skips = np.abs(configs[:, 1] - configs[:, 0])
if np.all(np.isnan(config_current_skips)):
return {0: []}
# determine skips
available_skips_raw = np.unique(config_current_skips)
available_skips = available_skips_raw[
~np.isnan(available_skips_raw)
].astype(int)
# now determine the configurations
dd_configs_sorted = {}
for skip in available_skips:
indices = np.where(config_current_skips == skip)[0]
dd_configs_sorted[skip - 1] = dd_indices_all[indices]
return dd_configs_sorted | python | def _sort_dd_skips(configs, dd_indices_all):
"""Given a set of dipole-dipole configurations, sort them according to
their current skip.
Parameters
----------
configs: Nx4 numpy.ndarray
Dipole-Dipole configurations
Returns
-------
dd_configs_sorted: dict
dictionary with the skip as keys, and arrays/lists with indices to
these skips.
"""
config_current_skips = np.abs(configs[:, 1] - configs[:, 0])
if np.all(np.isnan(config_current_skips)):
return {0: []}
# determine skips
available_skips_raw = np.unique(config_current_skips)
available_skips = available_skips_raw[
~np.isnan(available_skips_raw)
].astype(int)
# now determine the configurations
dd_configs_sorted = {}
for skip in available_skips:
indices = np.where(config_current_skips == skip)[0]
dd_configs_sorted[skip - 1] = dd_indices_all[indices]
return dd_configs_sorted | [
"def",
"_sort_dd_skips",
"(",
"configs",
",",
"dd_indices_all",
")",
":",
"config_current_skips",
"=",
"np",
".",
"abs",
"(",
"configs",
"[",
":",
",",
"1",
"]",
"-",
"configs",
"[",
":",
",",
"0",
"]",
")",
"if",
"np",
".",
"all",
"(",
"np",
".",
"isnan",
"(",
"config_current_skips",
")",
")",
":",
"return",
"{",
"0",
":",
"[",
"]",
"}",
"# determine skips",
"available_skips_raw",
"=",
"np",
".",
"unique",
"(",
"config_current_skips",
")",
"available_skips",
"=",
"available_skips_raw",
"[",
"~",
"np",
".",
"isnan",
"(",
"available_skips_raw",
")",
"]",
".",
"astype",
"(",
"int",
")",
"# now determine the configurations",
"dd_configs_sorted",
"=",
"{",
"}",
"for",
"skip",
"in",
"available_skips",
":",
"indices",
"=",
"np",
".",
"where",
"(",
"config_current_skips",
"==",
"skip",
")",
"[",
"0",
"]",
"dd_configs_sorted",
"[",
"skip",
"-",
"1",
"]",
"=",
"dd_indices_all",
"[",
"indices",
"]",
"return",
"dd_configs_sorted"
] | Given a set of dipole-dipole configurations, sort them according to
their current skip.
Parameters
----------
configs: Nx4 numpy.ndarray
Dipole-Dipole configurations
Returns
-------
dd_configs_sorted: dict
dictionary with the skip as keys, and arrays/lists with indices to
these skips. | [
"Given",
"a",
"set",
"of",
"dipole",
"-",
"dipole",
"configurations",
"sort",
"them",
"according",
"to",
"their",
"current",
"skip",
"."
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/filter_config_types.py#L158-L189 | train |
geophysics-ubonn/reda | lib/reda/utils/filter_config_types.py | filter | def filter(configs, settings):
"""Main entry function to filtering configuration types
Parameters
----------
configs: Nx4 array
array containing A-B-M-N configurations
settings: dict
'only_types': ['dd', 'other'], # filter only for those types
Returns
-------
dict
results dict containing filter results (indices) for all registered
filter functions. All remaining configs are stored under the key
'not_sorted'
"""
if isinstance(configs, pd.DataFrame):
configs = configs[['a', 'b', 'm', 'n']].values
# assign short labels to Python functions
filter_funcs = {
'dd': _filter_dipole_dipole,
'schlumberger': _filter_schlumberger,
}
# we need a list to fix the call order of filter functions
keys = ['dd', 'schlumberger', ]
allowed_keys = settings.get('only_types', filter_funcs.keys())
results = {}
# we operate iteratively on the configs, set the first round here
# rows are iteratively set to nan when filters remove them!
configs_filtered = configs.copy().astype(float)
for key in keys:
if key in allowed_keys:
configs_filtered, indices_filtered = filter_funcs[key](
configs_filtered,
)
if len(indices_filtered) > 0:
results[key] = indices_filtered
# add all remaining indices to the results dict
results['not_sorted'] = np.where(
~np.all(np.isnan(configs_filtered), axis=1)
)[0]
return results | python | def filter(configs, settings):
"""Main entry function to filtering configuration types
Parameters
----------
configs: Nx4 array
array containing A-B-M-N configurations
settings: dict
'only_types': ['dd', 'other'], # filter only for those types
Returns
-------
dict
results dict containing filter results (indices) for all registered
filter functions. All remaining configs are stored under the key
'not_sorted'
"""
if isinstance(configs, pd.DataFrame):
configs = configs[['a', 'b', 'm', 'n']].values
# assign short labels to Python functions
filter_funcs = {
'dd': _filter_dipole_dipole,
'schlumberger': _filter_schlumberger,
}
# we need a list to fix the call order of filter functions
keys = ['dd', 'schlumberger', ]
allowed_keys = settings.get('only_types', filter_funcs.keys())
results = {}
# we operate iteratively on the configs, set the first round here
# rows are iteratively set to nan when filters remove them!
configs_filtered = configs.copy().astype(float)
for key in keys:
if key in allowed_keys:
configs_filtered, indices_filtered = filter_funcs[key](
configs_filtered,
)
if len(indices_filtered) > 0:
results[key] = indices_filtered
# add all remaining indices to the results dict
results['not_sorted'] = np.where(
~np.all(np.isnan(configs_filtered), axis=1)
)[0]
return results | [
"def",
"filter",
"(",
"configs",
",",
"settings",
")",
":",
"if",
"isinstance",
"(",
"configs",
",",
"pd",
".",
"DataFrame",
")",
":",
"configs",
"=",
"configs",
"[",
"[",
"'a'",
",",
"'b'",
",",
"'m'",
",",
"'n'",
"]",
"]",
".",
"values",
"# assign short labels to Python functions",
"filter_funcs",
"=",
"{",
"'dd'",
":",
"_filter_dipole_dipole",
",",
"'schlumberger'",
":",
"_filter_schlumberger",
",",
"}",
"# we need a list to fix the call order of filter functions",
"keys",
"=",
"[",
"'dd'",
",",
"'schlumberger'",
",",
"]",
"allowed_keys",
"=",
"settings",
".",
"get",
"(",
"'only_types'",
",",
"filter_funcs",
".",
"keys",
"(",
")",
")",
"results",
"=",
"{",
"}",
"# we operate iteratively on the configs, set the first round here",
"# rows are iteratively set to nan when filters remove them!",
"configs_filtered",
"=",
"configs",
".",
"copy",
"(",
")",
".",
"astype",
"(",
"float",
")",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"in",
"allowed_keys",
":",
"configs_filtered",
",",
"indices_filtered",
"=",
"filter_funcs",
"[",
"key",
"]",
"(",
"configs_filtered",
",",
")",
"if",
"len",
"(",
"indices_filtered",
")",
">",
"0",
":",
"results",
"[",
"key",
"]",
"=",
"indices_filtered",
"# add all remaining indices to the results dict",
"results",
"[",
"'not_sorted'",
"]",
"=",
"np",
".",
"where",
"(",
"~",
"np",
".",
"all",
"(",
"np",
".",
"isnan",
"(",
"configs_filtered",
")",
",",
"axis",
"=",
"1",
")",
")",
"[",
"0",
"]",
"return",
"results"
] | Main entry function to filtering configuration types
Parameters
----------
configs: Nx4 array
array containing A-B-M-N configurations
settings: dict
'only_types': ['dd', 'other'], # filter only for those types
Returns
-------
dict
results dict containing filter results (indices) for all registered
filter functions. All remaining configs are stored under the key
'not_sorted'
"Main",
"entry",
"function",
"to",
"filtering",
"configuration",
"types"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/filter_config_types.py#L192-L241 | train |
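A usage sketch for the entry function above. It shadows the builtin `filter`, so in practice it would be imported under its module name (reda.utils.filter_config_types is the path suggested by the record's URL):

import pandas as pd

df = pd.DataFrame(
    [[1, 2, 4, 3],   # dipole-dipole, skip 0
     [3, 6, 4, 5],   # three symmetric injections around MN=(4,5) ...
     [2, 7, 4, 5],
     [1, 8, 4, 5],   # ... -> Schlumberger
     [1, 5, 2, 3]],  # matches neither filter
    columns=['a', 'b', 'm', 'n'])
results = filter(df, settings={})
# roughly: {'dd': {0: array([0])}, 'schlumberger': {0: array([1, 2, 3])},
#           'not_sorted': array([4])}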
geophysics-ubonn/reda | lib/reda/exporters/crtomo.py | save_block_to_crt | def save_block_to_crt(filename, group, norrec='all', store_errors=False):
"""Save a dataset to a CRTomo-compatible .crt file
Parameters
----------
filename : string
Output filename
group : pandas.group
Data group
norrec : string
Which data to export. Possible values: all|nor|rec
store_errors : bool
If true, store errors of the data in a separate column
"""
if norrec != 'all':
group = group.query('norrec == "{0}"'.format(norrec))
# todo: we need to fix the global naming scheme for columns!
with open(filename, 'wb') as fid:
fid.write(
bytes('{0}\n'.format(len(group)), 'UTF-8')
)
AB = group['a'] * 1e4 + group['b']
MN = group['m'] * 1e4 + group['n']
line = [
AB.values.astype(int),
MN.values.astype(int),
group['r'].values,
]
if 'rpha' in group:
line.append(group['rpha'].values)
else:
line.append(group['r'].values * 0.0)
fmt = '%i %i %f %f'
if store_errors:
line += (
group['d|Z|_[Ohm]'].values,
group['dphi_[mrad]'].values,
)
fmt += ' %f %f'
subdata = np.array(line).T
np.savetxt(fid, subdata, fmt=fmt) | python | def save_block_to_crt(filename, group, norrec='all', store_errors=False):
"""Save a dataset to a CRTomo-compatible .crt file
Parameters
----------
filename : string
Output filename
group : pandas.group
Data group
norrec : string
Which data to export. Possible values: all|nor|rec
store_errors : bool
If true, store errors of the data in a separate column
"""
if norrec != 'all':
group = group.query('norrec == "{0}"'.format(norrec))
# todo: we need to fix the global naming scheme for columns!
with open(filename, 'wb') as fid:
fid.write(
bytes('{0}\n'.format(len(group)), 'UTF-8')
)
AB = group['a'] * 1e4 + group['b']
MN = group['m'] * 1e4 + group['n']
line = [
AB.values.astype(int),
MN.values.astype(int),
group['r'].values,
]
if 'rpha' in group:
line.append(group['rpha'].values)
else:
line.append(group['r'].values * 0.0)
fmt = '%i %i %f %f'
if store_errors:
line += (
group['d|Z|_[Ohm]'].values,
group['dphi_[mrad]'].values,
)
fmt += ' %f %f'
subdata = np.array(line).T
np.savetxt(fid, subdata, fmt=fmt) | [
"def",
"save_block_to_crt",
"(",
"filename",
",",
"group",
",",
"norrec",
"=",
"'all'",
",",
"store_errors",
"=",
"False",
")",
":",
"if",
"norrec",
"!=",
"'all'",
":",
"group",
"=",
"group",
".",
"query",
"(",
"'norrec == \"{0}\"'",
".",
"format",
"(",
"norrec",
")",
")",
"# todo: we need to fix the global naming scheme for columns!",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"fid",
":",
"fid",
".",
"write",
"(",
"bytes",
"(",
"'{0}\\n'",
".",
"format",
"(",
"len",
"(",
"group",
")",
")",
",",
"'UTF-8'",
")",
")",
"AB",
"=",
"group",
"[",
"'a'",
"]",
"*",
"1e4",
"+",
"group",
"[",
"'b'",
"]",
"MN",
"=",
"group",
"[",
"'m'",
"]",
"*",
"1e4",
"+",
"group",
"[",
"'n'",
"]",
"line",
"=",
"[",
"AB",
".",
"values",
".",
"astype",
"(",
"int",
")",
",",
"MN",
".",
"values",
".",
"astype",
"(",
"int",
")",
",",
"group",
"[",
"'r'",
"]",
".",
"values",
",",
"]",
"if",
"'rpha'",
"in",
"group",
":",
"line",
".",
"append",
"(",
"group",
"[",
"'rpha'",
"]",
".",
"values",
")",
"else",
":",
"line",
".",
"append",
"(",
"group",
"[",
"'r'",
"]",
".",
"values",
"*",
"0.0",
")",
"fmt",
"=",
"'%i %i %f %f'",
"if",
"store_errors",
":",
"line",
"+=",
"(",
"group",
"[",
"'d|Z|_[Ohm]'",
"]",
".",
"values",
",",
"group",
"[",
"'dphi_[mrad]'",
"]",
".",
"values",
",",
")",
"fmt",
"+=",
"' %f %f'",
"subdata",
"=",
"np",
".",
"array",
"(",
"line",
")",
".",
"T",
"np",
".",
"savetxt",
"(",
"fid",
",",
"subdata",
",",
"fmt",
"=",
"fmt",
")"
] | Save a dataset to a CRTomo-compatible .crt file
Parameters
----------
filename : string
Output filename
group : pandas.group
Data group
norrec : string
Which data to export. Possible values: all|nor|rec
store_errors : bool
If true, store errors of the data in a separate column | [
"Save",
"a",
"dataset",
"to",
"a",
"CRTomo",
"-",
"compatible",
".",
"crt",
"file"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/exporters/crtomo.py#L7-L53 | train |
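A usage sketch for the exporter above. Electrode pairs are packed as a*1e4+b and m*1e4+n; with no 'rpha' column the phase column is written as zeros:

import pandas as pd

df = pd.DataFrame({'a': [1], 'b': [2], 'm': [3], 'n': [4],
                   'r': [12.5], 'norrec': ['nor']})
save_block_to_crt('volt.crt', df, norrec='nor')
# volt.crt then contains:
# 1
# 10002 30004 12.500000 0.000000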
geophysics-ubonn/reda | lib/reda/eis/units.py | get_label | def get_label(parameter, ptype, flavor=None, mpl=None):
"""Return the label of a given SIP parameter
Parameters
----------
parameter: string
type of parameter, e.g. rmag|rpha|cre|cim
ptype: string
material|meas. Either return the material property (e.g. resistivity)
or the measurement parameter (e.g., impedance)
flavor: string, optional
if set, must be one of latex|mathml. Return a label for latex
processing, or for mathml processing
mpl: matplotlib, optional
if set, infer flavor from mpl.rcParams. Will not be used if flavor is
set
Returns
-------
label: string
the requested label
"""
# determine flavor
if flavor is not None:
if flavor not in ('latex', 'mathml'):
raise Exception('flavor not recognized: {}'.format(flavor))
else:
if mpl is None:
raise Exception('either the flavor or mpl must be provided')
rendering = mpl.rcParams['text.usetex']
if rendering:
flavor = 'latex'
else:
flavor = 'mathml'
# check if the requested label is present
if parameter not in labels:
raise Exception('parameter not known')
if ptype not in labels[parameter]:
raise Exception('ptype not known')
if flavor not in labels[parameter][ptype]:
raise Exception('flavor not known')
return labels[parameter][ptype][flavor] | python | def get_label(parameter, ptype, flavor=None, mpl=None):
"""Return the label of a given SIP parameter
Parameters
----------
parameter: string
type of parameter, e.g. rmag|rpha|cre|cim
ptype: string
material|meas. Either return the material property (e.g. resistivity)
or the measurement parameter (e.g., impedance)
flavor: string, optional
if set, must be one of latex|mathml. Return a label for latex
processing, or for mathml processing
mpl: matplotlib, optional
if set, infer flavor from mpl.rcParams. Will not be used if flavor is
set
Returns
-------
label: string
the requested label
"""
# determine flavor
if flavor is not None:
if flavor not in ('latex', 'mathml'):
raise Exception('flavor not recognized: {}'.format(flavor))
else:
if mpl is None:
raise Exception('either the flavor or mpl must be provided')
rendering = mpl.rcParams['text.usetex']
if rendering:
flavor = 'latex'
else:
flavor = 'mathml'
# check if the requested label is present
if parameter not in labels:
raise Exception('parameter not known')
if ptype not in labels[parameter]:
raise Exception('ptype not known')
if flavor not in labels[parameter][ptype]:
raise Exception('flavor not known')
return labels[parameter][ptype][flavor] | [
"def",
"get_label",
"(",
"parameter",
",",
"ptype",
",",
"flavor",
"=",
"None",
",",
"mpl",
"=",
"None",
")",
":",
"# determine flavor",
"if",
"flavor",
"is",
"not",
"None",
":",
"if",
"flavor",
"not",
"in",
"(",
"'latex'",
",",
"'mathml'",
")",
":",
"raise",
"Exception",
"(",
"'flavor not recognized: {}'",
".",
"format",
"(",
"flavor",
")",
")",
"else",
":",
"if",
"mpl",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'either the flavor or mpl must be provided'",
")",
"rendering",
"=",
"mpl",
".",
"rcParams",
"[",
"'text.usetex'",
"]",
"if",
"rendering",
":",
"flavor",
"=",
"'latex'",
"else",
":",
"flavor",
"=",
"'mathml'",
"# check if the requested label is present",
"if",
"parameter",
"not",
"in",
"labels",
":",
"raise",
"Exception",
"(",
"'parameter not known'",
")",
"if",
"ptype",
"not",
"in",
"labels",
"[",
"parameter",
"]",
":",
"raise",
"Exception",
"(",
"'ptype not known'",
")",
"if",
"flavor",
"not",
"in",
"labels",
"[",
"parameter",
"]",
"[",
"ptype",
"]",
":",
"raise",
"Exception",
"(",
"'flavor not known'",
")",
"return",
"labels",
"[",
"parameter",
"]",
"[",
"ptype",
"]",
"[",
"flavor",
"]"
] | Return the label of a given SIP parameter
Parameters
----------
parameter: string
type of parameter, e.g. rmag|rpha|cre|cim
ptype: string
material|meas. Either return the material property (e.g. resistivity)
or the measurement parameter (e.g., impedance)
flavor: string, optional
if set, must be one of latex|mathml. Return a label for latex
processing, or for mathml processing
mpl: matplotlib, optional
if set, infer flavor from mpl.rcParams. Will not be used if flavor is
set
Returns
-------
label: string
the requested label | [
"Return",
"the",
"label",
"of",
"a",
"given",
"SIP",
"parameter"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/eis/units.py#L47-L90 | train |
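Two hypothetical lookups for the label helper above, using parameter/ptype names mentioned in its docstring ('rmag', 'meas', 'material'); whether these exist as keys of the module-level `labels` dict is an assumption:

import matplotlib as mpl

label = get_label('rmag', 'meas', flavor='latex')  # explicit flavor
label = get_label('rmag', 'material', mpl=mpl)     # flavor from mpl.rcParams['text.usetex']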
geophysics-ubonn/reda | lib/reda/eis/plots.py | sip_response._add_labels | def _add_labels(self, axes, dtype):
"""Given a 2x2 array of axes, add x and y labels
Parameters
----------
axes: numpy.ndarray, 2x2
A numpy array containing the four principal axes of an SIP plot
dtype: string
Can be either 'rho' or 'r', indicating the type of data that is
plotted: 'rho' stands for resistivities/conductivities, 'r' stands
for impedances/conductances
Returns
-------
None
"""
for ax in axes[1, :].flat:
ax.set_xlabel('frequency [Hz]')
if dtype == 'rho':
axes[0, 0].set_ylabel(r'$|\rho| [\Omega m]$')
axes[0, 1].set_ylabel(r'$-\phi [mrad]$')
axes[1, 0].set_ylabel(r"$\sigma' [S/m]$")
axes[1, 1].set_ylabel(r"$\sigma'' [S/m]$")
elif dtype == 'r':
axes[0, 0].set_ylabel(r'$|R| [\Omega]$')
axes[0, 1].set_ylabel(r'$-\phi [mrad]$')
axes[1, 0].set_ylabel(r"$Y' [S]$")
axes[1, 1].set_ylabel(r"$Y'' [S]$")
else:
raise Exception('dtype not known: {}'.format(dtype)) | python | def _add_labels(self, axes, dtype):
"""Given a 2x2 array of axes, add x and y labels
Parameters
----------
axes: numpy.ndarray, 2x2
A numpy array containing the four principal axes of an SIP plot
dtype: string
Can be either 'rho' or 'r', indicating the type of data that is
plotted: 'rho' stands for resistivities/conductivities, 'r' stands
for impedances/conductances
Returns
-------
None
"""
for ax in axes[1, :].flat:
ax.set_xlabel('frequency [Hz]')
if dtype == 'rho':
axes[0, 0].set_ylabel(r'$|\rho| [\Omega m]$')
axes[0, 1].set_ylabel(r'$-\phi [mrad]$')
axes[1, 0].set_ylabel(r"$\sigma' [S/m]$")
axes[1, 1].set_ylabel(r"$\sigma'' [S/m]$")
elif dtype == 'r':
axes[0, 0].set_ylabel(r'$|R| [\Omega]$')
axes[0, 1].set_ylabel(r'$-\phi [mrad]$')
axes[1, 0].set_ylabel(r"$Y' [S]$")
axes[1, 1].set_ylabel(r"$Y'' [S]$")
else:
raise Exception('dtype not known: {}'.format(dtype)) | [
"def",
"_add_labels",
"(",
"self",
",",
"axes",
",",
"dtype",
")",
":",
"for",
"ax",
"in",
"axes",
"[",
"1",
",",
":",
"]",
".",
"flat",
":",
"ax",
".",
"set_xlabel",
"(",
"'frequency [Hz]'",
")",
"if",
"dtype",
"==",
"'rho'",
":",
"axes",
"[",
"0",
",",
"0",
"]",
".",
"set_ylabel",
"(",
"r'$|\\rho| [\\Omega m]$'",
")",
"axes",
"[",
"0",
",",
"1",
"]",
".",
"set_ylabel",
"(",
"r'$-\\phi [mrad]$'",
")",
"axes",
"[",
"1",
",",
"0",
"]",
".",
"set_ylabel",
"(",
"r\"$\\sigma' [S/m]$\"",
")",
"axes",
"[",
"1",
",",
"1",
"]",
".",
"set_ylabel",
"(",
"r\"$\\sigma'' [S/m]$\"",
")",
"elif",
"dtype",
"==",
"'r'",
":",
"axes",
"[",
"0",
",",
"0",
"]",
".",
"set_ylabel",
"(",
"r'$|R| [\\Omega]$'",
")",
"axes",
"[",
"0",
",",
"1",
"]",
".",
"set_ylabel",
"(",
"r'$-\\phi [mrad]$'",
")",
"axes",
"[",
"1",
",",
"0",
"]",
".",
"set_ylabel",
"(",
"r\"$Y' [S]$\"",
")",
"axes",
"[",
"1",
",",
"1",
"]",
".",
"set_ylabel",
"(",
"r\"$Y'' [S]$\"",
")",
"else",
":",
"raise",
"Exception",
"(",
"'dtype not known: {}'",
".",
"format",
"(",
"dtype",
")",
")"
] | Given a 2x2 array of axes, add x and y labels
Parameters
----------
axes: numpy.ndarray, 2x2
A numpy array containing the four principal axes of an SIP plot
dtype: string
Can be either 'rho' or 'r', indicating the type of data that is
plotted: 'rho' stands for resistivities/conductivities, 'r' stands
for impedances/conductances
Returns
-------
None | [
"Given",
"a",
"2x2",
"array",
"of",
"axes",
"add",
"x",
"and",
"y",
"labels"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/eis/plots.py#L78-L108 | train |
geophysics-ubonn/reda | lib/reda/eis/plots.py | multi_sip_response.add | def add(self, response, label=None):
"""add one response object to the list
"""
if not isinstance(response, sip_response.sip_response):
raise Exception(
'can only add sip_response.sip_response objects'
)
self.objects.append(response)
if label is None:
self.labels.append('na')
else:
self.labels.append(label) | python | def add(self, response, label=None):
"""add one response object to the list
"""
if not isinstance(response, sip_response.sip_response):
raise Exception(
'can only add sip_response.sip_response objects'
)
self.objects.append(response)
if label is None:
self.labels.append('na')
else:
self.labels.append(label) | [
"def",
"add",
"(",
"self",
",",
"response",
",",
"label",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"response",
",",
"sip_response",
".",
"sip_response",
")",
":",
"raise",
"Exception",
"(",
"'can only add sip_reponse.sip_response objects'",
")",
"self",
".",
"objects",
".",
"append",
"(",
"response",
")",
"if",
"label",
"is",
"None",
":",
"self",
".",
"labels",
".",
"append",
"(",
"'na'",
")",
"else",
":",
"self",
".",
"labels",
".",
"append",
"(",
"label",
")"
] | add one response object to the list | [
"add",
"one",
"response",
"object",
"to",
"the",
"list"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/eis/plots.py#L358-L370 | train |
geophysics-ubonn/reda | lib/reda/eis/convert.py | split_data | def split_data(data, squeeze=False):
"""
Split 1D or 2D into two parts, using the last axis
Parameters
----------
data:
squeeze : squeeze results to remove unnecessary dimensions
"""
vdata = np.atleast_2d(data)
nr_freqs = int(vdata.shape[1] / 2)
part1 = vdata[:, 0:nr_freqs]
part2 = vdata[:, nr_freqs:]
if(squeeze):
part1 = part1.squeeze()
part2 = part2.squeeze()
return part1, part2 | python | def split_data(data, squeeze=False):
"""
Split 1D or 2D into two parts, using the last axis
Parameters
----------
data:
squeeze : squeeze results to remove unnecessary dimensions
"""
vdata = np.atleast_2d(data)
nr_freqs = int(vdata.shape[1] / 2)
part1 = vdata[:, 0:nr_freqs]
part2 = vdata[:, nr_freqs:]
if(squeeze):
part1 = part1.squeeze()
part2 = part2.squeeze()
return part1, part2 | [
"def",
"split_data",
"(",
"data",
",",
"squeeze",
"=",
"False",
")",
":",
"vdata",
"=",
"np",
".",
"atleast_2d",
"(",
"data",
")",
"nr_freqs",
"=",
"int",
"(",
"vdata",
".",
"shape",
"[",
"1",
"]",
"/",
"2",
")",
"part1",
"=",
"vdata",
"[",
":",
",",
"0",
":",
"nr_freqs",
"]",
"part2",
"=",
"vdata",
"[",
":",
",",
"nr_freqs",
":",
"]",
"if",
"(",
"squeeze",
")",
":",
"part1",
"=",
"part1",
".",
"squeeze",
"(",
")",
"part2",
"=",
"part2",
".",
"squeeze",
"(",
")",
"return",
"part1",
",",
"part2"
] | Split 1D or 2D into two parts, using the last axis
Parameters
----------
data:
squeeze : squeeze results to remove unnecessary dimensions | [
"Split",
"1D",
"or",
"2D",
"into",
"two",
"parts",
"using",
"the",
"last",
"axis"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/eis/convert.py#L14-L30 | train |
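A worked example of the split above: the last axis is halved, so six values become two blocks of three (e.g. magnitudes followed by phases):

import numpy as np

data = np.array([10.0, 20.0, 30.0, -1.0, -2.0, -3.0])
mag, pha = split_data(data, squeeze=True)
# mag -> array([10., 20., 30.]), pha -> array([-1., -2., -3.])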
geophysics-ubonn/reda | lib/reda/eis/convert.py | convert | def convert(input_format, output_format, data, one_spectrum=False):
"""
Convert from the given format to the requested format
Parameters
----------
input_format : format of input data (parameter 'data')
output_format : format of output data
data : numpy array containing data in specified input format
one_spectrum : True|False, the input data comprises one spectrum. This
allows for an additional format of the data array.
Possible input/output formats:
------------------------------
'lnrmag_rpha'
'log10rmag_rpha'
'rmag_rpha'
'rre_rim'
'rre_rmim'
'cmag_cpha'
'cre_cim'
'cre_cmim'
'ccomplex'
'rcomplex'
Array format
------------
data is either 1D or 2D. A 1D array corresponds to one spectrum, with double
the size of the frequencies (which are not needed for the conversion).
Thus, the first half either comprises magnitude data and the second one
phase data, or the two parts comprise real and imaginary parts.
For the 2D case there exist two possibilities:
First, if one_spectrum is False, then the first axis denotes the spectrum
number, and each spectrum is stored on the second axis as described for the
1D case.
Second, if one_spectrum is True, and the first axis has the size two, then
the axis denotes either magnitude (index 0) and phase (index 1), or real
(index 0) and imaginary (index 1) parts. The second axis has the same size
as there are frequencies.
Internally we always convert to real part and imaginary part of
conductivity, and then convert back to the output format.
Return values are of the same dimensions as input variables.
"""
if input_format == output_format:
return data
if input_format not in from_converters:
raise KeyError('Input format {0} not known!'.format(input_format))
if output_format not in to_converters:
raise KeyError('Output format {0} not known!'.format(output_format))
# internally we always work with the second axis of double the frequency
# size
if len(data.shape) == 2 and data.shape[0] == 2 and one_spectrum:
work_data = np.hstack((data[0, :], data[1, :]))
one_spec_2d = True
else:
work_data = data
one_spec_2d = False
cre, cim = from_converters[input_format](work_data)
converted_data = to_converters[output_format](cre, cim)
if one_spec_2d:
part1, part2 = split_data(converted_data, True)
converted_data = np.vstack((part1, part2))
# reshape to input size (this should only be necessary for 1D data)
if len(data.shape) == 1:
converted_data = np.squeeze(converted_data)
return converted_data | python | def convert(input_format, output_format, data, one_spectrum=False):
"""
Convert from the given format to the requested format
Parameters
----------
input_format : format of input data (parameter 'data')
output_format : format of output data
data : numpy array containing data in specified input format
one_spectrum : True|False, the input data comprises one spectrum. This
allows for an additional format of the data array.
Possible input/output formats:
------------------------------
'lnrmag_rpha'
'log10rmag_rpha'
'rmag_rpha'
'rre_rim'
'rre_rmim'
'cmag_cpha'
'cre_cim'
'cre_cmim'
'ccomplex'
'rcomplex'
Array format
------------
data is either 1D or 2D. A 1D array corresponds to one spectrum, with double
the size of the frequencies (which are not needed for the conversion).
Thus, the first half either comprises magnitude data and the second one
phase data, or the two parts comprise real and imaginary parts.
For the 2D case there exist two possibilities:
First, if one_spectrum is False, then the first axis denotes the spectrum
number, and each spectrum is stored on the second axis as described for the
1D case.
Second, if one_spectrum is True, and the first axis has the size two, then
the axis denotes either magnitude (index 0) and phase (index 1), or real
(index 0) and imaginary (index 1) parts. The second axis has the same size
as there are frequencies.
Internally we always convert to real part and imaginary part of
conductivity, and then convert back to the output format.
Return values are of the same dimensions as input variables.
"""
if input_format == output_format:
return data
if input_format not in from_converters:
raise KeyError('Input format {0} not known!'.format(input_format))
if output_format not in to_converters:
raise KeyError('Output format {0} not known!'.format(output_format))
# internally we always work with the second axis of double the frequency
# size
if len(data.shape) == 2 and data.shape[0] == 2 and one_spectrum:
work_data = np.hstack((data[0, :], data[1, :]))
one_spec_2d = True
else:
work_data = data
one_spec_2d = False
cre, cim = from_converters[input_format](work_data)
converted_data = to_converters[output_format](cre, cim)
if one_spec_2d:
part1, part2 = split_data(converted_data, True)
converted_data = np.vstack((part1, part2))
# reshape to input size (this should only be necessary for 1D data)
if len(data.shape) == 1:
converted_data = np.squeeze(converted_data)
return converted_data | [
"def",
"convert",
"(",
"input_format",
",",
"output_format",
",",
"data",
",",
"one_spectrum",
"=",
"False",
")",
":",
"if",
"input_format",
"==",
"output_format",
":",
"return",
"data",
"if",
"input_format",
"not",
"in",
"from_converters",
":",
"raise",
"KeyError",
"(",
"'Input format {0} not known!'",
".",
"format",
"(",
"input_format",
")",
")",
"if",
"output_format",
"not",
"in",
"to_converters",
":",
"raise",
"KeyError",
"(",
"'Output format {0} not known!'",
".",
"format",
"(",
"output_format",
")",
")",
"# internally we always work with the second axis of double the frequency",
"# size",
"if",
"len",
"(",
"data",
".",
"shape",
")",
"==",
"2",
"and",
"data",
".",
"shape",
"[",
"0",
"]",
"==",
"2",
"and",
"one_spectrum",
":",
"work_data",
"=",
"np",
".",
"hstack",
"(",
"(",
"data",
"[",
"0",
",",
":",
"]",
",",
"data",
"[",
"1",
",",
":",
"]",
")",
")",
"one_spec_2d",
"=",
"True",
"else",
":",
"work_data",
"=",
"data",
"one_spec_2d",
"=",
"False",
"cre",
",",
"cim",
"=",
"from_converters",
"[",
"input_format",
"]",
"(",
"work_data",
")",
"converted_data",
"=",
"to_converters",
"[",
"output_format",
"]",
"(",
"cre",
",",
"cim",
")",
"if",
"one_spec_2d",
":",
"part1",
",",
"part2",
"=",
"split_data",
"(",
"converted_data",
",",
"True",
")",
"converted_data",
"=",
"np",
".",
"vstack",
"(",
"(",
"part1",
",",
"part2",
")",
")",
"# reshape to input size (this should only be necessary for 1D data)",
"if",
"len",
"(",
"data",
".",
"shape",
")",
"==",
"1",
":",
"converted_data",
"=",
"np",
".",
"squeeze",
"(",
"converted_data",
")",
"return",
"converted_data"
] | Convert from the given format to the requested format
Parameters
----------
input_format : format of input data (parameter 'data')
output_format : format of output data
data : numpy array containing data in specified input format
one_spectrum : True|False, the input data comprises one spectrum. This
allows for an additional format of the data array.
Possible input/output formats:
------------------------------
'lnrmag_rpha'
'log10rmag_rpha'
'rmag_rpha'
'rre_rim'
'rre_rmim'
'cmag_cpha'
'cre_cim'
'cre_cmim'
'ccomplex'
'rcomplex'
Array format
------------
data is either 1D or 2D. A 1D array corresponds to one spectrum, with twice
as many entries as there are frequencies (which are not needed for the conversion).
Thus, the first half either comprises magnitude data and the second one
phase data, or the parts comprise real and imaginary parts.
For the 2D case there exist two possibilities:
First, if one_spectrum is False, then the first axis denotes the spectrum
number, and each spectrum is stored on the second axis as described for the
1D case.
Second, if one_spectrum is True, and the first axis has the size two, then
the axis denotes either magnitude (index 0) and phase (index 1), or real
(index 0) and imaginary (index 1) parts. The second axis has the same size
as there are frequencies.
Internally we always convert to real part and imaginary part of
conductivity, and then convert back to the output format.
Return values are of the same dimensions as input variables. | [
"Convert",
"from",
"the",
"given",
"format",
"to",
"the",
"requested",
"format"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/eis/convert.py#L233-L311 | train |
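
A minimal usage sketch for the record above. The import path is inferred from the record's path field, and the sample values are illustrative:

import numpy as np
from reda.eis.convert import convert  # import path inferred from the record's path field

# one spectrum at three frequencies: resistance magnitude and phase
rmag = np.array([100.0, 95.0, 90.0])
rpha = np.array([-1.0, -5.0, -10.0])
data = np.vstack((rmag, rpha))  # shape (2, N), so one_spectrum=True applies

# convert to real and imaginary parts of the complex conductivity
cre_cim = convert('rmag_rpha', 'cre_cim', data, one_spectrum=True)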
pennlabs/penn-sdk-python | penn/directory.py | Directory.search | def search(self, params, standardize=False):
"""Get a list of person objects for the given search params.
:param params: Dictionary specifying the query parameters
:param standardize: Whether to standardize names and other features,
currently disabled for backwards compatibility. Currently
standardizes names, lowercases emails, and removes faculty label
from affiliation.
>>> people = d.search({'first_name': 'tobias', 'last_name': 'funke'})
"""
resp = self._request(ENDPOINTS['SEARCH'], params)
if not standardize:
return resp
# Standardization logic
for res in resp['result_data']:
res = self.standardize(res)
return resp | python | def search(self, params, standardize=False):
"""Get a list of person objects for the given search params.
:param params: Dictionary specifying the query parameters
:param standardize: Whether to standardize names and other features,
currently disabled for backwards compatibility. Currently
standardizes names, lowercases emails, and removes faculty label
from affiliation.
>>> people = d.search({'first_name': 'tobias', 'last_name': 'funke'})
"""
resp = self._request(ENDPOINTS['SEARCH'], params)
if not standardize:
return resp
# Standardization logic
for res in resp['result_data']:
res = self.standardize(res)
return resp | [
"def",
"search",
"(",
"self",
",",
"params",
",",
"standardize",
"=",
"False",
")",
":",
"resp",
"=",
"self",
".",
"_request",
"(",
"ENDPOINTS",
"[",
"'SEARCH'",
"]",
",",
"params",
")",
"if",
"not",
"standardize",
":",
"return",
"resp",
"# Standardization logic",
"for",
"res",
"in",
"resp",
"[",
"'result_data'",
"]",
":",
"res",
"=",
"self",
".",
"standardize",
"(",
"res",
")",
"return",
"resp"
] | Get a list of person objects for the given search params.
:param params: Dictionary specifying the query parameters
:param standardize: Whether to standardize names and other features,
currently disabled for backwards compatibility. Currently
standardizes names, lowercases emails, and removes faculty label
from affiliation.
>>> people = d.search({'first_name': 'tobias', 'last_name': 'funke'}) | [
"Get",
"a",
"list",
"of",
"person",
"objects",
"for",
"the",
"given",
"search",
"params",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/directory.py#L41-L58 | train |
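
A short usage sketch for the method above. The Directory constructor arguments (API credentials) are an assumption, as the constructor is not part of this record:

from penn.directory import Directory  # module path taken from the record

d = Directory('MY_USERNAME', 'MY_PASSWORD')  # hypothetical credentials
resp = d.search({'first_name': 'tobias', 'last_name': 'funke'}, standardize=True)
for person in resp['result_data']:
    print(person)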
pennlabs/penn-sdk-python | penn/directory.py | Directory.detail_search | def detail_search(self, params, standardize=False):
"""Get a detailed list of person objects for the given search params.
:param params:
Dictionary specifying the query parameters
>>> people_detailed = d.detail_search({'first_name': 'tobias', 'last_name': 'funke'})
"""
response = self._request(ENDPOINTS['SEARCH'], params)
result_data = []
for person in response['result_data']:
try:
detail = self.person_details(person['person_id'],
standardize=standardize)
except ValueError:
pass
else:
result_data.append(detail)
response['result_data'] = result_data
return response | python | def detail_search(self, params, standardize=False):
"""Get a detailed list of person objects for the given search params.
:param params:
Dictionary specifying the query parameters
>>> people_detailed = d.detail_search({'first_name': 'tobias', 'last_name': 'funke'})
"""
response = self._request(ENDPOINTS['SEARCH'], params)
result_data = []
for person in response['result_data']:
try:
detail = self.person_details(person['person_id'],
standardize=standardize)
except ValueError:
pass
else:
result_data.append(detail)
response['result_data'] = result_data
return response | [
"def",
"detail_search",
"(",
"self",
",",
"params",
",",
"standardize",
"=",
"False",
")",
":",
"response",
"=",
"self",
".",
"_request",
"(",
"ENDPOINTS",
"[",
"'SEARCH'",
"]",
",",
"params",
")",
"result_data",
"=",
"[",
"]",
"for",
"person",
"in",
"response",
"[",
"'result_data'",
"]",
":",
"try",
":",
"detail",
"=",
"self",
".",
"person_details",
"(",
"person",
"[",
"'person_id'",
"]",
",",
"standardize",
"=",
"standardize",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"result_data",
".",
"append",
"(",
"detail",
")",
"response",
"[",
"'result_data'",
"]",
"=",
"result_data",
"return",
"response"
] | Get a detailed list of person objects for the given search params.
:param params:
Dictionary specifying the query parameters
>>> people_detailed = d.detail_search({'first_name': 'tobias', 'last_name': 'funke'}) | [
"Get",
"a",
"detailed",
"list",
"of",
"person",
"objects",
"for",
"the",
"given",
"search",
"params",
"."
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/directory.py#L60-L81 | train |
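
Note that detail_search issues one person_details request per hit and silently drops entries whose lookup raises ValueError, so the detailed list can be shorter than the raw search result. A sketch reusing the d instance from the previous example:

resp = d.detail_search({'last_name': 'funke'})
print(len(resp['result_data']))  # may be fewer entries than the raw search returned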
pennlabs/penn-sdk-python | penn/directory.py | Directory.person_details | def person_details(self, person_id, standardize=False):
"""Get a detailed person object
:param person_id:
String corresponding to the person's id.
    >>> instructor = d.person_details('jhs878sfd03b38b0d463b16320b5e438')
"""
resp = self._request(path.join(ENDPOINTS['DETAILS'], person_id))
if standardize:
resp['result_data'] = [self.standardize(res) for res in resp['result_data']]
return resp | python | def person_details(self, person_id, standardize=False):
"""Get a detailed person object
:param person_id:
String corresponding to the person's id.
    >>> instructor = d.person_details('jhs878sfd03b38b0d463b16320b5e438')
"""
resp = self._request(path.join(ENDPOINTS['DETAILS'], person_id))
if standardize:
resp['result_data'] = [self.standardize(res) for res in resp['result_data']]
return resp | [
"def",
"person_details",
"(",
"self",
",",
"person_id",
",",
"standardize",
"=",
"False",
")",
":",
"resp",
"=",
"self",
".",
"_request",
"(",
"path",
".",
"join",
"(",
"ENDPOINTS",
"[",
"'DETAILS'",
"]",
",",
"person_id",
")",
")",
"if",
"standardize",
":",
"resp",
"[",
"'result_data'",
"]",
"=",
"[",
"self",
".",
"standardize",
"(",
"res",
")",
"for",
"res",
"in",
"resp",
"[",
"'result_data'",
"]",
"]",
"return",
"resp"
] | Get a detailed person object
:param person_id:
String corresponding to the person's id.
>>> instructor = d.person_details('jhs878sfd03b38b0d463b16320b5e438') | [
"Get",
"a",
"detailed",
"person",
"object"
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/directory.py#L83-L94 | train |
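
person_details takes the opaque person_id that search results carry. A sketch, again reusing d; the id is the one from the docstring example:

person_id = 'jhs878sfd03b38b0d463b16320b5e438'
detail = d.person_details(person_id, standardize=True)
print(detail['result_data'])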
geophysics-ubonn/reda | lib/reda/plotters/pseudoplots.py | plot_ps_extra | def plot_ps_extra(dataobj, key, **kwargs):
"""Create grouped pseudoplots for one or more time steps
Parameters
----------
dataobj: :class:`reda.containers.ERT`
An ERT container with loaded data
key: string
The column name to plot
subquery: string, optional
cbmin: float, optional
cbmax: float, optional
Examples
--------
>>> import reda.testing.containers
>>> ert = reda.testing.containers.ERTContainer_nr
>>> import reda.plotters.pseudoplots as PS
>>> fig = PS.plot_ps_extra(ert, key='r')
"""
if isinstance(dataobj, pd.DataFrame):
df_raw = dataobj
else:
df_raw = dataobj.data
if kwargs.get('subquery', False):
df = df_raw.query(kwargs.get('subquery'))
else:
df = df_raw
def fancyfy(axes, N):
for ax in axes[0:-1, :].flat:
ax.set_xlabel('')
for ax in axes[:, 1:].flat:
ax.set_ylabel('')
g = df.groupby('timestep')
N = len(g.groups.keys())
nrx = min((N, 5))
nry = int(np.ceil(N / nrx))
# the sizes are heuristics [inches]
sizex = nrx * 3
sizey = nry * 4 - 1
fig, axes = plt.subplots(
nry, nrx,
sharex=True,
sharey=True,
figsize=(sizex, sizey),
)
axes = np.atleast_2d(axes)
cbs = []
for ax, (name, group) in zip(axes.flat, g):
fig1, axes1, cb1 = plot_pseudosection_type2(
group,
key,
ax=ax,
log10=False,
cbmin=kwargs.get('cbmin', None),
cbmax=kwargs.get('cbmax', None),
)
cbs.append(cb1)
ax.set_title('timestep: {0}'.format(int(name)))
ax.xaxis.set_ticks_position('bottom')
ax.set_aspect('equal')
for cb in np.array(cbs).reshape(axes.shape)[:, 0:-1].flat:
cb.ax.set_visible(False)
fancyfy(axes, N)
fig.tight_layout()
return fig | python | def plot_ps_extra(dataobj, key, **kwargs):
"""Create grouped pseudoplots for one or more time steps
Parameters
----------
dataobj: :class:`reda.containers.ERT`
An ERT container with loaded data
key: string
The column name to plot
subquery: string, optional
cbmin: float, optional
cbmax: float, optional
Examples
--------
>>> import reda.testing.containers
>>> ert = reda.testing.containers.ERTContainer_nr
>>> import reda.plotters.pseudoplots as PS
>>> fig = PS.plot_ps_extra(ert, key='r')
"""
if isinstance(dataobj, pd.DataFrame):
df_raw = dataobj
else:
df_raw = dataobj.data
if kwargs.get('subquery', False):
df = df_raw.query(kwargs.get('subquery'))
else:
df = df_raw
def fancyfy(axes, N):
for ax in axes[0:-1, :].flat:
ax.set_xlabel('')
for ax in axes[:, 1:].flat:
ax.set_ylabel('')
g = df.groupby('timestep')
N = len(g.groups.keys())
nrx = min((N, 5))
nry = int(np.ceil(N / nrx))
# the sizes are heuristics [inches]
sizex = nrx * 3
sizey = nry * 4 - 1
fig, axes = plt.subplots(
nry, nrx,
sharex=True,
sharey=True,
figsize=(sizex, sizey),
)
axes = np.atleast_2d(axes)
cbs = []
for ax, (name, group) in zip(axes.flat, g):
fig1, axes1, cb1 = plot_pseudosection_type2(
group,
key,
ax=ax,
log10=False,
cbmin=kwargs.get('cbmin', None),
cbmax=kwargs.get('cbmax', None),
)
cbs.append(cb1)
ax.set_title('timestep: {0}'.format(int(name)))
ax.xaxis.set_ticks_position('bottom')
ax.set_aspect('equal')
for cb in np.array(cbs).reshape(axes.shape)[:, 0:-1].flat:
cb.ax.set_visible(False)
fancyfy(axes, N)
fig.tight_layout()
return fig | [
"def",
"plot_ps_extra",
"(",
"dataobj",
",",
"key",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"dataobj",
",",
"pd",
".",
"DataFrame",
")",
":",
"df_raw",
"=",
"dataobj",
"else",
":",
"df_raw",
"=",
"dataobj",
".",
"data",
"if",
"kwargs",
".",
"get",
"(",
"'subquery'",
",",
"False",
")",
":",
"df",
"=",
"df_raw",
".",
"query",
"(",
"kwargs",
".",
"get",
"(",
"'subquery'",
")",
")",
"else",
":",
"df",
"=",
"df_raw",
"def",
"fancyfy",
"(",
"axes",
",",
"N",
")",
":",
"for",
"ax",
"in",
"axes",
"[",
"0",
":",
"-",
"1",
",",
":",
"]",
".",
"flat",
":",
"ax",
".",
"set_xlabel",
"(",
"''",
")",
"for",
"ax",
"in",
"axes",
"[",
":",
",",
"1",
":",
"]",
".",
"flat",
":",
"ax",
".",
"set_ylabel",
"(",
"''",
")",
"g",
"=",
"df",
".",
"groupby",
"(",
"'timestep'",
")",
"N",
"=",
"len",
"(",
"g",
".",
"groups",
".",
"keys",
"(",
")",
")",
"nrx",
"=",
"min",
"(",
"(",
"N",
",",
"5",
")",
")",
"nry",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"N",
"/",
"nrx",
")",
")",
"# the sizes are heuristics [inches]",
"sizex",
"=",
"nrx",
"*",
"3",
"sizey",
"=",
"nry",
"*",
"4",
"-",
"1",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"nry",
",",
"nrx",
",",
"sharex",
"=",
"True",
",",
"sharey",
"=",
"True",
",",
"figsize",
"=",
"(",
"sizex",
",",
"sizey",
")",
",",
")",
"axes",
"=",
"np",
".",
"atleast_2d",
"(",
"axes",
")",
"cbs",
"=",
"[",
"]",
"for",
"ax",
",",
"(",
"name",
",",
"group",
")",
"in",
"zip",
"(",
"axes",
".",
"flat",
",",
"g",
")",
":",
"fig1",
",",
"axes1",
",",
"cb1",
"=",
"plot_pseudosection_type2",
"(",
"group",
",",
"key",
",",
"ax",
"=",
"ax",
",",
"log10",
"=",
"False",
",",
"cbmin",
"=",
"kwargs",
".",
"get",
"(",
"'cbmin'",
",",
"None",
")",
",",
"cbmax",
"=",
"kwargs",
".",
"get",
"(",
"'cbmax'",
",",
"None",
")",
",",
")",
"cbs",
".",
"append",
"(",
"cb1",
")",
"ax",
".",
"set_title",
"(",
"'timestep: {0}'",
".",
"format",
"(",
"int",
"(",
"name",
")",
")",
")",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'bottom'",
")",
"ax",
".",
"set_aspect",
"(",
"'equal'",
")",
"for",
"cb",
"in",
"np",
".",
"array",
"(",
"cbs",
")",
".",
"reshape",
"(",
"axes",
".",
"shape",
")",
"[",
":",
",",
"0",
":",
"-",
"1",
"]",
".",
"flat",
":",
"cb",
".",
"ax",
".",
"set_visible",
"(",
"False",
")",
"fancyfy",
"(",
"axes",
",",
"N",
")",
"fig",
".",
"tight_layout",
"(",
")",
"return",
"fig"
] | Create grouped pseudoplots for one or more time steps
Parameters
----------
dataobj: :class:`reda.containers.ERT`
An ERT container with loaded data
key: string
The column name to plot
subquery: string, optional
cbmin: float, optional
cbmax: float, optional
Examples
--------
>>> import reda.testing.containers
>>> ert = reda.testing.containers.ERTContainer_nr
>>> import reda.plotters.pseudoplots as PS
>>> fig = PS.plot_ps_extra(ert, key='r') | [
"Create",
"grouped",
"pseudoplots",
"for",
"one",
"or",
"more",
"time",
"steps"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/pseudoplots.py#L314-L385 | train |
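
The doctest above covers the default call; this sketch exercises the optional keyword arguments (subquery filters rows via DataFrame.query, cbmin/cbmax clamp the color scale). The values are illustrative:

import reda.testing.containers
import reda.plotters.pseudoplots as PS

ert = reda.testing.containers.ERTContainer_nr
fig = PS.plot_ps_extra(ert, key='r', subquery='r > 0', cbmin=0.0, cbmax=50.0)
fig.savefig('ps_extra.png')  # hypothetical output file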
fuzeman/PyUPnP | pyupnp/util.py | twisted_absolute_path | def twisted_absolute_path(path, request):
"""Hack to fix twisted not accepting absolute URIs"""
parsed = urlparse.urlparse(request.uri)
if parsed.scheme != '':
path_parts = parsed.path.lstrip('/').split('/')
request.prepath = path_parts[0:1]
request.postpath = path_parts[1:]
path = request.prepath[0]
return path, request | python | def twisted_absolute_path(path, request):
"""Hack to fix twisted not accepting absolute URIs"""
parsed = urlparse.urlparse(request.uri)
if parsed.scheme != '':
path_parts = parsed.path.lstrip('/').split('/')
request.prepath = path_parts[0:1]
request.postpath = path_parts[1:]
path = request.prepath[0]
return path, request | [
"def",
"twisted_absolute_path",
"(",
"path",
",",
"request",
")",
":",
"parsed",
"=",
"urlparse",
".",
"urlparse",
"(",
"request",
".",
"uri",
")",
"if",
"parsed",
".",
"scheme",
"!=",
"''",
":",
"path_parts",
"=",
"parsed",
".",
"path",
".",
"lstrip",
"(",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"request",
".",
"prepath",
"=",
"path_parts",
"[",
"0",
":",
"1",
"]",
"request",
".",
"postpath",
"=",
"path_parts",
"[",
"1",
":",
"]",
"path",
"=",
"request",
".",
"prepath",
"[",
"0",
"]",
"return",
"path",
",",
"request"
] | Hack to fix twisted not accepting absolute URIs | [
"Hack",
"to",
"fix",
"twisted",
"not",
"accepting",
"absolute",
"URIs"
] | 6dea64be299952346a14300ab6cc7dac42736433 | https://github.com/fuzeman/PyUPnP/blob/6dea64be299952346a14300ab6cc7dac42736433/pyupnp/util.py#L24-L32 | train |
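
A minimal sketch with a stand-in request object; a real twisted.web Request exposes the same uri/prepath/postpath attributes used above:

class FakeRequest(object):
    def __init__(self, uri):
        self.uri = uri
        self.prepath = []
        self.postpath = []

req = FakeRequest('http://192.168.1.5:8080/device/description.xml')
path, req = twisted_absolute_path('ignored', req)
# path == 'device', req.prepath == ['device'], req.postpath == ['description.xml']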
geophysics-ubonn/reda | lib/reda/importers/legacy/eit40.py | _add_rhoa | def _add_rhoa(df, spacing):
"""a simple wrapper to compute K factors and add rhoa
"""
df['k'] = redaK.compute_K_analytical(df, spacing=spacing)
df['rho_a'] = df['r'] * df['k']
if 'Zt' in df.columns:
df['rho_a_complex'] = df['Zt'] * df['k']
return df | python | def _add_rhoa(df, spacing):
"""a simple wrapper to compute K factors and add rhoa
"""
df['k'] = redaK.compute_K_analytical(df, spacing=spacing)
df['rho_a'] = df['r'] * df['k']
if 'Zt' in df.columns:
df['rho_a_complex'] = df['Zt'] * df['k']
return df | [
"def",
"_add_rhoa",
"(",
"df",
",",
"spacing",
")",
":",
"df",
"[",
"'k'",
"]",
"=",
"redaK",
".",
"compute_K_analytical",
"(",
"df",
",",
"spacing",
"=",
"spacing",
")",
"df",
"[",
"'rho_a'",
"]",
"=",
"df",
"[",
"'r'",
"]",
"*",
"df",
"[",
"'k'",
"]",
"if",
"'Zt'",
"in",
"df",
".",
"columns",
":",
"df",
"[",
"'rho_a_complex'",
"]",
"=",
"df",
"[",
"'Zt'",
"]",
"*",
"df",
"[",
"'k'",
"]",
"return",
"df"
] | a simple wrapper to compute K factors and add rhoa | [
"a",
"simple",
"wrapper",
"to",
"compute",
"K",
"factors",
"and",
"add",
"rhoa"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/legacy/eit40.py#L52-L59 | train |
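
A sketch of the helper on a one-row DataFrame. That compute_K_analytical expects the electrode columns a, b, m, n is an assumption based on the wider reda API:

import pandas as pd

# hypothetical four-point reading: electrode numbers plus a resistance [Ohm]
df = pd.DataFrame({'a': [1], 'b': [2], 'm': [3], 'n': [4], 'r': [0.05]})
df = _add_rhoa(df, spacing=1.0)
print(df[['k', 'rho_a']])  # geometric factor and apparent resistivity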
Metatab/geoid | geoid/util.py | simplify | def simplify(geoids):
"""
    Given a list of geoids, reduce it to a simpler set. If there are five or more geoids at one summary level,
convert them to a single geoid at the higher level.
:param geoids:
:return:
"""
from collections import defaultdict
aggregated = defaultdict(set)
d = {}
for g in geoids:
if not bool(g):
continue
av = g.allval()
d[av] = None
aggregated[av].add(g)
compiled = set()
for k, v in aggregated.items():
if len(v) >= 5:
compiled.add(k)
compiled.add(k.promote())
else:
compiled |= v
return compiled | python | def simplify(geoids):
"""
    Given a list of geoids, reduce it to a simpler set. If there are five or more geoids at one summary level,
convert them to a single geoid at the higher level.
:param geoids:
:return:
"""
from collections import defaultdict
aggregated = defaultdict(set)
d = {}
for g in geoids:
if not bool(g):
continue
av = g.allval()
d[av] = None
aggregated[av].add(g)
compiled = set()
for k, v in aggregated.items():
if len(v) >= 5:
compiled.add(k)
compiled.add(k.promote())
else:
compiled |= v
return compiled | [
"def",
"simplify",
"(",
"geoids",
")",
":",
"from",
"collections",
"import",
"defaultdict",
"aggregated",
"=",
"defaultdict",
"(",
"set",
")",
"d",
"=",
"{",
"}",
"for",
"g",
"in",
"geoids",
":",
"if",
"not",
"bool",
"(",
"g",
")",
":",
"continue",
"av",
"=",
"g",
".",
"allval",
"(",
")",
"d",
"[",
"av",
"]",
"=",
"None",
"aggregated",
"[",
"av",
"]",
".",
"add",
"(",
"g",
")",
"compiled",
"=",
"set",
"(",
")",
"for",
"k",
",",
"v",
"in",
"aggregated",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"v",
")",
">=",
"5",
":",
"compiled",
".",
"add",
"(",
"k",
")",
"compiled",
".",
"add",
"(",
"k",
".",
"promote",
"(",
")",
")",
"else",
":",
"compiled",
"|=",
"v",
"return",
"compiled"
] | Given a list of geoids, reduce it to a simpler set. If there are five or more geoids at one summary level,
convert them to a single geoid at the higher level.
:param geoids:
:return: | [
"Given",
"a",
"list",
"of",
"geoids",
"reduce",
"it",
"to",
"a",
"simpler",
"set",
".",
"If",
"there",
"are",
"five",
"or",
"more",
"geoids",
"at",
"one",
"summary",
"level",
"convert",
"them",
"to",
"a",
"single",
"geoid",
"at",
"the",
"higher",
"level",
"."
] | 4b7769406b00e59376fb6046b42a2f8ed706b33b | https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/util.py#L3-L38 | train |
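
A sketch assuming the package's ACS geoid classes; County(state, county) and the allval()/promote() methods it carries are all that simplify() relies on:

from geoid.acs import County  # assumed class from the same package

counties = [County(6, c) for c in (1, 3, 5, 7, 9)]  # five counties in one state
reduced = simplify(counties)
# five or more geoids at one level collapse to their all-value geoid plus the
# promoted, higher-level geoid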
Metatab/geoid | geoid/util.py | isimplify | def isimplify(geoids):
"""Iteratively simplify until the set stops getting smaller. """
s0 = list(geoids)
for i in range(10):
s1 = simplify(s0)
if len(s1) == len(s0):
return s1
s0 = s1 | python | def isimplify(geoids):
"""Iteratively simplify until the set stops getting smaller. """
s0 = list(geoids)
for i in range(10):
s1 = simplify(s0)
if len(s1) == len(s0):
return s1
s0 = s1 | [
"def",
"isimplify",
"(",
"geoids",
")",
":",
"s0",
"=",
"list",
"(",
"geoids",
")",
"for",
"i",
"in",
"range",
"(",
"10",
")",
":",
"s1",
"=",
"simplify",
"(",
"s0",
")",
"if",
"len",
"(",
"s1",
")",
"==",
"len",
"(",
"s0",
")",
":",
"return",
"s1",
"s0",
"=",
"s1"
] | Iteratively simplify until the set stops getting smaller. | [
"Iteratively",
"simplify",
"until",
"the",
"set",
"stops",
"getting",
"smaller",
"."
] | 4b7769406b00e59376fb6046b42a2f8ed706b33b | https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/util.py#L40-L51 | train |
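
isimplify just iterates the above until a pass stops shrinking the set (capped at ten passes):

stable = isimplify(counties)  # reusing the list from the simplify() sketch
# the loop exits as soon as another simplify() pass returns a set of the same size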
gtaylor/django-athumb | athumb/management/commands/athumb_regen_field.py | Command.regenerate_thumbs | def regenerate_thumbs(self):
"""
Handle re-generating the thumbnails. All this involves is reading the
original file, then saving the same exact thing. Kind of annoying, but
it's simple.
"""
Model = self.model
instances = Model.objects.all()
num_instances = instances.count()
# Filenames are keys in here, to help avoid re-genning something that
# we have already done.
regen_tracker = {}
counter = 1
for instance in instances:
file = getattr(instance, self.field)
if not file:
print "(%d/%d) ID: %d -- Skipped -- No file" % (counter,
num_instances,
instance.id)
counter += 1
continue
file_name = os.path.basename(file.name)
if regen_tracker.has_key(file_name):
print "(%d/%d) ID: %d -- Skipped -- Already re-genned %s" % (
counter,
num_instances,
instance.id,
file_name)
counter += 1
continue
# Keep them informed on the progress.
print "(%d/%d) ID: %d -- %s" % (counter, num_instances,
instance.id, file_name)
try:
fdat = file.read()
file.close()
del file.file
except IOError:
# Key didn't exist.
print "(%d/%d) ID %d -- Error -- File missing on S3" % (
counter,
num_instances,
instance.id)
counter += 1
continue
try:
file_contents = ContentFile(fdat)
except ValueError:
# This field has no file associated with it, skip it.
print "(%d/%d) ID %d -- Skipped -- No file on field)" % (
counter,
num_instances,
instance.id)
counter += 1
continue
# Saving pumps it back through the thumbnailer, if this is a
# ThumbnailField. If not, it's still pretty harmless.
try:
file.generate_thumbs(file_name, file_contents)
except IOError, e:
print "(%d/%d) ID %d -- Error -- Image may be corrupt)" % (
counter,
num_instances,
instance.id)
counter += 1
continue
regen_tracker[file_name] = True
counter += 1 | python | def regenerate_thumbs(self):
"""
Handle re-generating the thumbnails. All this involves is reading the
original file, then saving the same exact thing. Kind of annoying, but
it's simple.
"""
Model = self.model
instances = Model.objects.all()
num_instances = instances.count()
# Filenames are keys in here, to help avoid re-genning something that
# we have already done.
regen_tracker = {}
counter = 1
for instance in instances:
file = getattr(instance, self.field)
if not file:
print "(%d/%d) ID: %d -- Skipped -- No file" % (counter,
num_instances,
instance.id)
counter += 1
continue
file_name = os.path.basename(file.name)
if regen_tracker.has_key(file_name):
print "(%d/%d) ID: %d -- Skipped -- Already re-genned %s" % (
counter,
num_instances,
instance.id,
file_name)
counter += 1
continue
# Keep them informed on the progress.
print "(%d/%d) ID: %d -- %s" % (counter, num_instances,
instance.id, file_name)
try:
fdat = file.read()
file.close()
del file.file
except IOError:
# Key didn't exist.
print "(%d/%d) ID %d -- Error -- File missing on S3" % (
counter,
num_instances,
instance.id)
counter += 1
continue
try:
file_contents = ContentFile(fdat)
except ValueError:
# This field has no file associated with it, skip it.
print "(%d/%d) ID %d -- Skipped -- No file on field)" % (
counter,
num_instances,
instance.id)
counter += 1
continue
# Saving pumps it back through the thumbnailer, if this is a
# ThumbnailField. If not, it's still pretty harmless.
try:
file.generate_thumbs(file_name, file_contents)
except IOError, e:
print "(%d/%d) ID %d -- Error -- Image may be corrupt)" % (
counter,
num_instances,
instance.id)
counter += 1
continue
regen_tracker[file_name] = True
counter += 1 | [
"def",
"regenerate_thumbs",
"(",
"self",
")",
":",
"Model",
"=",
"self",
".",
"model",
"instances",
"=",
"Model",
".",
"objects",
".",
"all",
"(",
")",
"num_instances",
"=",
"instances",
".",
"count",
"(",
")",
"# Filenames are keys in here, to help avoid re-genning something that",
"# we have already done.",
"regen_tracker",
"=",
"{",
"}",
"counter",
"=",
"1",
"for",
"instance",
"in",
"instances",
":",
"file",
"=",
"getattr",
"(",
"instance",
",",
"self",
".",
"field",
")",
"if",
"not",
"file",
":",
"print",
"\"(%d/%d) ID: %d -- Skipped -- No file\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
")",
"counter",
"+=",
"1",
"continue",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file",
".",
"name",
")",
"if",
"regen_tracker",
".",
"has_key",
"(",
"file_name",
")",
":",
"print",
"\"(%d/%d) ID: %d -- Skipped -- Already re-genned %s\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
",",
"file_name",
")",
"counter",
"+=",
"1",
"continue",
"# Keep them informed on the progress.",
"print",
"\"(%d/%d) ID: %d -- %s\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
",",
"file_name",
")",
"try",
":",
"fdat",
"=",
"file",
".",
"read",
"(",
")",
"file",
".",
"close",
"(",
")",
"del",
"file",
".",
"file",
"except",
"IOError",
":",
"# Key didn't exist.",
"print",
"\"(%d/%d) ID %d -- Error -- File missing on S3\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
")",
"counter",
"+=",
"1",
"continue",
"try",
":",
"file_contents",
"=",
"ContentFile",
"(",
"fdat",
")",
"except",
"ValueError",
":",
"# This field has no file associated with it, skip it.",
"print",
"\"(%d/%d) ID %d -- Skipped -- No file on field)\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
")",
"counter",
"+=",
"1",
"continue",
"# Saving pumps it back through the thumbnailer, if this is a",
"# ThumbnailField. If not, it's still pretty harmless.",
"try",
":",
"file",
".",
"generate_thumbs",
"(",
"file_name",
",",
"file_contents",
")",
"except",
"IOError",
",",
"e",
":",
"print",
"\"(%d/%d) ID %d -- Error -- Image may be corrupt)\"",
"%",
"(",
"counter",
",",
"num_instances",
",",
"instance",
".",
"id",
")",
"counter",
"+=",
"1",
"continue",
"regen_tracker",
"[",
"file_name",
"]",
"=",
"True",
"counter",
"+=",
"1"
] | Handle re-generating the thumbnails. All this involves is reading the
original file, then saving the same exact thing. Kind of annoying, but
it's simple. | [
"Handle",
"re",
"-",
"generating",
"the",
"thumbnails",
".",
"All",
"this",
"involves",
"is",
"reading",
"the",
"original",
"file",
"then",
"saving",
"the",
"same",
"exact",
"thing",
".",
"Kind",
"of",
"annoying",
"but",
"it",
"s",
"simple",
"."
] | 69261ace0dff81e33156a54440874456a7b38dfb | https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/management/commands/athumb_regen_field.py#L43-L119 | train |
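
Only the worker method is shown in this record; the command would normally be run through Django's management machinery. The arguments below are hypothetical, since the handle() that parses them is not part of the record:

from django.core.management import call_command

call_command('athumb_regen_field', 'myapp.Photo', 'image')  # hypothetical model/field args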
kaustavdm/pyAvroPhonetic | pyavrophonetic/utils/count.py | count_vowels | def count_vowels(text):
"""Count number of occurrences of vowels in a given string"""
count = 0
for i in text:
if i.lower() in config.AVRO_VOWELS:
count += 1
return count | python | def count_vowels(text):
"""Count number of occurrences of vowels in a given string"""
count = 0
for i in text:
if i.lower() in config.AVRO_VOWELS:
count += 1
return count | [
"def",
"count_vowels",
"(",
"text",
")",
":",
"count",
"=",
"0",
"for",
"i",
"in",
"text",
":",
"if",
"i",
".",
"lower",
"(",
")",
"in",
"config",
".",
"AVRO_VOWELS",
":",
"count",
"+=",
"1",
"return",
"count"
] | Count number of occurrences of vowels in a given string | [
"Count",
"number",
"of",
"occurrences",
"of",
"vowels",
"in",
"a",
"given",
"string"
] | 26b7d567d8db025f2cac4de817e716390d7ac337 | https://github.com/kaustavdm/pyAvroPhonetic/blob/26b7d567d8db025f2cac4de817e716390d7ac337/pyavrophonetic/utils/count.py#L31-L37 | train |
kaustavdm/pyAvroPhonetic | pyavrophonetic/utils/count.py | count_consonants | def count_consonants(text):
"""Count number of occurrences of consonants in a given string"""
count = 0
for i in text:
if i.lower() in config.AVRO_CONSONANTS:
count += 1
return count | python | def count_consonants(text):
"""Count number of occurrences of consonants in a given string"""
count = 0
for i in text:
if i.lower() in config.AVRO_CONSONANTS:
count += 1
return count | [
"def",
"count_consonants",
"(",
"text",
")",
":",
"count",
"=",
"0",
"for",
"i",
"in",
"text",
":",
"if",
"i",
".",
"lower",
"(",
")",
"in",
"config",
".",
"AVRO_CONSONANTS",
":",
"count",
"+=",
"1",
"return",
"count"
] | Count number of occurrences of consonants in a given string | [
"Count",
"number",
"of",
"occurrences",
"of",
"consonants",
"in",
"a",
"given",
"string"
] | 26b7d567d8db025f2cac4de817e716390d7ac337 | https://github.com/kaustavdm/pyAvroPhonetic/blob/26b7d567d8db025f2cac4de817e716390d7ac337/pyavrophonetic/utils/count.py#L39-L45 | train |
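
Both helpers classify characters against the Avro sets in config, so the exact counts depend on config.AVRO_VOWELS and config.AVRO_CONSONANTS. A sketch covering this record and the previous one, with the import path taken from the record:

from pyavrophonetic.utils import count

text = 'bangla'
nv = count.count_vowels(text)      # characters found in config.AVRO_VOWELS
nc = count.count_consonants(text)  # characters found in config.AVRO_CONSONANTS
print(nv, nc)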
geophysics-ubonn/reda | lib/reda/plotters/plots2d.py | _pseudodepths_wenner | def _pseudodepths_wenner(configs, spacing=1, grid=None):
"""Given distances between electrodes, compute Wenner pseudo
depths for the provided configuration
The pseudodepth is computed after Roy & Apparao, 1971, as 0.11 times
the distance between the two outermost electrodes. It's not really
clear why the Wenner depths are different from the Dipole-Dipole
depths, given the fact that Wenner configurations are a complete subset
of the Dipole-Dipole configurations.
"""
if grid is None:
xpositions = (configs - 1) * spacing
else:
xpositions = grid.get_electrode_positions()[configs - 1, 0]
z = np.abs(np.max(xpositions, axis=1) - np.min(xpositions, axis=1)) * -0.11
x = np.mean(xpositions, axis=1)
return x, z | python | def _pseudodepths_wenner(configs, spacing=1, grid=None):
"""Given distances between electrodes, compute Wenner pseudo
depths for the provided configuration
The pseudodepth is computed after Roy & Apparao, 1971, as 0.11 times
the distance between the two outermost electrodes. It's not really
clear why the Wenner depths are different from the Dipole-Dipole
depths, given the fact that Wenner configurations are a complete subset
of the Dipole-Dipole configurations.
"""
if grid is None:
xpositions = (configs - 1) * spacing
else:
xpositions = grid.get_electrode_positions()[configs - 1, 0]
z = np.abs(np.max(xpositions, axis=1) - np.min(xpositions, axis=1)) * -0.11
x = np.mean(xpositions, axis=1)
return x, z | [
"def",
"_pseudodepths_wenner",
"(",
"configs",
",",
"spacing",
"=",
"1",
",",
"grid",
"=",
"None",
")",
":",
"if",
"grid",
"is",
"None",
":",
"xpositions",
"=",
"(",
"configs",
"-",
"1",
")",
"*",
"spacing",
"else",
":",
"xpositions",
"=",
"grid",
".",
"get_electrode_positions",
"(",
")",
"[",
"configs",
"-",
"1",
",",
"0",
"]",
"z",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"max",
"(",
"xpositions",
",",
"axis",
"=",
"1",
")",
"-",
"np",
".",
"min",
"(",
"xpositions",
",",
"axis",
"=",
"1",
")",
")",
"*",
"-",
"0.11",
"x",
"=",
"np",
".",
"mean",
"(",
"xpositions",
",",
"axis",
"=",
"1",
")",
"return",
"x",
",",
"z"
] | Given distances between electrodes, compute Wenner pseudo
depths for the provided configuration
The pseudodepth is computed after Roy & Apparao, 1971, as 0.11 times
the distance between the two outermost electrodes. It's not really
clear why the Wenner depths are different from the Dipole-Dipole
depths, given the fact that Wenner configurations are a complete subset
of the Dipole-Dipole configurations. | [
"Given",
"distances",
"between",
"electrodes",
"compute",
"Wenner",
"pseudo",
"depths",
"for",
"the",
"provided",
"configuration"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/plots2d.py#L13-L31 | train |
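
A worked example: for one Wenner spread over electrodes 1..4 at unit spacing, the outermost electrodes sit at 0 and 3, so the pseudodepth is 3 * -0.11:

import numpy as np

configs = np.array(((1, 4, 2, 3),))  # one spread using electrodes 1, 2, 3, 4
x, z = _pseudodepths_wenner(configs, spacing=1)
# x == array([1.5]), z == array([-0.33])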
geophysics-ubonn/reda | lib/reda/plotters/plots2d.py | plot_pseudodepths | def plot_pseudodepths(configs, nr_electrodes, spacing=1, grid=None,
ctypes=None, dd_merge=False, **kwargs):
"""Plot pseudodepths for the measurements. If grid is given, then the
actual electrode positions are used, and the parameter 'spacing' is
    ignored.
Parameters
----------
configs: :class:`numpy.ndarray`
Nx4 array containing the quadrupoles for different measurements
nr_electrodes: int
The overall number of electrodes of the dataset. This is used to plot
the surface electrodes
spacing: float, optional
assumed distance between electrodes. Default=1
grid: crtomo.grid.crt_grid instance, optional
grid instance. Used to infer real electrode positions
ctypes: list of strings, optional
a list of configuration types that will be plotted. All
configurations that can not be sorted into these types will not be
plotted! Possible types:
* dd
* schlumberger
dd_merge: bool, optional
if True, merge all skips. Otherwise, generate individual plots for
each skip
Returns
-------
figs: matplotlib.figure.Figure instance or list of Figure instances
if only one type was plotted, then the figure instance is returned.
Otherwise, return a list of figure instances.
    axes: axes object or list of axes objects
plot axes
Examples
--------
.. plot::
:include-source:
from reda.plotters.plots2d import plot_pseudodepths
# define a few measurements
import numpy as np
configs = np.array((
(1, 2, 4, 3),
(1, 2, 5, 4),
(1, 2, 6, 5),
(2, 3, 5, 4),
(2, 3, 6, 5),
(3, 4, 6, 5),
))
# plot
fig, axes = plot_pseudodepths(configs, nr_electrodes=6, spacing=1,
ctypes=['dd', ])
.. plot::
:include-source:
from reda.plotters.plots2d import plot_pseudodepths
# define a few measurements
import numpy as np
configs = np.array((
(4, 7, 5, 6),
(3, 8, 5, 6),
(2, 9, 5, 6),
(1, 10, 5, 6),
))
# plot
fig, axes = plot_pseudodepths(configs, nr_electrodes=10, spacing=1,
ctypes=['schlumberger', ])
"""
# for each configuration type we have different ways of computing
# pseudodepths
pseudo_d_functions = {
'dd': _pseudodepths_dd_simple,
'schlumberger': _pseudodepths_schlumberger,
'wenner': _pseudodepths_wenner,
}
titles = {
'dd': 'dipole-dipole configurations',
'schlumberger': 'Schlumberger configurations',
'wenner': 'Wenner configurations',
}
# sort the configurations into the various types of configurations
only_types = ctypes or ['dd', ]
results = fT.filter(configs, settings={'only_types': only_types, })
# loop through all measurement types
figs = []
axes = []
for key in sorted(results.keys()):
print('plotting: ', key)
if key == 'not_sorted':
continue
index_dict = results[key]
# it is possible that we want to generate multiple plots for one
# type of measurement, i.e., to separate skips of dipole-dipole
# measurements. Therefore we generate two lists:
# 1) list of list of indices to plot
# 2) corresponding labels
if key == 'dd' and not dd_merge:
plot_list = []
labels_add = []
for skip in sorted(index_dict.keys()):
plot_list.append(index_dict[skip])
labels_add.append(' - skip {0}'.format(skip))
else:
# merge all indices
plot_list = [np.hstack(index_dict.values()), ]
print('schlumberger', plot_list)
labels_add = ['', ]
grid = None
# generate plots
for indices, label_add in zip(plot_list, labels_add):
if len(indices) == 0:
continue
ddc = configs[indices]
px, pz = pseudo_d_functions[key](ddc, spacing, grid)
fig, ax = plt.subplots(figsize=(15 / 2.54, 5 / 2.54))
ax.scatter(px, pz, color='k', alpha=0.5)
# plot electrodes
if grid is not None:
electrodes = grid.get_electrode_positions()
ax.scatter(
electrodes[:, 0],
electrodes[:, 1],
color='b',
label='electrodes', )
else:
ax.scatter(
np.arange(0, nr_electrodes) * spacing,
np.zeros(nr_electrodes),
color='b',
label='electrodes', )
ax.set_title(titles[key] + label_add)
ax.set_aspect('equal')
ax.set_xlabel('x [m]')
            ax.set_ylabel('z [m]')
fig.tight_layout()
figs.append(fig)
axes.append(ax)
if len(figs) == 1:
return figs[0], axes[0]
else:
return figs, axes | python | def plot_pseudodepths(configs, nr_electrodes, spacing=1, grid=None,
ctypes=None, dd_merge=False, **kwargs):
"""Plot pseudodepths for the measurements. If grid is given, then the
actual electrode positions are used, and the parameter 'spacing' is
    ignored.
Parameters
----------
configs: :class:`numpy.ndarray`
Nx4 array containing the quadrupoles for different measurements
nr_electrodes: int
The overall number of electrodes of the dataset. This is used to plot
the surface electrodes
spacing: float, optional
assumed distance between electrodes. Default=1
grid: crtomo.grid.crt_grid instance, optional
grid instance. Used to infer real electrode positions
ctypes: list of strings, optional
a list of configuration types that will be plotted. All
configurations that can not be sorted into these types will not be
plotted! Possible types:
* dd
* schlumberger
dd_merge: bool, optional
if True, merge all skips. Otherwise, generate individual plots for
each skip
Returns
-------
figs: matplotlib.figure.Figure instance or list of Figure instances
if only one type was plotted, then the figure instance is returned.
Otherwise, return a list of figure instances.
    axes: axes object or list of axes objects
plot axes
Examples
--------
.. plot::
:include-source:
from reda.plotters.plots2d import plot_pseudodepths
# define a few measurements
import numpy as np
configs = np.array((
(1, 2, 4, 3),
(1, 2, 5, 4),
(1, 2, 6, 5),
(2, 3, 5, 4),
(2, 3, 6, 5),
(3, 4, 6, 5),
))
# plot
fig, axes = plot_pseudodepths(configs, nr_electrodes=6, spacing=1,
ctypes=['dd', ])
.. plot::
:include-source:
from reda.plotters.plots2d import plot_pseudodepths
# define a few measurements
import numpy as np
configs = np.array((
(4, 7, 5, 6),
(3, 8, 5, 6),
(2, 9, 5, 6),
(1, 10, 5, 6),
))
# plot
fig, axes = plot_pseudodepths(configs, nr_electrodes=10, spacing=1,
ctypes=['schlumberger', ])
"""
# for each configuration type we have different ways of computing
# pseudodepths
pseudo_d_functions = {
'dd': _pseudodepths_dd_simple,
'schlumberger': _pseudodepths_schlumberger,
'wenner': _pseudodepths_wenner,
}
titles = {
'dd': 'dipole-dipole configurations',
'schlumberger': 'Schlumberger configurations',
'wenner': 'Wenner configurations',
}
# sort the configurations into the various types of configurations
only_types = ctypes or ['dd', ]
results = fT.filter(configs, settings={'only_types': only_types, })
# loop through all measurement types
figs = []
axes = []
for key in sorted(results.keys()):
print('plotting: ', key)
if key == 'not_sorted':
continue
index_dict = results[key]
# it is possible that we want to generate multiple plots for one
# type of measurement, i.e., to separate skips of dipole-dipole
# measurements. Therefore we generate two lists:
# 1) list of list of indices to plot
# 2) corresponding labels
if key == 'dd' and not dd_merge:
plot_list = []
labels_add = []
for skip in sorted(index_dict.keys()):
plot_list.append(index_dict[skip])
labels_add.append(' - skip {0}'.format(skip))
else:
# merge all indices
plot_list = [np.hstack(index_dict.values()), ]
print('schlumberger', plot_list)
labels_add = ['', ]
grid = None
# generate plots
for indices, label_add in zip(plot_list, labels_add):
if len(indices) == 0:
continue
ddc = configs[indices]
px, pz = pseudo_d_functions[key](ddc, spacing, grid)
fig, ax = plt.subplots(figsize=(15 / 2.54, 5 / 2.54))
ax.scatter(px, pz, color='k', alpha=0.5)
# plot electrodes
if grid is not None:
electrodes = grid.get_electrode_positions()
ax.scatter(
electrodes[:, 0],
electrodes[:, 1],
color='b',
label='electrodes', )
else:
ax.scatter(
np.arange(0, nr_electrodes) * spacing,
np.zeros(nr_electrodes),
color='b',
label='electrodes', )
ax.set_title(titles[key] + label_add)
ax.set_aspect('equal')
ax.set_xlabel('x [m]')
            ax.set_ylabel('z [m]')
fig.tight_layout()
figs.append(fig)
axes.append(ax)
if len(figs) == 1:
return figs[0], axes[0]
else:
return figs, axes | [
"def",
"plot_pseudodepths",
"(",
"configs",
",",
"nr_electrodes",
",",
"spacing",
"=",
"1",
",",
"grid",
"=",
"None",
",",
"ctypes",
"=",
"None",
",",
"dd_merge",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# for each configuration type we have different ways of computing",
"# pseudodepths",
"pseudo_d_functions",
"=",
"{",
"'dd'",
":",
"_pseudodepths_dd_simple",
",",
"'schlumberger'",
":",
"_pseudodepths_schlumberger",
",",
"'wenner'",
":",
"_pseudodepths_wenner",
",",
"}",
"titles",
"=",
"{",
"'dd'",
":",
"'dipole-dipole configurations'",
",",
"'schlumberger'",
":",
"'Schlumberger configurations'",
",",
"'wenner'",
":",
"'Wenner configurations'",
",",
"}",
"# sort the configurations into the various types of configurations",
"only_types",
"=",
"ctypes",
"or",
"[",
"'dd'",
",",
"]",
"results",
"=",
"fT",
".",
"filter",
"(",
"configs",
",",
"settings",
"=",
"{",
"'only_types'",
":",
"only_types",
",",
"}",
")",
"# loop through all measurement types",
"figs",
"=",
"[",
"]",
"axes",
"=",
"[",
"]",
"for",
"key",
"in",
"sorted",
"(",
"results",
".",
"keys",
"(",
")",
")",
":",
"print",
"(",
"'plotting: '",
",",
"key",
")",
"if",
"key",
"==",
"'not_sorted'",
":",
"continue",
"index_dict",
"=",
"results",
"[",
"key",
"]",
"# it is possible that we want to generate multiple plots for one",
"# type of measurement, i.e., to separate skips of dipole-dipole",
"# measurements. Therefore we generate two lists:",
"# 1) list of list of indices to plot",
"# 2) corresponding labels",
"if",
"key",
"==",
"'dd'",
"and",
"not",
"dd_merge",
":",
"plot_list",
"=",
"[",
"]",
"labels_add",
"=",
"[",
"]",
"for",
"skip",
"in",
"sorted",
"(",
"index_dict",
".",
"keys",
"(",
")",
")",
":",
"plot_list",
".",
"append",
"(",
"index_dict",
"[",
"skip",
"]",
")",
"labels_add",
".",
"append",
"(",
"' - skip {0}'",
".",
"format",
"(",
"skip",
")",
")",
"else",
":",
"# merge all indices",
"plot_list",
"=",
"[",
"np",
".",
"hstack",
"(",
"index_dict",
".",
"values",
"(",
")",
")",
",",
"]",
"print",
"(",
"'schlumberger'",
",",
"plot_list",
")",
"labels_add",
"=",
"[",
"''",
",",
"]",
"grid",
"=",
"None",
"# generate plots",
"for",
"indices",
",",
"label_add",
"in",
"zip",
"(",
"plot_list",
",",
"labels_add",
")",
":",
"if",
"len",
"(",
"indices",
")",
"==",
"0",
":",
"continue",
"ddc",
"=",
"configs",
"[",
"indices",
"]",
"px",
",",
"pz",
"=",
"pseudo_d_functions",
"[",
"key",
"]",
"(",
"ddc",
",",
"spacing",
",",
"grid",
")",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"(",
"15",
"/",
"2.54",
",",
"5",
"/",
"2.54",
")",
")",
"ax",
".",
"scatter",
"(",
"px",
",",
"pz",
",",
"color",
"=",
"'k'",
",",
"alpha",
"=",
"0.5",
")",
"# plot electrodes",
"if",
"grid",
"is",
"not",
"None",
":",
"electrodes",
"=",
"grid",
".",
"get_electrode_positions",
"(",
")",
"ax",
".",
"scatter",
"(",
"electrodes",
"[",
":",
",",
"0",
"]",
",",
"electrodes",
"[",
":",
",",
"1",
"]",
",",
"color",
"=",
"'b'",
",",
"label",
"=",
"'electrodes'",
",",
")",
"else",
":",
"ax",
".",
"scatter",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"nr_electrodes",
")",
"*",
"spacing",
",",
"np",
".",
"zeros",
"(",
"nr_electrodes",
")",
",",
"color",
"=",
"'b'",
",",
"label",
"=",
"'electrodes'",
",",
")",
"ax",
".",
"set_title",
"(",
"titles",
"[",
"key",
"]",
"+",
"label_add",
")",
"ax",
".",
"set_aspect",
"(",
"'equal'",
")",
"ax",
".",
"set_xlabel",
"(",
"'x [m]'",
")",
"ax",
".",
"set_ylabel",
"(",
"'x [z]'",
")",
"fig",
".",
"tight_layout",
"(",
")",
"figs",
".",
"append",
"(",
"fig",
")",
"axes",
".",
"append",
"(",
"ax",
")",
"if",
"len",
"(",
"figs",
")",
"==",
"1",
":",
"return",
"figs",
"[",
"0",
"]",
",",
"axes",
"[",
"0",
"]",
"else",
":",
"return",
"figs",
",",
"axes"
] | Plot pseudodepths for the measurements. If grid is given, then the
actual electrode positions are used, and the parameter 'spacing' is
ignored.
Parameters
----------
configs: :class:`numpy.ndarray`
Nx4 array containing the quadrupoles for different measurements
nr_electrodes: int
The overall number of electrodes of the dataset. This is used to plot
the surface electrodes
spacing: float, optional
assumed distance between electrodes. Default=1
grid: crtomo.grid.crt_grid instance, optional
grid instance. Used to infer real electrode positions
ctypes: list of strings, optional
a list of configuration types that will be plotted. All
configurations that can not be sorted into these types will not be
plotted! Possible types:
* dd
* schlumberger
dd_merge: bool, optional
if True, merge all skips. Otherwise, generate individual plots for
each skip
Returns
-------
figs: matplotlib.figure.Figure instance or list of Figure instances
if only one type was plotted, then the figure instance is returned.
Otherwise, return a list of figure instances.
axes: axes object or list of axes objects
plot axes
Examples
--------
.. plot::
:include-source:
from reda.plotters.plots2d import plot_pseudodepths
# define a few measurements
import numpy as np
configs = np.array((
(1, 2, 4, 3),
(1, 2, 5, 4),
(1, 2, 6, 5),
(2, 3, 5, 4),
(2, 3, 6, 5),
(3, 4, 6, 5),
))
# plot
fig, axes = plot_pseudodepths(configs, nr_electrodes=6, spacing=1,
ctypes=['dd', ])
.. plot::
:include-source:
from reda.plotters.plots2d import plot_pseudodepths
# define a few measurements
import numpy as np
configs = np.array((
(4, 7, 5, 6),
(3, 8, 5, 6),
(2, 9, 5, 6),
(1, 10, 5, 6),
))
# plot
fig, axes = plot_pseudodepths(configs, nr_electrodes=10, spacing=1,
ctypes=['schlumberger', ]) | [
"Plot",
"pseudodepths",
"for",
"the",
"measurements",
".",
"If",
"grid",
"is",
"given",
"then",
"the",
"actual",
"electrode",
"positions",
"are",
"used",
"and",
"the",
"parameter",
"spacing",
"is",
"ignored"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/plots2d.py#L72-L227 | train |
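
With dd_merge=True all dipole-dipole skips land in one figure, so the function returns a single figure/axes pair instead of lists. A sketch reusing the configs array from the first doctest above:

fig, ax = plot_pseudodepths(configs, nr_electrodes=6, spacing=1,
                            ctypes=['dd'], dd_merge=True)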
geophysics-ubonn/reda | lib/reda/plotters/plots2d.py | matplot | def matplot(x, y, z, ax=None, colorbar=True, **kwargs):
""" Plot x, y, z as expected with correct axis labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from reda.plotters import matplot
>>> a = np.arange(4)
>>> b = np.arange(3) + 3
>>> def sum(a, b):
... return a + b
>>> x, y = np.meshgrid(a, b)
>>> c = sum(x, y)
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> im = ax1.pcolormesh(x, y, c)
>>> _ = plt.colorbar(im, ax=ax1)
>>> _ = ax1.set_title("plt.pcolormesh")
>>> _, _ = matplot(x, y, c, ax=ax2)
>>> _ = ax2.set_title("reda.plotters.matplot")
>>> fig.show()
Note
----
Only works for equidistant data at the moment.
"""
xmin = x.min()
xmax = x.max()
dx = np.abs(x[0, 1] - x[0, 0])
ymin = y.min()
ymax = y.max()
dy = np.abs(y[1, 0] - y[0, 0])
x2, y2 = np.meshgrid(
np.arange(xmin, xmax + 2 * dx, dx) - dx / 2.,
np.arange(ymin, ymax + 2 * dy, dy) - dy / 2.)
if not ax:
fig, ax = plt.subplots()
else:
fig = ax.figure
im = ax.pcolormesh(x2, y2, z, **kwargs)
ax.axis([x2.min(), x2.max(), y2.min(), y2.max()])
ax.set_xticks(np.arange(xmin, xmax + dx, dx))
ax.set_yticks(np.arange(ymin, ymax + dx, dy))
if colorbar:
cbar = fig.colorbar(im, ax=ax)
else:
cbar = None
return ax, cbar | python | def matplot(x, y, z, ax=None, colorbar=True, **kwargs):
""" Plot x, y, z as expected with correct axis labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from reda.plotters import matplot
>>> a = np.arange(4)
>>> b = np.arange(3) + 3
>>> def sum(a, b):
... return a + b
>>> x, y = np.meshgrid(a, b)
>>> c = sum(x, y)
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> im = ax1.pcolormesh(x, y, c)
>>> _ = plt.colorbar(im, ax=ax1)
>>> _ = ax1.set_title("plt.pcolormesh")
>>> _, _ = matplot(x, y, c, ax=ax2)
>>> _ = ax2.set_title("reda.plotters.matplot")
>>> fig.show()
Note
----
Only works for equidistant data at the moment.
"""
xmin = x.min()
xmax = x.max()
dx = np.abs(x[0, 1] - x[0, 0])
ymin = y.min()
ymax = y.max()
dy = np.abs(y[1, 0] - y[0, 0])
x2, y2 = np.meshgrid(
np.arange(xmin, xmax + 2 * dx, dx) - dx / 2.,
np.arange(ymin, ymax + 2 * dy, dy) - dy / 2.)
if not ax:
fig, ax = plt.subplots()
else:
fig = ax.figure
im = ax.pcolormesh(x2, y2, z, **kwargs)
ax.axis([x2.min(), x2.max(), y2.min(), y2.max()])
ax.set_xticks(np.arange(xmin, xmax + dx, dx))
ax.set_yticks(np.arange(ymin, ymax + dx, dy))
if colorbar:
cbar = fig.colorbar(im, ax=ax)
else:
cbar = None
return ax, cbar | [
"def",
"matplot",
"(",
"x",
",",
"y",
",",
"z",
",",
"ax",
"=",
"None",
",",
"colorbar",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"xmin",
"=",
"x",
".",
"min",
"(",
")",
"xmax",
"=",
"x",
".",
"max",
"(",
")",
"dx",
"=",
"np",
".",
"abs",
"(",
"x",
"[",
"0",
",",
"1",
"]",
"-",
"x",
"[",
"0",
",",
"0",
"]",
")",
"ymin",
"=",
"y",
".",
"min",
"(",
")",
"ymax",
"=",
"y",
".",
"max",
"(",
")",
"dy",
"=",
"np",
".",
"abs",
"(",
"y",
"[",
"1",
",",
"0",
"]",
"-",
"y",
"[",
"0",
",",
"0",
"]",
")",
"x2",
",",
"y2",
"=",
"np",
".",
"meshgrid",
"(",
"np",
".",
"arange",
"(",
"xmin",
",",
"xmax",
"+",
"2",
"*",
"dx",
",",
"dx",
")",
"-",
"dx",
"/",
"2.",
",",
"np",
".",
"arange",
"(",
"ymin",
",",
"ymax",
"+",
"2",
"*",
"dy",
",",
"dy",
")",
"-",
"dy",
"/",
"2.",
")",
"if",
"not",
"ax",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
")",
"else",
":",
"fig",
"=",
"ax",
".",
"figure",
"im",
"=",
"ax",
".",
"pcolormesh",
"(",
"x2",
",",
"y2",
",",
"z",
",",
"*",
"*",
"kwargs",
")",
"ax",
".",
"axis",
"(",
"[",
"x2",
".",
"min",
"(",
")",
",",
"x2",
".",
"max",
"(",
")",
",",
"y2",
".",
"min",
"(",
")",
",",
"y2",
".",
"max",
"(",
")",
"]",
")",
"ax",
".",
"set_xticks",
"(",
"np",
".",
"arange",
"(",
"xmin",
",",
"xmax",
"+",
"dx",
",",
"dx",
")",
")",
"ax",
".",
"set_yticks",
"(",
"np",
".",
"arange",
"(",
"ymin",
",",
"ymax",
"+",
"dx",
",",
"dy",
")",
")",
"if",
"colorbar",
":",
"cbar",
"=",
"fig",
".",
"colorbar",
"(",
"im",
",",
"ax",
"=",
"ax",
")",
"else",
":",
"cbar",
"=",
"None",
"return",
"ax",
",",
"cbar"
] | Plot x, y, z as expected with correct axis labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from reda.plotters import matplot
>>> a = np.arange(4)
>>> b = np.arange(3) + 3
>>> def sum(a, b):
... return a + b
>>> x, y = np.meshgrid(a, b)
>>> c = sum(x, y)
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> im = ax1.pcolormesh(x, y, c)
>>> _ = plt.colorbar(im, ax=ax1)
>>> _ = ax1.set_title("plt.pcolormesh")
>>> _, _ = matplot(x, y, c, ax=ax2)
>>> _ = ax2.set_title("reda.plotters.matplot")
>>> fig.show()
Note
----
Only works for equidistant data at the moment. | [
"Plot",
"x",
"y",
"z",
"as",
"expected",
"with",
"correct",
"axis",
"labels",
"."
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/plots2d.py#L417-L470 | train |
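
The second return value follows the colorbar flag, which is handy when composing subplots:

ax, cbar = matplot(x, y, c, colorbar=False)  # x, y, c as in the doctest above
assert cbar is None  # no colorbar object is created when colorbar=False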
frasertweedale/ledgertools | ltlib/xn.py | Xn.summary | def summary(self):
"""Return a string summary of transaction"""
return "\n".join([
"Transaction:",
" When: " + self.date.strftime("%a %d %b %Y"),
" Description: " + self.desc.replace('\n', ' '),
" For amount: {}".format(self.amount),
" From: {}".format(
", ".join(map(lambda x: x.account, self.src)) if self.src \
else "UNKNOWN"
),
" To: {}".format(
", ".join(map(lambda x: x.account, self.dst)) if self.dst \
else "UNKNOWN"
),
""
]) | python | def summary(self):
"""Return a string summary of transaction"""
return "\n".join([
"Transaction:",
" When: " + self.date.strftime("%a %d %b %Y"),
" Description: " + self.desc.replace('\n', ' '),
" For amount: {}".format(self.amount),
" From: {}".format(
", ".join(map(lambda x: x.account, self.src)) if self.src \
else "UNKNOWN"
),
" To: {}".format(
", ".join(map(lambda x: x.account, self.dst)) if self.dst \
else "UNKNOWN"
),
""
]) | [
"def",
"summary",
"(",
"self",
")",
":",
"return",
"\"\\n\"",
".",
"join",
"(",
"[",
"\"Transaction:\"",
",",
"\" When: \"",
"+",
"self",
".",
"date",
".",
"strftime",
"(",
"\"%a %d %b %Y\"",
")",
",",
"\" Description: \"",
"+",
"self",
".",
"desc",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
",",
"\" For amount: {}\"",
".",
"format",
"(",
"self",
".",
"amount",
")",
",",
"\" From: {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"account",
",",
"self",
".",
"src",
")",
")",
"if",
"self",
".",
"src",
"else",
"\"UNKNOWN\"",
")",
",",
"\" To: {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"account",
",",
"self",
".",
"dst",
")",
")",
"if",
"self",
".",
"dst",
"else",
"\"UNKNOWN\"",
")",
",",
"\"\"",
"]",
")"
] | Return a string summary of transaction | [
"Return",
"a",
"string",
"summary",
"of",
"transaction"
] | a695f8667d72253e5448693c12f0282d09902aaa | https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/xn.py#L88-L104 | train |
frasertweedale/ledgertools | ltlib/xn.py | Xn.check | def check(self):
"""Check this transaction for completeness"""
if not self.date:
raise XnDataError("Missing date")
if not self.desc:
raise XnDataError("Missing description")
if not self.dst:
raise XnDataError("No destination accounts")
if not self.src:
raise XnDataError("No source accounts")
if not self.amount:
raise XnDataError("No transaction amount") | python | def check(self):
"""Check this transaction for completeness"""
if not self.date:
raise XnDataError("Missing date")
if not self.desc:
raise XnDataError("Missing description")
if not self.dst:
raise XnDataError("No destination accounts")
if not self.src:
raise XnDataError("No source accounts")
if not self.amount:
raise XnDataError("No transaction amount") | [
"def",
"check",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"date",
":",
"raise",
"XnDataError",
"(",
"\"Missing date\"",
")",
"if",
"not",
"self",
".",
"desc",
":",
"raise",
"XnDataError",
"(",
"\"Missing description\"",
")",
"if",
"not",
"self",
".",
"dst",
":",
"raise",
"XnDataError",
"(",
"\"No destination accounts\"",
")",
"if",
"not",
"self",
".",
"src",
":",
"raise",
"XnDataError",
"(",
"\"No source accounts\"",
")",
"if",
"not",
"self",
".",
"amount",
":",
"raise",
"XnDataError",
"(",
"\"No transaction amount\"",
")"
] | Check this transaction for completeness | [
"Check",
"this",
"transaction",
"for",
"completeness"
] | a695f8667d72253e5448693c12f0282d09902aaa | https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/xn.py#L106-L117 | train |
frasertweedale/ledgertools | ltlib/xn.py | Xn.balance | def balance(self):
"""Check this transaction for correctness"""
self.check()
if not sum(map(lambda x: x.amount, self.src)) == -self.amount:
raise XnBalanceError("Sum of source amounts "
"not equal to transaction amount")
if not sum(map(lambda x: x.amount, self.dst)) == self.amount:
raise XnBalanceError("Sum of destination amounts "
"not equal to transaction amount")
return True | python | def balance(self):
"""Check this transaction for correctness"""
self.check()
if not sum(map(lambda x: x.amount, self.src)) == -self.amount:
raise XnBalanceError("Sum of source amounts "
"not equal to transaction amount")
if not sum(map(lambda x: x.amount, self.dst)) == self.amount:
raise XnBalanceError("Sum of destination amounts "
"not equal to transaction amount")
return True | [
"def",
"balance",
"(",
"self",
")",
":",
"self",
".",
"check",
"(",
")",
"if",
"not",
"sum",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"amount",
",",
"self",
".",
"src",
")",
")",
"==",
"-",
"self",
".",
"amount",
":",
"raise",
"XnBalanceError",
"(",
"\"Sum of source amounts \"",
"\"not equal to transaction amount\"",
")",
"if",
"not",
"sum",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"amount",
",",
"self",
".",
"dst",
")",
")",
"==",
"self",
".",
"amount",
":",
"raise",
"XnBalanceError",
"(",
"\"Sum of destination amounts \"",
"\"not equal to transaction amount\"",
")",
"return",
"True"
] | Check this transaction for correctness | [
"Check",
"this",
"transaction",
"for",
"correctness"
] | a695f8667d72253e5448693c12f0282d09902aaa | https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/xn.py#L119-L128 | train |
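A minimal sketch of the invariant that balance() enforces, using a namedtuple stand-in for the Endpoint class; the account names and amounts below are made up for illustration:

from collections import namedtuple
from decimal import Decimal

Endpoint = namedtuple('Endpoint', ['account', 'amount'])

amount = Decimal('100.00')
src = [Endpoint('Assets:Bank', Decimal('-100.00'))]   # sources sum to -amount
dst = [Endpoint('Expenses:Rent', Decimal('60.00')),   # destinations sum to +amount
       Endpoint('Expenses:Power', Decimal('40.00'))]

assert sum(e.amount for e in src) == -amount
assert sum(e.amount for e in dst) == amount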
frasertweedale/ledgertools | ltlib/xn.py | Xn.match_rules | def match_rules(self, rules):
"""Process this transaction against the given ruleset
Returns a dict of fields with ScoreSet values, which may be empty.
Notably, the rule processing will be short-circuited if the Xn is
already complete - in this case, None is returned.
"""
try:
self.check()
return None
except XnDataError:
pass
scores = {}
for r in rules:
outcomes = r.match(self)
if not outcomes:
continue
for outcome in outcomes:
if isinstance(outcome, rule.SourceOutcome):
key = 'src'
elif isinstance(outcome, rule.DestinationOutcome):
key = 'dst'
elif isinstance(outcome, rule.DescriptionOutcome):
key = 'desc'
elif isinstance(outcome, rule.DropOutcome):
key = 'drop'
elif isinstance(outcome, rule.RebateOutcome):
key = 'rebate'
else:
raise KeyError
if key not in scores:
scores[key] = score.ScoreSet() # initialise ScoreSet
scores[key].append((outcome.value, outcome.score))
return scores | python | def match_rules(self, rules):
"""Process this transaction against the given ruleset
Returns a dict of fields with ScoreSet values, which may be empty.
Notably, the rule processing will be short-circuited if the Xn is
already complete - in this case, None is returned.
"""
try:
self.check()
return None
except XnDataError:
pass
scores = {}
for r in rules:
outcomes = r.match(self)
if not outcomes:
continue
for outcome in outcomes:
if isinstance(outcome, rule.SourceOutcome):
key = 'src'
elif isinstance(outcome, rule.DestinationOutcome):
key = 'dst'
elif isinstance(outcome, rule.DescriptionOutcome):
key = 'desc'
elif isinstance(outcome, rule.DropOutcome):
key = 'drop'
elif isinstance(outcome, rule.RebateOutcome):
key = 'rebate'
else:
raise KeyError
if key not in scores:
scores[key] = score.ScoreSet() # initialise ScoreSet
scores[key].append((outcome.value, outcome.score))
return scores | [
"def",
"match_rules",
"(",
"self",
",",
"rules",
")",
":",
"try",
":",
"self",
".",
"check",
"(",
")",
"return",
"None",
"except",
"XnDataError",
":",
"pass",
"scores",
"=",
"{",
"}",
"for",
"r",
"in",
"rules",
":",
"outcomes",
"=",
"r",
".",
"match",
"(",
"self",
")",
"if",
"not",
"outcomes",
":",
"continue",
"for",
"outcome",
"in",
"outcomes",
":",
"if",
"isinstance",
"(",
"outcome",
",",
"rule",
".",
"SourceOutcome",
")",
":",
"key",
"=",
"'src'",
"elif",
"isinstance",
"(",
"outcome",
",",
"rule",
".",
"DestinationOutcome",
")",
":",
"key",
"=",
"'dst'",
"elif",
"isinstance",
"(",
"outcome",
",",
"rule",
".",
"DescriptionOutcome",
")",
":",
"key",
"=",
"'desc'",
"elif",
"isinstance",
"(",
"outcome",
",",
"rule",
".",
"DropOutcome",
")",
":",
"key",
"=",
"'drop'",
"elif",
"isinstance",
"(",
"outcome",
",",
"rule",
".",
"RebateOutcome",
")",
":",
"key",
"=",
"'rebate'",
"else",
":",
"raise",
"KeyError",
"if",
"key",
"not",
"in",
"scores",
":",
"scores",
"[",
"key",
"]",
"=",
"score",
".",
"ScoreSet",
"(",
")",
"# initialise ScoreSet",
"scores",
"[",
"key",
"]",
".",
"append",
"(",
"(",
"outcome",
".",
"value",
",",
"outcome",
".",
"score",
")",
")",
"return",
"scores"
] | Process this transaction against the given ruleset
Returns a dict of fields with ScoreSet values, which may be empty.
Notably, the rule processing will be short-circuited if the Xn is
already complete - in this case, None is returned. | [
"Process",
"this",
"transaction",
"against",
"the",
"given",
"ruleset"
] | a695f8667d72253e5448693c12f0282d09902aaa | https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/xn.py#L130-L166 | train |
frasertweedale/ledgertools | ltlib/xn.py | Xn.complete | def complete(self, uio, dropped=False):
"""Query for all missing information in the transaction"""
if self.dropped and not dropped:
# do nothing for dropped xn, unless specifically told to
return
for end in ['src', 'dst']:
if getattr(self, end):
continue # we have this information
uio.show('\nEnter ' + end + ' for transaction:')
uio.show('')
uio.show(self.summary())
try:
endpoints = []
remaining = self.amount
while remaining:
account = uio.text(' Enter account', None)
amount = uio.decimal(
' Enter amount',
default=remaining,
lower=0,
upper=remaining
)
endpoints.append(Endpoint(account, amount))
remaining = self.amount \
- sum(map(lambda x: x.amount, endpoints))
except ui.RejectWarning:
# bail out
sys.exit("bye!")
# flip amounts if it was a src outcome
if end == 'src':
endpoints = map(
lambda x: Endpoint(x.account, -x.amount),
endpoints
)
# set endpoints
setattr(self, end, endpoints) | python | def complete(self, uio, dropped=False):
"""Query for all missing information in the transaction"""
if self.dropped and not dropped:
# do nothing for dropped xn, unless specifically told to
return
for end in ['src', 'dst']:
if getattr(self, end):
continue # we have this information
uio.show('\nEnter ' + end + ' for transaction:')
uio.show('')
uio.show(self.summary())
try:
endpoints = []
remaining = self.amount
while remaining:
account = uio.text(' Enter account', None)
amount = uio.decimal(
' Enter amount',
default=remaining,
lower=0,
upper=remaining
)
endpoints.append(Endpoint(account, amount))
remaining = self.amount \
- sum(map(lambda x: x.amount, endpoints))
except ui.RejectWarning:
# bail out
sys.exit("bye!")
# flip amounts if it was a src outcome
if end == 'src':
endpoints = map(
lambda x: Endpoint(x.account, -x.amount),
endpoints
)
# set endpoints
setattr(self, end, endpoints) | [
"def",
"complete",
"(",
"self",
",",
"uio",
",",
"dropped",
"=",
"False",
")",
":",
"if",
"self",
".",
"dropped",
"and",
"not",
"dropped",
":",
"# do nothing for dropped xn, unless specifically told to",
"return",
"for",
"end",
"in",
"[",
"'src'",
",",
"'dst'",
"]",
":",
"if",
"getattr",
"(",
"self",
",",
"end",
")",
":",
"continue",
"# we have this information",
"uio",
".",
"show",
"(",
"'\\nEnter '",
"+",
"end",
"+",
"' for transaction:'",
")",
"uio",
".",
"show",
"(",
"''",
")",
"uio",
".",
"show",
"(",
"self",
".",
"summary",
"(",
")",
")",
"try",
":",
"endpoints",
"=",
"[",
"]",
"remaining",
"=",
"self",
".",
"amount",
"while",
"remaining",
":",
"account",
"=",
"uio",
".",
"text",
"(",
"' Enter account'",
",",
"None",
")",
"amount",
"=",
"uio",
".",
"decimal",
"(",
"' Enter amount'",
",",
"default",
"=",
"remaining",
",",
"lower",
"=",
"0",
",",
"upper",
"=",
"remaining",
")",
"endpoints",
".",
"append",
"(",
"Endpoint",
"(",
"account",
",",
"amount",
")",
")",
"remaining",
"=",
"self",
".",
"amount",
"-",
"sum",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"amount",
",",
"endpoints",
")",
")",
"except",
"ui",
".",
"RejectWarning",
":",
"# bail out",
"sys",
".",
"exit",
"(",
"\"bye!\"",
")",
"# flip amounts if it was a src outcome",
"if",
"end",
"==",
"'src'",
":",
"endpoints",
"=",
"map",
"(",
"lambda",
"x",
":",
"Endpoint",
"(",
"x",
".",
"account",
",",
"-",
"x",
".",
"amount",
")",
",",
"endpoints",
")",
"# set endpoints",
"setattr",
"(",
"self",
",",
"end",
",",
"endpoints",
")"
] | Query for all missing information in the transaction | [
"Query",
"for",
"all",
"missing",
"information",
"in",
"the",
"transaction"
] | a695f8667d72253e5448693c12f0282d09902aaa | https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/xn.py#L312-L351 | train |
frasertweedale/ledgertools | ltlib/xn.py | Xn.process | def process(self, rules, uio, prevxn=None):
"""Matches rules and applies outcomes"""
self.apply_outcomes(self.match_rules(rules), uio, prevxn=prevxn) | python | def process(self, rules, uio, prevxn=None):
"""Matches rules and applies outcomes"""
self.apply_outcomes(self.match_rules(rules), uio, prevxn=prevxn) | [
"def",
"process",
"(",
"self",
",",
"rules",
",",
"uio",
",",
"prevxn",
"=",
"None",
")",
":",
"self",
".",
"apply_outcomes",
"(",
"self",
".",
"match_rules",
"(",
"rules",
")",
",",
"uio",
",",
"prevxn",
"=",
"prevxn",
")"
] | Matches rules and applies outcomes | [
"Matches",
"rules",
"and",
"applies",
"outcomes"
] | a695f8667d72253e5448693c12f0282d09902aaa | https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/xn.py#L353-L355 | train |
geophysics-ubonn/reda | lib/reda/plotters/time_series.py | plot_quadpole_evolution | def plot_quadpole_evolution(dataobj, quadpole, cols, threshold=5,
rolling=False, ax=None):
"""Visualize time-lapse evolution of a single quadropole.
Parameters
----------
dataobj : :py:class:`pandas.DataFrame`
DataFrame containing the data. Please refer to the documentation for
required columns.
quadpole : list of integers
Electrode numbers of the quadrupole.
cols : str
The column/parameter to plot over time.
threshold : float
Allowed relative deviation from the rolling median.
rolling : bool
Calculate rolling median values (the default is False).
ax : mpl.axes
Optional axes object to plot to.
"""
if isinstance(dataobj, pd.DataFrame):
df = dataobj
else:
df = dataobj.data
subquery = df.query(
'a == {0} and b == {1} and m == {2} and n == {3}'.format(*quadpole))
# rhoa = subquery['rho_a'].values
# rhoa[30] = 300
# subquery['rho_a'] = rhoa
if ax is not None:
fig = ax.get_figure()
else:
fig, ax = plt.subplots(1, 1, figsize=(20 / 2.54, 7 / 2.54))
ax.plot(
subquery['timestep'],
subquery[cols],
'.',
color='blue',
label='valid data',
)
if rolling:
# rolling mean
rolling_m = subquery.rolling(3, center=True, min_periods=1).median()
ax.plot(
rolling_m['timestep'].values,
rolling_m['rho_a'].values,
'-',
label='rolling median',
)
ax.fill_between(
rolling_m['timestep'].values,
rolling_m['rho_a'].values * (1 - threshold),
rolling_m['rho_a'].values * (1 + threshold),
alpha=0.4,
color='blue',
label='{0}\% confidence region'.format(threshold * 100),
)
# find all values that deviate by more than X percent from the
# rolling_m
bad_values = (np.abs(
np.abs(subquery['rho_a'].values - rolling_m['rho_a'].values) /
rolling_m['rho_a'].values) > threshold)
bad = subquery.loc[bad_values]
ax.plot(
bad['timestep'].values,
bad['rho_a'].values,
'.',
# s=15,
color='r',
label='discarded data',
)
ax.legend(loc='upper center', fontsize=6)
# ax.set_xlim(10, 20)
ax.set_ylabel(r'$\rho_a$ [$\Omega$m]')
ax.set_xlabel('timestep')
return fig, ax | python | def plot_quadpole_evolution(dataobj, quadpole, cols, threshold=5,
rolling=False, ax=None):
"""Visualize time-lapse evolution of a single quadropole.
Parameters
----------
dataobj : :py:class:`pandas.DataFrame`
DataFrame containing the data. Please refer to the documentation for
required columns.
quadpole : list of integers
Electrode numbers of the quadrupole.
cols : str
The column/parameter to plot over time.
threshold : float
Allowed relative deviation from the rolling median.
rolling : bool
Calculate rolling median values (the default is False).
ax : mpl.axes
Optional axes object to plot to.
"""
if isinstance(dataobj, pd.DataFrame):
df = dataobj
else:
df = dataobj.data
subquery = df.query(
'a == {0} and b == {1} and m == {2} and n == {3}'.format(*quadpole))
# rhoa = subquery['rho_a'].values
# rhoa[30] = 300
# subquery['rho_a'] = rhoa
if ax is not None:
fig = ax.get_figure()
else:
fig, ax = plt.subplots(1, 1, figsize=(20 / 2.54, 7 / 2.54))
ax.plot(
subquery['timestep'],
subquery[cols],
'.',
color='blue',
label='valid data',
)
if rolling:
# rolling mean
rolling_m = subquery.rolling(3, center=True, min_periods=1).median()
ax.plot(
rolling_m['timestep'].values,
rolling_m['rho_a'].values,
'-',
label='rolling median',
)
ax.fill_between(
rolling_m['timestep'].values,
rolling_m['rho_a'].values * (1 - threshold),
rolling_m['rho_a'].values * (1 + threshold),
alpha=0.4,
color='blue',
label='{0}\% confidence region'.format(threshold * 100),
)
# find all values that deviate by more than X percent from the
# rolling_m
bad_values = (np.abs(
np.abs(subquery['rho_a'].values - rolling_m['rho_a'].values) /
rolling_m['rho_a'].values) > threshold)
bad = subquery.loc[bad_values]
ax.plot(
bad['timestep'].values,
bad['rho_a'].values,
'.',
# s=15,
color='r',
label='discarded data',
)
ax.legend(loc='upper center', fontsize=6)
# ax.set_xlim(10, 20)
ax.set_ylabel(r'$\rho_a$ [$\Omega$m]')
ax.set_xlabel('timestep')
return fig, ax | [
"def",
"plot_quadpole_evolution",
"(",
"dataobj",
",",
"quadpole",
",",
"cols",
",",
"threshold",
"=",
"5",
",",
"rolling",
"=",
"False",
",",
"ax",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"dataobj",
",",
"pd",
".",
"DataFrame",
")",
":",
"df",
"=",
"dataobj",
"else",
":",
"df",
"=",
"dataobj",
".",
"data",
"subquery",
"=",
"df",
".",
"query",
"(",
"'a == {0} and b == {1} and m == {2} and n == {3}'",
".",
"format",
"(",
"*",
"quadpole",
")",
")",
"# rhoa = subquery['rho_a'].values",
"# rhoa[30] = 300",
"# subquery['rho_a'] = rhoa",
"if",
"ax",
"is",
"not",
"None",
":",
"fig",
"=",
"ax",
".",
"get_figure",
"(",
")",
"else",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"1",
",",
"figsize",
"=",
"(",
"20",
"/",
"2.54",
",",
"7",
"/",
"2.54",
")",
")",
"ax",
".",
"plot",
"(",
"subquery",
"[",
"'timestep'",
"]",
",",
"subquery",
"[",
"cols",
"]",
",",
"'.'",
",",
"color",
"=",
"'blue'",
",",
"label",
"=",
"'valid data'",
",",
")",
"if",
"rolling",
":",
"# rolling mean",
"rolling_m",
"=",
"subquery",
".",
"rolling",
"(",
"3",
",",
"center",
"=",
"True",
",",
"min_periods",
"=",
"1",
")",
".",
"median",
"(",
")",
"ax",
".",
"plot",
"(",
"rolling_m",
"[",
"'timestep'",
"]",
".",
"values",
",",
"rolling_m",
"[",
"'rho_a'",
"]",
".",
"values",
",",
"'-'",
",",
"label",
"=",
"'rolling median'",
",",
")",
"ax",
".",
"fill_between",
"(",
"rolling_m",
"[",
"'timestep'",
"]",
".",
"values",
",",
"rolling_m",
"[",
"'rho_a'",
"]",
".",
"values",
"*",
"(",
"1",
"-",
"threshold",
")",
",",
"rolling_m",
"[",
"'rho_a'",
"]",
".",
"values",
"*",
"(",
"1",
"+",
"threshold",
")",
",",
"alpha",
"=",
"0.4",
",",
"color",
"=",
"'blue'",
",",
"label",
"=",
"'{0}\\% confidence region'",
".",
"format",
"(",
"threshold",
"*",
"100",
")",
",",
")",
"# find all values that deviate by more than X percent from the",
"# rolling_m",
"bad_values",
"=",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"abs",
"(",
"subquery",
"[",
"'rho_a'",
"]",
".",
"values",
"-",
"rolling_m",
"[",
"'rho_a'",
"]",
".",
"values",
")",
"/",
"rolling_m",
"[",
"'rho_a'",
"]",
".",
"values",
")",
">",
"threshold",
")",
"bad",
"=",
"subquery",
".",
"loc",
"[",
"bad_values",
"]",
"ax",
".",
"plot",
"(",
"bad",
"[",
"'timestep'",
"]",
".",
"values",
",",
"bad",
"[",
"'rho_a'",
"]",
".",
"values",
",",
"'.'",
",",
"# s=15,",
"color",
"=",
"'r'",
",",
"label",
"=",
"'discarded data'",
",",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"'upper center'",
",",
"fontsize",
"=",
"6",
")",
"# ax.set_xlim(10, 20)",
"ax",
".",
"set_ylabel",
"(",
"r'$\\rho_a$ [$\\Omega$m]'",
")",
"ax",
".",
"set_xlabel",
"(",
"'timestep'",
")",
"return",
"fig",
",",
"ax"
] | Visualize time-lapse evolution of a single quadrupole.
Parameters
----------
dataobj : :py:class:`pandas.DataFrame`
DataFrame containing the data. Please refer to the documentation for
required columns.
quadpole : list of integers
Electrode numbers of the quadrupole.
cols : str
The column/parameter to plot over time.
threshold : float
Allowed relative deviation from the rolling median.
rolling : bool
Calculate rolling median values (the default is False).
ax : mpl.axes
Optional axes object to plot to. | [
"Visualize",
"time",
"-",
"lapse",
"evolution",
"of",
"a",
"single",
"quadropole",
"."
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/plotters/time_series.py#L9-L93 | train |
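A usage sketch for plot_quadpole_evolution, assuming reda and its plotting stack are installed; the DataFrame columns and values are invented for illustration. Note that the code compares the relative deviation against threshold directly, so a fractional value such as 0.05 behaves sensibly despite the signature's default of 5:

import numpy as np
import pandas as pd
from reda.plotters.time_series import plot_quadpole_evolution

# one quadrupole (a=1, b=2, m=3, n=4) measured over 20 time steps
df = pd.DataFrame({
    'a': 1, 'b': 2, 'm': 3, 'n': 4,
    'timestep': np.arange(20),
    'rho_a': 100 + np.random.randn(20),
})
fig, ax = plot_quadpole_evolution(
    df, [1, 2, 3, 4], 'rho_a', threshold=0.05, rolling=True)
fig.savefig('quadpole_evolution.png', dpi=300)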
shexSpec/grammar | parsers/python/pyshexc/parser_impl/shex_oneofshape_parser.py | ShexOneOfShapeParser.visitSenseFlags | def visitSenseFlags(self, ctx: ShExDocParser.SenseFlagsContext):
""" '!' '^'? | '^' '!'? """
if '!' in ctx.getText():
self.expression.negated = True
if '^' in ctx.getText():
self.expression.inverse = True | python | def visitSenseFlags(self, ctx: ShExDocParser.SenseFlagsContext):
""" '!' '^'? | '^' '!'? """
if '!' in ctx.getText():
self.expression.negated = True
if '^' in ctx.getText():
self.expression.inverse = True | [
"def",
"visitSenseFlags",
"(",
"self",
",",
"ctx",
":",
"ShExDocParser",
".",
"SenseFlagsContext",
")",
":",
"if",
"'!'",
"in",
"ctx",
".",
"getText",
"(",
")",
":",
"self",
".",
"expression",
".",
"negated",
"=",
"True",
"if",
"'^'",
"in",
"ctx",
".",
"getText",
"(",
")",
":",
"self",
".",
"expression",
".",
"inverse",
"=",
"True"
] | '!' '^'? | '^' '!'? | [
"!",
"^",
"?",
"|",
"^",
"!",
"?"
] | 4497cd1f73fa6703bca6e2cb53ba9c120f22e48c | https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_oneofshape_parser.py#L114-L119 | train |
andy-z/ged4py | ged4py/detail/date.py | CalendarDate.as_tuple | def as_tuple(self):
"""Date as three-tuple of numbers"""
if self._tuple is None:
# extract leading digits from year
year = 9999
if self.year:
m = self.DIGITS.match(self.year)
if m:
year = int(m.group(0))
month = self.month_num or 99
day = self.day if self.day is not None else 99
# should we include calendar name in tuple too?
self._tuple = year, month, day
return self._tuple | python | def as_tuple(self):
"""Date as three-tuple of numbers"""
if self._tuple is None:
# extract leading digits from year
year = 9999
if self.year:
m = self.DIGITS.match(self.year)
if m:
year = int(m.group(0))
month = self.month_num or 99
day = self.day if self.day is not None else 99
# should we include calendar name in tuple too?
self._tuple = year, month, day
return self._tuple | [
"def",
"as_tuple",
"(",
"self",
")",
":",
"if",
"self",
".",
"_tuple",
"is",
"None",
":",
"# extract leading digits from year",
"year",
"=",
"9999",
"if",
"self",
".",
"year",
":",
"m",
"=",
"self",
".",
"DIGITS",
".",
"match",
"(",
"self",
".",
"year",
")",
"if",
"m",
":",
"year",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"0",
")",
")",
"month",
"=",
"self",
".",
"month_num",
"or",
"99",
"day",
"=",
"self",
".",
"day",
"if",
"self",
".",
"day",
"is",
"not",
"None",
"else",
"99",
"# should we include calendar name in tuple too?",
"self",
".",
"_tuple",
"=",
"year",
",",
"month",
",",
"day",
"return",
"self",
".",
"_tuple"
] | Date as three-tuple of numbers | [
"Date",
"as",
"three",
"-",
"tuple",
"of",
"numbers"
] | d0e0cceaadf0a84cbf052705e3c27303b12e1757 | https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/date.py#L142-L157 | train |
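The sentinel padding used by as_tuple can be shown standalone; this is a self-contained re-implementation of the same logic (not a call into ged4py):

import re

DIGITS = re.compile(r'\d+')  # stand-in for CalendarDate.DIGITS

def as_tuple(year=None, month_num=None, day=None):
    y = 9999
    if year:
        m = DIGITS.match(year)
        if m:
            y = int(m.group(0))
    return (y, month_num or 99, day if day is not None else 99)

print(as_tuple('1984', 6, 1))  # (1984, 6, 1)
print(as_tuple('1984'))        # (1984, 99, 99): unknown month/day sort last
print(as_tuple())              # (9999, 99, 99): e.g. a date phrase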
andy-z/ged4py | ged4py/detail/date.py | DateValue._cmp_date | def _cmp_date(self):
"""Returns Calendar date used for comparison.
Use the earliest date out of all CalendarDates in this instance,
or some date in the future if there are no CalendarDates (e.g.
when Date is a phrase).
"""
dates = sorted(val for val in self.kw.values()
if isinstance(val, CalendarDate))
if dates:
return dates[0]
# return date very far in the future
return CalendarDate() | python | def _cmp_date(self):
"""Returns Calendar date used for comparison.
Use the earliest date out of all CalendarDates in this instance,
or some date in the future if there are no CalendarDates (e.g.
when Date is a phrase).
"""
dates = sorted(val for val in self.kw.values()
if isinstance(val, CalendarDate))
if dates:
return dates[0]
# return date very far in the future
return CalendarDate() | [
"def",
"_cmp_date",
"(",
"self",
")",
":",
"dates",
"=",
"sorted",
"(",
"val",
"for",
"val",
"in",
"self",
".",
"kw",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"val",
",",
"CalendarDate",
")",
")",
"if",
"dates",
":",
"return",
"dates",
"[",
"0",
"]",
"# return date very far in the future",
"return",
"CalendarDate",
"(",
")"
] | Returns Calendar date used for comparison.
Use the earliest date out of all CalendarDates in this instance,
or some date in the future if there are no CalendarDates (e.g.
when Date is a phrase). | [
"Returns",
"Calendar",
"date",
"used",
"for",
"comparison",
"."
] | d0e0cceaadf0a84cbf052705e3c27303b12e1757 | https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/detail/date.py#L237-L249 | train |
lekhakpadmanabh/Summarizer | smrzr/better_sentences.py | better_sentences | def better_sentences(func):
"""takes care of some edge cases of sentence
tokenization for cases when websites don't
close sentences properly, usually after
blockquotes, image captions or attributions"""
@wraps(func)
def wrapped(*args):
sentences = func(*args)
new_sentences = []
for i, l in enumerate(sentences):
if '\n\n' in l:
splits = l.split('\n\n')
if len(splits)>1:
for ind,spl in enumerate(splits):
if len(spl) <20:
#if DEBUG: print "Discarding: ", spl
del splits[ind]
new_sentences.extend(splits)
else:
new_sentences.append(l)
return new_sentences
return wrapped | python | def better_sentences(func):
"""takes care of some edge cases of sentence
tokenization for cases when websites don't
close sentences properly, usually after
blockquotes, image captions or attributions"""
@wraps(func)
def wrapped(*args):
sentences = func(*args)
new_sentences = []
for i, l in enumerate(sentences):
if '\n\n' in l:
splits = l.split('\n\n')
if len(splits)>1:
for ind,spl in enumerate(splits):
if len(spl) <20:
#if DEBUG: print "Discarding: ", spl
del splits[ind]
new_sentences.extend(splits)
else:
new_sentences.append(l)
return new_sentences
return wrapped | [
"def",
"better_sentences",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"*",
"args",
")",
":",
"sentences",
"=",
"func",
"(",
"*",
"args",
")",
"new_sentences",
"=",
"[",
"]",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"sentences",
")",
":",
"if",
"'\\n\\n'",
"in",
"l",
":",
"splits",
"=",
"l",
".",
"split",
"(",
"'\\n\\n'",
")",
"if",
"len",
"(",
"splits",
")",
">",
"1",
":",
"for",
"ind",
",",
"spl",
"in",
"enumerate",
"(",
"splits",
")",
":",
"if",
"len",
"(",
"spl",
")",
"<",
"20",
":",
"#if DEBUG: print \"Discarding: \", spl",
"del",
"splits",
"[",
"ind",
"]",
"new_sentences",
".",
"extend",
"(",
"splits",
")",
"else",
":",
"new_sentences",
".",
"append",
"(",
"l",
")",
"return",
"new_sentences",
"return",
"wrapped"
] | takes care of some edge cases of sentence
tokenization for cases when websites don't
close sentences properly, usually after
blockquotes, image captions or attributions | [
"takes",
"care",
"of",
"some",
"edge",
"cases",
"of",
"sentence",
"tokenization",
"for",
"cases",
"when",
"websites",
"don",
"t",
"close",
"sentences",
"properly",
"usually",
"after",
"blockquotes",
"image",
"captions",
"or",
"attributions"
] | 143456a48217905c720d87331f410e5c8b4e24aa | https://github.com/lekhakpadmanabh/Summarizer/blob/143456a48217905c720d87331f410e5c8b4e24aa/smrzr/better_sentences.py#L3-L26 | train |
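A self-contained demonstration of the decorator on a stand-in tokenizer, assuming better_sentences is importable (from smrzr.better_sentences import better_sentences); the toy function just returns a fixed list:

from smrzr.better_sentences import better_sentences

def fake_tokenizer(text):
    return ['A caption\n\nThe real sentence continues after the caption.']

cleaned = better_sentences(fake_tokenizer)
print(cleaned('ignored'))
# ['The real sentence continues after the caption.']
# the fragment before '\n\n' is shorter than 20 characters and is discarded

Note that del splits[ind] inside the enumerate loop shifts the list, so of two consecutive short fragments only the first is dropped.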
south-coast-science/scs_core | src/scs_core/gas/pid_datum.py | PIDDatum.__we_c | def __we_c(cls, calib, tc, temp, we_v):
"""
Compute weC from sensor temperature compensation of weV
"""
offset_v = calib.pid_elc_mv / 1000.0
response_v = we_v - offset_v # remove electronic zero
response_c = tc.correct(temp, response_v) # correct the response component
if response_c is None:
return None
we_c = response_c + offset_v # replace electronic zero
return we_c | python | def __we_c(cls, calib, tc, temp, we_v):
"""
Compute weC from sensor temperature compensation of weV
"""
offset_v = calib.pid_elc_mv / 1000.0
response_v = we_v - offset_v # remove electronic zero
response_c = tc.correct(temp, response_v) # correct the response component
if response_c is None:
return None
we_c = response_c + offset_v # replace electronic zero
return we_c | [
"def",
"__we_c",
"(",
"cls",
",",
"calib",
",",
"tc",
",",
"temp",
",",
"we_v",
")",
":",
"offset_v",
"=",
"calib",
".",
"pid_elc_mv",
"/",
"1000.0",
"response_v",
"=",
"we_v",
"-",
"offset_v",
"# remove electronic zero",
"response_c",
"=",
"tc",
".",
"correct",
"(",
"temp",
",",
"response_v",
")",
"# correct the response component",
"if",
"response_c",
"is",
"None",
":",
"return",
"None",
"we_c",
"=",
"response_c",
"+",
"offset_v",
"# replace electronic zero",
"return",
"we_c"
] | Compute weC from sensor temperature compensation of weV | [
"Compute",
"weC",
"from",
"sensor",
"temperature",
"compensation",
"of",
"weV"
] | a4152b0bbed6acbbf257e1bba6a912f6ebe578e5 | https://github.com/south-coast-science/scs_core/blob/a4152b0bbed6acbbf257e1bba6a912f6ebe578e5/src/scs_core/gas/pid_datum.py#L44-L58 | train |
south-coast-science/scs_core | src/scs_core/gas/pid_datum.py | PIDDatum.__cnc | def __cnc(cls, calib, we_c):
"""
Compute cnc from weC
"""
if we_c is None:
return None
offset_v = calib.pid_elc_mv / 1000.0
response_c = we_c - offset_v # remove electronic zero
cnc = response_c / calib.pid_sens_mv # pid_sens_mv set for ppb or ppm - see PID.init()
return cnc | python | def __cnc(cls, calib, we_c):
"""
Compute cnc from weC
"""
if we_c is None:
return None
offset_v = calib.pid_elc_mv / 1000.0
response_c = we_c - offset_v # remove electronic zero
cnc = response_c / calib.pid_sens_mv # pid_sens_mv set for ppb or ppm - see PID.init()
return cnc | [
"def",
"__cnc",
"(",
"cls",
",",
"calib",
",",
"we_c",
")",
":",
"if",
"we_c",
"is",
"None",
":",
"return",
"None",
"offset_v",
"=",
"calib",
".",
"pid_elc_mv",
"/",
"1000.0",
"response_c",
"=",
"we_c",
"-",
"offset_v",
"# remove electronic zero",
"cnc",
"=",
"response_c",
"/",
"calib",
".",
"pid_sens_mv",
"# pid_sens_mv set for ppb or ppm - see PID.init()",
"return",
"cnc"
] | Compute cnc from weC | [
"Compute",
"cnc",
"from",
"weC"
] | a4152b0bbed6acbbf257e1bba6a912f6ebe578e5 | https://github.com/south-coast-science/scs_core/blob/a4152b0bbed6acbbf257e1bba6a912f6ebe578e5/src/scs_core/gas/pid_datum.py#L62-L74 | train |
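The two steps above reduce to subtracting the electronic zero and dividing by the sensitivity; worked by hand with made-up calibration numbers (the real pid_elc_mv and pid_sens_mv come from the sensor calibration):

pid_elc_mv = 50.0    # electronic zero, mV (illustrative value)
pid_sens_mv = 25.0   # sensitivity per concentration unit (illustrative value)

we_c = 0.3                       # temperature-compensated voltage, V
offset_v = pid_elc_mv / 1000.0   # 0.05 V
response_c = we_c - offset_v     # 0.25 V
cnc = response_c / pid_sens_mv   # 0.01 in the calibrated concentration unit
print(cnc)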
cloudbase/python-hnvclient | hnv/common/model.py | Field.add_to_class | def add_to_class(self, model_class):
"""Replace the `Field` attribute with a named `_FieldDescriptor`.
.. note::
This method is called during construction of the `Model`.
"""
model_class._meta.add_field(self)
setattr(model_class, self.name, _FieldDescriptor(self)) | python | def add_to_class(self, model_class):
"""Replace the `Field` attribute with a named `_FieldDescriptor`.
.. note::
This method is called during construction of the `Model`.
"""
model_class._meta.add_field(self)
setattr(model_class, self.name, _FieldDescriptor(self)) | [
"def",
"add_to_class",
"(",
"self",
",",
"model_class",
")",
":",
"model_class",
".",
"_meta",
".",
"add_field",
"(",
"self",
")",
"setattr",
"(",
"model_class",
",",
"self",
".",
"name",
",",
"_FieldDescriptor",
"(",
"self",
")",
")"
] | Replace the `Field` attribute with a named `_FieldDescriptor`.
.. note::
This method is called during construction of the `Model`. | [
"Replace",
"the",
"Field",
"attribute",
"with",
"a",
"named",
"_FieldDescriptor",
"."
] | b019452af01db22629809b8930357a2ebf6494be | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/common/model.py#L126-L133 | train |
cloudbase/python-hnvclient | hnv/common/model.py | _ModelOptions.add_field | def add_field(self, field):
"""Add the received field to the model."""
self.remove_field(field.name)
self._fields[field.name] = field
if field.default is not None:
if six.callable(field.default):
self._default_callables[field.key] = field.default
else:
self._defaults[field.key] = field.default | python | def add_field(self, field):
"""Add the received field to the model."""
self.remove_field(field.name)
self._fields[field.name] = field
if field.default is not None:
if six.callable(field.default):
self._default_callables[field.key] = field.default
else:
self._defaults[field.key] = field.default | [
"def",
"add_field",
"(",
"self",
",",
"field",
")",
":",
"self",
".",
"remove_field",
"(",
"field",
".",
"name",
")",
"self",
".",
"_fields",
"[",
"field",
".",
"name",
"]",
"=",
"field",
"if",
"field",
".",
"default",
"is",
"not",
"None",
":",
"if",
"six",
".",
"callable",
"(",
"field",
".",
"default",
")",
":",
"self",
".",
"_default_callables",
"[",
"field",
".",
"key",
"]",
"=",
"field",
".",
"default",
"else",
":",
"self",
".",
"_defaults",
"[",
"field",
".",
"key",
"]",
"=",
"field",
".",
"default"
] | Add the received field to the model. | [
"Add",
"the",
"received",
"field",
"to",
"the",
"model",
"."
] | b019452af01db22629809b8930357a2ebf6494be | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/common/model.py#L157-L166 | train |
cloudbase/python-hnvclient | hnv/common/model.py | _ModelOptions.remove_field | def remove_field(self, field_name):
"""Remove the field with the received field name from model."""
field = self._fields.pop(field_name, None)
if field is not None and field.default is not None:
if six.callable(field.default):
self._default_callables.pop(field.key, None)
else:
self._defaults.pop(field.key, None) | python | def remove_field(self, field_name):
"""Remove the field with the received field name from model."""
field = self._fields.pop(field_name, None)
if field is not None and field.default is not None:
if six.callable(field.default):
self._default_callables.pop(field.key, None)
else:
self._defaults.pop(field.key, None) | [
"def",
"remove_field",
"(",
"self",
",",
"field_name",
")",
":",
"field",
"=",
"self",
".",
"_fields",
".",
"pop",
"(",
"field_name",
",",
"None",
")",
"if",
"field",
"is",
"not",
"None",
"and",
"field",
".",
"default",
"is",
"not",
"None",
":",
"if",
"six",
".",
"callable",
"(",
"field",
".",
"default",
")",
":",
"self",
".",
"_default_callables",
".",
"pop",
"(",
"field",
".",
"key",
",",
"None",
")",
"else",
":",
"self",
".",
"_defaults",
".",
"pop",
"(",
"field",
".",
"key",
",",
"None",
")"
] | Remove the field with the received field name from model. | [
"Remove",
"the",
"field",
"with",
"the",
"received",
"field",
"name",
"from",
"model",
"."
] | b019452af01db22629809b8930357a2ebf6494be | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/common/model.py#L168-L175 | train |
cloudbase/python-hnvclient | hnv/common/model.py | _ModelOptions.get_defaults | def get_defaults(self):
"""Get a dictionary that contains all the available defaults."""
defaults = self._defaults.copy()
for field_key, default in self._default_callables.items():
defaults[field_key] = default()
return defaults | python | def get_defaults(self):
"""Get a dictionary that contains all the available defaults."""
defaults = self._defaults.copy()
for field_key, default in self._default_callables.items():
defaults[field_key] = default()
return defaults | [
"def",
"get_defaults",
"(",
"self",
")",
":",
"defaults",
"=",
"self",
".",
"_defaults",
".",
"copy",
"(",
")",
"for",
"field_key",
",",
"default",
"in",
"self",
".",
"_default_callables",
".",
"items",
"(",
")",
":",
"defaults",
"[",
"field_key",
"]",
"=",
"default",
"(",
")",
"return",
"defaults"
] | Get a dictionary that contains all the available defaults. | [
"Get",
"a",
"dictionary",
"that",
"contains",
"all",
"the",
"available",
"defaults",
"."
] | b019452af01db22629809b8930357a2ebf6494be | https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/common/model.py#L177-L182 | train |
newfies-dialer/python-msspeak | msspeak/msspeak.py | MSSpeak.speak | def speak(self, textstr, lang='en-US', gender='female', format='riff-16khz-16bit-mono-pcm'):
"""
Run will call Microsoft Translate API and produce audio
"""
# print("speak(textstr=%s, lang=%s, gender=%s, format=%s)" % (textstr, lang, gender, format))
concatkey = '%s-%s-%s-%s' % (textstr, lang.lower(), gender.lower(), format)
key = self.tts_engine + '' + str(hash(concatkey))
self.filename = '%s-%s.mp3' % (key, lang)
# check if file exists
fileloc = self.directory + self.filename
if self.cache and os.path.isfile(self.directory + self.filename):
return self.filename
else:
with open(fileloc, 'wb') as f:
self.speech.speak_to_file(f, textstr, lang, gender, format)
return self.filename
return False | python | def speak(self, textstr, lang='en-US', gender='female', format='riff-16khz-16bit-mono-pcm'):
"""
Run will call Microsoft Translate API and produce audio
"""
# print("speak(textstr=%s, lang=%s, gender=%s, format=%s)" % (textstr, lang, gender, format))
concatkey = '%s-%s-%s-%s' % (textstr, lang.lower(), gender.lower(), format)
key = self.tts_engine + '' + str(hash(concatkey))
self.filename = '%s-%s.mp3' % (key, lang)
# check if file exists
fileloc = self.directory + self.filename
if self.cache and os.path.isfile(self.directory + self.filename):
return self.filename
else:
with open(fileloc, 'wb') as f:
self.speech.speak_to_file(f, textstr, lang, gender, format)
return self.filename
return False | [
"def",
"speak",
"(",
"self",
",",
"textstr",
",",
"lang",
"=",
"'en-US'",
",",
"gender",
"=",
"'female'",
",",
"format",
"=",
"'riff-16khz-16bit-mono-pcm'",
")",
":",
"# print(\"speak(textstr=%s, lang=%s, gender=%s, format=%s)\" % (textstr, lang, gender, format))",
"concatkey",
"=",
"'%s-%s-%s-%s'",
"%",
"(",
"textstr",
",",
"lang",
".",
"lower",
"(",
")",
",",
"gender",
".",
"lower",
"(",
")",
",",
"format",
")",
"key",
"=",
"self",
".",
"tts_engine",
"+",
"''",
"+",
"str",
"(",
"hash",
"(",
"concatkey",
")",
")",
"self",
".",
"filename",
"=",
"'%s-%s.mp3'",
"%",
"(",
"key",
",",
"lang",
")",
"# check if file exists",
"fileloc",
"=",
"self",
".",
"directory",
"+",
"self",
".",
"filename",
"if",
"self",
".",
"cache",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"directory",
"+",
"self",
".",
"filename",
")",
":",
"return",
"self",
".",
"filename",
"else",
":",
"with",
"open",
"(",
"fileloc",
",",
"'wb'",
")",
"as",
"f",
":",
"self",
".",
"speech",
".",
"speak_to_file",
"(",
"f",
",",
"textstr",
",",
"lang",
",",
"gender",
",",
"format",
")",
"return",
"self",
".",
"filename",
"return",
"False"
] | Run will call Microsoft Translate API and produce audio | [
"Run",
"will",
"call",
"Microsoft",
"Translate",
"API",
"and",
"and",
"produce",
"audio"
] | 106475122be73df152865c4fe6e9388caf974085 | https://github.com/newfies-dialer/python-msspeak/blob/106475122be73df152865c4fe6e9388caf974085/msspeak/msspeak.py#L193-L210 | train |
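How the cache filename is derived, mirroring the logic in speak(); the 'MSTRANSLATE' engine prefix is a placeholder, since the real self.tts_engine is set elsewhere in the class:

tts_engine = 'MSTRANSLATE'  # placeholder for self.tts_engine
textstr, lang, gender = 'Hello world', 'en-US', 'female'
fmt = 'riff-16khz-16bit-mono-pcm'

concatkey = '%s-%s-%s-%s' % (textstr, lang.lower(), gender.lower(), fmt)
key = tts_engine + '' + str(hash(concatkey))
filename = '%s-%s.mp3' % (key, lang)
print(filename)
# Caveat: str hashes are randomised per process in Python 3, so this cache
# key is not stable across runs unless PYTHONHASHSEED is fixed.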
lambdalisue/notify | src/notify/executor.py | call | def call(args):
"""
Call terminal command and return exit_code and stdout
Parameters
----------
args : list
A command and arguments list
Returns
-------
list : [exit_code, stdout]
exit_code indicates the exit code of the command and stdout indicates the
output of the command
"""
b = StringIO()
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8'
# old python has bug in p.stdout, so the following little
# hack is required.
for stdout in iter(p.stdout.readline, ''):
if len(stdout) == 0:
break
# translate non unicode to unicode
stdout = force_unicode(stdout, encoding)
# StringIO store unicode
b.write(stdout)
# stdout require non unicode
sys.stdout.write(from_unicode(stdout, encoding))
sys.stdout.flush()
buf = b.getvalue()
p.stdout.close()
return p.returncode or 0, buf | python | def call(args):
"""
Call terminal command and return exit_code and stdout
Parameters
----------
args : list
A command and arguments list
Returns
-------
list : [exit_code, stdout]
exit_code indicates the exit code of the command and stdout indicates the
output of the command
"""
b = StringIO()
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8'
# old python has bug in p.stdout, so the following little
# hack is required.
for stdout in iter(p.stdout.readline, ''):
if len(stdout) == 0:
break
# translate non unicode to unicode
stdout = force_unicode(stdout, encoding)
# StringIO store unicode
b.write(stdout)
# stdout require non unicode
sys.stdout.write(from_unicode(stdout, encoding))
sys.stdout.flush()
buf = b.getvalue()
p.stdout.close()
return p.returncode or 0, buf | [
"def",
"call",
"(",
"args",
")",
":",
"b",
"=",
"StringIO",
"(",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"encoding",
"=",
"getattr",
"(",
"sys",
".",
"stdout",
",",
"'encoding'",
",",
"None",
")",
"or",
"'utf-8'",
"# old python has bug in p.stdout, so the following little",
"# hack is required.",
"for",
"stdout",
"in",
"iter",
"(",
"p",
".",
"stdout",
".",
"readline",
",",
"''",
")",
":",
"if",
"len",
"(",
"stdout",
")",
"==",
"0",
":",
"break",
"# translate non unicode to unicode",
"stdout",
"=",
"force_unicode",
"(",
"stdout",
",",
"encoding",
")",
"# StringIO store unicode",
"b",
".",
"write",
"(",
"stdout",
")",
"# stdout require non unicode",
"sys",
".",
"stdout",
".",
"write",
"(",
"from_unicode",
"(",
"stdout",
",",
"encoding",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"buf",
"=",
"b",
".",
"getvalue",
"(",
")",
"p",
".",
"stdout",
".",
"close",
"(",
")",
"return",
"p",
".",
"returncode",
"or",
"0",
",",
"buf"
] | Call terminal command and return exit_code and stdout
Parameters
----------
args : list
A command and arguments list
Returns
-------
list : [exit_code, stdout]
exit_code indicates the exit code of the command and stdout indicates the
output of the command | [
"Call",
"terminal",
"command",
"and",
"return",
"exit_code",
"and",
"stdout"
] | 1b6d7d1faa2cea13bfaa1f35130f279a0115e686 | https://github.com/lambdalisue/notify/blob/1b6d7d1faa2cea13bfaa1f35130f279a0115e686/src/notify/executor.py#L17-L51 | train |
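Example use of call(), assuming the notify package is importable; the command's output is echoed live and the captured text comes back with the exit code:

from notify.executor import call

exit_code, output = call(['echo', 'hello'])
print(exit_code)     # 0 on success
print(repr(output))  # the captured stdout, e.g. 'hello\n'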
lambdalisue/notify | src/notify/executor.py | get_command_str | def get_command_str(args):
"""
Get terminal command string from list of command and arguments
Parameters
----------
args : list
A command and arguments list (unicode list)
Returns
-------
str
A string indicating the terminal command
"""
single_quote = "'"
double_quote = '"'
for i, value in enumerate(args):
if " " in value and double_quote not in value:
args[i] = '"%s"' % value
elif " " in value and single_quote not in value:
args[i] = "'%s'" % value
return " ".join(args) | python | def get_command_str(args):
"""
Get terminal command string from list of command and arguments
Parameters
----------
args : list
A command and arguments list (unicode list)
Returns
-------
str
A string indicating the terminal command
"""
single_quote = "'"
double_quote = '"'
for i, value in enumerate(args):
if " " in value and double_quote not in value:
args[i] = '"%s"' % value
elif " " in value and single_quote not in value:
args[i] = "'%s'" % value
return " ".join(args) | [
"def",
"get_command_str",
"(",
"args",
")",
":",
"single_quote",
"=",
"\"'\"",
"double_quote",
"=",
"'\"'",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"args",
")",
":",
"if",
"\" \"",
"in",
"value",
"and",
"double_quote",
"not",
"in",
"value",
":",
"args",
"[",
"i",
"]",
"=",
"'\"%s\"'",
"%",
"value",
"elif",
"\" \"",
"in",
"value",
"and",
"single_quote",
"not",
"in",
"value",
":",
"args",
"[",
"i",
"]",
"=",
"\"'%s'\"",
"%",
"value",
"return",
"\" \"",
".",
"join",
"(",
"args",
")"
] | Get terminal command string from list of command and arguments
Parameters
----------
args : list
A command and arguments list (unicode list)
Returns
-------
str
A string indicating the terminal command | [
"Get",
"terminal",
"command",
"string",
"from",
"list",
"of",
"command",
"and",
"arguments"
] | 1b6d7d1faa2cea13bfaa1f35130f279a0115e686 | https://github.com/lambdalisue/notify/blob/1b6d7d1faa2cea13bfaa1f35130f279a0115e686/src/notify/executor.py#L53-L74 | train |
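Two illustrative calls showing the quoting rules (double quotes preferred; single quotes only when the value already contains a double quote). Note the function also rewrites the list it is given in place:

from notify.executor import get_command_str

print(get_command_str(['cp', 'my file.txt', 'backup']))
# cp "my file.txt" backup
print(get_command_str(['echo', 'he said "hi"']))
# echo 'he said "hi"'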
gtaylor/django-athumb | athumb/upload_handlers/gunicorn_eventlet.py | EventletTmpFileUploadHandler.receive_data_chunk | def receive_data_chunk(self, raw_data, start):
"""
Over-ridden method to circumvent the worker timeouts on large uploads.
"""
self.file.write(raw_data)
# CHANGED: This un-hangs us long enough to keep things rolling.
eventlet.sleep(0) | python | def receive_data_chunk(self, raw_data, start):
"""
Over-ridden method to circumvent the worker timeouts on large uploads.
"""
self.file.write(raw_data)
# CHANGED: This un-hangs us long enough to keep things rolling.
eventlet.sleep(0) | [
"def",
"receive_data_chunk",
"(",
"self",
",",
"raw_data",
",",
"start",
")",
":",
"self",
".",
"file",
".",
"write",
"(",
"raw_data",
")",
"# CHANGED: This un-hangs us long enough to keep things rolling.",
"eventlet",
".",
"sleep",
"(",
"0",
")"
] | Over-ridden method to circumvent the worker timeouts on large uploads. | [
"Over",
"-",
"ridden",
"method",
"to",
"circumvent",
"the",
"worker",
"timeouts",
"on",
"large",
"uploads",
"."
] | 69261ace0dff81e33156a54440874456a7b38dfb | https://github.com/gtaylor/django-athumb/blob/69261ace0dff81e33156a54440874456a7b38dfb/athumb/upload_handlers/gunicorn_eventlet.py#L15-L21 | train |
pennlabs/penn-sdk-python | penn/transit.py | Transit.stoptimes | def stoptimes(self, start_date, end_date):
"""Return all stop times in the date range
:param start_date:
The starting date for the query.
:param end_date:
The end date for the query.
>>> import datetime
>>> today = datetime.date.today()
>>> trans.stoptimes(today - datetime.timedelta(days=1), today)
"""
params = {
'start': self.format_date(start_date),
'end': self.format_date(end_date)
}
response = self._request(ENDPOINTS['STOPTIMES'], params)
return response | python | def stoptimes(self, start_date, end_date):
"""Return all stop times in the date range
:param start_date:
The starting date for the query.
:param end_date:
The end date for the query.
>>> import datetime
>>> today = datetime.date.today()
>>> trans.stoptimes(today - datetime.timedelta(days=1), today)
"""
params = {
'start': self.format_date(start_date),
'end': self.format_date(end_date)
}
response = self._request(ENDPOINTS['STOPTIMES'], params)
return response | [
"def",
"stoptimes",
"(",
"self",
",",
"start_date",
",",
"end_date",
")",
":",
"params",
"=",
"{",
"'start'",
":",
"self",
".",
"format_date",
"(",
"start_date",
")",
",",
"'end'",
":",
"self",
".",
"format_date",
"(",
"end_date",
")",
"}",
"response",
"=",
"self",
".",
"_request",
"(",
"ENDPOINTS",
"[",
"'STOPTIMES'",
"]",
",",
"params",
")",
"return",
"response"
] | Return all stop times in the date range
:param start_date:
The starting date for the query.
:param end_date:
The end date for the query.
>>> import datetime
>>> today = datetime.date.today()
>>> trans.stoptimes(today - datetime.timedelta(days=1), today) | [
"Return",
"all",
"stop",
"times",
"in",
"the",
"date",
"range"
] | 31ff12c20d69438d63bc7a796f83ce4f4c828396 | https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/transit.py#L115-L131 | train |
geophysics-ubonn/reda | lib/reda/main/logger.py | LoggingClass.setup_logger | def setup_logger(self):
"""Setup a logger
"""
self.log_list = []
handler = ListHandler(self.log_list)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.INFO)
self.handler = handler
self.logger = logger | python | def setup_logger(self):
"""Setup a logger
"""
self.log_list = []
handler = ListHandler(self.log_list)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.INFO)
self.handler = handler
self.logger = logger | [
"def",
"setup_logger",
"(",
"self",
")",
":",
"self",
".",
"log_list",
"=",
"[",
"]",
"handler",
"=",
"ListHandler",
"(",
"self",
".",
"log_list",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'%(asctime)s - %(name)s - %(levelname)s - %(message)s'",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"self",
".",
"handler",
"=",
"handler",
"self",
".",
"logger",
"=",
"logger"
] | Setup a logger | [
"Setup",
"a",
"logger"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/main/logger.py#L30-L46 | train |
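ListHandler is reda-internal and not shown here; a minimal stand-in makes the mechanism concrete (a sketch of the idea, not reda's actual implementation):

import logging

class ListHandler(logging.Handler):
    """Append formatted records to a plain Python list."""
    def __init__(self, log_list):
        super().__init__()
        self.log_list = log_list

    def emit(self, record):
        self.log_list.append(self.format(record))

messages = []
handler = ListHandler(messages)
handler.setFormatter(logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('captured')
print(messages)  # one formatted line ending in 'demo - INFO - captured'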
frasertweedale/ledgertools | ltlib/balance.py | match_to_dict | def match_to_dict(match):
"""Convert a match object into a dict.
Values are:
indent: amount of indentation of this [sub]account
parent: the parent dict (None)
account_fragment: account name fragment
balance: decimal.Decimal balance
children: sub-accounts ([])
"""
balance, indent, account_fragment = match.group(1, 2, 3)
return {
'balance': decimal.Decimal(balance),
'indent': len(indent),
'account_fragment': account_fragment,
'parent': None,
'children': [],
} | python | def match_to_dict(match):
"""Convert a match object into a dict.
Values are:
indent: amount of indentation of this [sub]account
parent: the parent dict (None)
account_fragment: account name fragment
balance: decimal.Decimal balance
children: sub-accounts ([])
"""
balance, indent, account_fragment = match.group(1, 2, 3)
return {
'balance': decimal.Decimal(balance),
'indent': len(indent),
'account_fragment': account_fragment,
'parent': None,
'children': [],
} | [
"def",
"match_to_dict",
"(",
"match",
")",
":",
"balance",
",",
"indent",
",",
"account_fragment",
"=",
"match",
".",
"group",
"(",
"1",
",",
"2",
",",
"3",
")",
"return",
"{",
"'balance'",
":",
"decimal",
".",
"Decimal",
"(",
"balance",
")",
",",
"'indent'",
":",
"len",
"(",
"indent",
")",
",",
"'account_fragment'",
":",
"account_fragment",
",",
"'parent'",
":",
"None",
",",
"'children'",
":",
"[",
"]",
",",
"}"
] | Convert a match object into a dict.
Values are:
indent: amount of indentation of this [sub]account
parent: the parent dict (None)
account_fragment: account name fragment
balance: decimal.Decimal balance
children: sub-accounts ([]) | [
"Convert",
"a",
"match",
"object",
"into",
"a",
"dict",
"."
] | a695f8667d72253e5448693c12f0282d09902aaa | https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/balance.py#L24-L41 | train |
frasertweedale/ledgertools | ltlib/balance.py | balance | def balance(output):
"""Convert `ledger balance` output into an hierarchical data structure."""
lines = map(pattern.search, output.splitlines())
stack = []
top = []
for item in map(match_to_dict, itertools.takewhile(lambda x: x, lines)):
# pop items off stack while current item has indent <=
while stack and item['indent'] <= stack[-1]['indent']:
stack.pop()
# check if this is a top-level item
if not stack:
stack.append(item)
top.append(item)
else:
item['parent'] = stack[-1]
stack[-1]['children'].append(item)
stack.append(item)
return top | python | def balance(output):
"""Convert `ledger balance` output into an hierarchical data structure."""
lines = map(pattern.search, output.splitlines())
stack = []
top = []
for item in map(match_to_dict, itertools.takewhile(lambda x: x, lines)):
# pop items off stack while current item has indent <=
while stack and item['indent'] <= stack[-1]['indent']:
stack.pop()
# check if this is a top-level item
if not stack:
stack.append(item)
top.append(item)
else:
item['parent'] = stack[-1]
stack[-1]['children'].append(item)
stack.append(item)
return top | [
"def",
"balance",
"(",
"output",
")",
":",
"lines",
"=",
"map",
"(",
"pattern",
".",
"search",
",",
"output",
".",
"splitlines",
"(",
")",
")",
"stack",
"=",
"[",
"]",
"top",
"=",
"[",
"]",
"for",
"item",
"in",
"map",
"(",
"match_to_dict",
",",
"itertools",
".",
"takewhile",
"(",
"lambda",
"x",
":",
"x",
",",
"lines",
")",
")",
":",
"# pop items off stack while current item has indent <=",
"while",
"stack",
"and",
"item",
"[",
"'indent'",
"]",
"<=",
"stack",
"[",
"-",
"1",
"]",
"[",
"'indent'",
"]",
":",
"stack",
".",
"pop",
"(",
")",
"# check if this is a top-level item",
"if",
"not",
"stack",
":",
"stack",
".",
"append",
"(",
"item",
")",
"top",
".",
"append",
"(",
"item",
")",
"else",
":",
"item",
"[",
"'parent'",
"]",
"=",
"stack",
"[",
"-",
"1",
"]",
"stack",
"[",
"-",
"1",
"]",
"[",
"'children'",
"]",
".",
"append",
"(",
"item",
")",
"stack",
".",
"append",
"(",
"item",
")",
"return",
"top"
] | Convert `ledger balance` output into a hierarchical data structure. | [
"Convert",
"ledger",
"balance",
"output",
"into",
"an",
"hierarchical",
"data",
"structure",
"."
] | a695f8667d72253e5448693c12f0282d09902aaa | https://github.com/frasertweedale/ledgertools/blob/a695f8667d72253e5448693c12f0282d09902aaa/ltlib/balance.py#L44-L64 | train |
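The indentation-to-tree technique above, demonstrated standalone on (indent, name) pairs so it runs without the module-level regex: pop the stack until its top is shallower than the current item, then attach.

items = [(0, 'Assets'), (2, 'Bank'), (4, 'Checking'), (2, 'Cash'), (0, 'Expenses')]
stack, top = [], []
for indent, name in items:
    node = {'indent': indent, 'name': name, 'children': []}
    while stack and indent <= stack[-1]['indent']:
        stack.pop()
    if not stack:
        top.append(node)
    else:
        stack[-1]['children'].append(node)
    stack.append(node)

print([n['name'] for n in top])                 # ['Assets', 'Expenses']
print([c['name'] for c in top[0]['children']])  # ['Bank', 'Cash']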
kaustavdm/pyAvroPhonetic | pyavrophonetic/utils/validate.py | is_punctuation | def is_punctuation(text):
"""Check if given string is a punctuation"""
return not (text.lower() in config.AVRO_VOWELS or
text.lower() in config.AVRO_CONSONANTS) | python | def is_punctuation(text):
"""Check if given string is a punctuation"""
return not (text.lower() in config.AVRO_VOWELS or
text.lower() in config.AVRO_CONSONANTS) | [
"def",
"is_punctuation",
"(",
"text",
")",
":",
"return",
"not",
"(",
"text",
".",
"lower",
"(",
")",
"in",
"config",
".",
"AVRO_VOWELS",
"or",
"text",
".",
"lower",
"(",
")",
"in",
"config",
".",
"AVRO_CONSONANTS",
")"
] | Check if given string is a punctuation | [
"Check",
"if",
"given",
"string",
"is",
"a",
"punctuation"
] | 26b7d567d8db025f2cac4de817e716390d7ac337 | https://github.com/kaustavdm/pyAvroPhonetic/blob/26b7d567d8db025f2cac4de817e716390d7ac337/pyavrophonetic/utils/validate.py#L43-L46 | train |
kaustavdm/pyAvroPhonetic | pyavrophonetic/utils/validate.py | is_exact | def is_exact(needle, haystack, start, end, matchnot):
"""Check exact occurrence of needle in haystack"""
return ((start >= 0 and end < len(haystack) and
haystack[start:end] == needle) ^ matchnot) | python | def is_exact(needle, haystack, start, end, matchnot):
"""Check exact occurrence of needle in haystack"""
return ((start >= 0 and end < len(haystack) and
haystack[start:end] == needle) ^ matchnot) | [
"def",
"is_exact",
"(",
"needle",
",",
"haystack",
",",
"start",
",",
"end",
",",
"matchnot",
")",
":",
"return",
"(",
"(",
"start",
">=",
"0",
"and",
"end",
"<",
"len",
"(",
"haystack",
")",
"and",
"haystack",
"[",
"start",
":",
"end",
"]",
"==",
"needle",
")",
"^",
"matchnot",
")"
] | Check exact occurrence of needle in haystack | [
"Check",
"exact",
"occurrence",
"of",
"needle",
"in",
"haystack"
] | 26b7d567d8db025f2cac4de817e716390d7ac337 | https://github.com/kaustavdm/pyAvroPhonetic/blob/26b7d567d8db025f2cac4de817e716390d7ac337/pyavrophonetic/utils/validate.py#L52-L55 | train |
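Three quick calls (import path per the module above); matchnot flips the result via XOR:

from pyavrophonetic.utils.validate import is_exact

print(is_exact('ab', 'xabz', 1, 3, False))  # True:  'xabz'[1:3] == 'ab'
print(is_exact('ab', 'xabz', 1, 3, True))   # False: same match, negated
print(is_exact('ab', 'xabz', 2, 4, False))  # False: end == len(haystack) fails the bound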
kaustavdm/pyAvroPhonetic | pyavrophonetic/utils/validate.py | fix_string_case | def fix_string_case(text):
"""Converts case-insensitive characters to lower case
Case-sensitive characters as defined in config.AVRO_CASESENSITIVES
retain their case, but others are converted to their lowercase
equivalents. The result is a string with phonetic-compatible case
which the parser will understand without confusion.
"""
fixed = []
for i in text:
if is_case_sensitive(i):
fixed.append(i)
else:
fixed.append(i.lower())
return ''.join(fixed) | python | def fix_string_case(text):
"""Converts case-insensitive characters to lower case
Case-sensitive characters as defined in config.AVRO_CASESENSITIVES
retain their case, but others are converted to their lowercase
equivalents. The result is a string with phonetic-compatible case
which the parser will understand without confusion.
"""
fixed = []
for i in text:
if is_case_sensitive(i):
fixed.append(i)
else:
fixed.append(i.lower())
return ''.join(fixed) | [
"def",
"fix_string_case",
"(",
"text",
")",
":",
"fixed",
"=",
"[",
"]",
"for",
"i",
"in",
"text",
":",
"if",
"is_case_sensitive",
"(",
"i",
")",
":",
"fixed",
".",
"append",
"(",
"i",
")",
"else",
":",
"fixed",
".",
"append",
"(",
"i",
".",
"lower",
"(",
")",
")",
"return",
"''",
".",
"join",
"(",
"fixed",
")"
] | Converts case-insensitive characters to lower case
Case-sensitive characters as defined in config.AVRO_CASESENSITIVES
retain their case, but others are converted to their lowercase
equivalents. The result is a string with phonetic-compatible case
which the parser will understand without confusion. | [
"Converts",
"case",
"-",
"insensitive",
"characters",
"to",
"lower",
"case"
] | 26b7d567d8db025f2cac4de817e716390d7ac337 | https://github.com/kaustavdm/pyAvroPhonetic/blob/26b7d567d8db025f2cac4de817e716390d7ac337/pyavrophonetic/utils/validate.py#L57-L71 | train |
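The same normalisation, sketched with a stand-in case-sensitive set; the real set lives in pyavrophonetic's config module, so the letters below are purely illustrative:

CASE_SENSITIVE = set('oiudgjnrstyz')  # hypothetical stand-in set

def fix_case(text):
    return ''.join(
        c if c.lower() in CASE_SENSITIVE else c.lower()
        for c in text)

print(fix_case('KoThA'))  # 'koTha': 'T' survives, the rest are lowered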
geophysics-ubonn/reda | lib/reda/configs/configManager.py | ConfigManager._crmod_to_abmn | def _crmod_to_abmn(self, configs):
"""convert crmod-style configurations to a Nx4 array
CRMod-style configurations merge A and B, and M and N, electrode
numbers into one large integer each:
.. math ::
AB = A \cdot 10^4 + B
MN = M \cdot 10^4 + N
Parameters
----------
configs: numpy.ndarray
Nx2 array holding the configurations to convert
Examples
--------
>>> import numpy as np
>>> from reda.configs.configManager import ConfigManager
>>> config = ConfigManager(nr_of_electrodes=5)
>>> crmod_configs = np.array((
... (10002, 40003),
... (10010, 30004),
... ))
>>> abmn = config._crmod_to_abmn(crmod_configs)
>>> print(abmn)
[[ 2 1 3 4]
[10 1 4 3]]
"""
A = configs[:, 0] % 1e4
B = np.floor(configs[:, 0] / 1e4).astype(int)
M = configs[:, 1] % 1e4
N = np.floor(configs[:, 1] / 1e4).astype(int)
ABMN = np.hstack((A[:, np.newaxis], B[:, np.newaxis], M[:, np.newaxis],
N[:, np.newaxis])).astype(int)
return ABMN | python | def _crmod_to_abmn(self, configs):
"""convert crmod-style configurations to a Nx4 array
CRMod-style configurations merge A and B, and M and N, electrode
numbers into one large integer each:
.. math ::
AB = A \cdot 10^4 + B
MN = M \cdot 10^4 + N
Parameters
----------
configs: numpy.ndarray
Nx2 array holding the configurations to convert
Examples
--------
>>> import numpy as np
>>> from reda.configs.configManager import ConfigManager
>>> config = ConfigManager(nr_of_electrodes=5)
>>> crmod_configs = np.array((
... (10002, 40003),
... (10010, 30004),
... ))
>>> abmn = config._crmod_to_abmn(crmod_configs)
>>> print(abmn)
[[ 2 1 3 4]
[10 1 4 3]]
"""
A = configs[:, 0] % 1e4
B = np.floor(configs[:, 0] / 1e4).astype(int)
M = configs[:, 1] % 1e4
N = np.floor(configs[:, 1] / 1e4).astype(int)
ABMN = np.hstack((A[:, np.newaxis], B[:, np.newaxis], M[:, np.newaxis],
N[:, np.newaxis])).astype(int)
return ABMN | [
"def",
"_crmod_to_abmn",
"(",
"self",
",",
"configs",
")",
":",
"A",
"=",
"configs",
"[",
":",
",",
"0",
"]",
"%",
"1e4",
"B",
"=",
"np",
".",
"floor",
"(",
"configs",
"[",
":",
",",
"0",
"]",
"/",
"1e4",
")",
".",
"astype",
"(",
"int",
")",
"M",
"=",
"configs",
"[",
":",
",",
"1",
"]",
"%",
"1e4",
"N",
"=",
"np",
".",
"floor",
"(",
"configs",
"[",
":",
",",
"1",
"]",
"/",
"1e4",
")",
".",
"astype",
"(",
"int",
")",
"ABMN",
"=",
"np",
".",
"hstack",
"(",
"(",
"A",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"B",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"M",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"N",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
")",
".",
"astype",
"(",
"int",
")",
"return",
"ABMN"
] | convert crmod-style configurations to a Nx4 array
CRMod-style configurations merge A and B, and M and N, electrode
numbers into one large integer each:
.. math ::
AB = A \cdot 10^4 + B
MN = M \cdot 10^4 + N
Parameters
----------
configs: numpy.ndarray
Nx2 array holding the configurations to convert
Examples
--------
>>> import numpy as np
>>> from reda.configs.configManager import ConfigManager
>>> config = ConfigManager(nr_of_electrodes=5)
>>> crmod_configs = np.array((
... (10002, 40003),
... (10010, 30004),
... ))
>>> abmn = config._crmod_to_abmn(crmod_configs)
>>> print(abmn)
[[ 2 1 3 4]
[10 1 4 3]] | [
"convert",
"crmod",
"-",
"style",
"configurations",
"to",
"a",
"Nx4",
"array"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L54-L93 | train |
geophysics-ubonn/reda | lib/reda/configs/configManager.py | ConfigManager.load_crmod_config | def load_crmod_config(self, filename):
"""Load a CRMod configuration file
Parameters
----------
filename: string
absolute or relative path to a crmod config.dat file
"""
with open(filename, 'r') as fid:
nr_of_configs = int(fid.readline().strip())
configs = np.loadtxt(fid)
print('loaded configs:', configs.shape)
if nr_of_configs != configs.shape[0]:
raise Exception(
'indicated number of measurements does not equal ' +
'to actual number of measurements')
ABMN = self._crmod_to_abmn(configs[:, 0:2])
self.configs = ABMN | python | def load_crmod_config(self, filename):
"""Load a CRMod configuration file
Parameters
----------
filename: string
absolute or relative path to a crmod config.dat file
"""
with open(filename, 'r') as fid:
nr_of_configs = int(fid.readline().strip())
configs = np.loadtxt(fid)
print('loaded configs:', configs.shape)
if nr_of_configs != configs.shape[0]:
raise Exception(
'indicated number of measurements does not equal ' +
'the actual number of measurements')
ABMN = self._crmod_to_abmn(configs[:, 0:2])
self.configs = ABMN | [
"def",
"load_crmod_config",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"fid",
":",
"nr_of_configs",
"=",
"int",
"(",
"fid",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
")",
"configs",
"=",
"np",
".",
"loadtxt",
"(",
"fid",
")",
"print",
"(",
"'loaded configs:'",
",",
"configs",
".",
"shape",
")",
"if",
"nr_of_configs",
"!=",
"configs",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"Exception",
"(",
"'indicated number of measurements does not equal '",
"+",
"'to actual number of measurements'",
")",
"ABMN",
"=",
"self",
".",
"_crmod_to_abmn",
"(",
"configs",
"[",
":",
",",
"0",
":",
"2",
"]",
")",
"self",
".",
"configs",
"=",
"ABMN"
] | Load a CRMod configuration file
Parameters
----------
filename: string
absolute or relative path to a crmod config.dat file | [
"Load",
"a",
"CRMod",
"configuration",
"file"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L101-L119 | train |
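The config.dat layout implied by load_crmod_config is simple: a first line holding the number of measurements, then one packed 'AB MN' pair per row. A self-contained round-trip sketch (file name and values are illustrative; atleast_2d guards the single-row case, which the method itself does not need to handle here):

import numpy as np

pairs = np.array([[10002, 40003],
                  [10010, 30004]])

with open('config.dat', 'w') as fid:           # write header + packed pairs
    fid.write('{}\n'.format(pairs.shape[0]))
    np.savetxt(fid, pairs, fmt='%i %i')

with open('config.dat', 'r') as fid:           # read back and sanity-check
    nr_of_configs = int(fid.readline().strip())
    configs = np.atleast_2d(np.loadtxt(fid))
assert nr_of_configs == configs.shape[0]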
geophysics-ubonn/reda | lib/reda/configs/configManager.py | ConfigManager._get_crmod_abmn | def _get_crmod_abmn(self):
"""return a Nx2 array with the measurement configurations formatted
CRTomo style
"""
ABMN = np.vstack((
self.configs[:, 0] * 1e4 + self.configs[:, 1],
self.configs[:, 2] * 1e4 + self.configs[:, 3],
)).T.astype(int)
return ABMN | python | def _get_crmod_abmn(self):
"""return a Nx2 array with the measurement configurations formatted
CRTomo style
"""
ABMN = np.vstack((
self.configs[:, 0] * 1e4 + self.configs[:, 1],
self.configs[:, 2] * 1e4 + self.configs[:, 3],
)).T.astype(int)
return ABMN | [
"def",
"_get_crmod_abmn",
"(",
"self",
")",
":",
"ABMN",
"=",
"np",
".",
"vstack",
"(",
"(",
"self",
".",
"configs",
"[",
":",
",",
"0",
"]",
"*",
"1e4",
"+",
"self",
".",
"configs",
"[",
":",
",",
"1",
"]",
",",
"self",
".",
"configs",
"[",
":",
",",
"2",
"]",
"*",
"1e4",
"+",
"self",
".",
"configs",
"[",
":",
",",
"3",
"]",
",",
")",
")",
".",
"T",
".",
"astype",
"(",
"int",
")",
"return",
"ABMN"
] | return an Nx2 array with the measurement configurations formatted
CRTomo style | [
"return",
"a",
"Nx2",
"array",
"with",
"the",
"measurement",
"configurations",
"formatted",
"CRTomo",
"style"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L156-L164 | train |
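_get_crmod_abmn is the inverse packing; assuming electrode numbers stay below 10^4, the round trip with the decoding shown earlier is lossless. A short standalone sketch:

import numpy as np

abmn = np.array([[2, 1, 3, 4],
                 [10, 1, 4, 3]])

# pack back into CRMod style: AB = A*10**4 + B, MN = M*10**4 + N
packed = np.vstack((abmn[:, 0] * 1e4 + abmn[:, 1],
                    abmn[:, 2] * 1e4 + abmn[:, 3])).T.astype(int)
print(packed)  # [[ 20001  30004]
               #  [100001  40003]]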
geophysics-ubonn/reda | lib/reda/configs/configManager.py | ConfigManager.write_crmod_volt | def write_crmod_volt(self, filename, mid):
"""Write the measurements to the output file in the volt.dat file
format that can be read by CRTomo.
Parameters
----------
filename: string
output filename
mid: int or [int, int]
measurement ids of magnitude and phase measurements. If only one ID
is given, then the phase column is filled with zeros
"""
ABMN = self._get_crmod_abmn()
if isinstance(mid, (list, tuple)):
mag_data = self.measurements[mid[0]]
pha_data = self.measurements[mid[1]]
else:
mag_data = self.measurements[mid]
pha_data = np.zeros(mag_data.shape)
all_data = np.hstack((ABMN, mag_data[:, np.newaxis],
pha_data[:, np.newaxis]))
with open(filename, 'wb') as fid:
fid.write(bytes(
'{0}\n'.format(ABMN.shape[0]),
'utf-8',
))
np.savetxt(fid, all_data, fmt='%i %i %f %f') | python | def write_crmod_volt(self, filename, mid):
"""Write the measurements to the output file in the volt.dat file
format that can be read by CRTomo.
Parameters
----------
filename: string
output filename
mid: int or [int, int]
measurement ids of magnitude and phase measurements. If only one ID
is given, then the phase column is filled with zeros
"""
ABMN = self._get_crmod_abmn()
if isinstance(mid, (list, tuple)):
mag_data = self.measurements[mid[0]]
pha_data = self.measurements[mid[1]]
else:
mag_data = self.measurements[mid]
pha_data = np.zeros(mag_data.shape)
all_data = np.hstack((ABMN, mag_data[:, np.newaxis],
pha_data[:, np.newaxis]))
with open(filename, 'wb') as fid:
fid.write(bytes(
'{0}\n'.format(ABMN.shape[0]),
'utf-8',
))
np.savetxt(fid, all_data, fmt='%i %i %f %f') | [
"def",
"write_crmod_volt",
"(",
"self",
",",
"filename",
",",
"mid",
")",
":",
"ABMN",
"=",
"self",
".",
"_get_crmod_abmn",
"(",
")",
"if",
"isinstance",
"(",
"mid",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"mag_data",
"=",
"self",
".",
"measurements",
"[",
"mid",
"[",
"0",
"]",
"]",
"pha_data",
"=",
"self",
".",
"measurements",
"[",
"mid",
"[",
"1",
"]",
"]",
"else",
":",
"mag_data",
"=",
"self",
".",
"measurements",
"[",
"mid",
"]",
"pha_data",
"=",
"np",
".",
"zeros",
"(",
"mag_data",
".",
"shape",
")",
"all_data",
"=",
"np",
".",
"hstack",
"(",
"(",
"ABMN",
",",
"mag_data",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"pha_data",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"fid",
":",
"fid",
".",
"write",
"(",
"bytes",
"(",
"'{0}\\n'",
".",
"format",
"(",
"ABMN",
".",
"shape",
"[",
"0",
"]",
")",
",",
"'utf-8'",
",",
")",
")",
"np",
".",
"savetxt",
"(",
"fid",
",",
"all_data",
",",
"fmt",
"=",
"'%i %i %f %f'",
")"
] | Write the measurements to the output file in the volt.dat file
format that can be read by CRTomo.
Parameters
----------
filename: string
output filename
mid: int or [int, int]
measurement ids of magnitude and phase measurements. If only one ID
is given, then the phase column is filled with zeros | [
"Write",
"the",
"measurements",
"to",
"the",
"output",
"file",
"in",
"the",
"volt",
".",
"dat",
"file",
"format",
"that",
"can",
"be",
"read",
"by",
"CRTomo",
"."
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L166-L196 | train |
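write_crmod_volt emits the same header-plus-rows layout, with magnitude and phase columns appended; when only one measurement id is given, the phase column is zero-filled. A sketch with hypothetical data, needing no reda instance:

import numpy as np

ABMN = np.array([[20001, 30004],
                 [100001, 40003]])
mag_data = np.array([0.5, 0.7])        # hypothetical magnitudes
pha_data = np.zeros(mag_data.shape)    # zero-filled phase column

all_data = np.hstack((ABMN, mag_data[:, np.newaxis], pha_data[:, np.newaxis]))
with open('volt.dat', 'wb') as fid:
    fid.write(bytes('{}\n'.format(ABMN.shape[0]), 'utf-8'))
    np.savetxt(fid, all_data, fmt='%i %i %f %f')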
geophysics-ubonn/reda | lib/reda/configs/configManager.py | ConfigManager.write_crmod_config | def write_crmod_config(self, filename):
"""Write the configurations to a configuration file in the CRMod format
All configurations are merged into one prior to writing to file
Parameters
----------
filename: string
absolute or relative path to output filename (usually config.dat)
"""
ABMN = self._get_crmod_abmn()
with open(filename, 'wb') as fid:
fid.write(bytes(
'{0}\n'.format(ABMN.shape[0]),
'utf-8',
))
np.savetxt(fid, ABMN.astype(int), fmt='%i %i') | python | def write_crmod_config(self, filename):
"""Write the configurations to a configuration file in the CRMod format
All configurations are merged into one prior to writing to file
Parameters
----------
filename: string
absolute or relative path to output filename (usually config.dat)
"""
ABMN = self._get_crmod_abmn()
with open(filename, 'wb') as fid:
fid.write(bytes(
'{0}\n'.format(ABMN.shape[0]),
'utf-8',
))
np.savetxt(fid, ABMN.astype(int), fmt='%i %i') | [
"def",
"write_crmod_config",
"(",
"self",
",",
"filename",
")",
":",
"ABMN",
"=",
"self",
".",
"_get_crmod_abmn",
"(",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"fid",
":",
"fid",
".",
"write",
"(",
"bytes",
"(",
"'{0}\\n'",
".",
"format",
"(",
"ABMN",
".",
"shape",
"[",
"0",
"]",
")",
",",
"'utf-8'",
",",
")",
")",
"np",
".",
"savetxt",
"(",
"fid",
",",
"ABMN",
".",
"astype",
"(",
"int",
")",
",",
"fmt",
"=",
"'%i %i'",
")"
] | Write the configurations to a configuration file in the CRMod format
All configurations are merged into one prior to writing to file
Parameters
----------
filename: string
absolute or relative path to output filename (usually config.dat) | [
"Write",
"the",
"configurations",
"to",
"a",
"configuration",
"file",
"in",
"the",
"CRMod",
"format",
"All",
"configurations",
"are",
"merged",
"into",
"one",
"previor",
"to",
"writing",
"to",
"file"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L198-L214 | train |
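In practice the writer is typically chained with a generator method. A hedged usage sketch combining only methods shown in these records (assuming reda is installed; parameters and the file name are illustrative, and gen_dipole_dipole is documented in the next record):

from reda.configs.configManager import ConfigManager

config = ConfigManager(nr_of_electrodes=10)
config.gen_dipole_dipole(skipc=2)         # populate config.configs
config.write_crmod_config('config.dat')   # header line + packed 'AB MN' rows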
geophysics-ubonn/reda | lib/reda/configs/configManager.py | ConfigManager.gen_dipole_dipole | def gen_dipole_dipole(self,
skipc,
skipv=None,
stepc=1,
stepv=1,
nr_voltage_dipoles=10,
before_current=False,
start_skip=0,
N=None):
"""Generate dipole-dipole configurations
Parameters
----------
skipc: int
number of electrode positions that are skipped between electrodes
of a given dipole
skipv: int
number of electrode positions that are skipped between electrodes
of a given voltage dipole; defaults to skipc
stepc: int
steplength between subsequent current dipoles. A steplength of 1
will produce increments by one, i.e., 3-4, 4-5, 5-6 ...
stepv: int
steplength between subsequent voltage dipoles. A steplength of 1
will produce increments by one, i.e., 3-4, 4-5, 5-6 ...
nr_voltage_dipoles: int
the number of voltage dipoles to generate for each current
injection dipole
before_current: bool, optional
if set to True, also generate voltage dipoles in front of current
dipoles.
start_skip: int, optional
how many electrodes to skip before/after the first/second current
electrode.
N: int, optional
number of electrodes, must be given if not already known by the
config instance
Examples
--------
>>> from reda.configs.configManager import ConfigManager
>>> config = ConfigManager(nr_of_electrodes=10)
>>> config.gen_dipole_dipole(skipc=2)
array([[ 1, 4, 5, 8],
[ 1, 4, 6, 9],
[ 1, 4, 7, 10],
[ 2, 5, 6, 9],
[ 2, 5, 7, 10],
[ 3, 6, 7, 10]])
"""
if N is None and self.nr_electrodes is None:
raise Exception('You must provide the number of electrodes')
elif N is None:
N = self.nr_electrodes
# by default, current and voltage dipoles have the same size
if skipv is None:
skipv = skipc
configs = []
# current dipoles
for a in range(0, N - skipv - skipc - 3, stepc):
b = a + skipc + 1
nr = 0
# potential dipoles before current injection
if before_current:
for n in range(a - start_skip - 1, -1, -stepv):
nr += 1
if nr > nr_voltage_dipoles:
continue
m = n - skipv - 1
if m < 0:
continue
quadpole = np.array((a, b, m, n)) + 1
configs.append(quadpole)
# potential dipoles after current injection
nr = 0
for m in range(b + start_skip + 1, N - skipv - 1, stepv):
nr += 1
if nr > nr_voltage_dipoles:
continue
n = m + skipv + 1
quadpole = np.array((a, b, m, n)) + 1
configs.append(quadpole)
configs = np.array(configs)
# now add to the instance
if self.configs is None:
self.configs = configs
else:
self.configs = np.vstack((self.configs, configs))
return configs | python | def gen_dipole_dipole(self,
skipc,
skipv=None,
stepc=1,
stepv=1,
nr_voltage_dipoles=10,
before_current=False,
start_skip=0,
N=None):
"""Generate dipole-dipole configurations
Parameters
----------
skipc: int
number of electrode positions that are skipped between electrodes
of a given dipole
skipv: int
number of electrode positions that are skipped between electrodes
of a given voltage dipole; defaults to skipc
stepc: int
steplength between subsequent current dipoles. A steplength of 1
will produce increments by one, i.e., 3-4, 4-5, 5-6 ...
stepv: int
steplength between subsequent voltage dipoles. A steplength of 1
will produce increments by one, i.e., 3-4, 4-5, 5-6 ...
nr_voltage_dipoles: int
the number of voltage dipoles to generate for each current
injection dipole
before_current: bool, optional
if set to True, also generate voltage dipoles in front of current
dipoles.
start_skip: int, optional
how many electrodes to skip before/after the first/second current
electrode.
N: int, optional
number of electrodes, must be given if not already known by the
config instance
Examples
--------
>>> from reda.configs.configManager import ConfigManager
>>> config = ConfigManager(nr_of_electrodes=10)
>>> config.gen_dipole_dipole(skipc=2)
array([[ 1, 4, 5, 8],
[ 1, 4, 6, 9],
[ 1, 4, 7, 10],
[ 2, 5, 6, 9],
[ 2, 5, 7, 10],
[ 3, 6, 7, 10]])
"""
if N is None and self.nr_electrodes is None:
raise Exception('You must provide the number of electrodes')
elif N is None:
N = self.nr_electrodes
# by default, current and voltage dipoles have the same size
if skipv is None:
skipv = skipc
configs = []
# current dipoles
for a in range(0, N - skipv - skipc - 3, stepc):
b = a + skipc + 1
nr = 0
# potential dipoles before current injection
if before_current:
for n in range(a - start_skip - 1, -1, -stepv):
nr += 1
if nr > nr_voltage_dipoles:
continue
m = n - skipv - 1
if m < 0:
continue
quadpole = np.array((a, b, m, n)) + 1
configs.append(quadpole)
# potential dipoles after current injection
nr = 0
for m in range(b + start_skip + 1, N - skipv - 1, stepv):
nr += 1
if nr > nr_voltage_dipoles:
continue
n = m + skipv + 1
quadpole = np.array((a, b, m, n)) + 1
configs.append(quadpole)
configs = np.array(configs)
# now add to the instance
if self.configs is None:
self.configs = configs
else:
self.configs = np.vstack((self.configs, configs))
return configs | [
"def",
"gen_dipole_dipole",
"(",
"self",
",",
"skipc",
",",
"skipv",
"=",
"None",
",",
"stepc",
"=",
"1",
",",
"stepv",
"=",
"1",
",",
"nr_voltage_dipoles",
"=",
"10",
",",
"before_current",
"=",
"False",
",",
"start_skip",
"=",
"0",
",",
"N",
"=",
"None",
")",
":",
"if",
"N",
"is",
"None",
"and",
"self",
".",
"nr_electrodes",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'You must provide the number of electrodes'",
")",
"elif",
"N",
"is",
"None",
":",
"N",
"=",
"self",
".",
"nr_electrodes",
"# by default, current voltage dipoles have the same size",
"if",
"skipv",
"is",
"None",
":",
"skipv",
"=",
"skipc",
"configs",
"=",
"[",
"]",
"# current dipoles",
"for",
"a",
"in",
"range",
"(",
"0",
",",
"N",
"-",
"skipv",
"-",
"skipc",
"-",
"3",
",",
"stepc",
")",
":",
"b",
"=",
"a",
"+",
"skipc",
"+",
"1",
"nr",
"=",
"0",
"# potential dipoles before current injection",
"if",
"before_current",
":",
"for",
"n",
"in",
"range",
"(",
"a",
"-",
"start_skip",
"-",
"1",
",",
"-",
"1",
",",
"-",
"stepv",
")",
":",
"nr",
"+=",
"1",
"if",
"nr",
">",
"nr_voltage_dipoles",
":",
"continue",
"m",
"=",
"n",
"-",
"skipv",
"-",
"1",
"if",
"m",
"<",
"0",
":",
"continue",
"quadpole",
"=",
"np",
".",
"array",
"(",
"(",
"a",
",",
"b",
",",
"m",
",",
"n",
")",
")",
"+",
"1",
"configs",
".",
"append",
"(",
"quadpole",
")",
"# potential dipoles after current injection",
"nr",
"=",
"0",
"for",
"m",
"in",
"range",
"(",
"b",
"+",
"start_skip",
"+",
"1",
",",
"N",
"-",
"skipv",
"-",
"1",
",",
"stepv",
")",
":",
"nr",
"+=",
"1",
"if",
"nr",
">",
"nr_voltage_dipoles",
":",
"continue",
"n",
"=",
"m",
"+",
"skipv",
"+",
"1",
"quadpole",
"=",
"np",
".",
"array",
"(",
"(",
"a",
",",
"b",
",",
"m",
",",
"n",
")",
")",
"+",
"1",
"configs",
".",
"append",
"(",
"quadpole",
")",
"configs",
"=",
"np",
".",
"array",
"(",
"configs",
")",
"# now add to the instance",
"if",
"self",
".",
"configs",
"is",
"None",
":",
"self",
".",
"configs",
"=",
"configs",
"else",
":",
"self",
".",
"configs",
"=",
"np",
".",
"vstack",
"(",
"(",
"self",
".",
"configs",
",",
"configs",
")",
")",
"return",
"configs"
] | Generate dipole-dipole configurations
Parameters
----------
skipc: int
number of electrode positions that are skipped between electrodes
of a given dipole
skipv: int
number of electrode positions that are skipped between electrodes
of a given voltage dipole; defaults to skipc
stepc: int
steplength between subsequent current dipoles. A steplength of 1
will produce increments by one, i.e., 3-4, 4-5, 5-6 ...
stepv: int
steplength between subsequent voltage dipoles. A steplength of 1
will produce increments by one, i.e., 3-4, 4-5, 5-6 ...
nr_voltage_dipoles: int
the number of voltage dipoles to generate for each current
injection dipole
before_current: bool, optional
if set to True, also generate voltage dipoles in front of current
dipoles.
start_skip: int, optional
how many electrodes to skip before/after the first/second current
electrode.
N: int, optional
number of electrodes, must be given if not already known by the
config instance
Examples
--------
>>> from reda.configs.configManager import ConfigManager
>>> config = ConfigManager(nr_of_electrodes=10)
>>> config.gen_dipole_dipole(skipc=2)
array([[ 1, 4, 5, 8],
[ 1, 4, 6, 9],
[ 1, 4, 7, 10],
[ 2, 5, 6, 9],
[ 2, 5, 7, 10],
[ 3, 6, 7, 10]]) | [
"Generate",
"dipole",
"-",
"dipole",
"configurations"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L216-L311 | train |
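The doctest result above can be re-derived standalone. The loop below mirrors the forward (after-current) branch of the record's code for skipc=2, N=10 with default steps; the nr_voltage_dipoles cap is omitted because it is never reached here:

N, skipc = 10, 2
skipv = skipc                       # default: both dipoles have the same size
quadpoles = []
for a in range(0, N - skipv - skipc - 3):    # zero-based current electrode A
    b = a + skipc + 1
    for m in range(b + 1, N - skipv - 1):    # voltage dipoles after injection
        n = m + skipv + 1
        quadpoles.append((a + 1, b + 1, m + 1, n + 1))  # back to 1-based
print(quadpoles)
# [(1, 4, 5, 8), (1, 4, 6, 9), (1, 4, 7, 10),
#  (2, 5, 6, 9), (2, 5, 7, 10), (3, 6, 7, 10)]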
geophysics-ubonn/reda | lib/reda/configs/configManager.py | ConfigManager.gen_gradient | def gen_gradient(self, skip=0, step=1, vskip=0, vstep=1):
"""Generate gradient measurements
Parameters
----------
skip: int
distance between current electrodes
step: int
steplength between subsequent current dipoles
vskip: int
distance between voltage electrodes
vstep: int
steplength between subsequent voltage dipoles
"""
N = self.nr_electrodes
quadpoles = []
for a in range(1, N - skip, step):
b = a + skip + 1
for m in range(a + 1, b - vskip - 1, vstep):
n = m + vskip + 1
quadpoles.append((a, b, m, n))
configs = np.array(quadpoles)
if configs.size == 0:
return None
self.add_to_configs(configs)
return configs | python | def gen_gradient(self, skip=0, step=1, vskip=0, vstep=1):
"""Generate gradient measurements
Parameters
----------
skip: int
distance between current electrodes
step: int
steplength between subsequent current dipoles
vskip: int
distance between voltage electrodes
vstep: int
steplength between subsequent voltage dipoles
"""
N = self.nr_electrodes
quadpoles = []
for a in range(1, N - skip, step):
b = a + skip + 1
for m in range(a + 1, b - vskip - 1, vstep):
n = m + vskip + 1
quadpoles.append((a, b, m, n))
configs = np.array(quadpoles)
if configs.size == 0:
return None
self.add_to_configs(configs)
return configs | [
"def",
"gen_gradient",
"(",
"self",
",",
"skip",
"=",
"0",
",",
"step",
"=",
"1",
",",
"vskip",
"=",
"0",
",",
"vstep",
"=",
"1",
")",
":",
"N",
"=",
"self",
".",
"nr_electrodes",
"quadpoles",
"=",
"[",
"]",
"for",
"a",
"in",
"range",
"(",
"1",
",",
"N",
"-",
"skip",
",",
"step",
")",
":",
"b",
"=",
"a",
"+",
"skip",
"+",
"1",
"for",
"m",
"in",
"range",
"(",
"a",
"+",
"1",
",",
"b",
"-",
"vskip",
"-",
"1",
",",
"vstep",
")",
":",
"n",
"=",
"m",
"+",
"vskip",
"+",
"1",
"quadpoles",
".",
"append",
"(",
"(",
"a",
",",
"b",
",",
"m",
",",
"n",
")",
")",
"configs",
"=",
"np",
".",
"array",
"(",
"quadpoles",
")",
"if",
"configs",
".",
"size",
"==",
"0",
":",
"return",
"None",
"self",
".",
"add_to_configs",
"(",
"configs",
")",
"return",
"configs"
] | Generate gradient measurements
Parameters
----------
skip: int
distance between current electrodes
step: int
steplength between subsequent current dipoles
vskip: int
distance between voltage electrodes
vstep: int
steplength between subsequent voltage dipoles | [
"Generate",
"gradient",
"measurements"
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L313-L341 | train |
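Note that with the default skip=0 the inner range in gen_gradient is empty, which is exactly why the method guards against configs.size == 0. A standalone sketch of the same loop with an illustrative wide current dipole (skip=4, N=8):

N, skip, vskip = 8, 4, 0
quadpoles = []
for a in range(1, N - skip):
    b = a + skip + 1                       # current dipole spans skip electrodes
    for m in range(a + 1, b - vskip - 1):
        n = m + vskip + 1                  # voltage dipole inside the current one
        quadpoles.append((a, b, m, n))
print(quadpoles[:3])  # [(1, 6, 2, 3), (1, 6, 3, 4), (1, 6, 4, 5)]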
geophysics-ubonn/reda | lib/reda/configs/configManager.py | ConfigManager.remove_duplicates | def remove_duplicates(self, configs=None):
"""remove duplicate entries from 4-point configurations. If no
configurations are provided, then use self.configs. Unique
configurations are only returned if configs is not None.
Parameters
----------
configs: Nx4 numpy.ndarray, optional
remove duplicates from these configurations instead of from
self.configs.
Returns
-------
configs_unique: Kx4 numpy.ndarray
unique configurations. Only returned if configs is not None
"""
if configs is None:
c = self.configs
else:
c = configs
struct = c.view(c.dtype.descr * 4)
configs_unique = np.unique(struct).view(c.dtype).reshape(-1, 4)
if configs is None:
self.configs = configs_unique
else:
return configs_unique | python | def remove_duplicates(self, configs=None):
"""remove duplicate entries from 4-point configurations. If no
configurations are provided, then use self.configs. Unique
configurations are only returned if configs is not None.
Parameters
----------
configs: Nx4 numpy.ndarray, optional
remove duplicates from these configurations instead of from
self.configs.
Returns
-------
configs_unique: Kx4 numpy.ndarray
unique configurations. Only returned if configs is not None
"""
if configs is None:
c = self.configs
else:
c = configs
struct = c.view(c.dtype.descr * 4)
configs_unique = np.unique(struct).view(c.dtype).reshape(-1, 4)
if configs is None:
self.configs = configs_unique
else:
return configs_unique | [
"def",
"remove_duplicates",
"(",
"self",
",",
"configs",
"=",
"None",
")",
":",
"if",
"configs",
"is",
"None",
":",
"c",
"=",
"self",
".",
"configs",
"else",
":",
"c",
"=",
"configs",
"struct",
"=",
"c",
".",
"view",
"(",
"c",
".",
"dtype",
".",
"descr",
"*",
"4",
")",
"configs_unique",
"=",
"np",
".",
"unique",
"(",
"struct",
")",
".",
"view",
"(",
"c",
".",
"dtype",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"4",
")",
"if",
"configs",
"is",
"None",
":",
"self",
".",
"configs",
"=",
"configs_unique",
"else",
":",
"return",
"configs_unique"
] | remove duplicate entries from 4-point configurations. If no
configurations are provided, then use self.configs. Unique
configurations are only returned if configs is not None.
Parameters
----------
configs: Nx4 numpy.ndarray, optional
remove duplicates from these configurations instead of from
self.configs.
Returns
-------
configs_unique: Kx4 numpy.ndarray
unique configurations. Only returned if configs is not None | [
"remove",
"duplicate",
"entries",
"from",
"4",
"-",
"point",
"configurations",
".",
"If",
"no",
"configurations",
"are",
"provided",
"then",
"use",
"self",
".",
"configs",
".",
"Unique",
"configurations",
"are",
"only",
"returned",
"if",
"configs",
"is",
"not",
"None",
"."
] | 46a939729e40c7c4723315c03679c40761152e9e | https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/configs/configManager.py#L431-L457 | train |
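The structured-view trick in remove_duplicates makes np.unique compare whole rows at once; it requires a C-contiguous array. A minimal sketch (on recent numpy, np.unique(c, axis=0) achieves the same row-wise deduplication, likewise with sorted output):

import numpy as np

c = np.array([[1, 2, 3, 4],
              [1, 2, 3, 4],
              [2, 3, 4, 5]])

# view each row as a single 4-field record so np.unique deduplicates rows
struct = c.view(c.dtype.descr * 4)
unique_rows = np.unique(struct).view(c.dtype).reshape(-1, 4)
print(unique_rows)  # [[1 2 3 4]
                    #  [2 3 4 5]]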