Dataset schema (one record per function):
  repository_name             string    (7 to 55 chars)
  func_path_in_repository     string    (4 to 223 chars)
  func_name                   string    (1 to 134 chars)
  whole_func_string           string    (75 to 104k chars)
  language                    string    (1 class)
  func_code_string            string    (75 to 104k chars)
  func_code_tokens            sequence  (19 to 28.4k tokens)
  func_documentation_string   string    (1 to 46.9k chars)
  func_documentation_tokens   sequence  (1 to 1.97k tokens)
  split_name                  string    (1 class)
  func_code_url               string    (87 to 315 chars)
franciscogarate/pyliferisk
pyliferisk/__init__.py
Mx
def Mx(mt, x):
    """ Return the Mx commutation function: the sum of Cx from age x to the end of the table. """
    n = len(mt.Cx)
    sum1 = 0
    for j in range(x, n):
        sum1 += mt.Cx[j]
    return sum1
python
[ "def", "Mx", "(", "mt", ",", "x", ")", ":", "n", "=", "len", "(", "mt", ".", "Cx", ")", "sum1", "=", "0", "for", "j", "in", "range", "(", "x", ",", "n", ")", ":", "k", "=", "mt", ".", "Cx", "[", "j", "]", "sum1", "+=", "k", "return", "sum1" ]
Return the Mx commutation function: the sum of Cx from age x to the end of the table.
[ "Return", "the", "Mx" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L272-L279
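For reference, Mx is one of the classical commutation columns. The sketch below shows how such columns can be built from a survivor column lx and a flat interest rate i; this is an assumption about what the Actuarial class precomputes, since that code is not part of this excerpt:

def commutation(lx, i):
    """Build the classical commutation columns from survivors lx and rate i (sketch)."""
    v = 1.0 / (1.0 + i)                                                  # annual discount factor
    n = len(lx)
    dx = [lx[x] - (lx[x + 1] if x + 1 < n else 0.0) for x in range(n)]   # deaths at each age
    Dx = [v ** x * lx[x] for x in range(n)]
    Cx = [v ** (x + 1) * dx[x] for x in range(n)]
    Nx = [sum(Dx[x:]) for x in range(n)]   # Nx = sum of Dx from age x
    Mx = [sum(Cx[x:]) for x in range(n)]   # Mx = sum of Cx from age x, as in Mx() above
    Sx = [sum(Nx[x:]) for x in range(n)]   # Sx = sum of Nx from age x
    return Dx, Nx, Cx, Mx, Sx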
franciscogarate/pyliferisk
pyliferisk/__init__.py
nEx
def nEx(mt, x, n):
    """ nEx : Returns the EPV of a pure endowment (deferred capital).
    Pure endowment benefits are conditional on the survival of the policyholder. (v^n * npx) """
    return mt.Dx[x + n] / mt.Dx[x]
python
[ "def", "nEx", "(", "mt", ",", "x", ",", "n", ")", ":", "return", "mt", ".", "Dx", "[", "x", "+", "n", "]", "/", "mt", ".", "Dx", "[", "x", "]" ]
nEx : Returns the EPV of a pure endowment (deferred capital). Pure endowment benefits are conditional on the survival of the policyholder. (v^n * npx)
[ "nEx", ":", "Returns", "the", "EPV", "of", "a", "pure", "endowment", "(", "deferred", "capital", ")", ".", "Pure", "endowment", "benefits", "are", "conditional", "on", "the", "survival", "of", "the", "policyholder", ".", "(", "v^n", "*", "npx", ")" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L291-L294
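A minimal usage sketch. The Actuarial(nt=..., i=...) constructor appears in this file's own code (see qAx below); the GKM95 table and its import path are assumptions:

from pyliferisk import Actuarial, nEx
from pyliferisk.mortalitytables import GKM95   # assumed table/module name

mt = Actuarial(nt=GKM95, i=0.02)   # builds the commutation columns at i = 2%
print(nEx(mt, 40, 25))             # EPV of 1 paid at age 65 if (40) survives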
franciscogarate/pyliferisk
pyliferisk/__init__.py
Axn
def Axn(mt, x, n):
    """ (A^1)x:n : Returns the EPV (net single premium) of a term insurance. """
    return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x]
python
[ "def", "Axn", "(", "mt", ",", "x", ",", "n", ")", ":", "return", "(", "mt", ".", "Mx", "[", "x", "]", "-", "mt", ".", "Mx", "[", "x", "+", "n", "]", ")", "/", "mt", ".", "Dx", "[", "x", "]" ]
(A^1)x:n : Returns the EPV (net single premium) of a term insurance.
[ "(", "A^1", ")", "x", ":", "n", ":", "Returns", "the", "EPV", "(", "net", "single", "premium", ")", "of", "a", "term", "insurance", "." ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L305-L307
franciscogarate/pyliferisk
pyliferisk/__init__.py
AExn
def AExn(mt, x, n):
    """ AExn : Returns the EPV of an endowment insurance.
    An endowment insurance provides a combination of a term insurance and a pure endowment. """
    return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x] + mt.Dx[x + n] / mt.Dx[x]
python
[ "def", "AExn", "(", "mt", ",", "x", ",", "n", ")", ":", "return", "(", "mt", ".", "Mx", "[", "x", "]", "-", "mt", ".", "Mx", "[", "x", "+", "n", "]", ")", "/", "mt", ".", "Dx", "[", "x", "]", "+", "mt", ".", "Dx", "[", "x", "+", "n", "]", "/", "mt", ".", "Dx", "[", "x", "]" ]
AExn : Returns the EPV of an endowment insurance. An endowment insurance provides a combination of a term insurance and a pure endowment.
[ "AExn", ":", "Returns", "the", "EPV", "of", "a", "endowment", "insurance", ".", "An", "endowment", "insurance", "provides", "a", "combination", "of", "a", "term", "insurance", "and", "a", "pure", "endowment" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L310-L314
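The return expression is literally the sum of the two preceding EPVs, so the standard decomposition holds by construction (check reuses the sketch table mt from above):

# endowment insurance = term insurance + pure endowment
assert abs(AExn(mt, 40, 25) - (Axn(mt, 40, 25) + nEx(mt, 40, 25))) < 1e-12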
franciscogarate/pyliferisk
pyliferisk/__init__.py
tAx
def tAx(mt, x, t):
    """ n/Ax : Returns the EPV (net single premium) of a deferred whole life insurance. """
    return mt.Mx[x + t] / mt.Dx[x]
python
[ "def", "tAx", "(", "mt", ",", "x", ",", "t", ")", ":", "return", "mt", ".", "Mx", "[", "x", "+", "t", "]", "/", "mt", ".", "Dx", "[", "x", "]" ]
n/Ax : Returns the EPV (net single premium) of a deferred whole life insurance.
[ "n", "/", "Ax", ":", "Returns", "the", "EPV", "(", "net", "single", "premium", ")", "of", "a", "deferred", "whole", "life", "insurance", "." ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L317-L319
franciscogarate/pyliferisk
pyliferisk/__init__.py
qAx
def qAx(mt, x, q):
    """ Returns the EPV of a geometrically increasing whole life insurance,
    computed as Ax at the adjusted rate j = (i - q) / (1 + q). """
    q = float(q)
    j = (mt.i - q) / (1 + q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return Ax(mtj, x)
python
[ "def", "qAx", "(", "mt", ",", "x", ",", "q", ")", ":", "q", "=", "float", "(", "q", ")", "j", "=", "(", "mt", ".", "i", "-", "q", ")", "/", "(", "1", "+", "q", ")", "mtj", "=", "Actuarial", "(", "nt", "=", "mt", ".", "nt", ",", "i", "=", "j", ")", "return", "Ax", "(", "mtj", ",", "x", ")" ]
Returns the EPV of a geometrically increasing whole life insurance, computed as Ax at the adjusted rate j = (i - q) / (1 + q).
[ "This", "function", "evaluates", "the", "APV", "of", "a", "geometrically", "increasing", "annual", "annuity", "-", "due" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L333-L338
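The rate substitution works because 1 + j = (1 + i)/(1 + q), so a cash flow growing at rate q and discounted at i has the same present value as a level cash flow discounted at j. A standalone check:

i, q = 0.04, 0.02
j = (i - q) / (1 + q)                  # adjusted rate, as in qAx
for t in range(1, 6):
    lhs = (1 + q) ** t / (1 + i) ** t  # growing payment discounted at i
    rhs = 1 / (1 + j) ** t             # level payment discounted at j
    assert abs(lhs - rhs) < 1e-12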
franciscogarate/pyliferisk
pyliferisk/__init__.py
aaxn
def aaxn(mt, x, n, m=1):
    """ äx:n : Returns the actuarial present value of an n-year temporary
    life annuity-due. Payable 'm' times per year at the beginning of each period. """
    if m == 1:
        return (mt.Nx[x] - mt.Nx[x + n]) / mt.Dx[x]
    else:
        return (mt.Nx[x] - mt.Nx[x + n]) / mt.Dx[x] - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, n)))
python
[ "def", "aaxn", "(", "mt", ",", "x", ",", "n", ",", "m", "=", "1", ")", ":", "if", "m", "==", "1", ":", "return", "(", "mt", ".", "Nx", "[", "x", "]", "-", "mt", ".", "Nx", "[", "x", "+", "n", "]", ")", "/", "mt", ".", "Dx", "[", "x", "]", "else", ":", "return", "(", "mt", ".", "Nx", "[", "x", "]", "-", "mt", ".", "Nx", "[", "x", "+", "n", "]", ")", "/", "mt", ".", "Dx", "[", "x", "]", "-", "(", "(", "float", "(", "m", "-", "1", ")", "/", "float", "(", "m", "*", "2", ")", ")", "*", "(", "1", "-", "nEx", "(", "mt", ",", "x", ",", "n", ")", ")", ")" ]
äx:n : Returns the actuarial present value of an n-year temporary life annuity-due. Payable 'm' times per year at the beginning of each period.
[ "äxn", ":", "Return", "the", "actuarial", "present", "value", "of", "a", "(", "immediate", ")", "temporal", "(", "term", "certain", ")", "annuity", ":", "n", "-", "year", "temporary", "life", "annuity", "-", "anticipatory", ".", "Payable", "m", "per", "year", "at", "the", "beginning", "of", "the", "period" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L357-L364
franciscogarate/pyliferisk
pyliferisk/__init__.py
aax
def aax(mt, x, m=1):
    """ äx : Returns the actuarial present value of a whole life annuity-due
    of 1 per time period. Payable 'm' times per year at the beginning of each period. """
    return mt.Nx[x] / mt.Dx[x] - (float(m - 1) / float(m * 2))
python
[ "def", "aax", "(", "mt", ",", "x", ",", "m", "=", "1", ")", ":", "return", "mt", ".", "Nx", "[", "x", "]", "/", "mt", ".", "Dx", "[", "x", "]", "-", "(", "float", "(", "m", "-", "1", ")", "/", "float", "(", "m", "*", "2", ")", ")" ]
äx : Returns the actuarial present value of a whole life annuity-due of 1 per time period. Payable 'm' times per year at the beginning of each period.
[ "äx", ":", "Returns", "the", "actuarial", "present", "value", "of", "an", "(", "immediate", ")", "annuity", "of", "1", "per", "time", "period", "(", "whole", "life", "annuity", "-", "anticipatory", ")", ".", "Payable", "m", "per", "year", "at", "the", "beginning", "of", "the", "period" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L375-L379
franciscogarate/pyliferisk
pyliferisk/__init__.py
ax
def ax(mt, x, m=1):
    """ ax : Returns the actuarial present value of a whole life annuity-immediate
    of 1 per time period. Payable 'm' times per year at the end of each period. """
    return (mt.Nx[x] / mt.Dx[x] - 1) + (float(m - 1) / float(m * 2))
python
[ "def", "ax", "(", "mt", ",", "x", ",", "m", "=", "1", ")", ":", "return", "(", "mt", ".", "Nx", "[", "x", "]", "/", "mt", ".", "Dx", "[", "x", "]", "-", "1", ")", "+", "(", "float", "(", "m", "-", "1", ")", "/", "float", "(", "m", "*", "2", ")", ")" ]
ax : Returns the actuarial present value of a whole life annuity-immediate of 1 per time period. Payable 'm' times per year at the end of each period.
[ "ax", ":", "Returns", "the", "actuarial", "present", "value", "of", "an", "(", "immediate", ")", "annuity", "of", "1", "per", "time", "period", "(", "whole", "life", "annuity", "-", "late", ")", ".", "Payable", "m", "per", "year", "at", "the", "ends", "of", "the", "period" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L381-L385
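The (m-1)/(2m) term in aax and ax is the usual UDD-style adjustment for m-thly payments: the annuity-due value drops by (m-1)/(2m), and the annuity-immediate is the annuity-due minus one m-thly payment of 1/m, which is exactly what the two return expressions encode:

m = 12
adj = (m - 1) / (2.0 * m)
aax_like = lambda base: base - adj     # N/D - (m-1)/(2m), as in aax
ax_like = lambda base: base - 1 + adj  # N/D - 1 + (m-1)/(2m), as in ax
# due minus immediate equals one m-thly payment: 1/m
assert abs((aax_like(10.0) - ax_like(10.0)) - 1.0 / m) < 1e-12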
franciscogarate/pyliferisk
pyliferisk/__init__.py
taax
def taax(mt, x, t, m=1):
    """ t/äx : Returns the actuarial present value of a t-year deferred
    whole life annuity-due. Payable 'm' times per year at the beginning of each period. """
    return mt.Nx[x + t] / mt.Dx[x] - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t)))
python
[ "def", "taax", "(", "mt", ",", "x", ",", "t", ",", "m", "=", "1", ")", ":", "return", "mt", ".", "Nx", "[", "x", "+", "t", "]", "/", "mt", ".", "Dx", "[", "x", "]", "-", "(", "(", "float", "(", "m", "-", "1", ")", "/", "float", "(", "m", "*", "2", ")", ")", "*", "(", "1", "-", "nEx", "(", "mt", ",", "x", ",", "t", ")", ")", ")" ]
t/äx : Returns the actuarial present value of a t-year deferred whole life annuity-due. Payable 'm' times per year at the beginning of each period.
[ "n", "/", "äx", ":", "Return", "the", "actuarial", "present", "value", "of", "a", "deferred", "annuity", "(", "deferred", "n", "years", ")", ":", "n", "-", "year", "deferred", "whole", "life", "annuity", "-", "anticipatory", ".", "Payable", "m", "per", "year", "at", "the", "beginning", "of", "the", "period" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L393-L397
franciscogarate/pyliferisk
pyliferisk/__init__.py
Iaaxn
def Iaaxn(mt, x, n, *args):
    """ (Iä)x:n : Arithmetically increasing n-year temporary annuity-due. """
    # the original body referenced an undefined name 'nt'; 'mt' is intended throughout
    return (Sx(mt, x) - Sx(mt, x + n) - n * Nx(mt, x + n)) / Dx(mt, x)
python
[ "def", "Iaaxn", "(", "mt", ",", "x", ",", "n", ",", "*", "args", ")", ":", "return", "(", "Sx", "(", "mt", ",", "x", ")", "-", "Sx", "(", "nt", ",", "x", "+", "n", ")", "-", "n", "*", "Nx", "(", "nt", ",", "x", "+", "n", ")", ")", "/", "Dx", "(", "nt", ",", "x", ")" ]
(Iä)x:n : Arithmetically increasing n-year temporary annuity-due.
[ "during", "a", "term", "certain", "IAn" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L408-L410
franciscogarate/pyliferisk
pyliferisk/__init__.py
Iaxn
def Iaxn(mt, x, n, *args):
    """ (Ia)x:n : Arithmetically increasing n-year temporary annuity-immediate. """
    return (Sx(mt, x + 1) - Sx(mt, x + n + 1) - n * Nx(mt, x + n + 1)) / Dx(mt, x)
python
[ "def", "Iaxn", "(", "mt", ",", "x", ",", "n", ",", "*", "args", ")", ":", "return", "(", "Sx", "(", "mt", ",", "x", "+", "1", ")", "-", "Sx", "(", "mt", ",", "x", "+", "n", "+", "1", ")", "-", "n", "*", "Nx", "(", "mt", ",", "x", "+", "n", "+", "1", ")", ")", "/", "Dx", "(", "mt", ",", "x", ")" ]
(Ia)x:n : Arithmetically increasing n-year temporary annuity-immediate.
[ "during", "a", "term", "certain", "IAn" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L412-L414
franciscogarate/pyliferisk
pyliferisk/__init__.py
Iaax
def Iaax(mt, x, *args):
    """ (Iä)x : Returns the present value of an annuity payable from the beginning
    of the first year and increasing linearly: arithmetically increasing annuity-due. """
    return Sx(mt, x) / Dx(mt, x)
python
[ "def", "Iaax", "(", "mt", ",", "x", ",", "*", "args", ")", ":", "return", "Sx", "(", "mt", ",", "x", ")", "/", "Dx", "(", "mt", ",", "x", ")" ]
(Iä)x : Returns the present value of an annuity payable from the beginning of the first year and increasing linearly: arithmetically increasing annuity-due.
[ "(", "Iä", ")", "x", ":", "Returns", "the", "present", "value", "of", "annuity", "-", "certain", "at", "the", "beginning", "of", "the", "first", "year", "and", "increasing", "linerly", ".", "Arithmetically", "increasing", "annuity", "-", "anticipatory" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L416-L420
franciscogarate/pyliferisk
pyliferisk/__init__.py
Iax
def Iax(mt, x, *args):
    """ (Ia)x : Returns the present value of an annuity payable from the end
    of the first year and increasing linearly: arithmetically increasing annuity-immediate. """
    return Sx(mt, x + 1) / Dx(mt, x)
python
[ "def", "Iax", "(", "mt", ",", "x", ",", "*", "args", ")", ":", "return", "Sx", "(", "mt", ",", "x", "+", "1", ")", "/", "Dx", "(", "mt", ",", "x", ")" ]
(Ia)x : Returns the present value of an annuity payable from the end of the first year and increasing linearly: arithmetically increasing annuity-immediate.
[ "(", "Ia", ")", "x", ":", "Returns", "the", "present", "value", "of", "annuity", "-", "certain", "at", "the", "end", "of", "the", "first", "year", "and", "increasing", "linerly", ".", "Arithmetically", "increasing", "annuity", "-", "late" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L422-L426
franciscogarate/pyliferisk
pyliferisk/__init__.py
Itaax
def Itaax(mt, x, t):
    """ deferred t years """
    return (Sx(mt, x) - Sx(mt, x + t)) / Dx(mt, x)
python
[ "def", "Itaax", "(", "mt", ",", "x", ",", "t", ")", ":", "return", "(", "Sx", "(", "mt", ",", "x", ")", "-", "Sx", "(", "mt", ",", "x", "+", "t", ")", ")", "/", "Dx", "(", "mt", ",", "x", ")" ]
deferred t years
[ "deffered", "t", "years" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L434-L436
franciscogarate/pyliferisk
pyliferisk/__init__.py
Itax
def Itax(mt, x, t):
    """ deferred t years """
    return (Sx(mt, x + 1) - Sx(mt, x + t + 1)) / Dx(mt, x)
python
[ "def", "Itax", "(", "mt", ",", "x", ",", "t", ")", ":", "return", "(", "Sx", "(", "mt", ",", "x", "+", "1", ")", "-", "Sx", "(", "mt", ",", "x", "+", "t", "+", "1", ")", ")", "/", "Dx", "(", "mt", ",", "x", ")" ]
deferred t years
[ "deffered", "t", "years" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L438-L440
franciscogarate/pyliferisk
pyliferisk/__init__.py
qax
def qax(mt, x, q, m=1):
    """ Geometrically increasing whole life annuity-immediate,
    computed at the adjusted rate j = (i - q) / (1 + q). """
    q = float(q)
    j = (mt.i - q) / (1 + q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return ax(mtj, x, m)
python
[ "def", "qax", "(", "mt", ",", "x", ",", "q", ",", "m", "=", "1", ")", ":", "q", "=", "float", "(", "q", ")", "j", "=", "(", "mt", ".", "i", "-", "q", ")", "/", "(", "1", "+", "q", ")", "mtj", "=", "Actuarial", "(", "nt", "=", "mt", ".", "nt", ",", "i", "=", "j", ")", "return", "ax", "(", "mtj", ",", "x", ",", "m", ")" ]
Geometrically increasing whole life annuity-immediate.
[ "geometrica" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L443-L448
franciscogarate/pyliferisk
pyliferisk/__init__.py
qaax
def qaax(mt, x, q, m=1):
    """ Geometrically increasing whole life annuity-due,
    computed at the adjusted rate j = (i - q) / (1 + q). """
    q = float(q)
    j = (mt.i - q) / (1 + q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return aax(mtj, x, m)
python
[ "def", "qaax", "(", "mt", ",", "x", ",", "q", ",", "m", "=", "1", ")", ":", "q", "=", "float", "(", "q", ")", "j", "=", "(", "mt", ".", "i", "-", "q", ")", "/", "(", "1", "+", "q", ")", "mtj", "=", "Actuarial", "(", "nt", "=", "mt", ".", "nt", ",", "i", "=", "j", ")", "return", "aax", "(", "mtj", ",", "x", ",", "m", ")" ]
Geometrically increasing whole life annuity-due.
[ "geometrica" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L450-L455
franciscogarate/pyliferisk
pyliferisk/__init__.py
qaxn
def qaxn(mt, x, n, q, m=1):
    """ Geometrically increasing n-year temporary annuity-immediate. """
    q = float(q)
    j = (mt.i - q) / (1 + q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return axn(mtj, x, n, m)
python
[ "def", "qaxn", "(", "mt", ",", "x", ",", "n", ",", "q", ",", "m", "=", "1", ")", ":", "q", "=", "float", "(", "q", ")", "j", "=", "(", "mt", ".", "i", "-", "q", ")", "/", "(", "1", "+", "q", ")", "mtj", "=", "Actuarial", "(", "nt", "=", "mt", ".", "nt", ",", "i", "=", "j", ")", "return", "axn", "(", "mtj", ",", "x", ",", "n", ",", "m", ")" ]
Geometrically increasing n-year temporary annuity-immediate.
[ "geometrica" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L457-L462
franciscogarate/pyliferisk
pyliferisk/__init__.py
qaaxn
def qaaxn(mt, x, n, q, m=1):
    """ Geometrically increasing n-year temporary annuity-due. """
    #i = float(nt[1])
    q = float(q)
    j = (mt.i - q) / (1 + q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return aaxn(mtj, x, n, m)
python
[ "def", "qaaxn", "(", "mt", ",", "x", ",", "n", ",", "q", ",", "m", "=", "1", ")", ":", "#i = float(nt[1])", "q", "=", "float", "(", "q", ")", "j", "=", "(", "mt", ".", "i", "-", "q", ")", "/", "(", "1", "+", "q", ")", "mtj", "=", "Actuarial", "(", "nt", "=", "mt", ".", "nt", ",", "i", "=", "j", ")", "return", "aaxn", "(", "mtj", ",", "x", ",", "n", ",", "m", ")" ]
Geometrically increasing n-year temporary annuity-due.
[ "geometrica" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L464-L470
franciscogarate/pyliferisk
pyliferisk/__init__.py
qtax
def qtax(mt, x, t, q, m=1):
    """ Geometrically increasing t-year deferred annuity-immediate. """
    q = float(q)
    j = (mt.i - q) / (1 + q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return tax(mtj, x, t) + ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t)))
python
[ "def", "qtax", "(", "mt", ",", "x", ",", "t", ",", "q", ",", "m", "=", "1", ")", ":", "q", "=", "float", "(", "q", ")", "j", "=", "(", "mt", ".", "i", "-", "q", ")", "/", "(", "1", "+", "q", ")", "mtj", "=", "Actuarial", "(", "nt", "=", "mt", ".", "nt", ",", "i", "=", "j", ")", "return", "tax", "(", "mtj", ",", "x", ",", "t", ")", "+", "(", "(", "float", "(", "m", "-", "1", ")", "/", "float", "(", "m", "*", "2", ")", ")", "*", "(", "1", "-", "nEx", "(", "mt", ",", "x", ",", "t", ")", ")", ")" ]
Geometrically increasing t-year deferred annuity-immediate.
[ "geometrica" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L472-L477
franciscogarate/pyliferisk
pyliferisk/__init__.py
qtaax
def qtaax(mt, x, t, q, m=1):
    """ Geometrically increasing t-year deferred annuity-due. """
    q = float(q)
    j = (mt.i - q) / (1 + q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return taax(mtj, x, t) - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t)))
python
[ "def", "qtaax", "(", "mt", ",", "x", ",", "t", ",", "q", ",", "m", "=", "1", ")", ":", "q", "=", "float", "(", "q", ")", "j", "=", "(", "mt", ".", "i", "-", "q", ")", "/", "(", "1", "+", "q", ")", "mtj", "=", "Actuarial", "(", "nt", "=", "mt", ".", "nt", ",", "i", "=", "j", ")", "return", "taax", "(", "mtj", ",", "x", ",", "t", ")", "-", "(", "(", "float", "(", "m", "-", "1", ")", "/", "float", "(", "m", "*", "2", ")", ")", "*", "(", "1", "-", "nEx", "(", "mt", ",", "x", ",", "t", ")", ")", ")" ]
Geometrically increasing t-year deferred annuity-due.
[ "geometrica" ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L479-L484
franciscogarate/pyliferisk
pyliferisk/__init__.py
annuity
def annuity(mt, x, n, p, m=1, *args):
    """Syntax: annuity(mt, x, n, p, m, ['a/g', q], -d)

    Args:
        mt = the mortality table
        x = the age as an integer.
        n = An integer (term of the annuity in years) or 'w' = whole-life.
            (Also, 99 years is defined to be whole-life).
        p = Moment of payment: 0 = beginning of each period (prepaid),
            1 = end of each period (postpaid).
    Optional variables:
        m = Payable 'm' times per year (fractional payments). Default = 1 (annually)
        a or g = a: Arithmetical / g: Geometrical
        q = The increase rate. Syntax: ['g',q] or ['a',q]. For example, ['g',0.03]
    Deferring period:
        -d = The d-year deferring period, given as a negative number.
    """
    l = len(args)
    post = False
    incr = False
    deff = False
    arit = False
    wh_l = False
    if isinstance(n, str) or n == 99:
        wh_l = True
    else:
        pass
    if isinstance(m, int) and m >= 0 and l == 0:
        pass
    elif l == 0 and isinstance(m, list):
        args = (m,)
        m = 1
        incr = True
    elif l == 0 and int(m) < 0:
        args = False
        deff = True
        t = int(m) * -1
        m = 1
    elif l == 1:
        if isinstance(args[0], list):
            incr = True
        elif isinstance(args[0], int):
            if isinstance(m, list):
                deff = True
                incr = True
                t = int(args[0]) * -1
                args = (m,)
                m = 1
            else:
                deff = True
                t = int(args[0]) * -1
                args = False
        else:
            pass
    elif l == 2:
        if isinstance(args[0], list):
            deff = True
            t = int(args[1]) * -1
            incr = True
        elif isinstance(args[0], int):
            deff = True
            t = int(args[0]) * -1
            args = args[1]
        else:
            pass
    else:
        pass
    if p == 1:
        post = True
    elif p == 0:
        pass
    else:
        print('Error: payment value is 0 or 1')
    if incr:
        if 'a' in args[0]:
            arit = True
            incr = False
        elif 'g' in args[0]:
            incr = True
            q = args[0][1]
        else:
            return "Error: increasing value is 'a' or 'g'"
    else:
        pass
    if not incr and not deff and not wh_l and not post:
        return aaxn(mt, x, n, m)
    elif not incr and not deff and not wh_l and post:
        return axn(mt, x, n, m)
    elif not incr and not deff and wh_l and not post:
        return aax(mt, x, m)
    elif not incr and not deff and wh_l and post:
        return ax(mt, x, m)
    elif not incr and deff and not wh_l and not post:
        return taaxn(mt, x, n, t, m)
    elif not incr and deff and not wh_l and post:
        return taxn(mt, x, n, t, m)
    elif not incr and deff and wh_l and not post:
        return taax(mt, x, t, m)
    elif not incr and deff and wh_l and post:
        return tax(mt, x, t, m)
    elif incr and not deff and not wh_l and not post:
        return qaaxn(mt, x, n, q, m)
    elif incr and not deff and not wh_l and post:
        return qaxn(mt, x, n, q, m)
    elif incr and not deff and wh_l and not post:
        return qaax(mt, x, q, m)
    elif incr and not deff and wh_l and post:
        return qax(mt, x, q, m)
    elif incr and deff and not wh_l and not post:
        return qtaaxn(mt, x, n, t, q, m)
    elif incr and deff and not wh_l and post:
        return qtaxn(mt, x, n, t, q, m)
    elif incr and deff and wh_l and not post:
        return qtaax(mt, x, t, q, m)
    else:  # incr and deff and wh_l and post
        return Itax(mt, x, t)
python
[ "def", "annuity", "(", "mt", ",", "x", ",", "n", ",", "p", ",", "m", "=", "1", ",", "*", "args", ")", ":", "l", "=", "len", "(", "args", ")", "post", "=", "False", "incr", "=", "False", "deff", "=", "False", "arit", "=", "False", "wh_l", "=", "False", "if", "isinstance", "(", "n", ",", "str", ")", "or", "n", "==", "99", ":", "wh_l", "=", "True", "else", ":", "pass", "if", "isinstance", "(", "m", ",", "int", ")", "and", "m", ">=", "0", "and", "l", "==", "0", ":", "pass", "elif", "l", "==", "0", "and", "isinstance", "(", "m", ",", "list", ")", ":", "args", "=", "(", "m", ",", ")", "m", "=", "1", "incr", "=", "True", "elif", "l", "==", "0", "and", "int", "(", "m", ")", "<", "0", ":", "args", "=", "False", "deff", "=", "True", "t", "=", "int", "(", "m", ")", "*", "-", "1", "m", "=", "1", "elif", "l", "==", "1", ":", "if", "isinstance", "(", "args", "[", "0", "]", ",", "list", ")", ":", "incr", "=", "True", "elif", "isinstance", "(", "args", "[", "0", "]", ",", "int", ")", ":", "if", "isinstance", "(", "m", ",", "list", ")", ":", "deff", "=", "True", "incr", "=", "True", "t", "=", "int", "(", "args", "[", "0", "]", ")", "*", "-", "1", "args", "=", "(", "m", ",", ")", "m", "=", "1", "else", ":", "deff", "=", "True", "t", "=", "int", "(", "args", "[", "0", "]", ")", "*", "-", "1", "args", "=", "False", "else", ":", "pass", "elif", "l", "==", "2", ":", "if", "isinstance", "(", "args", "[", "0", "]", ",", "list", ")", ":", "deff", "=", "True", "t", "=", "int", "(", "args", "[", "1", "]", ")", "*", "-", "1", "incr", "=", "True", "elif", "isinstance", "(", "args", "[", "0", "]", ",", "int", ")", ":", "deff", "=", "True", "t", "=", "int", "(", "args", "[", "0", "]", ")", "*", "-", "1", "args", "=", "args", "[", "1", "]", "else", ":", "pass", "else", ":", "pass", "if", "p", "==", "1", ":", "post", "=", "True", "elif", "p", "==", "0", ":", "pass", "else", ":", "print", "(", "'Error: payment value is 0 or 1'", ")", "if", "incr", ":", "if", "'a'", "in", "args", "[", "0", "]", ":", "arit", "=", "True", "incr", "=", "False", "elif", "'g'", "in", "args", "[", "0", "]", ":", "incr", "=", "True", "q", "=", "args", "[", "0", "]", "[", "1", "]", "else", ":", "return", "\"Error: increasing value is 'a' or 'g'\"", "else", ":", "pass", "if", "not", "incr", "and", "not", "deff", "and", "not", "wh_l", "and", "not", "post", ":", "return", "aaxn", "(", "mt", ",", "x", ",", "n", ",", "m", ")", "elif", "not", "incr", "and", "not", "deff", "and", "not", "wh_l", "and", "post", ":", "return", "axn", "(", "mt", ",", "x", ",", "n", ",", "m", ")", "elif", "not", "incr", "and", "not", "deff", "and", "wh_l", "and", "not", "post", ":", "return", "aax", "(", "mt", ",", "x", ",", "m", ")", "elif", "not", "incr", "and", "not", "deff", "and", "wh_l", "and", "post", ":", "return", "ax", "(", "mt", ",", "x", ",", "m", ")", "elif", "not", "incr", "and", "deff", "and", "not", "wh_l", "and", "not", "post", ":", "return", "taaxn", "(", "mt", ",", "x", ",", "n", ",", "t", ",", "m", ")", "elif", "not", "incr", "and", "deff", "and", "not", "wh_l", "and", "post", ":", "return", "taxn", "(", "mt", ",", "x", ",", "n", ",", "t", ",", "m", ")", "elif", "not", "incr", "and", "deff", "and", "wh_l", "and", "not", "post", ":", "return", "taax", "(", "mt", ",", "x", ",", "t", ",", "m", ")", "elif", "not", "incr", "and", "deff", "and", "wh_l", "and", "post", ":", "return", "tax", "(", "mt", ",", "x", ",", "t", ",", "m", ")", "elif", "incr", "and", "not", "deff", "and", "not", "wh_l", "and", "not", "post", ":", "return", "qaaxn", "(", "mt", ",", "x", ",", "n", ",", "q", ",", "m", ")", "elif", "incr", "and", "not", "deff", "and", "not", "wh_l", "and", "post", ":", "return", "qaxn", "(", "mt", ",", "x", ",", "n", ",", "q", ",", "m", ")", "elif", "incr", "and", "not", "deff", "and", "wh_l", "and", "not", "post", ":", "return", "qaax", "(", "mt", ",", "x", ",", "q", ",", "m", ")", "elif", "incr", "and", "not", "deff", "and", "wh_l", "and", "post", ":", "return", "qax", "(", "mt", ",", "x", ",", "q", ",", "m", ")", "elif", "incr", "and", "deff", "and", "not", "wh_l", "and", "not", "post", ":", "return", "qtaaxn", "(", "mt", ",", "x", ",", "n", ",", "t", ",", "q", ",", "m", ")", "elif", "incr", "and", "deff", "and", "not", "wh_l", "and", "post", ":", "return", "qtaxn", "(", "mt", ",", "x", ",", "n", ",", "t", ",", "q", ",", "m", ")", "elif", "incr", "and", "deff", "and", "wh_l", "and", "not", "post", ":", "return", "qtaax", "(", "mt", ",", "x", ",", "t", ",", "q", ",", "m", ")", "else", ":", "#elif incr and deff and wh_l and post:", "return", "Itax", "(", "mt", ",", "x", ",", "t", ")" ]
Syntax: annuity(mt, x, n, p, m, ['a/g', q], -d) Args: mt = the mortality table x = the age as an integer. n = An integer (term of the annuity in years) or 'w' = whole-life. (Also, 99 years is defined to be whole-life). p = Moment of payment: 0 = beginning of each period (prepaid), 1 = end of each period (postpaid). Optional variables: m = Payable 'm' times per year (fractional payments). Default = 1 (annually) a or g = a: Arithmetical / g: Geometrical q = The increase rate. Syntax: ['g',q] or ['a',q]. For example, ['g',0.03] Deferring period: -d = The d-year deferring period, given as a negative number.
[ "Syntax", ":", "annuity", "(", "nt", "x", "n", "p", "m", "[", "a", "/", "g", "q", "]", "-", "d", ")", "Args", ":", "mt", "=", "the", "mortality", "table", "x", "=", "the", "age", "as", "integer", "number", ".", "n", "=", "A", "integer", "number", "(", "term", "of", "insurance", "in", "years", ")", "or", "w", "=", "whole", "-", "life", ".", "(", "Also", "99", "years", "is", "defined", "to", "be", "whole", "-", "life", ")", ".", "p", "=", "Moment", "of", "payment", ".", "Syntaxis", ":", "0", "=", "begining", "of", "each", "period", "(", "prepaid", ")", "1", "=", "end", "of", "each", "period", "(", "postpaid", ")", "Optional", "variables", ":", "m", "=", "Payable", "m", "per", "year", "(", "frational", "payments", ")", ".", "Default", "=", "1", "(", "annually", ")", "a", "or", "g", "=", "a", ":", "Arithmetical", "/", "g", ":", "Geometrical", "q", "=", "The", "increase", "rate", ".", "Syntax", ":", "[", "g", "q", "]", "or", "[", "a", "q", "]", ".", "For", "example", "[", "g", "0", ".", "03", "]", "Deferring", "period", ":", "-", "d", "=", "The", "n", "-", "years", "deferring", "period", "as", "negative", "number", "." ]
train
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L489-L609
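A usage sketch of the dispatcher, following the docstring's calling conventions (reusing the hypothetical mt table built earlier; ages and rates are illustrative):

annuity(mt, 50, 10, 0)                # 10-year temporary annuity-due: aaxn(mt, 50, 10, 1)
annuity(mt, 50, 'w', 1)               # whole life annuity-immediate: ax(mt, 50, 1)
annuity(mt, 50, 'w', 0, -5)           # 5-year deferred whole life annuity-due: taax(mt, 50, 5, 1)
annuity(mt, 50, 'w', 0, ['g', 0.03])  # geometrically increasing at 3%: qaax(mt, 50, 0.03, 1)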
bdcht/grandalf
grandalf/layouts.py
Layer._meanvalueattr
def _meanvalueattr(self, v):
    """
    find new position of vertex v according to adjacency in prevlayer.
    position is given by the mean value of adjacent positions.
    experiments show that meanvalue heuristic performs better than median.
    """
    sug = self.layout
    if not self.prevlayer():
        return sug.grx[v].bar
    bars = [sug.grx[x].bar for x in self._neighbors(v)]
    return sug.grx[v].bar if len(bars) == 0 else float(sum(bars)) / len(bars)
python
[ "def", "_meanvalueattr", "(", "self", ",", "v", ")", ":", "sug", "=", "self", ".", "layout", "if", "not", "self", ".", "prevlayer", "(", ")", ":", "return", "sug", ".", "grx", "[", "v", "]", ".", "bar", "bars", "=", "[", "sug", ".", "grx", "[", "x", "]", ".", "bar", "for", "x", "in", "self", ".", "_neighbors", "(", "v", ")", "]", "return", "sug", ".", "grx", "[", "v", "]", ".", "bar", "if", "len", "(", "bars", ")", "==", "0", "else", "float", "(", "sum", "(", "bars", ")", ")", "/", "len", "(", "bars", ")" ]
find new position of vertex v according to adjacency in prevlayer. position is given by the mean value of adjacent positions. experiments show that meanvalue heuristic performs better than median.
[ "find", "new", "position", "of", "vertex", "v", "according", "to", "adjacency", "in", "prevlayer", ".", "position", "is", "given", "by", "the", "mean", "value", "of", "adjacent", "positions", ".", "experiments", "show", "that", "meanvalue", "heuristic", "performs", "better", "than", "median", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L211-L220
bdcht/grandalf
grandalf/layouts.py
Layer._medianindex
def _medianindex(self, v):
    """
    find new position of vertex v according to adjacency in layer l+dir.
    position is given by the median value of adjacent positions.
    median heuristic is proven to achieve at most 3 times the minimum
    of crossings (while barycenter achieves in theory the order of |V|)
    """
    assert self.prevlayer() != None
    N = self._neighbors(v)
    g = self.layout.grx
    pos = [g[x].pos for x in N]
    lp = len(pos)
    if lp == 0:
        return []
    pos.sort()
    pos = pos[::self.layout.dirh]
    i, j = divmod(lp - 1, 2)
    return [pos[i]] if j == 0 else [pos[i], pos[i + j]]
python
[ "def", "_medianindex", "(", "self", ",", "v", ")", ":", "assert", "self", ".", "prevlayer", "(", ")", "!=", "None", "N", "=", "self", ".", "_neighbors", "(", "v", ")", "g", "=", "self", ".", "layout", ".", "grx", "pos", "=", "[", "g", "[", "x", "]", ".", "pos", "for", "x", "in", "N", "]", "lp", "=", "len", "(", "pos", ")", "if", "lp", "==", "0", ":", "return", "[", "]", "pos", ".", "sort", "(", ")", "pos", "=", "pos", "[", ":", ":", "self", ".", "layout", ".", "dirh", "]", "i", ",", "j", "=", "divmod", "(", "lp", "-", "1", ",", "2", ")", "return", "[", "pos", "[", "i", "]", "]", "if", "j", "==", "0", "else", "[", "pos", "[", "i", "]", ",", "pos", "[", "i", "+", "j", "]", "]" ]
find new position of vertex v according to adjacency in layer l+dir. position is given by the median value of adjacent positions. median heuristic is proven to achieve at most 3 times the minimum of crossings (while barycenter achieves in theory the order of |V|)
[ "find", "new", "position", "of", "vertex", "v", "according", "to", "adjacency", "in", "layer", "l", "+", "dir", ".", "position", "is", "given", "by", "the", "median", "value", "of", "adjacent", "positions", ".", "median", "heuristic", "is", "proven", "to", "achieve", "at", "most", "3", "times", "the", "minimum", "of", "crossings", "(", "while", "barycenter", "achieve", "in", "theory", "the", "order", "of", "|V|", ")" ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L222-L238
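The divmod trick returns one median for an odd number of neighbors and both middle values for an even number; a standalone illustration (sketch):

def median_pair(values):
    """Return [median] for odd-length input, [lower, upper] medians for even."""
    pos = sorted(values)
    i, j = divmod(len(pos) - 1, 2)
    return [pos[i]] if j == 0 else [pos[i], pos[i + j]]

print(median_pair([3, 1, 2]))     # [2]
print(median_pair([4, 1, 3, 2]))  # [2, 3]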
bdcht/grandalf
grandalf/layouts.py
Layer._neighbors
def _neighbors(self, v):
    """
    neighbors refer to upper/lower adjacent nodes.
    Note that v.N() provides neighbors of v in the graph, while this method
    provides the Vertex and DummyVertex adjacent to v in the upper or lower
    layer (depending on layout.dirv state).
    """
    assert self.layout.dag
    dirv = self.layout.dirv
    grxv = self.layout.grx[v]
    try:  # (cache)
        return grxv.nvs[dirv]
    except AttributeError:
        grxv.nvs = {-1: v.N(-1), +1: v.N(+1)}
        if grxv.dummy:
            return grxv.nvs[dirv]
        # v is real; v.N gives graph neighbors, but we need layer neighbors
        for d in (-1, +1):
            tr = grxv.rank + d
            for i, x in enumerate(v.N(d)):
                if self.layout.grx[x].rank == tr:
                    continue
                e = v.e_with(x)
                dum = self.layout.ctrls[e][tr]
                grxv.nvs[d][i] = dum
        return grxv.nvs[dirv]
python
[ "def", "_neighbors", "(", "self", ",", "v", ")", ":", "assert", "self", ".", "layout", ".", "dag", "dirv", "=", "self", ".", "layout", ".", "dirv", "grxv", "=", "self", ".", "layout", ".", "grx", "[", "v", "]", "try", ":", "#(cache)", "return", "grxv", ".", "nvs", "[", "dirv", "]", "except", "AttributeError", ":", "grxv", ".", "nvs", "=", "{", "-", "1", ":", "v", ".", "N", "(", "-", "1", ")", ",", "+", "1", ":", "v", ".", "N", "(", "+", "1", ")", "}", "if", "grxv", ".", "dummy", ":", "return", "grxv", ".", "nvs", "[", "dirv", "]", "# v is real, v.N are graph neigbors but we need layers neighbors", "for", "d", "in", "(", "-", "1", ",", "+", "1", ")", ":", "tr", "=", "grxv", ".", "rank", "+", "d", "for", "i", ",", "x", "in", "enumerate", "(", "v", ".", "N", "(", "d", ")", ")", ":", "if", "self", ".", "layout", ".", "grx", "[", "x", "]", ".", "rank", "==", "tr", ":", "continue", "e", "=", "v", ".", "e_with", "(", "x", ")", "dum", "=", "self", ".", "layout", ".", "ctrls", "[", "e", "]", "[", "tr", "]", "grxv", ".", "nvs", "[", "d", "]", "[", "i", "]", "=", "dum", "return", "grxv", ".", "nvs", "[", "dirv", "]" ]
neighbors refer to upper/lower adjacent nodes. Note that v.N() provides neighbors of v in the graph, while this method provides the Vertex and DummyVertex adjacent to v in the upper or lower layer (depending on layout.dirv state).
[ "neighbors", "refer", "to", "upper", "/", "lower", "adjacent", "nodes", ".", "Note", "that", "v", ".", "N", "()", "provides", "neighbors", "of", "v", "in", "the", "graph", "while", "this", "method", "provides", "the", "Vertex", "and", "DummyVertex", "adjacent", "to", "v", "in", "the", "upper", "or", "lower", "layer", "(", "depending", "on", "layout", ".", "dirv", "state", ")", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L240-L263
bdcht/grandalf
grandalf/layouts.py
Layer._crossings
def _crossings(self):
    """
    counts (inefficiently but at least accurately) the number of
    crossing edges between layer l and l+dirv.
    P[i][j] counts the number of crossings from the j-th edge of vertex i.
    The total count of crossings is the sum of flattened P:
    x = sum(sum(P, []))
    """
    g = self.layout.grx
    P = []
    for v in self:
        P.append([g[x].pos for x in self._neighbors(v)])
    for i, p in enumerate(P):
        candidates = sum(P[i + 1:], [])
        for j, e in enumerate(p):
            # len(filter(...)) only works in Python 2; count explicitly instead:
            p[j] = len([nx for nx in candidates if nx < e])
        del candidates
    return P
python
[ "def", "_crossings", "(", "self", ")", ":", "g", "=", "self", ".", "layout", ".", "grx", "P", "=", "[", "]", "for", "v", "in", "self", ":", "P", ".", "append", "(", "[", "g", "[", "x", "]", ".", "pos", "for", "x", "in", "self", ".", "_neighbors", "(", "v", ")", "]", ")", "for", "i", ",", "p", "in", "enumerate", "(", "P", ")", ":", "candidates", "=", "sum", "(", "P", "[", "i", "+", "1", ":", "]", ",", "[", "]", ")", "for", "j", ",", "e", "in", "enumerate", "(", "p", ")", ":", "p", "[", "j", "]", "=", "len", "(", "filter", "(", "(", "lambda", "nx", ":", "nx", "<", "e", ")", ",", "candidates", ")", ")", "del", "candidates", "return", "P" ]
counts (inefficiently but at least accurately) the number of crossing edges between layer l and l+dirv. P[i][j] counts the number of crossings from the j-th edge of vertex i. The total count of crossings is the sum of flattened P: x = sum(sum(P,[]))
[ "counts", "(", "inefficently", "but", "at", "least", "accurately", ")", "the", "number", "of", "crossing", "edges", "between", "layer", "l", "and", "l", "+", "dirv", ".", "P", "[", "i", "]", "[", "j", "]", "counts", "the", "number", "of", "crossings", "from", "j", "-", "th", "edge", "of", "vertex", "i", ".", "The", "total", "count", "of", "crossings", "is", "the", "sum", "of", "flattened", "P", ":", "x", "=", "sum", "(", "sum", "(", "P", "[]", "))" ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L265-L282
bdcht/grandalf
grandalf/layouts.py
Layer._cc
def _cc(self):
    """
    implementation of the efficient bilayer cross counting by insert-sort
    (see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting")
    """
    g = self.layout.grx
    P = []
    for v in self:
        P.extend(sorted([g[x].pos for x in self._neighbors(v)]))
    # count inversions in P (bisect comes from the stdlib: from bisect import bisect):
    s = []
    count = 0
    for i, p in enumerate(P):
        j = bisect(s, p)
        if j < i:
            count += (i - j)
        s.insert(j, p)
    return count
python
[ "def", "_cc", "(", "self", ")", ":", "g", "=", "self", ".", "layout", ".", "grx", "P", "=", "[", "]", "for", "v", "in", "self", ":", "P", ".", "extend", "(", "sorted", "(", "[", "g", "[", "x", "]", ".", "pos", "for", "x", "in", "self", ".", "_neighbors", "(", "v", ")", "]", ")", ")", "# count inversions in P:", "s", "=", "[", "]", "count", "=", "0", "for", "i", ",", "p", "in", "enumerate", "(", "P", ")", ":", "j", "=", "bisect", "(", "s", ",", "p", ")", "if", "j", "<", "i", ":", "count", "+=", "(", "i", "-", "j", ")", "s", ".", "insert", "(", "j", ",", "p", ")", "return", "count" ]
implementation of the efficient bilayer cross counting by insert-sort (see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting")
[ "implementation", "of", "the", "efficient", "bilayer", "cross", "counting", "by", "insert", "-", "sort", "(", "see", "Barth", "&", "Mutzel", "paper", "Simple", "and", "Efficient", "Bilayer", "Cross", "Counting", ")" ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L284-L300
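A standalone sketch of the same insertion-sort inversion count: each element's insertion point tells how many previously seen elements exceed it, so crossings are counted without comparing all pairs explicitly:

from bisect import bisect

def inversions(seq):
    """Count pairs (i, j) with i < j and seq[i] > seq[j] by insert-sort."""
    s, count = [], 0
    for i, p in enumerate(seq):
        j = bisect(s, p)   # insertion point = number of elements <= p seen so far
        count += i - j     # elements already seen that are greater than p
        s.insert(j, p)
    return count

print(inversions([2, 1, 3, 1]))  # 3 inversions: (2,1), (2,1), (3,1)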
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout.init_all
def init_all(self, roots=None, inverted_edges=None, optimize=False):
    """initializes the layout algorithm by computing roots (unless provided),
    inverted edges (unless provided) and vertex ranks, and creates all dummy
    vertices and layers.

    Parameters:
        roots (list[Vertex]): set *root* vertices (layer 0)
        inverted_edges (list[Edge]): set edges to invert to have a DAG.
        optimize (bool): optimize ranking if True (default False)
    """
    if self.initdone:
        return
    # For the layered Sugiyama algorithm, the input graph must be acyclic,
    # so we must provide a list of root nodes and a list of inverted edges.
    if roots is None:
        roots = [v for v in self.g.sV if len(v.e_in()) == 0]
    if inverted_edges is None:
        # get_scs_with_feedback marks feedback edges as a side effect:
        L = self.g.get_scs_with_feedback(roots)
        inverted_edges = [x for x in self.g.sE if x.feedback]
    self.alt_e = inverted_edges
    # assign rank to all vertices:
    self.rank_all(roots, optimize)
    # add dummy vertex/edge for 'long' edges:
    for e in self.g.E():
        self.setdummies(e)
    # precompute some layers values:
    for l in self.layers:
        l.setup(self)
    self.initdone = True
python
[ "def", "init_all", "(", "self", ",", "roots", "=", "None", ",", "inverted_edges", "=", "None", ",", "optimize", "=", "False", ")", ":", "if", "self", ".", "initdone", ":", "return", "# For layered sugiyama algorithm, the input graph must be acyclic,", "# so we must provide a list of root nodes and a list of inverted edges.", "if", "roots", "==", "None", ":", "roots", "=", "[", "v", "for", "v", "in", "self", ".", "g", ".", "sV", "if", "len", "(", "v", ".", "e_in", "(", ")", ")", "==", "0", "]", "if", "inverted_edges", "==", "None", ":", "L", "=", "self", ".", "g", ".", "get_scs_with_feedback", "(", "roots", ")", "inverted_edges", "=", "[", "x", "for", "x", "in", "self", ".", "g", ".", "sE", "if", "x", ".", "feedback", "]", "self", ".", "alt_e", "=", "inverted_edges", "# assign rank to all vertices:", "self", ".", "rank_all", "(", "roots", ",", "optimize", ")", "# add dummy vertex/edge for 'long' edges:", "for", "e", "in", "self", ".", "g", ".", "E", "(", ")", ":", "self", ".", "setdummies", "(", "e", ")", "# precompute some layers values:", "for", "l", "in", "self", ".", "layers", ":", "l", ".", "setup", "(", "self", ")", "self", ".", "initdone", "=", "True" ]
initializes the layout algorithm by computing roots (unless provided), inverted edges (unless provided) and vertex ranks, and creates all dummy vertices and layers. Parameters: roots (list[Vertex]): set *root* vertices (layer 0) inverted_edges (list[Edge]): set edges to invert to have a DAG. optimize (bool): optimize ranking if True (default False)
[ "initializes", "the", "layout", "algorithm", "by", "computing", "roots", "(", "unless", "provided", ")", "inverted", "edges", "(", "unless", "provided", ")", "vertices", "ranks", "and", "creates", "all", "dummy", "vertices", "and", "layers", ".", "Parameters", ":", "roots", "(", "list", "[", "Vertex", "]", ")", ":", "set", "*", "root", "*", "vertices", "(", "layer", "0", ")", "inverted_edges", "(", "list", "[", "Edge", "]", ")", ":", "set", "edges", "to", "invert", "to", "have", "a", "DAG", ".", "optimize", "(", "bool", ")", ":", "optimize", "ranking", "if", "True", "(", "default", "False", ")" ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L378-L404
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout.draw
def draw(self, N=1.5):
    """compute every node's coordinates after converging to an optimal
    ordering in N rounds, then perform the edge routing.
    """
    while N > 0.5:
        for (l, mvmt) in self.ordering_step():
            pass
        N = N - 1
    if N > 0:
        for (l, mvmt) in self.ordering_step(oneway=True):
            pass
    self.setxy()
    self.draw_edges()
python
[ "def", "draw", "(", "self", ",", "N", "=", "1.5", ")", ":", "while", "N", ">", "0.5", ":", "for", "(", "l", ",", "mvmt", ")", "in", "self", ".", "ordering_step", "(", ")", ":", "pass", "N", "=", "N", "-", "1", "if", "N", ">", "0", ":", "for", "(", "l", ",", "mvmt", ")", "in", "self", ".", "ordering_step", "(", "oneway", "=", "True", ")", ":", "pass", "self", ".", "setxy", "(", ")", "self", ".", "draw_edges", "(", ")" ]
compute every node's coordinates after converging to an optimal ordering in N rounds, then perform the edge routing.
[ "compute", "every", "node", "coordinates", "after", "converging", "to", "optimal", "ordering", "by", "N", "rounds", "and", "finally", "perform", "the", "edge", "routing", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L406-L418
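A minimal end-to-end sketch, assuming the Vertex/Edge/Graph API from grandalf.graphs and that each vertex carries a view object with w/h attributes, as grandalf's own examples do:

from grandalf.graphs import Vertex, Edge, Graph
from grandalf.layouts import SugiyamaLayout

class DefaultView(object):
    w, h = 10, 10  # node box size used by the layout

V = [Vertex(i) for i in range(4)]
E = [Edge(V[0], V[1]), Edge(V[0], V[2]), Edge(V[1], V[3]), Edge(V[2], V[3])]
for v in V:
    v.view = DefaultView()

g = Graph(V, E)
sug = SugiyamaLayout(g.C[0])  # the layout works on one connected component
sug.init_all()
sug.draw(3)                   # 3 ordering rounds, then setxy + edge routing
print([(v.data, v.view.xy) for v in g.C[0].sV])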
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout.rank_all
def rank_all(self, roots, optimize=False):
    """Computes the rank of all vertices. Adds any remaining source vertices
    to the provided roots (rank 0), then updates ranking from them.
    The initial rank is based on precedence relationships; an optimal
    ranking may be derived from network flow (simplex).
    """
    self._edge_inverter()
    r = [x for x in self.g.sV if (len(x.e_in()) == 0 and x not in roots)]
    self._rank_init(roots + r)
    if optimize:
        self._rank_optimize()
    self._edge_inverter()
python
[ "def", "rank_all", "(", "self", ",", "roots", ",", "optimize", "=", "False", ")", ":", "self", ".", "_edge_inverter", "(", ")", "r", "=", "[", "x", "for", "x", "in", "self", ".", "g", ".", "sV", "if", "(", "len", "(", "x", ".", "e_in", "(", ")", ")", "==", "0", "and", "x", "not", "in", "roots", ")", "]", "self", ".", "_rank_init", "(", "roots", "+", "r", ")", "if", "optimize", ":", "self", ".", "_rank_optimize", "(", ")", "self", ".", "_edge_inverter", "(", ")" ]
Computes the rank of all vertices. Adds any remaining source vertices to the provided roots (rank 0), then updates ranking from them. The initial rank is based on precedence relationships; an optimal ranking may be derived from network flow (simplex).
[ "Computes", "rank", "of", "all", "vertices", ".", "add", "provided", "roots", "to", "rank", "0", "vertices", "otherwise", "update", "ranking", "from", "provided", "roots", ".", "The", "initial", "rank", "is", "based", "on", "precedence", "relationships", "optimal", "ranking", "may", "be", "derived", "from", "network", "flow", "(", "simplex", ")", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L461-L472
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout._rank_init
def _rank_init(self, unranked):
    """Computes the rank of the provided unranked list of vertices and all
    their children. A vertex is assigned a rank when all its inward edges
    have been *scanned*. When a vertex is assigned a rank, its outward
    edges are marked *scanned*.
    """
    assert self.dag
    scan = {}
    # set rank of unranked based on the ranks of its in-edge vertices:
    while len(unranked) > 0:
        l = []
        for v in unranked:
            self.setrank(v)
            # mark out-edges as scanned:
            for e in v.e_out():
                scan[e] = True
            # check if out-vertices are rank-able:
            for x in v.N(+1):
                if not (False in [scan.get(e, False) for e in x.e_in()]):
                    if x not in l:
                        l.append(x)
        unranked = l
python
def _rank_init(self,unranked): """Computes rank of provided unranked list of vertices and all their children. A vertex will be assigned a rank when all its inward edges have been *scanned*. When a vertex is assigned a rank, its outward edges are marked *scanned*. """ assert self.dag scan = {} # set rank of unranked based on its in-edges vertices ranks: while len(unranked)>0: l = [] for v in unranked: self.setrank(v) # mark out-edges as scan-able: for e in v.e_out(): scan[e]=True # check if out-vertices are rank-able: for x in v.N(+1): if not (False in [scan.get(e,False) for e in x.e_in()]): if x not in l: l.append(x) unranked=l
[ "def", "_rank_init", "(", "self", ",", "unranked", ")", ":", "assert", "self", ".", "dag", "scan", "=", "{", "}", "# set rank of unranked based on its in-edges vertices ranks:", "while", "len", "(", "unranked", ")", ">", "0", ":", "l", "=", "[", "]", "for", "v", "in", "unranked", ":", "self", ".", "setrank", "(", "v", ")", "# mark out-edges has scan-able:", "for", "e", "in", "v", ".", "e_out", "(", ")", ":", "scan", "[", "e", "]", "=", "True", "# check if out-vertices are rank-able:", "for", "x", "in", "v", ".", "N", "(", "+", "1", ")", ":", "if", "not", "(", "False", "in", "[", "scan", ".", "get", "(", "e", ",", "False", ")", "for", "e", "in", "x", ".", "e_in", "(", ")", "]", ")", ":", "if", "x", "not", "in", "l", ":", "l", ".", "append", "(", "x", ")", "unranked", "=", "l" ]
Computes rank of provided unranked list of vertices and all their children. A vertex will be assigned a rank when all its inward edges have been *scanned*. When a vertex is assigned a rank, its outward edges are marked *scanned*.
[ "Computes", "rank", "of", "provided", "unranked", "list", "of", "vertices", "and", "all", "their", "children", ".", "A", "vertex", "will", "be", "asign", "a", "rank", "when", "all", "its", "inward", "edges", "have", "been", "*", "scanned", "*", ".", "When", "a", "vertex", "is", "asigned", "a", "rank", "its", "outward", "edges", "are", "marked", "*", "scanned", "*", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L474-L493
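Editor's note: a schematic, standalone re-implementation of the scan logic above, with plain dicts standing in for grandalf's vertex and edge objects (all names here are illustrative):

from collections import deque

def rank_init(succ, roots):
    # a vertex is ranked once all of its in-edges have been scanned;
    # ranking a vertex scans its out-edges
    pred = {v: [] for v in succ}
    for u in succ:
        for v in succ[u]:
            pred[v].append(u)
    rank, queue = {}, deque(roots)
    while queue:
        u = queue.popleft()
        rank[u] = max([rank[p] for p in pred[u]] + [-1]) + 1
        for v in succ[u]:
            # v becomes rank-able once every one of its in-edges is scanned
            if all(p in rank for p in pred[v]) and v not in rank and v not in queue:
                queue.append(v)
    return rank

print(rank_init({'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}, ['a']))
# {'a': 0, 'b': 1, 'c': 1, 'd': 2}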
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout._rank_optimize
def _rank_optimize(self): """optimize ranking by pushing long edges toward lower layers as much as possible. see other interesting network flow solvers to minimize total edge length (http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf) """ assert self.dag for l in reversed(self.layers): for v in l: gv = self.grx[v] for x in v.N(-1): if all((self.grx[y].rank>=gv.rank for y in x.N(+1))): gx = self.grx[x] self.layers[gx.rank].remove(x) gx.rank = gv.rank-1 self.layers[gv.rank-1].append(x)
python
def _rank_optimize(self): """optimize ranking by pushing long edges toward lower layers as much as possible. see other interesting network flow solvers to minimize total edge length (http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf) """ assert self.dag for l in reversed(self.layers): for v in l: gv = self.grx[v] for x in v.N(-1): if all((self.grx[y].rank>=gv.rank for y in x.N(+1))): gx = self.grx[x] self.layers[gx.rank].remove(x) gx.rank = gv.rank-1 self.layers[gv.rank-1].append(x)
[ "def", "_rank_optimize", "(", "self", ")", ":", "assert", "self", ".", "dag", "for", "l", "in", "reversed", "(", "self", ".", "layers", ")", ":", "for", "v", "in", "l", ":", "gv", "=", "self", ".", "grx", "[", "v", "]", "for", "x", "in", "v", ".", "N", "(", "-", "1", ")", ":", "if", "all", "(", "(", "self", ".", "grx", "[", "y", "]", ".", "rank", ">=", "gv", ".", "rank", "for", "y", "in", "x", ".", "N", "(", "+", "1", ")", ")", ")", ":", "gx", "=", "self", ".", "grx", "[", "x", "]", "self", ".", "layers", "[", "gx", ".", "rank", "]", ".", "remove", "(", "x", ")", "gx", ".", "rank", "=", "gv", ".", "rank", "-", "1", "self", ".", "layers", "[", "gv", ".", "rank", "-", "1", "]", ".", "append", "(", "x", ")" ]
optimize ranking by pushing long edges toward lower layers as much as possible. see other interesting network flow solvers to minimize total edge length (http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
[ "optimize", "ranking", "by", "pushing", "long", "edges", "toward", "lower", "layers", "as", "much", "as", "possible", ".", "see", "other", "interersting", "network", "flow", "solver", "to", "minimize", "total", "edge", "length", "(", "http", ":", "//", "jgaa", ".", "info", "/", "accepted", "/", "2005", "/", "EiglspergerSiebenhallerKaufmann2005", ".", "9", ".", "3", ".", "pdf", ")" ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L495-L509
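Editor's note: a worked micro-example of the push-down idea (illustrative re-implementation, not the library code). Edge a->d initially spans two layers; since every successor of a sits deep enough, a can move down one rank:

def push_down(rank, succ):
    # scan vertices from the deepest layer up; move a predecessor x down
    # to rank(v)-1 whenever all of x's successors sit at rank(v) or deeper
    for v in sorted(rank, key=rank.get, reverse=True):
        for x in (u for u in succ if v in succ[u]):
            if all(rank[y] >= rank[v] for y in succ[x]):
                rank[x] = rank[v] - 1
    return rank

rank = {'a': 0, 'b': 0, 'c': 1, 'd': 2}
succ = {'a': ['d'], 'b': ['c'], 'c': ['d'], 'd': []}
print(push_down(rank, succ))  # 'a' moves to rank 1, shortening edge a->d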
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout.setrank
def setrank(self,v): """set rank value for vertex v and add it to the corresponding layer. The Layer is created if it is the first vertex with this rank. """ assert self.dag r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1 self.grx[v].rank=r # add it to its layer: try: self.layers[r].append(v) except IndexError: assert r==len(self.layers) self.layers.append(Layer([v]))
python
def setrank(self,v): """set rank value for vertex v and add it to the corresponding layer. The Layer is created if it is the first vertex with this rank. """ assert self.dag r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1 self.grx[v].rank=r # add it to its layer: try: self.layers[r].append(v) except IndexError: assert r==len(self.layers) self.layers.append(Layer([v]))
[ "def", "setrank", "(", "self", ",", "v", ")", ":", "assert", "self", ".", "dag", "r", "=", "max", "(", "[", "self", ".", "grx", "[", "x", "]", ".", "rank", "for", "x", "in", "v", ".", "N", "(", "-", "1", ")", "]", "+", "[", "-", "1", "]", ")", "+", "1", "self", ".", "grx", "[", "v", "]", ".", "rank", "=", "r", "# add it to its layer:", "try", ":", "self", ".", "layers", "[", "r", "]", ".", "append", "(", "v", ")", "except", "IndexError", ":", "assert", "r", "==", "len", "(", "self", ".", "layers", ")", "self", ".", "layers", ".", "append", "(", "Layer", "(", "[", "v", "]", ")", ")" ]
set rank value for vertex v and add it to the corresponding layer. The Layer is created if it is the first vertex with this rank.
[ "set", "rank", "value", "for", "vertex", "v", "and", "add", "it", "to", "the", "corresponding", "layer", ".", "The", "Layer", "is", "created", "if", "it", "is", "the", "first", "vertex", "with", "this", "rank", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L512-L524
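Editor's note: the ranking rule from setrank() in isolation (a plain dict stands in for self.grx):

rank = {'a': 0, 'b': 1}
def setrank(preds):
    # one more than the deepest predecessor; the extra [-1] makes roots rank 0
    return max([rank[x] for x in preds] + [-1]) + 1

print(setrank(['a', 'b']))  # 2
print(setrank([]))          # 0 (a root)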
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout.dummyctrl
def dummyctrl(self,r,ctrl): """creates a DummyVertex at rank r inserted in the ctrl dict of the associated edge and layer. Arguments: r (int): rank value ctrl (dict): the edge's control vertices Returns: DummyVertex : the created DummyVertex. """ dv = DummyVertex(r) dv.view.w,dv.view.h=self.dw,self.dh self.grx[dv] = dv dv.ctrl = ctrl ctrl[r] = dv self.layers[r].append(dv) return dv
python
def dummyctrl(self,r,ctrl): """creates a DummyVertex at rank r inserted in the ctrl dict of the associated edge and layer. Arguments: r (int): rank value ctrl (dict): the edge's control vertices Returns: DummyVertex : the created DummyVertex. """ dv = DummyVertex(r) dv.view.w,dv.view.h=self.dw,self.dh self.grx[dv] = dv dv.ctrl = ctrl ctrl[r] = dv self.layers[r].append(dv) return dv
[ "def", "dummyctrl", "(", "self", ",", "r", ",", "ctrl", ")", ":", "dv", "=", "DummyVertex", "(", "r", ")", "dv", ".", "view", ".", "w", ",", "dv", ".", "view", ".", "h", "=", "self", ".", "dw", ",", "self", ".", "dh", "self", ".", "grx", "[", "dv", "]", "=", "dv", "dv", ".", "ctrl", "=", "ctrl", "ctrl", "[", "r", "]", "=", "dv", "self", ".", "layers", "[", "r", "]", ".", "append", "(", "dv", ")", "return", "dv" ]
creates a DummyVertex at rank r inserted in the ctrl dict of the associated edge and layer. Arguments: r (int): rank value ctrl (dict): the edge's control vertices Returns: DummyVertex : the created DummyVertex.
[ "creates", "a", "DummyVertex", "at", "rank", "r", "inserted", "in", "the", "ctrl", "dict", "of", "the", "associated", "edge", "and", "layer", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L526-L543
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout.setdummies
def setdummies(self,e): """creates and defines all needed dummy vertices for edge e. """ v0,v1 = e.v r0,r1 = self.grx[v0].rank,self.grx[v1].rank if r0>r1: assert e in self.alt_e v0,v1 = v1,v0 r0,r1 = r1,r0 if (r1-r0)>1: # "dummy vertices" are stored in the edge ctrl dict, # keyed by their rank in layers. ctrl=self.ctrls[e]={} ctrl[r0]=v0 ctrl[r1]=v1 for r in xrange(r0+1,r1): self.dummyctrl(r,ctrl)
python
def setdummies(self,e): """creates and defines all needed dummy vertices for edge e. """ v0,v1 = e.v r0,r1 = self.grx[v0].rank,self.grx[v1].rank if r0>r1: assert e in self.alt_e v0,v1 = v1,v0 r0,r1 = r1,r0 if (r1-r0)>1: # "dummy vertices" are stored in the edge ctrl dict, # keyed by their rank in layers. ctrl=self.ctrls[e]={} ctrl[r0]=v0 ctrl[r1]=v1 for r in xrange(r0+1,r1): self.dummyctrl(r,ctrl)
[ "def", "setdummies", "(", "self", ",", "e", ")", ":", "v0", ",", "v1", "=", "e", ".", "v", "r0", ",", "r1", "=", "self", ".", "grx", "[", "v0", "]", ".", "rank", ",", "self", ".", "grx", "[", "v1", "]", ".", "rank", "if", "r0", ">", "r1", ":", "assert", "e", "in", "self", ".", "alt_e", "v0", ",", "v1", "=", "v1", ",", "v0", "r0", ",", "r1", "=", "r1", ",", "r0", "if", "(", "r1", "-", "r0", ")", ">", "1", ":", "# \"dummy vertices\" are stored in the edge ctrl dict,", "# keyed by their rank in layers.", "ctrl", "=", "self", ".", "ctrls", "[", "e", "]", "=", "{", "}", "ctrl", "[", "r0", "]", "=", "v0", "ctrl", "[", "r1", "]", "=", "v1", "for", "r", "in", "xrange", "(", "r0", "+", "1", ",", "r1", ")", ":", "self", ".", "dummyctrl", "(", "r", ",", "ctrl", ")" ]
creates and defines all needed dummy vertices for edge e.
[ "creates", "and", "defines", "all", "needed", "dummy", "vertices", "for", "edge", "e", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L545-L561
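Editor's note: the shape of the ctrl mapping that setdummies() and dummyctrl() build for a long edge (tuples stand in for DummyVertex instances; names are illustrative):

def make_ctrl(v0, v1, r0, r1):
    # real endpoints keyed by their ranks, one dummy per intermediate rank
    ctrl = {r0: v0, r1: v1}
    for r in range(r0 + 1, r1):
        ctrl[r] = ('dummy', r)
    return ctrl

print(make_ctrl('u', 'w', 0, 3))  # {0: 'u', 3: 'w', 1: ('dummy', 1), 2: ('dummy', 2)}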
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout.draw_step
def draw_step(self): """iterator that computes all vertex coordinates and edge routing after just one step (one layer after the other from top to bottom to top). Purely inefficient! Use it only for "animation" or debugging purposes. """ ostep = self.ordering_step() for s in ostep: self.setxy() self.draw_edges() yield s
python
def draw_step(self): """iterator that computes all vertex coordinates and edge routing after just one step (one layer after the other from top to bottom to top). Purely inefficient! Use it only for "animation" or debugging purposes. """ ostep = self.ordering_step() for s in ostep: self.setxy() self.draw_edges() yield s
[ "def", "draw_step", "(", "self", ")", ":", "ostep", "=", "self", ".", "ordering_step", "(", ")", "for", "s", "in", "ostep", ":", "self", ".", "setxy", "(", ")", "self", ".", "draw_edges", "(", ")", "yield", "s" ]
iterator that computes all vertex coordinates and edge routing after just one step (one layer after the other from top to bottom to top). Purely inefficient! Use it only for "animation" or debugging purposes.
[ "iterator", "that", "computes", "all", "vertices", "coordinates", "and", "edge", "routing", "after", "just", "one", "step", "(", "one", "layer", "after", "the", "other", "from", "top", "to", "bottom", "to", "top", ")", ".", "Purely", "inefficient", "!", "Use", "it", "only", "for", "animation", "or", "debugging", "purpose", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L563-L572
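Editor's note: a usage sketch for step-by-step animation, continuing the setup from the draw() example earlier; redraw() is a hypothetical callback, not a grandalf API:

sug = SugiyamaLayout(g.C[0])
sug.init_all()
for layer, moved in sug.draw_step():  # coordinates and edge paths are refreshed at every yield
    redraw(g)                         # e.g. repaint a canvas frame per ordering step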
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout.ordering_step
def ordering_step(self,oneway=False): """iterator that computes all vertices ordering in their layers (one layer after the other from top to bottom, to top again unless oneway is True). """ self.dirv=-1 crossings = 0 for l in self.layers: mvmt = l.order() crossings += mvmt yield (l,mvmt) if oneway or (crossings == 0): return self.dirv=+1 while l: mvmt = l.order() yield (l,mvmt) l = l.nextlayer()
python
def ordering_step(self,oneway=False): """iterator that computes all vertices ordering in their layers (one layer after the other from top to bottom, to top again unless oneway is True). """ self.dirv=-1 crossings = 0 for l in self.layers: mvmt = l.order() crossings += mvmt yield (l,mvmt) if oneway or (crossings == 0): return self.dirv=+1 while l: mvmt = l.order() yield (l,mvmt) l = l.nextlayer()
[ "def", "ordering_step", "(", "self", ",", "oneway", "=", "False", ")", ":", "self", ".", "dirv", "=", "-", "1", "crossings", "=", "0", "for", "l", "in", "self", ".", "layers", ":", "mvmt", "=", "l", ".", "order", "(", ")", "crossings", "+=", "mvmt", "yield", "(", "l", ",", "mvmt", ")", "if", "oneway", "or", "(", "crossings", "==", "0", ")", ":", "return", "self", ".", "dirv", "=", "+", "1", "while", "l", ":", "mvmt", "=", "l", ".", "order", "(", ")", "yield", "(", "l", ",", "mvmt", ")", "l", "=", "l", ".", "nextlayer", "(", ")" ]
iterator that computes all vertices ordering in their layers (one layer after the other from top to bottom, to top again unless oneway is True).
[ "iterator", "that", "computes", "all", "vertices", "ordering", "in", "their", "layers", "(", "one", "layer", "after", "the", "other", "from", "top", "to", "bottom", "to", "top", "again", "unless", "oneway", "is", "True", ")", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L574-L591
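Editor's note: driving ordering_step() by hand, which is what draw() repeats N times (continuing the earlier setup):

total = 0
for layer, mvmt in sug.ordering_step():
    total += mvmt                     # per-layer movement/crossing count returned by Layer.order()
print('movement this round:', total)  # a downward sweep with total == 0 stops early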
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout.setxy
def setxy(self): """computes all vertex coordinates (x,y) using an algorithm by Brandes & Kopf. """ self._edge_inverter() self._detect_alignment_conflicts() inf = float('infinity') # initialize vertex coordinates attributes: for l in self.layers: for v in l: self.grx[v].root = v self.grx[v].align = v self.grx[v].sink = v self.grx[v].shift = inf self.grx[v].X = None self.grx[v].x = [0.0]*4 curvh = self.dirvh # save current dirvh value for dirvh in xrange(4): self.dirvh = dirvh self._coord_vertical_alignment() self._coord_horizontal_compact() self.dirvh = curvh # restore it # vertical coordinate assignment of all nodes: Y = 0 for l in self.layers: dY = max([v.view.h/2. for v in l]) for v in l: vx = sorted(self.grx[v].x) # mean of the 2 medians out of the 4 x-coord computed above: avgm = (vx[1]+vx[2])/2. # final xy-coordinates: v.view.xy = (avgm,Y+dY) Y += 2*dY+self.yspace self._edge_inverter()
python
def setxy(self): """computes all vertex coordinates (x,y) using an algorithm by Brandes & Kopf. """ self._edge_inverter() self._detect_alignment_conflicts() inf = float('infinity') # initialize vertex coordinates attributes: for l in self.layers: for v in l: self.grx[v].root = v self.grx[v].align = v self.grx[v].sink = v self.grx[v].shift = inf self.grx[v].X = None self.grx[v].x = [0.0]*4 curvh = self.dirvh # save current dirvh value for dirvh in xrange(4): self.dirvh = dirvh self._coord_vertical_alignment() self._coord_horizontal_compact() self.dirvh = curvh # restore it # vertical coordinate assignment of all nodes: Y = 0 for l in self.layers: dY = max([v.view.h/2. for v in l]) for v in l: vx = sorted(self.grx[v].x) # mean of the 2 medians out of the 4 x-coord computed above: avgm = (vx[1]+vx[2])/2. # final xy-coordinates: v.view.xy = (avgm,Y+dY) Y += 2*dY+self.yspace self._edge_inverter()
[ "def", "setxy", "(", "self", ")", ":", "self", ".", "_edge_inverter", "(", ")", "self", ".", "_detect_alignment_conflicts", "(", ")", "inf", "=", "float", "(", "'infinity'", ")", "# initialize vertex coordinates attributes:", "for", "l", "in", "self", ".", "layers", ":", "for", "v", "in", "l", ":", "self", ".", "grx", "[", "v", "]", ".", "root", "=", "v", "self", ".", "grx", "[", "v", "]", ".", "align", "=", "v", "self", ".", "grx", "[", "v", "]", ".", "sink", "=", "v", "self", ".", "grx", "[", "v", "]", ".", "shift", "=", "inf", "self", ".", "grx", "[", "v", "]", ".", "X", "=", "None", "self", ".", "grx", "[", "v", "]", ".", "x", "=", "[", "0.0", "]", "*", "4", "curvh", "=", "self", ".", "dirvh", "# save current dirvh value", "for", "dirvh", "in", "xrange", "(", "4", ")", ":", "self", ".", "dirvh", "=", "dirvh", "self", ".", "_coord_vertical_alignment", "(", ")", "self", ".", "_coord_horizontal_compact", "(", ")", "self", ".", "dirvh", "=", "curvh", "# restore it", "# vertical coordinate assigment of all nodes:", "Y", "=", "0", "for", "l", "in", "self", ".", "layers", ":", "dY", "=", "max", "(", "[", "v", ".", "view", ".", "h", "/", "2.", "for", "v", "in", "l", "]", ")", "for", "v", "in", "l", ":", "vx", "=", "sorted", "(", "self", ".", "grx", "[", "v", "]", ".", "x", ")", "# mean of the 2 medians out of the 4 x-coord computed above:", "avgm", "=", "(", "vx", "[", "1", "]", "+", "vx", "[", "2", "]", ")", "/", "2.", "# final xy-coordinates :", "v", ".", "view", ".", "xy", "=", "(", "avgm", ",", "Y", "+", "dY", ")", "Y", "+=", "2", "*", "dY", "+", "self", ".", "yspace", "self", ".", "_edge_inverter", "(", ")" ]
computes all vertex coordinates (x,y) using an algorithm by Brandes & Kopf.
[ "computes", "all", "vertex", "coordinates", "(", "x", "y", ")", "using", "an", "algorithm", "by", "Brandes", "&", "Kopf", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L593-L626
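Editor's note: a worked instance of the final horizontal placement; each vertex gets four candidate x values (one per dirvh pass) and keeps the mean of the two medians, which discards the two extreme candidates:

x_candidates = sorted([12.0, 20.0, 22.0, 40.0])      # one per (dirv, dirh) alignment pass
x_final = (x_candidates[1] + x_candidates[2]) / 2.0  # -> 21.0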
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout._detect_alignment_conflicts
def _detect_alignment_conflicts(self): """mark conflicts between edges: inner edges are edges between dummy nodes type 0 is regular crossing regular (or sharing vertex) type 1 is inner crossing regular (targeted crossings) type 2 is inner crossing inner (avoided by reduce_crossings phase) """ curvh = self.dirvh # save current dirvh value self.dirvh=0 self.conflicts = [] for L in self.layers: last = len(L)-1 prev = L.prevlayer() if not prev: continue k0=0 k1_init=len(prev)-1 l=0 for l1,v in enumerate(L): if not self.grx[v].dummy: continue if l1==last or v.inner(-1): k1=k1_init if v.inner(-1): k1=self.grx[v.N(-1)[-1]].pos for vl in L[l:l1+1]: for vk in L._neighbors(vl): k = self.grx[vk].pos if (k<k0 or k>k1): self.conflicts.append((vk,vl)) l=l1+1 k0=k1 self.dirvh = curvh
python
def _detect_alignment_conflicts(self): """mark conflicts between edges: inner edges are edges between dummy nodes type 0 is regular crossing regular (or sharing vertex) type 1 is inner crossing regular (targeted crossings) type 2 is inner crossing inner (avoided by reduce_crossings phase) """ curvh = self.dirvh # save current dirvh value self.dirvh=0 self.conflicts = [] for L in self.layers: last = len(L)-1 prev = L.prevlayer() if not prev: continue k0=0 k1_init=len(prev)-1 l=0 for l1,v in enumerate(L): if not self.grx[v].dummy: continue if l1==last or v.inner(-1): k1=k1_init if v.inner(-1): k1=self.grx[v.N(-1)[-1]].pos for vl in L[l:l1+1]: for vk in L._neighbors(vl): k = self.grx[vk].pos if (k<k0 or k>k1): self.conflicts.append((vk,vl)) l=l1+1 k0=k1 self.dirvh = curvh
[ "def", "_detect_alignment_conflicts", "(", "self", ")", ":", "curvh", "=", "self", ".", "dirvh", "# save current dirvh value", "self", ".", "dirvh", "=", "0", "self", ".", "conflicts", "=", "[", "]", "for", "L", "in", "self", ".", "layers", ":", "last", "=", "len", "(", "L", ")", "-", "1", "prev", "=", "L", ".", "prevlayer", "(", ")", "if", "not", "prev", ":", "continue", "k0", "=", "0", "k1_init", "=", "len", "(", "prev", ")", "-", "1", "l", "=", "0", "for", "l1", ",", "v", "in", "enumerate", "(", "L", ")", ":", "if", "not", "self", ".", "grx", "[", "v", "]", ".", "dummy", ":", "continue", "if", "l1", "==", "last", "or", "v", ".", "inner", "(", "-", "1", ")", ":", "k1", "=", "k1_init", "if", "v", ".", "inner", "(", "-", "1", ")", ":", "k1", "=", "self", ".", "grx", "[", "v", ".", "N", "(", "-", "1", ")", "[", "-", "1", "]", "]", ".", "pos", "for", "vl", "in", "L", "[", "l", ":", "l1", "+", "1", "]", ":", "for", "vk", "in", "L", ".", "_neighbors", "(", "vl", ")", ":", "k", "=", "self", ".", "grx", "[", "vk", "]", ".", "pos", "if", "(", "k", "<", "k0", "or", "k", ">", "k1", ")", ":", "self", ".", "conflicts", ".", "append", "(", "(", "vk", ",", "vl", ")", ")", "l", "=", "l1", "+", "1", "k0", "=", "k1", "self", ".", "dirvh", "=", "curvh" ]
mark conflicts between edges: inner edges are edges between dummy nodes type 0 is regular crossing regular (or sharing vertex) type 1 is inner crossing regular (targeted crossings) type 2 is inner crossing inner (avoided by reduce_crossings phase)
[ "mark", "conflicts", "between", "edges", ":", "inner", "edges", "are", "edges", "between", "dummy", "nodes", "type", "0", "is", "regular", "crossing", "regular", "(", "or", "sharing", "vertex", ")", "type", "1", "is", "inner", "crossing", "regular", "(", "targeted", "crossings", ")", "type", "2", "is", "inner", "crossing", "inner", "(", "avoided", "by", "reduce_crossings", "phase", ")" ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L628-L658
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout._coord_vertical_alignment
def _coord_vertical_alignment(self): """performs vertical alignment according to current dirvh internal state. """ dirh,dirv = self.dirh,self.dirv g = self.grx for l in self.layers[::-dirv]: if not l.prevlayer(): continue r=None for vk in l[::dirh]: for m in l._medianindex(vk): # take the median node in dirv layer: um = l.prevlayer()[m] # if vk is "free" align it with um's root if g[vk].align is vk: if dirv==1: vpair = (vk,um) else: vpair = (um,vk) # if vk<->um link is used for alignment if (vpair not in self.conflicts) and \ (r==None or dirh*r<dirh*m): g[um].align = vk g[vk].root = g[um].root g[vk].align = g[vk].root r = m
python
def _coord_vertical_alignment(self): """performs vertical alignment according to current dirvh internal state. """ dirh,dirv = self.dirh,self.dirv g = self.grx for l in self.layers[::-dirv]: if not l.prevlayer(): continue r=None for vk in l[::dirh]: for m in l._medianindex(vk): # take the median node in dirv layer: um = l.prevlayer()[m] # if vk is "free" align it with um's root if g[vk].align is vk: if dirv==1: vpair = (vk,um) else: vpair = (um,vk) # if vk<->um link is used for alignment if (vpair not in self.conflicts) and \ (r==None or dirh*r<dirh*m): g[um].align = vk g[vk].root = g[um].root g[vk].align = g[vk].root r = m
[ "def", "_coord_vertical_alignment", "(", "self", ")", ":", "dirh", ",", "dirv", "=", "self", ".", "dirh", ",", "self", ".", "dirv", "g", "=", "self", ".", "grx", "for", "l", "in", "self", ".", "layers", "[", ":", ":", "-", "dirv", "]", ":", "if", "not", "l", ".", "prevlayer", "(", ")", ":", "continue", "r", "=", "None", "for", "vk", "in", "l", "[", ":", ":", "dirh", "]", ":", "for", "m", "in", "l", ".", "_medianindex", "(", "vk", ")", ":", "# take the median node in dirv layer:", "um", "=", "l", ".", "prevlayer", "(", ")", "[", "m", "]", "# if vk is \"free\" align it with um's root", "if", "g", "[", "vk", "]", ".", "align", "is", "vk", ":", "if", "dirv", "==", "1", ":", "vpair", "=", "(", "vk", ",", "um", ")", "else", ":", "vpair", "=", "(", "um", ",", "vk", ")", "# if vk<->um link is used for alignment", "if", "(", "vpair", "not", "in", "self", ".", "conflicts", ")", "and", "(", "r", "==", "None", "or", "dirh", "*", "r", "<", "dirh", "*", "m", ")", ":", "g", "[", "um", "]", ".", "align", "=", "vk", "g", "[", "vk", "]", ".", "root", "=", "g", "[", "um", "]", ".", "root", "g", "[", "vk", "]", ".", "align", "=", "g", "[", "vk", "]", ".", "root", "r", "=", "m" ]
performs vertical alignment according to current dirvh internal state.
[ "performs", "vertical", "alignment", "according", "to", "current", "dirvh", "internal", "state", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L660-L682
bdcht/grandalf
grandalf/layouts.py
SugiyamaLayout.draw_edges
def draw_edges(self): """Basic edge routing applied only for edges with dummy points. Enhanced edge routing can be performed by using the appropriate *route_with_xxx* functions from :ref:routing_ in the edges' view. """ for e in self.g.E(): if hasattr(e,'view'): l=[] r0,r1 = None,None if e in self.ctrls: D = self.ctrls[e] r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank if r0<r1: ranks = xrange(r0+1,r1) else: ranks = xrange(r0-1,r1,-1) l = [D[r].view.xy for r in ranks] l.insert(0,e.v[0].view.xy) l.append(e.v[1].view.xy) try: self.route_edge(e,l) except AttributeError: pass e.view.setpath(l)
python
def draw_edges(self): """Basic edge routing applied only for edges with dummy points. Enhanced edge routing can be performed by using the appropriate *route_with_xxx* functions from :ref:routing_ in the edges' view. """ for e in self.g.E(): if hasattr(e,'view'): l=[] r0,r1 = None,None if e in self.ctrls: D = self.ctrls[e] r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank if r0<r1: ranks = xrange(r0+1,r1) else: ranks = xrange(r0-1,r1,-1) l = [D[r].view.xy for r in ranks] l.insert(0,e.v[0].view.xy) l.append(e.v[1].view.xy) try: self.route_edge(e,l) except AttributeError: pass e.view.setpath(l)
[ "def", "draw_edges", "(", "self", ")", ":", "for", "e", "in", "self", ".", "g", ".", "E", "(", ")", ":", "if", "hasattr", "(", "e", ",", "'view'", ")", ":", "l", "=", "[", "]", "r0", ",", "r1", "=", "None", ",", "None", "if", "e", "in", "self", ".", "ctrls", ":", "D", "=", "self", ".", "ctrls", "[", "e", "]", "r0", ",", "r1", "=", "self", ".", "grx", "[", "e", ".", "v", "[", "0", "]", "]", ".", "rank", ",", "self", ".", "grx", "[", "e", ".", "v", "[", "1", "]", "]", ".", "rank", "if", "r0", "<", "r1", ":", "ranks", "=", "xrange", "(", "r0", "+", "1", ",", "r1", ")", "else", ":", "ranks", "=", "xrange", "(", "r0", "-", "1", ",", "r1", ",", "-", "1", ")", "l", "=", "[", "D", "[", "r", "]", ".", "view", ".", "xy", "for", "r", "in", "ranks", "]", "l", ".", "insert", "(", "0", ",", "e", ".", "v", "[", "0", "]", ".", "view", ".", "xy", ")", "l", ".", "append", "(", "e", ".", "v", "[", "1", "]", ".", "view", ".", "xy", ")", "try", ":", "self", ".", "route_edge", "(", "e", ",", "l", ")", "except", "AttributeError", ":", "pass", "e", ".", "view", ".", "setpath", "(", "l", ")" ]
Basic edge routing applied only for edges with dummy points. Enhanced edge routing can be performed by using the appropriate *route_with_xxx* functions from :ref:routing_ in the edges' view.
[ "Basic", "edge", "routing", "applied", "only", "for", "edges", "with", "dummy", "points", ".", "Enhanced", "edge", "routing", "can", "be", "performed", "by", "using", "the", "apropriate", "*", "route_with_xxx", "*", "functions", "from", ":", "ref", ":", "routing_", "in", "the", "edges", "view", "." ]
train
https://github.com/bdcht/grandalf/blob/b0a604afa79e5201eebe5feb56ae5ec7afc07b95/grandalf/layouts.py#L755-L778
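Editor's note: draw_edges() only requires each edge to carry a view exposing setpath(); the route_edge hook is optional. A self-contained sketch with a minimal view class (an assumption for the example, not the library's own viewer), continuing the earlier setup:

class edgeview(object):
    def setpath(self, pts):
        self.pts = pts  # endpoints plus any dummy control points, in rank order

for e in E:
    e.view = edgeview()
sug.draw()
print(E[0].view.pts)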
MSchnei/pyprf_feature
pyprf_feature/analysis/pyprf_main.py
pyprf
def pyprf(strCsvCnfg, lgcTest=False, varRat=None, strPathHrf=None): """ Main function for pRF mapping. Parameters ---------- strCsvCnfg : str Absolute file path of config file. lgcTest : Boolean Whether this is a test (pytest). If yes, absolute path of pyprf library will be prepended to config file paths. varRat : float, default None Ratio of size suppressive surround to size of center pRF strPathHrf : str or None: Path to npy file with custom hrf parameters. If None, default parameters will be used. """ # ************************************************************************* # *** Check time print('---pRF analysis') varTme01 = time.time() # ************************************************************************* # ************************************************************************* # *** Preparations # Load config parameters from csv file into dictionary: dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest) # Load config parameters from dictionary into namespace: cfg = cls_set_config(dicCnfg) # Conditional imports: if cfg.strVersion == 'gpu': from pyprf_feature.analysis.find_prf_gpu import find_prf_gpu if ((cfg.strVersion == 'cython') or (cfg.strVersion == 'numpy')): from pyprf_feature.analysis.find_prf_cpu import find_prf_cpu # Convert preprocessing parameters (for temporal smoothing) # from SI units (i.e. [s]) into units of data array (volumes): cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr) # ************************************************************************* # ************************************************************************* # *** Create or load pRF time course models # Create model time courses. Also return logical for inclusion of model # parameters which will be needed later when we create model parameters # in degree. aryPrfTc, lgcMdlInc = model_creation(dicCnfg, varRat=varRat, strPathHrf=strPathHrf) # Deduce the number of features from the pRF time course models array cfg.varNumFtr = aryPrfTc.shape[1] # ************************************************************************* # ************************************************************************* # *** Preprocessing # The model time courses will be preprocessed such that they are smoothed # (temporally) with same factor as the data and that they will be z-scored: aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp) # The functional data will be masked and demeaned: aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func( cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100) # set the precision of the header to np.float32 so that the prf results # will be saved in this precision later hdrMsk.set_data_dtype(np.float32) # ************************************************************************* # ************************************************************************* # *** Checks # Make sure that if gpu fitting is used, the number of cross-validations is # set to 1, not higher if cfg.strVersion == 'gpu': strErrMsg = 'Stopping program. ' + \ 'Cross-validation on GPU is currently not supported. ' + \ 'Set varNumXval equal to 1 in csv file in order to continue. ' assert cfg.varNumXval == 1, strErrMsg # For the GPU version, we need to set down the parallelisation to 1 now, # because no separate CPU threads are to be created. We may still use CPU # parallelisation for preprocessing, which is why the parallelisation # factor is only reduced now, not earlier. if cfg.strVersion == 'gpu': cfg.varPar = 1 # Make sure that if cython is used, the number of features is 1 or 2, # not higher if cfg.strVersion == 'cython': strErrMsg = 'Stopping program. ' + \ 'Cython is not supported for more features than 1. ' + \ 'Set strVersion equal \'numpy\'.' assert cfg.varNumFtr in [1, 2], strErrMsg # Check whether we need to crossvalidate if np.greater(cfg.varNumXval, 1): cfg.lgcXval = True elif np.equal(cfg.varNumXval, 1): cfg.lgcXval = False strErrMsg = 'Stopping program. ' + \ 'Set numXval (number of crossvalidation folds) to 1 or higher' assert np.greater_equal(cfg.varNumXval, 1), strErrMsg # ************************************************************************* # *** Find pRF models for voxel time courses print('------Find pRF models for voxel time courses') # Number of voxels for which pRF finding will be performed: cfg.varNumVoxInc = aryFunc.shape[0] print('---------Number of voxels on which pRF finding will be performed: ' + str(cfg.varNumVoxInc)) print('---------Number of features pRF finding will be performed with: ' + str(cfg.varNumFtr)) print('---------Preparing parallel pRF model finding') # Get array with all possible model parameter combination: # [x positions, y positions, sigmas] aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX), int(cfg.varVslSpcSzeY)), cfg.varNum1, cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2, cfg.varExtYmin, cfg.varExtYmax, cfg.varNumPrfSizes, cfg.varPrfStdMin, cfg.varPrfStdMax, kwUnt='deg', kwCrd=cfg.strKwCrd) # Exclude models with prf center outside stimulated area aryMdlParams = aryMdlParams[lgcMdlInc, :] # Empty list for results (parameters of best fitting pRF model): lstPrfRes = [None] * cfg.varPar # Empty list for processes: lstPrcs = [None] * cfg.varPar # Create a queue to put the results in: queOut = mp.Queue() # Create list with chunks of functional data for the parallel processes: lstFunc = np.array_split(aryFunc, cfg.varPar) # We don't need the original array with the functional data anymore: del(aryFunc) # Prepare dictionary to pass as kwargs to find_prf_cpu dctKw = {'lgcRstr': None, 'lgcPrint': True} # CPU version (using numpy or cython for pRF finding): if ((cfg.strVersion == 'numpy') or (cfg.strVersion == 'cython')): print('---------pRF finding on CPU') print('---------Creating parallel processes') # Create processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc] = mp.Process(target=find_prf_cpu, args=(idxPrc, lstFunc[idxPrc], aryPrfTc, aryMdlParams, cfg.strVersion, cfg.lgcXval, cfg.varNumXval, queOut), kwargs=dctKw, ) # Daemon (kills processes when exiting): lstPrcs[idxPrc].Daemon = True # GPU version (using tensorflow for pRF finding): elif cfg.strVersion == 'gpu': print('---------pRF finding on GPU') # Create processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc] = mp.Process(target=find_prf_gpu, args=(idxPrc, aryMdlParams, lstFunc[idxPrc], aryPrfTc, queOut), kwargs=dctKw, ) # Daemon (kills processes when exiting): lstPrcs[idxPrc].Daemon = True # Start processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc].start() # Delete reference to list with function data (the data continues to exist # in child process): del(lstFunc) # Collect results from queue: for idxPrc in range(0, cfg.varPar): lstPrfRes[idxPrc] = queOut.get(True) # Join processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc].join() # ************************************************************************* # ************************************************************************* # *** Prepare pRF finding results for export print('---------Prepare pRF finding results for export') # Put output into correct order: lstPrfRes = sorted(lstPrfRes) # collect results from parallelization aryBstXpos = joinRes(lstPrfRes, cfg.varPar, 1, inFormat='1D') aryBstYpos = joinRes(lstPrfRes, cfg.varPar, 2, inFormat='1D') aryBstSd = joinRes(lstPrfRes, cfg.varPar, 3, inFormat='1D') aryBstR2 = joinRes(lstPrfRes, cfg.varPar, 4, inFormat='1D') aryBstBts = joinRes(lstPrfRes, cfg.varPar, 5, inFormat='2D') if np.greater(cfg.varNumXval, 1): aryBstR2Single = joinRes(lstPrfRes, cfg.varPar, 6, inFormat='2D') # Delete unneeded large objects: del(lstPrfRes) # ************************************************************************* # ************************************************************************* # Calculate polar angle map: aryPlrAng = np.arctan2(aryBstYpos, aryBstXpos) # Calculate eccentricity map (r = sqrt( x^2 + y^2 ) ): aryEcc = np.sqrt(np.add(np.square(aryBstXpos), np.square(aryBstYpos))) # ************************************************************************* # ************************************************************************* # Export each map of best parameters as a 3D nii file print('---------Exporting results') # Append 'hrf' to cfg.strPathOut, if fitting was done with custom hrf if strPathHrf is not None: cfg.strPathOut = cfg.strPathOut + '_hrf' # Concatenate all the best voxel maps aryBstMaps = np.stack([aryBstXpos, aryBstYpos, aryBstSd, aryBstR2, aryPlrAng, aryEcc], axis=1) # List with name suffixes of output images: lstNiiNames = ['_x_pos', '_y_pos', '_SD', '_R2', '_polar_angle', '_eccentricity'] # Append ratio to nii file name, if fitting was done with sup surround if varRat is not None: lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames] # Create full path names from nii file names and output path lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames] # export map results as separate 3D nii files export_nii(aryBstMaps, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='3D') # ************************************************************************* # ************************************************************************* # Save beta parameter estimates for every feature: # List with name suffixes of output images: lstNiiNames = ['_Betas'] # Append ratio to nii file name, if fitting was done with sup surround if varRat is not None: lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames] # Create full path names from nii file names and output path lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames] # export beta parameter as a single 4D nii file export_nii(aryBstBts, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='4D') # ************************************************************************* # ************************************************************************* # Save R2 maps from crossvalidation (saved for every run) as nii: if np.greater(cfg.varNumXval, 1): # truncate extremely negative R2 values aryBstR2Single[np.where(np.less_equal(aryBstR2Single, -1.0))] = -1.0 # List with name suffixes of output images: lstNiiNames = ['_R2_single'] # Append ratio to nii file name, if fitting was done with sup surround if varRat is not None: lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames] # Create full path names from nii file names and output path lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames] # export R2 maps as a single 4D nii file export_nii(aryBstR2Single, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='4D') # ************************************************************************* # ************************************************************************* # *** Report time varTme02 = time.time() varTme03 = varTme02 - varTme01 print('---Elapsed time: ' + str(varTme03) + ' s') print('---Done.')
python
def pyprf(strCsvCnfg, lgcTest=False, varRat=None, strPathHrf=None): """ Main function for pRF mapping. Parameters ---------- strCsvCnfg : str Absolute file path of config file. lgcTest : Boolean Whether this is a test (pytest). If yes, absolute path of pyprf library will be prepended to config file paths. varRat : float, default None Ratio of size suppressive surround to size of center pRF strPathHrf : str or None: Path to npy file with custom hrf parameters. If None, default parameters will be used. """ # ************************************************************************* # *** Check time print('---pRF analysis') varTme01 = time.time() # ************************************************************************* # ************************************************************************* # *** Preparations # Load config parameters from csv file into dictionary: dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest) # Load config parameters from dictionary into namespace: cfg = cls_set_config(dicCnfg) # Conditional imports: if cfg.strVersion == 'gpu': from pyprf_feature.analysis.find_prf_gpu import find_prf_gpu if ((cfg.strVersion == 'cython') or (cfg.strVersion == 'numpy')): from pyprf_feature.analysis.find_prf_cpu import find_prf_cpu # Convert preprocessing parameters (for temporal smoothing) # from SI units (i.e. [s]) into units of data array (volumes): cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr) # ************************************************************************* # ************************************************************************* # *** Create or load pRF time course models # Create model time courses. Also return logical for inclusion of model # parameters which will be needed later when we create model parameters # in degree. aryPrfTc, lgcMdlInc = model_creation(dicCnfg, varRat=varRat, strPathHrf=strPathHrf) # Deduce the number of features from the pRF time course models array cfg.varNumFtr = aryPrfTc.shape[1] # ************************************************************************* # ************************************************************************* # *** Preprocessing # The model time courses will be preprocessed such that they are smoothed # (temporally) with same factor as the data and that they will be z-scored: aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp) # The functional data will be masked and demeaned: aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func( cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100) # set the precision of the header to np.float32 so that the prf results # will be saved in this precision later hdrMsk.set_data_dtype(np.float32) # ************************************************************************* # ************************************************************************* # *** Checks # Make sure that if gpu fitting is used, the number of cross-validations is # set to 1, not higher if cfg.strVersion == 'gpu': strErrMsg = 'Stopping program. ' + \ 'Cross-validation on GPU is currently not supported. ' + \ 'Set varNumXval equal to 1 in csv file in order to continue. ' assert cfg.varNumXval == 1, strErrMsg # For the GPU version, we need to set down the parallelisation to 1 now, # because no separate CPU threads are to be created. We may still use CPU # parallelisation for preprocessing, which is why the parallelisation # factor is only reduced now, not earlier. if cfg.strVersion == 'gpu': cfg.varPar = 1 # Make sure that if cython is used, the number of features is 1 or 2, # not higher if cfg.strVersion == 'cython': strErrMsg = 'Stopping program. ' + \ 'Cython is not supported for more features than 1. ' + \ 'Set strVersion equal \'numpy\'.' assert cfg.varNumFtr in [1, 2], strErrMsg # Check whether we need to crossvalidate if np.greater(cfg.varNumXval, 1): cfg.lgcXval = True elif np.equal(cfg.varNumXval, 1): cfg.lgcXval = False strErrMsg = 'Stopping program. ' + \ 'Set numXval (number of crossvalidation folds) to 1 or higher' assert np.greater_equal(cfg.varNumXval, 1), strErrMsg # ************************************************************************* # *** Find pRF models for voxel time courses print('------Find pRF models for voxel time courses') # Number of voxels for which pRF finding will be performed: cfg.varNumVoxInc = aryFunc.shape[0] print('---------Number of voxels on which pRF finding will be performed: ' + str(cfg.varNumVoxInc)) print('---------Number of features pRF finding will be performed with: ' + str(cfg.varNumFtr)) print('---------Preparing parallel pRF model finding') # Get array with all possible model parameter combination: # [x positions, y positions, sigmas] aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX), int(cfg.varVslSpcSzeY)), cfg.varNum1, cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2, cfg.varExtYmin, cfg.varExtYmax, cfg.varNumPrfSizes, cfg.varPrfStdMin, cfg.varPrfStdMax, kwUnt='deg', kwCrd=cfg.strKwCrd) # Exclude models with prf center outside stimulated area aryMdlParams = aryMdlParams[lgcMdlInc, :] # Empty list for results (parameters of best fitting pRF model): lstPrfRes = [None] * cfg.varPar # Empty list for processes: lstPrcs = [None] * cfg.varPar # Create a queue to put the results in: queOut = mp.Queue() # Create list with chunks of functional data for the parallel processes: lstFunc = np.array_split(aryFunc, cfg.varPar) # We don't need the original array with the functional data anymore: del(aryFunc) # Prepare dictionary to pass as kwargs to find_prf_cpu dctKw = {'lgcRstr': None, 'lgcPrint': True} # CPU version (using numpy or cython for pRF finding): if ((cfg.strVersion == 'numpy') or (cfg.strVersion == 'cython')): print('---------pRF finding on CPU') print('---------Creating parallel processes') # Create processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc] = mp.Process(target=find_prf_cpu, args=(idxPrc, lstFunc[idxPrc], aryPrfTc, aryMdlParams, cfg.strVersion, cfg.lgcXval, cfg.varNumXval, queOut), kwargs=dctKw, ) # Daemon (kills processes when exiting): lstPrcs[idxPrc].Daemon = True # GPU version (using tensorflow for pRF finding): elif cfg.strVersion == 'gpu': print('---------pRF finding on GPU') # Create processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc] = mp.Process(target=find_prf_gpu, args=(idxPrc, aryMdlParams, lstFunc[idxPrc], aryPrfTc, queOut), kwargs=dctKw, ) # Daemon (kills processes when exiting): lstPrcs[idxPrc].Daemon = True # Start processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc].start() # Delete reference to list with function data (the data continues to exist # in child process): del(lstFunc) # Collect results from queue: for idxPrc in range(0, cfg.varPar): lstPrfRes[idxPrc] = queOut.get(True) # Join processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc].join() # ************************************************************************* # ************************************************************************* # *** Prepare pRF finding results for export print('---------Prepare pRF finding results for export') # Put output into correct order: lstPrfRes = sorted(lstPrfRes) # collect results from parallelization aryBstXpos = joinRes(lstPrfRes, cfg.varPar, 1, inFormat='1D') aryBstYpos = joinRes(lstPrfRes, cfg.varPar, 2, inFormat='1D') aryBstSd = joinRes(lstPrfRes, cfg.varPar, 3, inFormat='1D') aryBstR2 = joinRes(lstPrfRes, cfg.varPar, 4, inFormat='1D') aryBstBts = joinRes(lstPrfRes, cfg.varPar, 5, inFormat='2D') if np.greater(cfg.varNumXval, 1): aryBstR2Single = joinRes(lstPrfRes, cfg.varPar, 6, inFormat='2D') # Delete unneeded large objects: del(lstPrfRes) # ************************************************************************* # ************************************************************************* # Calculate polar angle map: aryPlrAng = np.arctan2(aryBstYpos, aryBstXpos) # Calculate eccentricity map (r = sqrt( x^2 + y^2 ) ): aryEcc = np.sqrt(np.add(np.square(aryBstXpos), np.square(aryBstYpos))) # ************************************************************************* # ************************************************************************* # Export each map of best parameters as a 3D nii file print('---------Exporting results') # Append 'hrf' to cfg.strPathOut, if fitting was done with custom hrf if strPathHrf is not None: cfg.strPathOut = cfg.strPathOut + '_hrf' # Concatenate all the best voxel maps aryBstMaps = np.stack([aryBstXpos, aryBstYpos, aryBstSd, aryBstR2, aryPlrAng, aryEcc], axis=1) # List with name suffixes of output images: lstNiiNames = ['_x_pos', '_y_pos', '_SD', '_R2', '_polar_angle', '_eccentricity'] # Append ratio to nii file name, if fitting was done with sup surround if varRat is not None: lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames] # Create full path names from nii file names and output path lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames] # export map results as separate 3D nii files export_nii(aryBstMaps, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='3D') # ************************************************************************* # ************************************************************************* # Save beta parameter estimates for every feature: # List with name suffixes of output images: lstNiiNames = ['_Betas'] # Append ratio to nii file name, if fitting was done with sup surround if varRat is not None: lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames] # Create full path names from nii file names and output path lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames] # export beta parameter as a single 4D nii file export_nii(aryBstBts, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='4D') # ************************************************************************* # ************************************************************************* # Save R2 maps from crossvalidation (saved for every run) as nii: if np.greater(cfg.varNumXval, 1): # truncate extremely negative R2 values aryBstR2Single[np.where(np.less_equal(aryBstR2Single, -1.0))] = -1.0 # List with name suffixes of output images: lstNiiNames = ['_R2_single'] # Append ratio to nii file name, if fitting was done with sup surround if varRat is not None: lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames] # Create full path names from nii file names and output path lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames] # export R2 maps as a single 4D nii file export_nii(aryBstR2Single, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='4D') # ************************************************************************* # ************************************************************************* # *** Report time varTme02 = time.time() varTme03 = varTme02 - varTme01 print('---Elapsed time: ' + str(varTme03) + ' s') print('---Done.')
[ "def", "pyprf", "(", "strCsvCnfg", ",", "lgcTest", "=", "False", ",", "varRat", "=", "None", ",", "strPathHrf", "=", "None", ")", ":", "# *************************************************************************", "# *** Check time", "print", "(", "'---pRF analysis'", ")", "varTme01", "=", "time", ".", "time", "(", ")", "# *************************************************************************", "# *************************************************************************", "# *** Preparations", "# Load config parameters from csv file into dictionary:", "dicCnfg", "=", "load_config", "(", "strCsvCnfg", ",", "lgcTest", "=", "lgcTest", ")", "# Load config parameters from dictionary into namespace:", "cfg", "=", "cls_set_config", "(", "dicCnfg", ")", "# Conditional imports:", "if", "cfg", ".", "strVersion", "==", "'gpu'", ":", "from", "pyprf_feature", ".", "analysis", ".", "find_prf_gpu", "import", "find_prf_gpu", "if", "(", "(", "cfg", ".", "strVersion", "==", "'cython'", ")", "or", "(", "cfg", ".", "strVersion", "==", "'numpy'", ")", ")", ":", "from", "pyprf_feature", ".", "analysis", ".", "find_prf_cpu", "import", "find_prf_cpu", "# Convert preprocessing parameters (for temporal smoothing)", "# from SI units (i.e. [s]) into units of data array (volumes):", "cfg", ".", "varSdSmthTmp", "=", "np", ".", "divide", "(", "cfg", ".", "varSdSmthTmp", ",", "cfg", ".", "varTr", ")", "# *************************************************************************", "# *************************************************************************", "# *** Create or load pRF time course models", "# Create model time courses. Also return logical for inclusion of model", "# parameters which will be needed later when we create model parameters", "# in degree.", "aryPrfTc", ",", "lgcMdlInc", "=", "model_creation", "(", "dicCnfg", ",", "varRat", "=", "varRat", ",", "strPathHrf", "=", "strPathHrf", ")", "# Deduce the number of features from the pRF time course models array", "cfg", ".", "varNumFtr", "=", "aryPrfTc", ".", "shape", "[", "1", "]", "# *************************************************************************", "# *************************************************************************", "# *** Preprocessing", "# The model time courses will be preprocessed such that they are smoothed", "# (temporally) with same factor as the data and that they will be z-scored:", "aryPrfTc", "=", "prep_models", "(", "aryPrfTc", ",", "varSdSmthTmp", "=", "cfg", ".", "varSdSmthTmp", ")", "# The functional data will be masked and demeaned:", "aryLgcMsk", ",", "aryLgcVar", ",", "hdrMsk", ",", "aryAff", ",", "aryFunc", ",", "tplNiiShp", "=", "prep_func", "(", "cfg", ".", "strPathNiiMask", ",", "cfg", ".", "lstPathNiiFunc", ",", "varAvgThr", "=", "-", "100", ")", "# set the precision of the header to np.float32 so that the prf results", "# will be saved in this precision later", "hdrMsk", ".", "set_data_dtype", "(", "np", ".", "float32", ")", "# *************************************************************************", "# *************************************************************************", "# *** Checks", "# Make sure that if gpu fitting is used, the number of cross-validations is", "# set to 1, not higher", "if", "cfg", ".", "strVersion", "==", "'gpu'", ":", "strErrMsg", "=", "'Stopping program. '", "+", "'Cross-validation on GPU is currently not supported. '", "+", "'Set varNumXval equal to 1 in csv file in order to continue. 
'", "assert", "cfg", ".", "varNumXval", "==", "1", ",", "strErrMsg", "# For the GPU version, we need to set down the parallelisation to 1 now,", "# because no separate CPU threads are to be created. We may still use CPU", "# parallelisation for preprocessing, which is why the parallelisation", "# factor is only reduced now, not earlier.", "if", "cfg", ".", "strVersion", "==", "'gpu'", ":", "cfg", ".", "varPar", "=", "1", "# Make sure that if cython is used, the number of features is 1 or 2,", "# not higher", "if", "cfg", ".", "strVersion", "==", "'cython'", ":", "strErrMsg", "=", "'Stopping program. '", "+", "'Cython is not supported for more features than 1. '", "+", "'Set strVersion equal \\'numpy\\'.'", "assert", "cfg", ".", "varNumFtr", "in", "[", "1", ",", "2", "]", ",", "strErrMsg", "# Check whether we need to crossvalidate", "if", "np", ".", "greater", "(", "cfg", ".", "varNumXval", ",", "1", ")", ":", "cfg", ".", "lgcXval", "=", "True", "elif", "np", ".", "equal", "(", "cfg", ".", "varNumXval", ",", "1", ")", ":", "cfg", ".", "lgcXval", "=", "False", "strErrMsg", "=", "'Stopping program. '", "+", "'Set numXval (number of crossvalidation folds) to 1 or higher'", "assert", "np", ".", "greater_equal", "(", "cfg", ".", "varNumXval", ",", "1", ")", ",", "strErrMsg", "# *************************************************************************", "# *** Find pRF models for voxel time courses", "print", "(", "'------Find pRF models for voxel time courses'", ")", "# Number of voxels for which pRF finding will be performed:", "cfg", ".", "varNumVoxInc", "=", "aryFunc", ".", "shape", "[", "0", "]", "print", "(", "'---------Number of voxels on which pRF finding will be performed: '", "+", "str", "(", "cfg", ".", "varNumVoxInc", ")", ")", "print", "(", "'---------Number of features pRF finding will be performed with: '", "+", "str", "(", "cfg", ".", "varNumFtr", ")", ")", "print", "(", "'---------Preparing parallel pRF model finding'", ")", "# Get array with all possible model parameter combination:", "# [x positions, y positions, sigmas]", "aryMdlParams", "=", "crt_mdl_prms", "(", "(", "int", "(", "cfg", ".", "varVslSpcSzeX", ")", ",", "int", "(", "cfg", ".", "varVslSpcSzeY", ")", ")", ",", "cfg", ".", "varNum1", ",", "cfg", ".", "varExtXmin", ",", "cfg", ".", "varExtXmax", ",", "cfg", ".", "varNum2", ",", "cfg", ".", "varExtYmin", ",", "cfg", ".", "varExtYmax", ",", "cfg", ".", "varNumPrfSizes", ",", "cfg", ".", "varPrfStdMin", ",", "cfg", ".", "varPrfStdMax", ",", "kwUnt", "=", "'deg'", ",", "kwCrd", "=", "cfg", ".", "strKwCrd", ")", "# Exclude models with prf center outside stimulated area", "aryMdlParams", "=", "aryMdlParams", "[", "lgcMdlInc", ",", ":", "]", "# Empty list for results (parameters of best fitting pRF model):", "lstPrfRes", "=", "[", "None", "]", "*", "cfg", ".", "varPar", "# Empty list for processes:", "lstPrcs", "=", "[", "None", "]", "*", "cfg", ".", "varPar", "# Create a queue to put the results in:", "queOut", "=", "mp", ".", "Queue", "(", ")", "# Create list with chunks of functional data for the parallel processes:", "lstFunc", "=", "np", ".", "array_split", "(", "aryFunc", ",", "cfg", ".", "varPar", ")", "# We don't need the original array with the functional data anymore:", "del", "(", "aryFunc", ")", "# Prepare dictionary to pass as kwargs to find_prf_cpu", "dctKw", "=", "{", "'lgcRstr'", ":", "None", ",", "'lgcPrint'", ":", "True", "}", "# CPU version (using numpy or cython for pRF finding):", "if", "(", "(", "cfg", ".", "strVersion", "==", "'numpy'", ")", "or", 
"(", "cfg", ".", "strVersion", "==", "'cython'", ")", ")", ":", "print", "(", "'---------pRF finding on CPU'", ")", "print", "(", "'---------Creating parallel processes'", ")", "# Create processes:", "for", "idxPrc", "in", "range", "(", "0", ",", "cfg", ".", "varPar", ")", ":", "lstPrcs", "[", "idxPrc", "]", "=", "mp", ".", "Process", "(", "target", "=", "find_prf_cpu", ",", "args", "=", "(", "idxPrc", ",", "lstFunc", "[", "idxPrc", "]", ",", "aryPrfTc", ",", "aryMdlParams", ",", "cfg", ".", "strVersion", ",", "cfg", ".", "lgcXval", ",", "cfg", ".", "varNumXval", ",", "queOut", ")", ",", "kwargs", "=", "dctKw", ",", ")", "# Daemon (kills processes when exiting):", "lstPrcs", "[", "idxPrc", "]", ".", "Daemon", "=", "True", "# GPU version (using tensorflow for pRF finding):", "elif", "cfg", ".", "strVersion", "==", "'gpu'", ":", "print", "(", "'---------pRF finding on GPU'", ")", "# Create processes:", "for", "idxPrc", "in", "range", "(", "0", ",", "cfg", ".", "varPar", ")", ":", "lstPrcs", "[", "idxPrc", "]", "=", "mp", ".", "Process", "(", "target", "=", "find_prf_gpu", ",", "args", "=", "(", "idxPrc", ",", "aryMdlParams", ",", "lstFunc", "[", "idxPrc", "]", ",", "aryPrfTc", ",", "queOut", ")", ",", "kwargs", "=", "dctKw", ",", ")", "# Daemon (kills processes when exiting):", "lstPrcs", "[", "idxPrc", "]", ".", "Daemon", "=", "True", "# Start processes:", "for", "idxPrc", "in", "range", "(", "0", ",", "cfg", ".", "varPar", ")", ":", "lstPrcs", "[", "idxPrc", "]", ".", "start", "(", ")", "# Delete reference to list with function data (the data continues to exists", "# in child process):", "del", "(", "lstFunc", ")", "# Collect results from queue:", "for", "idxPrc", "in", "range", "(", "0", ",", "cfg", ".", "varPar", ")", ":", "lstPrfRes", "[", "idxPrc", "]", "=", "queOut", ".", "get", "(", "True", ")", "# Join processes:", "for", "idxPrc", "in", "range", "(", "0", ",", "cfg", ".", "varPar", ")", ":", "lstPrcs", "[", "idxPrc", "]", ".", "join", "(", ")", "# *************************************************************************", "# *************************************************************************", "# *** Prepare pRF finding results for export", "print", "(", "'---------Prepare pRF finding results for export'", ")", "# Put output into correct order:", "lstPrfRes", "=", "sorted", "(", "lstPrfRes", ")", "# collect results from parallelization", "aryBstXpos", "=", "joinRes", "(", "lstPrfRes", ",", "cfg", ".", "varPar", ",", "1", ",", "inFormat", "=", "'1D'", ")", "aryBstYpos", "=", "joinRes", "(", "lstPrfRes", ",", "cfg", ".", "varPar", ",", "2", ",", "inFormat", "=", "'1D'", ")", "aryBstSd", "=", "joinRes", "(", "lstPrfRes", ",", "cfg", ".", "varPar", ",", "3", ",", "inFormat", "=", "'1D'", ")", "aryBstR2", "=", "joinRes", "(", "lstPrfRes", ",", "cfg", ".", "varPar", ",", "4", ",", "inFormat", "=", "'1D'", ")", "aryBstBts", "=", "joinRes", "(", "lstPrfRes", ",", "cfg", ".", "varPar", ",", "5", ",", "inFormat", "=", "'2D'", ")", "if", "np", ".", "greater", "(", "cfg", ".", "varNumXval", ",", "1", ")", ":", "aryBstR2Single", "=", "joinRes", "(", "lstPrfRes", ",", "cfg", ".", "varPar", ",", "6", ",", "inFormat", "=", "'2D'", ")", "# Delete unneeded large objects:", "del", "(", "lstPrfRes", ")", "# *************************************************************************", "# *************************************************************************", "# Calculate polar angle map:", "aryPlrAng", "=", "np", ".", "arctan2", "(", "aryBstYpos", ",", "aryBstXpos", ")", "# Calculate 
eccentricity map (r = sqrt( x^2 + y^2 ) ):", "aryEcc", "=", "np", ".", "sqrt", "(", "np", ".", "add", "(", "np", ".", "square", "(", "aryBstXpos", ")", ",", "np", ".", "square", "(", "aryBstYpos", ")", ")", ")", "# *************************************************************************", "# *************************************************************************", "# Export each map of best parameters as a 3D nii file", "print", "(", "'---------Exporting results'", ")", "# Append 'hrf' to cfg.strPathOut, if fitting was done with custom hrf", "if", "strPathHrf", "is", "not", "None", ":", "cfg", ".", "strPathOut", "=", "cfg", ".", "strPathOut", "+", "'_hrf'", "# Xoncatenate all the best voxel maps", "aryBstMaps", "=", "np", ".", "stack", "(", "[", "aryBstXpos", ",", "aryBstYpos", ",", "aryBstSd", ",", "aryBstR2", ",", "aryPlrAng", ",", "aryEcc", "]", ",", "axis", "=", "1", ")", "# List with name suffices of output images:", "lstNiiNames", "=", "[", "'_x_pos'", ",", "'_y_pos'", ",", "'_SD'", ",", "'_R2'", ",", "'_polar_angle'", ",", "'_eccentricity'", "]", "# Append ratio to nii file name, if fitting was done with sup surround", "if", "varRat", "is", "not", "None", ":", "lstNiiNames", "=", "[", "strNii", "+", "'_'", "+", "str", "(", "varRat", ")", "for", "strNii", "in", "lstNiiNames", "]", "# Create full path names from nii file names and output path", "lstNiiNames", "=", "[", "cfg", ".", "strPathOut", "+", "strNii", "+", "'.nii.gz'", "for", "strNii", "in", "lstNiiNames", "]", "# export map results as seperate 3D nii files", "export_nii", "(", "aryBstMaps", ",", "lstNiiNames", ",", "aryLgcMsk", ",", "aryLgcVar", ",", "tplNiiShp", ",", "aryAff", ",", "hdrMsk", ",", "outFormat", "=", "'3D'", ")", "# *************************************************************************", "# *************************************************************************", "# Save beta parameter estimates for every feature:", "# List with name suffices of output images:", "lstNiiNames", "=", "[", "'_Betas'", "]", "# Append ratio to nii file name, if fitting was done with sup surround", "if", "varRat", "is", "not", "None", ":", "lstNiiNames", "=", "[", "strNii", "+", "'_'", "+", "str", "(", "varRat", ")", "for", "strNii", "in", "lstNiiNames", "]", "# Create full path names from nii file names and output path", "lstNiiNames", "=", "[", "cfg", ".", "strPathOut", "+", "strNii", "+", "'.nii.gz'", "for", "strNii", "in", "lstNiiNames", "]", "# export beta parameter as a single 4D nii file", "export_nii", "(", "aryBstBts", ",", "lstNiiNames", ",", "aryLgcMsk", ",", "aryLgcVar", ",", "tplNiiShp", ",", "aryAff", ",", "hdrMsk", ",", "outFormat", "=", "'4D'", ")", "# *************************************************************************", "# *************************************************************************", "# Save R2 maps from crossvalidation (saved for every run) as nii:", "if", "np", ".", "greater", "(", "cfg", ".", "varNumXval", ",", "1", ")", ":", "# truncate extremely negative R2 values", "aryBstR2Single", "[", "np", ".", "where", "(", "np", ".", "less_equal", "(", "aryBstR2Single", ",", "-", "1.0", ")", ")", "]", "=", "-", "1.0", "# List with name suffices of output images:", "lstNiiNames", "=", "[", "'_R2_single'", "]", "# Append ratio to nii file name, if fitting was done with sup surround", "if", "varRat", "is", "not", "None", ":", "lstNiiNames", "=", "[", "strNii", "+", "'_'", "+", "str", "(", "varRat", ")", "for", "strNii", "in", "lstNiiNames", "]", "# Create full path names from nii file names and 
output path", "lstNiiNames", "=", "[", "cfg", ".", "strPathOut", "+", "strNii", "+", "'.nii.gz'", "for", "strNii", "in", "lstNiiNames", "]", "# export R2 maps as a single 4D nii file", "export_nii", "(", "aryBstR2Single", ",", "lstNiiNames", ",", "aryLgcMsk", ",", "aryLgcVar", ",", "tplNiiShp", ",", "aryAff", ",", "hdrMsk", ",", "outFormat", "=", "'4D'", ")", "# *************************************************************************", "# *************************************************************************", "# *** Report time", "varTme02", "=", "time", ".", "time", "(", ")", "varTme03", "=", "varTme02", "-", "varTme01", "print", "(", "'---Elapsed time: '", "+", "str", "(", "varTme03", ")", "+", "' s'", ")", "print", "(", "'---Done.'", ")" ]
Main function for pRF mapping.

Parameters
----------
strCsvCnfg : str
    Absolute file path of config file.
lgcTest : Boolean
    Whether this is a test (pytest). If yes, the absolute path of the pyprf
    library will be prepended to config file paths.
varRat : float, default None
    Ratio of size of suppressive surround to size of center pRF.
strPathHrf : str or None
    Path to npy file with custom hrf parameters. If None, default
    parameters will be used.
[ "Main", "function", "for", "pRF", "mapping", "." ]
train
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/pyprf_main.py#L37-L366
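A minimal call sketch for the pRF-mapping entry point documented above. The function name `pyprf` and its import path are assumptions inferred from the file path in the URL, not stated in this record; only the four documented parameters are shown.

# Hypothetical invocation of the pRF-mapping entry point (names assumed).
from pyprf_feature.analysis.pyprf_main import pyprf

pyprf('/home/user/prf_config.csv',  # strCsvCnfg: absolute path of config file
      lgcTest=False,                # not a pytest run
      varRat=None,                  # no suppressive-surround fitting
      strPathHrf=None)              # use default HRF parameters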
osilkin98/PyBRY
pybry/base_api.py
BaseApi.make_request
def make_request(cls, url, method, params=None, basic_auth=None, timeout=600): """ Makes a cURL POST request to the given URL, specifying the data to be passed in as {"method": method, "params": parameters} :param str url: URL to connect to. :param str method: The API method to call. :param dict params: Dictionary object of the parameters associated with the `method` given. None by default. :param list | tuple basic_auth: List containing your username and password as ['username', 'password']. This is empty by default, however it is required by all of the `lbrycrd` methods :param float timeout: Amount of seconds to wait for the server's response before we timeout. :raises LBRYException: If the request returns an error when calling the API :return: A `dict` of the JSON result member of the request :rtype: dict, PreparedResponse """ # Default parameters params = {} if params is None else params # Increment the request ID cls.request_id += 1 # Weed out all the None valued params params = {k: v for (k, v) in params.items() if v is not None} # This is the data to be sent data = {"method": method, "params": params, "jsonrpc": "2.0", "id": cls.request_id} headers = {"Content-Type": "application/json-rpc", # sends the request as a json "user-agent": "LBRY python3-api"} # Sets the user agent # You could create a request object and then make a prepared request object # And then be able to print the Request that will be sent request = requests.Request('POST', url, json=data, headers=headers, auth=basic_auth) prepared = request.prepare() try: # Create a session object sesh = requests.Session() # Send the prepared request object through response = sesh.send(prepared, timeout=timeout) response_json = response.json() # Successful request was made if 'result' in response_json: # Returns the Result sub-JSON formatted as a dict return response_json['result'], response # If the response we received from the LBRY http post had an error elif 'error' in response_json: raise LBRYUtils.LBRYException("POST Request made to LBRY received an error", response_json, response.status_code, prepared) except requests.HTTPError as HE: print(HE) return None, None except requests.RequestException as RE: # Print the Request Exception given print(RE) print("Printing Request Created:\n") LBRYUtils.print_request(prepared) return None, None
python
def make_request(cls, url, method, params=None, basic_auth=None, timeout=600): """ Makes a cURL POST request to the given URL, specifying the data to be passed in as {"method": method, "params": parameters} :param str url: URL to connect to. :param str method: The API method to call. :param dict params: Dictionary object of the parameters associated with the `method` given. None by default. :param list | tuple basic_auth: List containing your username and password as ['username', 'password']. This is empty by default, however it is required by all of the `lbrycrd` methods :param float timeout: Amount of seconds to wait for the server's response before we timeout. :raises LBRYException: If the request returns an error when calling the API :return: A `dict` of the JSON result member of the request :rtype: dict, PreparedResponse """ # Default parameters params = {} if params is None else params # Increment the request ID cls.request_id += 1 # Weed out all the None valued params params = {k: v for (k, v) in params.items() if v is not None} # This is the data to be sent data = {"method": method, "params": params, "jsonrpc": "2.0", "id": cls.request_id} headers = {"Content-Type": "application/json-rpc", # sends the request as a json "user-agent": "LBRY python3-api"} # Sets the user agent # You could create a request object and then make a prepared request object # And then be able to print the Request that will be sent request = requests.Request('POST', url, json=data, headers=headers, auth=basic_auth) prepared = request.prepare() try: # Create a session object sesh = requests.Session() # Send the prepared request object through response = sesh.send(prepared, timeout=timeout) response_json = response.json() # Successful request was made if 'result' in response_json: # Returns the Result sub-JSON formatted as a dict return response_json['result'], response # If the response we received from the LBRY http post had an error elif 'error' in response_json: raise LBRYUtils.LBRYException("POST Request made to LBRY received an error", response_json, response.status_code, prepared) except requests.HTTPError as HE: print(HE) return None, None except requests.RequestException as RE: # Print the Request Exception given print(RE) print("Printing Request Created:\n") LBRYUtils.print_request(prepared) return None, None
[ "def", "make_request", "(", "cls", ",", "url", ",", "method", ",", "params", "=", "None", ",", "basic_auth", "=", "None", ",", "timeout", "=", "600", ")", ":", "# Default parameters", "params", "=", "{", "}", "if", "params", "is", "None", "else", "params", "# Increment the request ID", "cls", ".", "request_id", "+=", "1", "# Weed out all the None valued params", "params", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "params", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}", "# This is the data to be sent", "data", "=", "{", "\"method\"", ":", "method", ",", "\"params\"", ":", "params", ",", "\"jsonrpc\"", ":", "\"2.0\"", ",", "\"id\"", ":", "cls", ".", "request_id", "}", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/json-rpc\"", ",", "# sends the request as a json", "\"user-agent\"", ":", "\"LBRY python3-api\"", "}", "# Sets the user agent", "# You could create a request object and then make a prepared request object", "# And then be able to print the Request that will be sent", "request", "=", "requests", ".", "Request", "(", "'POST'", ",", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ",", "auth", "=", "basic_auth", ")", "prepared", "=", "request", ".", "prepare", "(", ")", "try", ":", "# Create a session object", "sesh", "=", "requests", ".", "Session", "(", ")", "# Send the prepared request object through", "response", "=", "sesh", ".", "send", "(", "prepared", ",", "timeout", "=", "timeout", ")", "response_json", "=", "response", ".", "json", "(", ")", "# Successful request was made", "if", "'result'", "in", "response_json", ":", "# Returns the Result sub-JSON formatted as a dict", "return", "response_json", "[", "'result'", "]", ",", "response", "# If the response we received from the LBRY http post had an error", "elif", "'error'", "in", "response_json", ":", "raise", "LBRYUtils", ".", "LBRYException", "(", "\"POST Request made to LBRY received an error\"", ",", "response_json", ",", "response", ".", "status_code", ",", "prepared", ")", "except", "requests", ".", "HTTPError", "as", "HE", ":", "print", "(", "HE", ")", "return", "None", ",", "None", "except", "requests", ".", "RequestException", "as", "RE", ":", "# Print the Request Exception given", "print", "(", "RE", ")", "print", "(", "\"Printing Request Created:\\n\"", ")", "LBRYUtils", ".", "print_request", "(", "prepared", ")", "return", "None", ",", "None" ]
Makes a cURL POST request to the given URL, specifying the data to be
passed in as {"method": method, "params": parameters}

:param str url: URL to connect to.
:param str method: The API method to call.
:param dict params: Dictionary object of the parameters associated with
 the `method` given. None by default.
:param list | tuple basic_auth: List containing your username and password
 as ['username', 'password']. This is None by default; however, it is
 required by all of the `lbrycrd` methods.
:param float timeout: Number of seconds to wait for the server's response
 before timing out.
:raises LBRYException: If the request returns an error when calling the API
:return: A `dict` of the JSON result member of the request
:rtype: dict, requests.Response
[ "Makes", "a", "cURL", "POST", "request", "to", "the", "given", "URL", "specifying", "the", "data", "to", "be", "passed", "in", "as", "{", "method", ":", "method", "params", ":", "parameters", "}" ]
train
https://github.com/osilkin98/PyBRY/blob/af86805a8077916f72f3fe980943d4cd741e61f0/pybry/base_api.py#L29-L99
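A usage sketch for `BaseApi.make_request` against a local LBRY daemon. The endpoint URL, method name, and parameters are illustrative assumptions; `lbrycrd` methods would additionally pass basic_auth=['username', 'password'].

from pybry.base_api import BaseApi  # module path taken from the record above

# Hypothetical JSON-RPC call; 'resolve' and its params are assumptions.
result, response = BaseApi.make_request(
    'http://localhost:5279/lbryapi',  # assumed daemon endpoint
    'resolve',
    params={'uri': 'what'},
    timeout=30,
)
if result is not None:
    print(result)  # the JSON-RPC 'result' member as a dict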
MSchnei/pyprf_feature
pyprf_feature/analysis/prepro/prepro_conv_load.py
load_png
def load_png(varNumVol, strPathPng, tplVslSpcSze=(200, 200), varStrtIdx=0,
             varZfill=3):
    """
    Load PNGs with stimulus information for pRF model creation.

    Parameters
    ----------
    varNumVol : int
        Number of PNG files.
    strPathPng : str
        Parent directory of PNG files. PNG files need to be organised in
        numerical order (e.g. `file_001.png`, `file_002.png`, etc.).
    tplVslSpcSze : tuple
        Pixel size (x, y) at which PNGs are sampled. In case of large PNGs
        it is useful to sample at a lower resolution than the original.
    varStrtIdx : int
        Start index of PNG files. For instance, `varStrtIdx = 0` if the name
        of the first PNG file is `file_000.png`, or `varStrtIdx = 1` if it
        is `file_001.png`.
    varZfill : int
        Zero padding of PNG file names. For instance, `varZfill = 3` if the
        name of PNG files is `file_007.png`, or `varZfill = 4` if it is
        `file_0007.png`.

    Returns
    -------
    aryPngData : np.array
        3D Numpy array with the following structure:
        aryPngData[x-pixel-index, y-pixel-index, PngNumber]

    Notes
    -----
    Part of py_pRF_mapping library.
    """
    # Create list of png files to load:
    lstPngPaths = [None] * varNumVol
    for idx01 in range(0, varNumVol):
        lstPngPaths[idx01] = (strPathPng
                              + str(idx01 + varStrtIdx).zfill(varZfill)
                              + '.png')

    # The png data will be saved in a numpy array of the following order:
    # aryPngData[x-pixel, y-pixel, PngNumber].
    aryPngData = np.zeros((tplVslSpcSze[0],
                           tplVslSpcSze[1],
                           varNumVol))

    # Open first image in order to check dimensions (greyscale or RGB, i.e. 2D
    # or 3D).
    objIm = Image.open(lstPngPaths[0])
    aryTest = np.array(objIm.resize((objIm.size[0], objIm.size[1]),
                       Image.ANTIALIAS))
    varNumDim = aryTest.ndim
    del(aryTest)

    # Loop through PNG files:
    for idx01 in range(0, varNumVol):

        # Old version of reading images with scipy
        # aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :, 0]
        # aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :]

        # Load & resize image:
        objIm = Image.open(lstPngPaths[idx01])
        objIm = objIm.resize((tplVslSpcSze[0],
                              tplVslSpcSze[1]),
                             resample=Image.NEAREST)

        # Casting of array depends on dimensionality (greyscale or RGB, i.e.
        # 2D or 3D):
        if varNumDim == 2:
            aryPngData[:, :, idx01] = np.array(objIm.resize(
                (objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :]
        elif varNumDim == 3:
            aryPngData[:, :, idx01] = np.array(objIm.resize(
                (objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :, 0]
        else:
            # Error message:
            strErrMsg = ('ERROR: PNG files for model creation need to be RGB '
                         + 'or greyscale.')
            raise ValueError(strErrMsg)

    # Convert RGB values (0 to 255) to integer ones and zeros:
    aryPngData = (aryPngData > 200).astype(np.int8)

    return aryPngData
python
def load_png(varNumVol, strPathPng, tplVslSpcSze=(200, 200), varStrtIdx=0,
             varZfill=3):
    """
    Load PNGs with stimulus information for pRF model creation.

    Parameters
    ----------
    varNumVol : int
        Number of PNG files.
    strPathPng : str
        Parent directory of PNG files. PNG files need to be organised in
        numerical order (e.g. `file_001.png`, `file_002.png`, etc.).
    tplVslSpcSze : tuple
        Pixel size (x, y) at which PNGs are sampled. In case of large PNGs
        it is useful to sample at a lower resolution than the original.
    varStrtIdx : int
        Start index of PNG files. For instance, `varStrtIdx = 0` if the name
        of the first PNG file is `file_000.png`, or `varStrtIdx = 1` if it
        is `file_001.png`.
    varZfill : int
        Zero padding of PNG file names. For instance, `varZfill = 3` if the
        name of PNG files is `file_007.png`, or `varZfill = 4` if it is
        `file_0007.png`.

    Returns
    -------
    aryPngData : np.array
        3D Numpy array with the following structure:
        aryPngData[x-pixel-index, y-pixel-index, PngNumber]

    Notes
    -----
    Part of py_pRF_mapping library.
    """
    # Create list of png files to load:
    lstPngPaths = [None] * varNumVol
    for idx01 in range(0, varNumVol):
        lstPngPaths[idx01] = (strPathPng
                              + str(idx01 + varStrtIdx).zfill(varZfill)
                              + '.png')

    # The png data will be saved in a numpy array of the following order:
    # aryPngData[x-pixel, y-pixel, PngNumber].
    aryPngData = np.zeros((tplVslSpcSze[0],
                           tplVslSpcSze[1],
                           varNumVol))

    # Open first image in order to check dimensions (greyscale or RGB, i.e. 2D
    # or 3D).
    objIm = Image.open(lstPngPaths[0])
    aryTest = np.array(objIm.resize((objIm.size[0], objIm.size[1]),
                       Image.ANTIALIAS))
    varNumDim = aryTest.ndim
    del(aryTest)

    # Loop through PNG files:
    for idx01 in range(0, varNumVol):

        # Old version of reading images with scipy
        # aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :, 0]
        # aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :]

        # Load & resize image:
        objIm = Image.open(lstPngPaths[idx01])
        objIm = objIm.resize((tplVslSpcSze[0],
                              tplVslSpcSze[1]),
                             resample=Image.NEAREST)

        # Casting of array depends on dimensionality (greyscale or RGB, i.e.
        # 2D or 3D):
        if varNumDim == 2:
            aryPngData[:, :, idx01] = np.array(objIm.resize(
                (objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :]
        elif varNumDim == 3:
            aryPngData[:, :, idx01] = np.array(objIm.resize(
                (objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :, 0]
        else:
            # Error message:
            strErrMsg = ('ERROR: PNG files for model creation need to be RGB '
                         + 'or greyscale.')
            raise ValueError(strErrMsg)

    # Convert RGB values (0 to 255) to integer ones and zeros:
    aryPngData = (aryPngData > 200).astype(np.int8)

    return aryPngData
[ "def", "load_png", "(", "varNumVol", ",", "strPathPng", ",", "tplVslSpcSze", "=", "(", "200", ",", "200", ")", ",", "varStrtIdx", "=", "0", ",", "varZfill", "=", "3", ")", ":", "# Create list of png files to load:", "lstPngPaths", "=", "[", "None", "]", "*", "varNumVol", "for", "idx01", "in", "range", "(", "0", ",", "varNumVol", ")", ":", "lstPngPaths", "[", "idx01", "]", "=", "(", "strPathPng", "+", "str", "(", "idx01", "+", "varStrtIdx", ")", ".", "zfill", "(", "varZfill", ")", "+", "'.png'", ")", "# The png data will be saved in a numpy array of the following order:", "# aryPngData[x-pixel, y-pixel, PngNumber].", "aryPngData", "=", "np", ".", "zeros", "(", "(", "tplVslSpcSze", "[", "0", "]", ",", "tplVslSpcSze", "[", "1", "]", ",", "varNumVol", ")", ")", "# Open first image in order to check dimensions (greyscale or RGB, i.e. 2D", "# or 3D).", "objIm", "=", "Image", ".", "open", "(", "lstPngPaths", "[", "0", "]", ")", "aryTest", "=", "np", ".", "array", "(", "objIm", ".", "resize", "(", "(", "objIm", ".", "size", "[", "0", "]", ",", "objIm", ".", "size", "[", "1", "]", ")", ",", "Image", ".", "ANTIALIAS", ")", ")", "varNumDim", "=", "aryTest", ".", "ndim", "del", "(", "aryTest", ")", "# Loop trough PNG files:", "for", "idx01", "in", "range", "(", "0", ",", "varNumVol", ")", ":", "# Old version of reading images with scipy", "# aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :, 0]", "# aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :]", "# Load & resize image:", "objIm", "=", "Image", ".", "open", "(", "lstPngPaths", "[", "idx01", "]", ")", "objIm", "=", "objIm", ".", "resize", "(", "(", "tplVslSpcSze", "[", "0", "]", ",", "tplVslSpcSze", "[", "1", "]", ")", ",", "resample", "=", "Image", ".", "NEAREST", ")", "# Casting of array depends on dimensionality (greyscale or RGB, i.e. 2D", "# or 3D):", "if", "varNumDim", "==", "2", ":", "aryPngData", "[", ":", ",", ":", ",", "idx01", "]", "=", "np", ".", "array", "(", "objIm", ".", "resize", "(", "(", "objIm", ".", "size", "[", "0", "]", ",", "objIm", ".", "size", "[", "1", "]", ")", ",", "Image", ".", "ANTIALIAS", ")", ")", "[", ":", ",", ":", "]", "elif", "varNumDim", "==", "3", ":", "aryPngData", "[", ":", ",", ":", ",", "idx01", "]", "=", "np", ".", "array", "(", "objIm", ".", "resize", "(", "(", "objIm", ".", "size", "[", "0", "]", ",", "objIm", ".", "size", "[", "1", "]", ")", ",", "Image", ".", "ANTIALIAS", ")", ")", "[", ":", ",", ":", ",", "0", "]", "else", ":", "# Error message:", "strErrMsg", "=", "(", "'ERROR: PNG files for model creation need to be RGB '", "+", "'or greyscale.'", ")", "raise", "ValueError", "(", "strErrMsg", ")", "# Convert RGB values (0 to 255) to integer ones and zeros:", "aryPngData", "=", "(", "aryPngData", ">", "200", ")", ".", "astype", "(", "np", ".", "int8", ")", "return", "aryPngData" ]
Load PNGs with stimulus information for pRF model creation.

Parameters
----------
varNumVol : int
    Number of PNG files.
strPathPng : str
    Parent directory of PNG files. PNG files need to be organised in
    numerical order (e.g. `file_001.png`, `file_002.png`, etc.).
tplVslSpcSze : tuple
    Pixel size (x, y) at which PNGs are sampled. In case of large PNGs it
    is useful to sample at a lower resolution than the original.
varStrtIdx : int
    Start index of PNG files. For instance, `varStrtIdx = 0` if the name
    of the first PNG file is `file_000.png`, or `varStrtIdx = 1` if it is
    `file_001.png`.
varZfill : int
    Zero padding of PNG file names. For instance, `varZfill = 3` if the
    name of PNG files is `file_007.png`, or `varZfill = 4` if it is
    `file_0007.png`.

Returns
-------
aryPngData : np.array
    3D Numpy array with the following structure:
    aryPngData[x-pixel-index, y-pixel-index, PngNumber]

Notes
-----
Part of py_pRF_mapping library.
[ "Load", "PNGs", "with", "stimulus", "information", "for", "pRF", "model", "creation", "." ]
train
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/prepro/prepro_conv_load.py#L34-L119
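A usage sketch for `load_png`, assuming 120 stimulus frames named frame_000.png through frame_119.png under an illustrative directory:

# Path prefix and frame count are assumptions for illustration.
aryPngData = load_png(120,                      # varNumVol: number of PNGs
                      '/data/stimuli/frame_',   # strPathPng: path prefix
                      tplVslSpcSze=(200, 200),  # sample at 200 x 200 px
                      varStrtIdx=0,             # first file is frame_000.png
                      varZfill=3)               # three-digit zero padding
print(aryPngData.shape)  # (200, 200, 120); binary int8 aperture masks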
MSchnei/pyprf_feature
pyprf_feature/analysis/prepro/prepro_conv_load.py
load_ev_txt
def load_ev_txt(strPthEv):
    """Load information from event text file.

    Parameters
    ----------
    strPthEv : str
        Path to event text file

    Returns
    -------
    aryEvTxt : 2d numpy array, shape [n_measurements, 3]
        Array with info about conditions: type, onset, duration

    Notes
    -----
    Part of py_pRF_mapping library.
    """
    aryEvTxt = np.loadtxt(strPthEv, dtype='float', comments='#',
                          delimiter=' ', skiprows=0, usecols=(0, 1, 2))

    return aryEvTxt
python
def load_ev_txt(strPthEv):
    """Load information from event text file.

    Parameters
    ----------
    strPthEv : str
        Path to event text file

    Returns
    -------
    aryEvTxt : 2d numpy array, shape [n_measurements, 3]
        Array with info about conditions: type, onset, duration

    Notes
    -----
    Part of py_pRF_mapping library.
    """
    aryEvTxt = np.loadtxt(strPthEv, dtype='float', comments='#',
                          delimiter=' ', skiprows=0, usecols=(0, 1, 2))

    return aryEvTxt
[ "def", "load_ev_txt", "(", "strPthEv", ")", ":", "aryEvTxt", "=", "np", ".", "loadtxt", "(", "strPthEv", ",", "dtype", "=", "'float'", ",", "comments", "=", "'#'", ",", "delimiter", "=", "' '", ",", "skiprows", "=", "0", ",", "usecols", "=", "(", "0", ",", "1", ",", "2", ")", ")", "return", "aryEvTxt" ]
Load information from event text file.

Parameters
----------
strPthEv : str
    Path to event text file

Returns
-------
aryEvTxt : 2d numpy array, shape [n_measurements, 3]
    Array with info about conditions: type, onset, duration

Notes
-----
Part of py_pRF_mapping library.
[ "Load", "information", "from", "event", "text", "file", "." ]
train
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/prepro/prepro_conv_load.py#L122-L139
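A usage sketch for `load_ev_txt`; the file path is an assumption, and each non-comment line of the file is expected to hold three space-separated columns (condition type, onset, duration):

aryEvTxt = load_ev_txt('/data/events_run01.txt')  # hypothetical event file
print(aryEvTxt.shape)                             # (n_events, 3)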
bachya/pyflunearyou
pyflunearyou/cdc.py
adjust_status
def adjust_status(info: dict) -> dict: """Apply status mapping to a raw API result.""" modified_info = deepcopy(info) modified_info.update({ 'level': get_nearest_by_numeric_key(STATUS_MAP, int(info['level'])), 'level2': STATUS_MAP[99] if info['level2'] is None else get_nearest_by_numeric_key(STATUS_MAP, int(info['level2'])) }) return modified_info
python
def adjust_status(info: dict) -> dict: """Apply status mapping to a raw API result.""" modified_info = deepcopy(info) modified_info.update({ 'level': get_nearest_by_numeric_key(STATUS_MAP, int(info['level'])), 'level2': STATUS_MAP[99] if info['level2'] is None else get_nearest_by_numeric_key(STATUS_MAP, int(info['level2'])) }) return modified_info
[ "def", "adjust_status", "(", "info", ":", "dict", ")", "->", "dict", ":", "modified_info", "=", "deepcopy", "(", "info", ")", "modified_info", ".", "update", "(", "{", "'level'", ":", "get_nearest_by_numeric_key", "(", "STATUS_MAP", ",", "int", "(", "info", "[", "'level'", "]", ")", ")", ",", "'level2'", ":", "STATUS_MAP", "[", "99", "]", "if", "info", "[", "'level2'", "]", "is", "None", "else", "get_nearest_by_numeric_key", "(", "STATUS_MAP", ",", "int", "(", "info", "[", "'level2'", "]", ")", ")", "}", ")", "return", "modified_info" ]
Apply status mapping to a raw API result.
[ "Apply", "status", "mapping", "to", "a", "raw", "API", "result", "." ]
train
https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/cdc.py#L23-L34
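A sketch of `adjust_status` on a raw CDC record; the field values are assumptions, and the mapped labels depend on the package's STATUS_MAP:

raw = {'name': 'Colorado', 'level': '3', 'level2': None}
info = adjust_status(raw)
# 'level' is mapped via get_nearest_by_numeric_key(STATUS_MAP, 3), and a
# None 'level2' falls back to STATUS_MAP[99]; 'name' is carried over.
print(info['level'], info['level2'])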
bachya/pyflunearyou
pyflunearyou/cdc.py
CdcReport.status_by_coordinates
async def status_by_coordinates( self, latitude: float, longitude: float) -> dict: """Return the CDC status for the provided latitude/longitude.""" cdc_data = await self.raw_cdc_data() nearest = await self.nearest_by_coordinates(latitude, longitude) return adjust_status(cdc_data[nearest['state']['name']])
python
async def status_by_coordinates( self, latitude: float, longitude: float) -> dict: """Return the CDC status for the provided latitude/longitude.""" cdc_data = await self.raw_cdc_data() nearest = await self.nearest_by_coordinates(latitude, longitude) return adjust_status(cdc_data[nearest['state']['name']])
[ "async", "def", "status_by_coordinates", "(", "self", ",", "latitude", ":", "float", ",", "longitude", ":", "float", ")", "->", "dict", ":", "cdc_data", "=", "await", "self", ".", "raw_cdc_data", "(", ")", "nearest", "=", "await", "self", ".", "nearest_by_coordinates", "(", "latitude", ",", "longitude", ")", "return", "adjust_status", "(", "cdc_data", "[", "nearest", "[", "'state'", "]", "[", "'name'", "]", "]", ")" ]
Return the CDC status for the provided latitude/longitude.
[ "Return", "the", "CDC", "status", "for", "the", "provided", "latitude", "/", "longitude", "." ]
train
https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/cdc.py#L51-L56
bachya/pyflunearyou
pyflunearyou/cdc.py
CdcReport.status_by_state
async def status_by_state(self, state: str) -> dict: """Return the CDC status for the specified state.""" data = await self.raw_cdc_data() try: info = next((v for k, v in data.items() if state in k)) except StopIteration: return {} return adjust_status(info)
python
async def status_by_state(self, state: str) -> dict: """Return the CDC status for the specified state.""" data = await self.raw_cdc_data() try: info = next((v for k, v in data.items() if state in k)) except StopIteration: return {} return adjust_status(info)
[ "async", "def", "status_by_state", "(", "self", ",", "state", ":", "str", ")", "->", "dict", ":", "data", "=", "await", "self", ".", "raw_cdc_data", "(", ")", "try", ":", "info", "=", "next", "(", "(", "v", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", "if", "state", "in", "k", ")", ")", "except", "StopIteration", ":", "return", "{", "}", "return", "adjust_status", "(", "info", ")" ]
Return the CDC status for the specified state.
[ "Return", "the", "CDC", "status", "for", "the", "specified", "state", "." ]
train
https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/cdc.py#L58-L67
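An async usage sketch covering both CdcReport lookups above. The `Client` construction and its `cdc_reports` attribute are assumptions about the surrounding pyflunearyou API:

import asyncio

from aiohttp import ClientSession
from pyflunearyou import Client  # import path is an assumption

async def main() -> None:
    async with ClientSession() as session:
        client = Client(session)
        by_coords = await client.cdc_reports.status_by_coordinates(39.74, -104.98)
        by_state = await client.cdc_reports.status_by_state('Colorado')
        print(by_coords, by_state)

asyncio.run(main())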
curious-containers/cc-core
cc_core/commons/exceptions.py
brief_exception_text
def brief_exception_text(exception, secret_values): """ Returns the Exception class and the message of the exception as string. :param exception: The exception to format :param secret_values: Values to hide in output """ exception_text = _hide_secret_values(str(exception), secret_values) return '[{}]\n{}'.format(type(exception).__name__, exception_text)
python
def brief_exception_text(exception, secret_values): """ Returns the Exception class and the message of the exception as string. :param exception: The exception to format :param secret_values: Values to hide in output """ exception_text = _hide_secret_values(str(exception), secret_values) return '[{}]\n{}'.format(type(exception).__name__, exception_text)
[ "def", "brief_exception_text", "(", "exception", ",", "secret_values", ")", ":", "exception_text", "=", "_hide_secret_values", "(", "str", "(", "exception", ")", ",", "secret_values", ")", "return", "'[{}]\\n{}'", ".", "format", "(", "type", "(", "exception", ")", ".", "__name__", ",", "exception_text", ")" ]
Returns the Exception class and the message of the exception as string. :param exception: The exception to format :param secret_values: Values to hide in output
[ "Returns", "the", "Exception", "class", "and", "the", "message", "of", "the", "exception", "as", "string", "." ]
train
https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/exceptions.py#L27-L35
curious-containers/cc-core
cc_core/commons/exceptions.py
print_exception
def print_exception(exception, secret_values=None): """ Prints the exception message and the name of the exception class to stderr. :param exception: The exception to print :param secret_values: Values to hide in output """ print(brief_exception_text(exception, secret_values), file=sys.stderr)
python
def print_exception(exception, secret_values=None): """ Prints the exception message and the name of the exception class to stderr. :param exception: The exception to print :param secret_values: Values to hide in output """ print(brief_exception_text(exception, secret_values), file=sys.stderr)
[ "def", "print_exception", "(", "exception", ",", "secret_values", "=", "None", ")", ":", "print", "(", "brief_exception_text", "(", "exception", ",", "secret_values", ")", ",", "file", "=", "sys", ".", "stderr", ")" ]
Prints the exception message and the name of the exception class to stderr. :param exception: The exception to print :param secret_values: Values to hide in output
[ "Prints", "the", "exception", "message", "and", "the", "name", "of", "the", "exception", "class", "to", "stderr", "." ]
train
https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/exceptions.py#L38-L45
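A sketch using the two exception helpers together. The exact masking performed by `_hide_secret_values` is not shown in these records, so the commented output is approximate:

try:
    raise ValueError('login failed for token abc123')
except ValueError as exc:
    text = brief_exception_text(exc, ['abc123'])
    print(text)  # '[ValueError]' on one line, then the masked message
    print_exception(exc, secret_values=['abc123'])  # same text, to stderr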
lvieirajr/mongorest
mongorest/collection.py
Collection.insert
def insert(self, **kwargs): """ Saves the Document to the database if it is valid. Returns errors otherwise. """ if self.is_valid: before = self.before_insert() if before: return before try: self._document['_id'] = self.insert_one(self._document) self.after_insert() return self._document except PyMongoException as exc: return PyMongoError( error_message=exc.details.get( 'errmsg', exc.details.get('err', 'PyMongoError.') ), operation='insert', collection=type(self).__name__, document=self._document, ) return self._errors
python
def insert(self, **kwargs): """ Saves the Document to the database if it is valid. Returns errors otherwise. """ if self.is_valid: before = self.before_insert() if before: return before try: self._document['_id'] = self.insert_one(self._document) self.after_insert() return self._document except PyMongoException as exc: return PyMongoError( error_message=exc.details.get( 'errmsg', exc.details.get('err', 'PyMongoError.') ), operation='insert', collection=type(self).__name__, document=self._document, ) return self._errors
[ "def", "insert", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "is_valid", ":", "before", "=", "self", ".", "before_insert", "(", ")", "if", "before", ":", "return", "before", "try", ":", "self", ".", "_document", "[", "'_id'", "]", "=", "self", ".", "insert_one", "(", "self", ".", "_document", ")", "self", ".", "after_insert", "(", ")", "return", "self", ".", "_document", "except", "PyMongoException", "as", "exc", ":", "return", "PyMongoError", "(", "error_message", "=", "exc", ".", "details", ".", "get", "(", "'errmsg'", ",", "exc", ".", "details", ".", "get", "(", "'err'", ",", "'PyMongoError.'", ")", ")", ",", "operation", "=", "'insert'", ",", "collection", "=", "type", "(", "self", ")", ".", "__name__", ",", "document", "=", "self", ".", "_document", ",", ")", "return", "self", ".", "_errors" ]
Saves the Document to the database if it is valid. Returns errors otherwise.
[ "Saves", "the", "Document", "to", "the", "database", "if", "it", "is", "valid", ".", "Returns", "errors", "otherwise", "." ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L159-L184
lvieirajr/mongorest
mongorest/collection.py
Collection.update
def update(self, **kwargs): """ Updates the document with the given _id saved in the collection if it is valid. Returns errors otherwise. """ if self.is_valid: if '_id' in self._document: to_update = self.find_one({'_id': self._id}) if to_update: before = self.before_update(old=to_update) if before: return before try: self.replace_one({'_id': self._id}, self._document) self.after_update(old=to_update) return self._document except PyMongoException as exc: return PyMongoError( error_message=exc.details.get( 'errmsg', exc.details.get( 'err', 'PyMongoError.' ) ), operation='update', collection=type(self).__name__, document=self._document, ) else: return DocumentNotFoundError(type(self).__name__, self._id) else: return UnidentifiedDocumentError( type(self).__name__, self._document ) return self._errors
python
def update(self, **kwargs): """ Updates the document with the given _id saved in the collection if it is valid. Returns errors otherwise. """ if self.is_valid: if '_id' in self._document: to_update = self.find_one({'_id': self._id}) if to_update: before = self.before_update(old=to_update) if before: return before try: self.replace_one({'_id': self._id}, self._document) self.after_update(old=to_update) return self._document except PyMongoException as exc: return PyMongoError( error_message=exc.details.get( 'errmsg', exc.details.get( 'err', 'PyMongoError.' ) ), operation='update', collection=type(self).__name__, document=self._document, ) else: return DocumentNotFoundError(type(self).__name__, self._id) else: return UnidentifiedDocumentError( type(self).__name__, self._document ) return self._errors
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "is_valid", ":", "if", "'_id'", "in", "self", ".", "_document", ":", "to_update", "=", "self", ".", "find_one", "(", "{", "'_id'", ":", "self", ".", "_id", "}", ")", "if", "to_update", ":", "before", "=", "self", ".", "before_update", "(", "old", "=", "to_update", ")", "if", "before", ":", "return", "before", "try", ":", "self", ".", "replace_one", "(", "{", "'_id'", ":", "self", ".", "_id", "}", ",", "self", ".", "_document", ")", "self", ".", "after_update", "(", "old", "=", "to_update", ")", "return", "self", ".", "_document", "except", "PyMongoException", "as", "exc", ":", "return", "PyMongoError", "(", "error_message", "=", "exc", ".", "details", ".", "get", "(", "'errmsg'", ",", "exc", ".", "details", ".", "get", "(", "'err'", ",", "'PyMongoError.'", ")", ")", ",", "operation", "=", "'update'", ",", "collection", "=", "type", "(", "self", ")", ".", "__name__", ",", "document", "=", "self", ".", "_document", ",", ")", "else", ":", "return", "DocumentNotFoundError", "(", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "_id", ")", "else", ":", "return", "UnidentifiedDocumentError", "(", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "_document", ")", "return", "self", ".", "_errors" ]
Updates the document with the given _id saved in the collection if it is valid. Returns errors otherwise.
[ "Updates", "the", "document", "with", "the", "given", "_id", "saved", "in", "the", "collection", "if", "it", "is", "valid", ".", "Returns", "errors", "otherwise", "." ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L187-L225
lvieirajr/mongorest
mongorest/collection.py
Collection.delete
def delete(self, **kwargs): """ Deletes the document if it is saved in the collection. """ if self.is_valid: if '_id' in self._document: to_delete = self.find_one({'_id': self._id}) if to_delete: before = self.before_delete() if before: return before try: self.delete_one({'_id': self._id}) self.after_delete() return self._document except PyMongoException as exc: return PyMongoError( error_message=exc.details.get( 'errmsg', exc.details.get( 'err', 'PyMongoError.' ) ), operation='delete', collection=type(self).__name__, document=self._document, ) else: return DocumentNotFoundError(type(self).__name__, self._id) else: return UnidentifiedDocumentError( type(self).__name__, self._document )
python
def delete(self, **kwargs): """ Deletes the document if it is saved in the collection. """ if self.is_valid: if '_id' in self._document: to_delete = self.find_one({'_id': self._id}) if to_delete: before = self.before_delete() if before: return before try: self.delete_one({'_id': self._id}) self.after_delete() return self._document except PyMongoException as exc: return PyMongoError( error_message=exc.details.get( 'errmsg', exc.details.get( 'err', 'PyMongoError.' ) ), operation='delete', collection=type(self).__name__, document=self._document, ) else: return DocumentNotFoundError(type(self).__name__, self._id) else: return UnidentifiedDocumentError( type(self).__name__, self._document )
[ "def", "delete", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "is_valid", ":", "if", "'_id'", "in", "self", ".", "_document", ":", "to_delete", "=", "self", ".", "find_one", "(", "{", "'_id'", ":", "self", ".", "_id", "}", ")", "if", "to_delete", ":", "before", "=", "self", ".", "before_delete", "(", ")", "if", "before", ":", "return", "before", "try", ":", "self", ".", "delete_one", "(", "{", "'_id'", ":", "self", ".", "_id", "}", ")", "self", ".", "after_delete", "(", ")", "return", "self", ".", "_document", "except", "PyMongoException", "as", "exc", ":", "return", "PyMongoError", "(", "error_message", "=", "exc", ".", "details", ".", "get", "(", "'errmsg'", ",", "exc", ".", "details", ".", "get", "(", "'err'", ",", "'PyMongoError.'", ")", ")", ",", "operation", "=", "'delete'", ",", "collection", "=", "type", "(", "self", ")", ".", "__name__", ",", "document", "=", "self", ".", "_document", ",", ")", "else", ":", "return", "DocumentNotFoundError", "(", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "_id", ")", "else", ":", "return", "UnidentifiedDocumentError", "(", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "_document", ")" ]
Deletes the document if it is saved in the collection.
[ "Deletes", "the", "document", "if", "it", "is", "saved", "in", "the", "collection", "." ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L228-L262
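A lifecycle sketch for the three instance methods above on a hypothetical `Collection` subclass; the `User` class and its fields are illustrative assumptions:

class User(Collection):
    pass

user = User({'name': 'Ada'})
saved = user.insert()    # valid -> document dict with '_id'; else errors

user.document['name'] = 'Ada Lovelace'
updated = user.update()  # replaces the stored document matching _id

removed = user.delete()  # deletes the stored document matching _id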
lvieirajr/mongorest
mongorest/collection.py
Collection.find_one
def find_one(cls, filter=None, *args, **kwargs): """ Returns one document dict if one passes the filter. Returns None otherwise. """ return cls.collection.find_one(filter, *args, **kwargs)
python
def find_one(cls, filter=None, *args, **kwargs): """ Returns one document dict if one passes the filter. Returns None otherwise. """ return cls.collection.find_one(filter, *args, **kwargs)
[ "def", "find_one", "(", "cls", ",", "filter", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "cls", ".", "collection", ".", "find_one", "(", "filter", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Returns one document dict if one passes the filter. Returns None otherwise.
[ "Returns", "one", "document", "dict", "if", "one", "passes", "the", "filter", ".", "Returns", "None", "otherwise", "." ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L266-L271
lvieirajr/mongorest
mongorest/collection.py
Collection.find
def find(cls, *args, **kwargs): """ Returns all document dicts that pass the filter """ return list(cls.collection.find(*args, **kwargs))
python
def find(cls, *args, **kwargs): """ Returns all document dicts that pass the filter """ return list(cls.collection.find(*args, **kwargs))
[ "def", "find", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "list", "(", "cls", ".", "collection", ".", "find", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
Returns all document dicts that pass the filter
[ "Returns", "all", "document", "dicts", "that", "pass", "the", "filter" ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L275-L279
lvieirajr/mongorest
mongorest/collection.py
Collection.aggregate
def aggregate(cls, pipeline=None, **kwargs): """ Returns the document dicts returned from the Aggregation Pipeline """ return list(cls.collection.aggregate(pipeline or [], **kwargs))
python
def aggregate(cls, pipeline=None, **kwargs): """ Returns the document dicts returned from the Aggregation Pipeline """ return list(cls.collection.aggregate(pipeline or [], **kwargs))
[ "def", "aggregate", "(", "cls", ",", "pipeline", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "list", "(", "cls", ".", "collection", ".", "aggregate", "(", "pipeline", "or", "[", "]", ",", "*", "*", "kwargs", ")", ")" ]
Returns the document dicts returned from the Aggregation Pipeline
[ "Returns", "the", "document", "dicts", "returned", "from", "the", "Aggregation", "Pipeline" ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L283-L287
lvieirajr/mongorest
mongorest/collection.py
Collection.insert_many
def insert_many(cls, documents, ordered=True): """ Inserts a list of documents into the Collection and returns their _ids """ return cls.collection.insert_many(documents, ordered).inserted_ids
python
def insert_many(cls, documents, ordered=True): """ Inserts a list of documents into the Collection and returns their _ids """ return cls.collection.insert_many(documents, ordered).inserted_ids
[ "def", "insert_many", "(", "cls", ",", "documents", ",", "ordered", "=", "True", ")", ":", "return", "cls", ".", "collection", ".", "insert_many", "(", "documents", ",", "ordered", ")", ".", "inserted_ids" ]
Inserts a list of documents into the Collection and returns their _ids
[ "Inserts", "a", "list", "of", "documents", "into", "the", "Collection", "and", "returns", "their", "_ids" ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L299-L303
lvieirajr/mongorest
mongorest/collection.py
Collection.update_one
def update_one(cls, filter, update, upsert=False): """ Updates a document that passes the filter with the update value Will upsert a new document if upsert=True and no document is filtered """ return cls.collection.update_one(filter, update, upsert).raw_result
python
def update_one(cls, filter, update, upsert=False): """ Updates a document that passes the filter with the update value Will upsert a new document if upsert=True and no document is filtered """ return cls.collection.update_one(filter, update, upsert).raw_result
[ "def", "update_one", "(", "cls", ",", "filter", ",", "update", ",", "upsert", "=", "False", ")", ":", "return", "cls", ".", "collection", ".", "update_one", "(", "filter", ",", "update", ",", "upsert", ")", ".", "raw_result" ]
Updates a document that passes the filter with the update value Will upsert a new document if upsert=True and no document is filtered
[ "Updates", "a", "document", "that", "passes", "the", "filter", "with", "the", "update", "value", "Will", "upsert", "a", "new", "document", "if", "upsert", "=", "True", "and", "no", "document", "is", "filtered" ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L307-L312
lvieirajr/mongorest
mongorest/collection.py
Collection.update_many
def update_many(cls, filter, update, upsert=False): """ Updates all documents that pass the filter with the update value Will upsert a new document if upsert=True and no document is filtered """ return cls.collection.update_many(filter, update, upsert).raw_result
python
def update_many(cls, filter, update, upsert=False): """ Updates all documents that pass the filter with the update value Will upsert a new document if upsert=True and no document is filtered """ return cls.collection.update_many(filter, update, upsert).raw_result
[ "def", "update_many", "(", "cls", ",", "filter", ",", "update", ",", "upsert", "=", "False", ")", ":", "return", "cls", ".", "collection", ".", "update_many", "(", "filter", ",", "update", ",", "upsert", ")", ".", "raw_result" ]
Updates all documents that pass the filter with the update value Will upsert a new document if upsert=True and no document is filtered
[ "Updates", "all", "documents", "that", "pass", "the", "filter", "with", "the", "update", "value", "Will", "upsert", "a", "new", "document", "if", "upsert", "=", "True", "and", "no", "document", "is", "filtered" ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L316-L321
lvieirajr/mongorest
mongorest/collection.py
Collection.replace_one
def replace_one(cls, filter, replacement, upsert=False): """ Replaces a document that passes the filter. Will upsert a new document if upsert=True and no document is filtered """ return cls.collection.replace_one( filter, replacement, upsert ).raw_result
python
def replace_one(cls, filter, replacement, upsert=False): """ Replaces a document that passes the filter. Will upsert a new document if upsert=True and no document is filtered """ return cls.collection.replace_one( filter, replacement, upsert ).raw_result
[ "def", "replace_one", "(", "cls", ",", "filter", ",", "replacement", ",", "upsert", "=", "False", ")", ":", "return", "cls", ".", "collection", ".", "replace_one", "(", "filter", ",", "replacement", ",", "upsert", ")", ".", "raw_result" ]
Replaces a document that passes the filter. Will upsert a new document if upsert=True and no document is filtered
[ "Replaces", "a", "document", "that", "passes", "the", "filter", ".", "Will", "upsert", "a", "new", "document", "if", "upsert", "=", "True", "and", "no", "document", "is", "filtered" ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L325-L332
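A sketch of the classmethod pass-throughs to PyMongo shown above, reusing the hypothetical `User` collection; all filters and update documents are illustrative:

ids = User.insert_many([{'name': 'Ada'}, {'name': 'Grace'}])
one = User.find_one({'name': 'Ada'})              # dict or None
many = User.find({'name': {'$exists': True}})     # list of dicts
stats = User.aggregate([{'$group': {'_id': None, 'n': {'$sum': 1}}}])
User.update_one({'name': 'Ada'}, {'$set': {'active': True}})
User.update_many({}, {'$set': {'active': True}})
User.replace_one({'name': 'Grace'}, {'name': 'Grace Hopper'}, upsert=True)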
lvieirajr/mongorest
mongorest/collection.py
Collection.get
def get(cls, filter=None, **kwargs): """ Returns a Document if any document is filtered, returns None otherwise """ document = cls(cls.find_one(filter, **kwargs)) return document if document.document else None
python
def get(cls, filter=None, **kwargs): """ Returns a Document if any document is filtered, returns None otherwise """ document = cls(cls.find_one(filter, **kwargs)) return document if document.document else None
[ "def", "get", "(", "cls", ",", "filter", "=", "None", ",", "*", "*", "kwargs", ")", ":", "document", "=", "cls", "(", "cls", ".", "find_one", "(", "filter", ",", "*", "*", "kwargs", ")", ")", "return", "document", "if", "document", ".", "document", "else", "None" ]
Returns a Document if any document is filtered, returns None otherwise
[ "Returns", "a", "Document", "if", "any", "document", "is", "filtered", "returns", "None", "otherwise" ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L359-L364
lvieirajr/mongorest
mongorest/collection.py
Collection.documents
def documents(cls, filter=None, **kwargs): """ Returns a list of Documents if any document is filtered """ documents = [cls(document) for document in cls.find(filter, **kwargs)] return [document for document in documents if document.document]
python
def documents(cls, filter=None, **kwargs): """ Returns a list of Documents if any document is filtered """ documents = [cls(document) for document in cls.find(filter, **kwargs)] return [document for document in documents if document.document]
[ "def", "documents", "(", "cls", ",", "filter", "=", "None", ",", "*", "*", "kwargs", ")", ":", "documents", "=", "[", "cls", "(", "document", ")", "for", "document", "in", "cls", ".", "find", "(", "filter", ",", "*", "*", "kwargs", ")", "]", "return", "[", "document", "for", "document", "in", "documents", "if", "document", ".", "document", "]" ]
Returns a list of Documents if any document is filtered
[ "Returns", "a", "list", "of", "Documents", "if", "any", "document", "is", "filtered" ]
train
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L367-L372
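A sketch contrasting `get` and `documents` with the raw find helpers, again on the hypothetical `User` collection:

user = User.get({'name': 'Ada'})          # User instance, or None if absent
users = User.documents({'active': True})  # list of User instances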
ChrisTimperley/Kaskara
python/kaskara/statements.py
StatementDB.in_file
def in_file(self, fn: str) -> Iterator[Statement]: """ Returns an iterator over all of the statements belonging to a file. """ yield from self.__file_to_statements.get(fn, [])
python
def in_file(self, fn: str) -> Iterator[Statement]: """ Returns an iterator over all of the statements belonging to a file. """ yield from self.__file_to_statements.get(fn, [])
[ "def", "in_file", "(", "self", ",", "fn", ":", "str", ")", "->", "Iterator", "[", "Statement", "]", ":", "yield", "from", "self", ".", "__file_to_statements", ".", "get", "(", "fn", ",", "[", "]", ")" ]
Returns an iterator over all of the statements belonging to a file.
[ "Returns", "an", "iterator", "over", "all", "of", "the", "statements", "belonging", "to", "a", "file", "." ]
train
https://github.com/ChrisTimperley/Kaskara/blob/3d182d95b2938508e5990eddd30321be15e2f2ef/python/kaskara/statements.py#L128-L132
ChrisTimperley/Kaskara
python/kaskara/statements.py
StatementDB.at_line
def at_line(self, line: FileLine) -> Iterator[Statement]: """ Returns an iterator over all of the statements located at a given line. """ num = line.num for stmt in self.in_file(line.filename): if stmt.location.start.line == num: yield stmt
python
def at_line(self, line: FileLine) -> Iterator[Statement]: """ Returns an iterator over all of the statements located at a given line. """ num = line.num for stmt in self.in_file(line.filename): if stmt.location.start.line == num: yield stmt
[ "def", "at_line", "(", "self", ",", "line", ":", "FileLine", ")", "->", "Iterator", "[", "Statement", "]", ":", "num", "=", "line", ".", "num", "for", "stmt", "in", "self", ".", "in_file", "(", "line", ".", "filename", ")", ":", "if", "stmt", ".", "location", ".", "start", ".", "line", "==", "num", ":", "yield", "stmt" ]
Returns an iterator over all of the statements located at a given line.
[ "Returns", "an", "iterator", "over", "all", "of", "the", "statements", "located", "at", "a", "given", "line", "." ]
train
https://github.com/ChrisTimperley/Kaskara/blob/3d182d95b2938508e5990eddd30321be15e2f2ef/python/kaskara/statements.py#L134-L141
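A sketch of querying a `StatementDB`; how the database is built and the `FileLine` import path are assumptions about the wider Kaskara API:

from kaskara.core import FileLine  # import path is an assumption

# `db` is assumed to be a StatementDB populated during project analysis.
for stmt in db.in_file('src/main.c'):
    print(stmt.location)

for stmt in db.at_line(FileLine('src/main.c', 42)):
    print(stmt)  # statements that start on line 42 of src/main.c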
MSchnei/pyprf_feature
pyprf_feature/analysis/find_prf_gpu.py
funcFindPrfGpu
def funcFindPrfGpu(idxPrc, vecMdlXpos, vecMdlYpos, vecMdlSd, aryFunc, # noqa aryPrfTc, varL2reg, queOut, lgcPrint=True): """ Find best pRF model for voxel time course. Parameters ---------- idxPrc : int Process ID of the process calling this function (for CPU multi-threading). In GPU version, this parameter is 0 (just one thread on CPU). vecMdlXpos : np.array 1D array with pRF model x positions. vecMdlYpos : np.array 1D array with pRF model y positions. vecMdlSd : np.array 1D array with pRF model sizes (SD of Gaussian). aryFunc : np.array 2D array with functional MRI data, with shape aryFunc[voxel, time]. aryPrfTc : np.array Array with pRF model time courses, with shape aryPrfTc[x-pos, y-pos, SD, motion-direction, time] varL2reg : float L2 regularisation factor for ridge regression. queOut : multiprocessing.queues.Queue Queue to put the results on. lgcPrint : boolean Whether print statements should be executed. Returns ------- lstOut : list List containing the following objects: idxPrc : int Process ID of the process calling this function (for CPU multi-threading). In GPU version, this parameter is 0. vecBstXpos : np.array 1D array with best fitting x-position for each voxel, with shape vecBstXpos[voxel]. vecBstYpos : np.array 1D array with best fitting y-position for each voxel, with shape vecBstYpos[voxel]. vecBstSd : np.array 1D array with best fitting pRF size for each voxel, with shape vecBstSd[voxel]. vecBstR2 : np.array 1D array with R2 value of 'winning' pRF model for each voxel, with shape vecBstR2[voxel]. dummy : np.array 2D array that is supposed to contain the beta values of 'winning' pRF models for each voxel, with shape aryBeta[voxel, beta]. AT THE MOMENT, CONTAINS EMPTY DUMMY ARRAY (np.zeros). Notes ----- Uses a queue that runs in a separate thread to put model time courses on the computational graph. """ # ------------------------------------------------------------------------- # *** Queue-feeding-function that will run in extra thread def funcPlcIn(): """Place data on queue.""" # Iteration counter: idxCnt = 0 while True: # Feed example to Tensorflow placeholder aryTmp02 = lstPrfTc[idxCnt] dicIn = {objPlcHld01: aryTmp02} # Push to the queue: objSess.run(objEnQ, feed_dict=dicIn) idxCnt += 1 # Stop if coordinator says stop: if objCoord.should_stop(): break # Stop if all data has been put on the queue: elif idxCnt == varNumMdls: break # ------------------------------------------------------------------------- # *** Prepare pRF model time courses for graph if lgcPrint: print('------Prepare pRF model time courses for graph') # Information about pRF model parameters: varNumX = np.shape(vecMdlXpos)[0] varNumY = np.shape(vecMdlYpos)[0] varNumPrfSizes = np.shape(vecMdlSd)[0] # Number of predictors (betas): varNumBeta = aryPrfTc.shape[3] # At this point, aryPrfTc has the following dimensions: # aryPrfTc[x-pos, y-pos, SD, motion-direction, time] # Reshape pRF model time courses: aryPrfTc = np.reshape(aryPrfTc, ((aryPrfTc.shape[0] * aryPrfTc.shape[1] * aryPrfTc.shape[2]), aryPrfTc.shape[3], aryPrfTc.shape[4])) # Now, aryPrfTc has the following dimensions: # aryPrfTc[(x-pos * y-pos * SD), motion-direction, time] # Original total number of pRF time course models (before removing models # with zero variance): varNumMdlsTtl = aryPrfTc.shape[0] # Change type to float 32: aryPrfTc = aryPrfTc.astype(np.float32) # The pRF model is fitted only if variance along time dimension is not # zero. 
Get variance along time dimension: vecVarPrfTc = np.var(aryPrfTc, axis=2) # Zero with float32 precision for comparison: varZero32 = np.array(([0.0])).astype(np.float32)[0] # Boolean array for models with variance greater than zero for at least # one motion direction: vecLgcVar = np.max( np.greater(vecVarPrfTc, varZero32), axis=1 ) # Take models with variance less than zero out of the array: aryPrfTc = aryPrfTc[vecLgcVar, :, :] # Swap axes, so that # aryPrfTc[(x-pos * y-pos * SD), time, motion-direction] aryPrfTc = np.swapaxes(aryPrfTc, 1, 2) # Add constant term (ones): # aryPrfTc = np.concatenate((aryPrfTc, # np.ones((aryPrfTc.shape[0], # aryPrfTc.shape[1], # 1)).astype(np.float32)), # axis=2) # Size of pRF time courses in MB: varSzePrf = np.divide(float(aryPrfTc.nbytes), 1000000.0) if lgcPrint: print(('---------Size of pRF time courses: ' + str(np.around(varSzePrf)) + ' MB')) # Put pRF model time courses into list: lstPrfTc = [None] * aryPrfTc.shape[0] for idxMdl in range(int(aryPrfTc.shape[0])): lstPrfTc[idxMdl] = aryPrfTc[idxMdl, :, :] del(aryPrfTc) # Total number of pRF models to fit: varNumMdls = len(lstPrfTc) # ------------------------------------------------------------------------- # *** Prepare functional data for graph if lgcPrint: print('------Prepare functional data for graph') # Number of voxels to be fitted: varNumVox = aryFunc.shape[0] # Number of volumes: varNumVol = aryFunc.shape[1] # We reshape the voxel time courses, so that time goes down the column, # i.e. from top to bottom. aryFunc = aryFunc.T # Change type to float 32: aryFunc = aryFunc.astype(np.float32) # We cannot commit the entire functional data to GPU memory, we need to # create chunks. Establish the limit (maximum size) of one chunk (in MB): varSzeMax = 50.0 # 20.0 # Size of functional data in MB: varSzeFunc = np.divide(float(aryFunc.nbytes), 1000000.0) if lgcPrint: print(('---------Size of functional data: ' + str(np.around(varSzeFunc)) + ' MB')) # Number of chunks to create: varNumChnk = int(np.ceil(np.divide(varSzeFunc, varSzeMax))) if lgcPrint: print(('---------Functional data will be split into ' + str(varNumChnk) + ' batches')) # Vector with the indicies at which the functional data will be separated # in order to be chunked up for the parallel processes: vecIdxChnks = np.linspace(0, varNumVox, num=varNumChnk, endpoint=False) vecIdxChnks = np.hstack((vecIdxChnks, varNumVox)) # List into which the chunks of functional data are put: lstFunc = [None] * varNumChnk # Put functional data into chunks: for idxChnk in range(0, varNumChnk): # Index of first voxel to be included in current chunk: varChnkStr = int(vecIdxChnks[idxChnk]) # Index of last voxel to be included in current chunk: varChnkEnd = int(vecIdxChnks[(idxChnk+1)]) # Put voxel array into list: lstFunc[idxChnk] = aryFunc[:, varChnkStr:varChnkEnd] # We delete the original array holding the functional data to conserve # memory. Therefore, we first need to calculate the mean (will be needed # for calculation of R2). # After finding the best fitting model for each voxel, we still have to # calculate the coefficient of determination (R-squared) for each voxel. We # start by calculating the total sum of squares (i.e. the deviation of the # data from the mean). 
The mean of each time course: vecFuncMean = np.mean(aryFunc, axis=0) # Deviation from the mean for each datapoint: vecFuncDev = np.subtract(aryFunc, vecFuncMean[None, :]) # Sum of squares: vecSsTot = np.sum(np.power(vecFuncDev, 2.0), axis=0) # We don't need the original array with the functional data anymore (the # above seems to have created a hard copy): del(vecFuncDev) del(aryFunc) # ------------------------------------------------------------------------- # *** Miscellaneous preparations # Vector for minimum squared residuals: vecResSsMin = np.zeros((varNumVox), dtype=np.float32) # Vector for indices of models with minimum residuals: vecResSsMinIdx = np.zeros((varNumVox), dtype=np.int32) # Multiply L2 regularization factor with identity matrix: aryL2reg = np.multiply(np.eye(varNumBeta), varL2reg).astype(np.float32) # Reduce logging verbosity: os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # ------------------------------------------------------------------------- # *** Prepare status indicator # We create a status indicator for the time consuming pRF model finding # algorithm. Number of steps of the status indicator: varStsStpSze = 20 # Vector with pRF values at which to give status feedback: vecStatPrf = np.linspace(0, (varNumMdls * varNumChnk), num=(varStsStpSze+1), endpoint=True) vecStatPrf = np.ceil(vecStatPrf) vecStatPrf = vecStatPrf.astype(int) # Vector with corresponding percentage values at which to give status # feedback: vecStatPrc = np.linspace(0, 100, num=(varStsStpSze+1), endpoint=True) vecStatPrc = np.ceil(vecStatPrc) vecStatPrc = vecStatPrc.astype(int) # Counter for status indicator: varCntSts01 = 0 varCntSts02 = 0 # ------------------------------------------------------------------------- # *** Loop through chunks if lgcPrint: print('------Run graph') for idxChnk in range(varNumChnk): if lgcPrint: print(('---------Chunk: ' + str(idxChnk))) print('lstPrfTc[0].shape') print(lstPrfTc[0].shape) # Define session: # objSess = tf.Session() with tf.Graph().as_default(), tf.Session() as objSess: # ----------------------------------------------------------------- # *** Prepare queue if lgcPrint: print('------Define computational graph, queue & session') # Queue capacity: varCapQ = 10 # Dimensions of placeholder have to be determined outside of the # tensor object, otherwise the object on which the size is # calculated is loaded into GPU memory. varDim01 = lstPrfTc[0].shape[0] varDim02 = lstPrfTc[0].shape[1] # The queue: objQ = tf.FIFOQueue(capacity=varCapQ, dtypes=[tf.float32], shapes=[(varDim01, varDim02)]) # Method for getting queue size: objSzeQ = objQ.size() # Placeholder that is used to put design matrix on computational # graph: objPlcHld01 = tf.placeholder(tf.float32, shape=[varDim01, varDim02]) # The enqueue operation that puts data on the graph. objEnQ = objQ.enqueue([objPlcHld01]) # Number of threads that will be created: varNumThrd = 1 # The queue runner (places the enqueue operation on the queue?). objRunQ = tf.train.QueueRunner(objQ, [objEnQ] * varNumThrd) tf.train.add_queue_runner(objRunQ) # The tensor object that is retrieved from the queue. Functions # like placeholders for the data in the queue when defining the # graph. 
objDsng = objQ.dequeue() # Coordinator needs to be initialised: objCoord = tf.train.Coordinator() # ----------------------------------------------------------------- # *** Fill queue # Buffer size (number of samples to put on queue before starting # execution of graph): varBuff = 10 # Define & run extra thread with graph that places data on queue: objThrd = threading.Thread(target=funcPlcIn) objThrd.setDaemon(True) objThrd.start() # Stay in this while loop until the specified number of samples # (varBuffer) have been placed on the queue). varTmpSzeQ = 0 while varTmpSzeQ < varBuff: varTmpSzeQ = objSess.run(objSzeQ) # ----------------------------------------------------------------- # *** Prepare & run the graph # Chunk of functional data: aryTmp01 = np.copy(lstFunc[idxChnk]) with tf.device('/gpu:0'): objFunc = tf.Variable(aryTmp01) # Regularisation factor matrix: with tf.device('/gpu:0'): objL2reg = tf.Variable(aryL2reg) # The computational graph. Operation that solves matrix (in the # least squares sense), and calculates residuals along time # dimension. There are two versions: (1) The number of measurements # (e.g. volumes) is greater than or equal to the number of # predictors (betas). (2) The number of measurements is less than # the number of predictors. # (1) Number of measurements greater/equal to number of predictors: if np.greater_equal(varNumVol, varNumBeta): objMatSlve = tf.reduce_sum( tf.squared_difference( objFunc, tf.matmul( objDsng, tf.matmul( tf.matmul( tf.matrix_inverse( tf.add( tf.matmul( objDsng, objDsng, transpose_a=True, transpose_b=False ), objL2reg ) ), objDsng, transpose_a=False, transpose_b=True ), objFunc ) ), ), axis=0 ) # (2) Number of measurements less than number of predictors: else: objMatSlve = tf.reduce_sum( tf.squared_difference( objFunc, tf.matmul( objDsng, tf.matmul( tf.matmul( objDsng, tf.matrix_inverse( tf.add( tf.matmul( objDsng, objDsng, transpose_a=False, transpose_b=True ), objL2reg ) ), transpose_a=True, transpose_b=False ), objFunc ) ), ), axis=0 ) # Variables need to be (re-)initialised: objSess.run(tf.global_variables_initializer()) # Mark graph as read-only (would throw an error in case of memory # leak): objSess.graph.finalize() # Index of first voxel in current chunk (needed to assign results): varChnkStr = int(vecIdxChnks[idxChnk]) # Index of last voxel in current chunk (needed to assign results): varChnkEnd = int(vecIdxChnks[(idxChnk+1)]) # Array for results of current chunk: aryTmpRes = np.zeros((varNumMdls, lstFunc[idxChnk].shape[1]), dtype=np.float32) # Loop through models: for idxMdl in range(varNumMdls): # Run main computational graph and put results in list: # varTme01 = time.time() aryTmpRes[idxMdl, :] = objSess.run(objMatSlve) # print(('---------Time for graph call: ' # + str(time.time() - varTme01))) # Status indicator: if varCntSts02 == vecStatPrf[varCntSts01]: # Number of elements on queue: varTmpSzeQ = objSess.run(objSzeQ) # Prepare status message: strStsMsg = ('---------Progress: ' + str(vecStatPrc[varCntSts01]) + ' % --- Number of elements on queue: ' + str(varTmpSzeQ)) if lgcPrint: print(strStsMsg) # Only increment counter if the last value has not been # reached yet: if varCntSts01 < varStsStpSze: varCntSts01 = varCntSts01 + int(1) # Increment status indicator counter: varCntSts02 = varCntSts02 + 1 # Stop threads. 
objCoord.request_stop() # objSess.close() # Get indices of models with minimum residuals (minimum along # model-space) for current chunk: vecResSsMinIdx[varChnkStr:varChnkEnd] = np.argmin(aryTmpRes, axis=0) # Get minimum residuals of those models: vecResSsMin[varChnkStr:varChnkEnd] = np.min(aryTmpRes, axis=0) # ------------------------------------------------------------------------- # *** Post-process results if lgcPrint: print('------Post-processing results') # Array for model parameters. At the moment, we have the indices of the # best fitting models, so we need an array that tells us what model # parameters these indices refer to. aryMdl = np.zeros((varNumMdlsTtl, 3), dtype=np.float32) # Model parameter can be represented as float32 as well: vecMdlXpos = vecMdlXpos.astype(np.float32) vecMdlYpos = vecMdlYpos.astype(np.float32) vecMdlSd = vecMdlSd.astype(np.float32) # The first column is to contain model x positions: aryMdl[:, 0] = np.repeat(vecMdlXpos, int(varNumY * varNumPrfSizes)) # The second column is to contain model y positions: aryMdl[:, 1] = np.repeat( np.tile(vecMdlYpos, varNumX), varNumPrfSizes ) # The third column is to contain model pRF sizes: aryMdl[:, 2] = np.tile(vecMdlSd, int(varNumX * varNumY)) # Earlier, we had removed models with a variance of zero. Thus, those # models were ignored and are not present in the results. We remove them # from the model-parameter-array: aryMdl = aryMdl[vecLgcVar] # Retrieve model parameters of 'winning' model for all voxels: vecBstXpos = aryMdl[:, 0][vecResSsMinIdx] vecBstYpos = aryMdl[:, 1][vecResSsMinIdx] vecBstSd = aryMdl[:, 2][vecResSsMinIdx] # Coefficient of determination (1 - ratio of (residual sum of squares by # total sum of squares)): vecBstR2 = np.subtract(1.0, np.divide(vecResSsMin, vecSsTot) ) # Output list: lstOut = [idxPrc, vecBstXpos, vecBstYpos, vecBstSd, vecBstR2, np.zeros((varNumVox, (varNumBeta))).astype(np.float32)] queOut.put(lstOut)
python
def funcFindPrfGpu(idxPrc, vecMdlXpos, vecMdlYpos, vecMdlSd, aryFunc, # noqa aryPrfTc, varL2reg, queOut, lgcPrint=True): """ Find best pRF model for voxel time course. Parameters ---------- idxPrc : int Process ID of the process calling this function (for CPU multi-threading). In GPU version, this parameter is 0 (just one thread on CPU). vecMdlXpos : np.array 1D array with pRF model x positions. vecMdlYpos : np.array 1D array with pRF model y positions. vecMdlSd : np.array 1D array with pRF model sizes (SD of Gaussian). aryFunc : np.array 2D array with functional MRI data, with shape aryFunc[voxel, time]. aryPrfTc : np.array Array with pRF model time courses, with shape aryPrfTc[x-pos, y-pos, SD, motion-direction, time] varL2reg : float L2 regularisation factor for ridge regression. queOut : multiprocessing.queues.Queue Queue to put the results on. lgcPrint : boolean Whether print statements should be executed. Returns ------- lstOut : list List containing the following objects: idxPrc : int Process ID of the process calling this function (for CPU multi-threading). In GPU version, this parameter is 0. vecBstXpos : np.array 1D array with best fitting x-position for each voxel, with shape vecBstXpos[voxel]. vecBstYpos : np.array 1D array with best fitting y-position for each voxel, with shape vecBstYpos[voxel]. vecBstSd : np.array 1D array with best fitting pRF size for each voxel, with shape vecBstSd[voxel]. vecBstR2 : np.array 1D array with R2 value of 'winning' pRF model for each voxel, with shape vecBstR2[voxel]. dummy : np.array 2D array that is supposed to contain the beta values of 'winning' pRF models for each voxel, with shape aryBeta[voxel, beta]. AT THE MOMENT, CONTAINS EMPTY DUMMY ARRAY (np.zeros). Notes ----- Uses a queue that runs in a separate thread to put model time courses on the computational graph. """ # ------------------------------------------------------------------------- # *** Queue-feeding-function that will run in extra thread def funcPlcIn(): """Place data on queue.""" # Iteration counter: idxCnt = 0 while True: # Feed example to Tensorflow placeholder aryTmp02 = lstPrfTc[idxCnt] dicIn = {objPlcHld01: aryTmp02} # Push to the queue: objSess.run(objEnQ, feed_dict=dicIn) idxCnt += 1 # Stop if coordinator says stop: if objCoord.should_stop(): break # Stop if all data has been put on the queue: elif idxCnt == varNumMdls: break # ------------------------------------------------------------------------- # *** Prepare pRF model time courses for graph if lgcPrint: print('------Prepare pRF model time courses for graph') # Information about pRF model parameters: varNumX = np.shape(vecMdlXpos)[0] varNumY = np.shape(vecMdlYpos)[0] varNumPrfSizes = np.shape(vecMdlSd)[0] # Number of predictors (betas): varNumBeta = aryPrfTc.shape[3] # At this point, aryPrfTc has the following dimensions: # aryPrfTc[x-pos, y-pos, SD, motion-direction, time] # Reshape pRF model time courses: aryPrfTc = np.reshape(aryPrfTc, ((aryPrfTc.shape[0] * aryPrfTc.shape[1] * aryPrfTc.shape[2]), aryPrfTc.shape[3], aryPrfTc.shape[4])) # Now, aryPrfTc has the following dimensions: # aryPrfTc[(x-pos * y-pos * SD), motion-direction, time] # Original total number of pRF time course models (before removing models # with zero variance): varNumMdlsTtl = aryPrfTc.shape[0] # Change type to float 32: aryPrfTc = aryPrfTc.astype(np.float32) # The pRF model is fitted only if variance along time dimension is not # zero. 
Get variance along time dimension: vecVarPrfTc = np.var(aryPrfTc, axis=2) # Zero with float32 precision for comparison: varZero32 = np.array(([0.0])).astype(np.float32)[0] # Boolean array for models with variance greater than zero for at least # one motion direction: vecLgcVar = np.max( np.greater(vecVarPrfTc, varZero32), axis=1 ) # Take models with variance less than zero out of the array: aryPrfTc = aryPrfTc[vecLgcVar, :, :] # Swap axes, so that # aryPrfTc[(x-pos * y-pos * SD), time, motion-direction] aryPrfTc = np.swapaxes(aryPrfTc, 1, 2) # Add constant term (ones): # aryPrfTc = np.concatenate((aryPrfTc, # np.ones((aryPrfTc.shape[0], # aryPrfTc.shape[1], # 1)).astype(np.float32)), # axis=2) # Size of pRF time courses in MB: varSzePrf = np.divide(float(aryPrfTc.nbytes), 1000000.0) if lgcPrint: print(('---------Size of pRF time courses: ' + str(np.around(varSzePrf)) + ' MB')) # Put pRF model time courses into list: lstPrfTc = [None] * aryPrfTc.shape[0] for idxMdl in range(int(aryPrfTc.shape[0])): lstPrfTc[idxMdl] = aryPrfTc[idxMdl, :, :] del(aryPrfTc) # Total number of pRF models to fit: varNumMdls = len(lstPrfTc) # ------------------------------------------------------------------------- # *** Prepare functional data for graph if lgcPrint: print('------Prepare functional data for graph') # Number of voxels to be fitted: varNumVox = aryFunc.shape[0] # Number of volumes: varNumVol = aryFunc.shape[1] # We reshape the voxel time courses, so that time goes down the column, # i.e. from top to bottom. aryFunc = aryFunc.T # Change type to float 32: aryFunc = aryFunc.astype(np.float32) # We cannot commit the entire functional data to GPU memory, we need to # create chunks. Establish the limit (maximum size) of one chunk (in MB): varSzeMax = 50.0 # 20.0 # Size of functional data in MB: varSzeFunc = np.divide(float(aryFunc.nbytes), 1000000.0) if lgcPrint: print(('---------Size of functional data: ' + str(np.around(varSzeFunc)) + ' MB')) # Number of chunks to create: varNumChnk = int(np.ceil(np.divide(varSzeFunc, varSzeMax))) if lgcPrint: print(('---------Functional data will be split into ' + str(varNumChnk) + ' batches')) # Vector with the indicies at which the functional data will be separated # in order to be chunked up for the parallel processes: vecIdxChnks = np.linspace(0, varNumVox, num=varNumChnk, endpoint=False) vecIdxChnks = np.hstack((vecIdxChnks, varNumVox)) # List into which the chunks of functional data are put: lstFunc = [None] * varNumChnk # Put functional data into chunks: for idxChnk in range(0, varNumChnk): # Index of first voxel to be included in current chunk: varChnkStr = int(vecIdxChnks[idxChnk]) # Index of last voxel to be included in current chunk: varChnkEnd = int(vecIdxChnks[(idxChnk+1)]) # Put voxel array into list: lstFunc[idxChnk] = aryFunc[:, varChnkStr:varChnkEnd] # We delete the original array holding the functional data to conserve # memory. Therefore, we first need to calculate the mean (will be needed # for calculation of R2). # After finding the best fitting model for each voxel, we still have to # calculate the coefficient of determination (R-squared) for each voxel. We # start by calculating the total sum of squares (i.e. the deviation of the # data from the mean). 
The mean of each time course: vecFuncMean = np.mean(aryFunc, axis=0) # Deviation from the mean for each datapoint: vecFuncDev = np.subtract(aryFunc, vecFuncMean[None, :]) # Sum of squares: vecSsTot = np.sum(np.power(vecFuncDev, 2.0), axis=0) # We don't need the original array with the functional data anymore (the # above seems to have created a hard copy): del(vecFuncDev) del(aryFunc) # ------------------------------------------------------------------------- # *** Miscellaneous preparations # Vector for minimum squared residuals: vecResSsMin = np.zeros((varNumVox), dtype=np.float32) # Vector for indices of models with minimum residuals: vecResSsMinIdx = np.zeros((varNumVox), dtype=np.int32) # Multiply L2 regularization factor with identity matrix: aryL2reg = np.multiply(np.eye(varNumBeta), varL2reg).astype(np.float32) # Reduce logging verbosity: os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # ------------------------------------------------------------------------- # *** Prepare status indicator # We create a status indicator for the time consuming pRF model finding # algorithm. Number of steps of the status indicator: varStsStpSze = 20 # Vector with pRF values at which to give status feedback: vecStatPrf = np.linspace(0, (varNumMdls * varNumChnk), num=(varStsStpSze+1), endpoint=True) vecStatPrf = np.ceil(vecStatPrf) vecStatPrf = vecStatPrf.astype(int) # Vector with corresponding percentage values at which to give status # feedback: vecStatPrc = np.linspace(0, 100, num=(varStsStpSze+1), endpoint=True) vecStatPrc = np.ceil(vecStatPrc) vecStatPrc = vecStatPrc.astype(int) # Counter for status indicator: varCntSts01 = 0 varCntSts02 = 0 # ------------------------------------------------------------------------- # *** Loop through chunks if lgcPrint: print('------Run graph') for idxChnk in range(varNumChnk): if lgcPrint: print(('---------Chunk: ' + str(idxChnk))) print('lstPrfTc[0].shape') print(lstPrfTc[0].shape) # Define session: # objSess = tf.Session() with tf.Graph().as_default(), tf.Session() as objSess: # ----------------------------------------------------------------- # *** Prepare queue if lgcPrint: print('------Define computational graph, queue & session') # Queue capacity: varCapQ = 10 # Dimensions of placeholder have to be determined outside of the # tensor object, otherwise the object on which the size is # calculated is loaded into GPU memory. varDim01 = lstPrfTc[0].shape[0] varDim02 = lstPrfTc[0].shape[1] # The queue: objQ = tf.FIFOQueue(capacity=varCapQ, dtypes=[tf.float32], shapes=[(varDim01, varDim02)]) # Method for getting queue size: objSzeQ = objQ.size() # Placeholder that is used to put design matrix on computational # graph: objPlcHld01 = tf.placeholder(tf.float32, shape=[varDim01, varDim02]) # The enqueue operation that puts data on the graph. objEnQ = objQ.enqueue([objPlcHld01]) # Number of threads that will be created: varNumThrd = 1 # The queue runner (places the enqueue operation on the queue?). objRunQ = tf.train.QueueRunner(objQ, [objEnQ] * varNumThrd) tf.train.add_queue_runner(objRunQ) # The tensor object that is retrieved from the queue. Functions # like placeholders for the data in the queue when defining the # graph. 
objDsng = objQ.dequeue() # Coordinator needs to be initialised: objCoord = tf.train.Coordinator() # ----------------------------------------------------------------- # *** Fill queue # Buffer size (number of samples to put on queue before starting # execution of graph): varBuff = 10 # Define & run extra thread with graph that places data on queue: objThrd = threading.Thread(target=funcPlcIn) objThrd.setDaemon(True) objThrd.start() # Stay in this while loop until the specified number of samples # (varBuffer) have been placed on the queue). varTmpSzeQ = 0 while varTmpSzeQ < varBuff: varTmpSzeQ = objSess.run(objSzeQ) # ----------------------------------------------------------------- # *** Prepare & run the graph # Chunk of functional data: aryTmp01 = np.copy(lstFunc[idxChnk]) with tf.device('/gpu:0'): objFunc = tf.Variable(aryTmp01) # Regularisation factor matrix: with tf.device('/gpu:0'): objL2reg = tf.Variable(aryL2reg) # The computational graph. Operation that solves matrix (in the # least squares sense), and calculates residuals along time # dimension. There are two versions: (1) The number of measurements # (e.g. volumes) is greater than or equal to the number of # predictors (betas). (2) The number of measurements is less than # the number of predictors. # (1) Number of measurements greater/equal to number of predictors: if np.greater_equal(varNumVol, varNumBeta): objMatSlve = tf.reduce_sum( tf.squared_difference( objFunc, tf.matmul( objDsng, tf.matmul( tf.matmul( tf.matrix_inverse( tf.add( tf.matmul( objDsng, objDsng, transpose_a=True, transpose_b=False ), objL2reg ) ), objDsng, transpose_a=False, transpose_b=True ), objFunc ) ), ), axis=0 ) # (2) Number of measurements less than number of predictors: else: objMatSlve = tf.reduce_sum( tf.squared_difference( objFunc, tf.matmul( objDsng, tf.matmul( tf.matmul( objDsng, tf.matrix_inverse( tf.add( tf.matmul( objDsng, objDsng, transpose_a=False, transpose_b=True ), objL2reg ) ), transpose_a=True, transpose_b=False ), objFunc ) ), ), axis=0 ) # Variables need to be (re-)initialised: objSess.run(tf.global_variables_initializer()) # Mark graph as read-only (would throw an error in case of memory # leak): objSess.graph.finalize() # Index of first voxel in current chunk (needed to assign results): varChnkStr = int(vecIdxChnks[idxChnk]) # Index of last voxel in current chunk (needed to assign results): varChnkEnd = int(vecIdxChnks[(idxChnk+1)]) # Array for results of current chunk: aryTmpRes = np.zeros((varNumMdls, lstFunc[idxChnk].shape[1]), dtype=np.float32) # Loop through models: for idxMdl in range(varNumMdls): # Run main computational graph and put results in list: # varTme01 = time.time() aryTmpRes[idxMdl, :] = objSess.run(objMatSlve) # print(('---------Time for graph call: ' # + str(time.time() - varTme01))) # Status indicator: if varCntSts02 == vecStatPrf[varCntSts01]: # Number of elements on queue: varTmpSzeQ = objSess.run(objSzeQ) # Prepare status message: strStsMsg = ('---------Progress: ' + str(vecStatPrc[varCntSts01]) + ' % --- Number of elements on queue: ' + str(varTmpSzeQ)) if lgcPrint: print(strStsMsg) # Only increment counter if the last value has not been # reached yet: if varCntSts01 < varStsStpSze: varCntSts01 = varCntSts01 + int(1) # Increment status indicator counter: varCntSts02 = varCntSts02 + 1 # Stop threads. 
objCoord.request_stop() # objSess.close() # Get indices of models with minimum residuals (minimum along # model-space) for current chunk: vecResSsMinIdx[varChnkStr:varChnkEnd] = np.argmin(aryTmpRes, axis=0) # Get minimum residuals of those models: vecResSsMin[varChnkStr:varChnkEnd] = np.min(aryTmpRes, axis=0) # ------------------------------------------------------------------------- # *** Post-process results if lgcPrint: print('------Post-processing results') # Array for model parameters. At the moment, we have the indices of the # best fitting models, so we need an array that tells us what model # parameters these indices refer to. aryMdl = np.zeros((varNumMdlsTtl, 3), dtype=np.float32) # Model parameter can be represented as float32 as well: vecMdlXpos = vecMdlXpos.astype(np.float32) vecMdlYpos = vecMdlYpos.astype(np.float32) vecMdlSd = vecMdlSd.astype(np.float32) # The first column is to contain model x positions: aryMdl[:, 0] = np.repeat(vecMdlXpos, int(varNumY * varNumPrfSizes)) # The second column is to contain model y positions: aryMdl[:, 1] = np.repeat( np.tile(vecMdlYpos, varNumX), varNumPrfSizes ) # The third column is to contain model pRF sizes: aryMdl[:, 2] = np.tile(vecMdlSd, int(varNumX * varNumY)) # Earlier, we had removed models with a variance of zero. Thus, those # models were ignored and are not present in the results. We remove them # from the model-parameter-array: aryMdl = aryMdl[vecLgcVar] # Retrieve model parameters of 'winning' model for all voxels: vecBstXpos = aryMdl[:, 0][vecResSsMinIdx] vecBstYpos = aryMdl[:, 1][vecResSsMinIdx] vecBstSd = aryMdl[:, 2][vecResSsMinIdx] # Coefficient of determination (1 - ratio of (residual sum of squares by # total sum of squares)): vecBstR2 = np.subtract(1.0, np.divide(vecResSsMin, vecSsTot) ) # Output list: lstOut = [idxPrc, vecBstXpos, vecBstYpos, vecBstSd, vecBstR2, np.zeros((varNumVox, (varNumBeta))).astype(np.float32)] queOut.put(lstOut)
[ "def", "funcFindPrfGpu", "(", "idxPrc", ",", "vecMdlXpos", ",", "vecMdlYpos", ",", "vecMdlSd", ",", "aryFunc", ",", "# noqa", "aryPrfTc", ",", "varL2reg", ",", "queOut", ",", "lgcPrint", "=", "True", ")", ":", "# -------------------------------------------------------------------------", "# *** Queue-feeding-function that will run in extra thread", "def", "funcPlcIn", "(", ")", ":", "\"\"\"Place data on queue.\"\"\"", "# Iteration counter:", "idxCnt", "=", "0", "while", "True", ":", "# Feed example to Tensorflow placeholder", "aryTmp02", "=", "lstPrfTc", "[", "idxCnt", "]", "dicIn", "=", "{", "objPlcHld01", ":", "aryTmp02", "}", "# Push to the queue:", "objSess", ".", "run", "(", "objEnQ", ",", "feed_dict", "=", "dicIn", ")", "idxCnt", "+=", "1", "# Stop if coordinator says stop:", "if", "objCoord", ".", "should_stop", "(", ")", ":", "break", "# Stop if all data has been put on the queue:", "elif", "idxCnt", "==", "varNumMdls", ":", "break", "# -------------------------------------------------------------------------", "# *** Prepare pRF model time courses for graph", "if", "lgcPrint", ":", "print", "(", "'------Prepare pRF model time courses for graph'", ")", "# Information about pRF model parameters:", "varNumX", "=", "np", ".", "shape", "(", "vecMdlXpos", ")", "[", "0", "]", "varNumY", "=", "np", ".", "shape", "(", "vecMdlYpos", ")", "[", "0", "]", "varNumPrfSizes", "=", "np", ".", "shape", "(", "vecMdlSd", ")", "[", "0", "]", "# Number of predictors (betas):", "varNumBeta", "=", "aryPrfTc", ".", "shape", "[", "3", "]", "# At this point, aryPrfTc has the following dimensions:", "# aryPrfTc[x-pos, y-pos, SD, motion-direction, time]", "# Reshape pRF model time courses:", "aryPrfTc", "=", "np", ".", "reshape", "(", "aryPrfTc", ",", "(", "(", "aryPrfTc", ".", "shape", "[", "0", "]", "*", "aryPrfTc", ".", "shape", "[", "1", "]", "*", "aryPrfTc", ".", "shape", "[", "2", "]", ")", ",", "aryPrfTc", ".", "shape", "[", "3", "]", ",", "aryPrfTc", ".", "shape", "[", "4", "]", ")", ")", "# Now, aryPrfTc has the following dimensions:", "# aryPrfTc[(x-pos * y-pos * SD), motion-direction, time]", "# Original total number of pRF time course models (before removing models", "# with zero variance):", "varNumMdlsTtl", "=", "aryPrfTc", ".", "shape", "[", "0", "]", "# Change type to float 32:", "aryPrfTc", "=", "aryPrfTc", ".", "astype", "(", "np", ".", "float32", ")", "# The pRF model is fitted only if variance along time dimension is not", "# zero. 
Get variance along time dimension:", "vecVarPrfTc", "=", "np", ".", "var", "(", "aryPrfTc", ",", "axis", "=", "2", ")", "# Zero with float32 precision for comparison:", "varZero32", "=", "np", ".", "array", "(", "(", "[", "0.0", "]", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "[", "0", "]", "# Boolean array for models with variance greater than zero for at least", "# one motion direction:", "vecLgcVar", "=", "np", ".", "max", "(", "np", ".", "greater", "(", "vecVarPrfTc", ",", "varZero32", ")", ",", "axis", "=", "1", ")", "# Take models with variance less than zero out of the array:", "aryPrfTc", "=", "aryPrfTc", "[", "vecLgcVar", ",", ":", ",", ":", "]", "# Swap axes, so that", "# aryPrfTc[(x-pos * y-pos * SD), time, motion-direction]", "aryPrfTc", "=", "np", ".", "swapaxes", "(", "aryPrfTc", ",", "1", ",", "2", ")", "# Add constant term (ones):", "# aryPrfTc = np.concatenate((aryPrfTc,", "# np.ones((aryPrfTc.shape[0],", "# aryPrfTc.shape[1],", "# 1)).astype(np.float32)),", "# axis=2)", "# Size of pRF time courses in MB:", "varSzePrf", "=", "np", ".", "divide", "(", "float", "(", "aryPrfTc", ".", "nbytes", ")", ",", "1000000.0", ")", "if", "lgcPrint", ":", "print", "(", "(", "'---------Size of pRF time courses: '", "+", "str", "(", "np", ".", "around", "(", "varSzePrf", ")", ")", "+", "' MB'", ")", ")", "# Put pRF model time courses into list:", "lstPrfTc", "=", "[", "None", "]", "*", "aryPrfTc", ".", "shape", "[", "0", "]", "for", "idxMdl", "in", "range", "(", "int", "(", "aryPrfTc", ".", "shape", "[", "0", "]", ")", ")", ":", "lstPrfTc", "[", "idxMdl", "]", "=", "aryPrfTc", "[", "idxMdl", ",", ":", ",", ":", "]", "del", "(", "aryPrfTc", ")", "# Total number of pRF models to fit:", "varNumMdls", "=", "len", "(", "lstPrfTc", ")", "# -------------------------------------------------------------------------", "# *** Prepare functional data for graph", "if", "lgcPrint", ":", "print", "(", "'------Prepare functional data for graph'", ")", "# Number of voxels to be fitted:", "varNumVox", "=", "aryFunc", ".", "shape", "[", "0", "]", "# Number of volumes:", "varNumVol", "=", "aryFunc", ".", "shape", "[", "1", "]", "# We reshape the voxel time courses, so that time goes down the column,", "# i.e. from top to bottom.", "aryFunc", "=", "aryFunc", ".", "T", "# Change type to float 32:", "aryFunc", "=", "aryFunc", ".", "astype", "(", "np", ".", "float32", ")", "# We cannot commit the entire functional data to GPU memory, we need to", "# create chunks. 
Establish the limit (maximum size) of one chunk (in MB):", "varSzeMax", "=", "50.0", "# 20.0", "# Size of functional data in MB:", "varSzeFunc", "=", "np", ".", "divide", "(", "float", "(", "aryFunc", ".", "nbytes", ")", ",", "1000000.0", ")", "if", "lgcPrint", ":", "print", "(", "(", "'---------Size of functional data: '", "+", "str", "(", "np", ".", "around", "(", "varSzeFunc", ")", ")", "+", "' MB'", ")", ")", "# Number of chunks to create:", "varNumChnk", "=", "int", "(", "np", ".", "ceil", "(", "np", ".", "divide", "(", "varSzeFunc", ",", "varSzeMax", ")", ")", ")", "if", "lgcPrint", ":", "print", "(", "(", "'---------Functional data will be split into '", "+", "str", "(", "varNumChnk", ")", "+", "' batches'", ")", ")", "# Vector with the indicies at which the functional data will be separated", "# in order to be chunked up for the parallel processes:", "vecIdxChnks", "=", "np", ".", "linspace", "(", "0", ",", "varNumVox", ",", "num", "=", "varNumChnk", ",", "endpoint", "=", "False", ")", "vecIdxChnks", "=", "np", ".", "hstack", "(", "(", "vecIdxChnks", ",", "varNumVox", ")", ")", "# List into which the chunks of functional data are put:", "lstFunc", "=", "[", "None", "]", "*", "varNumChnk", "# Put functional data into chunks:", "for", "idxChnk", "in", "range", "(", "0", ",", "varNumChnk", ")", ":", "# Index of first voxel to be included in current chunk:", "varChnkStr", "=", "int", "(", "vecIdxChnks", "[", "idxChnk", "]", ")", "# Index of last voxel to be included in current chunk:", "varChnkEnd", "=", "int", "(", "vecIdxChnks", "[", "(", "idxChnk", "+", "1", ")", "]", ")", "# Put voxel array into list:", "lstFunc", "[", "idxChnk", "]", "=", "aryFunc", "[", ":", ",", "varChnkStr", ":", "varChnkEnd", "]", "# We delete the original array holding the functional data to conserve", "# memory. Therefore, we first need to calculate the mean (will be needed", "# for calculation of R2).", "# After finding the best fitting model for each voxel, we still have to", "# calculate the coefficient of determination (R-squared) for each voxel. We", "# start by calculating the total sum of squares (i.e. the deviation of the", "# data from the mean). 
The mean of each time course:", "vecFuncMean", "=", "np", ".", "mean", "(", "aryFunc", ",", "axis", "=", "0", ")", "# Deviation from the mean for each datapoint:", "vecFuncDev", "=", "np", ".", "subtract", "(", "aryFunc", ",", "vecFuncMean", "[", "None", ",", ":", "]", ")", "# Sum of squares:", "vecSsTot", "=", "np", ".", "sum", "(", "np", ".", "power", "(", "vecFuncDev", ",", "2.0", ")", ",", "axis", "=", "0", ")", "# We don't need the original array with the functional data anymore (the", "# above seems to have created a hard copy):", "del", "(", "vecFuncDev", ")", "del", "(", "aryFunc", ")", "# -------------------------------------------------------------------------", "# *** Miscellaneous preparations", "# Vector for minimum squared residuals:", "vecResSsMin", "=", "np", ".", "zeros", "(", "(", "varNumVox", ")", ",", "dtype", "=", "np", ".", "float32", ")", "# Vector for indices of models with minimum residuals:", "vecResSsMinIdx", "=", "np", ".", "zeros", "(", "(", "varNumVox", ")", ",", "dtype", "=", "np", ".", "int32", ")", "# Multiply L2 regularization factor with identity matrix:", "aryL2reg", "=", "np", ".", "multiply", "(", "np", ".", "eye", "(", "varNumBeta", ")", ",", "varL2reg", ")", ".", "astype", "(", "np", ".", "float32", ")", "# Reduce logging verbosity:", "os", ".", "environ", "[", "'TF_CPP_MIN_LOG_LEVEL'", "]", "=", "'2'", "# -------------------------------------------------------------------------", "# *** Prepare status indicator", "# We create a status indicator for the time consuming pRF model finding", "# algorithm. Number of steps of the status indicator:", "varStsStpSze", "=", "20", "# Vector with pRF values at which to give status feedback:", "vecStatPrf", "=", "np", ".", "linspace", "(", "0", ",", "(", "varNumMdls", "*", "varNumChnk", ")", ",", "num", "=", "(", "varStsStpSze", "+", "1", ")", ",", "endpoint", "=", "True", ")", "vecStatPrf", "=", "np", ".", "ceil", "(", "vecStatPrf", ")", "vecStatPrf", "=", "vecStatPrf", ".", "astype", "(", "int", ")", "# Vector with corresponding percentage values at which to give status", "# feedback:", "vecStatPrc", "=", "np", ".", "linspace", "(", "0", ",", "100", ",", "num", "=", "(", "varStsStpSze", "+", "1", ")", ",", "endpoint", "=", "True", ")", "vecStatPrc", "=", "np", ".", "ceil", "(", "vecStatPrc", ")", "vecStatPrc", "=", "vecStatPrc", ".", "astype", "(", "int", ")", "# Counter for status indicator:", "varCntSts01", "=", "0", "varCntSts02", "=", "0", "# -------------------------------------------------------------------------", "# *** Loop through chunks", "if", "lgcPrint", ":", "print", "(", "'------Run graph'", ")", "for", "idxChnk", "in", "range", "(", "varNumChnk", ")", ":", "if", "lgcPrint", ":", "print", "(", "(", "'---------Chunk: '", "+", "str", "(", "idxChnk", ")", ")", ")", "print", "(", "'lstPrfTc[0].shape'", ")", "print", "(", "lstPrfTc", "[", "0", "]", ".", "shape", ")", "# Define session:", "# objSess = tf.Session()", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ",", "tf", ".", "Session", "(", ")", "as", "objSess", ":", "# -----------------------------------------------------------------", "# *** Prepare queue", "if", "lgcPrint", ":", "print", "(", "'------Define computational graph, queue & session'", ")", "# Queue capacity:", "varCapQ", "=", "10", "# Dimensions of placeholder have to be determined outside of the", "# tensor object, otherwise the object on which the size is", "# calculated is loaded into GPU memory.", "varDim01", "=", "lstPrfTc", "[", "0", "]", ".", "shape", "[", 
"0", "]", "varDim02", "=", "lstPrfTc", "[", "0", "]", ".", "shape", "[", "1", "]", "# The queue:", "objQ", "=", "tf", ".", "FIFOQueue", "(", "capacity", "=", "varCapQ", ",", "dtypes", "=", "[", "tf", ".", "float32", "]", ",", "shapes", "=", "[", "(", "varDim01", ",", "varDim02", ")", "]", ")", "# Method for getting queue size:", "objSzeQ", "=", "objQ", ".", "size", "(", ")", "# Placeholder that is used to put design matrix on computational", "# graph:", "objPlcHld01", "=", "tf", ".", "placeholder", "(", "tf", ".", "float32", ",", "shape", "=", "[", "varDim01", ",", "varDim02", "]", ")", "# The enqueue operation that puts data on the graph.", "objEnQ", "=", "objQ", ".", "enqueue", "(", "[", "objPlcHld01", "]", ")", "# Number of threads that will be created:", "varNumThrd", "=", "1", "# The queue runner (places the enqueue operation on the queue?).", "objRunQ", "=", "tf", ".", "train", ".", "QueueRunner", "(", "objQ", ",", "[", "objEnQ", "]", "*", "varNumThrd", ")", "tf", ".", "train", ".", "add_queue_runner", "(", "objRunQ", ")", "# The tensor object that is retrieved from the queue. Functions", "# like placeholders for the data in the queue when defining the", "# graph.", "objDsng", "=", "objQ", ".", "dequeue", "(", ")", "# Coordinator needs to be initialised:", "objCoord", "=", "tf", ".", "train", ".", "Coordinator", "(", ")", "# -----------------------------------------------------------------", "# *** Fill queue", "# Buffer size (number of samples to put on queue before starting", "# execution of graph):", "varBuff", "=", "10", "# Define & run extra thread with graph that places data on queue:", "objThrd", "=", "threading", ".", "Thread", "(", "target", "=", "funcPlcIn", ")", "objThrd", ".", "setDaemon", "(", "True", ")", "objThrd", ".", "start", "(", ")", "# Stay in this while loop until the specified number of samples", "# (varBuffer) have been placed on the queue).", "varTmpSzeQ", "=", "0", "while", "varTmpSzeQ", "<", "varBuff", ":", "varTmpSzeQ", "=", "objSess", ".", "run", "(", "objSzeQ", ")", "# -----------------------------------------------------------------", "# *** Prepare & run the graph", "# Chunk of functional data:", "aryTmp01", "=", "np", ".", "copy", "(", "lstFunc", "[", "idxChnk", "]", ")", "with", "tf", ".", "device", "(", "'/gpu:0'", ")", ":", "objFunc", "=", "tf", ".", "Variable", "(", "aryTmp01", ")", "# Regularisation factor matrix:", "with", "tf", ".", "device", "(", "'/gpu:0'", ")", ":", "objL2reg", "=", "tf", ".", "Variable", "(", "aryL2reg", ")", "# The computational graph. Operation that solves matrix (in the", "# least squares sense), and calculates residuals along time", "# dimension. There are two versions: (1) The number of measurements", "# (e.g. volumes) is greater than or equal to the number of", "# predictors (betas). 
(2) The number of measurements is less than", "# the number of predictors.", "# (1) Number of measurements greater/equal to number of predictors:", "if", "np", ".", "greater_equal", "(", "varNumVol", ",", "varNumBeta", ")", ":", "objMatSlve", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "squared_difference", "(", "objFunc", ",", "tf", ".", "matmul", "(", "objDsng", ",", "tf", ".", "matmul", "(", "tf", ".", "matmul", "(", "tf", ".", "matrix_inverse", "(", "tf", ".", "add", "(", "tf", ".", "matmul", "(", "objDsng", ",", "objDsng", ",", "transpose_a", "=", "True", ",", "transpose_b", "=", "False", ")", ",", "objL2reg", ")", ")", ",", "objDsng", ",", "transpose_a", "=", "False", ",", "transpose_b", "=", "True", ")", ",", "objFunc", ")", ")", ",", ")", ",", "axis", "=", "0", ")", "# (2) Number of measurements less than number of predictors:", "else", ":", "objMatSlve", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "squared_difference", "(", "objFunc", ",", "tf", ".", "matmul", "(", "objDsng", ",", "tf", ".", "matmul", "(", "tf", ".", "matmul", "(", "objDsng", ",", "tf", ".", "matrix_inverse", "(", "tf", ".", "add", "(", "tf", ".", "matmul", "(", "objDsng", ",", "objDsng", ",", "transpose_a", "=", "False", ",", "transpose_b", "=", "True", ")", ",", "objL2reg", ")", ")", ",", "transpose_a", "=", "True", ",", "transpose_b", "=", "False", ")", ",", "objFunc", ")", ")", ",", ")", ",", "axis", "=", "0", ")", "# Variables need to be (re-)initialised:", "objSess", ".", "run", "(", "tf", ".", "global_variables_initializer", "(", ")", ")", "# Mark graph as read-only (would throw an error in case of memory", "# leak):", "objSess", ".", "graph", ".", "finalize", "(", ")", "# Index of first voxel in current chunk (needed to assign results):", "varChnkStr", "=", "int", "(", "vecIdxChnks", "[", "idxChnk", "]", ")", "# Index of last voxel in current chunk (needed to assign results):", "varChnkEnd", "=", "int", "(", "vecIdxChnks", "[", "(", "idxChnk", "+", "1", ")", "]", ")", "# Array for results of current chunk:", "aryTmpRes", "=", "np", ".", "zeros", "(", "(", "varNumMdls", ",", "lstFunc", "[", "idxChnk", "]", ".", "shape", "[", "1", "]", ")", ",", "dtype", "=", "np", ".", "float32", ")", "# Loop through models:", "for", "idxMdl", "in", "range", "(", "varNumMdls", ")", ":", "# Run main computational graph and put results in list:", "# varTme01 = time.time()", "aryTmpRes", "[", "idxMdl", ",", ":", "]", "=", "objSess", ".", "run", "(", "objMatSlve", ")", "# print(('---------Time for graph call: '", "# + str(time.time() - varTme01)))", "# Status indicator:", "if", "varCntSts02", "==", "vecStatPrf", "[", "varCntSts01", "]", ":", "# Number of elements on queue:", "varTmpSzeQ", "=", "objSess", ".", "run", "(", "objSzeQ", ")", "# Prepare status message:", "strStsMsg", "=", "(", "'---------Progress: '", "+", "str", "(", "vecStatPrc", "[", "varCntSts01", "]", ")", "+", "' % --- Number of elements on queue: '", "+", "str", "(", "varTmpSzeQ", ")", ")", "if", "lgcPrint", ":", "print", "(", "strStsMsg", ")", "# Only increment counter if the last value has not been", "# reached yet:", "if", "varCntSts01", "<", "varStsStpSze", ":", "varCntSts01", "=", "varCntSts01", "+", "int", "(", "1", ")", "# Increment status indicator counter:", "varCntSts02", "=", "varCntSts02", "+", "1", "# Stop threads.", "objCoord", ".", "request_stop", "(", ")", "# objSess.close()", "# Get indices of models with minimum residuals (minimum along", "# model-space) for current chunk:", "vecResSsMinIdx", "[", "varChnkStr", ":", "varChnkEnd", "]", 
"=", "np", ".", "argmin", "(", "aryTmpRes", ",", "axis", "=", "0", ")", "# Get minimum residuals of those models:", "vecResSsMin", "[", "varChnkStr", ":", "varChnkEnd", "]", "=", "np", ".", "min", "(", "aryTmpRes", ",", "axis", "=", "0", ")", "# -------------------------------------------------------------------------", "# *** Post-process results", "if", "lgcPrint", ":", "print", "(", "'------Post-processing results'", ")", "# Array for model parameters. At the moment, we have the indices of the", "# best fitting models, so we need an array that tells us what model", "# parameters these indices refer to.", "aryMdl", "=", "np", ".", "zeros", "(", "(", "varNumMdlsTtl", ",", "3", ")", ",", "dtype", "=", "np", ".", "float32", ")", "# Model parameter can be represented as float32 as well:", "vecMdlXpos", "=", "vecMdlXpos", ".", "astype", "(", "np", ".", "float32", ")", "vecMdlYpos", "=", "vecMdlYpos", ".", "astype", "(", "np", ".", "float32", ")", "vecMdlSd", "=", "vecMdlSd", ".", "astype", "(", "np", ".", "float32", ")", "# The first column is to contain model x positions:", "aryMdl", "[", ":", ",", "0", "]", "=", "np", ".", "repeat", "(", "vecMdlXpos", ",", "int", "(", "varNumY", "*", "varNumPrfSizes", ")", ")", "# The second column is to contain model y positions:", "aryMdl", "[", ":", ",", "1", "]", "=", "np", ".", "repeat", "(", "np", ".", "tile", "(", "vecMdlYpos", ",", "varNumX", ")", ",", "varNumPrfSizes", ")", "# The third column is to contain model pRF sizes:", "aryMdl", "[", ":", ",", "2", "]", "=", "np", ".", "tile", "(", "vecMdlSd", ",", "int", "(", "varNumX", "*", "varNumY", ")", ")", "# Earlier, we had removed models with a variance of zero. Thus, those", "# models were ignored and are not present in the results. We remove them", "# from the model-parameter-array:", "aryMdl", "=", "aryMdl", "[", "vecLgcVar", "]", "# Retrieve model parameters of 'winning' model for all voxels:", "vecBstXpos", "=", "aryMdl", "[", ":", ",", "0", "]", "[", "vecResSsMinIdx", "]", "vecBstYpos", "=", "aryMdl", "[", ":", ",", "1", "]", "[", "vecResSsMinIdx", "]", "vecBstSd", "=", "aryMdl", "[", ":", ",", "2", "]", "[", "vecResSsMinIdx", "]", "# Coefficient of determination (1 - ratio of (residual sum of squares by", "# total sum of squares)):", "vecBstR2", "=", "np", ".", "subtract", "(", "1.0", ",", "np", ".", "divide", "(", "vecResSsMin", ",", "vecSsTot", ")", ")", "# Output list:", "lstOut", "=", "[", "idxPrc", ",", "vecBstXpos", ",", "vecBstYpos", ",", "vecBstSd", ",", "vecBstR2", ",", "np", ".", "zeros", "(", "(", "varNumVox", ",", "(", "varNumBeta", ")", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "]", "queOut", ".", "put", "(", "lstOut", ")" ]
Find best pRF model for voxel time course. Parameters ---------- idxPrc : int Process ID of the process calling this function (for CPU multi-threading). In GPU version, this parameter is 0 (just one thread on CPU). vecMdlXpos : np.array 1D array with pRF model x positions. vecMdlYpos : np.array 1D array with pRF model y positions. vecMdlSd : np.array 1D array with pRF model sizes (SD of Gaussian). aryFunc : np.array 2D array with functional MRI data, with shape aryFunc[voxel, time]. aryPrfTc : np.array Array with pRF model time courses, with shape aryPrfTc[x-pos, y-pos, SD, motion-direction, time] varL2reg : float L2 regularisation factor for ridge regression. queOut : multiprocessing.queues.Queue Queue to put the results on. lgcPrint : boolean Whether print statements should be executed. Returns ------- lstOut : list List containing the following objects: idxPrc : int Process ID of the process calling this function (for CPU multi-threading). In GPU version, this parameter is 0. vecBstXpos : np.array 1D array with best fitting x-position for each voxel, with shape vecBstXpos[voxel]. vecBstYpos : np.array 1D array with best fitting y-position for each voxel, with shape vecBstYpos[voxel]. vecBstSd : np.array 1D array with best fitting pRF size for each voxel, with shape vecBstSd[voxel]. vecBstR2 : np.array 1D array with R2 value of 'winning' pRF model for each voxel, with shape vecBstR2[voxel]. dummy : np.array 2D array that is supposed to contain the beta values of 'winning' pRF models for each voxel, with shape aryBeta[voxel, beta]. AT THE MOMENT, CONTAINS EMPTY DUMMY ARRAY (np.zeros). Notes ----- Uses a queue that runs in a separate thread to put model time courses on the computational graph.
[ "Find", "best", "pRF", "model", "for", "voxel", "time", "course", "." ]
train
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/find_prf_gpu.py#L30-L586
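For orientation, a minimal invocation sketch for funcFindPrfGpu based only on the documented signature above. The model grid, array shapes, and L2 factor are invented toy values; the function's own module must additionally provide numpy, os, threading, and the TF 1.x queue/session API it uses internally.

# Hypothetical call of funcFindPrfGpu with toy data (shapes follow the
# docstring: aryFunc[voxel, time], aryPrfTc[x-pos, y-pos, SD, motion, time]).
import multiprocessing as mp
import numpy as np

vecMdlXpos = np.linspace(-4.0, 4.0, 5)   # 5 model x positions (toy)
vecMdlYpos = np.linspace(-4.0, 4.0, 5)   # 5 model y positions (toy)
vecMdlSd = np.linspace(0.5, 2.5, 3)      # 3 pRF sizes (toy)

aryFunc = np.random.randn(100, 200).astype(np.float32)         # 100 voxels, 200 volumes
aryPrfTc = np.random.randn(5, 5, 3, 4, 200).astype(np.float32)  # 4 motion directions

queOut = mp.Queue()
funcFindPrfGpu(0, vecMdlXpos, vecMdlYpos, vecMdlSd, aryFunc,
               aryPrfTc, 10.0, queOut, lgcPrint=False)

# The result list documented above is retrieved from the queue:
idxPrc, vecBstXpos, vecBstYpos, vecBstSd, vecBstR2, aryBeta = queOut.get()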
ecometrica/parawrap
parawrap.py
wrap
def wrap(text, width=70, **kwargs): """Wrap multiple paragraphs of text, returning a list of wrapped lines. Reformat the multiple paragraphs in 'text' so they fit in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See ParagraphWrapper class for available keyword args to customize wrapping behaviour. """ w = ParagraphWrapper(width=width, **kwargs) return w.wrap(text)
python
def wrap(text, width=70, **kwargs): """Wrap multiple paragraphs of text, returning a list of wrapped lines. Reformat the multiple paragraphs in 'text' so they fit in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See ParagraphWrapper class for available keyword args to customize wrapping behaviour. """ w = ParagraphWrapper(width=width, **kwargs) return w.wrap(text)
[ "def", "wrap", "(", "text", ",", "width", "=", "70", ",", "*", "*", "kwargs", ")", ":", "w", "=", "ParagraphWrapper", "(", "width", "=", "width", ",", "*", "*", "kwargs", ")", "return", "w", ".", "wrap", "(", "text", ")" ]
Wrap multiple paragraphs of text, returning a list of wrapped lines. Reformat the multiple paragraphs in 'text' so they fit in lines of no more than 'width' columns, and return a list of wrapped lines. By default, tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. See ParagraphWrapper class for available keyword args to customize wrapping behaviour.
[ "Wrap", "multiple", "paragraphs", "of", "text", "returning", "a", "list", "of", "wrapped", "lines", "." ]
train
https://github.com/ecometrica/parawrap/blob/6523763f58045b8a98bac24a9438062ea856e5ae/parawrap.py#L62-L73
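A short usage sketch for the module-level wrap() above; the sample text is invented.

import parawrap

text = ("This is the first paragraph and it is long enough to wrap.\n"
        "\n"
        "This is the second paragraph.")
for line in parawrap.wrap(text, width=30):
    print(line)
# An empty string in the returned list marks each paragraph boundary.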
ecometrica/parawrap
parawrap.py
fill
def fill(text, width=70, **kwargs): """Fill multiple paragraphs of text, returning a new string. Reformat multiple paragraphs in 'text' to fit in lines of no more than 'width' columns, and return a new string containing the entire wrapped text. As with wrap(), tabs are expanded and other whitespace characters converted to space. See ParagraphWrapper class for available keyword args to customize wrapping behaviour. """ w = ParagraphWrapper(width=width, **kwargs) return w.fill(text)
python
def fill(text, width=70, **kwargs): """Fill multiple paragraphs of text, returning a new string. Reformat multiple paragraphs in 'text' to fit in lines of no more than 'width' columns, and return a new string containing the entire wrapped text. As with wrap(), tabs are expanded and other whitespace characters converted to space. See ParagraphWrapper class for available keyword args to customize wrapping behaviour. """ w = ParagraphWrapper(width=width, **kwargs) return w.fill(text)
[ "def", "fill", "(", "text", ",", "width", "=", "70", ",", "*", "*", "kwargs", ")", ":", "w", "=", "ParagraphWrapper", "(", "width", "=", "width", ",", "*", "*", "kwargs", ")", "return", "w", ".", "fill", "(", "text", ")" ]
Fill multiple paragraphs of text, returning a new string. Reformat multiple paragraphs in 'text' to fit in lines of no more than 'width' columns, and return a new string containing the entire wrapped text. As with wrap(), tabs are expanded and other whitespace characters converted to space. See ParagraphWrapper class for available keyword args to customize wrapping behaviour.
[ "Fill", "multiple", "paragraphs", "of", "text", "returning", "a", "new", "string", "." ]
train
https://github.com/ecometrica/parawrap/blob/6523763f58045b8a98bac24a9438062ea856e5ae/parawrap.py#L76-L86
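And the equivalent with fill(), which returns one string instead of a list of lines (assuming ParagraphWrapper.fill joins the wrapped lines with newlines, as textwrap's fill does; that method is outside this excerpt).

import parawrap

print(parawrap.fill("First paragraph.\n\nSecond paragraph text.", width=20))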
ecometrica/parawrap
parawrap.py
ParagraphWrapper.split
def split(cls, text): """split(text : string) -> [string] Split 'text' into multiple paragraphs and return the paragraphs as a list. """ result = [line.strip('\n') for line in cls.parasep_re.split(text)] if result == ['', '']: result = [''] return result
python
def split(cls, text): """split(text : string) -> [string] Split 'text' into multiple paragraphs and return the paragraphs as a list. """ result = [line.strip('\n') for line in cls.parasep_re.split(text)] if result == ['', '']: result = [''] return result
[ "def", "split", "(", "cls", ",", "text", ")", ":", "result", "=", "[", "line", ".", "strip", "(", "'\\n'", ")", "for", "line", "in", "cls", ".", "parasep_re", ".", "split", "(", "text", ")", "]", "if", "result", "==", "[", "''", ",", "''", "]", ":", "result", "=", "[", "''", "]", "return", "result" ]
split(text : string) -> [string] Split 'text' into multiple paragraphs and return the paragraphs as a list.
[ "split", "(", "text", ":", "string", ")", "-", ">", "[", "string", "]" ]
train
https://github.com/ecometrica/parawrap/blob/6523763f58045b8a98bac24a9438062ea856e5ae/parawrap.py#L21-L30
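Since parasep_re itself is outside this excerpt, here is a sketch of the expected behaviour, assuming it matches runs of blank lines and that split carries a classmethod decorator in the full source:

from parawrap import ParagraphWrapper

parts = ParagraphWrapper.split("First paragraph.\n\nSecond paragraph.\n")
# -> ['First paragraph.', 'Second paragraph.']
# An input consisting only of a separator would first yield ['', ''],
# which the method normalises to [''].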
ecometrica/parawrap
parawrap.py
ParagraphWrapper.wrap
def wrap(self, text): """wrap(text : string) -> [string] Reformat the multiple paragraphs in 'text' so they fit in lines of no more than 'self.width' columns, and return a list of wrapped lines. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. """ lines = [] linewrap = partial(textwrap.TextWrapper.wrap, self) for para in self.split(text): lines.extend(linewrap(para)) lines.append('') # Add newline between paragraphs # Remove trailing newline lines = lines[:-1] return lines
python
def wrap(self, text): """wrap(text : string) -> [string] Reformat the multiple paragraphs in 'text' so they fit in lines of no more than 'self.width' columns, and return a list of wrapped lines. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space. """ lines = [] linewrap = partial(textwrap.TextWrapper.wrap, self) for para in self.split(text): lines.extend(linewrap(para)) lines.append('') # Add newline between paragraphs # Remove trailing newline lines = lines[:-1] return lines
[ "def", "wrap", "(", "self", ",", "text", ")", ":", "lines", "=", "[", "]", "linewrap", "=", "partial", "(", "textwrap", ".", "TextWrapper", ".", "wrap", ",", "self", ")", "for", "para", "in", "self", ".", "split", "(", "text", ")", ":", "lines", ".", "extend", "(", "linewrap", "(", "para", ")", ")", "lines", ".", "append", "(", "''", ")", "# Add newline between paragraphs", "# Remove trailing newline", "lines", "=", "lines", "[", ":", "-", "1", "]", "return", "lines" ]
wrap(text : string) -> [string] Reformat the multiple paragraphs in 'text' so they fit in lines of no more than 'self.width' columns, and return a list of wrapped lines. Tabs in 'text' are expanded with string.expandtabs(), and all other whitespace characters (including newline) are converted to space.
[ "wrap", "(", "text", ":", "string", ")", "-", ">", "[", "string", "]" ]
train
https://github.com/ecometrica/parawrap/blob/6523763f58045b8a98bac24a9438062ea856e5ae/parawrap.py#L32-L51
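A quick behavioural sketch of the method above (exact break points follow textwrap's rules, so the output shown is approximate):

from parawrap import ParagraphWrapper

w = ParagraphWrapper(width=25)
w.wrap("Alpha beta gamma delta epsilon.\n\nZeta eta.")
# -> ['Alpha beta gamma delta', 'epsilon.', '', 'Zeta eta.']

Note the design choice visible in the code: per-paragraph wrapping is delegated to the inherited textwrap.TextWrapper.wrap via functools.partial, so every standard TextWrapper option (initial_indent, break_long_words, and so on) applies unchanged to each paragraph.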
linkhub-sdk/popbill.py
popbill/faxService.py
FaxService.getSenderNumberMgtURL
def getSenderNumberMgtURL(self, CorpNum, UserID): """ Fax sender number management popup URL args CorpNum : member's business registration number UserID : member's Popbill ID return URL containing a 30-second security token raise PopbillException """ result = self._httpget('/FAX/?TG=SENDER', CorpNum, UserID) return result.url
python
def getSenderNumberMgtURL(self, CorpNum, UserID): """ Fax sender number management popup URL args CorpNum : member's business registration number UserID : member's Popbill ID return URL containing a 30-second security token raise PopbillException """ result = self._httpget('/FAX/?TG=SENDER', CorpNum, UserID) return result.url
[ "def", "getSenderNumberMgtURL", "(", "self", ",", "CorpNum", ",", "UserID", ")", ":", "result", "=", "self", ".", "_httpget", "(", "'/FAX/?TG=SENDER'", ",", "CorpNum", ",", "UserID", ")", "return", "result", ".", "url" ]
Fax sender number management popup URL args CorpNum : member's business registration number UserID : member's Popbill ID return URL containing a 30-second security token raise PopbillException
[ "팩스", "전송내역", "팝업", "URL", "args", "CorpNum", ":", "회원", "사업자번호", "UserID", ":", "회원", "팝빌아이디", "return", "30초", "보안", "토큰을", "포함한", "url", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L70-L81
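A hypothetical usage sketch for the FaxService methods in these records. The (LinkID, SecretKey) constructor, the IsTest sandbox flag, the PopbillException attribute names, and all credentials and business numbers below are assumptions modelled on common Popbill SDK conventions, not taken from this document.

from popbill import FaxService, PopbillException

# Placeholder credentials; constructor signature assumed.
faxService = FaxService("YOUR_LINKID", "YOUR_SECRETKEY")
faxService.IsTest = True  # sandbox flag, assumed convention

try:
    url = faxService.getSenderNumberMgtURL("1234567890", "testuser")
    print(url)
except PopbillException as pe:
    print(pe.code, pe.message)  # attribute names assumed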
linkhub-sdk/popbill.py
popbill/faxService.py
FaxService.getUnitCost
def getUnitCost(self, CorpNum): """ Check fax transmission unit cost args CorpNum : Popbill member's business registration number return transmission unit cost as int raise PopbillException """ result = self._httpget('/FAX/UnitCost', CorpNum) return int(result.unitCost)
python
def getUnitCost(self, CorpNum): """ Check fax transmission unit cost args CorpNum : Popbill member's business registration number return transmission unit cost as int raise PopbillException """ result = self._httpget('/FAX/UnitCost', CorpNum) return int(result.unitCost)
[ "def", "getUnitCost", "(", "self", ",", "CorpNum", ")", ":", "result", "=", "self", ".", "_httpget", "(", "'/FAX/UnitCost'", ",", "CorpNum", ")", "return", "int", "(", "result", ".", "unitCost", ")" ]
Check fax transmission unit cost args CorpNum : Popbill member's business registration number return transmission unit cost as int raise PopbillException
[ "팩스", "전송", "단가", "확인", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "return", "전송", "단가", "by", "float", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L83-L94
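Continuing the sketch above (note that despite the original docstring's mention of a float, the code returns int(result.unitCost)):

try:
    unitCost = faxService.getUnitCost("1234567890")
    print("Fax unit cost per page:", unitCost)
except PopbillException as pe:
    print(pe.code, pe.message)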
linkhub-sdk/popbill.py
popbill/faxService.py
FaxService.getFaxResult
def getFaxResult(self, CorpNum, ReceiptNum, UserID=None): """ Get fax transmission result args CorpNum : Popbill member's business registration number ReceiptNum : receipt number issued at the time of the transmission request UserID : Popbill member ID return fax transmission info as list raise PopbillException """ if ReceiptNum is None or len(ReceiptNum) != 18: raise PopbillException(-99999999, "Receipt number is invalid.") return self._httpget('/FAX/' + ReceiptNum, CorpNum, UserID)
python
def getFaxResult(self, CorpNum, ReceiptNum, UserID=None): """ Get fax transmission result args CorpNum : Popbill member's business registration number ReceiptNum : receipt number issued at the time of the transmission request UserID : Popbill member ID return fax transmission info as list raise PopbillException """ if ReceiptNum is None or len(ReceiptNum) != 18: raise PopbillException(-99999999, "Receipt number is invalid.") return self._httpget('/FAX/' + ReceiptNum, CorpNum, UserID)
[ "def", "getFaxResult", "(", "self", ",", "CorpNum", ",", "ReceiptNum", ",", "UserID", "=", "None", ")", ":", "if", "ReceiptNum", "==", "None", "or", "len", "(", "ReceiptNum", ")", "!=", "18", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"접수번호가 올바르지 않습니다.\")", "", "return", "self", ".", "_httpget", "(", "'/FAX/'", "+", "ReceiptNum", ",", "CorpNum", ",", "UserID", ")" ]
Get fax transmission result args CorpNum : Popbill member's business registration number ReceiptNum : receipt number issued at the time of the transmission request UserID : Popbill member ID return fax transmission info as list raise PopbillException
[ "팩스", "전송결과", "조회", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "ReceiptNum", ":", "전송요청시", "발급받은", "접수번호", "UserID", ":", "팝빌회원", "아이디", "return", "팩스전송정보", "as", "list", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L138-L153
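Looking up a result with the 18-character receipt number from the original send request; the receipt value and the result field names below are placeholders, since the response schema is not shown in these records.

try:
    results = faxService.getFaxResult("1234567890", "018012215061500001")
    for info in results:
        print(info.receiveNum, info.result)  # field names assumed
except PopbillException as pe:
    print(pe.code, pe.message)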
linkhub-sdk/popbill.py
popbill/faxService.py
FaxService.getFaxResultRN
def getFaxResultRN(self, CorpNum, RequestNum, UserID=None): """ Get fax transmission result args CorpNum : Popbill member's business registration number RequestNum : request number assigned at the time of the transmission request UserID : Popbill member ID return fax transmission info as list raise PopbillException """ if RequestNum is None or RequestNum == '': raise PopbillException(-99999999, "Request number was not entered.") return self._httpget('/FAX/Get/' + RequestNum, CorpNum, UserID)
python
def getFaxResultRN(self, CorpNum, RequestNum, UserID=None): """ Get fax transmission result args CorpNum : Popbill member's business registration number RequestNum : request number assigned at the time of the transmission request UserID : Popbill member ID return fax transmission info as list raise PopbillException """ if RequestNum is None or RequestNum == '': raise PopbillException(-99999999, "Request number was not entered.") return self._httpget('/FAX/Get/' + RequestNum, CorpNum, UserID)
[ "def", "getFaxResultRN", "(", "self", ",", "CorpNum", ",", "RequestNum", ",", "UserID", "=", "None", ")", ":", "if", "RequestNum", "==", "None", "or", "RequestNum", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"요청번호가 입력되지 않았습니다.\")", "", "return", "self", ".", "_httpget", "(", "'/FAX/Get/'", "+", "RequestNum", ",", "CorpNum", ",", "UserID", ")" ]
팩스 전송결과 조회 args CorpNum : 팝빌회원 사업자번호 RequestNum : 전송요청시 할당한 전송요청번호 UserID : 팝빌회원 아이디 return 팩스전송정보 as list raise PopbillException
[ "팩스", "전송결과", "조회", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "RequestNum", ":", "전송요청시", "할당한", "전송요청번호", "UserID", ":", "팝빌회원", "아이디", "return", "팩스전송정보", "as", "list", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L155-L170
linkhub-sdk/popbill.py
popbill/faxService.py
FaxService.sendFax
def sendFax(self, CorpNum, SenderNum, ReceiverNum, ReceiverName, FilePath, ReserveDT=None, UserID=None, SenderName=None, adsYN=False, title=None, RequestNum=None): """ 팩스 단건 전송 args CorpNum : 팝빌회원 사업자번호 SenderNum : 발신자 번호 ReceiverNum : 수신자 번호 ReceiverName : 수신자 명 FilePath : 발신 파일경로 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 SenderName : 발신자명 (동보전송용) adsYN : 광고팩스 여부 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException """ receivers = [] receivers.append(FaxReceiver(receiveNum=ReceiverNum, receiveName=ReceiverName) ) return self.sendFax_multi(CorpNum, SenderNum, receivers, FilePath, ReserveDT, UserID, SenderName, adsYN, title, RequestNum)
python
def sendFax(self, CorpNum, SenderNum, ReceiverNum, ReceiverName, FilePath, ReserveDT=None, UserID=None, SenderName=None, adsYN=False, title=None, RequestNum=None): """ 팩스 단건 전송 args CorpNum : 팝빌회원 사업자번호 SenderNum : 발신자 번호 ReceiverNum : 수신자 번호 ReceiverName : 수신자 명 FilePath : 발신 파일경로 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 SenderName : 발신자명 (동보전송용) adsYN : 광고팩스 여부 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException """ receivers = [] receivers.append(FaxReceiver(receiveNum=ReceiverNum, receiveName=ReceiverName) ) return self.sendFax_multi(CorpNum, SenderNum, receivers, FilePath, ReserveDT, UserID, SenderName, adsYN, title, RequestNum)
[ "def", "sendFax", "(", "self", ",", "CorpNum", ",", "SenderNum", ",", "ReceiverNum", ",", "ReceiverName", ",", "FilePath", ",", "ReserveDT", "=", "None", ",", "UserID", "=", "None", ",", "SenderName", "=", "None", ",", "adsYN", "=", "False", ",", "title", "=", "None", ",", "RequestNum", "=", "None", ")", ":", "receivers", "=", "[", "]", "receivers", ".", "append", "(", "FaxReceiver", "(", "receiveNum", "=", "ReceiverNum", ",", "receiveName", "=", "ReceiverName", ")", ")", "return", "self", ".", "sendFax_multi", "(", "CorpNum", ",", "SenderNum", ",", "receivers", ",", "FilePath", ",", "ReserveDT", ",", "UserID", ",", "SenderName", ",", "adsYN", ",", "title", ",", "RequestNum", ")" ]
팩스 단건 전송 args CorpNum : 팝빌회원 사업자번호 SenderNum : 발신자 번호 ReceiverNum : 수신자 번호 ReceiverName : 수신자 명 FilePath : 발신 파일경로 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 SenderName : 발신자명 (동보전송용) adsYN : 광고팩스 여부 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException
[ "팩스", "단건", "전송", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "SenderNum", ":", "발신자", "번호", "ReceiverNum", ":", "수신자", "번호", "ReceiverName", ":", "수신자", "명", "FilePath", ":", "발신", "파일경로", "ReserveDT", ":", "예약시간", "(", "형식", "yyyyMMddHHmmss", ")", "UserID", ":", "팝빌회원", "아이디", "SenderName", ":", "발신자명", "(", "동보전송용", ")", "adsYN", ":", "광고팩스", "여부", "title", ":", "팩스제목", "RequestNum", ":", "전송요청시", "할당한", "전송요청번호", "return", "접수번호", "(", "receiptNum", ")", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L206-L232
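A single-fax sketch built directly on the signature above; numbers, name, and file path are placeholders. Internally this just wraps the one receiver in a FaxReceiver and delegates to sendFax_multi.

receiptNum = faxService.sendFax(
    "1234567890",     # CorpNum (placeholder)
    "070-1111-2222",  # SenderNum
    "070-3333-4444",  # ReceiverNum
    "Receiver",       # ReceiverName
    "document.pdf",   # FilePath
    title="Fax Title",
)
print(receiptNum)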
linkhub-sdk/popbill.py
popbill/faxService.py
FaxService.sendFax_multi
def sendFax_multi(self, CorpNum, SenderNum, Receiver, FilePath, ReserveDT=None, UserID=None, SenderName=None, adsYN=False, title=None, RequestNum=None): """ 팩스 전송 args CorpNum : 팝빌회원 사업자번호 SenderNum : 발신자 번호 (동보전송용) Receiver : 수신자 번호(동보전송용) FilePath : 발신 파일경로 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 SenderName : 발신자명 (동보전송용) adsYN : 광고팩스 여부 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException """ if SenderNum == None or SenderNum == "": raise PopbillException(-99999999, "발신자 번호가 입력되지 않았습니다.") if Receiver == None: raise PopbillException(-99999999, "수신자 정보가 입력되지 않았습니다.") if not (type(Receiver) is str or type(Receiver) is FaxReceiver or type(Receiver) is list): raise PopbillException(-99999999, "'Receiver' argument type error. 'FaxReceiver' or List of 'FaxReceiver'.") if FilePath == None: raise PopbillException(-99999999, "발신 파일경로가 입력되지 않았습니다.") if not (type(FilePath) is str or type(FilePath) is list): raise PopbillException(-99999999, "발신 파일은 파일경로 또는 경로목록만 입력 가능합니다.") if type(FilePath) is list and (len(FilePath) < 1 or len(FilePath) > 20): raise PopbillException(-99999999, "파일은 1개 이상, 20개 까지 전송 가능합니다.") req = {"snd": SenderNum, "sndnm": SenderName, "fCnt": 1 if type(FilePath) is str else len(FilePath), "rcvs": [], "sndDT": None} if (type(Receiver) is str): Receiver = FaxReceiver(receiveNum=Receiver) if (type(Receiver) is FaxReceiver): Receiver = [Receiver] if adsYN: req['adsYN'] = True for r in Receiver: req['rcvs'].append({"rcv": r.receiveNum, "rcvnm": r.receiveName}) if ReserveDT != None: req['sndDT'] = ReserveDT if title != None: req['title'] = title if RequestNum != None: req['requestNum'] = RequestNum postData = self._stringtify(req) if (type(FilePath) is str): FilePath = [FilePath] files = [] for filePath in FilePath: with open(filePath, "rb") as f: files.append(File(fieldName='file', fileName=f.name, fileData=f.read()) ) result = self._httppost_files('/FAX', postData, files, CorpNum, UserID) return result.receiptNum
python
def sendFax_multi(self, CorpNum, SenderNum, Receiver, FilePath, ReserveDT=None, UserID=None, SenderName=None, adsYN=False, title=None, RequestNum=None): """ 팩스 전송 args CorpNum : 팝빌회원 사업자번호 SenderNum : 발신자 번호 (동보전송용) Receiver : 수신자 번호(동보전송용) FilePath : 발신 파일경로 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 SenderName : 발신자명 (동보전송용) adsYN : 광고팩스 여부 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException """ if SenderNum == None or SenderNum == "": raise PopbillException(-99999999, "발신자 번호가 입력되지 않았습니다.") if Receiver == None: raise PopbillException(-99999999, "수신자 정보가 입력되지 않았습니다.") if not (type(Receiver) is str or type(Receiver) is FaxReceiver or type(Receiver) is list): raise PopbillException(-99999999, "'Receiver' argument type error. 'FaxReceiver' or List of 'FaxReceiver'.") if FilePath == None: raise PopbillException(-99999999, "발신 파일경로가 입력되지 않았습니다.") if not (type(FilePath) is str or type(FilePath) is list): raise PopbillException(-99999999, "발신 파일은 파일경로 또는 경로목록만 입력 가능합니다.") if type(FilePath) is list and (len(FilePath) < 1 or len(FilePath) > 20): raise PopbillException(-99999999, "파일은 1개 이상, 20개 까지 전송 가능합니다.") req = {"snd": SenderNum, "sndnm": SenderName, "fCnt": 1 if type(FilePath) is str else len(FilePath), "rcvs": [], "sndDT": None} if (type(Receiver) is str): Receiver = FaxReceiver(receiveNum=Receiver) if (type(Receiver) is FaxReceiver): Receiver = [Receiver] if adsYN: req['adsYN'] = True for r in Receiver: req['rcvs'].append({"rcv": r.receiveNum, "rcvnm": r.receiveName}) if ReserveDT != None: req['sndDT'] = ReserveDT if title != None: req['title'] = title if RequestNum != None: req['requestNum'] = RequestNum postData = self._stringtify(req) if (type(FilePath) is str): FilePath = [FilePath] files = [] for filePath in FilePath: with open(filePath, "rb") as f: files.append(File(fieldName='file', fileName=f.name, fileData=f.read()) ) result = self._httppost_files('/FAX', postData, files, CorpNum, UserID) return result.receiptNum
[ "def", "sendFax_multi", "(", "self", ",", "CorpNum", ",", "SenderNum", ",", "Receiver", ",", "FilePath", ",", "ReserveDT", "=", "None", ",", "UserID", "=", "None", ",", "SenderName", "=", "None", ",", "adsYN", "=", "False", ",", "title", "=", "None", ",", "RequestNum", "=", "None", ")", ":", "if", "SenderNum", "==", "None", "or", "SenderNum", "==", "\"\"", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"발신자 번호가 입력되지 않았습니다.\")", "", "if", "Receiver", "==", "None", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"수신자 정보가 입력되지 않았습니다.\")", "", "if", "not", "(", "type", "(", "Receiver", ")", "is", "str", "or", "type", "(", "Receiver", ")", "is", "FaxReceiver", "or", "type", "(", "Receiver", ")", "is", "list", ")", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"'Receiver' argument type error. 'FaxReceiver' or List of 'FaxReceiver'.\"", ")", "if", "FilePath", "==", "None", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"발신 파일경로가 입력되지 않았습니다.\")", "", "if", "not", "(", "type", "(", "FilePath", ")", "is", "str", "or", "type", "(", "FilePath", ")", "is", "list", ")", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"발신 파일은 파일경로 또는 경로목록만 입력 가능합니다.\")", "", "if", "type", "(", "FilePath", ")", "is", "list", "and", "(", "len", "(", "FilePath", ")", "<", "1", "or", "len", "(", "FilePath", ")", ">", "20", ")", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"파일은 1개 이상, 20개 까지 전송 가능합니다.\")", "", "req", "=", "{", "\"snd\"", ":", "SenderNum", ",", "\"sndnm\"", ":", "SenderName", ",", "\"fCnt\"", ":", "1", "if", "type", "(", "FilePath", ")", "is", "str", "else", "len", "(", "FilePath", ")", ",", "\"rcvs\"", ":", "[", "]", ",", "\"sndDT\"", ":", "None", "}", "if", "(", "type", "(", "Receiver", ")", "is", "str", ")", ":", "Receiver", "=", "FaxReceiver", "(", "receiveNum", "=", "Receiver", ")", "if", "(", "type", "(", "Receiver", ")", "is", "FaxReceiver", ")", ":", "Receiver", "=", "[", "Receiver", "]", "if", "adsYN", ":", "req", "[", "'adsYN'", "]", "=", "True", "for", "r", "in", "Receiver", ":", "req", "[", "'rcvs'", "]", ".", "append", "(", "{", "\"rcv\"", ":", "r", ".", "receiveNum", ",", "\"rcvnm\"", ":", "r", ".", "receiveName", "}", ")", "if", "ReserveDT", "!=", "None", ":", "req", "[", "'sndDT'", "]", "=", "ReserveDT", "if", "title", "!=", "None", ":", "req", "[", "'title'", "]", "=", "title", "if", "RequestNum", "!=", "None", ":", "req", "[", "'requestNum'", "]", "=", "RequestNum", "postData", "=", "self", ".", "_stringtify", "(", "req", ")", "if", "(", "type", "(", "FilePath", ")", "is", "str", ")", ":", "FilePath", "=", "[", "FilePath", "]", "files", "=", "[", "]", "for", "filePath", "in", "FilePath", ":", "with", "open", "(", "filePath", ",", "\"rb\"", ")", "as", "f", ":", "files", ".", "append", "(", "File", "(", "fieldName", "=", "'file'", ",", "fileName", "=", "f", ".", "name", ",", "fileData", "=", "f", ".", "read", "(", ")", ")", ")", "result", "=", "self", ".", "_httppost_files", "(", "'/FAX'", ",", "postData", ",", "files", ",", "CorpNum", ",", "UserID", ")", "return", "result", ".", "receiptNum" ]
팩스 전송 args CorpNum : 팝빌회원 사업자번호 SenderNum : 발신자 번호 (동보전송용) Receiver : 수신자 번호(동보전송용) FilePath : 발신 파일경로 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 SenderName : 발신자명 (동보전송용) adsYN : 광고팩스 여부 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException
[ "팩스", "전송", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "SenderNum", ":", "발신자", "번호", "(", "동보전송용", ")", "Receiver", ":", "수신자", "번호", "(", "동보전송용", ")", "FilePath", ":", "발신", "파일경로", "ReserveDT", ":", "예약시간", "(", "형식", "yyyyMMddHHmmss", ")", "UserID", ":", "팝빌회원", "아이디", "SenderName", ":", "발신자명", "(", "동보전송용", ")", "adsYN", ":", "광고팩스", "여부", "title", ":", "팩스제목", "RequestNum", ":", "전송요청시", "할당한", "전송요청번호", "return", "접수번호", "(", "receiptNum", ")", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L234-L306
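A broadcast sketch for sendFax_multi exercising the list-of-FaxReceiver form and multiple files (the validation above caps the file list at 20 entries); all values are placeholders.

from popbill import FaxReceiver

receivers = [
    FaxReceiver(receiveNum="070-3333-4444", receiveName="Receiver A"),
    FaxReceiver(receiveNum="070-5555-6666", receiveName="Receiver B"),
]
receiptNum = faxService.sendFax_multi(
    "1234567890", "070-1111-2222", receivers,
    ["page1.pdf", "page2.pdf"],  # fCnt is derived from this list's length
    title="Broadcast Fax",
)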
linkhub-sdk/popbill.py
popbill/faxService.py
FaxService.resendFax
def resendFax(self, CorpNum, ReceiptNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None, UserID=None, title=None, RequestNum=None): """ 팩스 단건 전송 args CorpNum : 팝빌회원 사업자번호 ReceiptNum : 팩스 접수번호 SenderNum : 발신자 번호 SenderName : 발신자명 ReceiverNum : 수신번호 ReceiverName : 수신자명 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException """ receivers = None if ReceiverNum != "" or ReceiverName != "": receivers = [] receivers.append(FaxReceiver(receiveNum=ReceiverNum, receiveName=ReceiverName) ) return self.resendFax_multi(CorpNum, ReceiptNum, SenderNum, SenderName, receivers, ReserveDT, UserID, title, RequestNum)
python
def resendFax(self, CorpNum, ReceiptNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None, UserID=None, title=None, RequestNum=None): """ 팩스 단건 전송 args CorpNum : 팝빌회원 사업자번호 ReceiptNum : 팩스 접수번호 SenderNum : 발신자 번호 SenderName : 발신자명 ReceiverNum : 수신번호 ReceiverName : 수신자명 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException """ receivers = None if ReceiverNum != "" or ReceiverName != "": receivers = [] receivers.append(FaxReceiver(receiveNum=ReceiverNum, receiveName=ReceiverName) ) return self.resendFax_multi(CorpNum, ReceiptNum, SenderNum, SenderName, receivers, ReserveDT, UserID, title, RequestNum)
[ "def", "resendFax", "(", "self", ",", "CorpNum", ",", "ReceiptNum", ",", "SenderNum", ",", "SenderName", ",", "ReceiverNum", ",", "ReceiverName", ",", "ReserveDT", "=", "None", ",", "UserID", "=", "None", ",", "title", "=", "None", ",", "RequestNum", "=", "None", ")", ":", "receivers", "=", "None", "if", "ReceiverNum", "!=", "\"\"", "or", "ReceiverName", "!=", "\"\"", ":", "receivers", "=", "[", "]", "receivers", ".", "append", "(", "FaxReceiver", "(", "receiveNum", "=", "ReceiverNum", ",", "receiveName", "=", "ReceiverName", ")", ")", "return", "self", ".", "resendFax_multi", "(", "CorpNum", ",", "ReceiptNum", ",", "SenderNum", ",", "SenderName", ",", "receivers", ",", "ReserveDT", ",", "UserID", ",", "title", ",", "RequestNum", ")" ]
팩스 단건 전송 args CorpNum : 팝빌회원 사업자번호 ReceiptNum : 팩스 접수번호 SenderNum : 발신자 번호 SenderName : 발신자명 ReceiverNum : 수신번호 ReceiverName : 수신자명 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException
[ "팩스", "단건", "전송", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "ReceiptNum", ":", "팩스", "접수번호", "SenderNum", ":", "발신자", "번호", "SenderName", ":", "발신자명", "ReceiverNum", ":", "수신번호", "ReceiverName", ":", "수신자명", "ReserveDT", ":", "예약시간", "(", "형식", "yyyyMMddHHmmss", ")", "UserID", ":", "팝빌회원", "아이디", "title", ":", "팩스제목", "RequestNum", ":", "전송요청시", "할당한", "전송요청번호", "return", "접수번호", "(", "receiptNum", ")", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L308-L335
linkhub-sdk/popbill.py
popbill/faxService.py
FaxService.resendFaxRN
def resendFaxRN(self, CorpNum, OrgRequestNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None, UserID=None, title=None, RequestNum=None): """ 팩스 단건 전송 args CorpNum : 팝빌회원 사업자번호 OrgRequestNum : 원본 팩스 전송시 할당한 전송요청번호 ReceiptNum : 팩스 접수번호 SenderNum : 발신자 번호 SenderName : 발신자명 ReceiverNum : 수신번호 ReceiverName : 수신자명 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException """ receivers = None if ReceiverNum != "" or ReceiverName != "": receivers = [] receivers.append(FaxReceiver(receiveNum=ReceiverNum, receiveName=ReceiverName) ) return self.resendFaxRN_multi(CorpNum, OrgRequestNum, SenderNum, SenderName, receivers, ReserveDT, UserID, title, RequestNum)
python
def resendFaxRN(self, CorpNum, OrgRequestNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None, UserID=None, title=None, RequestNum=None): """ 팩스 단건 전송 args CorpNum : 팝빌회원 사업자번호 OrgRequestNum : 원본 팩스 전송시 할당한 전송요청번호 ReceiptNum : 팩스 접수번호 SenderNum : 발신자 번호 SenderName : 발신자명 ReceiverNum : 수신번호 ReceiverName : 수신자명 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException """ receivers = None if ReceiverNum != "" or ReceiverName != "": receivers = [] receivers.append(FaxReceiver(receiveNum=ReceiverNum, receiveName=ReceiverName) ) return self.resendFaxRN_multi(CorpNum, OrgRequestNum, SenderNum, SenderName, receivers, ReserveDT, UserID, title, RequestNum)
[ "def", "resendFaxRN", "(", "self", ",", "CorpNum", ",", "OrgRequestNum", ",", "SenderNum", ",", "SenderName", ",", "ReceiverNum", ",", "ReceiverName", ",", "ReserveDT", "=", "None", ",", "UserID", "=", "None", ",", "title", "=", "None", ",", "RequestNum", "=", "None", ")", ":", "receivers", "=", "None", "if", "ReceiverNum", "!=", "\"\"", "or", "ReceiverName", "!=", "\"\"", ":", "receivers", "=", "[", "]", "receivers", ".", "append", "(", "FaxReceiver", "(", "receiveNum", "=", "ReceiverNum", ",", "receiveName", "=", "ReceiverName", ")", ")", "return", "self", ".", "resendFaxRN_multi", "(", "CorpNum", ",", "OrgRequestNum", ",", "SenderNum", ",", "SenderName", ",", "receivers", ",", "ReserveDT", ",", "UserID", ",", "title", ",", "RequestNum", ")" ]
팩스 단건 전송 args CorpNum : 팝빌회원 사업자번호 OrgRequestNum : 원본 팩스 전송시 할당한 전송요청번호 ReceiptNum : 팩스 접수번호 SenderNum : 발신자 번호 SenderName : 발신자명 ReceiverNum : 수신번호 ReceiverName : 수신자명 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException
[ "팩스", "단건", "전송", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "OrgRequestNum", ":", "원본", "팩스", "전송시", "할당한", "전송요청번호", "ReceiptNum", ":", "팩스", "접수번호", "SenderNum", ":", "발신자", "번호", "SenderName", ":", "발신자명", "ReceiverNum", ":", "수신번호", "ReceiverName", ":", "수신자명", "ReserveDT", ":", "예약시간", "(", "형식", "yyyyMMddHHmmss", ")", "UserID", ":", "팝빌회원", "아이디", "title", ":", "팩스제목", "RequestNum", ":", "전송요청시", "할당한", "전송요청번호", "return", "접수번호", "(", "receiptNum", ")", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L389-L417
linkhub-sdk/popbill.py
popbill/faxService.py
FaxService.resendFaxRN_multi
def resendFaxRN_multi(self, CorpNum, OrgRequestNum, SenderNum, SenderName, Receiver, ReserveDT=None, UserID=None, title=None, RequestNum=None): """ 팩스 전송 args CorpNum : 팝빌회원 사업자번호 OrgRequestNum : 원본 팩스 전송시 할당한 전송요청번호 SenderNum : 발신자 번호 SenderName : 발신자명 Receiver : 수신자정보 배열 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException """ req = {} if not OrgRequestNum: raise PopbillException(-99999999, "원본 팩스 요청번호가 입력되지 않았습니다") if SenderNum != "": req['snd'] = SenderNum if SenderName != "": req['sndnm'] = SenderName if ReserveDT != None: req['sndDT'] = ReserveDT if title != None: req['title'] = title if RequestNum != None: req['requestNum'] = RequestNum if Receiver != None: req['rcvs'] = [] if (type(Receiver) is str): Receiver = FaxReceiver(receiveNum=Receiver) if (type(Receiver) is FaxReceiver): Receiver = [Receiver] for r in Receiver: req['rcvs'].append({"rcv": r.receiveNum, "rcvnm": r.receiveName}) postData = self._stringtify(req) return self._httppost('/FAX/Resend/' + OrgRequestNum, postData, CorpNum, UserID).receiptNum
python
def resendFaxRN_multi(self, CorpNum, OrgRequestNum, SenderNum, SenderName, Receiver, ReserveDT=None, UserID=None, title=None, RequestNum=None): """ 팩스 전송 args CorpNum : 팝빌회원 사업자번호 OrgRequestNum : 원본 팩스 전송시 할당한 전송요청번호 SenderNum : 발신자 번호 SenderName : 발신자명 Receiver : 수신자정보 배열 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException """ req = {} if not OrgRequestNum: raise PopbillException(-99999999, "원본 팩스 요청번호가 입력되지 않았습니다") if SenderNum != "": req['snd'] = SenderNum if SenderName != "": req['sndnm'] = SenderName if ReserveDT != None: req['sndDT'] = ReserveDT if title != None: req['title'] = title if RequestNum != None: req['requestNum'] = RequestNum if Receiver != None: req['rcvs'] = [] if (type(Receiver) is str): Receiver = FaxReceiver(receiveNum=Receiver) if (type(Receiver) is FaxReceiver): Receiver = [Receiver] for r in Receiver: req['rcvs'].append({"rcv": r.receiveNum, "rcvnm": r.receiveName}) postData = self._stringtify(req) return self._httppost('/FAX/Resend/' + OrgRequestNum, postData, CorpNum, UserID).receiptNum
[ "def", "resendFaxRN_multi", "(", "self", ",", "CorpNum", ",", "OrgRequestNum", ",", "SenderNum", ",", "SenderName", ",", "Receiver", ",", "ReserveDT", "=", "None", ",", "UserID", "=", "None", ",", "title", "=", "None", ",", "RequestNum", "=", "None", ")", ":", "req", "=", "{", "}", "if", "not", "OrgRequestNum", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"원본 팩스 요청번호가 입력되지 않았습니다\")", "", "if", "SenderNum", "!=", "\"\"", ":", "req", "[", "'snd'", "]", "=", "SenderNum", "if", "SenderName", "!=", "\"\"", ":", "req", "[", "'sndnm'", "]", "=", "SenderName", "if", "ReserveDT", "!=", "None", ":", "req", "[", "'sndDT'", "]", "=", "ReserveDT", "if", "title", "!=", "None", ":", "req", "[", "'title'", "]", "=", "title", "if", "RequestNum", "!=", "None", ":", "req", "[", "'requestNum'", "]", "=", "RequestNum", "if", "Receiver", "!=", "None", ":", "req", "[", "'rcvs'", "]", "=", "[", "]", "if", "(", "type", "(", "Receiver", ")", "is", "str", ")", ":", "Receiver", "=", "FaxReceiver", "(", "receiveNum", "=", "Receiver", ")", "if", "(", "type", "(", "Receiver", ")", "is", "FaxReceiver", ")", ":", "Receiver", "=", "[", "Receiver", "]", "for", "r", "in", "Receiver", ":", "req", "[", "'rcvs'", "]", ".", "append", "(", "{", "\"rcv\"", ":", "r", ".", "receiveNum", ",", "\"rcvnm\"", ":", "r", ".", "receiveName", "}", ")", "postData", "=", "self", ".", "_stringtify", "(", "req", ")", "return", "self", ".", "_httppost", "(", "'/FAX/Resend/'", "+", "OrgRequestNum", ",", "postData", ",", "CorpNum", ",", "UserID", ")", ".", "receiptNum" ]
팩스 전송 args CorpNum : 팝빌회원 사업자번호 OrgRequestNum : 원본 팩스 전송시 할당한 전송요청번호 SenderNum : 발신자 번호 SenderName : 발신자명 Receiver : 수신자정보 배열 ReserveDT : 예약시간(형식 yyyyMMddHHmmss) UserID : 팝빌회원 아이디 title : 팩스제목 RequestNum : 전송요청시 할당한 전송요청번호 return 접수번호 (receiptNum) raise PopbillException
[ "팩스", "전송", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "OrgRequestNum", ":", "원본", "팩스", "전송시", "할당한", "전송요청번호", "SenderNum", ":", "발신자", "번호", "SenderName", ":", "발신자명", "Receiver", ":", "수신자정보", "배열", "ReserveDT", ":", "예약시간", "(", "형식", "yyyyMMddHHmmss", ")", "UserID", ":", "팝빌회원", "아이디", "title", ":", "팩스제목", "RequestNum", ":", "전송요청시", "할당한", "전송요청번호", "return", "접수번호", "(", "receiptNum", ")", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L419-L469
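A resend-by-request-number sketch: empty sender strings keep the original sender and Receiver=None skips the rcvs block entirely, per the conditional request-building above, which reuses the original recipients (all identifiers are placeholders).

receiptNum = faxService.resendFaxRN_multi(
    "1234567890",      # CorpNum
    "20190117-REQ01",  # OrgRequestNum of the original send
    "", "",            # keep original sender number/name
    None,              # Receiver=None -> resend to the original recipients
    RequestNum="20190117-REQ02",
)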
linkhub-sdk/popbill.py
popbill/faxService.py
FaxService.getPreviewURL
def getPreviewURL(self, CorpNum, ReceiptNum, UserID): """ 팩스 발신번호 목록 확인 args CorpNum : 팝빌회원 사업자번호 UserID : 팝빌회원 아이디 return 처리결과. list of SenderNumber raise PopbillException """ return self._httpget('/FAX/Preview/' + ReceiptNum, CorpNum, UserID).url
python
def getPreviewURL(self, CorpNum, ReceiptNum, UserID): """ 팩스 발신번호 목록 확인 args CorpNum : 팝빌회원 사업자번호 UserID : 팝빌회원 아이디 return 처리결과. list of SenderNumber raise PopbillException """ return self._httpget('/FAX/Preview/' + ReceiptNum, CorpNum, UserID).url
[ "def", "getPreviewURL", "(", "self", ",", "CorpNum", ",", "ReceiptNum", ",", "UserID", ")", ":", "return", "self", ".", "_httpget", "(", "'/FAX/Preview/'", "+", "ReceiptNum", ",", "CorpNum", ",", "UserID", ")", ".", "url" ]
팩스 발신번호 목록 확인 args CorpNum : 팝빌회원 사업자번호 UserID : 팝빌회원 아이디 return 처리결과. list of SenderNumber raise PopbillException
[ "팩스", "발신번호", "목록", "확인", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "UserID", ":", "팝빌회원", "아이디", "return", "처리결과", ".", "list", "of", "SenderNumber", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/faxService.py#L483-L493
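Despite its copy-pasted docstring about sender-number lists, getPreviewURL returns the URL of a preview popup for the given receipt number; note that UserID has no default here and must be supplied. A sketch with placeholder values:

url = faxService.getPreviewURL("1234567890", "018012716000000035", "testkorea")
print(url)  # open this URL in a browser to preview the fax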
curious-containers/cc-core
cc_core/commons/shell.py
prepare_outdir
def prepare_outdir(outdir): """ Creates the output directory if not existing. If outdir is None or if no output_files are provided nothing happens. :param outdir: The output directory to create. """ if outdir: outdir = os.path.expanduser(outdir) if not os.path.isdir(outdir): try: os.makedirs(outdir) except os.error as e: raise JobExecutionError('Failed to create outdir "{}".\n{}'.format(outdir, str(e)))
python
def prepare_outdir(outdir): """ Creates the output directory if not existing. If outdir is None or if no output_files are provided nothing happens. :param outdir: The output directory to create. """ if outdir: outdir = os.path.expanduser(outdir) if not os.path.isdir(outdir): try: os.makedirs(outdir) except os.error as e: raise JobExecutionError('Failed to create outdir "{}".\n{}'.format(outdir, str(e)))
[ "def", "prepare_outdir", "(", "outdir", ")", ":", "if", "outdir", ":", "outdir", "=", "os", ".", "path", ".", "expanduser", "(", "outdir", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "outdir", ")", ":", "try", ":", "os", ".", "makedirs", "(", "outdir", ")", "except", "os", ".", "error", "as", "e", ":", "raise", "JobExecutionError", "(", "'Failed to create outdir \"{}\".\\n{}'", ".", "format", "(", "outdir", ",", "str", "(", "e", ")", ")", ")" ]
Creates the output directory if not existing. If outdir is None or if no output_files are provided nothing happens. :param outdir: The output directory to create.
[ "Creates", "the", "output", "directory", "if", "not", "existing", ".", "If", "outdir", "is", "None", "or", "if", "no", "output_files", "are", "provided", "nothing", "happens", "." ]
train
https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/shell.py#L15-L28
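A short sketch of prepare_outdir: a tilde path is expanded, a missing directory is created with os.makedirs, and an OS-level failure is re-raised as JobExecutionError; None is a documented no-op. The path below is a placeholder.

from cc_core.commons.shell import prepare_outdir

prepare_outdir("~/results/run-01")  # expanded and created if missing
prepare_outdir(None)                # nothing happens, as documented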
linkhub-sdk/popbill.py
popbill/htCashbillService.py
HTCashbillService.requestJob
def requestJob(self, CorpNum, Type, SDate, EDate, UserID=None): """ 수집 요청 args CorpNum : 팝빌회원 사업자번호 Type : 문서형태, SELL-매출, BUY-매입, SDate : 시작일자, 표시형식(yyyyMMdd) EDate : 종료일자, 표시형식(yyyyMMdd) UserID : 팝빌회원 아이디 return 작업아이디 (jobID) raise PopbillException """ if Type == None or Type == '': raise PopbillException(-99999999, "문서형태이 입력되지 않았습니다.") if SDate == None or SDate == '': raise PopbillException(-99999999, "시작일자가 입력되지 않았습니다.") if EDate == None or EDate == '': raise PopbillException(-99999999, "종료일자가 입력되지 않았습니다.") uri = '/HomeTax/Cashbill/' + Type uri += '?SDate=' + SDate uri += '&EDate=' + EDate return self._httppost(uri, "", CorpNum, UserID).jobID
python
def requestJob(self, CorpNum, Type, SDate, EDate, UserID=None): """ 수집 요청 args CorpNum : 팝빌회원 사업자번호 Type : 문서형태, SELL-매출, BUY-매입, SDate : 시작일자, 표시형식(yyyyMMdd) EDate : 종료일자, 표시형식(yyyyMMdd) UserID : 팝빌회원 아이디 return 작업아이디 (jobID) raise PopbillException """ if Type == None or Type == '': raise PopbillException(-99999999, "문서형태이 입력되지 않았습니다.") if SDate == None or SDate == '': raise PopbillException(-99999999, "시작일자가 입력되지 않았습니다.") if EDate == None or EDate == '': raise PopbillException(-99999999, "종료일자가 입력되지 않았습니다.") uri = '/HomeTax/Cashbill/' + Type uri += '?SDate=' + SDate uri += '&EDate=' + EDate return self._httppost(uri, "", CorpNum, UserID).jobID
[ "def", "requestJob", "(", "self", ",", "CorpNum", ",", "Type", ",", "SDate", ",", "EDate", ",", "UserID", "=", "None", ")", ":", "if", "Type", "==", "None", "or", "Type", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"문서형태이 입력되지 않았습니다.\")", "", "if", "SDate", "==", "None", "or", "SDate", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"시작일자가 입력되지 않았습니다.\")", "", "if", "EDate", "==", "None", "or", "EDate", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"종료일자가 입력되지 않았습니다.\")", "", "uri", "=", "'/HomeTax/Cashbill/'", "+", "Type", "uri", "+=", "'?SDate='", "+", "SDate", "uri", "+=", "'&EDate='", "+", "EDate", "return", "self", ".", "_httppost", "(", "uri", ",", "\"\"", ",", "CorpNum", ",", "UserID", ")", ".", "jobID" ]
수집 요청 args CorpNum : 팝빌회원 사업자번호 Type : 문서형태, SELL-매출, BUY-매입, SDate : 시작일자, 표시형식(yyyyMMdd) EDate : 종료일자, 표시형식(yyyyMMdd) UserID : 팝빌회원 아이디 return 작업아이디 (jobID) raise PopbillException
[ "수집", "요청", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "Type", ":", "문서형태", "SELL", "-", "매출", "BUY", "-", "매입", "SDate", ":", "시작일자", "표시형식", "(", "yyyyMMdd", ")", "EDate", ":", "종료일자", "표시형식", "(", "yyyyMMdd", ")", "UserID", ":", "팝빌회원", "아이디", "return", "작업아이디", "(", "jobID", ")", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htCashbillService.py#L41-L68
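A collection-request sketch for the Hometax cash-receipt service, assuming an HTCashbillService constructed like the other Popbill services (credentials and values are placeholders); the returned jobID is what the later search and summary calls consume.

from popbill import HTCashbillService

htCashbillService = HTCashbillService("LinkID", "SecretKey")  # placeholder credentials
jobID = htCashbillService.requestJob("1234567890", "SELL", "20190101", "20190131")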
linkhub-sdk/popbill.py
popbill/htCashbillService.py
HTCashbillService.search
def search(self, CorpNum, JobID, TradeType, TradeUsage, Page, PerPage, Order, UserID=None): """ 수집 결과 조회 args CorpNum : 팝빌회원 사업자번호 JobID : 작업아이디 TradeType : 문서형태 배열, N-일반 현금영수증, C-취소 현금영수증 TradeUsage : 거래구분 배열, P-소등공제용, C-지출증빙용 Page : 페이지 번호 PerPage : 페이지당 목록 개수, 최대 1000개 Order : 정렬 방향, D-내림차순, A-오름차순 UserID : 팝빌회원 아이디 return 수집 결과 정보 raise PopbillException """ if JobID == None or len(JobID) != 18: raise PopbillException(-99999999, "작업아이디(jobID)가 올바르지 않습니다.") uri = '/HomeTax/Cashbill/' + JobID uri += '?TradeType=' + ','.join(TradeType) uri += '&TradeUsage=' + ','.join(TradeUsage) uri += '&Page=' + str(Page) uri += '&PerPage=' + str(PerPage) uri += '&Order=' + Order return self._httpget(uri, CorpNum, UserID)
python
def search(self, CorpNum, JobID, TradeType, TradeUsage, Page, PerPage, Order, UserID=None): """ 수집 결과 조회 args CorpNum : 팝빌회원 사업자번호 JobID : 작업아이디 TradeType : 문서형태 배열, N-일반 현금영수증, C-취소 현금영수증 TradeUsage : 거래구분 배열, P-소등공제용, C-지출증빙용 Page : 페이지 번호 PerPage : 페이지당 목록 개수, 최대 1000개 Order : 정렬 방향, D-내림차순, A-오름차순 UserID : 팝빌회원 아이디 return 수집 결과 정보 raise PopbillException """ if JobID == None or len(JobID) != 18: raise PopbillException(-99999999, "작업아이디(jobID)가 올바르지 않습니다.") uri = '/HomeTax/Cashbill/' + JobID uri += '?TradeType=' + ','.join(TradeType) uri += '&TradeUsage=' + ','.join(TradeUsage) uri += '&Page=' + str(Page) uri += '&PerPage=' + str(PerPage) uri += '&Order=' + Order return self._httpget(uri, CorpNum, UserID)
[ "def", "search", "(", "self", ",", "CorpNum", ",", "JobID", ",", "TradeType", ",", "TradeUsage", ",", "Page", ",", "PerPage", ",", "Order", ",", "UserID", "=", "None", ")", ":", "if", "JobID", "==", "None", "or", "len", "(", "JobID", ")", "!=", "18", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"작업아이디(jobID)가 올바르지 않습니다.\")", "", "uri", "=", "'/HomeTax/Cashbill/'", "+", "JobID", "uri", "+=", "'?TradeType='", "+", "','", ".", "join", "(", "TradeType", ")", "uri", "+=", "'&TradeUsage='", "+", "','", ".", "join", "(", "TradeUsage", ")", "uri", "+=", "'&Page='", "+", "str", "(", "Page", ")", "uri", "+=", "'&PerPage='", "+", "str", "(", "PerPage", ")", "uri", "+=", "'&Order='", "+", "Order", "return", "self", ".", "_httpget", "(", "uri", ",", "CorpNum", ",", "UserID", ")" ]
수집 결과 조회 args CorpNum : 팝빌회원 사업자번호 JobID : 작업아이디 TradeType : 문서형태 배열, N-일반 현금영수증, C-취소 현금영수증 TradeUsage : 거래구분 배열, P-소등공제용, C-지출증빙용 Page : 페이지 번호 PerPage : 페이지당 목록 개수, 최대 1000개 Order : 정렬 방향, D-내림차순, A-오름차순 UserID : 팝빌회원 아이디 return 수집 결과 정보 raise PopbillException
[ "수집", "결과", "조회", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "JobID", ":", "작업아이디", "TradeType", ":", "문서형태", "배열", "N", "-", "일반", "현금영수증", "C", "-", "취소", "현금영수증", "TradeUsage", ":", "거래구분", "배열", "P", "-", "소등공제용", "C", "-", "지출증빙용", "Page", ":", "페이지", "번호", "PerPage", ":", "페이지당", "목록", "개수", "최대", "1000개", "Order", ":", "정렬", "방향", "D", "-", "내림차순", "A", "-", "오름차순", "UserID", ":", "팝빌회원", "아이디", "return", "수집", "결과", "정보", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htCashbillService.py#L99-L125
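A results-query sketch using the 18-character jobID from requestJob; TradeType and TradeUsage are lists of single-letter codes, since the method joins them with commas into the query string. The attribute read from the response is illustrative.

response = htCashbillService.search(
    "1234567890", jobID,
    ["N", "C"],    # normal and cancelled cash receipts
    ["P", "C"],    # income deduction and expense evidence
    1, 1000, "D",  # first page, max page size, descending
)
print(response.total)  # illustrative field name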
linkhub-sdk/popbill.py
popbill/htCashbillService.py
HTCashbillService.summary
def summary(self, CorpNum, JobID, TradeType, TradeUsage, UserID=None): """ 수집 결과 요약정보 조회 args CorpNum : 팝빌회원 사업자번호 JobID : 작업아이디 TradeType : 문서형태 배열, N-일반 현금영수증, C-취소 현금영수증 TradeUsage : 거래구분 배열, P-소등공제용, C-지출증빙용 UserID : 팝빌회원 아이디 return 수집 결과 요약정보 raise PopbillException """ if JobID == None or len(JobID) != 18: raise PopbillException(-99999999, "작업아이디(jobID)가 올바르지 않습니다.") uri = '/HomeTax/Cashbill/' + JobID + '/Summary' uri += '?TradeType=' + ','.join(TradeType) uri += '&TradeUsage=' + ','.join(TradeUsage) return self._httpget(uri, CorpNum, UserID)
python
def summary(self, CorpNum, JobID, TradeType, TradeUsage, UserID=None): """ 수집 결과 요약정보 조회 args CorpNum : 팝빌회원 사업자번호 JobID : 작업아이디 TradeType : 문서형태 배열, N-일반 현금영수증, C-취소 현금영수증 TradeUsage : 거래구분 배열, P-소등공제용, C-지출증빙용 UserID : 팝빌회원 아이디 return 수집 결과 요약정보 raise PopbillException """ if JobID == None or len(JobID) != 18: raise PopbillException(-99999999, "작업아이디(jobID)가 올바르지 않습니다.") uri = '/HomeTax/Cashbill/' + JobID + '/Summary' uri += '?TradeType=' + ','.join(TradeType) uri += '&TradeUsage=' + ','.join(TradeUsage) return self._httpget(uri, CorpNum, UserID)
[ "def", "summary", "(", "self", ",", "CorpNum", ",", "JobID", ",", "TradeType", ",", "TradeUsage", ",", "UserID", "=", "None", ")", ":", "if", "JobID", "==", "None", "or", "len", "(", "JobID", ")", "!=", "18", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"작업아이디(jobID)가 올바르지 않습니다.\")", "", "uri", "=", "'/HomeTax/Cashbill/'", "+", "JobID", "+", "'/Summary'", "uri", "+=", "'?TradeType='", "+", "','", ".", "join", "(", "TradeType", ")", "uri", "+=", "'&TradeUsage='", "+", "','", ".", "join", "(", "TradeUsage", ")", "return", "self", ".", "_httpget", "(", "uri", ",", "CorpNum", ",", "UserID", ")" ]
수집 결과 요약정보 조회 args CorpNum : 팝빌회원 사업자번호 JobID : 작업아이디 TradeType : 문서형태 배열, N-일반 현금영수증, C-취소 현금영수증 TradeUsage : 거래구분 배열, P-소등공제용, C-지출증빙용 UserID : 팝빌회원 아이디 return 수집 결과 요약정보 raise PopbillException
[ "수집", "결과", "요약정보", "조회", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "JobID", ":", "작업아이디", "TradeType", ":", "문서형태", "배열", "N", "-", "일반", "현금영수증", "C", "-", "취소", "현금영수증", "TradeUsage", ":", "거래구분", "배열", "P", "-", "소등공제용", "C", "-", "지출증빙용", "UserID", ":", "팝빌회원", "아이디", "return", "수집", "결과", "요약정보", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htCashbillService.py#L127-L147
linkhub-sdk/popbill.py
popbill/htCashbillService.py
HTCashbillService.registDeptUser
def registDeptUser(self, CorpNum, DeptUserID, DeptUserPWD, UserID=None): """ 홈택스 현금영수증 부서사용자 계정 등록 args CorpNum : 팝빌회원 사업자번호 DeptUserID : 홈택스 부서사용자 계정아이디 DeptUserPWD : 홈택스 부서사용자 계정비밀번호 UserID : 팝빌회원 아이디 return 처리결과. consist of code and message raise PopbillException """ if DeptUserID == None or len(DeptUserID) == 0: raise PopbillException(-99999999, "홈택스 부서사용자 계정 아이디가 입력되지 않았습니다.") if DeptUserPWD == None or len(DeptUserPWD) == 0: raise PopbillException(-99999999, "홈택스 부서사용자 계정 비밀번호가 입력되지 않았습니다.") req = {} req["id"] = DeptUserID req["pwd"] = DeptUserPWD postData = self._stringtify(req) return self._httppost("/HomeTax/Cashbill/DeptUser", postData, CorpNum, UserID)
python
def registDeptUser(self, CorpNum, DeptUserID, DeptUserPWD, UserID=None): """ 홈택스 현금영수증 부서사용자 계정 등록 args CorpNum : 팝빌회원 사업자번호 DeptUserID : 홈택스 부서사용자 계정아이디 DeptUserPWD : 홈택스 부서사용자 계정비밀번호 UserID : 팝빌회원 아이디 return 처리결과. consist of code and message raise PopbillException """ if DeptUserID == None or len(DeptUserID) == 0: raise PopbillException(-99999999, "홈택스 부서사용자 계정 아이디가 입력되지 않았습니다.") if DeptUserPWD == None or len(DeptUserPWD) == 0: raise PopbillException(-99999999, "홈택스 부서사용자 계정 비밀번호가 입력되지 않았습니다.") req = {} req["id"] = DeptUserID req["pwd"] = DeptUserPWD postData = self._stringtify(req) return self._httppost("/HomeTax/Cashbill/DeptUser", postData, CorpNum, UserID)
[ "def", "registDeptUser", "(", "self", ",", "CorpNum", ",", "DeptUserID", ",", "DeptUserPWD", ",", "UserID", "=", "None", ")", ":", "if", "DeptUserID", "==", "None", "or", "len", "(", "DeptUserID", ")", "==", "0", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"홈택스 부서사용자 계정 아이디가 입력되지 않았습니다.\")", "", "if", "DeptUserPWD", "==", "None", "or", "len", "(", "DeptUserPWD", ")", "==", "0", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"홈택스 부서사용자 계정 비밀번호가 입력되지 않았습니다.\")", "", "req", "=", "{", "}", "req", "[", "\"id\"", "]", "=", "DeptUserID", "req", "[", "\"pwd\"", "]", "=", "DeptUserPWD", "postData", "=", "self", ".", "_stringtify", "(", "req", ")", "return", "self", ".", "_httppost", "(", "\"/HomeTax/Cashbill/DeptUser\"", ",", "postData", ",", "CorpNum", ",", "UserID", ")" ]
홈택스 현금영수증 부서사용자 계정 등록 args CorpNum : 팝빌회원 사업자번호 DeptUserID : 홈택스 부서사용자 계정아이디 DeptUserPWD : 홈택스 부서사용자 계정비밀번호 UserID : 팝빌회원 아이디 return 처리결과. consist of code and message raise PopbillException
[ "홈택스", "현금영수증", "부서사용자", "계정", "등록", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "DeptUserID", ":", "홈택스", "부서사용자", "계정아이디", "DeptUserPWD", ":", "홈택스", "부서사용자", "계정비밀번호", "UserID", ":", "팝빌회원", "아이디", "return", "처리결과", ".", "consist", "of", "code", "and", "message", "raise", "PopbillException" ]
train
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htCashbillService.py#L211-L235
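Registering the Hometax department-user credentials that the collection jobs authenticate with; the ID and password below are placeholders, and the response fields follow the "code and message" shape the docstring names.

result = htCashbillService.registDeptUser("1234567890", "dept_user_id", "dept_user_pwd")
print(result.code, result.message)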
olivier-m/rafter
rafter/contrib/schematics/helpers.py
model_node
def model_node(**kwargs): """ Decorates a ``schematics.Model`` class to add it as a field of type ``schematic.types.ModelType``. Keyword arguments are passed to ``schematic.types.ModelType``. Example: .. code-block:: python :emphasize-lines: 8,13 from schematics import Model, types from rafter.contrib.schematics.helpers import model_node class MyModel(Model): name = types.StringType() @model_node() class options(Model): status = types.IntType() # With arguments and another name @model_node(serialized_name='extra', required=True) class _extra(Model): test = types.StringType() """ kwargs.setdefault('default', {}) def decorator(model): return types.ModelType(model, **kwargs) return decorator
python
def model_node(**kwargs): """ Decorates a ``schematics.Model`` class to add it as a field of type ``schematic.types.ModelType``. Keyword arguments are passed to ``schematic.types.ModelType``. Example: .. code-block:: python :emphasize-lines: 8,13 from schematics import Model, types from rafter.contrib.schematics.helpers import model_node class MyModel(Model): name = types.StringType() @model_node() class options(Model): status = types.IntType() # With arguments and another name @model_node(serialized_name='extra', required=True) class _extra(Model): test = types.StringType() """ kwargs.setdefault('default', {}) def decorator(model): return types.ModelType(model, **kwargs) return decorator
[ "def", "model_node", "(", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'default'", ",", "{", "}", ")", "def", "decorator", "(", "model", ")", ":", "return", "types", ".", "ModelType", "(", "model", ",", "*", "*", "kwargs", ")", "return", "decorator" ]
Decorates a ``schematics.Model`` class to add it as a field of type ``schematic.types.ModelType``. Keyword arguments are passed to ``schematic.types.ModelType``. Example: .. code-block:: python :emphasize-lines: 8,13 from schematics import Model, types from rafter.contrib.schematics.helpers import model_node class MyModel(Model): name = types.StringType() @model_node() class options(Model): status = types.IntType() # With arguments and another name @model_node(serialized_name='extra', required=True) class _extra(Model): test = types.StringType()
[ "Decorates", "a", "schematics", ".", "Model", "class", "to", "add", "it", "as", "a", "field", "of", "type", "schematic", ".", "types", ".", "ModelType", "." ]
train
https://github.com/olivier-m/rafter/blob/aafcf8fd019f24abcf519307c4484cc6b4697c04/rafter/contrib/schematics/helpers.py#L11-L44
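A quick sketch confirming what model_node produces: the decorated inner class becomes a types.ModelType field, and the injected default={} keeps validation happy when the node is omitted (built on the docstring's own example; the exact primitive output is an assumption).

from schematics import Model, types
from rafter.contrib.schematics.helpers import model_node

class MyModel(Model):
    name = types.StringType()

    @model_node()
    class options(Model):
        status = types.IntType()

m = MyModel({'name': 'demo'})
m.validate()             # no error: options falls back to its {} default
print(m.to_primitive())  # expect something like {'name': 'demo', 'options': {...}}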
curious-containers/cc-core
cc_core/commons/files.py
for_each_file
def for_each_file(base_dir, func): """ Calls func(filename) for every file under base_dir. :param base_dir: A directory containing files :param func: The function to call with every file. """ for dir_path, _, file_names in os.walk(base_dir): for filename in file_names: func(os.path.join(dir_path, filename))
python
def for_each_file(base_dir, func): """ Calls func(filename) for every file under base_dir. :param base_dir: A directory containing files :param func: The function to call with every file. """ for dir_path, _, file_names in os.walk(base_dir): for filename in file_names: func(os.path.join(dir_path, filename))
[ "def", "for_each_file", "(", "base_dir", ",", "func", ")", ":", "for", "dir_path", ",", "_", ",", "file_names", "in", "os", ".", "walk", "(", "base_dir", ")", ":", "for", "filename", "in", "file_names", ":", "func", "(", "os", ".", "path", ".", "join", "(", "dir_path", ",", "filename", ")", ")" ]
Calls func(filename) for every file under base_dir. :param base_dir: A directory containing files :param func: The function to call with every file.
[ "Calls", "func", "(", "filename", ")", "for", "every", "file", "under", "base_dir", "." ]
train
https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/files.py#L142-L152
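A sketch pairing for_each_file with a small callback, e.g. printing the size of every file under a placeholder directory; note that the walk is recursive via os.walk.

import os
from cc_core.commons.files import for_each_file

def show_size(path):
    print(path, os.path.getsize(path))

for_each_file('/tmp/data', show_size)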
curious-containers/cc-core
cc_core/commons/files.py
make_file_read_only
def make_file_read_only(file_path): """ Removes the write permissions for the given file for owner, groups and others. :param file_path: The file whose privileges are revoked. :raise FileNotFoundError: If the given file does not exist. """ old_permissions = os.stat(file_path).st_mode os.chmod(file_path, old_permissions & ~WRITE_PERMISSIONS)
python
def make_file_read_only(file_path): """ Removes the write permissions for the given file for owner, groups and others. :param file_path: The file whose privileges are revoked. :raise FileNotFoundError: If the given file does not exist. """ old_permissions = os.stat(file_path).st_mode os.chmod(file_path, old_permissions & ~WRITE_PERMISSIONS)
[ "def", "make_file_read_only", "(", "file_path", ")", ":", "old_permissions", "=", "os", ".", "stat", "(", "file_path", ")", ".", "st_mode", "os", ".", "chmod", "(", "file_path", ",", "old_permissions", "&", "~", "WRITE_PERMISSIONS", ")" ]
Removes the write permissions for the given file for owner, groups and others. :param file_path: The file whose privileges are revoked. :raise FileNotFoundError: If the given file does not exist.
[ "Removes", "the", "write", "permissions", "for", "the", "given", "file", "for", "owner", "groups", "and", "others", "." ]
train
https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/files.py#L155-L163
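make_file_read_only clears the write bits through the module-level WRITE_PERMISSIONS mask; combined with for_each_file from the same module, it can lock an entire output tree (the directory path is a placeholder).

from cc_core.commons.files import for_each_file, make_file_read_only

for_each_file('/tmp/data', make_file_read_only)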
MSchnei/pyprf_feature
pyprf_feature/analysis/load_config.py
load_config
def load_config(strCsvCnfg, lgcTest=False, lgcPrint=True): """ Load py_pRF_mapping config file. Parameters ---------- strCsvCnfg : string Absolute file path of config file. lgcTest : Boolean Whether this is a test (pytest). If yes, absolute path of this function will be prepended to config file paths. lgcPrint : Boolean Print config parameters? Returns ------- dicCnfg : dict Dictionary containing parameter names (as keys) and parameter values (as values). For example, `dicCnfg['varTr']` contains a float, such as `2.94`. """ # Dictionary with config information: dicCnfg = {} # Open file with parameter configuration: # fleConfig = open(strCsvCnfg, 'r') with open(strCsvCnfg, 'r') as fleConfig: # Read file with ROI information: csvIn = csv.reader(fleConfig, delimiter='\n', skipinitialspace=True) # Loop through csv object to fill list with csv data: for lstTmp in csvIn: # Skip comments (i.e. lines starting with '#') and empty lines. # Note: Indexing the list (i.e. lstTmp[0][0]) does not work for # empty lines. However, if the first condition is no fullfilled # (i.e. line is empty and 'if lstTmp' evaluates to false), the # second logical test (after the 'and') is not actually carried # out. if lstTmp and not (lstTmp[0][0] == '#'): # Name of current parameter (e.g. 'varTr'): strParamKey = lstTmp[0].split(' = ')[0] # print(strParamKey) # Current parameter value (e.g. '2.94'): strParamVlu = lstTmp[0].split(' = ')[1] # print(strParamVlu) # Put paramter name (key) and value (item) into dictionary: dicCnfg[strParamKey] = strParamVlu # Are model parameters in cartesian or polar coordinates? # set either pol (polar) or crt (cartesian) dicCnfg['strKwCrd'] = ast.literal_eval(dicCnfg['strKwCrd']) if lgcPrint: print('---Model coordinates are in: ' + str(dicCnfg['strKwCrd'])) # Number of x- or radial positions to model: dicCnfg['varNum1'] = int(dicCnfg['varNum1']) # Number of y- or angular positions to model: dicCnfg['varNum2'] = int(dicCnfg['varNum2']) if lgcPrint: if dicCnfg['strKwCrd'] == 'crt': print('---Number of x-positions to model: ' + str(dicCnfg['varNum1'])) print('---Number of y-positions to model: ' + str(dicCnfg['varNum2'])) elif dicCnfg['strKwCrd'] == 'pol': print('---Number of radial positions to model: ' + str(dicCnfg['varNum1'])) print('---Number of angular positions to model: ' + str(dicCnfg['varNum2'])) # Number of pRF sizes to model: dicCnfg['varNumPrfSizes'] = int(dicCnfg['varNumPrfSizes']) if lgcPrint: print('---Number of pRF sizes to model: ' + str(dicCnfg['varNumPrfSizes'])) # Extent of visual space from centre of the screen in negative x-direction # (i.e. from the fixation point to the left end of the screen) in degrees # of visual angle. dicCnfg['varExtXmin'] = float(dicCnfg['varExtXmin']) if lgcPrint: print('---Extent of visual space in negative x-direction: ' + str(dicCnfg['varExtXmin'])) # Extent of visual space from centre of the screen in positive x-direction # (i.e. from the fixation point to the right end of the screen) in degrees # of visual angle. dicCnfg['varExtXmax'] = float(dicCnfg['varExtXmax']) if lgcPrint: print('---Extent of visual space in positive x-direction: ' + str(dicCnfg['varExtXmax'])) # Extent of visual space from centre of the screen in negative y-direction # (i.e. from the fixation point to the lower end of the screen) in degrees # of visual angle. 
dicCnfg['varExtYmin'] = float(dicCnfg['varExtYmin']) if lgcPrint: print('---Extent of visual space in negative y-direction: ' + str(dicCnfg['varExtYmin'])) # Extent of visual space from centre of the screen in positive y-direction # (i.e. from the fixation point to the upper end of the screen) in degrees # of visual angle. dicCnfg['varExtYmax'] = float(dicCnfg['varExtYmax']) if lgcPrint: print('---Extent of visual space in positive y-direction: ' + str(dicCnfg['varExtYmax'])) # Minimum pRF model size (standard deviation of 2D Gaussian) [degrees of # visual angle]: dicCnfg['varPrfStdMin'] = float(dicCnfg['varPrfStdMin']) if lgcPrint: print('---Minimum pRF model size: ' + str(dicCnfg['varPrfStdMin'])) # Maximum pRF model size (standard deviation of 2D Gaussian) [degrees of # visual angle]: dicCnfg['varPrfStdMax'] = float(dicCnfg['varPrfStdMax']) if lgcPrint: print('---Maximum pRF model size: ' + str(dicCnfg['varPrfStdMax'])) # Volume TR of input data [s]: dicCnfg['varTr'] = float(dicCnfg['varTr']) if lgcPrint: print('---Volume TR of input data [s]: ' + str(dicCnfg['varTr'])) # Voxel resolution of fMRI data [mm]: dicCnfg['varVoxRes'] = float(dicCnfg['varVoxRes']) if lgcPrint: print('---Voxel resolution of fMRI data [mm]: ' + str(dicCnfg['varVoxRes'])) # Number of fMRI volumes and png files to load: dicCnfg['varNumVol'] = int(dicCnfg['varNumVol']) if lgcPrint: print('---Total number of fMRI volumes and png files: ' + str(dicCnfg['varNumVol'])) # Extent of temporal smoothing for fMRI data and pRF time course models # [standard deviation of the Gaussian kernel, in seconds]: # same temporal smoothing will be applied to pRF model time courses dicCnfg['varSdSmthTmp'] = float(dicCnfg['varSdSmthTmp']) if lgcPrint: print('---Extent of temporal smoothing (Gaussian SD in [s]): ' + str(dicCnfg['varSdSmthTmp'])) # Number of processes to run in parallel: dicCnfg['varPar'] = int(dicCnfg['varPar']) if lgcPrint: print('---Number of processes to run in parallel: ' + str(dicCnfg['varPar'])) # Size of space model in which the pRF models are # created (x- and y-dimension). dicCnfg['tplVslSpcSze'] = tuple([int(dicCnfg['varVslSpcSzeX']), int(dicCnfg['varVslSpcSzeY'])]) if lgcPrint: print('---Size of visual space model (x & y): ' + str(dicCnfg['tplVslSpcSze'])) # Path(s) of functional data: dicCnfg['lstPathNiiFunc'] = ast.literal_eval(dicCnfg['lstPathNiiFunc']) if lgcPrint: print('---Path(s) of functional data:') for strTmp in dicCnfg['lstPathNiiFunc']: print(' ' + str(strTmp)) # Path of mask (to restrict pRF model finding): dicCnfg['strPathNiiMask'] = ast.literal_eval(dicCnfg['strPathNiiMask']) if lgcPrint: print('---Path of mask (to restrict pRF model finding):') print(' ' + str(dicCnfg['strPathNiiMask'])) # Output basename: dicCnfg['strPathOut'] = ast.literal_eval(dicCnfg['strPathOut']) if lgcPrint: print('---Output basename:') print(' ' + str(dicCnfg['strPathOut'])) # Which version to use for pRF finding. 'numpy' or 'cython' for pRF finding # on CPU, 'gpu' for using GPU. dicCnfg['strVersion'] = ast.literal_eval(dicCnfg['strVersion']) if lgcPrint: print('---Version (numpy, cython, or gpu): ' + str(dicCnfg['strVersion'])) # Create pRF time course models? dicCnfg['lgcCrteMdl'] = (dicCnfg['lgcCrteMdl'] == 'True') if lgcPrint: print('---Create pRF time course models: ' + str(dicCnfg['lgcCrteMdl'])) # Path to npy file with pRF time course models (to save or laod). Without # file extension. 
dicCnfg['strPathMdl'] = ast.literal_eval(dicCnfg['strPathMdl']) if lgcPrint: print('---Path to npy file with pRF time course models (to save ' + 'or load):') print(' ' + str(dicCnfg['strPathMdl'])) # switch to determine which hrf functions should be used # 1: canonical, 2: can and temp derivative, 3: can, temp and spat deriv dicCnfg['switchHrfSet'] = ast.literal_eval(dicCnfg['switchHrfSet']) if lgcPrint: print('---Switch to determine which hrf functions should be used: ' + str(dicCnfg['switchHrfSet'])) # should model fitting be based on k-fold cross-validation? # if not, set to 1 dicCnfg['varNumXval'] = ast.literal_eval(dicCnfg['varNumXval']) if lgcPrint: print('---Model fitting will have this number of folds for xval: ' + str(dicCnfg['varNumXval'])) # If we create new pRF time course models, the following parameters have to # be provided: if dicCnfg['lgcCrteMdl']: # Name of the npy that holds spatial info about conditions dicCnfg['strSptExpInf'] = ast.literal_eval(dicCnfg['strSptExpInf']) if lgcPrint: print('---Path to npy file with spatial condition info: ') print(' ' + str(dicCnfg['strSptExpInf'])) # Name of the npy that holds temporal info about conditions dicCnfg['strTmpExpInf'] = ast.literal_eval(dicCnfg['strTmpExpInf']) if lgcPrint: print('---Path to npy file with temporal condition info: ') print(' ' + str(dicCnfg['strTmpExpInf'])) # Factor by which time courses and HRF will be upsampled for the # convolutions dicCnfg['varTmpOvsmpl'] = ast.literal_eval(dicCnfg['varTmpOvsmpl']) if lgcPrint: print('---Factor by which time courses and HRF will be upsampled: ' + str(dicCnfg['varTmpOvsmpl'])) # Is this a test? if lgcTest: # Prepend absolute path of this file to config file paths: dicCnfg['strPathNiiMask'] = (strDir + dicCnfg['strPathNiiMask']) dicCnfg['strPathOut'] = (strDir + dicCnfg['strPathOut']) dicCnfg['strPathMdl'] = (strDir + dicCnfg['strPathMdl']) dicCnfg['strSptExpInf'] = (strDir + dicCnfg['strSptExpInf']) dicCnfg['strTmpExpInf'] = (strDir + dicCnfg['strTmpExpInf']) # Loop through functional runs: varNumRun = len(dicCnfg['lstPathNiiFunc']) for idxRun in range(varNumRun): dicCnfg['lstPathNiiFunc'][idxRun] = ( strDir + dicCnfg['lstPathNiiFunc'][idxRun] ) return dicCnfg
python
def load_config(strCsvCnfg, lgcTest=False, lgcPrint=True): """ Load py_pRF_mapping config file. Parameters ---------- strCsvCnfg : string Absolute file path of config file. lgcTest : Boolean Whether this is a test (pytest). If yes, absolute path of this function will be prepended to config file paths. lgcPrint : Boolean Print config parameters? Returns ------- dicCnfg : dict Dictionary containing parameter names (as keys) and parameter values (as values). For example, `dicCnfg['varTr']` contains a float, such as `2.94`. """ # Dictionary with config information: dicCnfg = {} # Open file with parameter configuration: # fleConfig = open(strCsvCnfg, 'r') with open(strCsvCnfg, 'r') as fleConfig: # Read file with ROI information: csvIn = csv.reader(fleConfig, delimiter='\n', skipinitialspace=True) # Loop through csv object to fill list with csv data: for lstTmp in csvIn: # Skip comments (i.e. lines starting with '#') and empty lines. # Note: Indexing the list (i.e. lstTmp[0][0]) does not work for # empty lines. However, if the first condition is no fullfilled # (i.e. line is empty and 'if lstTmp' evaluates to false), the # second logical test (after the 'and') is not actually carried # out. if lstTmp and not (lstTmp[0][0] == '#'): # Name of current parameter (e.g. 'varTr'): strParamKey = lstTmp[0].split(' = ')[0] # print(strParamKey) # Current parameter value (e.g. '2.94'): strParamVlu = lstTmp[0].split(' = ')[1] # print(strParamVlu) # Put paramter name (key) and value (item) into dictionary: dicCnfg[strParamKey] = strParamVlu # Are model parameters in cartesian or polar coordinates? # set either pol (polar) or crt (cartesian) dicCnfg['strKwCrd'] = ast.literal_eval(dicCnfg['strKwCrd']) if lgcPrint: print('---Model coordinates are in: ' + str(dicCnfg['strKwCrd'])) # Number of x- or radial positions to model: dicCnfg['varNum1'] = int(dicCnfg['varNum1']) # Number of y- or angular positions to model: dicCnfg['varNum2'] = int(dicCnfg['varNum2']) if lgcPrint: if dicCnfg['strKwCrd'] == 'crt': print('---Number of x-positions to model: ' + str(dicCnfg['varNum1'])) print('---Number of y-positions to model: ' + str(dicCnfg['varNum2'])) elif dicCnfg['strKwCrd'] == 'pol': print('---Number of radial positions to model: ' + str(dicCnfg['varNum1'])) print('---Number of angular positions to model: ' + str(dicCnfg['varNum2'])) # Number of pRF sizes to model: dicCnfg['varNumPrfSizes'] = int(dicCnfg['varNumPrfSizes']) if lgcPrint: print('---Number of pRF sizes to model: ' + str(dicCnfg['varNumPrfSizes'])) # Extent of visual space from centre of the screen in negative x-direction # (i.e. from the fixation point to the left end of the screen) in degrees # of visual angle. dicCnfg['varExtXmin'] = float(dicCnfg['varExtXmin']) if lgcPrint: print('---Extent of visual space in negative x-direction: ' + str(dicCnfg['varExtXmin'])) # Extent of visual space from centre of the screen in positive x-direction # (i.e. from the fixation point to the right end of the screen) in degrees # of visual angle. dicCnfg['varExtXmax'] = float(dicCnfg['varExtXmax']) if lgcPrint: print('---Extent of visual space in positive x-direction: ' + str(dicCnfg['varExtXmax'])) # Extent of visual space from centre of the screen in negative y-direction # (i.e. from the fixation point to the lower end of the screen) in degrees # of visual angle. 
dicCnfg['varExtYmin'] = float(dicCnfg['varExtYmin']) if lgcPrint: print('---Extent of visual space in negative y-direction: ' + str(dicCnfg['varExtYmin'])) # Extent of visual space from centre of the screen in positive y-direction # (i.e. from the fixation point to the upper end of the screen) in degrees # of visual angle. dicCnfg['varExtYmax'] = float(dicCnfg['varExtYmax']) if lgcPrint: print('---Extent of visual space in positive y-direction: ' + str(dicCnfg['varExtYmax'])) # Minimum pRF model size (standard deviation of 2D Gaussian) [degrees of # visual angle]: dicCnfg['varPrfStdMin'] = float(dicCnfg['varPrfStdMin']) if lgcPrint: print('---Minimum pRF model size: ' + str(dicCnfg['varPrfStdMin'])) # Maximum pRF model size (standard deviation of 2D Gaussian) [degrees of # visual angle]: dicCnfg['varPrfStdMax'] = float(dicCnfg['varPrfStdMax']) if lgcPrint: print('---Maximum pRF model size: ' + str(dicCnfg['varPrfStdMax'])) # Volume TR of input data [s]: dicCnfg['varTr'] = float(dicCnfg['varTr']) if lgcPrint: print('---Volume TR of input data [s]: ' + str(dicCnfg['varTr'])) # Voxel resolution of fMRI data [mm]: dicCnfg['varVoxRes'] = float(dicCnfg['varVoxRes']) if lgcPrint: print('---Voxel resolution of fMRI data [mm]: ' + str(dicCnfg['varVoxRes'])) # Number of fMRI volumes and png files to load: dicCnfg['varNumVol'] = int(dicCnfg['varNumVol']) if lgcPrint: print('---Total number of fMRI volumes and png files: ' + str(dicCnfg['varNumVol'])) # Extent of temporal smoothing for fMRI data and pRF time course models # [standard deviation of the Gaussian kernel, in seconds]: # same temporal smoothing will be applied to pRF model time courses dicCnfg['varSdSmthTmp'] = float(dicCnfg['varSdSmthTmp']) if lgcPrint: print('---Extent of temporal smoothing (Gaussian SD in [s]): ' + str(dicCnfg['varSdSmthTmp'])) # Number of processes to run in parallel: dicCnfg['varPar'] = int(dicCnfg['varPar']) if lgcPrint: print('---Number of processes to run in parallel: ' + str(dicCnfg['varPar'])) # Size of space model in which the pRF models are # created (x- and y-dimension). dicCnfg['tplVslSpcSze'] = tuple([int(dicCnfg['varVslSpcSzeX']), int(dicCnfg['varVslSpcSzeY'])]) if lgcPrint: print('---Size of visual space model (x & y): ' + str(dicCnfg['tplVslSpcSze'])) # Path(s) of functional data: dicCnfg['lstPathNiiFunc'] = ast.literal_eval(dicCnfg['lstPathNiiFunc']) if lgcPrint: print('---Path(s) of functional data:') for strTmp in dicCnfg['lstPathNiiFunc']: print(' ' + str(strTmp)) # Path of mask (to restrict pRF model finding): dicCnfg['strPathNiiMask'] = ast.literal_eval(dicCnfg['strPathNiiMask']) if lgcPrint: print('---Path of mask (to restrict pRF model finding):') print(' ' + str(dicCnfg['strPathNiiMask'])) # Output basename: dicCnfg['strPathOut'] = ast.literal_eval(dicCnfg['strPathOut']) if lgcPrint: print('---Output basename:') print(' ' + str(dicCnfg['strPathOut'])) # Which version to use for pRF finding. 'numpy' or 'cython' for pRF finding # on CPU, 'gpu' for using GPU. dicCnfg['strVersion'] = ast.literal_eval(dicCnfg['strVersion']) if lgcPrint: print('---Version (numpy, cython, or gpu): ' + str(dicCnfg['strVersion'])) # Create pRF time course models? dicCnfg['lgcCrteMdl'] = (dicCnfg['lgcCrteMdl'] == 'True') if lgcPrint: print('---Create pRF time course models: ' + str(dicCnfg['lgcCrteMdl'])) # Path to npy file with pRF time course models (to save or laod). Without # file extension. 
dicCnfg['strPathMdl'] = ast.literal_eval(dicCnfg['strPathMdl']) if lgcPrint: print('---Path to npy file with pRF time course models (to save ' + 'or load):') print(' ' + str(dicCnfg['strPathMdl'])) # switch to determine which hrf functions should be used # 1: canonical, 2: can and temp derivative, 3: can, temp and spat deriv dicCnfg['switchHrfSet'] = ast.literal_eval(dicCnfg['switchHrfSet']) if lgcPrint: print('---Switch to determine which hrf functions should be used: ' + str(dicCnfg['switchHrfSet'])) # should model fitting be based on k-fold cross-validation? # if not, set to 1 dicCnfg['varNumXval'] = ast.literal_eval(dicCnfg['varNumXval']) if lgcPrint: print('---Model fitting will have this number of folds for xval: ' + str(dicCnfg['varNumXval'])) # If we create new pRF time course models, the following parameters have to # be provided: if dicCnfg['lgcCrteMdl']: # Name of the npy that holds spatial info about conditions dicCnfg['strSptExpInf'] = ast.literal_eval(dicCnfg['strSptExpInf']) if lgcPrint: print('---Path to npy file with spatial condition info: ') print(' ' + str(dicCnfg['strSptExpInf'])) # Name of the npy that holds temporal info about conditions dicCnfg['strTmpExpInf'] = ast.literal_eval(dicCnfg['strTmpExpInf']) if lgcPrint: print('---Path to npy file with temporal condition info: ') print(' ' + str(dicCnfg['strTmpExpInf'])) # Factor by which time courses and HRF will be upsampled for the # convolutions dicCnfg['varTmpOvsmpl'] = ast.literal_eval(dicCnfg['varTmpOvsmpl']) if lgcPrint: print('---Factor by which time courses and HRF will be upsampled: ' + str(dicCnfg['varTmpOvsmpl'])) # Is this a test? if lgcTest: # Prepend absolute path of this file to config file paths: dicCnfg['strPathNiiMask'] = (strDir + dicCnfg['strPathNiiMask']) dicCnfg['strPathOut'] = (strDir + dicCnfg['strPathOut']) dicCnfg['strPathMdl'] = (strDir + dicCnfg['strPathMdl']) dicCnfg['strSptExpInf'] = (strDir + dicCnfg['strSptExpInf']) dicCnfg['strTmpExpInf'] = (strDir + dicCnfg['strTmpExpInf']) # Loop through functional runs: varNumRun = len(dicCnfg['lstPathNiiFunc']) for idxRun in range(varNumRun): dicCnfg['lstPathNiiFunc'][idxRun] = ( strDir + dicCnfg['lstPathNiiFunc'][idxRun] ) return dicCnfg
[ "def", "load_config", "(", "strCsvCnfg", ",", "lgcTest", "=", "False", ",", "lgcPrint", "=", "True", ")", ":", "# Dictionary with config information:", "dicCnfg", "=", "{", "}", "# Open file with parameter configuration:", "# fleConfig = open(strCsvCnfg, 'r')", "with", "open", "(", "strCsvCnfg", ",", "'r'", ")", "as", "fleConfig", ":", "# Read file with ROI information:", "csvIn", "=", "csv", ".", "reader", "(", "fleConfig", ",", "delimiter", "=", "'\\n'", ",", "skipinitialspace", "=", "True", ")", "# Loop through csv object to fill list with csv data:", "for", "lstTmp", "in", "csvIn", ":", "# Skip comments (i.e. lines starting with '#') and empty lines.", "# Note: Indexing the list (i.e. lstTmp[0][0]) does not work for", "# empty lines. However, if the first condition is no fullfilled", "# (i.e. line is empty and 'if lstTmp' evaluates to false), the", "# second logical test (after the 'and') is not actually carried", "# out.", "if", "lstTmp", "and", "not", "(", "lstTmp", "[", "0", "]", "[", "0", "]", "==", "'#'", ")", ":", "# Name of current parameter (e.g. 'varTr'):", "strParamKey", "=", "lstTmp", "[", "0", "]", ".", "split", "(", "' = '", ")", "[", "0", "]", "# print(strParamKey)", "# Current parameter value (e.g. '2.94'):", "strParamVlu", "=", "lstTmp", "[", "0", "]", ".", "split", "(", "' = '", ")", "[", "1", "]", "# print(strParamVlu)", "# Put paramter name (key) and value (item) into dictionary:", "dicCnfg", "[", "strParamKey", "]", "=", "strParamVlu", "# Are model parameters in cartesian or polar coordinates?", "# set either pol (polar) or crt (cartesian)", "dicCnfg", "[", "'strKwCrd'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'strKwCrd'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Model coordinates are in: '", "+", "str", "(", "dicCnfg", "[", "'strKwCrd'", "]", ")", ")", "# Number of x- or radial positions to model:", "dicCnfg", "[", "'varNum1'", "]", "=", "int", "(", "dicCnfg", "[", "'varNum1'", "]", ")", "# Number of y- or angular positions to model:", "dicCnfg", "[", "'varNum2'", "]", "=", "int", "(", "dicCnfg", "[", "'varNum2'", "]", ")", "if", "lgcPrint", ":", "if", "dicCnfg", "[", "'strKwCrd'", "]", "==", "'crt'", ":", "print", "(", "'---Number of x-positions to model: '", "+", "str", "(", "dicCnfg", "[", "'varNum1'", "]", ")", ")", "print", "(", "'---Number of y-positions to model: '", "+", "str", "(", "dicCnfg", "[", "'varNum2'", "]", ")", ")", "elif", "dicCnfg", "[", "'strKwCrd'", "]", "==", "'pol'", ":", "print", "(", "'---Number of radial positions to model: '", "+", "str", "(", "dicCnfg", "[", "'varNum1'", "]", ")", ")", "print", "(", "'---Number of angular positions to model: '", "+", "str", "(", "dicCnfg", "[", "'varNum2'", "]", ")", ")", "# Number of pRF sizes to model:", "dicCnfg", "[", "'varNumPrfSizes'", "]", "=", "int", "(", "dicCnfg", "[", "'varNumPrfSizes'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Number of pRF sizes to model: '", "+", "str", "(", "dicCnfg", "[", "'varNumPrfSizes'", "]", ")", ")", "# Extent of visual space from centre of the screen in negative x-direction", "# (i.e. from the fixation point to the left end of the screen) in degrees", "# of visual angle.", "dicCnfg", "[", "'varExtXmin'", "]", "=", "float", "(", "dicCnfg", "[", "'varExtXmin'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Extent of visual space in negative x-direction: '", "+", "str", "(", "dicCnfg", "[", "'varExtXmin'", "]", ")", ")", "# Extent of visual space from centre of the screen in positive x-direction", "# (i.e. 
from the fixation point to the right end of the screen) in degrees", "# of visual angle.", "dicCnfg", "[", "'varExtXmax'", "]", "=", "float", "(", "dicCnfg", "[", "'varExtXmax'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Extent of visual space in positive x-direction: '", "+", "str", "(", "dicCnfg", "[", "'varExtXmax'", "]", ")", ")", "# Extent of visual space from centre of the screen in negative y-direction", "# (i.e. from the fixation point to the lower end of the screen) in degrees", "# of visual angle.", "dicCnfg", "[", "'varExtYmin'", "]", "=", "float", "(", "dicCnfg", "[", "'varExtYmin'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Extent of visual space in negative y-direction: '", "+", "str", "(", "dicCnfg", "[", "'varExtYmin'", "]", ")", ")", "# Extent of visual space from centre of the screen in positive y-direction", "# (i.e. from the fixation point to the upper end of the screen) in degrees", "# of visual angle.", "dicCnfg", "[", "'varExtYmax'", "]", "=", "float", "(", "dicCnfg", "[", "'varExtYmax'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Extent of visual space in positive y-direction: '", "+", "str", "(", "dicCnfg", "[", "'varExtYmax'", "]", ")", ")", "# Minimum pRF model size (standard deviation of 2D Gaussian) [degrees of", "# visual angle]:", "dicCnfg", "[", "'varPrfStdMin'", "]", "=", "float", "(", "dicCnfg", "[", "'varPrfStdMin'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Minimum pRF model size: '", "+", "str", "(", "dicCnfg", "[", "'varPrfStdMin'", "]", ")", ")", "# Maximum pRF model size (standard deviation of 2D Gaussian) [degrees of", "# visual angle]:", "dicCnfg", "[", "'varPrfStdMax'", "]", "=", "float", "(", "dicCnfg", "[", "'varPrfStdMax'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Maximum pRF model size: '", "+", "str", "(", "dicCnfg", "[", "'varPrfStdMax'", "]", ")", ")", "# Volume TR of input data [s]:", "dicCnfg", "[", "'varTr'", "]", "=", "float", "(", "dicCnfg", "[", "'varTr'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Volume TR of input data [s]: '", "+", "str", "(", "dicCnfg", "[", "'varTr'", "]", ")", ")", "# Voxel resolution of fMRI data [mm]:", "dicCnfg", "[", "'varVoxRes'", "]", "=", "float", "(", "dicCnfg", "[", "'varVoxRes'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Voxel resolution of fMRI data [mm]: '", "+", "str", "(", "dicCnfg", "[", "'varVoxRes'", "]", ")", ")", "# Number of fMRI volumes and png files to load:", "dicCnfg", "[", "'varNumVol'", "]", "=", "int", "(", "dicCnfg", "[", "'varNumVol'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Total number of fMRI volumes and png files: '", "+", "str", "(", "dicCnfg", "[", "'varNumVol'", "]", ")", ")", "# Extent of temporal smoothing for fMRI data and pRF time course models", "# [standard deviation of the Gaussian kernel, in seconds]:", "# same temporal smoothing will be applied to pRF model time courses", "dicCnfg", "[", "'varSdSmthTmp'", "]", "=", "float", "(", "dicCnfg", "[", "'varSdSmthTmp'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Extent of temporal smoothing (Gaussian SD in [s]): '", "+", "str", "(", "dicCnfg", "[", "'varSdSmthTmp'", "]", ")", ")", "# Number of processes to run in parallel:", "dicCnfg", "[", "'varPar'", "]", "=", "int", "(", "dicCnfg", "[", "'varPar'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Number of processes to run in parallel: '", "+", "str", "(", "dicCnfg", "[", "'varPar'", "]", ")", ")", "# Size of space model in which the pRF models are", "# created (x- and 
y-dimension).", "dicCnfg", "[", "'tplVslSpcSze'", "]", "=", "tuple", "(", "[", "int", "(", "dicCnfg", "[", "'varVslSpcSzeX'", "]", ")", ",", "int", "(", "dicCnfg", "[", "'varVslSpcSzeY'", "]", ")", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Size of visual space model (x & y): '", "+", "str", "(", "dicCnfg", "[", "'tplVslSpcSze'", "]", ")", ")", "# Path(s) of functional data:", "dicCnfg", "[", "'lstPathNiiFunc'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'lstPathNiiFunc'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Path(s) of functional data:'", ")", "for", "strTmp", "in", "dicCnfg", "[", "'lstPathNiiFunc'", "]", ":", "print", "(", "' '", "+", "str", "(", "strTmp", ")", ")", "# Path of mask (to restrict pRF model finding):", "dicCnfg", "[", "'strPathNiiMask'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'strPathNiiMask'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Path of mask (to restrict pRF model finding):'", ")", "print", "(", "' '", "+", "str", "(", "dicCnfg", "[", "'strPathNiiMask'", "]", ")", ")", "# Output basename:", "dicCnfg", "[", "'strPathOut'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'strPathOut'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Output basename:'", ")", "print", "(", "' '", "+", "str", "(", "dicCnfg", "[", "'strPathOut'", "]", ")", ")", "# Which version to use for pRF finding. 'numpy' or 'cython' for pRF finding", "# on CPU, 'gpu' for using GPU.", "dicCnfg", "[", "'strVersion'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'strVersion'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Version (numpy, cython, or gpu): '", "+", "str", "(", "dicCnfg", "[", "'strVersion'", "]", ")", ")", "# Create pRF time course models?", "dicCnfg", "[", "'lgcCrteMdl'", "]", "=", "(", "dicCnfg", "[", "'lgcCrteMdl'", "]", "==", "'True'", ")", "if", "lgcPrint", ":", "print", "(", "'---Create pRF time course models: '", "+", "str", "(", "dicCnfg", "[", "'lgcCrteMdl'", "]", ")", ")", "# Path to npy file with pRF time course models (to save or laod). 
Without", "# file extension.", "dicCnfg", "[", "'strPathMdl'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'strPathMdl'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Path to npy file with pRF time course models (to save '", "+", "'or load):'", ")", "print", "(", "' '", "+", "str", "(", "dicCnfg", "[", "'strPathMdl'", "]", ")", ")", "# switch to determine which hrf functions should be used", "# 1: canonical, 2: can and temp derivative, 3: can, temp and spat deriv", "dicCnfg", "[", "'switchHrfSet'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'switchHrfSet'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Switch to determine which hrf functions should be used: '", "+", "str", "(", "dicCnfg", "[", "'switchHrfSet'", "]", ")", ")", "# should model fitting be based on k-fold cross-validation?", "# if not, set to 1", "dicCnfg", "[", "'varNumXval'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'varNumXval'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Model fitting will have this number of folds for xval: '", "+", "str", "(", "dicCnfg", "[", "'varNumXval'", "]", ")", ")", "# If we create new pRF time course models, the following parameters have to", "# be provided:", "if", "dicCnfg", "[", "'lgcCrteMdl'", "]", ":", "# Name of the npy that holds spatial info about conditions", "dicCnfg", "[", "'strSptExpInf'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'strSptExpInf'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Path to npy file with spatial condition info: '", ")", "print", "(", "' '", "+", "str", "(", "dicCnfg", "[", "'strSptExpInf'", "]", ")", ")", "# Name of the npy that holds temporal info about conditions", "dicCnfg", "[", "'strTmpExpInf'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'strTmpExpInf'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Path to npy file with temporal condition info: '", ")", "print", "(", "' '", "+", "str", "(", "dicCnfg", "[", "'strTmpExpInf'", "]", ")", ")", "# Factor by which time courses and HRF will be upsampled for the", "# convolutions", "dicCnfg", "[", "'varTmpOvsmpl'", "]", "=", "ast", ".", "literal_eval", "(", "dicCnfg", "[", "'varTmpOvsmpl'", "]", ")", "if", "lgcPrint", ":", "print", "(", "'---Factor by which time courses and HRF will be upsampled: '", "+", "str", "(", "dicCnfg", "[", "'varTmpOvsmpl'", "]", ")", ")", "# Is this a test?", "if", "lgcTest", ":", "# Prepend absolute path of this file to config file paths:", "dicCnfg", "[", "'strPathNiiMask'", "]", "=", "(", "strDir", "+", "dicCnfg", "[", "'strPathNiiMask'", "]", ")", "dicCnfg", "[", "'strPathOut'", "]", "=", "(", "strDir", "+", "dicCnfg", "[", "'strPathOut'", "]", ")", "dicCnfg", "[", "'strPathMdl'", "]", "=", "(", "strDir", "+", "dicCnfg", "[", "'strPathMdl'", "]", ")", "dicCnfg", "[", "'strSptExpInf'", "]", "=", "(", "strDir", "+", "dicCnfg", "[", "'strSptExpInf'", "]", ")", "dicCnfg", "[", "'strTmpExpInf'", "]", "=", "(", "strDir", "+", "dicCnfg", "[", "'strTmpExpInf'", "]", ")", "# Loop through functional runs:", "varNumRun", "=", "len", "(", "dicCnfg", "[", "'lstPathNiiFunc'", "]", ")", "for", "idxRun", "in", "range", "(", "varNumRun", ")", ":", "dicCnfg", "[", "'lstPathNiiFunc'", "]", "[", "idxRun", "]", "=", "(", "strDir", "+", "dicCnfg", "[", "'lstPathNiiFunc'", "]", "[", "idxRun", "]", ")", "return", "dicCnfg" ]
Load py_pRF_mapping config file. Parameters ---------- strCsvCnfg : string Absolute file path of config file. lgcTest : Boolean Whether this is a test (pytest). If yes, absolute path of this function will be prepended to config file paths. lgcPrint : Boolean Print config parameters? Returns ------- dicCnfg : dict Dictionary containing parameter names (as keys) and parameter values (as values). For example, `dicCnfg['varTr']` contains a float, such as `2.94`.
[ "Load", "py_pRF_mapping", "config", "file", "." ]
train
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/load_config.py#L12-L277
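A minimal usage sketch for the load_config record above. The module path follows the repository URL (pyprf_feature/analysis/load_config.py), but the config file path and sample values are hypothetical; the config file must define every parameter the function converts (varTr, varNum1, strKwCrd, and so on) as 'name = value' lines.

from pyprf_feature.analysis.load_config import load_config

# Hypothetical config file contents (string values need quotes, since the
# function runs ast.literal_eval on them):
#   strKwCrd = 'crt'
#   varNum1 = 40
#   varTr = 2.94
dicCnfg = load_config('/home/user/prf_config.csv', lgcTest=False, lgcPrint=False)

print(dicCnfg['varTr'])         # -> 2.94, converted to float
print(dicCnfg['tplVslSpcSze'])  # -> e.g. (200, 200), assembled from the X/Y sizes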
bachya/pyflunearyou
pyflunearyou/user.py
UserReport.status_by_coordinates
async def status_by_coordinates(
        self, latitude: float, longitude: float) -> dict:
    """Get symptom data for the location nearest to the user's lat/lon."""
    return await self.nearest_by_coordinates(latitude, longitude)
python
async def status_by_coordinates(
        self, latitude: float, longitude: float) -> dict:
    """Get symptom data for the location nearest to the user's lat/lon."""
    return await self.nearest_by_coordinates(latitude, longitude)
[ "async", "def", "status_by_coordinates", "(", "self", ",", "latitude", ":", "float", ",", "longitude", ":", "float", ")", "->", "dict", ":", "return", "await", "self", ".", "nearest_by_coordinates", "(", "latitude", ",", "longitude", ")" ]
Get symptom data for the location nearest to the user's lat/lon.
[ "Get", "symptom", "data", "for", "the", "location", "nearest", "to", "the", "user", "s", "lat", "/", "lon", "." ]
train
https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/user.py#L12-L15
bachya/pyflunearyou
pyflunearyou/user.py
UserReport.status_by_zip
async def status_by_zip(self, zip_code: str) -> dict:
    """Get symptom data for the provided ZIP code."""
    try:
        location = next((
            d for d in await self.user_reports()
            if d['zip'] == zip_code))
    except StopIteration:
        return {}

    return await self.status_by_coordinates(
        float(location['latitude']), float(location['longitude']))
python
async def status_by_zip(self, zip_code: str) -> dict:
    """Get symptom data for the provided ZIP code."""
    try:
        location = next((
            d for d in await self.user_reports()
            if d['zip'] == zip_code))
    except StopIteration:
        return {}

    return await self.status_by_coordinates(
        float(location['latitude']), float(location['longitude']))
[ "async", "def", "status_by_zip", "(", "self", ",", "zip_code", ":", "str", ")", "->", "dict", ":", "try", ":", "location", "=", "next", "(", "(", "d", "for", "d", "in", "await", "self", ".", "user_reports", "(", ")", "if", "d", "[", "'zip'", "]", "==", "zip_code", ")", ")", "except", "StopIteration", ":", "return", "{", "}", "return", "await", "self", ".", "status_by_coordinates", "(", "float", "(", "location", "[", "'latitude'", "]", ")", ",", "float", "(", "location", "[", "'longitude'", "]", ")", ")" ]
Get symptom data for the provided ZIP code.
[ "Get", "symptom", "data", "for", "the", "provided", "ZIP", "code", "." ]
train
https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/user.py#L17-L27
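The zip lookup above leans on next() over a generator expression, with StopIteration standing in for "not found". A self-contained sketch of that idiom, with invented sample data:

reports = [
    {'zip': '90210', 'latitude': '34.10', 'longitude': '-118.41'},
    {'zip': '10001', 'latitude': '40.75', 'longitude': '-73.99'},
]

def find_by_zip(zip_code):
    # next() returns the first matching dict; an exhausted generator
    # raises StopIteration, which maps to the empty-dict fallback.
    try:
        return next(d for d in reports if d['zip'] == zip_code)
    except StopIteration:
        return {}

print(find_by_zip('10001'))  # -> the matching report dict
print(find_by_zip('99999'))  # -> {}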
osilkin98/PyBRY
pybry/LBRYException.py
print_request
def print_request(request):
    """ Prints a prepared request to give the user info as to what they're sending

    :param request.PreparedRequest request: PreparedRequest object to be printed
    :return: Nothing
    """
    print('{}\n{}\n{}\n\n{}'.format(
        '-----------START-----------',
        request.method + ' ' + request.url,
        '\n'.join('{}: {}'.format(k, v) for k, v in request.headers.items()),
        request.body,
    ))
python
def print_request(request):
    """ Prints a prepared request to give the user info as to what they're sending

    :param request.PreparedRequest request: PreparedRequest object to be printed
    :return: Nothing
    """
    print('{}\n{}\n{}\n\n{}'.format(
        '-----------START-----------',
        request.method + ' ' + request.url,
        '\n'.join('{}: {}'.format(k, v) for k, v in request.headers.items()),
        request.body,
    ))
[ "def", "print_request", "(", "request", ")", ":", "print", "(", "'{}\\n{}\\n{}\\n\\n{}'", ".", "format", "(", "'-----------START-----------'", ",", "request", ".", "method", "+", "' '", "+", "request", ".", "url", ",", "'\\n'", ".", "join", "(", "'{}: {}'", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "request", ".", "headers", ".", "items", "(", ")", ")", ",", "request", ".", "body", ",", ")", ")" ]
Prints a prepared request to give the user info as to what they're sending :param request.PreparedRequest request: PreparedRequest object to be printed :return: Nothing
[ "Prints", "a", "prepared", "request", "to", "give", "the", "user", "info", "as", "to", "what", "they", "re", "sending" ]
train
https://github.com/osilkin98/PyBRY/blob/af86805a8077916f72f3fe980943d4cd741e61f0/pybry/LBRYException.py#L3-L14
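A usage sketch for print_request: building a PreparedRequest with the requests library and printing it. The endpoint and payload are placeholders (port 5279 is only suggested by PyBRY's LBRY context); only standard requests APIs are used.

import requests

req = requests.Request('POST', 'http://localhost:5279',
                       json={'method': 'status', 'params': {}})
prepared = req.prepare()  # a requests.PreparedRequest
print_request(prepared)   # prints method, URL, headers and body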
olivier-m/rafter
rafter/contrib/schematics/filters.py
filter_validate_schemas
def filter_validate_schemas(get_response, params): """ This filter validates input data against the resource's ``request_schema`` and fill the request's ``validated`` dict. Data from ``request.params`` and ``request.body`` (when the request body is of a form type) will be converted using the schema in order to get proper lists or unique values. .. important:: The request validation is only effective when a ``request_schema`` has been provided by the resource definition. """ request_schema = params.get('request_schema') if request_schema is None: return get_response def _convert_params(schema, data): for sc in schema.fields.values(): name = sc.serialized_name or sc.name val = data.getlist(name) if val is None: continue if len(val) == 1 and not isinstance(sc, ListType): val = val[0] data[name] = val async def decorated_filter(request, *args, **kwargs): data = { 'headers': CIDict(request.headers), 'path': request.app.router.get(request)[2], 'params': RequestParameters(request.args), 'body': {} } if request.body: # Get body if we have something there if request.form: data['body'] = RequestParameters(request.form) else: # will raise 400 if cannot parse json data['body'] = deepcopy(request.json) if hasattr(request_schema, 'body') and request.form: _convert_params(request_schema.body, data['body']) if hasattr(request_schema, 'params') and data['params']: _convert_params(request_schema.params, data['params']) # Now, validate the whole thing try: model = request_schema(data, strict=False, validate=False) model.validate() request.validated = model.to_native() except BaseError as e: raise ValidationErrors(e.to_primitive()) return await get_response(request, *args, **kwargs) return decorated_filter
python
def filter_validate_schemas(get_response, params): """ This filter validates input data against the resource's ``request_schema`` and fill the request's ``validated`` dict. Data from ``request.params`` and ``request.body`` (when the request body is of a form type) will be converted using the schema in order to get proper lists or unique values. .. important:: The request validation is only effective when a ``request_schema`` has been provided by the resource definition. """ request_schema = params.get('request_schema') if request_schema is None: return get_response def _convert_params(schema, data): for sc in schema.fields.values(): name = sc.serialized_name or sc.name val = data.getlist(name) if val is None: continue if len(val) == 1 and not isinstance(sc, ListType): val = val[0] data[name] = val async def decorated_filter(request, *args, **kwargs): data = { 'headers': CIDict(request.headers), 'path': request.app.router.get(request)[2], 'params': RequestParameters(request.args), 'body': {} } if request.body: # Get body if we have something there if request.form: data['body'] = RequestParameters(request.form) else: # will raise 400 if cannot parse json data['body'] = deepcopy(request.json) if hasattr(request_schema, 'body') and request.form: _convert_params(request_schema.body, data['body']) if hasattr(request_schema, 'params') and data['params']: _convert_params(request_schema.params, data['params']) # Now, validate the whole thing try: model = request_schema(data, strict=False, validate=False) model.validate() request.validated = model.to_native() except BaseError as e: raise ValidationErrors(e.to_primitive()) return await get_response(request, *args, **kwargs) return decorated_filter
[ "def", "filter_validate_schemas", "(", "get_response", ",", "params", ")", ":", "request_schema", "=", "params", ".", "get", "(", "'request_schema'", ")", "if", "request_schema", "is", "None", ":", "return", "get_response", "def", "_convert_params", "(", "schema", ",", "data", ")", ":", "for", "sc", "in", "schema", ".", "fields", ".", "values", "(", ")", ":", "name", "=", "sc", ".", "serialized_name", "or", "sc", ".", "name", "val", "=", "data", ".", "getlist", "(", "name", ")", "if", "val", "is", "None", ":", "continue", "if", "len", "(", "val", ")", "==", "1", "and", "not", "isinstance", "(", "sc", ",", "ListType", ")", ":", "val", "=", "val", "[", "0", "]", "data", "[", "name", "]", "=", "val", "async", "def", "decorated_filter", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "'headers'", ":", "CIDict", "(", "request", ".", "headers", ")", ",", "'path'", ":", "request", ".", "app", ".", "router", ".", "get", "(", "request", ")", "[", "2", "]", ",", "'params'", ":", "RequestParameters", "(", "request", ".", "args", ")", ",", "'body'", ":", "{", "}", "}", "if", "request", ".", "body", ":", "# Get body if we have something there", "if", "request", ".", "form", ":", "data", "[", "'body'", "]", "=", "RequestParameters", "(", "request", ".", "form", ")", "else", ":", "# will raise 400 if cannot parse json", "data", "[", "'body'", "]", "=", "deepcopy", "(", "request", ".", "json", ")", "if", "hasattr", "(", "request_schema", ",", "'body'", ")", "and", "request", ".", "form", ":", "_convert_params", "(", "request_schema", ".", "body", ",", "data", "[", "'body'", "]", ")", "if", "hasattr", "(", "request_schema", ",", "'params'", ")", "and", "data", "[", "'params'", "]", ":", "_convert_params", "(", "request_schema", ".", "params", ",", "data", "[", "'params'", "]", ")", "# Now, validate the whole thing", "try", ":", "model", "=", "request_schema", "(", "data", ",", "strict", "=", "False", ",", "validate", "=", "False", ")", "model", ".", "validate", "(", ")", "request", ".", "validated", "=", "model", ".", "to_native", "(", ")", "except", "BaseError", "as", "e", ":", "raise", "ValidationErrors", "(", "e", ".", "to_primitive", "(", ")", ")", "return", "await", "get_response", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "decorated_filter" ]
This filter validates input data against the resource's ``request_schema`` and fills the request's ``validated`` dict.

Data from ``request.params`` and ``request.body`` (when the request body is of a form type) will be converted using the schema in order to get proper lists or unique values.

.. important::
    The request validation is only effective when a ``request_schema`` has been provided by the resource definition.
[ "This", "filter", "validates", "input", "data", "against", "the", "resource", "s", "request_schema", "and", "fill", "the", "request", "s", "validated", "dict", "." ]
train
https://github.com/olivier-m/rafter/blob/aafcf8fd019f24abcf519307c4484cc6b4697c04/rafter/contrib/schematics/filters.py#L25-L88
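Both rafter filters in this record and the next follow the same factory contract: take the downstream handler (get_response) plus the resource's params and return a wrapped coroutine. A minimal sketch of that shape, with illustrative names:

def passthrough_filter(get_response, params):
    async def decorated_filter(request, *args, **kwargs):
        # pre-processing (e.g. validating the incoming request) goes here
        response = await get_response(request, *args, **kwargs)
        # post-processing (e.g. validating the outgoing response) goes here
        return response
    return decorated_filter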
olivier-m/rafter
rafter/contrib/schematics/filters.py
filter_validate_response
def filter_validate_response(get_response, params): """ This filter process the returned response. It does 2 things: - If the response is a ``sanic.response.HTTPResponse`` and not a :class:`rafter.http.Response`, return it immediately. - It processes, validates and serializes this response when a schema is provided. That means that you can always return a normal Sanic's HTTPResponse and thus, bypass the validation process when you need to do so. .. important:: The response validation is only effective when: - A ``response_schema`` has been provided by the resource definition - The resource returns a :class:`rafter.http.Response` instance or arbitrary data. """ schema = params.get('response_schema') async def decorated_filter(request, *args, **kwargs): response = await get_response(request, *args, **kwargs) if isinstance(response, HTTPResponse) and \ not isinstance(response, Response): return response if not isinstance(response, Response): raise TypeError('response is not an instance ' 'of rafter.http.Response.') if schema: data = { 'body': response.data, 'headers': response.headers } try: model = schema(data, strict=False, validate=False) model.validate() result = model.to_primitive() response.body = result.get('body', None) response.headers.update(result.get('headers', {})) except BaseError as e: log.exception(e) abort(500, 'Wrong data output') return response return decorated_filter
python
def filter_validate_response(get_response, params): """ This filter process the returned response. It does 2 things: - If the response is a ``sanic.response.HTTPResponse`` and not a :class:`rafter.http.Response`, return it immediately. - It processes, validates and serializes this response when a schema is provided. That means that you can always return a normal Sanic's HTTPResponse and thus, bypass the validation process when you need to do so. .. important:: The response validation is only effective when: - A ``response_schema`` has been provided by the resource definition - The resource returns a :class:`rafter.http.Response` instance or arbitrary data. """ schema = params.get('response_schema') async def decorated_filter(request, *args, **kwargs): response = await get_response(request, *args, **kwargs) if isinstance(response, HTTPResponse) and \ not isinstance(response, Response): return response if not isinstance(response, Response): raise TypeError('response is not an instance ' 'of rafter.http.Response.') if schema: data = { 'body': response.data, 'headers': response.headers } try: model = schema(data, strict=False, validate=False) model.validate() result = model.to_primitive() response.body = result.get('body', None) response.headers.update(result.get('headers', {})) except BaseError as e: log.exception(e) abort(500, 'Wrong data output') return response return decorated_filter
[ "def", "filter_validate_response", "(", "get_response", ",", "params", ")", ":", "schema", "=", "params", ".", "get", "(", "'response_schema'", ")", "async", "def", "decorated_filter", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "await", "get_response", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "response", ",", "HTTPResponse", ")", "and", "not", "isinstance", "(", "response", ",", "Response", ")", ":", "return", "response", "if", "not", "isinstance", "(", "response", ",", "Response", ")", ":", "raise", "TypeError", "(", "'response is not an instance '", "'of rafter.http.Response.'", ")", "if", "schema", ":", "data", "=", "{", "'body'", ":", "response", ".", "data", ",", "'headers'", ":", "response", ".", "headers", "}", "try", ":", "model", "=", "schema", "(", "data", ",", "strict", "=", "False", ",", "validate", "=", "False", ")", "model", ".", "validate", "(", ")", "result", "=", "model", ".", "to_primitive", "(", ")", "response", ".", "body", "=", "result", ".", "get", "(", "'body'", ",", "None", ")", "response", ".", "headers", ".", "update", "(", "result", ".", "get", "(", "'headers'", ",", "{", "}", ")", ")", "except", "BaseError", "as", "e", ":", "log", ".", "exception", "(", "e", ")", "abort", "(", "500", ",", "'Wrong data output'", ")", "return", "response", "return", "decorated_filter" ]
This filter processes the returned response. It does 2 things:

- If the response is a ``sanic.response.HTTPResponse`` and not a :class:`rafter.http.Response`, return it immediately.
- It processes, validates and serializes this response when a schema is provided.

That means that you can always return a normal Sanic HTTPResponse and thus bypass the validation process when you need to do so.

.. important::
    The response validation is only effective when:

    - A ``response_schema`` has been provided by the resource definition
    - The resource returns a :class:`rafter.http.Response` instance or arbitrary data.
[ "This", "filter", "process", "the", "returned", "response", ".", "It", "does", "2", "things", ":" ]
train
https://github.com/olivier-m/rafter/blob/aafcf8fd019f24abcf519307c4484cc6b4697c04/rafter/contrib/schematics/filters.py#L91-L142
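A sketch of a response schema this filter could be handed: a schematics Model with body and headers sub-structures, mirroring the data dict built above. The field names are invented; only core schematics types are used.

from schematics.models import Model
from schematics.types import IntType, StringType
from schematics.types.compound import DictType, ModelType

class ItemSchema(Model):
    id = IntType(required=True)
    name = StringType(required=True)

class ResponseSchema(Model):
    body = ModelType(ItemSchema)
    headers = DictType(StringType)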
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/utils/epub.py
make_EPUB
def make_EPUB(parsed_article, output_directory, input_path, image_directory, config_module=None, epub_version=None, batch=False): """ Standard workflow for creating an EPUB document. make_EPUB is used to produce an EPUB file from a parsed article. In addition to the article it also requires a path to the appropriate image directory which it will insert into the EPUB file, as well the output directory location for the EPUB file. Parameters ---------- article : openaccess_epub.article.Article instance `article` is an Article instance for the XML document to be converted to EPUB. output_directory : str `output_directory` is a string path to the directory in which the EPUB will be produced. The name of the directory will be used as the EPUB's filename. input_path : str `input_path` is a string absolute path to the input XML file, used to locate input-relative images. image_directory : str `image_directory` is a string path indicating an explicit image directory. If supplied, other image input methods will not be used. config_module : config module, optional `config_module` is a pre-loaded config module for OpenAccess_EPUB; if not used then this function will load the global config file. Might be useful in certain cases to dynamically alter configuration. epub_version : {None, 2, 3} `epub_version` dictates which version of EPUB to be created. An error will be raised if the specified version is not supported for the publisher. If left to the default, the created version will defer to the publisher default version. batch : bool, optional `batch` indicates that batch creation is being used (such as with the `oaepub batch` command). In this case, directory conflicts will be automatically resolved (in favor of keeping previous data, skipping creation of EPUB). Returns False in the case of a fatal error, True if successful. """ #command_log.info('Creating {0}.epub'.format(output_directory)) if config_module is None: config_module = openaccess_epub.utils.load_config_module() if epub_version not in (None, 2, 3): log.error('Invalid EPUB version: {0}'.format(epub_version)) raise ValueError('Invalid EPUB version. Should be 2 or 3') if epub_version is None: epub_version = parsed_article.publisher.epub_default #Handle directory output conflicts if os.path.isdir(output_directory): if batch: # No user prompt, default to protect previous data log.error('Directory conflict during batch conversion, skipping.') return False else: # User prompting openaccess_epub.utils.dir_exists(output_directory) else: try: os.makedirs(output_directory) except OSError as err: if err.errno != 17: log.exception('Unable to recursively create output directories') #Copy over the basic epub directory make_epub_base(output_directory) #Get the images, if possible, fail gracefully if not success = openaccess_epub.utils.images.get_images(output_directory, image_directory, input_path, config_module, parsed_article) if not success: log.critical('Images for the article were not located! 
Aborting!') return False #Instantiate Navigation and Package epub_nav = Navigation() epub_package = Package() #Process the article for navigation and package info epub_nav.process(parsed_article) epub_package.process(parsed_article) #Render the content using publisher-specific methods parsed_article.publisher.render_content(output_directory, epub_version) if epub_version == 2: epub_nav.render_EPUB2(output_directory) epub_package.render_EPUB2(output_directory) elif epub_version == 3: epub_nav.render_EPUB3(output_directory) epub_package.render_EPUB3(output_directory) #Zip the directory into EPUB epub_zip(output_directory) return True
python
def make_EPUB(parsed_article, output_directory, input_path, image_directory, config_module=None, epub_version=None, batch=False): """ Standard workflow for creating an EPUB document. make_EPUB is used to produce an EPUB file from a parsed article. In addition to the article it also requires a path to the appropriate image directory which it will insert into the EPUB file, as well the output directory location for the EPUB file. Parameters ---------- article : openaccess_epub.article.Article instance `article` is an Article instance for the XML document to be converted to EPUB. output_directory : str `output_directory` is a string path to the directory in which the EPUB will be produced. The name of the directory will be used as the EPUB's filename. input_path : str `input_path` is a string absolute path to the input XML file, used to locate input-relative images. image_directory : str `image_directory` is a string path indicating an explicit image directory. If supplied, other image input methods will not be used. config_module : config module, optional `config_module` is a pre-loaded config module for OpenAccess_EPUB; if not used then this function will load the global config file. Might be useful in certain cases to dynamically alter configuration. epub_version : {None, 2, 3} `epub_version` dictates which version of EPUB to be created. An error will be raised if the specified version is not supported for the publisher. If left to the default, the created version will defer to the publisher default version. batch : bool, optional `batch` indicates that batch creation is being used (such as with the `oaepub batch` command). In this case, directory conflicts will be automatically resolved (in favor of keeping previous data, skipping creation of EPUB). Returns False in the case of a fatal error, True if successful. """ #command_log.info('Creating {0}.epub'.format(output_directory)) if config_module is None: config_module = openaccess_epub.utils.load_config_module() if epub_version not in (None, 2, 3): log.error('Invalid EPUB version: {0}'.format(epub_version)) raise ValueError('Invalid EPUB version. Should be 2 or 3') if epub_version is None: epub_version = parsed_article.publisher.epub_default #Handle directory output conflicts if os.path.isdir(output_directory): if batch: # No user prompt, default to protect previous data log.error('Directory conflict during batch conversion, skipping.') return False else: # User prompting openaccess_epub.utils.dir_exists(output_directory) else: try: os.makedirs(output_directory) except OSError as err: if err.errno != 17: log.exception('Unable to recursively create output directories') #Copy over the basic epub directory make_epub_base(output_directory) #Get the images, if possible, fail gracefully if not success = openaccess_epub.utils.images.get_images(output_directory, image_directory, input_path, config_module, parsed_article) if not success: log.critical('Images for the article were not located! 
Aborting!') return False #Instantiate Navigation and Package epub_nav = Navigation() epub_package = Package() #Process the article for navigation and package info epub_nav.process(parsed_article) epub_package.process(parsed_article) #Render the content using publisher-specific methods parsed_article.publisher.render_content(output_directory, epub_version) if epub_version == 2: epub_nav.render_EPUB2(output_directory) epub_package.render_EPUB2(output_directory) elif epub_version == 3: epub_nav.render_EPUB3(output_directory) epub_package.render_EPUB3(output_directory) #Zip the directory into EPUB epub_zip(output_directory) return True
[ "def", "make_EPUB", "(", "parsed_article", ",", "output_directory", ",", "input_path", ",", "image_directory", ",", "config_module", "=", "None", ",", "epub_version", "=", "None", ",", "batch", "=", "False", ")", ":", "#command_log.info('Creating {0}.epub'.format(output_directory))", "if", "config_module", "is", "None", ":", "config_module", "=", "openaccess_epub", ".", "utils", ".", "load_config_module", "(", ")", "if", "epub_version", "not", "in", "(", "None", ",", "2", ",", "3", ")", ":", "log", ".", "error", "(", "'Invalid EPUB version: {0}'", ".", "format", "(", "epub_version", ")", ")", "raise", "ValueError", "(", "'Invalid EPUB version. Should be 2 or 3'", ")", "if", "epub_version", "is", "None", ":", "epub_version", "=", "parsed_article", ".", "publisher", ".", "epub_default", "#Handle directory output conflicts", "if", "os", ".", "path", ".", "isdir", "(", "output_directory", ")", ":", "if", "batch", ":", "# No user prompt, default to protect previous data", "log", ".", "error", "(", "'Directory conflict during batch conversion, skipping.'", ")", "return", "False", "else", ":", "# User prompting", "openaccess_epub", ".", "utils", ".", "dir_exists", "(", "output_directory", ")", "else", ":", "try", ":", "os", ".", "makedirs", "(", "output_directory", ")", "except", "OSError", "as", "err", ":", "if", "err", ".", "errno", "!=", "17", ":", "log", ".", "exception", "(", "'Unable to recursively create output directories'", ")", "#Copy over the basic epub directory", "make_epub_base", "(", "output_directory", ")", "#Get the images, if possible, fail gracefully if not", "success", "=", "openaccess_epub", ".", "utils", ".", "images", ".", "get_images", "(", "output_directory", ",", "image_directory", ",", "input_path", ",", "config_module", ",", "parsed_article", ")", "if", "not", "success", ":", "log", ".", "critical", "(", "'Images for the article were not located! Aborting!'", ")", "return", "False", "#Instantiate Navigation and Package", "epub_nav", "=", "Navigation", "(", ")", "epub_package", "=", "Package", "(", ")", "#Process the article for navigation and package info", "epub_nav", ".", "process", "(", "parsed_article", ")", "epub_package", ".", "process", "(", "parsed_article", ")", "#Render the content using publisher-specific methods", "parsed_article", ".", "publisher", ".", "render_content", "(", "output_directory", ",", "epub_version", ")", "if", "epub_version", "==", "2", ":", "epub_nav", ".", "render_EPUB2", "(", "output_directory", ")", "epub_package", ".", "render_EPUB2", "(", "output_directory", ")", "elif", "epub_version", "==", "3", ":", "epub_nav", ".", "render_EPUB3", "(", "output_directory", ")", "epub_package", ".", "render_EPUB3", "(", "output_directory", ")", "#Zip the directory into EPUB", "epub_zip", "(", "output_directory", ")", "return", "True" ]
Standard workflow for creating an EPUB document.

make_EPUB is used to produce an EPUB file from a parsed article. In addition to the article it also requires a path to the appropriate image directory which it will insert into the EPUB file, as well as the output directory location for the EPUB file.

Parameters
----------
parsed_article : openaccess_epub.article.Article instance
    `parsed_article` is an Article instance for the XML document to be converted to EPUB.
output_directory : str
    `output_directory` is a string path to the directory in which the EPUB will be produced. The name of the directory will be used as the EPUB's filename.
input_path : str
    `input_path` is a string absolute path to the input XML file, used to locate input-relative images.
image_directory : str
    `image_directory` is a string path indicating an explicit image directory. If supplied, other image input methods will not be used.
config_module : config module, optional
    `config_module` is a pre-loaded config module for OpenAccess_EPUB; if not used then this function will load the global config file. Might be useful in certain cases to dynamically alter configuration.
epub_version : {None, 2, 3}
    `epub_version` dictates which version of EPUB will be created. An error will be raised if the specified version is not supported for the publisher. If left to the default, the created version will defer to the publisher default version.
batch : bool, optional
    `batch` indicates that batch creation is being used (such as with the `oaepub batch` command). In this case, directory conflicts will be automatically resolved (in favor of keeping previous data, skipping creation of EPUB).

Returns False in the case of a fatal error, True if successful.
[ "Standard", "workflow", "for", "creating", "an", "EPUB", "document", "." ]
train
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/epub.py#L21-L126
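A hedged sketch of driving make_EPUB end to end. The article-parsing step is an assumption inferred from the package layout (openaccess_epub.article) and may not match the real constructor; treat every path here as a placeholder.

import openaccess_epub.article

article = openaccess_epub.article.Article('/data/article.xml')  # hypothetical parse call

ok = make_EPUB(article,
               output_directory='/data/article',  # also names the resulting .epub
               input_path='/data/article.xml',    # locates input-relative images
               image_directory='',                # no explicit dir: other sources used
               epub_version=3)                    # or None for the publisher default
if not ok:
    print('EPUB creation failed; see the log for details')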
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/utils/epub.py
make_epub_base
def make_epub_base(location): """ Creates the base structure for an EPUB file in a specified location. This function creates constant components for the structure of the EPUB in a specified directory location. Parameters ---------- location : str A path string to a local directory in which the EPUB is to be built """ log.info('Making EPUB base files in {0}'.format(location)) with open(os.path.join(location, 'mimetype'), 'w') as out: # mimetype file out.write('application/epub+zip') #Create EPUB and META-INF directorys os.mkdir(os.path.join(location, 'META-INF')) os.mkdir(os.path.join(location, 'EPUB')) os.mkdir(os.path.join(location, 'EPUB', 'css')) with open(os.path.join(location, 'META-INF', 'container.xml'), 'w') as out: out.write('''\ <?xml version="1.0" encoding="UTF-8"?> <container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container"> <rootfiles> <rootfile full-path="EPUB/package.opf" media-type="application/oebps-package+xml"/> </rootfiles> </container>''') with open(os.path.join(location, 'EPUB', 'css', 'default.css') ,'wb') as out: out.write(bytes(DEFAULT_CSS, 'UTF-8'))
python
def make_epub_base(location): """ Creates the base structure for an EPUB file in a specified location. This function creates constant components for the structure of the EPUB in a specified directory location. Parameters ---------- location : str A path string to a local directory in which the EPUB is to be built """ log.info('Making EPUB base files in {0}'.format(location)) with open(os.path.join(location, 'mimetype'), 'w') as out: # mimetype file out.write('application/epub+zip') #Create EPUB and META-INF directorys os.mkdir(os.path.join(location, 'META-INF')) os.mkdir(os.path.join(location, 'EPUB')) os.mkdir(os.path.join(location, 'EPUB', 'css')) with open(os.path.join(location, 'META-INF', 'container.xml'), 'w') as out: out.write('''\ <?xml version="1.0" encoding="UTF-8"?> <container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container"> <rootfiles> <rootfile full-path="EPUB/package.opf" media-type="application/oebps-package+xml"/> </rootfiles> </container>''') with open(os.path.join(location, 'EPUB', 'css', 'default.css') ,'wb') as out: out.write(bytes(DEFAULT_CSS, 'UTF-8'))
[ "def", "make_epub_base", "(", "location", ")", ":", "log", ".", "info", "(", "'Making EPUB base files in {0}'", ".", "format", "(", "location", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "location", ",", "'mimetype'", ")", ",", "'w'", ")", "as", "out", ":", "# mimetype file", "out", ".", "write", "(", "'application/epub+zip'", ")", "#Create EPUB and META-INF directorys", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "location", ",", "'META-INF'", ")", ")", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "location", ",", "'EPUB'", ")", ")", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "location", ",", "'EPUB'", ",", "'css'", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "location", ",", "'META-INF'", ",", "'container.xml'", ")", ",", "'w'", ")", "as", "out", ":", "out", ".", "write", "(", "'''\\\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">\n <rootfiles>\n <rootfile full-path=\"EPUB/package.opf\" media-type=\"application/oebps-package+xml\"/>\n </rootfiles>\n</container>'''", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "location", ",", "'EPUB'", ",", "'css'", ",", "'default.css'", ")", ",", "'wb'", ")", "as", "out", ":", "out", ".", "write", "(", "bytes", "(", "DEFAULT_CSS", ",", "'UTF-8'", ")", ")" ]
Creates the base structure for an EPUB file in a specified location. This function creates constant components for the structure of the EPUB in a specified directory location. Parameters ---------- location : str A path string to a local directory in which the EPUB is to be built
[ "Creates", "the", "base", "structure", "for", "an", "EPUB", "file", "in", "a", "specified", "location", "." ]
train
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/epub.py#L129-L160
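A quick, self-contained check of the skeleton make_epub_base lays down; the three relative paths come straight from the function body above.

import os
import tempfile

loc = tempfile.mkdtemp()  # an existing, empty directory
make_epub_base(loc)
for rel in ('mimetype',
            os.path.join('META-INF', 'container.xml'),
            os.path.join('EPUB', 'css', 'default.css')):
    assert os.path.exists(os.path.join(loc, rel)), rel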
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/utils/epub.py
epub_zip
def epub_zip(outdirect): """ Zips up the input file directory into an EPUB file. """ def recursive_zip(zipf, directory, folder=None): if folder is None: folder = '' for item in os.listdir(directory): if os.path.isfile(os.path.join(directory, item)): zipf.write(os.path.join(directory, item), os.path.join(directory, item)) elif os.path.isdir(os.path.join(directory, item)): recursive_zip(zipf, os.path.join(directory, item), os.path.join(folder, item)) log.info('Zipping up the directory {0}'.format(outdirect)) epub_filename = outdirect + '.epub' epub = zipfile.ZipFile(epub_filename, 'w') current_dir = os.getcwd() os.chdir(outdirect) epub.write('mimetype') log.info('Recursively zipping META-INF and EPUB') for item in os.listdir('.'): if item == 'mimetype': continue recursive_zip(epub, item) os.chdir(current_dir) epub.close()
python
def epub_zip(outdirect): """ Zips up the input file directory into an EPUB file. """ def recursive_zip(zipf, directory, folder=None): if folder is None: folder = '' for item in os.listdir(directory): if os.path.isfile(os.path.join(directory, item)): zipf.write(os.path.join(directory, item), os.path.join(directory, item)) elif os.path.isdir(os.path.join(directory, item)): recursive_zip(zipf, os.path.join(directory, item), os.path.join(folder, item)) log.info('Zipping up the directory {0}'.format(outdirect)) epub_filename = outdirect + '.epub' epub = zipfile.ZipFile(epub_filename, 'w') current_dir = os.getcwd() os.chdir(outdirect) epub.write('mimetype') log.info('Recursively zipping META-INF and EPUB') for item in os.listdir('.'): if item == 'mimetype': continue recursive_zip(epub, item) os.chdir(current_dir) epub.close()
[ "def", "epub_zip", "(", "outdirect", ")", ":", "def", "recursive_zip", "(", "zipf", ",", "directory", ",", "folder", "=", "None", ")", ":", "if", "folder", "is", "None", ":", "folder", "=", "''", "for", "item", "in", "os", ".", "listdir", "(", "directory", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "item", ")", ")", ":", "zipf", ".", "write", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "item", ")", ",", "os", ".", "path", ".", "join", "(", "directory", ",", "item", ")", ")", "elif", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "item", ")", ")", ":", "recursive_zip", "(", "zipf", ",", "os", ".", "path", ".", "join", "(", "directory", ",", "item", ")", ",", "os", ".", "path", ".", "join", "(", "folder", ",", "item", ")", ")", "log", ".", "info", "(", "'Zipping up the directory {0}'", ".", "format", "(", "outdirect", ")", ")", "epub_filename", "=", "outdirect", "+", "'.epub'", "epub", "=", "zipfile", ".", "ZipFile", "(", "epub_filename", ",", "'w'", ")", "current_dir", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "outdirect", ")", "epub", ".", "write", "(", "'mimetype'", ")", "log", ".", "info", "(", "'Recursively zipping META-INF and EPUB'", ")", "for", "item", "in", "os", ".", "listdir", "(", "'.'", ")", ":", "if", "item", "==", "'mimetype'", ":", "continue", "recursive_zip", "(", "epub", ",", "item", ")", "os", ".", "chdir", "(", "current_dir", ")", "epub", ".", "close", "(", ")" ]
Zips up the input file directory into an EPUB file.
[ "Zips", "up", "the", "input", "file", "directory", "into", "an", "EPUB", "file", "." ]
train
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/epub.py#L163-L191
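Worth noting in the record above: recursive_zip passes the same on-disk path as both the file name and the archive name, so its folder argument never affects the archive layout; the result is only correct because epub_zip chdirs into the output directory first. A variant that actually honors folder might look like this (a sketch, not the repository's code):

import os
import zipfile

def recursive_zip(zipf, directory, folder=''):
    for item in os.listdir(directory):
        full = os.path.join(directory, item)
        arc = os.path.join(folder, item)  # archive path relative to the EPUB root
        if os.path.isfile(full):
            zipf.write(full, arc)
        elif os.path.isdir(full):
            recursive_zip(zipf, full, arc)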
pmacosta/pcsv
pcsv/write.py
_write_int
def _write_int(fname, data, append=True):
    """Write data to CSV file with validation."""
    # pylint: disable=W0705
    data_ex = pexdoc.exh.addex(ValueError, "There is no data to save to file")
    fos_ex = pexdoc.exh.addex(
        OSError, "File *[fname]* could not be created: *[reason]*"
    )
    data_ex((len(data) == 0) or ((len(data) == 1) and (len(data[0]) == 0)))
    try:
        pmisc.make_dir(fname)
        mode = "w" if append is False else "a"
        if sys.hexversion < 0x03000000:  # pragma: no cover, no branch
            with open(fname, mode) as file_handle:
                csv.writer(file_handle, delimiter=",").writerows(data)
        else:  # pragma: no cover
            with open(fname, mode, newline="") as file_handle:
                csv.writer(file_handle, delimiter=",").writerows(data)
    except (IOError, OSError) as eobj:
        fos_ex(True, _MF("fname", fname, "reason", eobj.strerror))
python
def _write_int(fname, data, append=True):
    """Write data to CSV file with validation."""
    # pylint: disable=W0705
    data_ex = pexdoc.exh.addex(ValueError, "There is no data to save to file")
    fos_ex = pexdoc.exh.addex(
        OSError, "File *[fname]* could not be created: *[reason]*"
    )
    data_ex((len(data) == 0) or ((len(data) == 1) and (len(data[0]) == 0)))
    try:
        pmisc.make_dir(fname)
        mode = "w" if append is False else "a"
        if sys.hexversion < 0x03000000:  # pragma: no cover, no branch
            with open(fname, mode) as file_handle:
                csv.writer(file_handle, delimiter=",").writerows(data)
        else:  # pragma: no cover
            with open(fname, mode, newline="") as file_handle:
                csv.writer(file_handle, delimiter=",").writerows(data)
    except (IOError, OSError) as eobj:
        fos_ex(True, _MF("fname", fname, "reason", eobj.strerror))
[ "def", "_write_int", "(", "fname", ",", "data", ",", "append", "=", "True", ")", ":", "# pylint: disable=W0705", "data_ex", "=", "pexdoc", ".", "exh", ".", "addex", "(", "ValueError", ",", "\"There is no data to save to file\"", ")", "fos_ex", "=", "pexdoc", ".", "exh", ".", "addex", "(", "OSError", ",", "\"File *[fname]* could not be created: *[reason]*\"", ")", "data_ex", "(", "(", "len", "(", "data", ")", "==", "0", ")", "or", "(", "(", "len", "(", "data", ")", "==", "1", ")", "and", "(", "len", "(", "data", "[", "0", "]", ")", "==", "0", ")", ")", ")", "try", ":", "pmisc", ".", "make_dir", "(", "fname", ")", "mode", "=", "\"w\"", "if", "append", "is", "False", "else", "\"a\"", "if", "sys", ".", "hexversion", "<", "0x03000000", ":", "# pragma: no cover, no branch", "with", "open", "(", "fname", ",", "mode", ")", "as", "file_handle", ":", "csv", ".", "writer", "(", "file_handle", ",", "delimiter", "=", "\",\"", ")", ".", "writerows", "(", "data", ")", "else", ":", "# pragma: no cover", "with", "open", "(", "fname", ",", "mode", ",", "newline", "=", "\"\"", ")", "as", "file_handle", ":", "csv", ".", "writer", "(", "file_handle", ",", "delimiter", "=", "\",\"", ")", ".", "writerows", "(", "data", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "eobj", ":", "fos_ex", "(", "True", ",", "_MF", "(", "\"fname\"", ",", "fname", ",", "\"reason\"", ",", "eobj", ".", "strerror", ")", ")" ]
Write data to CSV file with validation.
[ "Write", "data", "to", "CSV", "file", "with", "validation", "." ]
train
https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/write.py#L37-L55
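A small usage sketch; the leading underscore marks _write_int as module-internal, so real callers would normally go through pcsv's public write API. The output path is a placeholder (pmisc.make_dir creates missing parent directories).

data = [
    ['name', 'value'],
    ['a', 1],
    ['b', 2],
]
_write_int('results/output.csv', data, append=False)  # overwrite
_write_int('results/output.csv', [['c', 3]])          # append (the default)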
curious-containers/cc-core
cc_core/commons/cwl.py
_input_directory_description
def _input_directory_description(input_identifier, arg_item, input_dir): """ Produces a directory description. A directory description is a dictionary containing the following information. - 'path': An array containing the paths to the specified directories. - 'debugInfo': A field to possibly provide debug information. - 'found': A boolean that indicates, if the directory exists in the local filesystem. - 'listing': A listing that shows which files are in the given directory. This could be None. :param input_identifier: The input identifier in the cwl description file :param arg_item: The corresponding job information :param input_dir: TODO :return: A directory description :raise DirectoryError: If the given directory does not exist or is not a directory. """ description = { 'path': None, 'found': False, 'debugInfo': None, 'listing': None, 'basename': None } try: path = location(input_identifier, arg_item) if input_dir and not os.path.isabs(path): path = os.path.join(os.path.expanduser(input_dir), path) description['path'] = path if not os.path.exists(path): raise DirectoryError('path does not exist') if not os.path.isdir(path): raise DirectoryError('path is not a directory') description['listing'] = arg_item.get('listing') description['basename'] = os.path.basename(path) description['found'] = True except: description['debugInfo'] = exception_format() return description
python
def _input_directory_description(input_identifier, arg_item, input_dir): """ Produces a directory description. A directory description is a dictionary containing the following information. - 'path': An array containing the paths to the specified directories. - 'debugInfo': A field to possibly provide debug information. - 'found': A boolean that indicates, if the directory exists in the local filesystem. - 'listing': A listing that shows which files are in the given directory. This could be None. :param input_identifier: The input identifier in the cwl description file :param arg_item: The corresponding job information :param input_dir: TODO :return: A directory description :raise DirectoryError: If the given directory does not exist or is not a directory. """ description = { 'path': None, 'found': False, 'debugInfo': None, 'listing': None, 'basename': None } try: path = location(input_identifier, arg_item) if input_dir and not os.path.isabs(path): path = os.path.join(os.path.expanduser(input_dir), path) description['path'] = path if not os.path.exists(path): raise DirectoryError('path does not exist') if not os.path.isdir(path): raise DirectoryError('path is not a directory') description['listing'] = arg_item.get('listing') description['basename'] = os.path.basename(path) description['found'] = True except: description['debugInfo'] = exception_format() return description
[ "def", "_input_directory_description", "(", "input_identifier", ",", "arg_item", ",", "input_dir", ")", ":", "description", "=", "{", "'path'", ":", "None", ",", "'found'", ":", "False", ",", "'debugInfo'", ":", "None", ",", "'listing'", ":", "None", ",", "'basename'", ":", "None", "}", "try", ":", "path", "=", "location", "(", "input_identifier", ",", "arg_item", ")", "if", "input_dir", "and", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "input_dir", ")", ",", "path", ")", "description", "[", "'path'", "]", "=", "path", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "DirectoryError", "(", "'path does not exist'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "raise", "DirectoryError", "(", "'path is not a directory'", ")", "description", "[", "'listing'", "]", "=", "arg_item", ".", "get", "(", "'listing'", ")", "description", "[", "'basename'", "]", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "description", "[", "'found'", "]", "=", "True", "except", ":", "description", "[", "'debugInfo'", "]", "=", "exception_format", "(", ")", "return", "description" ]
Produces a directory description. A directory description is a dictionary containing the following information. - 'path': The resolved path to the specified directory. - 'debugInfo': A field to possibly provide debug information. - 'found': A boolean that indicates whether the directory exists in the local filesystem. - 'listing': A listing that shows which files are in the given directory. This could be None. - 'basename': The basename of the directory. :param input_identifier: The input identifier in the cwl description file :param arg_item: The corresponding job information :param input_dir: TODO :return: A directory description :raise DirectoryError: Raised internally if the given directory does not exist or is not a directory; the error is caught and reported via 'debugInfo'.
[ "Produces", "a", "directory", "description", ".", "A", "directory", "description", "is", "a", "dictionary", "containing", "the", "following", "information", "." ]
train
https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/cwl.py#L98-L140
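A usage sketch for _input_directory_description (a private helper; the exact keys that location() expects on arg_item, such as 'location', are assumptions made for illustration):

from cc_core.commons.cwl import _input_directory_description  # private helper

arg_item = {'class': 'Directory', 'location': 'data/run1', 'listing': None}  # 'location' key assumed
desc = _input_directory_description('my_input', arg_item, '~/inputs')
# For an existing directory this is expected to yield something like:
# {'path': '/home/user/inputs/data/run1', 'found': True, 'debugInfo': None,
#  'listing': None, 'basename': 'run1'}
# If the path is missing or not a directory, 'found' stays False and
# 'debugInfo' carries the formatted exception instead of propagating it.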
curious-containers/cc-core
cc_core/commons/cwl.py
_check_input_directory_listing
def _check_input_directory_listing(base_directory, listing): """ Raises a DirectoryError if files or directories given in the listing could not be found in the local filesystem. :param base_directory: The path to the directory to check :param listing: A listing given as a list of dictionaries :raise DirectoryError: If the given base directory does not contain all of the subdirectories and files given in the listing. """ for sub in listing: path = os.path.join(base_directory, sub['basename']) if sub['class'] == 'File': if not os.path.isfile(path): raise DirectoryError('File \'{}\' not found but specified in listing.'.format(path)) if sub['class'] == 'Directory': if not os.path.isdir(path): raise DirectoryError('Directory \'{}\' not found but specified in listing.'.format(path)) sub_listing = sub.get('listing') if sub_listing: _check_input_directory_listing(path, sub_listing)
python
def _check_input_directory_listing(base_directory, listing): """ Raises a DirectoryError if files or directories given in the listing could not be found in the local filesystem. :param base_directory: The path to the directory to check :param listing: A listing given as a list of dictionaries :raise DirectoryError: If the given base directory does not contain all of the subdirectories and files given in the listing. """ for sub in listing: path = os.path.join(base_directory, sub['basename']) if sub['class'] == 'File': if not os.path.isfile(path): raise DirectoryError('File \'{}\' not found but specified in listing.'.format(path)) if sub['class'] == 'Directory': if not os.path.isdir(path): raise DirectoryError('Directory \'{}\' not found but specified in listing.'.format(path)) sub_listing = sub.get('listing') if sub_listing: _check_input_directory_listing(path, sub_listing)
[ "def", "_check_input_directory_listing", "(", "base_directory", ",", "listing", ")", ":", "for", "sub", "in", "listing", ":", "path", "=", "os", ".", "path", ".", "join", "(", "base_directory", ",", "sub", "[", "'basename'", "]", ")", "if", "sub", "[", "'class'", "]", "==", "'File'", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "raise", "DirectoryError", "(", "'File \\'{}\\' not found but specified in listing.'", ".", "format", "(", "path", ")", ")", "if", "sub", "[", "'class'", "]", "==", "'Directory'", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "raise", "DirectoryError", "(", "'Directory \\'{}\\' not found but specified in listing'", ".", "format", "(", "path", ")", ")", "sub_listing", "=", "sub", ".", "get", "(", "'listing'", ")", "if", "sub_listing", ":", "_check_input_directory_listing", "(", "path", ",", "sub_listing", ")" ]
Raises a DirectoryError if files or directories given in the listing could not be found in the local filesystem. :param base_directory: The path to the directory to check :param listing: A listing given as a list of dictionaries :raise DirectoryError: If the given base directory does not contain all of the subdirectories and files given in the listing.
[ "Raises", "an", "DirectoryError", "if", "files", "or", "directories", "given", "in", "the", "listing", "could", "not", "be", "found", "in", "the", "local", "filesystem", "." ]
train
https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/cwl.py#L160-L180
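A usage sketch for _check_input_directory_listing, using the listing layout the function actually reads ('class', 'basename', optional nested 'listing'); the import location of DirectoryError is an assumption:

from cc_core.commons.cwl import _check_input_directory_listing, DirectoryError  # DirectoryError import assumed

listing = [
    {'class': 'File', 'basename': 'config.yml'},
    {'class': 'Directory', 'basename': 'results',
     'listing': [{'class': 'File', 'basename': 'out.csv'}]},
]
try:
    _check_input_directory_listing('/data/run1', listing)  # recurses into nested listings
except DirectoryError as err:
    print(err)  # raised as soon as a listed entry is missing on disk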