index | package | name | docstring | code | signature |
---|---|---|---|---|---|
20,176 | gitcomp.repository | Repository | Repository(repo_data: dict) | class Repository:
name: str
full_name: str
private: bool
web_url: str
description: str
forked: str
created_at: datetime
updated_at: datetime
pushed_at: datetime
clone_url: str
stars: int
watches: int
language: str
forks: int
archived: bool
owner: str
open_issues: int
network_count: int
subscribers_count: int
git_score: int
license: str = None
display_rows = ['full_name', 'forks', 'open_issues', 'watches', 'network_count', 'subscribers_count', 'git_score']
__date_fmt = '%Y-%m-%dT%H:%M:%SZ'
__total_weight = 100 / 16
def __init__(self, repo_data: dict):
self.name = repo_data['name']
self.full_name = repo_data['full_name']
self.private = repo_data['private']
self.web_url = repo_data['html_url']
self.description = repo_data['description']
self.forked = repo_data['fork']
self.created_at = datetime.strptime(repo_data['created_at'], Repository.__date_fmt)
self.updated_at = datetime.strptime(repo_data['updated_at'], Repository.__date_fmt)
self.pushed_at = datetime.strptime(repo_data['pushed_at'], Repository.__date_fmt)
self.clone_url = repo_data['clone_url']
self.stars = repo_data['stargazers_count']
self.watches = repo_data['watchers_count']
self.language = repo_data['language']
self.forks = repo_data['forks_count']
self.archived = repo_data['archived']
self.owner = repo_data['owner']['login']
self.open_issues = repo_data['open_issues']
self.network_count = repo_data['network_count']
self.subscribers_count = repo_data['subscribers_count']
if repo_data['license'] is not None:
self.license = repo_data['license']['name']
self.git_score = self.get_score()
def feature_score(self, name, val, weight=1, metric={}):
"""
calculate score based upon val as compared to metric
Metric contains max bounds for each value range and corresponding score
"""
fscore = 0
for i in metric:
if val <= i:
fscore = metric[i]
break
return weight * fscore
def get_score(self):
score = 0
score += self.feature_score('is_forked', self.forked, 1, {False: 4, True: 1})
score += self.feature_score('num_forks', self.forks, 1, {0: 1, 3: 2, 10: 3, sys.maxsize: 4})
score += self.feature_score('stars', self.stars, 1, {0: 1, 3: 2, 10: 3, sys.maxsize: 4})
score += self.feature_score('watchers', self.watches, 1, {0: 1, 3: 2, 10: 3, sys.maxsize: 4})
return int(score * Repository.__total_weight)
| (repo_data: dict) |
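The normalisation constant `__total_weight = 100 / 16` can be read off the four `feature_score` calls in `get_score`: each feature contributes at most 4 points, so a perfect repository earns 16 raw points, which scales to 100. A one-line sanity check (standalone sketch, not part of gitcomp):

```py
# Four features, each capped at 4 points, normalised to a 0-100 scale.
max_raw = 4 * 4                   # is_forked + num_forks + stars + watchers
print(int(max_raw * (100 / 16)))  # -> 100
```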
20,177 | gitcomp.repository | __eq__ | null | from datetime import datetime
from dataclasses import dataclass
import sys
@dataclass(repr=True)
class Repository:
name: str
full_name: str
private: bool
web_url: str
description: str
forked: str
created_at: datetime
updated_at: datetime
pushed_at: datetime
clone_url: str
stars: int
watches: int
language: str
forks: int
archived: bool
owner: str
open_issues: int
network_count: int
subscribers_count: int
git_score: int
license: str = None
display_rows = ['full_name', 'forks', 'open_issues', 'watches', 'network_count', 'subscribers_count', 'git_score']
__date_fmt = '%Y-%m-%dT%H:%M:%SZ'
__total_weight = 100 / 16
def __init__(self, repo_data: dict):
self.name = repo_data['name']
self.full_name = repo_data['full_name']
self.private = repo_data['private']
self.web_url = repo_data['html_url']
self.description = repo_data['description']
self.forked = repo_data['fork']
self.created_at = datetime.strptime(repo_data['created_at'], Repository.__date_fmt)
self.updated_at = datetime.strptime(repo_data['updated_at'], Repository.__date_fmt)
self.pushed_at = datetime.strptime(repo_data['pushed_at'], Repository.__date_fmt)
self.clone_url = repo_data['clone_url']
self.stars = repo_data['stargazers_count']
self.watches = repo_data['watchers_count']
self.language = repo_data['language']
self.forks = repo_data['forks_count']
self.archived = repo_data['archived']
self.owner = repo_data['owner']['login']
self.open_issues = repo_data['open_issues']
self.network_count = repo_data['network_count']
self.subscribers_count = repo_data['subscribers_count']
if repo_data['license'] is not None:
self.license = repo_data['license']['name']
self.git_score = self.get_score()
def feature_score(self, name, val, weight=1, metric={}):
"""
calculate score based upon val as compared to metric
Metric contains max bounds for each value range and corresponding score
"""
fscore = 0
for i in metric:
if val <= i:
fscore = metric[i]
break
return weight * fscore
def get_score(self):
score = 0
score += self.feature_score('is_forked', self.forked, 1, {False: 4, True: 1})
score += self.feature_score('num_forks', self.forks, 1, {0: 1, 3: 2, 10: 3, sys.maxsize: 4})
score += self.feature_score('stars', self.stars, 1, {0: 1, 3: 2, 10: 3, sys.maxsize: 4})
score += self.feature_score('watchers', self.watches, 1, {0: 1, 3: 2, 10: 3, sys.maxsize: 4})
return int(score * Repository.__total_weight)
| (self, other) |
20,178 | gitcomp.repository | __init__ | null | def __init__(self, repo_data: dict):
self.name = repo_data['name']
self.full_name = repo_data['full_name']
self.private = repo_data['private']
self.web_url = repo_data['html_url']
self.description = repo_data['description']
self.forked = repo_data['fork']
self.created_at = datetime.strptime(repo_data['created_at'], Repository.__date_fmt)
self.updated_at = datetime.strptime(repo_data['updated_at'], Repository.__date_fmt)
self.pushed_at = datetime.strptime(repo_data['pushed_at'], Repository.__date_fmt)
self.clone_url = repo_data['clone_url']
self.stars = repo_data['stargazers_count']
self.watches = repo_data['watchers_count']
self.language = repo_data['language']
self.forks = repo_data['forks_count']
self.archived = repo_data['archived']
self.owner = repo_data['owner']['login']
self.open_issues = repo_data['open_issues']
self.network_count = repo_data['network_count']
self.subscribers_count = repo_data['subscribers_count']
if repo_data['license'] is not None:
self.license = repo_data['license']['name']
self.git_score = self.get_score()
| (self, repo_data: dict) |
20,180 | gitcomp.repository | feature_score |
calculate score based upon val as compared to metric
Metric contains max bounds for each value range and corresponding score
| def feature_score(self, name, val, weight=1, metric={}):
"""
calculate score based upon val as compared to metric
Metric contains max bounds for each value range and corresponding score
"""
fscore = 0
for i in metric:
if val <= i:
fscore = metric[i]
break
return weight * fscore
| (self, name, val, weight=1, metric={}) |
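`feature_score` treats `metric` as an ordered mapping of upper bounds to scores: the first bound that `val` does not exceed decides the (weighted) score, so the dict must be supplied in ascending order. A standalone sketch of the same lookup with hypothetical values:

```py
import sys

def feature_score(val, weight=1, metric=None):
    # First key whose bound is >= val wins; relies on ascending insertion order.
    for bound, points in (metric or {}).items():
        if val <= bound:
            return weight * points
    return 0

# 7 stars falls in the (3, 10] band, so it scores 3.
print(feature_score(7, metric={0: 1, 3: 2, 10: 3, sys.maxsize: 4}))  # -> 3
```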
20,181 | gitcomp.repository | get_score | null | def get_score(self):
score = 0
score += self.feature_score('is_forked', self.forked, 1, {False: 4, True: 1})
score += self.feature_score('num_forks', self.forks, 1, {0: 1, 3: 2, 10: 3, sys.maxsize: 4})
score += self.feature_score('stars', self.stars, 1, {0: 1, 3: 2, 10: 3, sys.maxsize: 4})
score += self.feature_score('watchers', self.watches, 1, {0: 1, 3: 2, 10: 3, sys.maxsize: 4})
return int(score * Repository.__total_weight)
| (self) |
20,182 | gitcomp.user | User | User(user_data: dict) | class User:
login: str
followers: int
following: int
site_admin: bool
name: str
company: str
blog: str
location: str
public_repos: int
public_gists: int
git_score: int
organizations: int
display_rows = ['login', 'followers', 'following', 'site_admin', 'name', 'company', 'blog', 'location',
'public_repos', 'public_gists', 'git_score']
__total_weight = 100 / 16
def __init__(self, user_data: dict):
"""
set up a user object containing a user's vital stats, equivalent to using the CLI tool with the -u flag
:param user_data: the json dictionary we get from calling the GitHub API
"""
self.login = user_data['login']
self.followers = user_data['followers']
self.following = user_data['following']
self.site_admin = user_data['site_admin']
self.name = user_data['name']
self.company = user_data['company']
self.blog = user_data['blog']
self.location = user_data['location']
self.public_repos = user_data['public_repos']
self.public_gists = user_data['public_gists']
self.organizations = self.__get_orgs_len()
self.git_score = self.get_score()
def __get_orgs_len(self):
response = NetMod().fetch_org_data(self.login)
return len(response)
def feature_score(self, name, val, weight=1, metric={}):
fscore = 0
for i in metric:
if val <= i:
fscore = metric[i]
break
return weight * fscore
def get_score(self):
score = 0
score += self.feature_score('num_followers', self.followers, 1, {10: 1, 25: 2, 50: 3, sys.maxsize: 4})
# todo-> contrib/ time
score += self.feature_score('num_organizations', self.organizations, 1,
{0: 1, 3: 2, 7: 4, 10: 3, sys.maxsize: 2})
# todo-> repos: forked/org
score += self.feature_score('num_gists', self.public_gists, 1, {0: 1, 4: 2, 10: 3, sys.maxsize: 4})
# todo-> stars given
# todo-> stars received
try:
score += self.feature_score('follow_ratio', self.followers / self.following, 1,
{0.99: 1, 1: 2, 2: 3, sys.maxsize: 4})
except ZeroDivisionError:
pass
return int(score * User.__total_weight)
| (user_data: dict) |
20,183 | gitcomp.user | __get_orgs_len | null | def __get_orgs_len(self):
response = NetMod().fetch_org_data(self.login)
return len(response)
| (self) |
20,184 | gitcomp.user | __eq__ | null | from dataclasses import dataclass
from .net_mod import NetMod
import sys
@dataclass(repr=True)
class User:
login: str
followers: int
following: int
site_admin: bool
name: str
company: str
blog: str
location: str
public_repos: int
public_gists: int
git_score: int
organizations: int
display_rows = ['login', 'followers', 'following', 'site_admin', 'name', 'company', 'blog', 'location',
'public_repos', 'public_gists', 'git_score']
__total_weight = 100 / 16
def __init__(self, user_data: dict):
"""
set up a user object containing a user's vital stats, equivalent to using the CLI tool with the -u flag
:param user_data: the json dictionary we get from calling the GitHub API
"""
self.login = user_data['login']
self.followers = user_data['followers']
self.following = user_data['following']
self.site_admin = user_data['site_admin']
self.name = user_data['name']
self.company = user_data['company']
self.blog = user_data['blog']
self.location = user_data['location']
self.public_repos = user_data['public_repos']
self.public_gists = user_data['public_gists']
self.organizations = self.__get_orgs_len()
self.git_score = self.get_score()
def __get_orgs_len(self):
response = NetMod().fetch_org_data(self.login)
return len(response)
def feature_score(self, name, val, weight=1, metric={}):
fscore = 0
for i in metric:
if val <= i:
fscore = metric[i]
break
return weight * fscore
def get_score(self):
score = 0
score += self.feature_score('num_followers', self.followers, 1, {10: 1, 25: 2, 50: 3, sys.maxsize: 4})
# todo-> contrib/ time
score += self.feature_score('num_organizations', self.organizations, 1,
{0: 1, 3: 2, 7: 4, 10: 3, sys.maxsize: 2})
# todo-> repos: forked/org
score += self.feature_score('num_gists', self.public_gists, 1, {0: 1, 4: 2, 10: 3, sys.maxsize: 4})
# todo-> stars given
# todo-> stars received
try:
score += self.feature_score('follow_ratio', self.followers / self.following, 1,
{0.99: 1, 1: 2, 2: 3, sys.maxsize: 4})
except ZeroDivisionError:
pass
return int(score * User.__total_weight)
| (self, other) |
20,185 | gitcomp.user | __init__ |
set up a user object containing a user's vital stats, equivalent to using the CLI tool with the -u flag
:param user_data: the json dictionary we get from calling the GitHub API
| def __init__(self, user_data: dict):
"""
set up a user object containing a user's vital stats, equivalent to using the CLI tool with the -u flag
:param user_data: the json dictionary we get from calling the GitHub API
"""
self.login = user_data['login']
self.followers = user_data['followers']
self.following = user_data['following']
self.site_admin = user_data['site_admin']
self.name = user_data['name']
self.company = user_data['company']
self.blog = user_data['blog']
self.location = user_data['location']
self.public_repos = user_data['public_repos']
self.public_gists = user_data['public_gists']
self.organizations = self.__get_orgs_len()
self.git_score = self.get_score()
| (self, user_data: dict) |
20,187 | gitcomp.user | feature_score | null | def feature_score(self, name, val, weight=1, metric={}):
fscore = 0
for i in metric:
if val <= i:
fscore = metric[i]
break
return weight * fscore
| (self, name, val, weight=1, metric={}) |
20,188 | gitcomp.user | get_score | null | def get_score(self):
score = 0
score += self.feature_score('num_followers', self.followers, 1, {10: 1, 25: 2, 50: 3, sys.maxsize: 4})
# todo-> contrib/ time
score += self.feature_score('num_organizations', self.organizations, 1,
{0: 1, 3: 2, 7: 4, 10: 3, sys.maxsize: 2})
# todo-> repos: forked/org
score += self.feature_score('num_gists', self.public_gists, 1, {0: 1, 4: 2, 10: 3, sys.maxsize: 4})
# todo-> stars given
# todo-> stars received
try:
score += self.feature_score('follow_ratio', self.followers / self.following, 1,
{0.99: 1, 1: 2, 2: 3, sys.maxsize: 4})
except ZeroDivisionError:
pass
return int(score * User.__total_weight)
| (self) |
20,189 | gitcomp.ser_de | Writer | null | class Writer:
obj: object
prop: str
type: str
out_file: Union[TextIO, str] = stdout
writers: Dict[str, callable]
display_rows: List[str]
__ascii_threshold = 4
file_handle: TextIO = stdout
def __init__(self, prop, obj, out_type, out_file=None):
self.obj = obj
self.prop = prop
self.type = out_type
self.display_rows = sorted(FIELD[prop].value.display_rows)
self.writers = {
'json': self.__to_json,
'csv': self.__to_csv,
'ascii': self.__to_ascii_table,
'html': self.__to_html_table
}
if out_file is not None:
self.out_file = out_file
self.file_handle = open(out_file, 'w')
def __del__(self):
if self.file_handle is not stdout:
self.file_handle.close()
def write(self):
writer = self.__get_writer()
attr = getattr(self.obj, self.prop)
writer(attr)
def __get_writer(self):
return self.writers[self.type]
@writer_wrapper
def __to_json(self, g: object):
json.dump(g, self.file_handle, cls=Serializer, indent=4, sort_keys=True)
@writer_wrapper
def __to_csv(self, g: object):
dict_obj = Writer.__to_dict(g)
headers = Writer.__get_headers(dict_obj)
writer = csv.DictWriter(self.file_handle, fieldnames=headers)
writer.writeheader()
for entry in dict_obj.keys():
writer.writerow(dict_obj[entry])
@writer_wrapper
def __to_ascii_table(self, g: Dict[str, Union[User, Repository]]):
headers, rows = self.__get_table_content(g)
if len(g.keys()) < Writer.__ascii_threshold:
table_writer = self.__get_table_transpose(g, headers, rows)
else:
table_writer = self.__get_table(headers, rows)
self.file_handle.write(table_writer)
@writer_wrapper
def __to_html_table(self, g: Dict[str, Union[User, Repository]]):
headers, rows = self.__get_table_content(g)
table_writer = tabulate(rows, headers=headers, tablefmt='html')
self.file_handle.write(table_writer)
@staticmethod
def __to_dict(g: object) -> Dict[str, Any]:
return json.loads(json.dumps(g, cls=Serializer, indent=4, sort_keys=True))
@staticmethod
def __get_headers(g: Dict[str, Any]) -> List[str]:
members = list(g.keys())
return list(g[members[0]].keys())
@staticmethod
def __get_table_transpose(g: Dict[str, Union[User, Repository]], headers: List[str], rows: List[str]):
new_headers, new_rows = Writer.__get_transpose(g, rows, headers)
return tabulate(new_rows, headers=new_headers, tablefmt='pretty')
@staticmethod
def __get_table(headers: List[str], rows: List[str]):
return tabulate(rows, headers=headers, tablefmt='plain')
def __get_table_content(self, g: Dict[str, Union[User, Repository]]):
dict_repr = Writer.__to_dict(g)
if self.out_file is stdout and self.type == 'ascii':
dict_repr = self.__summarize(dict_repr)
headers = Writer.__get_headers(dict_repr)
rows = Writer.__get_entries_as_rows(dict_repr)
return headers, rows
@staticmethod
def __get_entries_as_rows(g: Dict[str, Any]) -> List[Any]:
rows = []
for entry in g.keys():
rows.append(list(g[entry].values()))
return rows
@staticmethod
def __get_transpose(g: Dict[str, Union[User, Repository]], rows, headers):
new_rows = []
new_headers = [' '] + list(g.keys())
for i in range(len(rows[0])):
new_rows.append([rows[j][i] for j in range(len(rows))])
for i in range(len(new_rows)):
new_rows[i] = [headers[i]] + new_rows[i]
return new_headers, new_rows
def __summarize(self, dict_repr: Dict[str, Any]):
summary = {}
for entry in dict_repr:
summary[entry] = {k: dict_repr[entry][k] for k in self.display_rows}
return summary
| (prop: str, obj: object, out_type, out_file: Union[TextIO, str] = None) |
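For result sets smaller than `__ascii_threshold`, the ASCII writer transposes the table so each entity becomes a column and the headers become the first column. A standalone sketch of that transpose step with made-up rows:

```py
# Headers become the first column; each original row becomes a column.
headers = ["full_name", "forks", "git_score"]
rows = [["org/repo-a", 3, 75], ["org/repo-b", 10, 88]]
names = ["repo-a", "repo-b"]

new_headers = [" "] + names
new_rows = [[headers[i]] + [row[i] for row in rows] for i in range(len(headers))]
# new_rows -> [['full_name', 'org/repo-a', 'org/repo-b'],
#              ['forks', 3, 10],
#              ['git_score', 75, 88]]
```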
20,190 | gitcomp.ser_de | __get_entries_as_rows | null | @staticmethod
def __get_entries_as_rows(g: Dict[str, Any]) -> List[Any]:
rows = []
for entry in g.keys():
rows.append(list(g[entry].values()))
return rows
| (g: Dict[str, Any]) -> List[Any] |
20,191 | gitcomp.ser_de | __get_headers | null | @staticmethod
def __get_headers(g: Dict[str, Any]) -> List[str]:
members = list(g.keys())
return list(g[members[0]].keys())
| (g: Dict[str, Any]) -> List[str] |
20,192 | gitcomp.ser_de | __get_table | null | @staticmethod
def __get_table(headers: List[str], rows: List[str]):
return tabulate(rows, headers=headers, tablefmt='plain')
| (headers: List[str], rows: List[str]) |
20,193 | gitcomp.ser_de | __get_table_content | null | def __get_table_content(self, g: Dict[str, Union[User, Repository]]):
dict_repr = Writer.__to_dict(g)
if self.out_file is stdout and self.type == 'ascii':
dict_repr = self.__summarize(dict_repr)
headers = Writer.__get_headers(dict_repr)
rows = Writer.__get_entries_as_rows(dict_repr)
return headers, rows
| (self, g: Dict[str, Union[gitcomp.user.User, gitcomp.repository.Repository]]) |
20,194 | gitcomp.ser_de | __get_table_transpose | null | @staticmethod
def __get_table_transpose(g: Dict[str, Union[User, Repository]], headers: List[str], rows: List[str]):
new_headers, new_rows = Writer.__get_transpose(g, rows, headers)
return tabulate(new_rows, headers=new_headers, tablefmt='pretty')
| (g: Dict[str, Union[gitcomp.user.User, gitcomp.repository.Repository]], headers: List[str], rows: List[str]) |
20,195 | gitcomp.ser_de | __get_transpose | null | @staticmethod
def __get_transpose(g: Dict[str, Union[User, Repository]], rows, headers):
new_rows = []
new_headers = [' '] + list(g.keys())
for i in range(len(rows[0])):
new_rows.append([rows[j][i] for j in range(len(rows))])
for i in range(len(new_rows)):
new_rows[i] = [headers[i]] + new_rows[i]
return new_headers, new_rows
| (g: Dict[str, Union[gitcomp.user.User, gitcomp.repository.Repository]], rows, headers) |
20,196 | gitcomp.ser_de | __get_writer | null | def __get_writer(self):
return self.writers[self.type]
| (self) |
20,197 | gitcomp.ser_de | __summarize | null | def __summarize(self, dict_repr: Dict[str, Any]):
summary = {}
for entry in dict_repr:
summary[entry] = {k: dict_repr[entry][k] for k in self.display_rows}
return summary
| (self, dict_repr: Dict[str, Any]) |
20,198 | gitcomp.ser_de | wrapper | null | def writer_wrapper(writer):
def wrapper(ref, g: object):
writer(ref, g)
if ref.file_handle is stdout:
ref.file_handle.write('\n')
return wrapper
| (ref, g: object) |
20,200 | gitcomp.ser_de | __to_dict | null | @staticmethod
def __to_dict(g: object) -> Dict[str, Any]:
return json.loads(json.dumps(g, cls=Serializer, indent=4, sort_keys=True))
| (g: object) -> Dict[str, Any] |
20,203 | gitcomp.ser_de | __del__ | null | def __del__(self):
if self.file_handle is not stdout:
self.file_handle.close()
| (self) |
20,204 | gitcomp.ser_de | __init__ | null | def __init__(self, prop, obj, out_type, out_file=None):
self.obj = obj
self.prop = prop
self.type = out_type
self.display_rows = sorted(FIELD[prop].value.display_rows)
self.writers = {
'json': self.__to_json,
'csv': self.__to_csv,
'ascii': self.__to_ascii_table,
'html': self.__to_html_table
}
if out_file is not None:
self.out_file = out_file
self.file_handle = open(out_file, 'w')
| (self, prop, obj, out_type, out_file=None) |
20,205 | gitcomp.ser_de | write | null | def write(self):
writer = self.__get_writer()
attr = getattr(self.obj, self.prop)
writer(attr)
| (self) |
20,211 | fastapi_health.route | health | null | def health(
conditions: List[Callable[..., Union[Dict[str, Any], bool]]],
*,
success_handler: Callable[..., Awaitable] = default_handler,
failure_handler: Callable[..., Awaitable] = default_handler,
success_status: int = 200,
failure_status: int = 503,
):
async def endpoint(**dependencies):
if all(dependencies.values()):
handler = success_handler
status_code = success_status
else:
handler = failure_handler
status_code = failure_status
output = await handler(**dependencies)
return JSONResponse(jsonable_encoder(output), status_code=status_code)
params = []
for condition in conditions:
params.append(
Parameter(
f"{condition.__name__}",
kind=Parameter.POSITIONAL_OR_KEYWORD,
annotation=bool,
default=Depends(condition),
)
)
endpoint.__signature__ = Signature(params)
return endpoint
| (conditions: List[Callable[..., Union[Dict[str, Any], bool]]], *, success_handler: Callable[..., Awaitable] = <function default_handler at 0x7fc8cb313d00>, failure_handler: Callable[..., Awaitable] = <function default_handler at 0x7fc8cb313d00>, success_status: int = 200, failure_status: int = 503) |
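`health()` builds an async endpoint whose signature is synthesised from the condition callables, so FastAPI injects each one via `Depends` and the endpoint only inspects the resolved boolean values. A minimal usage sketch (the check function is hypothetical):

```py
from fastapi import FastAPI
from fastapi_health import health

def database_online() -> bool:
    # Replace with a real connectivity probe.
    return True

app = FastAPI()
app.add_api_route("/health", health([database_online]))
```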
20,215 | pandas_datareader.data | DataReader |
Imports data from a number of online sources.
Currently supports Google Finance, St. Louis FED (FRED),
and Kenneth French's data library, among others.
Parameters
----------
name : str or list of strs
the name of the dataset. Some data sources (IEX, fred) will
accept a list of names.
data_source: {str, None}
the data source ("iex", "fred", "ff")
start : string, int, date, datetime, Timestamp
left boundary for range (defaults to 1/1/2010)
end : string, int, date, datetime, Timestamp
right boundary for range (defaults to today)
retry_count : {int, 3}
Number of times to retry query request.
pause : {numeric, 0.001}
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
session : Session, default None
requests.sessions.Session instance to be used
api_key : (str, None)
Optional parameter to specify an API key for certain data sources.
Examples
----------
# Data from Google Finance
aapl = DataReader("AAPL", "iex")
# Price and volume data from IEX
tops = DataReader(["GS", "AAPL"], "iex-tops")
# Top of book executions from IEX
gs = DataReader("GS", "iex-last")
# Real-time depth of book data from IEX
gs = DataReader("GS", "iex-book")
# Data from FRED
vix = DataReader("VIXCLS", "fred")
# Data from Fama/French
ff = DataReader("F-F_Research_Data_Factors", "famafrench")
ff = DataReader("F-F_Research_Data_Factors_weekly", "famafrench")
ff = DataReader("6_Portfolios_2x3", "famafrench")
ff = DataReader("F-F_ST_Reversal_Factor", "famafrench")
| def get_dailysummary_iex(*args, **kwargs):
"""
Returns a summary of daily market volume statistics. Without parameters,
this will return the most recent trading session by default.
Parameters
----------
start : string, int, date, datetime, Timestamp
The beginning of the date range.
end : string, int, date, datetime, Timestamp
The end of the date range.
Reference: https://www.iextrading.com/developer/docs/#historical-daily
:return: DataFrame
"""
from pandas_datareader.iex.stats import DailySummaryReader
return DailySummaryReader(*args, **kwargs).read()
| (name, data_source=None, start=None, end=None, retry_count=3, pause=0.1, session=None, api_key=None) |
20,216 | pandas_datareader.data | Options | null | def Options(symbol, data_source=None, session=None):
if data_source is None:
warnings.warn(
"Options(symbol) is deprecated, use Options(symbol,"
" data_source) instead",
FutureWarning,
stacklevel=2,
)
data_source = "yahoo"
if data_source == "yahoo":
raise ImmediateDeprecationError(DEP_ERROR_MSG.format("Yahoo Options"))
return YahooOptions(symbol, session=session)
else:
raise NotImplementedError("currently only yahoo supported")
| (symbol, data_source=None, session=None) |
20,230 | pandas_datareader.yahoo.components | _get_data |
Returns DataFrame containing list of component information for
index represented in idx_sym from yahoo. Includes component symbol
(ticker), exchange, and name.
Parameters
----------
idx_sym : str
Stock index symbol
Examples:
'^DJI' (Dow Jones Industrial Average)
'^NYA' (NYSE Composite)
'^IXIC' (NASDAQ Composite)
See: http://finance.yahoo.com/indices for other index symbols
Returns
-------
idx_df : DataFrame
| def _get_data(idx_sym): # pragma: no cover
"""
Returns DataFrame containing list of component information for
index represented in idx_sym from yahoo. Includes component symbol
(ticker), exchange, and name.
Parameters
----------
idx_sym : str
Stock index symbol
Examples:
'^DJI' (Dow Jones Industrial Average)
'^NYA' (NYSE Composite)
'^IXIC' (NASDAQ Composite)
See: http://finance.yahoo.com/indices for other index symbols
Returns
-------
idx_df : DataFrame
"""
raise ImmediateDeprecationError(DEP_ERROR_MSG.format("Yahoo Components"))
stats = "snx"
# URL of form:
# http://download.finance.yahoo.com/d/quotes.csv?s=@%5EIXIC&f=snxl1d1t1c1ohgv
url = _URL + "s={0}&f={1}&e=.csv&h={2}"
idx_mod = idx_sym.replace("^", "@%5E")
url_str = url.format(idx_mod, stats, 1)
idx_df = DataFrame()
mask = [True]
comp_idx = 1
# LOOP across component index structure,
# break when no new components are found
while True in mask:
url_str = url.format(idx_mod, stats, comp_idx)
with urlopen(url_str) as resp:
raw = resp.read()
lines = raw.decode("utf-8").strip().strip('"').split('"\r\n"')
lines = [line.strip().split('","') for line in lines]
temp_df = DataFrame(lines, columns=["ticker", "name", "exchange"])
temp_df = temp_df.drop_duplicates()
temp_df = temp_df.set_index("ticker")
mask = ~temp_df.index.isin(idx_df.index)
comp_idx = comp_idx + 50
idx_df = idx_df.append(temp_df[mask])
return idx_df
| (idx_sym) |
20,231 | pandas_datareader.data | get_dailysummary_iex |
Returns a summary of daily market volume statistics. Without parameters,
this will return the most recent trading session by default.
Parameters
----------
start : string, int, date, datetime, Timestamp
The beginning of the date range.
end : string, int, date, datetime, Timestamp
The end of the date range.
Reference: https://www.iextrading.com/developer/docs/#historical-daily
:return: DataFrame
| def get_dailysummary_iex(*args, **kwargs):
"""
Returns a summary of daily market volume statistics. Without parameters,
this will return the most recent trading session by default.
Parameters
----------
start : string, int, date, datetime, Timestamp
The beginning of the date range.
end : string, int, date, datetime, Timestamp
The end of the date range.
Reference: https://www.iextrading.com/developer/docs/#historical-daily
:return: DataFrame
"""
from pandas_datareader.iex.stats import DailySummaryReader
return DailySummaryReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,232 | pandas_datareader.data | get_data_alphavantage | null | def get_data_alphavantage(*args, **kwargs):
return AVTimeSeriesReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,233 | pandas_datareader.data | get_data_enigma | null | def get_data_enigma(*args, **kwargs):
return EnigmaReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,234 | pandas_datareader.data | get_data_famafrench | null | def get_data_famafrench(*args, **kwargs):
return FamaFrenchReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,235 | pandas_datareader.data | get_data_fred | null | def get_data_fred(*args, **kwargs):
return FredReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,236 | pandas_datareader.data | get_data_moex | null | def get_data_moex(*args, **kwargs):
return MoexReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,237 | pandas_datareader.data | get_data_quandl | null | def get_data_quandl(*args, **kwargs):
return QuandlReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,238 | pandas_datareader.data | get_data_stooq | null | def get_data_stooq(*args, **kwargs):
return StooqDailyReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,239 | pandas_datareader.data | get_data_tiingo | null | def get_data_tiingo(*args, **kwargs):
return TiingoDailyReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,240 | pandas_datareader.data | get_data_yahoo | null | def get_data_yahoo(*args, **kwargs):
return YahooDailyReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,241 | pandas_datareader.data | get_data_yahoo_actions | null | def get_data_yahoo_actions(*args, **kwargs):
return YahooActionReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,242 | pandas_datareader.data | get_iex_book |
Returns an array of dictionaries with depth of book data from IEX for up to
10 securities at a time. Returns a dictionary of the bid and ask books.
Parameters
----------
symbols : str, List[str]
A string or list of strings of valid tickers
service : str
One of:
- 'book': Live depth of book data
- 'op-halt-status': Checks to see if the exchange has instituted a halt
- 'security-event': Denotes individual security related event
- 'ssr-status': Short Sale Price Test restrictions, per reg 201 of SHO
- 'system-event': Relays current feed status (i.e. market open)
- 'trades': Retrieves recent executions, trade size/price and flags
- 'trade-breaks': Lists execution breaks for the current trading session
- 'trading-status': Returns status and cause codes for securities
Returns
-------
DataFrame
| def get_iex_book(*args, **kwargs):
"""
Returns an array of dictionaries with depth of book data from IEX for up to
10 securities at a time. Returns a dictionary of the bid and ask books.
Parameters
----------
symbols : str, List[str]
A string or list of strings of valid tickers
service : str
One of:
- 'book': Live depth of book data
- 'op-halt-status': Checks to see if the exchange has instituted a halt
- 'security-event': Denotes individual security related event
- 'ssr-status': Short Sale Price Test restrictions, per reg 201 of SHO
- 'system-event': Relays current feed status (i.e. market open)
- 'trades': Retrieves recent executions, trade size/price and flags
- 'trade-breaks': Lists execution breaks for the current trading session
- 'trading-status': Returns status and cause codes for securities
Returns
-------
DataFrame
"""
return IEXDeep(*args, **kwargs).read()
| (*args, **kwargs) |
20,243 | pandas_datareader.data | get_iex_data_tiingo | null | def get_iex_data_tiingo(*args, **kwargs):
return TiingoIEXHistoricalReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,244 | pandas_datareader.data | get_iex_symbols |
Returns a list of all equity symbols available for trading on IEX. Accepts
no additional parameters.
Reference: https://www.iextrading.com/developer/docs/#symbols
:return: DataFrame
| def get_iex_symbols(*args, **kwargs):
"""
Returns a list of all equity symbols available for trading on IEX. Accepts
no additional parameters.
Reference: https://www.iextrading.com/developer/docs/#symbols
:return: DataFrame
"""
from pandas_datareader.iex.ref import SymbolsReader
return SymbolsReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,245 | pandas_datareader.data | get_last_iex | null | def get_last_iex(*args, **kwargs):
return IEXLasts(*args, **kwargs).read()
| (*args, **kwargs) |
20,246 | pandas_datareader.data | get_markets_iex |
Returns near-real time volume data across markets segregated by tape
and including a percentage of overall volume during the session
This endpoint does not accept any parameters.
Reference: https://www.iextrading.com/developer/docs/#markets
Returns
-------
DataFrame
| def get_markets_iex(*args, **kwargs):
"""
Returns near-real time volume data across markets segregated by tape
and including a percentage of overall volume during the session
This endpoint does not accept any parameters.
Reference: https://www.iextrading.com/developer/docs/#markets
Returns
-------
DataFrame
"""
from pandas_datareader.iex.market import MarketReader
return MarketReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,247 | pandas_datareader.nasdaq_trader | get_nasdaq_symbols |
Get the list of all available equity symbols from Nasdaq.
Returns
-------
nasdaq_tickers : pandas.DataFrame
DataFrame with company tickers, names, and other properties.
| def get_nasdaq_symbols(retry_count=3, timeout=30, pause=None):
"""
Get the list of all available equity symbols from Nasdaq.
Returns
-------
nasdaq_tickers : pandas.DataFrame
DataFrame with company tickers, names, and other properties.
"""
global _ticker_cache
if timeout < 0:
raise ValueError("timeout must be >= 0, not %r" % (timeout,))
if pause is None:
pause = timeout / 3
elif pause < 0:
raise ValueError("pause must be >= 0, not %r" % (pause,))
if _ticker_cache is None:
while retry_count > 0:
try:
_ticker_cache = _download_nasdaq_symbols(timeout=timeout)
retry_count = -1
except RemoteDataError:
# retry on any exception
retry_count -= 1
if retry_count <= 0:
raise
else:
time.sleep(pause)
return _ticker_cache
| (retry_count=3, timeout=30, pause=None) |
20,248 | pandas_datareader.data | get_quote_yahoo | null | def get_quote_yahoo(*args, **kwargs):
return YahooQuotesReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,249 | pandas_datareader.data | get_recent_iex |
Returns market volume and trade routing statistics for recent sessions.
Also reports IEX's relative market share, lit share volume and a boolean
halfday indicator.
Reference: https://www.iextrading.com/developer/docs/#recent
Returns
-------
DataFrame
| def get_recent_iex(*args, **kwargs):
"""
Returns market volume and trade routing statistics for recent sessions.
Also reports IEX's relative market share, lit share volume and a boolean
halfday indicator.
Reference: https://www.iextrading.com/developer/docs/#recent
Returns
-------
DataFrame
"""
from pandas_datareader.iex.stats import RecentReader
return RecentReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,250 | pandas_datareader.data | get_records_iex |
Returns the record value, record date, recent value, and 30-day average for
market volume, # of symbols traded, # of routed trades and notional value.
This function accepts no additional parameters.
Reference: https://www.iextrading.com/developer/docs/#records
:return: DataFrame
| def get_records_iex(*args, **kwargs):
"""
Returns the record value, record date, recent value, and 30-day average for
market volume, # of symbols traded, # of routed trades and notional value.
This function accepts no additional parameters.
Reference: https://www.iextrading.com/developer/docs/#records
:return: DataFrame
"""
from pandas_datareader.iex.stats import RecordsReader
return RecordsReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,251 | pandas_datareader.data | get_summary_iex |
Returns an aggregated monthly summary of market volume and a variety of
related metrics for trades by lot size, security market cap, and venue.
In the absence of parameters, this will return month-to-date statistics.
For ranges spanning multiple months, this will return one row per month.
start : string, int, date, datetime, Timestamp
A datetime object - the beginning of the date range.
end : string, int, date, datetime, Timestamp
A datetime object - the end of the date range.
Returns
-------
DataFrame
| def get_summary_iex(*args, **kwargs):
"""
Returns an aggregated monthly summary of market volume and a variety of
related metrics for trades by lot size, security market cap, and venue.
In the absence of parameters, this will return month-to-date statistics.
For ranges spanning multiple months, this will return one row per month.
start : string, int, date, datetime, Timestamp
A datetime object - the beginning of the date range.
end : string, int, date, datetime, Timestamp
A datetime object - the end of the date range.
Returns
-------
DataFrame
"""
from pandas_datareader.iex.stats import MonthlySummaryReader
return MonthlySummaryReader(*args, **kwargs).read()
| (*args, **kwargs) |
20,252 | pandas_datareader.data | get_tops_iex | null | def get_tops_iex(*args, **kwargs):
return IEXTops(*args, **kwargs).read()
| (*args, **kwargs) |
20,263 | pandas_datareader | test |
Run the test suite
Parameters
----------
extra_args : {str, List[str]}
A string or list of strings to pass to pytest. Default is
["--only-stable", "--skip-requires-api-key"]
| def test(extra_args=None):
"""
Run the test suite
Parameters
----------
extra_args : {str, List[str]}
A string or list of strings to pass to pytest. Default is
["--only-stable", "--skip-requires-api-key"]
"""
try:
import pytest
except ImportError as err:
raise ImportError("Need pytest>=5.0.1 to run tests") from err
cmd = ["--only-stable", "--skip-requires-api-key"]
if extra_args:
if not isinstance(extra_args, list):
extra_args = [extra_args]
cmd = extra_args
cmd += [PKG]
joined = " ".join(cmd)
print(f"running: pytest {joined}")
sys.exit(pytest.main(cmd))
| (extra_args=None) |
20,267 | flaky.flaky_decorator | flaky |
Decorator used to mark a test as "flaky".
:param max_runs:
The maximum number of times the decorated test will be run.
:type max_runs:
`int`
:param min_passes:
The minimum number of times the test must pass to be a success.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
:return:
A wrapper function that includes attributes describing the flaky test.
:rtype:
`callable`
| def flaky(max_runs=None, min_passes=None, rerun_filter=None):
"""
Decorator used to mark a test as "flaky".
:param max_runs:
The maximum number of times the decorated test will be run.
:type max_runs:
`int`
:param min_passes:
The minimum number of times the test must pass to be a success.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
:return:
A wrapper function that includes attributes describing the flaky test.
:rtype:
`callable`
"""
# In case @flaky is applied to a function or class without arguments
# (and without parentheses), max_runs will refer to the wrapped object.
# In this case, the default value can be used.
wrapped = None
if hasattr(max_runs, '__call__'):
wrapped, max_runs = max_runs, None
attrib = default_flaky_attributes(max_runs, min_passes, rerun_filter)
def wrapper(wrapped_object):
for name, value in attrib.items():
setattr(wrapped_object, name, value)
return wrapped_object
return wrapper(wrapped) if wrapped is not None else wrapper
| (max_runs=None, min_passes=None, rerun_filter=None) |
20,270 | start_sdk.cf_img | CFImage |
# Cloudflare [Images](https://developers.cloudflare.com/images/cloudflare-images/) API v4
Add secrets to .env file:
Field in .env | Cloudflare API Credential | Where credential found
:--|:--:|:--
`CF_IMG_ACCT` | Account ID | `https://dash.cloudflare.com/<acct_id>/images/images`
`CF_IMG_HASH` | Account Hash | `https://dash.cloudflare.com/<acct_id>/images/images`
`CF_IMG_TOKEN` | API Secret | Generate / save via `https://dash.cloudflare.com/<acct_id>/profile/api-tokens`
Examples:
```py title="Example Usage" linenums="1" hl_lines="5 18"
>>> from pathlib import Path
>>> from start_sdk import CFImage
>>> import os
>>> import io
>>> cf = CFImage() # will error out since missing key values
Traceback (most recent call last):
pydantic.error_wrappers.ValidationError: 1 validation error for CFImage
cf_img_hash
field required (type=value_error.missing)
>>> os.environ['CF_ACCT_ID'] = "ABC"
>>> cf = CFImage() # will error out since still missing other values
Traceback (most recent call last):
pydantic.error_wrappers.ValidationError: 1 validation error for CFImage
cf_img_hash
field required (type=value_error.missing)
>>> # we'll add all the values needed
>>> os.environ['CF_IMG_HASH'], os.environ['CF_IMG_TOKEN'] = "DEF", "XYZ"
>>> cf = CFImage() # no longer errors out
>>> cf.headers
{'Authorization': 'Bearer XYZ'}
>>> cf.base_api
'https://api.cloudflare.com/client/v4/accounts/ABC/images/v1'
>>> cf.base_delivery
'https://imagedelivery.net/DEF'
>>> cf.url('hi-bob', 'w=400,sharpen=3')
'https://imagedelivery.net/DEF/hi-bob/w=400,sharpen=3'
>>> p = Path().cwd() / "img" / "screenshot.png"
>>> p.exists() # Sample image found in `/img/screenshot.png`
True
>>> img = io.BytesIO(p.read_bytes())
>>> type(img)
<class '_io.BytesIO'>
>>> # Can now use img in `cf.post('sample_id', img)`
```
| class CFImage(BaseSettings):
"""
# Cloudflare [Images](https://developers.cloudflare.com/images/cloudflare-images/) API v4
Add secrets to .env file:
Field in .env | Cloudflare API Credential | Where credential found
:--|:--:|:--
`CF_IMG_ACCT` | Account ID | `https://dash.cloudflare.com/<acct_id>/images/images`
`CF_IMG_HASH` | Account Hash | `https://dash.cloudflare.com/<acct_id>/images/images`
`CF_IMG_TOKEN` | API Secret | Generate / save via `https://dash.cloudflare.com/<acct_id>/profile/api-tokens`
Examples:
```py title="Example Usage" linenums="1" hl_lines="5 18"
>>> from pathlib import Path
>>> from start_sdk import CFImage
>>> import os
>>> import io
>>> cf = CFImage() # will error out since missing key values
Traceback (most recent call last):
pydantic.error_wrappers.ValidationError: 1 validation error for CFImage
cf_img_hash
field required (type=value_error.missing)
>>> os.environ['CF_ACCT_ID'] = "ABC"
>>> cf = CFImage() # will error out since still missing other values
Traceback (most recent call last):
pydantic.error_wrappers.ValidationError: 1 validation error for CFImage
cf_img_hash
field required (type=value_error.missing)
>>> # we'll add all the values needed
>>> os.environ['CF_IMG_HASH'], os.environ['CF_IMG_TOKEN'] = "DEF", "XYZ"
>>> cf = CFImage() # no longer errors out
>>> cf.headers
{'Authorization': 'Bearer XYZ'}
>>> cf.base_api
'https://api.cloudflare.com/client/v4/accounts/ABC/images/v1'
>>> cf.base_delivery
'https://imagedelivery.net/DEF'
>>> cf.url('hi-bob', 'w=400,sharpen=3')
'https://imagedelivery.net/DEF/hi-bob/w=400,sharpen=3'
>>> p = Path().cwd() / "img" / "screenshot.png"
>>> p.exists() # Sample image found in `/img/screenshot.png`
True
>>> img = io.BytesIO(p.read_bytes())
>>> type(img)
<class '_io.BytesIO'>
>>> # Can now use img in `cf.post('sample_id', img)`
```
""" # noqa: E501
acct_id: str = Field(
default=...,
repr=False,
title="Cloudflare Account ID",
description="Used in other Cloudflare services like R2, etc.",
env="CF_ACCT_ID",
)
cf_img_hash: str = Field(
default=...,
repr=False,
title="Cloudflare Image Hash",
description="Assigned when you create a Cloudflare Images account",
env="CF_IMG_HASH",
)
api_token: str = Field(
default=...,
repr=False,
title="Cloudflare Image API Token",
description="Secure token to perform API operations",
env="CF_IMG_TOKEN",
)
client_api_ver: str = Field(
default="v4",
title="Cloudflare Client API Version",
description="Used in the middle of the URL in API requests.",
env="CLOUDFLARE_CLIENT_API_VERSION",
)
images_api_ver: str = Field(
default="v1",
title="Cloudflare Images API Version",
description="Used at the end of URL in API requests.",
env="CLOUDFLARE_IMAGES_API_VERSION",
)
timeout: int = Field(
default=60,
env="CF_IMG_TOKEN_TIMEOUT",
)
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
@property
def headers(self) -> dict:
return {"Authorization": f"Bearer {self.api_token}"}
@property
def client(self):
return httpx.Client(timeout=self.timeout)
@property
def base_api(self):
"""Construct URL based on Cloudflare API [format](https://developers.cloudflare.com/images/cloudflare-images/api-request/)""" # noqa: E501
client = f"client/{self.client_api_ver}"
account = f"accounts/{self.acct_id}"
images = f"images/{self.images_api_ver}"
return "/".join([CF_API_URL, client, account, images])
@property
def base_delivery(self):
"""The images are served with the following format:
`https://imagedelivery.net/<ACCOUNT_HASH>/<IMAGE_ID>/<VARIANT_NAME>`
This property constructs the first part:
`https://imagedelivery.net/<ACCOUNT_HASH>`
See Cloudflare [docs](https://developers.cloudflare.com/images/cloudflare-images/serve-images/).
""" # noqa: E501
return "/".join([CF_DELIVER, self.cf_img_hash])
def url(self, img_id: str, variant: str = "public"):
"""Generates url based on the Cloudflare hash of the account. The `variant` is based on
how these are customized on Cloudflare Images. See also flexible variant [docs](https://developers.cloudflare.com/images/cloudflare-images/transform/flexible-variants/)
""" # noqa: E501
return "/".join([self.base_delivery, img_id, variant])
def get(self, img_id: str, *args, **kwargs) -> httpx.Response:
"""Issue httpx GET request to the image found in storage. Assuming request like
`CFImage().get('target-img-id')`, returns a response with metadata:
Examples:
```py title="Response object from Cloudflare Images"
>>> # CFImage().get('target-img-id') commented out since hypothetical
b'{
"result": {
"id": "target-img-id",
"filename": "target-img-id",
"uploaded": "2023-02-20T09:09:41.755Z",
"requireSignedURLs": false,
"variants": [
"https://imagedelivery.net/<hash>/<target-img-id>/public",
"https://imagedelivery.net/<hash>/<target-img-id>/cover",
"https://imagedelivery.net/<hash>/<target-img-id>/avatar",
"https://imagedelivery.net/<hash>/<target-img-id>/uniform"
]
},
"success": true,
"errors": [],
"messages": []
}'
```
"""
return self.client.get(
url=f"{self.base_api}/{img_id}",
headers=self.headers,
*args,
**kwargs,
)
def delete(self, img_id: str, *args, **kwargs) -> httpx.Response:
"""Issue httpx [DELETE](https://developers.cloudflare.com/images/cloudflare-images/transform/delete-images/) request to the image.""" # noqa: E501
return self.client.delete(
url=f"{self.base_api}/{img_id}",
headers=self.headers,
*args,
**kwargs,
)
def post(self, img_id: str, img: bytes, *args, **kwargs) -> httpx.Response:
"""Issue httpx [POST](https://developers.cloudflare.com/images/cloudflare-images/upload-images/upload-via-url/) request to upload image.""" # noqa: E501
return self.client.post(
url=self.base_api,
headers=self.headers,
data={"id": img_id},
files={"file": (img_id, img)},
*args,
**kwargs,
)
def upsert(self, img_id: str, img: bytes) -> httpx.Response:
"""Ensures a unique id name by first deleting the `img_id` from storage and then
uploading the `img`."""
self.delete(img_id)
return self.post(img_id, img)
| (_env_file: Union[str, os.PathLike, List[Union[str, os.PathLike]], Tuple[Union[str, os.PathLike], ...], NoneType] = '<object object at 0x7f74a8a56270>', _env_file_encoding: Optional[str] = None, _env_nested_delimiter: Optional[str] = None, _secrets_dir: Union[str, os.PathLike, NoneType] = None, *, acct_id: str, cf_img_hash: str, api_token: str, client_api_ver: str = 'v4', images_api_ver: str = 'v1', timeout: int = 60) -> None |
20,271 | start_sdk.cf_img | delete | Issue httpx [DELETE](https://developers.cloudflare.com/images/cloudflare-images/transform/delete-images/) request to the image. | def delete(self, img_id: str, *args, **kwargs) -> httpx.Response:
"""Issue httpx [DELETE](https://developers.cloudflare.com/images/cloudflare-images/transform/delete-images/) request to the image.""" # noqa: E501
return self.client.delete(
url=f"{self.base_api}/{img_id}",
headers=self.headers,
*args,
**kwargs,
)
| (self, img_id: str, *args, **kwargs) -> httpx.Response |
20,272 | start_sdk.cf_img | get | Issue httpx GET request to the image found in storage. Assuming request like
`CFImage().get('target-img-id')`, returns a response with metadata:
Examples:
```py title="Response object from Cloudflare Images"
>>> # CFImage().get('target-img-id') commented out since hypothetical
b'{
"result": {
"id": "target-img-id",
"filename": "target-img-id",
"uploaded": "2023-02-20T09:09:41.755Z",
"requireSignedURLs": false,
"variants": [
"https://imagedelivery.net/<hash>/<target-img-id>/public",
"https://imagedelivery.net/<hash>/<target-img-id>/cover",
"https://imagedelivery.net/<hash>/<target-img-id>/avatar",
"https://imagedelivery.net/<hash>/<target-img-id>/uniform"
]
},
"success": true,
"errors": [],
"messages": []
}'
```
| def get(self, img_id: str, *args, **kwargs) -> httpx.Response:
"""Issue httpx GET request to the image found in storage. Assuming request like
`CFImage().get('target-img-id')`, returns a response with metadata:
Examples:
```py title="Response object from Cloudflare Images"
>>> # CFImage().get('target-img-id') commented out since hypothetical
b'{
"result": {
"id": "target-img-id",
"filename": "target-img-id",
"uploaded": "2023-02-20T09:09:41.755Z",
"requireSignedURLs": false,
"variants": [
"https://imagedelivery.net/<hash>/<target-img-id>/public",
"https://imagedelivery.net/<hash>/<target-img-id>/cover",
"https://imagedelivery.net/<hash>/<target-img-id>/avatar",
"https://imagedelivery.net/<hash>/<target-img-id>/uniform"
]
},
"success": true,
"errors": [],
"messages": []
}'
```
"""
return self.client.get(
url=f"{self.base_api}/{img_id}",
headers=self.headers,
*args,
**kwargs,
)
| (self, img_id: str, *args, **kwargs) -> httpx.Response |
20,273 | start_sdk.cf_img | post | Issue httpx [POST](https://developers.cloudflare.com/images/cloudflare-images/upload-images/upload-via-url/) request to upload image. | def post(self, img_id: str, img: bytes, *args, **kwargs) -> httpx.Response:
"""Issue httpx [POST](https://developers.cloudflare.com/images/cloudflare-images/upload-images/upload-via-url/) request to upload image.""" # noqa: E501
return self.client.post(
url=self.base_api,
headers=self.headers,
data={"id": img_id},
files={"file": (img_id, img)},
*args,
**kwargs,
)
| (self, img_id: str, img: bytes, *args, **kwargs) -> httpx.Response |
20,274 | start_sdk.cf_img | upsert | Ensures a unique id name by first deleting the `img_id` from storage and then
uploading the `img`. | def upsert(self, img_id: str, img: bytes) -> httpx.Response:
"""Ensures a unique id name by first deleting the `img_id` from storage and then
uploading the `img`."""
self.delete(img_id)
return self.post(img_id, img)
| (self, img_id: str, img: bytes) -> httpx.Response |
20,275 | start_sdk.cf_img | url | Generates url based on the Cloudflare hash of the account. The `variant` is based on
how these are customized on Cloudflare Images. See also flexible variant [docs](https://developers.cloudflare.com/images/cloudflare-images/transform/flexible-variants/)
| def url(self, img_id: str, variant: str = "public"):
"""Generates url based on the Cloudflare hash of the account. The `variant` is based on
how these are customized on Cloudflare Images. See also flexible variant [docs](https://developers.cloudflare.com/images/cloudflare-images/transform/flexible-variants/)
""" # noqa: E501
return "/".join([self.base_delivery, img_id, variant])
| (self, img_id: str, variant: str = 'public') |
20,276 | start_sdk.cf_r2 | CFR2 |
_CFR2_
Cloudflare R2 via Amazon S3 [API](https://developers.cloudflare.com/r2/examples/boto3/).
The Cloudflare R2 key/secret follows AWS S3 conventions, see compatability in docs..
Add secrets to .env file:
Field in .env | Cloudflare API Credential | Where credential found
:--|:--:|:--
`CF_ACCT_ID` | Account ID | `https://dash.cloudflare.com/<acct_id>/r2`
`CF_R2_REGION` | Default Region: `apac` | See [options](https://developers.cloudflare.com/r2/learning/data-location/#available-hints)
`R2_ACCESS_KEY_ID` | Key | When R2 Token created in `https://dash.cloudflare.com/<acct_id>/r2/overview/api-tokens`
`R2_SECRET_ACCESS_KEY` | Secret | When R2 Token created in `https://dash.cloudflare.com/<acct_id>/r2/overview/api-tokens`
Examples:
>>> import os
>>> os.environ['CF_ACCT_ID'] = "ACT"
>>> os.environ['R2_ACCESS_KEY_ID'] = "ABC"
>>> os.environ['R2_SECRET_ACCESS_KEY'] = "XYZ"
>>> from start_sdk import CFR2
>>> r2 = CFR2()
>>> type(r2.resource)
<class 'boto3.resources.factory.s3.ServiceResource'>
| class CFR2(BaseSettings):
"""
_CFR2_
Cloudflare R2 via Amazon S3 [API](https://developers.cloudflare.com/r2/examples/boto3/).
The Cloudflare R2 key/secret follows AWS S3 conventions; see compatibility notes in the docs.
Add secrets to .env file:
Field in .env | Cloudflare API Credential | Where credential found
:--|:--:|:--
`CF_ACCT_ID` | Account ID | `https://dash.cloudflare.com/<acct_id>/r2`
`CF_R2_REGION` | Default Region: `apac` | See [options](https://developers.cloudflare.com/r2/learning/data-location/#available-hints)
`R2_ACCESS_KEY_ID` | Key | When R2 Token created in `https://dash.cloudflare.com/<acct_id>/r2/overview/api-tokens`
`R2_SECRET_ACCESS_KEY` | Secret | When R2 Token created in `https://dash.cloudflare.com/<acct_id>/r2/overview/api-tokens`
Examples:
>>> import os
>>> os.environ['CF_ACCT_ID'] = "ACT"
>>> os.environ['R2_ACCESS_KEY_ID'] = "ABC"
>>> os.environ['R2_SECRET_ACCESS_KEY'] = "XYZ"
>>> from start_sdk import CFR2
>>> r2 = CFR2()
>>> type(r2.resource)
<class 'boto3.resources.factory.s3.ServiceResource'>
""" # noqa: E501
acct: str = Field(default="ACT", repr=False, env="CF_ACCT_ID")
r2_region: str = Field(default="apac", repr=True, env="CF_R2_REGION")
r2_access_key: str = Field(
default="ABC",
repr=False,
title="R2 Key",
description=( # noqa: E501
"The Cloudflare R2 key/secret follows AWS S3 conventions, see"
" compatability in docs."
),
env="R2_ACCESS_KEY_ID",
)
r2_secret_key: str = Field(
default="XYZ",
repr=False,
title="R2 Secret",
description=( # noqa: E501
"The Cloudflare R2 key/secret follows AWS S3 conventions, see"
" compatability in docs."
),
env="R2_SECRET_ACCESS_KEY",
)
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
@property
def endpoint_url(self):
return f"https://{self.acct}.r2.cloudflarestorage.com"
@property
def resource(self):
"""Resource can be used as a means to access the bucket via an instantiated
`r2`, e.g. `r2.resource.Bucket('<created-bucket-name>')`
"""
return boto3.resource(
"s3",
endpoint_url=self.endpoint_url,
aws_access_key_id=self.r2_access_key,
aws_secret_access_key=self.r2_secret_key,
region_name=self.r2_region,
)
def get_bucket(self, bucket_name: str):
return self.resource.Bucket(bucket_name) # type: ignore
| (_env_file: Union[str, os.PathLike, List[Union[str, os.PathLike]], Tuple[Union[str, os.PathLike], ...], NoneType] = '<object object at 0x7f74a8a56270>', _env_file_encoding: Optional[str] = None, _env_nested_delimiter: Optional[str] = None, _secrets_dir: Union[str, os.PathLike, NoneType] = None, *, acct: str = 'ACT', r2_region: str = 'apac', r2_access_key: str = 'ABC', r2_secret_key: str = 'XYZ') -> None |
20,277 | start_sdk.cf_r2 | get_bucket | null | def get_bucket(self, bucket_name: str):
return self.resource.Bucket(bucket_name) # type: ignore
| (self, bucket_name: str) |
20,278 | start_sdk.cf_r2 | CFR2_Bucket |
_CFR2_Bucket_
Helper class that can be assigned to each bucket.
Note [AWS API reference](https://docs.aws.amazon.com/AmazonS3/latest/API) vs. [R2](https://developers.cloudflare.com/r2/data-access/s3-api/api/)
Examples:
>>> import os
>>> os.environ['CF_R2_ACCT_ID'] = "ACT"
>>> os.environ['R2_ACCESS_KEY_ID'] = "ABC"
>>> os.environ['R2_SECRET_ACCESS_KEY'] = "XYZ"
>>> from start_sdk import CFR2_Bucket
>>> obj = CFR2_Bucket(name='test')
>>> type(obj.bucket)
<class 'boto3.resources.factory.s3.Bucket'>
| class CFR2_Bucket(CFR2):
"""
_CFR2_Bucket_
Helper class that can be assigned to each bucket.
Note [AWS API reference](https://docs.aws.amazon.com/AmazonS3/latest/API) vs. [R2](https://developers.cloudflare.com/r2/data-access/s3-api/api/)
Examples:
>>> import os
>>> os.environ['CF_R2_ACCT_ID'] = "ACT"
>>> os.environ['R2_ACCESS_KEY_ID'] = "ABC"
>>> os.environ['R2_SECRET_ACCESS_KEY'] = "XYZ"
>>> from start_sdk import CFR2_Bucket
>>> obj = CFR2_Bucket(name='test')
>>> type(obj.bucket)
<class 'boto3.resources.factory.s3.Bucket'>
""" # noqa: E501
name: str
@property
def bucket(self):
return self.get_bucket(self.name)
@property
def client(self):
return self.bucket.meta.client
def get(self, key: str, *args, **kwargs) -> dict | None:
"""Assumes the key prefix exists in the bucket. See helper
for [boto3 get_object](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3/client/get_object.html)
Args:
key (str): Should exist in the bucket
Returns:
dict | None: Returns `None` if not found.
""" # noqa: E501
try:
return self.client.get_object(
Bucket=self.name, Key=key, *args, **kwargs
)
except Exception:
return None
def fetch(self, *args, **kwargs) -> dict:
"""Each bucket contain content prefixes but can only be fetched incrementally,
e.g. by batches. Each batch limited to a max of 1000 prefixes. Without arguments
included in this call, will default to the first 1000 keys. See more details in
[boto3 list-objects-v2 API docs](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3/client/list_objects_v2.html#list-objects-v2)
""" # noqa: E501
return self.client.list_objects_v2(Bucket=self.name, *args, **kwargs)
def all_items(self) -> list[dict] | None:
"""Using pagination conventions from s3 and r2, get all prefixes found in
the bucket name. Note this aggregates all `fetch()` calls, specifically limiting
the response to the "Contents" key of each `fetch()` call. Such key will
contain a list of dict-based prefixes."""
contents = []
counter = 1
next_token = None
while True:
print(f"Accessing page {counter=}")
if counter == 1:
res = self.fetch()
elif next_token:
res = self.fetch(ContinuationToken=next_token)
else:
print("Missing next token.")
break
next_token = res.get("NextContinuationToken")
if res.get("Contents"):
contents.extend(res["Contents"])
counter += 1
if not res["IsTruncated"]: # is False if all results returned.
print("All results returned.")
return contents
@classmethod
def filter_content(
cls, filter_suffix: str, objects_list: list[dict]
) -> Iterator[dict]:
"""Filter objects based on a `filter_suffix` from either:
1. List of objects from `self.all_items()`; or
2. _Contents_ key of `self.fetch()`. Note that each _Contents_ field of `fetch`
is a dict object, each object will contain a _Key_ field.
Args:
filter_suffix (str): Prefix terminates with what suffix
objects_list (list[dict]): List of objects previously fetched
Yields:
Iterator[dict]: Filtered `objects_list` based on `filter_suffix`
"""
for prefixed_obj in objects_list:
if key := prefixed_obj.get("Key"):
if key.endswith(filter_suffix):
yield prefixed_obj
def upload(self, file_like: str | Path, loc: str, args: dict = {}):
"""Upload local `file_like` contents to r2-bucket path `loc`.
Args:
file_like (str | Path): Local file
loc (str): Remote location
args (dict, optional): Will populate `ExtraArgs` during upload.
Defaults to {}.
"""
with open(file_like, "rb") as read_file:
return self.bucket.upload_fileobj(read_file, loc, ExtraArgs=args)
def download(self, loc: str, local_file: str):
"""With a r2-bucket `loc`, download contents to `local_file`.
Args:
loc (str): Origin file to download
local_file (str): Where to download, how to name downloaded file
"""
with open(local_file, "wb") as write_file:
return self.bucket.download_fileobj(loc, write_file)
def get_root_prefixes(self):
"""See adapted recipe from boto3 re: top-level [prefixes](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#list-top-level-common-prefixes-in-amazon-s3-bucket).
Returns:
list[str]: Matching prefixes in the root of the bucket.
""" # noqa: E501
_objs = []
paginator = self.client.get_paginator("list_objects")
result = paginator.paginate(Bucket=self.name, Delimiter="/")
for prefix in result.search("CommonPrefixes"):
_objs.append(prefix.get("Prefix")) # type: ignore
return _objs
| (_env_file: Union[str, os.PathLike, List[Union[str, os.PathLike]], Tuple[Union[str, os.PathLike], ...], NoneType] = '<object object at 0x7f74a8a56270>', _env_file_encoding: Optional[str] = None, _env_nested_delimiter: Optional[str] = None, _secrets_dir: Union[str, os.PathLike, NoneType] = None, *, acct: str = 'ACT', r2_region: str = 'apac', r2_access_key: str = 'ABC', r2_secret_key: str = 'XYZ', name: str) -> None |
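A minimal usage sketch for the bucket helper above, assuming the R2 credentials are available in the environment and using a hypothetical bucket name and keys:
>>> from start_sdk import CFR2_Bucket
>>> store = CFR2_Bucket(name="my-bucket")  # credentials read from .env / environment
>>> store.upload(file_like="notes.md", loc="docs/notes.md")  # local file -> bucket key
>>> res = store.get("docs/notes.md")  # dict response, or None when the key is missing
>>> page = store.fetch()  # first batch of up to 1000 prefixes
>>> md_files = list(CFR2_Bucket.filter_content(".md", page.get("Contents", [])))
>>> store.download(loc="docs/notes.md", local_file="copy.md")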
20,279 | start_sdk.cf_r2 | all_items | Using pagination conventions from s3 and r2, get all prefixes found in
the bucket name. Note this aggregates all `fetch()` calls, specifically limiting
the response to the "Contents" key of each `fetch()` call. Such key will
contain a list of dict-based prefixes. | def all_items(self) -> list[dict] | None:
"""Using pagination conventions from s3 and r2, get all prefixes found in
the bucket name. Note this aggregates all `fetch()` calls, specifically limiting
the response to the "Contents" key of each `fetch()` call. Such key will
contain a list of dict-based prefixes."""
contents = []
counter = 1
next_token = None
while True:
print(f"Accessing page {counter=}")
if counter == 1:
res = self.fetch()
elif next_token:
res = self.fetch(ContinuationToken=next_token)
else:
print("Missing next token.")
break
next_token = res.get("NextContinuationToken")
if res.get("Contents"):
contents.extend(res["Contents"])
counter += 1
if not res["IsTruncated"]: # is False if all results returned.
print("All results returned.")
return contents
| (self) -> list[dict] | None |
20,280 | start_sdk.cf_r2 | download | With a r2-bucket `loc`, download contents to `local_file`.
Args:
loc (str): Origin file to download
local_file (str): Where to download, how to name downloaded file
| def download(self, loc: str, local_file: str):
"""With a r2-bucket `loc`, download contents to `local_file`.
Args:
loc (str): Origin file to download
local_file (str): Where to download, how to name downloaded file
"""
with open(local_file, "wb") as write_file:
return self.bucket.download_fileobj(loc, write_file)
| (self, loc: str, local_file: str) |
20,281 | start_sdk.cf_r2 | fetch | Each bucket contains content prefixes, but these can only be fetched incrementally,
i.e. in batches. Each batch is limited to a maximum of 1000 prefixes. Without arguments,
this call defaults to the first 1000 keys. See more details in
[boto3 list-objects-v2 API docs](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3/client/list_objects_v2.html#list-objects-v2)
| def fetch(self, *args, **kwargs) -> dict:
"""Each bucket contain content prefixes but can only be fetched incrementally,
e.g. by batches. Each batch limited to a max of 1000 prefixes. Without arguments
included in this call, will default to the first 1000 keys. See more details in
[boto3 list-objects-v2 API docs](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3/client/list_objects_v2.html#list-objects-v2)
""" # noqa: E501
return self.client.list_objects_v2(Bucket=self.name, *args, **kwargs)
| (self, *args, **kwargs) -> dict |
20,282 | start_sdk.cf_r2 | get | Assumes the key prefix exists in the bucket. See helper
for [boto3 get_object](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3/client/get_object.html)
Args:
key (str): Should exist in the browser
Returns:
dict | None: Returns `None` if not found.
| def get(self, key: str, *args, **kwargs) -> dict | None:
"""Assumes the key prefix exists in the bucket. See helper
for [boto3 get_object](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3/client/get_object.html)
Args:
key (str): Should exist in the bucket
Returns:
dict | None: Returns `None` if not found.
""" # noqa: E501
try:
return self.client.get_object(
Bucket=self.name, Key=key, *args, **kwargs
)
except Exception:
return None
| (self, key: str, *args, **kwargs) -> dict | None |
20,284 | start_sdk.cf_r2 | get_root_prefixes | See adapted recipe from boto3 re: top-level [prefixes](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#list-top-level-common-prefixes-in-amazon-s3-bucket).
Returns:
list[str]: Matching prefixes in the root of the bucket.
| def get_root_prefixes(self):
"""See adapted recipe from boto3 re: top-level [prefixes](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#list-top-level-common-prefixes-in-amazon-s3-bucket).
Returns:
list[str]: Matching prefixes in the root of the bucket.
""" # noqa: E501
_objs = []
paginator = self.client.get_paginator("list_objects")
result = paginator.paginate(Bucket=self.name, Delimiter="/")
for prefix in result.search("CommonPrefixes"):
_objs.append(prefix.get("Prefix")) # type: ignore
return _objs
| (self) |
20,285 | start_sdk.cf_r2 | upload | Upload local `file_like` contents to r2-bucket path `loc`.
Args:
file_like (str | Path): Local file
loc (str): Remote location
args (dict, optional): Will populate `ExtraArgs` during upload.
Defaults to {}.
| def upload(self, file_like: str | Path, loc: str, args: dict = {}):
"""Upload local `file_like` contents to r2-bucket path `loc`.
Args:
file_like (str | Path): Local file
loc (str): Remote location
args (dict, optional): Will populate `ExtraArgs` during upload.
Defaults to {}.
"""
with open(file_like, "rb") as read_file:
return self.bucket.upload_fileobj(read_file, loc, ExtraArgs=args)
| (self, file_like: str | pathlib.Path, loc: str, args: dict = {}) |
20,286 | start_sdk.github | Github |
# Github API v2022-11-28
Add secrets to .env file:
Field in .env | Github Credentials | Where credential found
:--|:--:|:--
`GH_TOKEN` | Github Personal Access Token | Ensure _fine-grained_ Personal Access Token [Github Developer Settings](https://github.com/settings/tokens?type=beta) can access the repository represented in the url.
`GH_TOKEN_VERSION` | Default: `2022-11-28` | See [docs](https://docs.github.com/en/rest/repos/contents?apiVersion=2022-11-28)
| class Github(BaseSettings):
"""
# Github API v2022-11-28
Add secrets to .env file:
Field in .env | Github Credentials | Where credential found
:--|:--:|:--
`GH_TOKEN` | Github Personal Access Token | Ensure _fine-grained_ Personal Access Token [Github Developer Settings](https://github.com/settings/tokens?type=beta) can access the repository represented in the url.
`GH_TOKEN_VERSION` | Default: `2022-11-28` | See [docs](https://docs.github.com/en/rest/repos/contents?apiVersion=2022-11-28)
""" # noqa: E501
token: str = Field(
default=...,
repr=False,
env="GH_TOKEN",
)
version: str = Field(
default="2022-11-28",
repr=False,
env="GH_TOKEN_VERSION",
)
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
def get(
self,
url: str,
media_type: str | None = ".raw",
params: dict = {},
) -> httpx.Response:
"""See requisite [headers](https://docs.github.com/en/rest/repos/contents?apiVersion=2022-11-28#get-repository-content--code-samples)
Args:
url (str): Github API endpoint to request
media_type (str | None, optional): Suffix appended to the `application/vnd.github` Accept header. Defaults to ".raw".
params (dict, optional): Query string parameters. Defaults to {}.
Returns:
httpx.Response: Raw response from the Github API
""" # noqa: E501
with httpx.Client(timeout=120) as client:
return client.get(
url,
params=params,
headers={
"Accept": f"application/vnd.github{media_type}",
"Authorization": f"token {self.token}",
"X-GitHub-Api-Version": self.version,
},
)
def get_repo(self, author: str, repo: str) -> httpx.Response:
"""See Github API [docs](https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#get-a-repository)""" # noqa: E501
return self.get(f"{BASE}/{author}/{repo}")
def get_repo_commits(self, author: str, repo: str) -> httpx.Response:
"""See Github API [docs](https://docs.github.com/en/rest/commits/commits?apiVersion=2022-11-28)""" # noqa: E501
return self.get(f"{BASE}/{author}/{repo}/commits")
def get_latest_sha(self, author: str, repo: str) -> str:
"""See Github API [docs](https://docs.github.com/en/rest/commits/commits?apiVersion=2022-11-28#get-a-commit)""" # noqa: E501
commits_response = self.get_repo_commits(author, repo)
return commits_response.json()[0]["sha"]
def get_latest_og_img_url(self, author: str, repo: str) -> str:
"""See [Stackoverflow](https://stackoverflow.com/a/71454181)"""
return f"{OG}/{self.get_latest_sha(author, repo)}/{author}/{repo}"
| (_env_file: Union[str, os.PathLike, List[Union[str, os.PathLike]], Tuple[Union[str, os.PathLike], ...], NoneType] = '<object object at 0x7f74a8a56270>', _env_file_encoding: Optional[str] = None, _env_nested_delimiter: Optional[str] = None, _secrets_dir: Union[str, os.PathLike, NoneType] = None, *, token: str, version: str = '2022-11-28') -> None |
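A short sketch of the client above, assuming a valid `GH_TOKEN` is set and using a hypothetical `author/repo`:
>>> from start_sdk.github import Github
>>> gh = Github()  # token read from GH_TOKEN; API version defaults to 2022-11-28
>>> repo_response = gh.get_repo("author", "repo")  # httpx.Response from the repos endpoint
>>> latest_sha = gh.get_latest_sha("author", "repo")  # sha of the most recent commit
>>> preview_url = gh.get_latest_og_img_url("author", "repo")  # opengraph image url pinned to that sha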
20,287 | start_sdk.github | get | See requisite [headers](https://docs.github.com/en/rest/repos/contents?apiVersion=2022-11-28#get-repository-content--code-samples)
Args:
url (str): Github API endpoint to request
media_type (str | None, optional): Suffix appended to the `application/vnd.github` Accept header. Defaults to ".raw".
params (dict, optional): Query string parameters. Defaults to {}.
Returns:
httpx.Response: Raw response from the Github API
| def get(
self,
url: str,
media_type: str | None = ".raw",
params: dict = {},
) -> httpx.Response:
"""See requisite [headers](https://docs.github.com/en/rest/repos/contents?apiVersion=2022-11-28#get-repository-content--code-samples)
Args:
url (str): Github API endpoint to request
media_type (str | None, optional): Suffix appended to the `application/vnd.github` Accept header. Defaults to ".raw".
params (dict, optional): Query string parameters. Defaults to {}.
Returns:
httpx.Response: Raw response from the Github API
""" # noqa: E501
with httpx.Client(timeout=120) as client:
return client.get(
url,
params=params,
headers={
"Accept": f"application/vnd.github{media_type}",
"Authorization": f"token {self.token}",
"X-GitHub-Api-Version": self.version,
},
)
| (self, url: str, media_type: str | None = '.raw', params: dict = {}) -> httpx.Response |
20,288 | start_sdk.github | get_latest_og_img_url | See [Stackoverflow](https://stackoverflow.com/a/71454181) | def get_latest_og_img_url(self, author: str, repo: str) -> str:
"""See [Stackoverflow](https://stackoverflow.com/a/71454181)"""
return f"{OG}/{self.get_latest_sha(author, repo)}/{author}/{repo}"
| (self, author: str, repo: str) -> str |
20,289 | start_sdk.github | get_latest_sha | See Github API [docs](https://docs.github.com/en/rest/commits/commits?apiVersion=2022-11-28#get-a-commit) | def get_latest_sha(self, author: str, repo: str) -> str:
"""See Github API [docs](https://docs.github.com/en/rest/commits/commits?apiVersion=2022-11-28#get-a-commit)""" # noqa: E501
commits_response = self.get_repo_commits(author, repo)
return commits_response.json()[0]["sha"]
| (self, author: str, repo: str) -> str |
20,290 | start_sdk.github | get_repo | See Github API [docs](https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#get-a-repository) | def get_repo(self, author: str, repo: str) -> httpx.Response:
"""See Github API [docs](https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#get-a-repository)""" # noqa: E501
return self.get(f"{BASE}/{author}/{repo}")
| (self, author: str, repo: str) -> httpx.Response |
20,291 | start_sdk.github | get_repo_commits | See Github API [docs](https://docs.github.com/en/rest/commits/commits?apiVersion=2022-11-28) | def get_repo_commits(self, author: str, repo: str) -> httpx.Response:
"""See Github API [docs](https://docs.github.com/en/rest/commits/commits?apiVersion=2022-11-28)""" # noqa: E501
return self.get(f"{BASE}/{author}/{repo}/commits")
| (self, author: str, repo: str) -> httpx.Response |
20,292 | start_sdk.cf_r2 | StorageUtils | null | class StorageUtils(CFR2_Bucket):
temp_folder: Path
@classmethod
def clean_extra_meta(cls, text: str):
"""S3 metadata can only contain ASCII characters.
The overall size cannot exceed a certain threshold, otherwise the PutObject operation fails with: 'Your metadata headers exceed the maximum allowed metadata size.'
Examples:
>>> bad_text = "Hello,\\n\\n\\nthis breaks"
>>> StorageUtils.clean_extra_meta(bad_text)
'Hello, this breaks'
>>> long_text = "Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec."
>>> StorageUtils.clean_extra_meta(long_text)
'Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean m'
>>> valid_text = "This is a valid string"
>>> StorageUtils.clean_extra_meta(valid_text)
'This is a valid string'
""" # noqa: E501
text = re.sub(r"(\r|\n)+", r" ", text)
text = re.sub(r"[^\x00-\x7f]", r"", text)
if len(text) >= 100:
text = text[:100]
return text
@classmethod
def set_extra_meta(cls, data: dict) -> dict:
"""S3 metadata can be attached as extra args to R2.
Examples:
>>> test = {"statute_category": "RA", "statute_serial": None}
>>> StorageUtils.set_extra_meta(test)
{'Metadata': {'statute_category': 'RA'}}
Args:
data (dict): ordinary dict
Returns:
dict: Will be added to the `extra_args` field when uploading to R2
"""
return {
"Metadata": {
k: cls.clean_extra_meta(str(v)) for k, v in data.items() if v
}
}
def make_temp_yaml_path_from_data(self, data: dict) -> Path:
"""Create a temporary yaml file into folder path.
Args:
data (dict): What to store in the yaml file
Returns:
Path: Location of the yaml file created
"""
temp_path = self.temp_folder / "temp.yaml"
temp_path.unlink(missing_ok=True) # delete existing content, if any.
with open(temp_path, "w+"):
temp_path.write_text(yaml.safe_dump(data))
return temp_path
def restore_temp_yaml(self, yaml_suffix: str) -> dict[str, Any] | None:
"""Based on the `yaml_suffix`, download the same into a temp file
and return its contents based on the extension.
A `yaml` extension should result in contents in `dict` format;
The temp file is deleted after every successful extraction of
the `src` as content."""
if not yaml_suffix.endswith(".yaml"):
logger.error(f"Not {yaml_suffix=}")
return None
path = self.temp_folder / "temp.yaml"
try:
self.download(loc=yaml_suffix, local_file=str(path))
except Exception as e:
logger.error(f"Could not download yaml; {e=}")
return None
content = yaml.safe_load(path.read_bytes())
path.unlink(missing_ok=True)
return content
def restore_temp_txt(self, readable_suffix: str) -> str | None:
"""Based on the `src` prefix, download the same into a temp file
and return its contents based on the extension.
An `md` or `html` extension results in `str`.
The temp file is deleted after every successful extraction of
the `src` as content."""
if readable_suffix.endswith(".html"):
ext = ".html"
elif readable_suffix.endswith(".md"):
ext = ".md"
else:
logger.error(f"Not {readable_suffix=}")
return None
path = self.temp_folder / f"temp{ext}"
try:
self.download(loc=readable_suffix, local_file=str(path))
except Exception as e:
logger.error(f"Could not download yaml; {e=}")
return None
content = path.read_text()
path.unlink(missing_ok=True)
return content
| (_env_file: Union[str, os.PathLike, List[Union[str, os.PathLike]], Tuple[Union[str, os.PathLike], ...], NoneType] = '<object object at 0x7f74a8a56270>', _env_file_encoding: Optional[str] = None, _env_nested_delimiter: Optional[str] = None, _secrets_dir: Union[str, os.PathLike, NoneType] = None, *, acct: str = 'ACT', r2_region: str = 'apac', r2_access_key: str = 'ABC', r2_secret_key: str = 'XYZ', name: str, temp_folder: pathlib.Path) -> None |
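A sketch combining the helpers above: sanitize a dict into S3-safe metadata, then attach it as `ExtraArgs` on upload (the bucket, folder, and key names are hypothetical):
>>> from pathlib import Path
>>> from start_sdk.cf_r2 import StorageUtils
>>> store = StorageUtils(name="my-bucket", temp_folder=Path("/tmp"))
>>> extra = StorageUtils.set_extra_meta({"statute_category": "RA", "statute_serial": None})
>>> extra
{'Metadata': {'statute_category': 'RA'}}
>>> store.upload(file_like="statute.yaml", loc="statutes/ra/1.yaml", args=extra)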
20,299 | start_sdk.cf_r2 | make_temp_yaml_path_from_data | Create a temporary yaml file in the temp folder path.
Args:
data (dict): What to store in the yaml file
Returns:
Path: Location of the yaml file created
| def make_temp_yaml_path_from_data(self, data: dict) -> Path:
"""Create a temporary yaml file into folder path.
Args:
data (dict): What to store in the yaml file
Returns:
Path: Location of the yaml file created
"""
temp_path = self.temp_folder / "temp.yaml"
temp_path.unlink(missing_ok=True) # delete existing content, if any.
with open(temp_path, "w+"):
temp_path.write_text(yaml.safe_dump(data))
return temp_path
| (self, data: dict) -> pathlib.Path |
20,300 | start_sdk.cf_r2 | restore_temp_txt | Based on the `src` prefix, download the same into a temp file
and return its contents based on the extension.
An `md` or `html` extension results in `str`.
The temp file is deleted after every successful extraction of
the `src` as content. | def restore_temp_txt(self, readable_suffix: str) -> str | None:
"""Based on the `src` prefix, download the same into a temp file
and return its contents based on the extension.
An `md` or `html` extension results in `str`.
The temp file is deleted after every successful extraction of
the `src` as content."""
if readable_suffix.endswith(".html"):
ext = ".html"
elif readable_suffix.endswith(".md"):
ext = ".md"
else:
logger.error(f"Not {readable_suffix=}")
return None
path = self.temp_folder / f"temp{ext}"
try:
self.download(loc=readable_suffix, local_file=str(path))
except Exception as e:
logger.error(f"Could not download yaml; {e=}")
return None
content = path.read_text()
path.unlink(missing_ok=True)
return content
| (self, readable_suffix: str) -> str | None |
20,301 | start_sdk.cf_r2 | restore_temp_yaml | Based on the `yaml_suffix`, download the same into a temp file
and return its contents based on the extension.
A `yaml` extension should result in contents in `dict` format;
The temp file is deleted after every successful extraction of
the `src` as content. | def restore_temp_yaml(self, yaml_suffix: str) -> dict[str, Any] | None:
"""Based on the `yaml_suffix`, download the same into a temp file
and return its contents based on the extension.
A `yaml` extension should result in contents in `dict` format;
The temp file is deleted after every successful extraction of
the `src` as content."""
if not yaml_suffix.endswith(".yaml"):
logger.error(f"Not {yaml_suffix=}")
return None
path = self.temp_folder / "temp.yaml"
try:
self.download(loc=yaml_suffix, local_file=str(path))
except Exception as e:
logger.error(f"Could not download yaml; {e=}")
return None
content = yaml.safe_load(path.read_bytes())
path.unlink(missing_ok=True)
return content
| (self, yaml_suffix: str) -> dict[str, typing.Any] | None |
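A sketch of the temp-file roundtrip implied by the helpers above, reusing the hypothetical `store` instance from the earlier example: dump a dict to a local temp yaml, push it to the bucket, then restore it as a dict:
>>> data = {"title": "Sample", "units": [1, 2, 3]}
>>> temp_path = store.make_temp_yaml_path_from_data(data)  # writes <temp_folder>/temp.yaml
>>> store.upload(file_like=temp_path, loc="details.yaml")
>>> store.restore_temp_yaml("details.yaml") == data  # downloads, parses, then deletes the temp file
True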
20,306 | connectorx | ConnectionUrl | null | class ConnectionUrl(Generic[_BackendT], str):
@overload
def __new__(
cls,
*,
backend: Literal["sqlite"],
db_path: str | Path,
) -> ConnectionUrl[Literal["sqlite"]]:
"""
Help to build sqlite connection string url.
Parameters
==========
backend:
must specify "sqlite".
db_path:
the path to the sqlite database file.
"""
@overload
def __new__(
cls,
*,
backend: Literal["bigquery"],
db_path: str | Path,
) -> ConnectionUrl[Literal["bigquery"]]:
"""
Help to build BigQuery connection string url.
Parameters
==========
backend:
must specify "bigquery".
db_path:
the path to the bigquery database file.
"""
@overload
def __new__(
cls,
*,
backend: _ServerBackendT,
username: str,
password: str = "",
server: str,
port: int,
database: str = "",
database_options: dict[str, str] | None = None,
) -> ConnectionUrl[_ServerBackendT]:
"""
Help to build server-side backend database connection string url.
Parameters
==========
backend:
the database backend.
username:
the database username.
password:
the database password.
server:
the database server name.
port:
the database server port.
database:
the database name.
database_options:
the database options for connection.
"""
@overload
def __new__(
cls,
raw_connection: str,
) -> ConnectionUrl:
"""
Build connection from raw connection string url
Parameters
==========
raw_connection:
raw connection string
"""
def __new__(
cls,
raw_connection: str | None = None,
*,
backend: str = "",
username: str = "",
password: str = "",
server: str = "",
port: int | None = None,
database: str = "",
database_options: dict[str, str] | None = None,
db_path: str | Path = "",
) -> ConnectionUrl:
if raw_connection is not None:
return super().__new__(cls, raw_connection)
assert backend
if backend == "sqlite":
db_path = urllib.parse.quote(str(db_path))
connection = f"{backend}://{db_path}"
else:
connection = f"{backend}://{username}:{password}@{server}:{port}/{database}"
if database_options:
connection += "?" + urllib.parse.urlencode(database_options)
return super().__new__(cls, connection)
| (raw_connection: 'str | None' = None, *, backend: 'str' = '', username: 'str' = '', password: 'str' = '', server: 'str' = '', port: 'int | None' = None, database: 'str' = '', database_options: 'dict[str, str] | None' = None, db_path: 'str | Path' = '') -> 'ConnectionUrl' |
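Because `__new__` only assembles the string, the resulting URLs can be read off directly from the code above; a brief sketch:
>>> from connectorx import ConnectionUrl
>>> ConnectionUrl(backend="sqlite", db_path="/tmp/test.db")
'sqlite:///tmp/test.db'
>>> ConnectionUrl(
...     backend="postgresql", username="user", password="pass",
...     server="localhost", port=5432, database="db",
...     database_options={"sslmode": "require"},
... )
'postgresql://user:pass@localhost:5432/db?sslmode=require'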
20,307 | connectorx | __new__ | null | def __new__(
cls,
raw_connection: str | None = None,
*,
backend: str = "",
username: str = "",
password: str = "",
server: str = "",
port: int | None = None,
database: str = "",
database_options: dict[str, str] | None = None,
db_path: str | Path = "",
) -> ConnectionUrl:
if raw_connection is not None:
return super().__new__(cls, raw_connection)
assert backend
if backend == "sqlite":
db_path = urllib.parse.quote(str(db_path))
connection = f"{backend}://{db_path}"
else:
connection = f"{backend}://{username}:{password}@{server}:{port}/{database}"
if database_options:
connection += "?" + urllib.parse.urlencode(database_options)
return super().__new__(cls, connection)
| (cls, raw_connection: Optional[str] = None, *, backend: str = '', username: str = '', password: str = '', server: str = '', port: Optional[int] = None, database: str = '', database_options: Optional[dict[str, str]] = None, db_path: str | pathlib.Path = '') -> connectorx.ConnectionUrl |
20,385 | connectorx | get_meta |
Get metadata (header) of the given query (only for pandas)
Parameters
==========
conn
the connection string.
query
a SQL query.
protocol
backend-specific transfer protocol directive; defaults to 'binary' (except for redshift
connection strings, where 'cursor' will be used instead).
| def get_meta(
conn: str | ConnectionUrl,
query: str,
protocol: Protocol | None = None,
) -> pd.DataFrame:
"""
Get metadata (header) of the given query (only for pandas)
Parameters
==========
conn
the connection string.
query
a SQL query.
protocol
backend-specific transfer protocol directive; defaults to 'binary' (except for redshift
connection strings, where 'cursor' will be used instead).
"""
conn, protocol = rewrite_conn(conn, protocol)
result = _get_meta(conn, query, protocol)
df = reconstruct_pandas(result)
return df
| (conn: 'str | ConnectionUrl', query: 'str', protocol: 'Protocol | None' = None) -> 'pd.DataFrame' |
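A brief sketch of fetching only the header of a query; the connection details are placeholders:
>>> import connectorx as cx
>>> url = "postgresql://username:password@server:port/database"
>>> header = cx.get_meta(url, "SELECT * FROM lineitem")
>>> header.columns  # an empty DataFrame carrying just the column names and dtypes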
20,389 | connectorx | partition_sql |
Partition the sql query
Parameters
==========
conn
the connection string.
query
a SQL query to be partitioned.
partition_on
the column on which to partition the result.
partition_num
how many partitions to generate.
partition_range
the value range of the partition column.
| def partition_sql(
conn: str | ConnectionUrl,
query: str,
partition_on: str,
partition_num: int,
partition_range: tuple[int, int] | None = None,
) -> list[str]:
"""
Partition the sql query
Parameters
==========
conn
the connection string.
query
a SQL query to be partitioned.
partition_on
the column on which to partition the result.
partition_num
how many partitions to generate.
partition_range
the value range of the partition column.
"""
partition_query = {
"query": query,
"column": partition_on,
"min": partition_range and partition_range[0],
"max": partition_range and partition_range[1],
"num": partition_num,
}
return _partition_sql(conn, partition_query)
| (conn: str | connectorx.ConnectionUrl, query: str, partition_on: str, partition_num: int, partition_range: Optional[tuple[int, int]] = None) -> list[str] |
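A sketch of splitting one query into per-partition queries; the exact SQL text generated depends on the backend, so only the call pattern is shown (connection and column names are placeholders):
>>> import connectorx as cx
>>> url = "postgresql://username:password@server:port/database"
>>> queries = cx.partition_sql(
...     url, "SELECT * FROM lineitem",
...     partition_on="l_orderkey", partition_num=4,
...     partition_range=(0, 60_000_000),  # optional; when omitted the range is derived from the data
... )
>>> len(queries)
4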
20,390 | connectorx | read_sql |
Run the SQL query, download the data from database into a dataframe.
Parameters
==========
conn
the connection string, or dict of connection string mapping for federated query.
query
a SQL query or a list of SQL queries.
return_type
the return type of this function; one of "arrow(2)", "pandas", "modin", "dask" or "polars(2)".
protocol
backend-specific transfer protocol directive; defaults to 'binary' (except for redshift
connection strings, where 'cursor' will be used instead).
partition_on
the column on which to partition the result.
partition_range
the value range of the partition column.
partition_num
how many partitions to generate.
index_col
the index column to set; only applicable for return type "pandas", "modin", "dask".
Examples
========
Read a DataFrame from a SQL query using a single thread:
>>> postgres_url = "postgresql://username:password@server:port/database"
>>> query = "SELECT * FROM lineitem"
>>> read_sql(postgres_url, query)
Read a DataFrame in parallel using 10 threads by automatically partitioning the provided SQL on the partition column:
>>> postgres_url = "postgresql://username:password@server:port/database"
>>> query = "SELECT * FROM lineitem"
>>> read_sql(postgres_url, query, partition_on="partition_col", partition_num=10)
Read a DataFrame in parallel using 2 threads by explicitly providing two SQL queries:
>>> postgres_url = "postgresql://username:password@server:port/database"
>>> queries = ["SELECT * FROM lineitem WHERE partition_col <= 10", "SELECT * FROM lineitem WHERE partition_col > 10"]
>>> read_sql(postgres_url, queries)
| def read_sql(
conn: str | ConnectionUrl | dict[str, str] | dict[str, ConnectionUrl],
query: list[str] | str,
*,
return_type: Literal[
"pandas", "polars", "polars2", "arrow", "arrow2", "modin", "dask"
] = "pandas",
protocol: Protocol | None = None,
partition_on: str | None = None,
partition_range: tuple[int, int] | None = None,
partition_num: int | None = None,
index_col: str | None = None,
) -> pd.DataFrame | mpd.DataFrame | dd.DataFrame | pl.DataFrame | pa.Table:
"""
Run the SQL query, download the data from database into a dataframe.
Parameters
==========
conn
the connection string, or dict of connection string mapping for federated query.
query
a SQL query or a list of SQL queries.
return_type
the return type of this function; one of "arrow(2)", "pandas", "modin", "dask" or "polars(2)".
protocol
backend-specific transfer protocol directive; defaults to 'binary' (except for redshift
connection strings, where 'cursor' will be used instead).
partition_on
the column on which to partition the result.
partition_range
the value range of the partition column.
partition_num
how many partitions to generate.
index_col
the index column to set; only applicable for return type "pandas", "modin", "dask".
Examples
========
Read a DataFrame from a SQL query using a single thread:
>>> postgres_url = "postgresql://username:password@server:port/database"
>>> query = "SELECT * FROM lineitem"
>>> read_sql(postgres_url, query)
Read a DataFrame in parallel using 10 threads by automatically partitioning the provided SQL on the partition column:
>>> postgres_url = "postgresql://username:password@server:port/database"
>>> query = "SELECT * FROM lineitem"
>>> read_sql(postgres_url, query, partition_on="partition_col", partition_num=10)
Read a DataFrame in parallel using 2 threads by explicitly providing two SQL queries:
>>> postgres_url = "postgresql://username:password@server:port/database"
>>> queries = ["SELECT * FROM lineitem WHERE partition_col <= 10", "SELECT * FROM lineitem WHERE partition_col > 10"]
>>> read_sql(postgres_url, queries)
"""
if isinstance(query, list) and len(query) == 1:
query = query[0]
query = remove_ending_semicolon(query)
if isinstance(conn, dict):
assert partition_on is None and isinstance(
query, str
), "Federated query does not support query partitioning for now"
assert (
protocol is None
), "Federated query does not support specifying protocol for now"
query = remove_ending_semicolon(query)
result = _read_sql2(query, conn)
df = reconstruct_arrow(result)
if return_type == "pandas":
df = df.to_pandas(date_as_object=False, split_blocks=False)
if return_type == "polars":
pl = try_import_module("polars")
try:
# api change for polars >= 0.8.*
df = pl.from_arrow(df)
except AttributeError:
df = pl.DataFrame.from_arrow(df)
return df
if isinstance(query, str):
query = remove_ending_semicolon(query)
if partition_on is None:
queries = [query]
partition_query = None
else:
partition_query = {
"query": query,
"column": partition_on,
"min": partition_range[0] if partition_range else None,
"max": partition_range[1] if partition_range else None,
"num": partition_num,
}
queries = None
elif isinstance(query, list):
queries = [remove_ending_semicolon(subquery) for subquery in query]
partition_query = None
if partition_on is not None:
raise ValueError("Partition on multiple queries is not supported.")
else:
raise ValueError("query must be either str or a list of str")
conn, protocol = rewrite_conn(conn, protocol)
if return_type in {"modin", "dask", "pandas"}:
try_import_module("pandas")
result = _read_sql(
conn,
"pandas",
queries=queries,
protocol=protocol,
partition_query=partition_query,
)
df = reconstruct_pandas(result)
if index_col is not None:
df.set_index(index_col, inplace=True)
if return_type == "modin":
mpd = try_import_module("modin.pandas")
df = mpd.DataFrame(df)
elif return_type == "dask":
dd = try_import_module("dask.dataframe")
df = dd.from_pandas(df, npartitions=1)
elif return_type in {"arrow", "arrow2", "polars", "polars2"}:
try_import_module("pyarrow")
result = _read_sql(
conn,
"arrow2" if return_type in {"arrow2", "polars", "polars2"} else "arrow",
queries=queries,
protocol=protocol,
partition_query=partition_query,
)
df = reconstruct_arrow(result)
if return_type in {"polars", "polars2"}:
pl = try_import_module("polars")
try:
df = pl.DataFrame.from_arrow(df)
except AttributeError:
# api change for polars >= 0.8.*
df = pl.from_arrow(df)
else:
raise ValueError(return_type)
return df
| (conn: 'str | ConnectionUrl | dict[str, str] | dict[str, ConnectionUrl]', query: 'list[str] | str', *, return_type: "Literal['pandas', 'polars', 'polars2', 'arrow', 'arrow2', 'modin', 'dask']" = 'pandas', protocol: 'Protocol | None' = None, partition_on: 'str | None' = None, partition_range: 'tuple[int, int] | None' = None, partition_num: 'int | None' = None, index_col: 'str | None' = None) -> 'pd.DataFrame | mpd.DataFrame | dd.DataFrame | pl.DataFrame | pa.Table' |
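A short sketch of the non-pandas return types and of partitioned reads, mirroring the docstring examples; connection details and column names are placeholders:
>>> import connectorx as cx
>>> url = "postgresql://username:password@server:port/database"
>>> tbl = cx.read_sql(url, "SELECT * FROM lineitem", return_type="arrow")     # pyarrow.Table
>>> pl_df = cx.read_sql(url, "SELECT * FROM lineitem", return_type="polars")  # polars.DataFrame
>>> df = cx.read_sql(
...     url, "SELECT * FROM lineitem",
...     partition_on="l_orderkey", partition_num=10,
...     index_col="l_orderkey",  # only honored for pandas/modin/dask return types
... )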
20,391 | connectorx | read_sql_pandas |
Run the SQL query, download the data from database into a dataframe.
First several parameters are in the same name and order with `pandas.read_sql`.
Parameters
==========
Please refer to `read_sql`
Examples
========
Read a DataFrame from a SQL query using a single thread:
>>> # from pandas import read_sql
>>> from connectorx import read_sql_pandas as read_sql
>>> postgres_url = "postgresql://username:password@server:port/database"
>>> query = "SELECT * FROM lineitem"
>>> read_sql(query, postgres_url)
| def read_sql_pandas(
sql: list[str] | str,
con: str | ConnectionUrl | dict[str, str] | dict[str, ConnectionUrl],
index_col: str | None = None,
protocol: Protocol | None = None,
partition_on: str | None = None,
partition_range: tuple[int, int] | None = None,
partition_num: int | None = None,
) -> pd.DataFrame:
"""
Run the SQL query, download the data from database into a dataframe.
First several parameters are in the same name and order with `pandas.read_sql`.
Parameters
==========
Please refer to `read_sql`
Examples
========
Read a DataFrame from a SQL query using a single thread:
>>> # from pandas import read_sql
>>> from connectorx import read_sql_pandas as read_sql
>>> postgres_url = "postgresql://username:password@server:port/database"
>>> query = "SELECT * FROM lineitem"
>>> read_sql(query, postgres_url)
"""
return read_sql(
con,
sql,
return_type="pandas",
protocol=protocol,
partition_on=partition_on,
partition_range=partition_range,
partition_num=partition_num,
index_col=index_col,
)
| (sql: 'list[str] | str', con: 'str | ConnectionUrl | dict[str, str] | dict[str, ConnectionUrl]', index_col: 'str | None' = None, protocol: 'Protocol | None' = None, partition_on: 'str | None' = None, partition_range: 'tuple[int, int] | None' = None, partition_num: 'int | None' = None) -> 'pd.DataFrame' |
20,392 | connectorx | reconstruct_arrow | null | def reconstruct_arrow(result: _ArrowInfos) -> pa.Table:
import pyarrow as pa
names, ptrs = result
if len(names) == 0:
return pa.Table.from_arrays([])
rbs = []
for chunk in ptrs:
rb = pa.RecordBatch.from_arrays(
[pa.Array._import_from_c(*col_ptr) for col_ptr in chunk], names
)
rbs.append(rb)
return pa.Table.from_batches(rbs)
| (result: '_ArrowInfos') -> 'pa.Table' |
20,393 | connectorx | reconstruct_pandas | null | def reconstruct_pandas(df_infos: _DataframeInfos) -> pd.DataFrame:
import pandas as pd
data = df_infos["data"]
headers = df_infos["headers"]
block_infos = df_infos["block_infos"]
nrows = data[0][0].shape[-1] if isinstance(data[0], tuple) else data[0].shape[-1]
blocks = []
for binfo, block_data in zip(block_infos, data):
if binfo.dt == 0: # NumpyArray
blocks.append(
pd.core.internals.make_block(block_data, placement=binfo.cids)
)
elif binfo.dt == 1: # IntegerArray
blocks.append(
pd.core.internals.make_block(
pd.core.arrays.IntegerArray(block_data[0], block_data[1]),
placement=binfo.cids[0],
)
)
elif binfo.dt == 2: # BooleanArray
blocks.append(
pd.core.internals.make_block(
pd.core.arrays.BooleanArray(block_data[0], block_data[1]),
placement=binfo.cids[0],
)
)
elif binfo.dt == 3: # DatetimeArray
blocks.append(
pd.core.internals.make_block(
pd.core.arrays.DatetimeArray(block_data), placement=binfo.cids
)
)
else:
raise ValueError(f"unknown dt: {binfo.dt}")
block_manager = pd.core.internals.BlockManager(
blocks, [pd.Index(headers), pd.RangeIndex(start=0, stop=nrows, step=1)]
)
df = pd.DataFrame(block_manager)
return df
| (df_infos: '_DataframeInfos') -> 'pd.DataFrame' |
20,394 | connectorx | remove_ending_semicolon |
Removes the semicolon if the query ends with it.
Parameters
==========
query
SQL query
| def remove_ending_semicolon(query: str) -> str:
"""
Removes the semicolon if the query ends with it.
Parameters
==========
query
SQL query
"""
if query.endswith(";"):
query = query[:-1]
return query
| (query: str) -> str |
20,395 | connectorx | rewrite_conn | null | def rewrite_conn(
conn: str | ConnectionUrl, protocol: Protocol | None = None
) -> tuple[str, Protocol]:
if not protocol:
# note: redshift/clickhouse are not compatible with the 'binary' protocol, and use other database
# drivers to connect. set a compatible protocol and masquerade as the appropriate backend.
backend, connection_details = conn.split(":", 1) if conn else ("", "")
if "redshift" in backend:
conn = f"postgresql:{connection_details}"
protocol = "cursor"
elif "clickhouse" in backend:
conn = f"mysql:{connection_details}"
protocol = "text"
else:
protocol = "binary"
return conn, protocol
| (conn: str | connectorx.ConnectionUrl, protocol: Optional[Literal['csv', 'binary', 'cursor', 'simple', 'text']] = None) -> tuple[str, typing.Literal['csv', 'binary', 'cursor', 'simple', 'text']] |
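The rewrites above can be traced straight from the code: redshift URLs are reissued as postgresql with the 'cursor' protocol, clickhouse as mysql with 'text', and everything else keeps 'binary'; a sketch:
>>> from connectorx import rewrite_conn
>>> rewrite_conn("redshift://user:pass@host:5439/db")
('postgresql://user:pass@host:5439/db', 'cursor')
>>> rewrite_conn("clickhouse://user:pass@host:9000/db")
('mysql://user:pass@host:9000/db', 'text')
>>> rewrite_conn("postgresql://user:pass@host:5432/db")
('postgresql://user:pass@host:5432/db', 'binary')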
20,396 | connectorx | try_import_module | null | def try_import_module(name: str):
try:
return importlib.import_module(name)
except ModuleNotFoundError:
raise ValueError(f"You need to install {name.split('.')[0]} first")
| (name: str) |
20,399 | pypopulation.implementation | get_population |
Get population for either Alpha-2 or Alpha-3 `country_code` caseless.
None if `country_code` does not exist in either map.
| def get_population(country_code: str) -> t.Optional[int]:
"""
Get population for either Alpha-2 or Alpha-3 `country_code` caseless.
None if `country_code` does not exist in either map.
"""
return get_population_a2(country_code) or get_population_a3(country_code)
| (country_code: str) -> Optional[int] |
20,400 | pypopulation.implementation | get_population_a2 |
Get population for Alpha-2 `country_code` caseless.
None if `country_code` does not exist in the map.
| def get_population_a2(country_code: str) -> t.Optional[int]:
"""
Get population for Alpha-2 `country_code` caseless.
None if `country_code` does not exist in the map.
"""
return _a2_map.get(_normalize(country_code))
| (country_code: str) -> Optional[int] |
20,401 | pypopulation.implementation | get_population_a3 |
Get population for Alpha-3 `country_code` caseless.
None if `country_code` does not exist in the map.
| def get_population_a3(country_code: str) -> t.Optional[int]:
"""
Get population for Alpha-3 `country_code` caseless.
None if `country_code` does not exist in the map.
"""
return _a3_map.get(_normalize(country_code))
| (country_code: str) -> Optional[int] |
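A small sketch of the three caseless lookups above, importing from the module shown; the actual population figures come from the bundled dataset, so they are not asserted here:
>>> from pypopulation.implementation import get_population, get_population_a2, get_population_a3
>>> get_population_a2("de") == get_population_a3("DEU") == get_population("De")
True
>>> get_population("ZZZ") is None  # unknown code in both maps
True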
20,403 | nn_names.generator | generate_name | null | def generate_name(min_length: int = 4, max_length: int = 10) -> str:
try:
length = randint(min_length, max_length)
letters, letters2 = nn()
# first character
first_char = randint(0, 26)
name = chr(97 + first_char).upper()
# second character
ran = randint(0, 1000)
secondchar = 0
curar = letters[first_char]
while ran >= curar[secondchar]:
secondchar += 1
name += chr(97 + secondchar)
# rest of the characters
for _ in range(2, length):
ran = randint(0, 1000)
nextchar = 0
curar = letters2[first_char][secondchar]
while ran >= curar[nextchar]:
nextchar += 1
first_char = secondchar
secondchar = nextchar
name += chr(97 + nextchar)
return name
except IndexError:
return generate_name(min_length, max_length)
| (min_length: int = 4, max_length: int = 10) -> str |
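The generator walks first- and second-order letter frequency tables returned by `nn()`, so every call produces a random, pronounceable name; a sketch (output varies per run):
>>> from nn_names.generator import generate_name
>>> generate_name()              # length between 4 and 10 by default, e.g. a name like 'Berodil'
>>> generate_name(6, 6)          # force an exact length of 6
>>> [generate_name(3, 5) for _ in range(3)]  # a small batch of short names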
20,405 | cron_descriptor.CasingTypeEnum | CasingTypeEnum | null | class CasingTypeEnum(object):
Title = 1
Sentence = 2
LowerCase = 3
| () |
20,406 | cron_descriptor.DescriptionTypeEnum | DescriptionTypeEnum |
DescriptionTypeEnum
| class DescriptionTypeEnum(enum.IntEnum):
"""
DescriptionTypeEnum
"""
FULL = 1
TIMEOFDAY = 2
SECONDS = 3
MINUTES = 4
HOURS = 5
DAYOFWEEK = 6
MONTH = 7
DAYOFMONTH = 8
YEAR = 9
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
20,408 | cron_descriptor.ExpressionDescriptor | ExpressionDescriptor |
Converts a Cron Expression into a human readable string
| class ExpressionDescriptor:
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptor
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknown
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
# if kwargs in _options, overwrite it, if not raise exception
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException("Unknown {} configuration argument".format(kwarg))
# Initializes localization
self.get_text = GetText(options.locale_code, options.locale_location)
# Parse expression
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
def _(self, message):
return self.get_text.trans.gettext(message)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a humanreadable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception:
"""
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
return choices.get(description_type, self.get_seconds_description)()
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formatting fails
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(description, self._options.verbose)
description = ExpressionDescriptor.transform_case(description, self._options.casing_type)
except Exception:
description = self._(
"An error occurred when generating the expression description. Check the cron expression syntax."
)
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(self._("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif seconds_expression == "" and "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(self._("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif seconds_expression == "" and "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (i.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(self._("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(self._(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description and minutes_description:
description.append(", ")
description.append(minutes_description)
if description and hours_description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
def get_description_format(s):
if s == "0":
return ""
try:
if int(s) < 20:
return self._("at {0} seconds past the minute")
else:
return self._("at {0} seconds past the minute [grThen20]") or self._("at {0} seconds past the minute")
except ValueError:
return self._("at {0} seconds past the minute")
return self.get_segment_description(
self._expression_parts[0],
self._("every second"),
lambda s: s,
lambda s: self._("every {0} seconds").format(s),
lambda s: self._("seconds {0} through {1} past the minute"),
get_description_format,
lambda s: self._(", second {0} through second {1}") or self._(", {0} through {1}")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
seconds_expression = self._expression_parts[0]
def get_description_format(s):
if s == "0" and seconds_expression == "":
return ""
try:
if int(s) < 20:
return self._("at {0} minutes past the hour")
else:
return self._("at {0} minutes past the hour [grThen20]") or self._("at {0} minutes past the hour")
except ValueError:
return self._("at {0} minutes past the hour")
return self.get_segment_description(
self._expression_parts[1],
self._("every minute"),
lambda s: s,
lambda s: self._("every {0} minutes").format(s),
lambda s: self._("minutes {0} through {1} past the hour"),
get_description_format,
lambda s: self._(", minute {0} through minute {1}") or self._(", {0} through {1}")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
self._("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: self._("every {0} hours").format(s),
lambda s: self._("between {0} and {1}"),
lambda s: self._("at {0}"),
lambda s: self._(", hour {0} through hour {1}") or self._(", {0} through {1}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*":
# DOW is specified as * so we will not generate a description and defer to DOM part.
# Otherwise, we could get a contradiction like "on day 1 of the month, every day"
# or a dupe description like "every day, every day".
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, _ = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return ExpressionDescriptor.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: self._("first"),
2: self._("second"),
3: self._("third"),
4: self._("forth"),
5: self._("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formatted = "{}{}{}".format(self._(", on the "), day_of_week_of_month_description, self._(" {0} of the month"))
elif "L" in s:
formatted = self._(", on the last {0} of the month")
else:
formatted = self._(", only on {0}")
return formatted
return self.get_segment_description(
self._expression_parts[5],
self._(", every day"),
lambda s: get_day_name(s),
lambda s: self._(", every {0} days of the week").format(s),
lambda s: self._(", {0} through {1}"),
lambda s: get_format(s),
lambda s: self._(", {0} through {1}")
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: self._(", every {0} months").format(s),
lambda s: self._(", month {0} through month {1}") or self._(", {0} through {1}"),
lambda s: self._(", only in {0}"),
lambda s: self._(", month {0} through month {1}") or self._(", {0} through {1}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
if expression == "L":
description = self._(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = self._(", on the last weekday of the month")
else:
regex = re.compile(r"(\d{1,2}W)|(W\d{1,2})")
m = regex.match(expression)
if m: # if matches
day_number = int(m.group().replace("W", ""))
day_string = self._("first weekday") if day_number == 1 else self._("weekday nearest day {0}").format(day_number)
description = self._(", on the {0} of the month").format(day_string)
else:
# Handle "last day offset"(i.e.L - 5: "5 days before the last day of the month")
regex = re.compile(r"L-(\d{1,2})")
m = regex.match(expression)
if m: # if matches
off_set_days = m.group(1)
description = self._(", {0} days before the last day of the month").format(off_set_days)
else:
description = self.get_segment_description(
expression,
self._(", every day"),
lambda s: s,
lambda s: self._(", every day") if s == "1" else self._(", every {0} days"),
lambda s: self._(", between day {0} and {1} of the month"),
lambda s: self._(", on day {0} of the month"),
lambda s: self._(", {0} through {1}")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: self._(", every {0} years").format(s),
lambda s: self._(", year {0} through year {1}") or self._(", {0} through {1}"),
lambda s: self._(", only in {0}"),
lambda s: self._(", year {0} through year {1}") or self._(", {0} through {1}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format,
get_range_format
):
"""Returns segment description
Args:
expression: Segment to describe
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
get_range_format: function that formats range expressions depending on cron parts
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0],
get_between_description_format,
get_single_item_description
)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += self._(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += self._(" and ")
if "-" in segment:
between_segment_description = self.generate_between_segment_description(
segment,
get_range_format,
get_single_item_description
)
between_segment_description = between_segment_description.replace(", ", "")
description_content += between_segment_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(expression).format(description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression,
get_between_description_format,
get_single_item_description
)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will construct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = self._("PM") if (hour >= 12) else self._("AM")
if period:
# add preceding space
period = " " + period
if hour > 12:
hour -= 12
if hour == 0:
hour = 12
minute = str(int(minute_expression)) # Removes leading zero if any
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(self._(", every minute"), '')
description = description.replace(self._(", every hour"), '')
description = description.replace(self._(", every day"), '')
description = re.sub(r', ?$', '', description)
return description
@staticmethod
def transform_case(description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
@staticmethod
def number_to_day(day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
try:
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
except IndexError:
raise IndexError("Day {} is out of range!".format(day_number))
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
| (expression, options=None, **kwargs) |
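A brief usage sketch of the descriptor above; the full-description string below was traced through `get_minutes_description` and `transform_verbosity`, while the last expression is shown without asserted output since wording depends on locale and options:
>>> from cron_descriptor import ExpressionDescriptor, DescriptionTypeEnum
>>> descriptor = ExpressionDescriptor("*/5 * * * *")
>>> str(descriptor)  # __str__ delegates to get_description()
'Every 5 minutes'
>>> descriptor.get_description(DescriptionTypeEnum.MINUTES)
'every 5 minutes'
>>> ExpressionDescriptor("0 30 9 * * ?", use_24hour_time_format=True).get_description()  # 6-part expression with a seconds field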
20,409 | cron_descriptor.ExpressionDescriptor | _ | null | def _(self, message):
return self.get_text.trans.gettext(message)
| (self, message) |
20,410 | cron_descriptor.ExpressionDescriptor | __init__ | Initializes a new instance of the ExpressionDescriptor
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknown
| def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptor
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknown
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
# if kwargs in _options, overwrite it, if not raise exception
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException("Unknown {} configuration argument".format(kwarg))
# Initializes localization
self.get_text = GetText(options.locale_code, options.locale_location)
# Parse expression
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
| (self, expression, options=None, **kwargs) |
20,411 | cron_descriptor.ExpressionDescriptor | __repr__ | null | def __repr__(self):
return self.get_description()
| (self) |
20,412 | cron_descriptor.ExpressionDescriptor | __str__ | null | def __str__(self):
return self.get_description()
| (self) |