id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses, 1 value)
---|---|---|
1654794
|
import pickle
import os
class Knowledge:
"""
Stores prior knowledge: learned entities and dialog pairs.
"""
FILENAME_KNOW = "chatbot.know"
FILENAME_DIALOG = "chatbot.dialog"
def __init__(self, path="./data/", load=True):
self.path = path
self.known = {}
self.dialog = {}
if load:
self.load()
def study_entity(self, entity: str, sub: str):
if entity not in self.known:
self.known[entity] = Entity(entity)
if sub not in self.known:
self.known[sub] = Entity(sub)
if not self.known[entity].has_sub(sub):
self.known[entity].append(self.known[sub])
def study_connect(self, pre: dict, post: dict, double: bool):
conn = Statement(pre, post, double)
for entity in pre:
if entity not in self.known:
self.known[entity] = Entity(entity)
self.known[entity].add_conn(conn)
if double:
for entity in post:
if entity not in self.known:
self.known[entity] = Entity(entity)
self.known[entity].add_conn(conn)
def study_dialog(self, question: str, answer: str):
que_tag = None
for entity in self.known.keys():
if self.known.get(entity, Entity(entity)).has_sub(question):
que_tag = entity
break
if que_tag is None:
que_tag = "问题 %d" % len(self.dialog.keys())
if que_tag not in self.dialog:
self.dialog[que_tag] = []
self.study_entity(que_tag, question)
self.dialog[que_tag].append(answer)
def get_dialog_question(self) -> list:
return list(self.dialog.keys())
def get_dialog_answer(self, question: str) -> list:
return self.dialog.get(question, [])
def get_conn(self, entity: str) -> list:
return self.known.get(entity, Entity(entity)).get_conn()
def get_sub(self, entity: str) -> list:
return self.known.get(entity, Entity(entity)).get_sub()
def save(self):
if not os.path.exists(self.path):
os.makedirs(self.path)
dialog_path = "%s%s" % (self.path, Knowledge.FILENAME_DIALOG)
know_path = "%s%s" % (self.path, Knowledge.FILENAME_KNOW)
with open(dialog_path, "wb") as data:
pickle.dump(self.dialog, data)
with open(know_path, "wb") as data:
pickle.dump(self.known, data)
def load(self):
dialog_path = "%s%s" % (self.path, Knowledge.FILENAME_DIALOG)
know_path = "%s%s" % (self.path, Knowledge.FILENAME_KNOW)
if os.path.exists(dialog_path):
with open(dialog_path, "rb") as data:
self.dialog = pickle.load(data)
if os.path.exists(know_path):
with open(know_path, "rb") as data:
self.known = pickle.load(data)
class Instance:
"""
entity (str): the abstraction this instance belongs to
tag (str): tag used to distinguish abstractions
text (str): the concrete instance text
evidence (list): the instances this one was derived from
"""
def __init__(self, entity: str, text: str, evidence: list, tag=None):
self.entity = entity
self.text = text
self.evidence = evidence
self.tag = tag
def get_entity(self): return self.entity
def get_text(self): return self.text
def get_evidence(self): return self.evidence
def get_tag(self): return self.tag
def __eq__(self, other):
if not isinstance(other, Instance):
return False
if self.entity != other.entity:
return False
if self.text != other.text:
return False
if self.tag != other.tag:
return False
# Should evidence be part of the equality check?
return True
def __repr__(self):
return "%s(%s)-%s\t(%s)" % (self.entity, self.tag, self.text, repr(self.evidence))
class Statement:
"""
pre (dict[str, str]): keys are abstractions, values are concrete instances
post (dict[str, str]): keys are abstractions, values are concrete instances
double (bool): whether the inference works in both directions
"""
def __init__(self, pre: dict, post: dict, double: bool):
self.pre = pre
self.post = post
self.double = double
def infer(self, ctx) -> list:
evidence = []
# print("====")
for entity_str, sub in self.pre.items():
succ = False
for entity in ctx.findAll(entity_str):
if sub is None or entity.get_text() == sub:
succ = True
evidence.append(entity)
break
# print(evidence, entity, sub)
if succ is False:
return None
infer = []
for entity, sub in self.post.items():
infer.append(Instance(entity, sub, evidence))
return infer
def __repr__(self):
return repr(self.pre) + repr(self.post)
def __eq__(self, other):
if not isinstance(other, Statement):
return False
if self.double != other.double:
return False
if self.pre == other.pre and self.post == other.post:
return True
if self.double and self.pre == other.post and self.post == other.pre:
return True
return False
class Entity:
"""
tag (str): text that denotes the entity; wrapped in [] it refers to another entity
sub (list[Entity]): child entities, i.e. other entities this entity can be turned into
conn (list[Statement]): statements recorded for knowledge inference
"""
def __init__(self, tag: str):
self.tag = tag
self.sub = []
self.conn = []
def append(self, sub): self.sub.append(sub)
def get_text(self) -> str: return self.tag
def get_sub(self) -> list: return self.sub
def get_conn(self): return self.conn
def add_conn(self, conn: Statement):
if conn not in self.conn:
self.conn.append(conn)
def has_sub(self, sub: str):
for entity in self.sub:
if sub == entity.get_text():
return True
return False
def __repr__(self):
return repr(self.tag) + repr(self.conn) + repr(self.sub)
class Context:
def __init__(self):
self.inst_stack = [{}]
def find(self, entity_str: str, tag=None, evidence=None) -> Instance:
"""
Return the Instance of entity_str whose tag and evidence match the given values.
"""
for stack in self.inst_stack:
for entity in stack.get(entity_str, []):
if entity.get_tag() != tag:
continue
if evidence is not None and entity.get_evidence() != evidence:
continue
return entity
return None
def findAll(self, entity_str: str) -> list:
result = []
for stack in self.inst_stack:
for stack_entity_str, stack_entities in stack.items():
if stack_entity_str == entity_str:
result.extend(stack_entities)
return result
def new_stack(self): self.inst_stack.append({})
def abort_stack(self): self.inst_stack.pop()
def apply_stack(self):
stack = self.inst_stack.pop()
for key in stack.keys():
if key in self.inst_stack[-1]:
self.inst_stack[-1][key].extend(stack[key])
else:
self.inst_stack[-1][key] = stack[key]
def append(self, inst: Instance) -> bool:
entity = inst.get_entity()
tag = inst.get_tag()
evidence = inst.get_evidence()
if self.find(entity, tag=tag, evidence=evidence) is not None:
return False
if entity not in self.inst_stack[-1]:
self.inst_stack[-1][entity] = []
self.inst_stack[-1][entity].append(inst)
return True
def iter_all_inst(self):
for stack in self.inst_stack:
for _, inst_list in stack.items():
for inst in inst_list:
yield inst
def get_known_stat(self, known: Knowledge):
assert len(self.inst_stack) == 1, "context stack has not been fully unwound"
known_stat = []
for inst in self.iter_all_inst():
entity = inst.get_entity()
for stat in known.get_conn(entity):
if stat not in known_stat:
known_stat.append(stat)
return known_stat
def apply_closure(self, append_stat: list, known: Knowledge):
new_stat = []
for stat in append_stat:
infer = stat.infer(self)
if infer is None:
continue
for inst in infer:
entity = inst.get_entity()
text = inst.get_text()
evidence = inst.get_evidence()
# TODO: resolve tag collisions when the same derivation fires more than once
if self.append(Instance(entity, text, evidence, tag=None)):
# TODO: derivations may form cycles; try pruning
for next_stat in known.get_conn(entity):
new_stat.append(next_stat)
if len(new_stat) > 0:
self.apply_closure(new_stat, known)
def infer(self, known: Knowledge):
assert len(self.inst_stack) == 1, "context stack has not been fully unwound"
known_stat = self.get_known_stat(known)
self.apply_closure(known_stat, known)
def __repr__(self):
text = ["Context:"]
for stack in self.inst_stack:
text.append("==stack==")
for inst in stack.items():
text.append(repr(inst))
return '\n'.join(text)
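# Minimal usage sketch (hypothetical facts, assuming the classes above): teach
# the knowledge base an entity relation and an inference rule, then run the
# forward-chaining closure over a context of observed instances.
if __name__ == "__main__":
    know = Knowledge(load=False)
    know.study_entity("animal", "cat")  # "cat" is a kind of "animal"
    know.study_connect({"animal": "cat"}, {"sound": "meow"}, double=False)
    ctx = Context()
    ctx.append(Instance("animal", "cat", []))  # observed instance
    ctx.infer(know)  # derives Instance("sound", "meow", ...)
    print(ctx)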
|
StarcoderdataPython
|
3377372
|
<filename>run.py
#!/usr/bin/env python
import sys
from jira_migrate.issues import main
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
StarcoderdataPython
|
1719742
|
import cloudmesh
cloudmesh.shell("help")
print(cloudmesh.version())
|
StarcoderdataPython
|
14979
|
"""Module for BlameInteractionGraph plots."""
import typing as tp
from datetime import datetime
from pathlib import Path
import click
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import plotly.offline as offply
from matplotlib import style
from varats.data.reports.blame_interaction_graph import (
create_blame_interaction_graph,
CIGNodeAttrs,
CIGEdgeAttrs,
AIGNodeAttrs,
CAIGNodeAttrs,
)
from varats.data.reports.blame_report import BlameReport
from varats.mapping.commit_map import get_commit_map
from varats.paper_mgmt.case_study import (
newest_processed_revision_for_case_study,
)
from varats.plot.plot import Plot, PlotDataEmpty
from varats.plot.plots import (
PlotGenerator,
REQUIRE_CASE_STUDY,
REQUIRE_REVISION,
)
from varats.plots.chord_plot_utils import (
make_chord_plot,
make_arc_plot,
NodeTy,
ChordPlotNodeInfo,
ChordPlotEdgeInfo,
ArcPlotEdgeInfo,
ArcPlotNodeInfo,
)
from varats.ts_utils.cli_util import CLIOptionTy, make_cli_option
from varats.utils.git_util import (
CommitRepoPair,
create_commit_lookup_helper,
UNCOMMITTED_COMMIT_HASH,
FullCommitHash,
ShortCommitHash,
)
class CommitInteractionGraphPlot(Plot, plot_name='cig_plot'):
"""Creates a dot file for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
# Nothing to do here.
pass
def save(self, plot_dir: Path, filetype: str = 'svg') -> None:
project_name = self.plot_kwargs["project"]
revision = self.plot_kwargs["revision"]
cig = create_blame_interaction_graph(project_name, revision
).commit_interaction_graph()
nx.set_node_attributes(
cig, {node: cig.nodes[node]["commit_hash"] for node in cig.nodes},
"label"
)
# pylint: disable=import-outside-toplevel
from networkx.drawing.nx_agraph import write_dot
write_dot(cig, plot_dir / self.plot_file_name("dot"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CommitInteractionGraphPlotGenerator(
PlotGenerator,
generator_name="cig-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Plot a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphPlot(self.plot_config, **self.plot_kwargs)
]
NodeInfoTy = tp.TypeVar("NodeInfoTy", ChordPlotNodeInfo, ArcPlotNodeInfo)
EdgeInfoTy = tp.TypeVar("EdgeInfoTy", ChordPlotEdgeInfo, ArcPlotEdgeInfo)
def _prepare_cig_plotly(
project_name: str, revision: FullCommitHash,
create_node_info: tp.Callable[[NodeTy, CommitRepoPair, nx.DiGraph],
NodeInfoTy],
create_edge_info: tp.Callable[[CommitRepoPair, CommitRepoPair, int],
EdgeInfoTy]
) -> tp.Tuple[tp.List[tp.Tuple[NodeTy, NodeInfoTy]], tp.List[tp.Tuple[
NodeTy, NodeTy, EdgeInfoTy]]]:
commit_lookup = create_commit_lookup_helper(project_name)
cig = create_blame_interaction_graph(project_name,
revision).commit_interaction_graph()
def filter_nodes(node: CommitRepoPair) -> bool:
if node.commit_hash == UNCOMMITTED_COMMIT_HASH:
return False
commit = commit_lookup(node)
if not commit:
return False
# make filter configurable
return datetime.utcfromtimestamp(commit.commit_time
) >= datetime(2015, 1, 1)
nodes: tp.List[tp.Tuple[NodeTy, NodeInfoTy]] = []
node_meta: tp.Dict[NodeTy, CommitRepoPair] = {}
for node in cig.nodes:
node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])
commit = node_attrs["commit"]
if not filter_nodes(commit):
continue
node_meta[node] = commit
nodes.append((node, create_node_info(node, commit, cig)))
nodes = sorted(
nodes, key=lambda x: int(commit_lookup(node_meta[x[0]]).commit_time)
)
edges: tp.List[tp.Tuple[NodeTy, NodeTy, EdgeInfoTy]] = []
for source, sink in cig.edges:
amount = tp.cast(CIGEdgeAttrs, cig[source][sink])["amount"]
source_commit = tp.cast(CIGNodeAttrs, cig.nodes[source])["commit"]
sink_commit = tp.cast(CIGNodeAttrs, cig.nodes[sink])["commit"]
if not filter_nodes(source_commit) or not filter_nodes(sink_commit):
continue
edges.append((
source, sink, create_edge_info(source_commit, sink_commit, amount)
))
return nodes, edges
class CommitInteractionGraphChordPlot(Plot, plot_name='cig_chord_plot'):
"""Chord plot for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
project_name: str = self.plot_kwargs["case_study"].project_name
revision = get_commit_map(project_name).convert_to_full_or_warn(
ShortCommitHash(self.plot_kwargs["revision"])
)
def create_node_data(
node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph
) -> ChordPlotNodeInfo:
del node
del cig
return {"info": commit.commit_hash.short_hash, "color": 1}
def create_edge_data(
source_commit: CommitRepoPair, sink_commit: CommitRepoPair,
amount: int
) -> ChordPlotEdgeInfo:
return {
"size": amount,
"color": 1,
"info":
f"{source_commit.commit_hash.short_hash} "
f"--{{{amount}}}--> "
f"{sink_commit.commit_hash.short_hash}"
}
nodes, edges = _prepare_cig_plotly(
project_name, revision, create_node_data, create_edge_data
)
figure = make_chord_plot(nodes, edges, "Commit Interaction Graph")
if view_mode:
figure.show()
else:
offply.plot(figure, filename=self.plot_file_name("html"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGChordPlotGenerator(
PlotGenerator,
generator_name="cig-chord-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Generates a chord plot for a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphChordPlot(
self.plot_config, **self.plot_kwargs
)
]
class CommitInteractionGraphArcPlot(Plot, plot_name='cig_arc_plot'):
"""Arc plot for a commit interaction graph."""
def plot(self, view_mode: bool) -> None:
project_name: str = self.plot_kwargs["case_study"].project_name
revision = get_commit_map(project_name).convert_to_full_or_warn(
ShortCommitHash(self.plot_kwargs["revision"])
)
def create_node_data(
node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph
) -> ArcPlotNodeInfo:
return {
"info": commit.commit_hash.short_hash,
"size": cig.degree(node),
"fill_color": cig.out_degree(node),
"line_color": cig.in_degree(node)
}
def create_edge_data(
source_commit: CommitRepoPair, sink_commit: CommitRepoPair,
amount: int
) -> ArcPlotEdgeInfo:
return {
"size": amount,
"color": amount,
"info":
f"{source_commit.commit_hash.short_hash} "
f"--{{{amount}}}--> "
f"{sink_commit.commit_hash.short_hash}"
}
nodes, edges = _prepare_cig_plotly(
project_name, revision, create_node_data, create_edge_data
)
figure = make_arc_plot(nodes, edges, "Commit Interaction Graph")
if view_mode:
figure.show()
else:
offply.plot(figure, filename=self.plot_file_name("html"))
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGArcPlotGenerator(
PlotGenerator,
generator_name="cig-arc-plot",
options=[REQUIRE_CASE_STUDY, REQUIRE_REVISION]
):
"""Generates an arc plot for a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphArcPlot(self.plot_config, **self.plot_kwargs)
]
OPTIONAL_SORT_METHOD: CLIOptionTy = make_cli_option(
"--sort-by",
type=click.Choice(["degree", "time"]),
default="degree",
required=False,
help="Sort method for commit interaction graph nodes."
)
class CommitInteractionGraphNodeDegreePlot(Plot, plot_name='cig_node_degrees'):
"""
Plot node degrees of a commit interaction graph.
Additional arguments:
- sort: criteria to sort the revisions [degree, time]
"""
def plot(self, view_mode: bool) -> None:
sort = self.plot_kwargs["sort"]
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Commit Interaction Graph - Node Degrees")
axes.set_title(case_study.project_name)
axes.set_ylabel("Degree")
xlabel = ""
if sort == "time":
xlabel = "Time (old to new)"
elif sort == "degree":
xlabel = "Commits"
axes.set_xlabel(xlabel)
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
cig = create_blame_interaction_graph(case_study.project_name, revision
).commit_interaction_graph()
commit_lookup = create_commit_lookup_helper(case_study.project_name)
def filter_nodes(node: CommitRepoPair) -> bool:
if node.commit_hash == UNCOMMITTED_COMMIT_HASH:
return False
return bool(commit_lookup(node))
def commit_time(node: CommitRepoPair) -> datetime:
return datetime.utcfromtimestamp(commit_lookup(node).commit_time)
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in cig.nodes:
node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])
commit = node_attrs["commit"]
if not filter_nodes(commit):
continue
nodes.append(({
"commit_hash": commit.commit_hash,
"commit_time": commit_time(commit),
"node_degree": cig.degree(node),
"node_out_degree": cig.out_degree(node),
"node_in_degree": cig.in_degree(node),
}))
data = pd.DataFrame(nodes)
if sort == "time":
data.sort_values(by="commit_time", inplace=True)
node_degrees = data.loc[:, ["commit_hash", "node_degree"]]
node_out_degrees = data.loc[:, ["commit_hash", "node_out_degree"]]
node_in_degrees = data.loc[:, ["commit_hash", "node_in_degree"]]
if sort == "degree":
node_degrees.sort_values(by="node_degree", inplace=True)
node_out_degrees.sort_values(by="node_out_degree", inplace=True)
node_in_degrees.sort_values(by="node_in_degree", inplace=True)
axes.plot(node_degrees["node_degree"].values, label="degree")
axes.plot(
node_out_degrees["node_out_degree"].values, label="out_degree"
)
axes.plot(node_in_degrees["node_in_degree"].values, label="in_degree")
axes.legend()
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="cig-node-degrees",
options=[REQUIRE_CASE_STUDY, OPTIONAL_SORT_METHOD]
):
"""Generates a plot of node degrees of a commit interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
class AuthorInteractionGraphNodeDegreePlot(Plot, plot_name='aig_node_degrees'):
"""Plot node degrees of a author interaction graph."""
def plot(self, view_mode: bool) -> None:
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Author Interaction Graph - Node Degrees")
axes.set_title(case_study.project_name)
axes.set_ylabel("Degree")
axes.set_xlabel("Authors")
project_name = case_study.project_name
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
aig = create_blame_interaction_graph(project_name, revision
).author_interaction_graph()
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in aig.nodes:
node_attrs = tp.cast(AIGNodeAttrs, aig.nodes[node])
author = node_attrs["author"]
nodes.append(({
"author": author,
"node_degree": aig.degree(node),
"node_out_degree": aig.out_degree(node),
"node_in_degree": aig.in_degree(node),
}))
data = pd.DataFrame(nodes)
node_degrees = data.loc[:, ["author", "node_degree"]]
node_out_degrees = data.loc[:, ["author", "node_out_degree"]]
node_in_degrees = data.loc[:, ["author", "node_in_degree"]]
node_degrees.sort_values(by="node_degree", inplace=True)
node_out_degrees.sort_values(by="node_out_degree", inplace=True)
node_in_degrees.sort_values(by="node_in_degree", inplace=True)
axes.plot(node_degrees["node_degree"].values, label="degree")
axes.plot(
node_out_degrees["node_out_degree"].values, label="out_degree"
)
axes.plot(node_in_degrees["node_in_degree"].values, label="in_degree")
axes.legend()
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class AIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="aig-node-degrees",
options=[REQUIRE_CASE_STUDY]
):
"""Generates a plot of node degrees of a author interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
AuthorInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
class CommitAuthorInteractionGraphNodeDegreePlot(
Plot, plot_name='caig_node_degrees'
):
"""Plot node degrees of commits in a commit-author interaction graph."""
def plot(self, view_mode: bool) -> None:
case_study = self.plot_kwargs["plot_case_study"]
style.use(self.plot_config.style())
fig, axes = plt.subplots(1, 1, sharey="all")
fig.subplots_adjust(hspace=0.5)
fig.suptitle("Commit-Author Interaction Graph - # Interacting Authors")
axes.set_title(case_study.project_name)
axes.set_ylabel("Authors")
axes.set_xlabel("Commits")
project_name = case_study.project_name
revision = newest_processed_revision_for_case_study(
case_study, BlameReport
)
if not revision:
raise PlotDataEmpty()
caig = create_blame_interaction_graph(project_name, revision
).commit_author_interaction_graph()
nodes: tp.List[tp.Dict[str, tp.Any]] = []
for node in caig.nodes:
node_attrs = tp.cast(CAIGNodeAttrs, caig.nodes[node])
commit = node_attrs["commit"]
if commit:
nodes.append(({
"commit": commit.commit_hash,
"num_authors": caig.degree(node)
}))
data = pd.DataFrame(nodes)
num_authors = data.loc[:, ["commit", "num_authors"]]
num_authors.sort_values(by="num_authors", inplace=True)
axes.plot(num_authors["num_authors"].values)
def calc_missing_revisions(
self, boundary_gradient: float
) -> tp.Set[FullCommitHash]:
raise NotImplementedError
class CAIGNodeDegreePlotGenerator(
PlotGenerator,
generator_name="caig-node-degrees",
options=[
REQUIRE_CASE_STUDY,
]
):
"""Generates a plot of node degrees of a commit-author interaction graph."""
def generate(self) -> tp.List[Plot]:
return [
CommitAuthorInteractionGraphNodeDegreePlot(
self.plot_config, **self.plot_kwargs
)
]
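# Illustrative, self-contained sketch (not part of varats): the node-degree
# plots above share one pattern -- collect per-node degrees into a DataFrame,
# sort each degree series independently, and plot the sorted values.
def _node_degree_plot_sketch() -> None:
    graph = nx.gnp_random_graph(50, 0.1, directed=True, seed=42)
    data = pd.DataFrame({
        "node_degree": [d for _, d in graph.degree()],
        "node_out_degree": [d for _, d in graph.out_degree()],
        "node_in_degree": [d for _, d in graph.in_degree()],
    })
    _, axes = plt.subplots(1, 1)
    for column in data.columns:
        axes.plot(data[column].sort_values().values, label=column)
    axes.legend()
    plt.show()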
|
StarcoderdataPython
|
4809718
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict, List
class ECertQueryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class ECertQueryRequest(TeaModel):
def __init__(
self,
user_id: str = None,
):
# User ID
self.user_id = user_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.user_id is not None:
result['userId'] = self.user_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('userId') is not None:
self.user_id = m.get('userId')
return self
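# Minimal usage sketch (hypothetical values; not part of the generated SDK):
# every model in this file converts between snake_case attributes and the
# camelCase wire format via to_map()/from_map().
def _ecert_request_roundtrip_sketch() -> str:
    request = ECertQueryRequest(user_id="user-123")
    wire = request.to_map()  # expected to carry {'userId': 'user-123'}
    restored = ECertQueryRequest().from_map(wire)
    return restored.user_id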
class ECertQueryResponseBody(TeaModel):
def __init__(
self,
real_name: str = None,
cert_no: str = None,
main_dept_id: int = None,
main_dept_name: str = None,
employ_job_id: str = None,
employ_job_id_label: str = None,
employ_position_id: str = None,
employ_position_id_label: str = None,
employ_position_rank_id: str = None,
employ_position_rank_id_label: str = None,
hired_date: str = None,
last_work_day: str = None,
termination_reason_voluntary: List[str] = None,
termination_reason_passive: List[str] = None,
name: str = None,
):
# Name on the ID card
self.real_name = real_name
# ID card number
self.cert_no = cert_no
# Main department ID
self.main_dept_id = main_dept_id
# Main department name
self.main_dept_name = main_dept_name
# Job ID
self.employ_job_id = employ_job_id
# Job name
self.employ_job_id_label = employ_job_id_label
# Position ID
self.employ_position_id = employ_position_id
# Position name
self.employ_position_id_label = employ_position_id_label
# Rank ID
self.employ_position_rank_id = employ_position_rank_id
# Rank name
self.employ_position_rank_id_label = employ_position_rank_id_label
# Hire date
self.hired_date = hired_date
# Last working day
self.last_work_day = last_work_day
# Reasons for voluntary termination
self.termination_reason_voluntary = termination_reason_voluntary
# Reasons for involuntary termination
self.termination_reason_passive = termination_reason_passive
# Name
self.name = name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.real_name is not None:
result['realName'] = self.real_name
if self.cert_no is not None:
result['certNO'] = self.cert_no
if self.main_dept_id is not None:
result['mainDeptId'] = self.main_dept_id
if self.main_dept_name is not None:
result['mainDeptName'] = self.main_dept_name
if self.employ_job_id is not None:
result['employJobId'] = self.employ_job_id
if self.employ_job_id_label is not None:
result['employJobIdLabel'] = self.employ_job_id_label
if self.employ_position_id is not None:
result['employPositionId'] = self.employ_position_id
if self.employ_position_id_label is not None:
result['employPositionIdLabel'] = self.employ_position_id_label
if self.employ_position_rank_id is not None:
result['employPositionRankId'] = self.employ_position_rank_id
if self.employ_position_rank_id_label is not None:
result['employPositionRankIdLabel'] = self.employ_position_rank_id_label
if self.hired_date is not None:
result['hiredDate'] = self.hired_date
if self.last_work_day is not None:
result['lastWorkDay'] = self.last_work_day
if self.termination_reason_voluntary is not None:
result['terminationReasonVoluntary'] = self.termination_reason_voluntary
if self.termination_reason_passive is not None:
result['terminationReasonPassive'] = self.termination_reason_passive
if self.name is not None:
result['name'] = self.name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('realName') is not None:
self.real_name = m.get('realName')
if m.get('certNO') is not None:
self.cert_no = m.get('certNO')
if m.get('mainDeptId') is not None:
self.main_dept_id = m.get('mainDeptId')
if m.get('mainDeptName') is not None:
self.main_dept_name = m.get('mainDeptName')
if m.get('employJobId') is not None:
self.employ_job_id = m.get('employJobId')
if m.get('employJobIdLabel') is not None:
self.employ_job_id_label = m.get('employJobIdLabel')
if m.get('employPositionId') is not None:
self.employ_position_id = m.get('employPositionId')
if m.get('employPositionIdLabel') is not None:
self.employ_position_id_label = m.get('employPositionIdLabel')
if m.get('employPositionRankId') is not None:
self.employ_position_rank_id = m.get('employPositionRankId')
if m.get('employPositionRankIdLabel') is not None:
self.employ_position_rank_id_label = m.get('employPositionRankIdLabel')
if m.get('hiredDate') is not None:
self.hired_date = m.get('hiredDate')
if m.get('lastWorkDay') is not None:
self.last_work_day = m.get('lastWorkDay')
if m.get('terminationReasonVoluntary') is not None:
self.termination_reason_voluntary = m.get('terminationReasonVoluntary')
if m.get('terminationReasonPassive') is not None:
self.termination_reason_passive = m.get('terminationReasonPassive')
if m.get('name') is not None:
self.name = m.get('name')
return self
class ECertQueryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ECertQueryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ECertQueryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class QueryJobRanksHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryJobRanksRequest(TeaModel):
def __init__(
self,
rank_category_id: str = None,
rank_code: str = None,
rank_name: str = None,
next_token: int = None,
max_results: int = None,
):
# Rank category (series)
self.rank_category_id = rank_category_id
# Rank code
self.rank_code = rank_code
# Rank name
self.rank_name = rank_name
# Cursor marking where to start reading
self.next_token = next_token
# Maximum number of records to read in this call
self.max_results = max_results
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.rank_category_id is not None:
result['rankCategoryId'] = self.rank_category_id
if self.rank_code is not None:
result['rankCode'] = self.rank_code
if self.rank_name is not None:
result['rankName'] = self.rank_name
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.max_results is not None:
result['maxResults'] = self.max_results
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('rankCategoryId') is not None:
self.rank_category_id = m.get('rankCategoryId')
if m.get('rankCode') is not None:
self.rank_code = m.get('rankCode')
if m.get('rankName') is not None:
self.rank_name = m.get('rankName')
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('maxResults') is not None:
self.max_results = m.get('maxResults')
return self
class QueryJobRanksResponseBodyList(TeaModel):
def __init__(
self,
rank_id: str = None,
rank_category_id: str = None,
rank_code: str = None,
rank_name: str = None,
min_job_grade: int = None,
max_job_grade: int = None,
rank_description: str = None,
):
# Rank ID
self.rank_id = rank_id
# Rank category ID
self.rank_category_id = rank_category_id
# Rank code
self.rank_code = rank_code
# Rank name
self.rank_name = rank_name
# Minimum grade
self.min_job_grade = min_job_grade
# Maximum grade
self.max_job_grade = max_job_grade
# Rank description
self.rank_description = rank_description
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.rank_id is not None:
result['rankId'] = self.rank_id
if self.rank_category_id is not None:
result['rankCategoryId'] = self.rank_category_id
if self.rank_code is not None:
result['rankCode'] = self.rank_code
if self.rank_name is not None:
result['rankName'] = self.rank_name
if self.min_job_grade is not None:
result['minJobGrade'] = self.min_job_grade
if self.max_job_grade is not None:
result['maxJobGrade'] = self.max_job_grade
if self.rank_description is not None:
result['rankDescription'] = self.rank_description
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('rankId') is not None:
self.rank_id = m.get('rankId')
if m.get('rankCategoryId') is not None:
self.rank_category_id = m.get('rankCategoryId')
if m.get('rankCode') is not None:
self.rank_code = m.get('rankCode')
if m.get('rankName') is not None:
self.rank_name = m.get('rankName')
if m.get('minJobGrade') is not None:
self.min_job_grade = m.get('minJobGrade')
if m.get('maxJobGrade') is not None:
self.max_job_grade = m.get('maxJobGrade')
if m.get('rankDescription') is not None:
self.rank_description = m.get('rankDescription')
return self
class QueryJobRanksResponseBody(TeaModel):
def __init__(
self,
next_token: int = None,
has_more: bool = None,
list: List[QueryJobRanksResponseBodyList] = None,
):
# Position this call has read up to; empty means all data has been read
self.next_token = next_token
# Whether more data is available
self.has_more = has_more
# List of ranks
self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.has_more is not None:
result['hasMore'] = self.has_more
result['list'] = []
if self.list is not None:
for k in self.list:
result['list'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('hasMore') is not None:
self.has_more = m.get('hasMore')
self.list = []
if m.get('list') is not None:
for k in m.get('list'):
temp_model = QueryJobRanksResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class QueryJobRanksResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: QueryJobRanksResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = QueryJobRanksResponseBody()
self.body = temp_model.from_map(m['body'])
return self
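# Pagination sketch (the 'fetch' callable is hypothetical; the API client is
# not part of this file): callers loop until hasMore is False, feeding the
# returned nextToken back into the next request.
def _paginate_job_ranks_sketch(fetch) -> list:
    ranks, next_token = [], None
    while True:
        body = fetch(QueryJobRanksRequest(next_token=next_token, max_results=100))
        ranks.extend(body.list or [])
        if not body.has_more:
            return ranks
        next_token = body.next_token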
class QueryJobsHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryJobsRequest(TeaModel):
def __init__(
self,
job_name: str = None,
next_token: int = None,
max_results: int = None,
):
# Job name
self.job_name = job_name
# Offset
self.next_token = next_token
# Maximum number of results
self.max_results = max_results
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.job_name is not None:
result['jobName'] = self.job_name
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.max_results is not None:
result['maxResults'] = self.max_results
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('jobName') is not None:
self.job_name = m.get('jobName')
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('maxResults') is not None:
self.max_results = m.get('maxResults')
return self
class QueryJobsResponseBodyList(TeaModel):
def __init__(
self,
job_id: str = None,
job_name: str = None,
job_description: str = None,
):
# Job ID
self.job_id = job_id
# Job name
self.job_name = job_name
# Job description
self.job_description = job_description
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.job_id is not None:
result['jobId'] = self.job_id
if self.job_name is not None:
result['jobName'] = self.job_name
if self.job_description is not None:
result['jobDescription'] = self.job_description
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('jobId') is not None:
self.job_id = m.get('jobId')
if m.get('jobName') is not None:
self.job_name = m.get('jobName')
if m.get('jobDescription') is not None:
self.job_description = m.get('jobDescription')
return self
class QueryJobsResponseBody(TeaModel):
def __init__(
self,
next_token: int = None,
has_more: bool = None,
list: List[QueryJobsResponseBodyList] = None,
):
# Cursor to start the next fetch from
self.next_token = next_token
# Whether more data is available
self.has_more = has_more
# List of jobs
self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.has_more is not None:
result['hasMore'] = self.has_more
result['list'] = []
if self.list is not None:
for k in self.list:
result['list'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('hasMore') is not None:
self.has_more = m.get('hasMore')
self.list = []
if m.get('list') is not None:
for k in m.get('list'):
temp_model = QueryJobsResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class QueryJobsResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: QueryJobsResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = QueryJobsResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class QueryCustomEntryProcessesHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryCustomEntryProcessesRequest(TeaModel):
def __init__(
self,
operate_user_id: str = None,
next_token: int = None,
max_results: int = None,
):
# Operator user ID
self.operate_user_id = operate_user_id
# Offset
self.next_token = next_token
# Maximum number of results
self.max_results = max_results
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.operate_user_id is not None:
result['operateUserId'] = self.operate_user_id
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.max_results is not None:
result['maxResults'] = self.max_results
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('operateUserId') is not None:
self.operate_user_id = m.get('operateUserId')
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('maxResults') is not None:
self.max_results = m.get('maxResults')
return self
class QueryCustomEntryProcessesResponseBodyList(TeaModel):
def __init__(
self,
form_id: str = None,
form_name: str = None,
form_desc: str = None,
short_url: str = None,
):
self.form_id = form_id
self.form_name = form_name
self.form_desc = form_desc
self.short_url = short_url
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.form_id is not None:
result['formId'] = self.form_id
if self.form_name is not None:
result['formName'] = self.form_name
if self.form_desc is not None:
result['formDesc'] = self.form_desc
if self.short_url is not None:
result['shortUrl'] = self.short_url
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('formId') is not None:
self.form_id = m.get('formId')
if m.get('formName') is not None:
self.form_name = m.get('formName')
if m.get('formDesc') is not None:
self.form_desc = m.get('formDesc')
if m.get('shortUrl') is not None:
self.short_url = m.get('shortUrl')
return self
class QueryCustomEntryProcessesResponseBody(TeaModel):
def __init__(
self,
next_token: int = None,
has_more: bool = None,
list: List[QueryCustomEntryProcessesResponseBodyList] = None,
):
# Cursor to start the next fetch from
self.next_token = next_token
# Whether more data is available
self.has_more = has_more
# List of forms
self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.has_more is not None:
result['hasMore'] = self.has_more
result['list'] = []
if self.list is not None:
for k in self.list:
result['list'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('hasMore') is not None:
self.has_more = m.get('hasMore')
self.list = []
if m.get('list') is not None:
for k in m.get('list'):
temp_model = QueryCustomEntryProcessesResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class QueryCustomEntryProcessesResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: QueryCustomEntryProcessesResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = QueryCustomEntryProcessesResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class QueryPositionsHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryPositionsRequest(TeaModel):
def __init__(
self,
position_name: str = None,
in_category_ids: List[str] = None,
in_position_ids: List[str] = None,
next_token: int = None,
max_results: int = None,
):
# Position name
self.position_name = position_name
# Position category ID list
self.in_category_ids = in_category_ids
# Position ID list
self.in_position_ids = in_position_ids
# Offset
self.next_token = next_token
# Number of records per query
self.max_results = max_results
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.position_name is not None:
result['positionName'] = self.position_name
if self.in_category_ids is not None:
result['inCategoryIds'] = self.in_category_ids
if self.in_position_ids is not None:
result['inPositionIds'] = self.in_position_ids
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.max_results is not None:
result['maxResults'] = self.max_results
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('positionName') is not None:
self.position_name = m.get('positionName')
if m.get('inCategoryIds') is not None:
self.in_category_ids = m.get('inCategoryIds')
if m.get('inPositionIds') is not None:
self.in_position_ids = m.get('inPositionIds')
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('maxResults') is not None:
self.max_results = m.get('maxResults')
return self
class QueryPositionsResponseBodyList(TeaModel):
def __init__(
self,
position_id: str = None,
position_name: str = None,
position_category_id: str = None,
job_id: str = None,
position_des: str = None,
rank_id_list: List[str] = None,
status: int = None,
):
# Position ID
self.position_id = position_id
# Position name
self.position_name = position_name
# Position category ID
self.position_category_id = position_category_id
# ID of the job this position belongs to
self.job_id = job_id
# Position description
self.position_des = position_des
# Ranks associated with this position
self.rank_id_list = rank_id_list
# Position status: 0 = enabled, 1 = disabled
self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.position_id is not None:
result['positionId'] = self.position_id
if self.position_name is not None:
result['positionName'] = self.position_name
if self.position_category_id is not None:
result['positionCategoryId'] = self.position_category_id
if self.job_id is not None:
result['jobId'] = self.job_id
if self.position_des is not None:
result['positionDes'] = self.position_des
if self.rank_id_list is not None:
result['rankIdList'] = self.rank_id_list
if self.status is not None:
result['status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('positionId') is not None:
self.position_id = m.get('positionId')
if m.get('positionName') is not None:
self.position_name = m.get('positionName')
if m.get('positionCategoryId') is not None:
self.position_category_id = m.get('positionCategoryId')
if m.get('jobId') is not None:
self.job_id = m.get('jobId')
if m.get('positionDes') is not None:
self.position_des = m.get('positionDes')
if m.get('rankIdList') is not None:
self.rank_id_list = m.get('rankIdList')
if m.get('status') is not None:
self.status = m.get('status')
return self
class QueryPositionsResponseBody(TeaModel):
def __init__(
self,
next_token: int = None,
has_more: bool = None,
list: List[QueryPositionsResponseBodyList] = None,
):
# Position this call has read up to; empty means all data has been read
self.next_token = next_token
# Whether more data is available
self.has_more = has_more
# List of positions
self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.has_more is not None:
result['hasMore'] = self.has_more
result['list'] = []
if self.list is not None:
for k in self.list:
result['list'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('hasMore') is not None:
self.has_more = m.get('hasMore')
self.list = []
if m.get('list') is not None:
for k in m.get('list'):
temp_model = QueryPositionsResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class QueryPositionsResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: QueryPositionsResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = QueryPositionsResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class MasterDataQueryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class MasterDataQueryRequestQueryParamsConditionList(TeaModel):
def __init__(
self,
operate: str = None,
value: str = None,
):
# Comparison operator for the field
self.operate = operate
# Operand value
self.value = value
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.operate is not None:
result['operate'] = self.operate
if self.value is not None:
result['value'] = self.value
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('operate') is not None:
self.operate = m.get('operate')
if m.get('value') is not None:
self.value = m.get('value')
return self
class MasterDataQueryRequestQueryParams(TeaModel):
def __init__(
self,
field_code: str = None,
join_type: str = None,
condition_list: List[MasterDataQueryRequestQueryParamsConditionList] = None,
):
# Field to filter on
self.field_code = field_code
# How the filter conditions are joined
self.join_type = join_type
# Filter conditions
self.condition_list = condition_list
def validate(self):
if self.condition_list:
for k in self.condition_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.field_code is not None:
result['fieldCode'] = self.field_code
if self.join_type is not None:
result['joinType'] = self.join_type
result['conditionList'] = []
if self.condition_list is not None:
for k in self.condition_list:
result['conditionList'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('fieldCode') is not None:
self.field_code = m.get('fieldCode')
if m.get('joinType') is not None:
self.join_type = m.get('joinType')
self.condition_list = []
if m.get('conditionList') is not None:
for k in m.get('conditionList'):
temp_model = MasterDataQueryRequestQueryParamsConditionList()
self.condition_list.append(temp_model.from_map(k))
return self
class MasterDataQueryRequest(TeaModel):
def __init__(
self,
scope_code: str = None,
view_entity_code: str = None,
tenant_id: int = None,
biz_uk: str = None,
relation_ids: List[str] = None,
opt_user_id: str = None,
next_token: int = None,
max_results: int = None,
query_params: List[MasterDataQueryRequestQueryParams] = None,
):
# Scope code, assigned by DingTalk
self.scope_code = scope_code
# Entity code
self.view_entity_code = view_entity_code
# Tenant ID of the data producer, assigned by DingTalk
self.tenant_id = tenant_id
# Unique business key of the data
self.biz_uk = biz_uk
# Related IDs, usually userIds
self.relation_ids = relation_ids
# userId of the current operator
self.opt_user_id = opt_user_id
# Pagination cursor
self.next_token = next_token
# Number of records per page
self.max_results = max_results
# Additional query conditions
self.query_params = query_params
def validate(self):
if self.query_params:
for k in self.query_params:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.scope_code is not None:
result['scopeCode'] = self.scope_code
if self.view_entity_code is not None:
result['viewEntityCode'] = self.view_entity_code
if self.tenant_id is not None:
result['tenantId'] = self.tenant_id
if self.biz_uk is not None:
result['bizUK'] = self.biz_uk
if self.relation_ids is not None:
result['relationIds'] = self.relation_ids
if self.opt_user_id is not None:
result['optUserId'] = self.opt_user_id
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.max_results is not None:
result['maxResults'] = self.max_results
result['queryParams'] = []
if self.query_params is not None:
for k in self.query_params:
result['queryParams'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('scopeCode') is not None:
self.scope_code = m.get('scopeCode')
if m.get('viewEntityCode') is not None:
self.view_entity_code = m.get('viewEntityCode')
if m.get('tenantId') is not None:
self.tenant_id = m.get('tenantId')
if m.get('bizUK') is not None:
self.biz_uk = m.get('bizUK')
if m.get('relationIds') is not None:
self.relation_ids = m.get('relationIds')
if m.get('optUserId') is not None:
self.opt_user_id = m.get('optUserId')
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('maxResults') is not None:
self.max_results = m.get('maxResults')
self.query_params = []
if m.get('queryParams') is not None:
for k in m.get('queryParams'):
temp_model = MasterDataQueryRequestQueryParams()
self.query_params.append(temp_model.from_map(k))
return self
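# Hypothetical construction sketch (field names and values are placeholders):
# a master-data query filters on a field through nested QueryParams and
# ConditionList models before being serialized with to_map().
def _master_data_query_sketch() -> dict:
    condition = MasterDataQueryRequestQueryParamsConditionList(operate="eq", value="active")
    params = MasterDataQueryRequestQueryParams(
        field_code="status", join_type="and", condition_list=[condition]
    )
    request = MasterDataQueryRequest(
        scope_code="hrm", view_entity_code="employee",
        max_results=20, query_params=[params],
    )
    return request.to_map()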
class MasterDataQueryResponseBodyResultViewEntityFieldVOListFieldDataVO(TeaModel):
def __init__(
self,
key: str = None,
value: str = None,
):
# Key of the field value
self.key = key
# Display text of the field value
self.value = value
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.key is not None:
result['key'] = self.key
if self.value is not None:
result['value'] = self.value
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('key') is not None:
self.key = m.get('key')
if m.get('value') is not None:
self.value = m.get('value')
return self
class MasterDataQueryResponseBodyResultViewEntityFieldVOList(TeaModel):
def __init__(
self,
field_code: str = None,
field_data_vo: MasterDataQueryResponseBodyResultViewEntityFieldVOListFieldDataVO = None,
field_name: str = None,
field_type: str = None,
):
# Field code
self.field_code = field_code
# Field value
self.field_data_vo = field_data_vo
# Field name
self.field_name = field_name
# Field type
self.field_type = field_type
def validate(self):
if self.field_data_vo:
self.field_data_vo.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.field_code is not None:
result['fieldCode'] = self.field_code
if self.field_data_vo is not None:
result['fieldDataVO'] = self.field_data_vo.to_map()
if self.field_name is not None:
result['fieldName'] = self.field_name
if self.field_type is not None:
result['fieldType'] = self.field_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('fieldCode') is not None:
self.field_code = m.get('fieldCode')
if m.get('fieldDataVO') is not None:
temp_model = MasterDataQueryResponseBodyResultViewEntityFieldVOListFieldDataVO()
self.field_data_vo = temp_model.from_map(m['fieldDataVO'])
if m.get('fieldName') is not None:
self.field_name = m.get('fieldName')
if m.get('fieldType') is not None:
self.field_type = m.get('fieldType')
return self
class MasterDataQueryResponseBodyResult(TeaModel):
def __init__(
self,
outer_id: str = None,
scope_code: str = None,
view_entity_code: str = None,
view_entity_field_volist: List[MasterDataQueryResponseBodyResultViewEntityFieldVOList] = None,
relation_id: str = None,
):
# Unique ID
self.outer_id = outer_id
# Scope code
self.scope_code = scope_code
# Entity code
self.view_entity_code = view_entity_code
# List of fields
self.view_entity_field_volist = view_entity_field_volist
# Related ID, usually a userId
self.relation_id = relation_id
def validate(self):
if self.view_entity_field_volist:
for k in self.view_entity_field_volist:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.outer_id is not None:
result['outerId'] = self.outer_id
if self.scope_code is not None:
result['scopeCode'] = self.scope_code
if self.view_entity_code is not None:
result['viewEntityCode'] = self.view_entity_code
result['viewEntityFieldVOList'] = []
if self.view_entity_field_volist is not None:
for k in self.view_entity_field_volist:
result['viewEntityFieldVOList'].append(k.to_map() if k else None)
if self.relation_id is not None:
result['relationId'] = self.relation_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('outerId') is not None:
self.outer_id = m.get('outerId')
if m.get('scopeCode') is not None:
self.scope_code = m.get('scopeCode')
if m.get('viewEntityCode') is not None:
self.view_entity_code = m.get('viewEntityCode')
self.view_entity_field_volist = []
if m.get('viewEntityFieldVOList') is not None:
for k in m.get('viewEntityFieldVOList'):
temp_model = MasterDataQueryResponseBodyResultViewEntityFieldVOList()
self.view_entity_field_volist.append(temp_model.from_map(k))
if m.get('relationId') is not None:
self.relation_id = m.get('relationId')
return self
class MasterDataQueryResponseBody(TeaModel):
def __init__(
self,
total: int = None,
has_more: bool = None,
next_token: int = None,
success: bool = None,
result: List[MasterDataQueryResponseBodyResult] = None,
):
        # Total number of entries
self.total = total
        # Whether more results exist
self.has_more = has_more
        # Pagination cursor (nextToken)
self.next_token = next_token
        # Whether the request succeeded
self.success = success
        # Result list
self.result = result
def validate(self):
if self.result:
for k in self.result:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.total is not None:
result['total'] = self.total
if self.has_more is not None:
result['hasMore'] = self.has_more
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.success is not None:
result['success'] = self.success
result['result'] = []
if self.result is not None:
for k in self.result:
result['result'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('total') is not None:
self.total = m.get('total')
if m.get('hasMore') is not None:
self.has_more = m.get('hasMore')
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('success') is not None:
self.success = m.get('success')
self.result = []
if m.get('result') is not None:
for k in m.get('result'):
temp_model = MasterDataQueryResponseBodyResult()
self.result.append(temp_model.from_map(k))
return self
class MasterDataQueryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: MasterDataQueryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = MasterDataQueryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
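# Usage note: MasterDataQueryResponseBody is a cursor-paged result. Callers
# typically keep querying while `has_more` is true and pass the returned
# `next_token` back as the cursor of the next request (the request-side
# parameter name is not defined in this file, so that part is an assumption);
# `result` holds the MasterDataQueryResponseBodyResult entries of each page.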
class AddHrmPreentryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class AddHrmPreentryRequestGroupsSectionsEmpFieldVOList(TeaModel):
def __init__(
self,
value: str = None,
field_code: str = None,
):
self.value = value
self.field_code = field_code
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.value is not None:
result['value'] = self.value
if self.field_code is not None:
result['fieldCode'] = self.field_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('value') is not None:
self.value = m.get('value')
if m.get('fieldCode') is not None:
self.field_code = m.get('fieldCode')
return self
class AddHrmPreentryRequestGroupsSections(TeaModel):
def __init__(
self,
old_index: int = None,
emp_field_volist: List[AddHrmPreentryRequestGroupsSectionsEmpFieldVOList] = None,
):
self.old_index = old_index
self.emp_field_volist = emp_field_volist
def validate(self):
if self.emp_field_volist:
for k in self.emp_field_volist:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.old_index is not None:
result['oldIndex'] = self.old_index
result['empFieldVOList'] = []
if self.emp_field_volist is not None:
for k in self.emp_field_volist:
result['empFieldVOList'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('oldIndex') is not None:
self.old_index = m.get('oldIndex')
self.emp_field_volist = []
if m.get('empFieldVOList') is not None:
for k in m.get('empFieldVOList'):
temp_model = AddHrmPreentryRequestGroupsSectionsEmpFieldVOList()
self.emp_field_volist.append(temp_model.from_map(k))
return self
class AddHrmPreentryRequestGroups(TeaModel):
def __init__(
self,
group_id: str = None,
sections: List[AddHrmPreentryRequestGroupsSections] = None,
):
self.group_id = group_id
self.sections = sections
def validate(self):
if self.sections:
for k in self.sections:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.group_id is not None:
result['groupId'] = self.group_id
result['sections'] = []
if self.sections is not None:
for k in self.sections:
result['sections'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('groupId') is not None:
self.group_id = m.get('groupId')
self.sections = []
if m.get('sections') is not None:
for k in m.get('sections'):
temp_model = AddHrmPreentryRequestGroupsSections()
self.sections.append(temp_model.from_map(k))
return self
class AddHrmPreentryRequest(TeaModel):
def __init__(
self,
pre_entry_time: int = None,
name: str = None,
mobile: str = None,
agent_id: int = None,
groups: List[AddHrmPreentryRequestGroups] = None,
):
self.pre_entry_time = pre_entry_time
self.name = name
self.mobile = mobile
self.agent_id = agent_id
self.groups = groups
def validate(self):
if self.groups:
for k in self.groups:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.pre_entry_time is not None:
result['preEntryTime'] = self.pre_entry_time
if self.name is not None:
result['name'] = self.name
if self.mobile is not None:
result['mobile'] = self.mobile
if self.agent_id is not None:
result['agentId'] = self.agent_id
result['groups'] = []
if self.groups is not None:
for k in self.groups:
result['groups'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('preEntryTime') is not None:
self.pre_entry_time = m.get('preEntryTime')
if m.get('name') is not None:
self.name = m.get('name')
if m.get('mobile') is not None:
self.mobile = m.get('mobile')
if m.get('agentId') is not None:
self.agent_id = m.get('agentId')
self.groups = []
if m.get('groups') is not None:
for k in m.get('groups'):
temp_model = AddHrmPreentryRequestGroups()
self.groups.append(temp_model.from_map(k))
return self
class AddHrmPreentryResponseBody(TeaModel):
def __init__(
self,
tmp_user_id: str = None,
):
        # Temporary userId returned for the created pre-entry record
self.tmp_user_id = tmp_user_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.tmp_user_id is not None:
result['tmpUserId'] = self.tmp_user_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('tmpUserId') is not None:
self.tmp_user_id = m.get('tmpUserId')
return self
class AddHrmPreentryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: AddHrmPreentryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = AddHrmPreentryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
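# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the generated models):
# every TeaModel above is a plain data holder whose to_map() produces the
# camelCase wire format and whose from_map() rebuilds the object tree.
# All values below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    request = AddHrmPreentryRequest(
        pre_entry_time=1609430400000,
        name='example-name',
        mobile='13800000000',
        agent_id=123456,
        groups=[
            AddHrmPreentryRequestGroups(
                group_id='group-1',
                sections=[
                    AddHrmPreentryRequestGroupsSections(
                        old_index=0,
                        emp_field_volist=[
                            AddHrmPreentryRequestGroupsSectionsEmpFieldVOList(
                                value='example', field_code='sys00-name'),
                        ],
                    ),
                ],
            ),
        ],
    )
    # Serialize to the wire format and rebuild the model from it.
    wire = request.to_map()
    rebuilt = AddHrmPreentryRequest().from_map(wire)
    print(wire)
    print(rebuilt.name, rebuilt.mobile)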
|
StarcoderdataPython
|
4811349
|
import morepath
from .app import App
def run():
morepath.autoscan()
morepath.run(App())
if __name__ == '__main__':
run()
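# For reference, the `.app` module imported above is expected to define `App`
# as a morepath application. A minimal, hypothetical version could look like:
#
#     import morepath
#
#     class App(morepath.App):
#         pass
#
#     @App.path(path='')
#     class Root(object):
#         pass
#
#     @App.view(model=Root)
#     def hello(self, request):
#         return 'Hello world'
#
# morepath.autoscan() registers the scanned configuration before
# morepath.run() serves App() over WSGI.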
|
StarcoderdataPython
|
3259088
|
# coding: utf-8
"""
EXACT - API
API to interact with the EXACT Server # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
from exact_sync.v1.api.pagination_base_api import PaginationBaseAPI
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from exact_sync.v1.api_client import ApiClient
class UsersApi(PaginationBaseAPI):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_user(self, **kwargs): # noqa: E501
"""create_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_user(async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body body:
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_user_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_user_with_http_info(**kwargs) # noqa: E501
return data
def create_user_with_http_info(self, **kwargs): # noqa: E501
"""create_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_user_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body body:
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_user" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_user(self, **kwargs): # noqa: E501
"""create_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_user(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str username:
:param bool is_superuser:
:param bool is_staff:
:param bool is_active:
:param datetime last_login:
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_user_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_user_with_http_info(**kwargs) # noqa: E501
return data
def create_user_with_http_info(self, **kwargs): # noqa: E501
"""create_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_user_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str username:
:param bool is_superuser:
:param bool is_staff:
:param bool is_active:
:param datetime last_login:
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username', 'is_superuser', 'is_staff', 'is_active', 'last_login'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_user" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_user(self, **kwargs): # noqa: E501
"""create_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_user(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str username:
:param bool is_superuser:
:param bool is_staff:
:param bool is_active:
:param datetime last_login:
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_user_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_user_with_http_info(**kwargs) # noqa: E501
return data
def create_user_with_http_info(self, **kwargs): # noqa: E501
"""create_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_user_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str username:
:param bool is_superuser:
:param bool is_staff:
:param bool is_active:
:param datetime last_login:
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username', 'is_superuser', 'is_staff', 'is_active', 'last_login'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_user" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
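    # Illustrative call pattern (the client setup below is an assumption, not
    # taken from this file; only UsersApi and ApiClient are defined/imported here):
    #
    #     api = UsersApi(ApiClient())
    #     user = api.create_user(username='alice', is_active=True)      # synchronous
    #     thread = api.create_user(username='alice', async_req=True)    # asynchronous
    #     user = thread.get()
    #
    # create_user is (re)defined several times above; in Python the last
    # definition wins, so the keyword-argument variant is the one in effect.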
def destroy_user(self, id, **kwargs): # noqa: E501
"""destroy_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.destroy_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str id2: id
:param str username: username
:param str username__contains: username__contains
:param str is_superuser: is_superuser
:param str is_staff: is_staff
:param str is_active: is_active
:param str last_login: last_login
:param str team: team
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.destroy_user_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.destroy_user_with_http_info(id, **kwargs) # noqa: E501
return data
def destroy_user_with_http_info(self, id, **kwargs): # noqa: E501
"""destroy_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.destroy_user_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str id2: id
:param str username: username
:param str username__contains: username__contains
:param str is_superuser: is_superuser
:param str is_staff: is_staff
:param str is_active: is_active
:param str last_login: last_login
:param str team: team
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'id2', 'username', 'username__contains', 'is_superuser', 'is_staff', 'is_active', 'last_login', 'team'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method destroy_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `destroy_user`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'username' in params:
query_params.append(('username', params['username'])) # noqa: E501
if 'username__contains' in params:
query_params.append(('username__contains', params['username__contains'])) # noqa: E501
if 'is_superuser' in params:
query_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
query_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
query_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
query_params.append(('last_login', params['last_login'])) # noqa: E501
if 'team' in params:
query_params.append(('team', params['team'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/{id}/', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
    def list_users(self, pagination: bool = True, **kwargs):  # noqa: E501
"""list_users # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_users(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Number of results to return per page.
:param int offset: The initial index from which to return the results.
:param str id: id
:param str username: username
:param str username__contains: username__contains
:param str is_superuser: is_superuser
:param str is_staff: is_staff
:param str is_active: is_active
:param str last_login: last_login
:param str team: team
:return: Users
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if pagination:
if kwargs.get('async_req'):
return self.list_users_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_users_with_http_info(**kwargs) # noqa: E501
return data
else:
return self._get_all(self.list_users_with_http_info, **kwargs)
def list_users_with_http_info(self, **kwargs): # noqa: E501
"""list_users # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_users_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Number of results to return per page.
:param int offset: The initial index from which to return the results.
:param str id: id
:param str username: username
:param str username__contains: username__contains
:param str is_superuser: is_superuser
:param str is_staff: is_staff
:param str is_active: is_active
:param str last_login: last_login
:param str team: team
:return: Users
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['limit', 'offset', 'id', 'username', 'username__contains', 'is_superuser', 'is_staff', 'is_active', 'last_login', 'team'] # noqa: E501
all_params.append('omit')
all_params.append('fields')
all_params.append('expand')
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_users" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'id' in params:
query_params.append(('id', params['id'])) # noqa: E501
if 'username' in params:
query_params.append(('username', params['username'])) # noqa: E501
if 'username__contains' in params:
query_params.append(('username__contains', params['username__contains'])) # noqa: E501
if 'is_superuser' in params:
query_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
query_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
query_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
query_params.append(('last_login', params['last_login'])) # noqa: E501
if 'team' in params:
query_params.append(('team', params['team'])) # noqa: E501
if 'omit' in params:
query_params.append(('omit', params['omit'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'expand' in params:
query_params.append(('expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Users', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
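    # Illustrative listing pattern (values are hypothetical):
    #
    #     page = api.list_users(limit=50, offset=0)      # a single page ('Users' response)
    #     everyone = api.list_users(pagination=False)    # fetch all pages
    #
    # With pagination=False the call is delegated to PaginationBaseAPI._get_all(),
    # which presumably keeps invoking list_users_with_http_info() until the
    # server reports no further results.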
def partial_update_user(self, id, **kwargs): # noqa: E501
"""partial_update_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.partial_update_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Body6 body:
:param str id2: id
:param str username2: username
:param str username__contains: username__contains
:param str is_superuser2: is_superuser
:param str is_staff2: is_staff
:param str is_active2: is_active
:param str last_login2: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.partial_update_user_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.partial_update_user_with_http_info(id, **kwargs) # noqa: E501
return data
def partial_update_user_with_http_info(self, id, **kwargs): # noqa: E501
"""partial_update_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.partial_update_user_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Body6 body:
:param str id2: id
:param str username2: username
:param str username__contains: username__contains
:param str is_superuser2: is_superuser
:param str is_staff2: is_staff
:param str is_active2: is_active
:param str last_login2: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body', 'id2', 'username2', 'username__contains', 'is_superuser2', 'is_staff2', 'is_active2', 'last_login2', 'team'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method partial_update_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `partial_update_user`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'username2' in params:
query_params.append(('username', params['username2'])) # noqa: E501
if 'username__contains' in params:
query_params.append(('username__contains', params['username__contains'])) # noqa: E501
if 'is_superuser2' in params:
query_params.append(('is_superuser', params['is_superuser2'])) # noqa: E501
if 'is_staff2' in params:
query_params.append(('is_staff', params['is_staff2'])) # noqa: E501
if 'is_active2' in params:
query_params.append(('is_active', params['is_active2'])) # noqa: E501
if 'last_login2' in params:
query_params.append(('last_login', params['last_login2'])) # noqa: E501
if 'team' in params:
query_params.append(('team', params['team'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/{id}/', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def partial_update_user(self, id, **kwargs): # noqa: E501
"""partial_update_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.partial_update_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str username:
:param bool is_superuser:
:param bool is_staff:
:param bool is_active:
:param datetime last_login:
:param str id2: id
:param str username2: username
:param str username__contains: username__contains
:param str is_superuser2: is_superuser
:param str is_staff2: is_staff
:param str is_active2: is_active
:param str last_login2: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.partial_update_user_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.partial_update_user_with_http_info(id, **kwargs) # noqa: E501
return data
def partial_update_user_with_http_info(self, id, **kwargs): # noqa: E501
"""partial_update_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.partial_update_user_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str username:
:param bool is_superuser:
:param bool is_staff:
:param bool is_active:
:param datetime last_login:
:param str id2: id
:param str username2: username
:param str username__contains: username__contains
:param str is_superuser2: is_superuser
:param str is_staff2: is_staff
:param str is_active2: is_active
:param str last_login2: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'username', 'is_superuser', 'is_staff', 'is_active', 'last_login', 'id2', 'username2', 'username__contains', 'is_superuser2', 'is_staff2', 'is_active2', 'last_login2', 'team'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method partial_update_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `partial_update_user`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'username2' in params:
query_params.append(('username', params['username2'])) # noqa: E501
if 'username__contains' in params:
query_params.append(('username__contains', params['username__contains'])) # noqa: E501
if 'is_superuser2' in params:
query_params.append(('is_superuser', params['is_superuser2'])) # noqa: E501
if 'is_staff2' in params:
query_params.append(('is_staff', params['is_staff2'])) # noqa: E501
if 'is_active2' in params:
query_params.append(('is_active', params['is_active2'])) # noqa: E501
if 'last_login2' in params:
query_params.append(('last_login', params['last_login2'])) # noqa: E501
if 'team' in params:
query_params.append(('team', params['team'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/{id}/', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def partial_update_user(self, id, **kwargs): # noqa: E501
"""partial_update_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.partial_update_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str username:
:param bool is_superuser:
:param bool is_staff:
:param bool is_active:
:param datetime last_login:
:param str id2: id
:param str username2: username
:param str username__contains: username__contains
:param str is_superuser2: is_superuser
:param str is_staff2: is_staff
:param str is_active2: is_active
:param str last_login2: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.partial_update_user_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.partial_update_user_with_http_info(id, **kwargs) # noqa: E501
return data
def partial_update_user_with_http_info(self, id, **kwargs): # noqa: E501
"""partial_update_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.partial_update_user_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str username:
:param bool is_superuser:
:param bool is_staff:
:param bool is_active:
:param datetime last_login:
:param str id2: id
:param str username2: username
:param str username__contains: username__contains
:param str is_superuser2: is_superuser
:param str is_staff2: is_staff
:param str is_active2: is_active
:param str last_login2: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'username', 'is_superuser', 'is_staff', 'is_active', 'last_login', 'id2', 'username2', 'username__contains', 'is_superuser2', 'is_staff2', 'is_active2', 'last_login2', 'team'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method partial_update_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `partial_update_user`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'username2' in params:
query_params.append(('username', params['username2'])) # noqa: E501
if 'username__contains' in params:
query_params.append(('username__contains', params['username__contains'])) # noqa: E501
if 'is_superuser2' in params:
query_params.append(('is_superuser', params['is_superuser2'])) # noqa: E501
if 'is_staff2' in params:
query_params.append(('is_staff', params['is_staff2'])) # noqa: E501
if 'is_active2' in params:
query_params.append(('is_active', params['is_active2'])) # noqa: E501
if 'last_login2' in params:
query_params.append(('last_login', params['last_login2'])) # noqa: E501
if 'team' in params:
query_params.append(('team', params['team'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/{id}/', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def retrieve_user(self, id, **kwargs): # noqa: E501
"""retrieve_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str id2: id
:param str username: username
:param str username__contains: username__contains
:param str is_superuser: is_superuser
:param str is_staff: is_staff
:param str is_active: is_active
:param str last_login: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.retrieve_user_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.retrieve_user_with_http_info(id, **kwargs) # noqa: E501
return data
def retrieve_user_with_http_info(self, id, **kwargs): # noqa: E501
"""retrieve_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_user_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str id2: id
:param str username: username
:param str username__contains: username__contains
:param str is_superuser: is_superuser
:param str is_staff: is_staff
:param str is_active: is_active
:param str last_login: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'id2', 'username', 'username__contains', 'is_superuser', 'is_staff', 'is_active', 'last_login', 'team'] # noqa: E501
all_params.append('async_req')
all_params.append('omit')
all_params.append('fields')
all_params.append('expand')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method retrieve_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `retrieve_user`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'username' in params:
query_params.append(('username', params['username'])) # noqa: E501
if 'username__contains' in params:
query_params.append(('username__contains', params['username__contains'])) # noqa: E501
if 'is_superuser' in params:
query_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
query_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
query_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
query_params.append(('last_login', params['last_login'])) # noqa: E501
if 'team' in params:
query_params.append(('team', params['team'])) # noqa: E501
if 'omit' in params:
query_params.append(('omit', params['omit'])) # noqa: E501
if 'fields' in params:
            query_params.append(('fields', params['fields'])) # noqa: E501
if 'expand' in params:
query_params.append(('expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/{id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
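    # Field-selection sketch (values are hypothetical):
    #
    #     user = api.retrieve_user(id='1', fields='id,username')   # only these fields
    #     user = api.retrieve_user(id='1', expand='team')          # expand a relation
    #
    # `fields`, `omit` and `expand` are forwarded verbatim as query parameters;
    # their exact semantics are defined server-side (they look like
    # flex-fields-style sparse-fieldset options, but that is an assumption).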
def update_user(self, id, **kwargs): # noqa: E501
"""update_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Body3 body:
:param str id2: id
:param str username2: username
:param str username__contains: username__contains
:param str is_superuser2: is_superuser
:param str is_staff2: is_staff
:param str is_active2: is_active
:param str last_login2: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_user_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_user_with_http_info(id, **kwargs) # noqa: E501
return data
def update_user_with_http_info(self, id, **kwargs): # noqa: E501
"""update_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param Body3 body:
:param str id2: id
:param str username2: username
:param str username__contains: username__contains
:param str is_superuser2: is_superuser
:param str is_staff2: is_staff
:param str is_active2: is_active
:param str last_login2: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body', 'id2', 'username2', 'username__contains', 'is_superuser2', 'is_staff2', 'is_active2', 'last_login2', 'team'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_user`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'username2' in params:
query_params.append(('username', params['username2'])) # noqa: E501
if 'username__contains' in params:
query_params.append(('username__contains', params['username__contains'])) # noqa: E501
if 'is_superuser2' in params:
query_params.append(('is_superuser', params['is_superuser2'])) # noqa: E501
if 'is_staff2' in params:
query_params.append(('is_staff', params['is_staff2'])) # noqa: E501
if 'is_active2' in params:
query_params.append(('is_active', params['is_active2'])) # noqa: E501
if 'last_login2' in params:
query_params.append(('last_login', params['last_login2'])) # noqa: E501
if 'team' in params:
query_params.append(('team', params['team'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/{id}/', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_user(self, id, **kwargs): # noqa: E501
"""update_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str username:
:param bool is_superuser:
:param bool is_staff:
:param bool is_active:
:param datetime last_login:
:param str id2: id
:param str username2: username
:param str username__contains: username__contains
:param str is_superuser2: is_superuser
:param str is_staff2: is_staff
:param str is_active2: is_active
:param str last_login2: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_user_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_user_with_http_info(id, **kwargs) # noqa: E501
return data
def update_user_with_http_info(self, id, **kwargs): # noqa: E501
"""update_user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param str username:
:param bool is_superuser:
:param bool is_staff:
:param bool is_active:
:param datetime last_login:
:param str id2: id
:param str username2: username
:param str username__contains: username__contains
:param str is_superuser2: is_superuser
:param str is_staff2: is_staff
:param str is_active2: is_active
:param str last_login2: last_login
:param str team: team
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'username', 'is_superuser', 'is_staff', 'is_active', 'last_login', 'id2', 'username2', 'username__contains', 'is_superuser2', 'is_staff2', 'is_active2', 'last_login2', 'team'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_user`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'id2' in params:
query_params.append(('id', params['id2'])) # noqa: E501
if 'username2' in params:
query_params.append(('username', params['username2'])) # noqa: E501
if 'username__contains' in params:
query_params.append(('username__contains', params['username__contains'])) # noqa: E501
if 'is_superuser2' in params:
query_params.append(('is_superuser', params['is_superuser2'])) # noqa: E501
if 'is_staff2' in params:
query_params.append(('is_staff', params['is_staff2'])) # noqa: E501
if 'is_active2' in params:
query_params.append(('is_active', params['is_active2'])) # noqa: E501
if 'last_login2' in params:
query_params.append(('last_login', params['last_login2'])) # noqa: E501
if 'team' in params:
query_params.append(('team', params['team'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'username' in params:
form_params.append(('username', params['username'])) # noqa: E501
if 'is_superuser' in params:
form_params.append(('is_superuser', params['is_superuser'])) # noqa: E501
if 'is_staff' in params:
form_params.append(('is_staff', params['is_staff'])) # noqa: E501
if 'is_active' in params:
form_params.append(('is_active', params['is_active'])) # noqa: E501
if 'last_login' in params:
form_params.append(('last_login', params['last_login'])) # noqa: E501
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/users/{id}/', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
StarcoderdataPython
|
1614297
|
import re
import sys
import shutil
if not sys.version_info >= (3, 3):
print('ERROR: You must be running Python >= 3.3')
sys.exit(1) # cancel project
MODULE_REGEX = r'^[_a-zA-Z][_a-zA-Z0-9]+$'
module_name = '{{ cookiecutter.project_slug}}'
if not re.match(MODULE_REGEX, module_name):
    print('ERROR: The project slug (%s) is not a valid Python module name. '
          'Please use underscores (_) instead of hyphens (-).' % module_name)
sys.exit(1) # cancel project
executables = ['flake8', 'pep8', 'pip', 'twine']
if "{{ cookiecutter.environment_manager }}" == 'conda':
executables.append('conda')
if "{{ cookiecutter.environment_manager }}" == 'tox':
executables.append('tox')
for executable in executables:
if not shutil.which(executable):
print('WARNING: You do not have the %s executable. You should '
'install it now through pip/conda' % executable)
|
StarcoderdataPython
|
3276433
|
class PubSub:
def __init__(self, logger, linked_list, bus_factory, async_service, context_service, *args, **kwargs):
super().__init__(*args, **kwargs)
self._logger = logger
self._logger.debug(
f"PubSub: context_service = {context_service}")
self.__daisy_chain_bus = linked_list
self.__bus_factory = bus_factory
self._async_service = async_service
self._context_service = context_service
self.__daisy_chain_bus.append(self.__bus_factory())
def register_topic_publisher(self, topic, publisher, **kwargs):
self.__head_bus.register_topic_publisher(topic, publisher, **kwargs)
def register_topic_subscriber(self, topic, subscriber, **kwargs):
self.__head_bus.register_topic_subscriber(topic, subscriber, **kwargs)
def register_message_pipeline(self, pipe_line):
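        """Register a message pipeline on the daisy-chained buses.
        Walks the linked list of buses, appending a new bus from the factory
        whenever the pipeline has more stages than the existing chain, and
        registers each pipe node's topic publishers and subscribers on its bus.
        """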
message_bus_link = self.__daisy_chain_bus.head
for message_pipe_node in pipe_line:
if message_bus_link is None:
message_bus = self.__bus_factory()
self.__daisy_chain_bus.append(message_bus)
message_bus_link = self.__daisy_chain_bus.tail
message_bus = message_bus_link.value
message_bus_link = message_bus_link.next_link
message_pipe = message_pipe_node.value
topic = message_pipe.topic
for publisher in message_pipe.sources:
message_bus.register_topic_publisher(topic, publisher)
for subscriber, kwargs in message_pipe.destinations:
message_bus.register_topic_subscriber(topic, subscriber, **kwargs)
def start(self):
self._logger.info("Starting the pub sub service.")
self._async_service.run(self.__start)
self._logger.info("The Pub Sub service has shutdown")
async def __start(self):
next_bus_link = self.__daisy_chain_bus.head.next_link
async with self._async_service.create_channel_context() as channel_context:
self._logger.debug("starting the head message bus")
channel_context.start_soon(self.__head_bus.start_bus, next_bus_link)
@property
def __head_bus(self):
return self.__daisy_chain_bus.head.value
|
StarcoderdataPython
|
3384382
|
"""Implements basic throttle, brake, steering functionality"""
import rospy
from yaw_controller import YawController
from lowpass import LowPassFilter
from pid import PID
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
#pylint: disable=C0326,trailing-whitespace
class Controller(object):
"""
Use a Yaw controller and a pid controller to get throttle, brake and steer values
"""
def __init__( self, vehicle_mass, fuel_capacity, brake_deadband, accel_limit, decel_limit,
wheel_base, wheel_radius, steer_ratio, max_lat_accel, max_steer_angle):
#pylint: disable=too-many-arguments, too-many-locals
self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1,
max_lat_accel, max_steer_angle)
k_p, k_i, k_d = 0.3, 0.1, 0.0
min_thr, max_thr = 0.0, 1.0
self.throttle_controller = PID(k_p, k_i, k_d, min_thr, max_thr)
tau = 0.5
sample_time = 0.02 # 1 / 50 Hz
self.vel_lpf = LowPassFilter( tau, sample_time )
        self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brake_deadband = brake_deadband
self.accel_limit = accel_limit
self.decel_limit = decel_limit
self.wheel_base = wheel_base
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, cur_linear_vel, cur_angular_vel,
dsrd_linear_vel, dsrd_angular_vel, dbw_enabled):
"""Compute and return throttle brake and steering from dsrd linear_vel and
dsrd_angular_vel as well as current vels"""
if not dbw_enabled :
self.throttle_controller.reset()
return 0., 0., 0.
cur_vel = self.vel_lpf.filt( cur_linear_vel )
steering = self.yaw_controller.get_steering( dsrd_linear_vel, dsrd_angular_vel,
cur_vel )
vel_error = dsrd_linear_vel - cur_vel
cur_time = rospy.get_time()
del_time = cur_time - self.last_time
self.last_time = cur_time
throttle = self.throttle_controller.step( vel_error, del_time )
brake = 0.
if dsrd_linear_vel == 0. and cur_vel < 0.1 :
throttle = 0
brake = 400.0
elif throttle < 0.1 and vel_error < 0 :
decel = max( vel_error, self.decel_limit )
brake = abs(decel) * self.vehicle_mass * self.wheel_radius
return throttle, brake, steering
#return 1.0, 0.0, 1.0
    def show(self):
"""Another method to make pylint happy"""
return "Controller(%s,%s)" % (self.throttle_controller, self.yaw_controller)
|
StarcoderdataPython
|
1739202
|
for _ in range(int(input())):
    n = int(input())
    l = list(map(int, input().split()))
    # Prefix sums: pre[i] is the sum of the first i elements.
    pre = [0]
    for i in range(n):
        pre.append(l[i] + pre[-1])
    # Count how many times each value occurs in the array.
    d = {}
    for i in l:
        if i in d:
            d[i] += 1
        else:
            d[i] = 1
    # For every subarray of length at least two, add the number of array
    # elements equal to that subarray sum; zero the count afterwards so each
    # value is only credited once.
    c = 0
    for i in range(n):
        for j in range(i + 1, n):
            s = pre[j + 1] - pre[i]
            if s in d and d[s] != 0:
                c += d[s]
                d[s] = 0
    print(c)
|
StarcoderdataPython
|
3372641
|
<reponame>alterway/anonymization<filename>anonymization/anonymizers/dateAnonymizers.py
import re
from types import SimpleNamespace
from ..Anonymization import Anonymization
class DateAnonymizer():
'''
Replace the dates with fake ones
Date Formats: DD/MMM/YYYY or DD.MMM.YYYY or DD-MMM-YYYY or DD MMM YYYY
'''
def __init__(self, anonymization: Anonymization):
self.anonymization = anonymization
        self.date_regex = r'\d\d(?:\/|-|\.|\s)(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)(?:\/|-|\.|\s)\d{4}'
def anonymize(self, text: str) -> str:
return self.anonymization.regex_anonymizer(text,self.date_regex,'date')
def evaluate(self, text: str) -> str:
matchs = re.finditer(self.date_regex, text)
ents = [SimpleNamespace(start=m.start(), end=m.end(), entity_type="DATE", score=1) for m in matchs]
return ents
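# Illustrative usage sketch (added, not part of the original module): evaluate()
# only relies on the regex, so a quick check can pass a placeholder for the
# Anonymization dependency just to inspect the detected spans.
if __name__ == "__main__":
    sample = "The contract was signed on 05 Mar 2021 and renewed on 12-Apr-2022."
    detector = DateAnonymizer(anonymization=None)  # placeholder; real usage wires an Anonymization instance
    for ent in detector.evaluate(sample):
        print(ent.start, ent.end, ent.entity_type, sample[ent.start:ent.end])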
|
StarcoderdataPython
|
93723
|
<gh_stars>100-1000
# This script removes the input reference numbers from html pages.
# They play a useful role in scientific notebooks, but they are really
# just visual clutter in this project.
# Could be an nbconvert setting, but it's an easy enough scripting job.
import os
import sys
print("\nStripping input reference numbers from code cells...")
# Find all files to work with.
path_to_notebooks = '/srv/projects/intro_programming/intro_programming/notebooks/'
filenames = []
for filename in os.listdir(path_to_notebooks):
if '.html' in filename and filename != 'index.html':
filenames.append(filename)
# one file for testing:
#filenames = ['hello_world.html']
for filename in filenames:
f = open(path_to_notebooks + filename, 'r')
lines = f.readlines()
f.close()
f = open(path_to_notebooks + filename, 'wb')
for line in lines:
# Unwanted lines have opening and closing div on same line,
# with input reference number between them.
if ('<div class="prompt input_prompt">' in line
and '</div>' in line):
# Don't write this line.
continue
else:
# Regular line, write it.
f.write(line.encode('utf-8'))
f.close()
print(" Stripped input reference numbers.\n")
|
StarcoderdataPython
|
1743735
|
import pp
from pp import Port
from pp.routing.connect_bundle import connect_bundle
def test_connect_bundle():
xs_top = [-100, -90, -80, 0, 10, 20, 40, 50, 80, 90, 100, 105, 110, 115]
pitch = 127.0
N = len(xs_top)
xs_bottom = [(i - N / 2) * pitch for i in range(N)]
top_ports = [Port("top_{}".format(i), (xs_top[i], 0), 0.5, 270) for i in range(N)]
bottom_ports = [
Port("bottom_{}".format(i), (xs_bottom[i], -400), 0.5, 90) for i in range(N)
]
top_cell = pp.Component(name="connect_bundle")
elements = connect_bundle(top_ports, bottom_ports)
for e in elements:
top_cell.add(e)
top_cell.name = "connect_bundle"
return top_cell
@pp.cell
def test_connect_corner(N=6, config="A"):
d = 10.0
sep = 5.0
top_cell = pp.Component(name="connect_bundle_corners")
if config in ["A", "B"]:
a = 100.0
ports_A_TR = [
Port("A_TR_{}".format(i), (d, a / 2 + i * sep), 0.5, 0) for i in range(N)
]
ports_A_TL = [
Port("A_TL_{}".format(i), (-d, a / 2 + i * sep), 0.5, 180) for i in range(N)
]
ports_A_BR = [
Port("A_BR_{}".format(i), (d, -a / 2 - i * sep), 0.5, 0) for i in range(N)
]
ports_A_BL = [
Port("A_BL_{}".format(i), (-d, -a / 2 - i * sep), 0.5, 180)
for i in range(N)
]
ports_A = [ports_A_TR, ports_A_TL, ports_A_BR, ports_A_BL]
ports_B_TR = [
Port("B_TR_{}".format(i), (a / 2 + i * sep, d), 0.5, 90) for i in range(N)
]
ports_B_TL = [
Port("B_TL_{}".format(i), (-a / 2 - i * sep, d), 0.5, 90) for i in range(N)
]
ports_B_BR = [
Port("B_BR_{}".format(i), (a / 2 + i * sep, -d), 0.5, 270) for i in range(N)
]
ports_B_BL = [
Port("B_BL_{}".format(i), (-a / 2 - i * sep, -d), 0.5, 270)
for i in range(N)
]
ports_B = [ports_B_TR, ports_B_TL, ports_B_BR, ports_B_BL]
elif config in ["C", "D"]:
a = N * sep + 2 * d
ports_A_TR = [
Port("A_TR_{}".format(i), (a, d + i * sep), 0.5, 0) for i in range(N)
]
ports_A_TL = [
Port("A_TL_{}".format(i), (-a, d + i * sep), 0.5, 180) for i in range(N)
]
ports_A_BR = [
Port("A_BR_{}".format(i), (a, -d - i * sep), 0.5, 0) for i in range(N)
]
ports_A_BL = [
Port("A_BL_{}".format(i), (-a, -d - i * sep), 0.5, 180) for i in range(N)
]
ports_A = [ports_A_TR, ports_A_TL, ports_A_BR, ports_A_BL]
ports_B_TR = [
Port("B_TR_{}".format(i), (d + i * sep, a), 0.5, 90) for i in range(N)
]
ports_B_TL = [
Port("B_TL_{}".format(i), (-d - i * sep, a), 0.5, 90) for i in range(N)
]
ports_B_BR = [
Port("B_BR_{}".format(i), (d + i * sep, -a), 0.5, 270) for i in range(N)
]
ports_B_BL = [
Port("B_BL_{}".format(i), (-d - i * sep, -a), 0.5, 270) for i in range(N)
]
ports_B = [ports_B_TR, ports_B_TL, ports_B_BR, ports_B_BL]
if config in ["A", "C"]:
for ports1, ports2 in zip(ports_A, ports_B):
elements = connect_bundle(ports1, ports2)
top_cell.add(elements)
elif config in ["B", "D"]:
for ports1, ports2 in zip(ports_A, ports_B):
elements = connect_bundle(ports2, ports1)
top_cell.add(elements)
return top_cell
@pp.cell
def test_connect_bundle_udirect(dy=200, angle=270):
xs1 = [-100, -90, -80, -55, -35, 24, 0] + [200, 210, 240]
axis = "X" if angle in [0, 180] else "Y"
pitch = 10.0
N = len(xs1)
xs2 = [50 + i * pitch for i in range(N)]
if axis == "X":
ports1 = [Port("top_{}".format(i), (0, xs1[i]), 0.5, angle) for i in range(N)]
ports2 = [
Port("bottom_{}".format(i), (dy, xs2[i]), 0.5, angle) for i in range(N)
]
else:
ports1 = [Port("top_{}".format(i), (xs1[i], 0), 0.5, angle) for i in range(N)]
ports2 = [
Port("bottom_{}".format(i), (xs2[i], dy), 0.5, angle) for i in range(N)
]
top_cell = pp.Component(name="connect_bundle")
elements = connect_bundle(ports1, ports2)
for e in elements:
top_cell.add(e)
return top_cell
@pp.cell
def test_connect_bundle_u_indirect(dy=-200, angle=180):
xs1 = [-100, -90, -80, -55, -35] + [200, 210, 240]
axis = "X" if angle in [0, 180] else "Y"
pitch = 10.0
N = len(xs1)
xs2 = [50 + i * pitch for i in range(N)]
a1 = angle
a2 = a1 + 180
if axis == "X":
ports1 = [Port("top_{}".format(i), (0, xs1[i]), 0.5, a1) for i in range(N)]
ports2 = [Port("bottom_{}".format(i), (dy, xs2[i]), 0.5, a2) for i in range(N)]
else:
ports1 = [Port("top_{}".format(i), (xs1[i], 0), 0.5, a1) for i in range(N)]
ports2 = [Port("bottom_{}".format(i), (xs2[i], dy), 0.5, a2) for i in range(N)]
top_cell = pp.Component()
elements = connect_bundle(ports1, ports2)
for e in elements:
top_cell.add(e)
return top_cell
@pp.cell
def test_facing_ports():
dy = 200.0
xs1 = [-500, -300, -100, -90, -80, -55, -35, 200, 210, 240, 500, 650]
pitch = 10.0
N = len(xs1)
xs2 = [-20 + i * pitch for i in range(N // 2)]
xs2 += [400 + i * pitch for i in range(N // 2)]
a1 = 90
a2 = a1 + 180
ports1 = [Port("top_{}".format(i), (xs1[i], 0), 0.5, a1) for i in range(N)]
ports2 = [Port("bottom_{}".format(i), (xs2[i], dy), 0.5, a2) for i in range(N)]
top_cell = pp.Component()
elements = connect_bundle(ports1, ports2)
# elements = link_ports_path_length_match(ports1, ports2)
top_cell.add(elements)
return top_cell
if __name__ == "__main__":
import pp
c = test_facing_ports()
pp.show(c)
|
StarcoderdataPython
|
3212118
|
<reponame>mohammadasim/online-bookstore
import uuid
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
class CustomerPayment(models.Model):
"""
    A Django model representing a payment made by a customer
"""
payment_id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False
)
customer_id = models.ForeignKey(
get_user_model(),
on_delete=models.SET_DEFAULT,
related_name='payments',
default=''
)
payment_date = models.DateField(
'Payment Date',
auto_now_add=True
)
def __str__(self):
return str(self.payment_id)
def get_absolute_url(self):
"""
A method to return url for a payment, with payment_id
in the url.
"""
return reverse('payment_detail', args=[str(self.payment_id)])
|
StarcoderdataPython
|
3277042
|
""" Extra questions for Lab 08 """
from lab08 import *
# OOP
class Keyboard:
"""A Keyboard takes in an arbitrary amount of buttons, and has a
dictionary of positions as keys, and values as Buttons.
>>> b1 = Button(0, "H")
>>> b2 = Button(1, "I")
>>> k = Keyboard(b1, b2)
>>> k.buttons[0].key
'H'
>>> k.press(1)
'I'
>>> k.typing([0, 1])
'HI'
>>> k.typing([1, 0])
'IH'
>>> b1.pressed
2
>>> b2.pressed
3
"""
def __init__(self, *args):
self.buttons = {}
for btn in args:
# not required but better safe than sorry
assert isinstance(btn, Button), 'One or more args is not an instance of Button'
self.buttons[btn.pos] = btn
def press(self, info):
"""Takes in a position of the button pressed, and
returns that button's output"""
btn = self.buttons[info]
# not required but better safe than sorry
assert btn is not None, 'Requested button does not exist at position %s' % info
btn.pressed += 1
return btn.key
def typing(self, typing_input):
"""Takes in a list of positions of buttons pressed, and
returns the total output"""
return ''.join([self.press(pos) for pos in typing_input])
class Button:
def __init__(self, pos, key):
self.pos = pos
self.key = key
self.pressed = 0
# Nonlocal
def make_advanced_counter_maker():
"""Makes a function that makes counters that understands the
messages "count", "global-count", "reset", and "global-reset".
See the examples below:
>>> make_counter = make_advanced_counter_maker()
>>> tom_counter = make_counter()
>>> tom_counter('count')
1
>>> tom_counter('count')
2
>>> tom_counter('global-count')
1
>>> jon_counter = make_counter()
>>> jon_counter('global-count')
2
>>> jon_counter('count')
1
>>> jon_counter('reset')
>>> jon_counter('count')
1
>>> tom_counter('count')
3
>>> jon_counter('global-count')
3
>>> jon_counter('global-reset')
>>> tom_counter('global-count')
1
"""
"""
Be careful, this function returnsa function for the global count,
which returns another function for the local count
"""
global_count = 0
def global_counter():
nonlocal global_count
count = 0
def counter(cmd):
nonlocal global_count
nonlocal count
if cmd == "count":
count += 1
return count
elif cmd == "reset":
count = 0
elif cmd == "global-count":
global_count += 1
return global_count
elif cmd == "global-reset":
global_count = 0
return counter
return global_counter
# Lists
def trade(first, second):
"""Exchange the smallest prefixes of first and second that have equal sum.
>>> a = [1, 1, 3, 2, 1, 1, 4]
>>> b = [4, 3, 2, 7]
>>> trade(a, b) # Trades 1+1+3+2=7 for 4+3=7
'Deal!'
>>> a
[4, 3, 1, 1, 4]
>>> b
[1, 1, 3, 2, 2, 7]
>>> c = [3, 3, 2, 4, 1]
>>> trade(b, c)
'No deal!'
>>> b
[1, 1, 3, 2, 2, 7]
>>> c
[3, 3, 2, 4, 1]
>>> trade(a, c)
'Deal!'
>>> a
[3, 3, 2, 1, 4]
>>> b
[1, 1, 3, 2, 2, 7]
>>> c
[4, 3, 1, 4, 1]
"""
m, n = 1, 1
"""
    This function tries to find the indices that result in an equal sum starting from
    the beginning of the lists. If either side's sum is smaller, the respective
    index is incremented. If either index passes the end of its list, then
    an equal sum cannot be found.
"""
def find_equal_sum_partitions(fir, sec):
        fi, si = 1, 1  # indices for iterating through the given lists
while fi < len(fir) and si < len(sec):
sumfi, sumsi = sum(fir[:fi]), sum(sec[:si])
if sumfi < sumsi:
fi += 1
elif sumfi > sumsi:
si += 1
else:
return fi, si
return None, None
m, n = find_equal_sum_partitions(first, second)
if None not in [m, n]: # change this line!
first[:m], second[:n] = second[:n], first[:m]
return 'Deal!'
else:
return 'No deal!'
# Recursive objects
def make_to_string(front, mid, back, empty_repr):
""" Returns a function that turns linked lists to strings.
>>> kevins_to_string = make_to_string("[", "|-]-->", "", "[]")
>>> jerrys_to_string = make_to_string("(", " . ", ")", "()")
>>> lst = Link(1, Link(2, Link(3, Link(4))))
>>> kevins_to_string(lst)
'[1|-]-->[2|-]-->[3|-]-->[4|-]-->[]'
>>> kevins_to_string(Link.empty)
'[]'
>>> jerrys_to_string(lst)
'(1 . (2 . (3 . (4 . ()))))'
>>> jerrys_to_string(Link.empty)
'()'
"""
def fmt(link, depth=0):
if link is Link.empty:
return empty_repr
return front + str(link.first) + mid + fmt(link.rest, depth + 1) + back
return fmt
def tree_map(fn, t):
"""Maps the function fn over the entries of t and returns the
result in a new tree.
>>> numbers = Tree(1,
... [Tree(2,
... [Tree(3),
... Tree(4)]),
... Tree(5,
... [Tree(6,
... [Tree(7)]),
... Tree(8)])])
>>> print(tree_map(lambda x: 2**x, numbers))
2
4
8
16
32
64
128
256
"""
"""
This function does the same as what tree_map should do, but it mutates the
tree. While I could have rewritten the function to do this from the start,
it was easier to wrap it in a function and call that with a copy of the tree.
TL;DR: Too lazy to rewrite a mutating function into a non-mutating one from scratch.
"""
def tree_map_mutate(fn, t):
t.label = fn(t.label)
for b in t.branches:
if isinstance(b, Tree):
tree_map_mutate(fn, b)
else:
b.label = fn(b.label)
return t
return tree_map_mutate(fn, t.copy_tree())
def long_paths(tree, n):
"""Return a list of all paths in tree with length at least n.
>>> t = Tree(3, [Tree(4), Tree(4), Tree(5)])
>>> left = Tree(1, [Tree(2), t])
>>> mid = Tree(6, [Tree(7, [Tree(8)]), Tree(9)])
>>> right = Tree(11, [Tree(12, [Tree(13, [Tree(14)])])])
>>> whole = Tree(0, [left, Tree(13), mid, right])
>>> for path in long_paths(whole, 2):
... print(path)
...
<0 1 2>
<0 1 3 4>
<0 1 3 4>
<0 1 3 5>
<0 6 7 8>
<0 6 9>
<0 11 12 13 14>
>>> for path in long_paths(whole, 3):
... print(path)
...
<0 1 3 4>
<0 1 3 4>
<0 1 3 5>
<0 6 7 8>
<0 11 12 13 14>
>>> long_paths(whole, 4)
[Link(0, Link(11, Link(12, Link(13, Link(14)))))]
"""
paths = []
# we check if n <= 0 (at some point) and we've hit a leaf
# if we find a leaf and n > 0, the required length is too short
if tree.is_leaf() and n <= 0:
paths.append(Link(tree.label))
for b in tree.branches:
# as we climb up the branches, we call this method with 'n' as n - 1
# if there are no paths, then paths will be empty, thus they won't be added
for path in long_paths(b, n - 1):
paths.append(Link(tree.label, path))
return paths
# Orders of Growth
def zap(n):
i, count = 1, 0
while i <= n:
while i <= 5 * n:
count += i
print(i / 6)
i *= 3
return count
def boom(n):
sum = 0
a, b = 1, 1
while a <= n*n:
while b <= n*n:
sum += (a*b)
b += 1
b = 0
a += 1
return sum
# Tree class
class Tree:
def __init__(self, label, branches=[]):
for c in branches:
assert isinstance(c, Tree)
self.label = label
self.branches = list(branches)
def __repr__(self):
if self.branches:
branches_str = ', ' + repr(self.branches)
else:
branches_str = ''
return 'Tree({0}{1})'.format(self.label, branches_str)
def is_leaf(self):
return not self.branches
def __eq__(self, other):
return type(other) is type(self) and self.label == other.label \
and self.branches == other.branches
def __str__(self):
def print_tree(t, indent=0):
tree_str = ' ' * indent + str(t.label) + "\n"
for b in t.branches:
tree_str += print_tree(b, indent + 1)
return tree_str
return print_tree(self).rstrip()
def copy_tree(self):
return Tree(self.label, [b.copy_tree() for b in self.branches])
|
StarcoderdataPython
|
1628910
|
<filename>accounts/migrations/0004_userprofile_wish_list.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-28 12:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0012_auto_20180827_1748'),
('accounts', '0003_auto_20180731_1243'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='wish_list',
field=models.ManyToManyField(blank=True, to='product.Product'),
),
]
|
StarcoderdataPython
|
3270603
|
<reponame>kapikantzari/MultiBench<filename>examples/healthcare/mimic_low_rank_tensor.py
import torch
from torch import nn
import sys
import os
sys.path.append(os.getcwd())
from unimodals.common_models import MLP, GRU # noqa
from datasets.mimic.get_data import get_dataloader # noqa
from fusions.common_fusions import LowRankTensorFusion # noqa
from training_structures.Simple_Late_Fusion import train, test # noqa
# get dataloader for icd9 classification task 7
traindata, validdata, testdata = get_dataloader(
1, imputed_path='datasets/mimic/im.pk')
# build encoders, head and fusion layer
encoders = [MLP(5, 10, 10, dropout=False).cuda(), GRU(
12, 30, dropout=False, batch_first=True).cuda()]
head = MLP(100, 40, 2, dropout=False).cuda()
fusion = LowRankTensorFusion([10, 720], 100, 40).cuda()
# train
train(encoders, fusion, head, traindata, validdata, 50, auprc=True)
# test
print("Testing: ")
model = torch.load('best.pt').cuda()
# dataset = 'mimic mortality', 'mimic 1', 'mimic 7'
test(model, testdata, dataset='mimic 1', auprc=True)
|
StarcoderdataPython
|
1777350
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test
@file mi-dataset/mi/dataset/parser/test/test_winch_cspp.py
@author <NAME>
@brief Test code for Winch Cspp data parser
Files used for testing:
20141114-194242-WINCH.LOG
Contains engineering data for CSPP platform
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.winch_cspp.resource import RESOURCE_PATH
from mi.dataset.parser.winch_cspp import WinchCsppParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
MODULE_NAME = 'mi.dataset.parser.winch_cspp'
CLASS_NAME = 'WinchCsppDataParticle'
WINCH_CSPP_LOG_FILE = "20141114-194242-WINCH.LOG"
WINCH_CSPP_LOG_FILE_2 = "20141114-194242-WINCH_2.LOG"
# Define number of expected records/exceptions for various tests
NUM_REC_WINCH_CSPP_LOG_FILE = 1617
YAML_FILE = "winch_cspp_test_data.yml"
INVALID_DATA_FILE_1 = '20141114-194242-WINCH_invalid1.LOG'
NUM_INVALID_EXCEPTIONS = 5
@attr('UNIT', group='mi')
class WinchCsppParserUnitTestCase(ParserUnitTestCase):
"""
winch_cspp Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self.rec_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: CLASS_NAME
}
def open_file(self, filename):
file = open(os.path.join(RESOURCE_PATH, filename), mode='r')
return file
def open_file_write(self, filename):
file = open(os.path.join(RESOURCE_PATH, filename), mode='w')
return file
def create_parser(self, file_handle):
"""
This function creates a WinchCspp parser for Winch CSPP data.
"""
parser = WinchCsppParser(self.rec_config,
file_handle,
self.exception_callback)
return parser
def test_verify_record(self):
"""
Simple test to verify that records are successfully read and parsed from a data file
"""
log.debug('===== START SIMPLE TEST =====')
in_file = self.open_file(WINCH_CSPP_LOG_FILE)
parser = self.create_parser(in_file)
# In a single read, get all particles in this file.
number_expected_results = NUM_REC_WINCH_CSPP_LOG_FILE
result = parser.get_records(number_expected_results)
self.assertEqual(len(result), number_expected_results)
in_file.close()
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END SIMPLE TEST =====')
def test_invalid_data(self):
"""
        Test that the parser handles non-conforming data. There are a total of six records in the
test file. The first line contains good data and the next 5 lines are bad lines with either wrong
delimiter (expect white space but found ',') or wrong input type (expect integer but found float).
Verify that one particle is generated and five exceptions are detected.
"""
log.debug('===== START TEST INVALID SENSOR DATA =====')
in_file = self.open_file(INVALID_DATA_FILE_1)
parser = self.create_parser(in_file)
number_expected_results = 1
# Try to get records and verify that none are returned.
result = parser.get_records(NUM_REC_WINCH_CSPP_LOG_FILE)
        self.assertEqual(len(result), number_expected_results)
self.assertEqual(len(self.exception_callback_value), NUM_INVALID_EXCEPTIONS)
in_file.close()
log.debug('===== END TEST INVALID SENSOR DATA =====')
def test_verify_record_against_yaml(self):
"""
Read data from a file and pull out data particles
one at a time. Verify that the results are those we expected.
"""
log.debug('===== START YAML TEST =====')
in_file = self.open_file(WINCH_CSPP_LOG_FILE_2)
parser = self.create_parser(in_file)
# In a single read, get all particles in this file.
number_expected_results = 6
result = parser.get_records(number_expected_results)
self.assert_particles(result, YAML_FILE, RESOURCE_PATH)
in_file.close()
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END YAML TEST =====')
def create_yml_file(self):
"""
Create a yml file corresponding to a Winch Cspp dataset. This is not an actual test. It allows
us to create what we need for integration testing, i.e. a yml file.
"""
in_file = self.open_file(WINCH_CSPP_LOG_FILE_2)
parser = self.create_parser(in_file)
# In a single read, get all particles in this file.
result = parser.get_records(100)
self.particle_to_yml(result, YAML_FILE)
def particle_to_yml(self, particles, filename, mode='w'):
"""
Write particle dictionaries to a yaml file
"""
        # open the output file for writing (overwrites any existing file); note the mode argument is currently ignored by open_file_write
fid = self.open_file_write(filename)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' % (i+1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %16.5f\n' % (val.get('value_id'), val.get('value')))
elif isinstance(val.get('value'), str):
fid.write(" %s: '%s'\n" % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
|
StarcoderdataPython
|
1668502
|
import math
import cv2
import numpy as np
from dtld_parsing.calibration import CalibrationData
from typing import Tuple
__author__ = "<NAME>, <NAME> and <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class ThreeDPosition(object):
"""
Three dimensional position with respect to a defined frame_id.
"""
def __init__(self, x: float, y: float, z: float, frame_id: str = "stereo_left"):
self._x = x
self._y = y
self._z = z
self._frame_id = frame_id
def set_pos(self, x: float, y: float, z: float):
self._x = x
self._y = y
self._z = z
def move_pos(self, x: float, y: float, z: float):
self._x += x
self._y += y
self._z += z
def get_pos(self) -> Tuple[float, float, float]:
return self._x, self._y, self._z
class ThreeDimensionalPosition(object):
def __init__(
self,
calibration_left: CalibrationData,
calibration_right: CalibrationData,
binning_x: int = 0,
binning_y: int = 0,
roi_offset_x: int = 0,
roi_offset_y: int = 0,
):
"""
Class determining the 3D position of objects from disparity images.
Args:
calibration_left(CalibrationData): calibration for left camera
calibration_right(CalibrationData): calibration for right camera
binning_x(int): binning between original camera and disparity image in x direction
binning_y(int): binning between original camera and disparity image in y direction
roi_offset_x(int): RoI offset in x
roi_offset_y(int): RoI offset in y
"""
self._calibration_left = calibration_left
self._calibration_right = calibration_right
self._binning_x = binning_x
self._binning_y = binning_y
self._roi_offset_x = roi_offset_x
self._roi_offset_y = roi_offset_y
def unrectify_rectangle(self, x: int, y: int, width: int, height: int):
"""
Converts rectified to unrectified coordinates using calibration matrices.
Args:
x(int): upper left corner of bbox
y(int): upper left corner of bbox
width(int): width of bbox
height(int): height of bbox
Returns:
x, y, width, height in unrectified coordinates
"""
# not rectified coordinates
pt_distorted = np.array([[float(x), float(y)], [float(x + width), float(y + height),],])
pt_distorted = pt_distorted[:, np.newaxis, :]
# rectify points
pt_undistorted = cv2.undistortPoints(
pt_distorted,
self._calibration_left.intrinsic_calibration.intrinsic_matrix,
self._calibration_left.distortion_calibration.distortion_matrix,
R=self._calibration_left.rectification_matrix.rectification_matrix,
P=self._calibration_left.projection_matrix.projection_matrix,
)
# get new coords
x_out = pt_undistorted[0][0][0]
y_out = pt_undistorted[0][0][1]
w_out = pt_undistorted[1][0][0] - pt_undistorted[0][0][0]
h_out = pt_undistorted[1][0][1] - pt_undistorted[0][0][1]
# binning in x and y (camera images were binned before
# disparity calculation)
return (
int(round(x_out / float(self._binning_x))),
int(round(y_out / float(self._binning_y))),
int(round(w_out / float(self._binning_x))),
int(round(h_out / float(self._binning_y))),
)
def determine_disparity(self, x: int, y: int, width: int, height: int, disparity_image: np.ndarray) -> float:
"""
Calculates disparity from unrectified coordinates using calibration matrices and disparity image input.
Args:
x(int): upper left corner of bbox
y(int): upper left corner of bbox
width(int): width of bbox
height(int): height of bbox
disparity_image(np.ndarray): disparity image
Returns:
float: median disparity in RoI
"""
disparity_crop = disparity_image[y : y + height, x : x + width]
# image = cv2.rectangle(
# disparity_image, (int(x), int(y)), (int(x) + int(width), int(y) + int(height)), (255, 255, 255), 1,
# )
# cv2.imwrite("/home/muelju3/disp.png", image)
return np.nanmedian(disparity_crop)
def determine_three_dimensional_position(
self, x: int, y: int, width: int, height: int, disparity_image: np.ndarray
) -> ThreeDPosition:
"""
Calculates 3d position from rectified coordinates using calibration matrices and disparity image input.
Args:
x(int): upper left corner of bbox
y(int): upper left corner of bbox
width(int): width of bbox
height(int): weight of bbox
disparity_image(np.ndarray): disparity image
Returns:
ThreeDPosition: ThreeDPosition
"""
x_u, y_u, width_u, height_u = self.unrectify_rectangle(x=x, y=y, width=width, height=height)
disparity = self.determine_disparity(
x=x_u - int(round(self._roi_offset_x / self._binning_x)),
y=y_u - int(round(self._roi_offset_y / self._binning_y)),
width=width_u,
height=height_u,
disparity_image=disparity_image,
)
# all values inside bbox are nan --> no depth
if disparity == 0.0 or math.isnan(disparity):
return ThreeDPosition(x=-1.0, y=-1.0, z=-1.0, frame_id="stereo_left")
return self.twod_point_to_threed_from_disparity(x=x + width / 2.0, y=y + height / 2.0, disparity=disparity)
def twod_point_to_threed_from_disparity(self, x, y, disparity):
# get calibration values
left_fx = self._calibration_left.intrinsic_calibration.fx
left_fy = self._calibration_left.intrinsic_calibration.fy
left_cx = self._calibration_left.intrinsic_calibration.cx
left_cy = self._calibration_left.intrinsic_calibration.cy
tx = -1.0 * self._calibration_right.projection_matrix.baseline
# determine 3d pos
x_world = left_fy * tx * x - left_fy * left_cx * tx
y_world = left_fx * tx * y - left_fx * left_cy * tx
z_world = left_fx * left_fy * tx
# normalize
w = -1.0 * self._binning_x * left_fy * disparity
return ThreeDPosition(x=x_world / w, y=y_world / w, z=z_world / w, frame_id="stereo_left")
    def twod_point_to_threed_from_depth(self, x: int, y: int, depth: float) -> ThreeDPosition:
disparity = self.depth_to_disparity(depth)
return self.twod_point_to_threed_from_disparity(x, y, disparity)
def disparity_to_depth(self, disparity: float) -> float:
"""
Converts disparity to depth.
Args:
disparity(float): Disparity in pixels
Returns:
float: depth value in meters
"""
tx = -1.0 * self._calibration_right.projection_matrix.tx
return tx / (disparity * self._binning_x)
def depth_to_disparity(self, depth: float) -> float:
"""
Converts depth to disparity.
Args:
depth(float): Depth in meters
Returns:
            float: disparity in pixels
"""
tx = -1.0 * self._calibration_right.projection_matrix.tx
return tx / (depth * self._binning_x)
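    # Note added for clarity (not in the original source): with a fixed tx and
    # binning factor, disparity_to_depth and depth_to_disparity are mutual
    # inverses, since depth = tx / (disparity * binning_x) and
    # disparity = tx / (depth * binning_x).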
def twod_from_threed(self, x: float, y: float, z: float):
"""
Calculates hypothesis size in pixels based on depth of object.
Args:
x(float): 3D position x coordinate
y(float): 3D position z coordinate
z(float): 3D position y coordinate
Returns:
int, int: 2d pos
"""
# translation = depth
t_vec = np.array([0.0, 0.0, 0.0])
r_vec = np.array([0.0, 0.0, 0.0])
# world corner points of object (float object assumption)
world_points = np.array([[x, y, z],])
# project world points on image plane
image_points = cv2.projectPoints(
world_points,
r_vec,
t_vec,
self._calibration_left.intrinsic_calibration.intrinsic_matrix,
distCoeffs=self._calibration_left.distortion_calibration.distortion_matrix,
)[0].tolist()
# determine box width and height
return image_points[0][0][0], image_points[0][0][1]
|
StarcoderdataPython
|
2036
|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
__author__ = ["<NAME>"]
__all__ = ["_StatsModelsAdapter"]
import numpy as np
import pandas as pd
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
from sktime.forecasting.base._sktime import _SktimeForecaster
class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):
"""Base class for interfacing statsmodels forecasting algorithms"""
_fitted_param_names = ()
def __init__(self):
self._forecaster = None
self._fitted_forecaster = None
super(_StatsModelsAdapter, self).__init__()
def fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
            The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored
Returns
-------
self : returns an instance of self.
"""
# statsmodels does not support the pd.Int64Index as required,
# so we coerce them here to pd.RangeIndex
if isinstance(y, pd.Series) and type(y.index) == pd.Int64Index:
y, X = _coerce_int_to_range_index(y, X)
self._set_y_X(y, X)
self._set_fh(fh)
self._fit_forecaster(y, X)
self._is_fitted = True
return self
def _fit_forecaster(self, y_train, X_train=None):
"""Internal fit"""
raise NotImplementedError("abstract method")
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""
Make forecasts.
Parameters
----------
fh : ForecastingHorizon
            The forecaster's horizon with the steps ahead to predict.
Default is one-step ahead forecast,
i.e. np.array([1])
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
return_pred_int : bool, optional (default=False)
alpha : int or list, optional (default=0.95)
Returns
-------
y_pred : pd.Series
Returns series of predicted values.
"""
if return_pred_int:
raise NotImplementedError()
# statsmodels requires zero-based indexing starting at the
# beginning of the training series when passing integers
start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
y_pred = self._fitted_forecaster.predict(start, end)
# statsmodels forecasts all periods from start to end of forecasting
# horizon, but only return given time points in forecasting horizon
return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()]
def get_fitted_params(self):
"""Get fitted parameters
Returns
-------
fitted_params : dict
"""
self.check_is_fitted()
return {
name: self._fitted_forecaster.params.get(name)
for name in self._get_fitted_param_names()
}
def _get_fitted_param_names(self):
"""Get names of fitted parameters"""
return self._fitted_param_names
def _coerce_int_to_range_index(y, X=None):
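    """Coerce a pd.Int64Index on y (and X, if given) to an equivalent pd.RangeIndex.
    Illustrative example (added, not from the original source): a series indexed
    by [3, 4, 5] is re-indexed with pd.RangeIndex(3, 6); a non-contiguous integer
    index raises a ValueError.
    """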
new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)
try:
np.testing.assert_array_equal(y.index, new_index)
except AssertionError:
raise ValueError(
"Coercion of pd.Int64Index to pd.RangeIndex "
"failed. Please provide `y_train` with a "
"pd.RangeIndex."
)
y.index = new_index
if X is not None:
X.index = new_index
return y, X
|
StarcoderdataPython
|
4808568
|
from itertools import cycle
def xor_data(data, key):
key = [q for q in key]
data = [q for q in data]
return bytes([c ^ k for c, k in zip(data, cycle(key))])
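# Illustrative usage sketch (added, not part of the original snippet): XOR with a
# repeating key is its own inverse, so applying xor_data twice with the same key
# recovers the original bytes. The sample values below are made up.
if __name__ == "__main__":
    plaintext = b"attack at dawn"
    key = b"secret"
    ciphertext = xor_data(plaintext, key)
    assert xor_data(ciphertext, key) == plaintext
    print(ciphertext.hex())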
|
StarcoderdataPython
|
3270502
|
<reponame>SimonContreras/MHRiseWiki-discord-bot
import os
import discord
from discord.ext import commands
from src.skill.embed import SkillEmbed
from src.common.embed import CommonEmbed
from src.common.utils import InputParser
from src.orm.queries.header import db_header
from src.orm.queries.skill import db_skill
class SkillCog(commands.Cog):
"""
Commands related to skill
Attributes
----------
bot : commands.Bot
Discord.ext class that implements Bot class
Methods
-------
skill(ctx, *args)
Retrieve embeds related to command '?skill'
"""
def __init__(self, bot: commands.Bot):
self._bot = bot
self.name = 'Skill Cog'
self.description = '''Skill commands MH Rise Wiki'''
self.__dbHeader = db_header
self.__dbSkill = db_skill
self._skill_img_route=os.getenv('SKILL_LOCATION_ROUTE')
@commands.command(name='hab', aliases=['skill'])
async def skill(self, ctx: commands.Context, *args):
"""Manage rendered embeds of command '?skill'
Parameters
----------
ctx : commands.Context
context class that store data related to discord server
*args : list
List of params sent when the command is called
Returns
-------
Message
retrieve rendered embed
"""
skill_name = InputParser(args).concat()
dct = self.__dbSkill.get(str(ctx.guild.id), skill_name)
        if dct is None:
            dct = self.__dbHeader.entity_not_found(str(ctx.guild.id), 'skill_not_found')
            footer = self.__dbHeader.get_footer(str(ctx.guild.id), 'general_footer')
            embed = CommonEmbed(dct, footer, ctx)
await ctx.send(embed=embed.notFound())
else:
headers = self.__dbHeader.get_headers(str(ctx.guild.id), ctx.invoked_with)
thumbnail_file = discord.File(self._skill_img_route+dct['icon'], filename=dct['icon'])
embed = SkillEmbed(dct, headers)
embed_main, embed_deco = embed.main()
if embed_deco is None:
await ctx.send(embed = embed_main, file=thumbnail_file)
else:
message = await ctx.send(embed=embed_main, file=thumbnail_file)
await message.add_reaction('▶')
valid_reactions = ['▶']
def check(reaction, user):
return user == ctx.author
reaction = None
reaction_used = []
while True:
if str(reaction) in valid_reactions and str(reaction) not in reaction_used:
reaction_used.append(str(reaction))
deco_file = discord.File(self._skill_img_route+dct['icon'], filename=dct['icon'])
await ctx.send(embed=embed_deco, file=deco_file)
try:
reaction, user = await self._bot.wait_for(event='reaction_add', timeout = 60.0, check = check)
await message.remove_reaction(reaction, user)
except:
break
await message.clear_reactions()
|
StarcoderdataPython
|
4840749
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# This application is an example on how to use aiolifx
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import sys
import asyncio as aio
import aiolifx as alix
from functools import partial
import argparse
UDP_BROADCAST_PORT = 56700
#Simple bulb control from console
class bulbs():
""" A simple class with a register and unregister methods
"""
def __init__(self):
self.bulbs=[]
self.boi=None #bulb of interest
def register(self,bulb):
bulb.get_label()
bulb.get_location()
bulb.get_version()
bulb.get_group()
bulb.get_wififirmware()
bulb.get_hostfirmware()
self.bulbs.append(bulb)
self.bulbs.sort(key=lambda x: x.label or x.mac_addr)
if opts.extra:
bulb.register_callback(lambda y: print("Unexpected message: %s"%str(y)))
def unregister(self,bulb):
idx=0
for x in list([ y.mac_addr for y in self.bulbs]):
if x == bulb.mac_addr:
del(self.bulbs[idx])
break
idx+=1
def readin():
"""Reading from stdin and displaying menu"""
selection = sys.stdin.readline().strip("\n")
MyBulbs.bulbs.sort(key=lambda x: x.label or x.mac_addr)
lov=[ x for x in selection.split(" ") if x != ""]
if lov:
if MyBulbs.boi:
#try:
if True:
if int(lov[0]) == 0:
MyBulbs.boi=None
elif int(lov[0]) == 1:
if len(lov) >1:
MyBulbs.boi.set_power(lov[1].lower() in ["1","on","true"])
MyBulbs.boi=None
else:
print("Error: For power you must indicate on or off\n")
elif int(lov[0]) == 2:
if len(lov) >2:
try:
MyBulbs.boi.set_color([58275,0,
                                    int(round((float(lov[1])*65535.0)/100.0)),
int(round(float(lov[2])))])
MyBulbs.boi=None
except:
print("Error: For white brightness (0-100) and temperature (2500-9000) must be numbers.\n")
else:
print("Error: For white you must indicate brightness (0-100) and temperature (2500-9000)\n")
elif int(lov[0]) == 3:
if len(lov) >3:
try:
MyBulbs.boi.set_color([int(round((float(lov[1])*65535.0)/360.0)),
int(round((float(lov[2])*65535.0)/100.0)),
int(round((float(lov[3])*65535.0)/100.0)),3500])
MyBulbs.boi=None
except:
print("Error: For colour hue (0-360), saturation (0-100) and brightness (0-100)) must be numbers.\n")
else:
print("Error: For colour you must indicate hue (0-360), saturation (0-100) and brightness (0-100))\n")
elif int(lov[0]) == 4:
print(MyBulbs.boi.device_characteristics_str(" "))
print(MyBulbs.boi.device_product_str(" "))
MyBulbs.boi=None
elif int(lov[0]) == 5:
print(MyBulbs.boi.device_firmware_str(" "))
MyBulbs.boi=None
elif int(lov[0]) == 6:
mypartial=partial(MyBulbs.boi.device_radio_str)
MyBulbs.boi.get_wifiinfo(callb=lambda x,y:print("\n"+mypartial(y)))
MyBulbs.boi=None
elif int(lov[0]) == 7:
mypartial=partial(MyBulbs.boi.device_time_str)
MyBulbs.boi.get_hostinfo(callb=lambda x,y:print("\n"+mypartial(y)))
MyBulbs.boi=None
elif int(lov[0]) == 8:
if len(lov) >3:
try:
print ( "Sending {}".format([int(round((float(lov[1])*65535.0)/360.0)),
int(round((float(lov[2])*65535.0)/100.0)),
int(round((float(lov[3])*65535.0)/100.0)),3500]))
MyBulbs.boi.set_waveform({"color":[int(round((float(lov[1])*65535.0)/360.0)),
int(round((float(lov[2])*65535.0)/100.0)),
int(round((float(lov[3])*65535.0)/100.0)),
3500],
"transient":1, "period":100, "cycles":30,
"skew_ratio":0,"waveform":0})
MyBulbs.boi=None
except:
print("Error: For pulse hue (0-360), saturation (0-100) and brightness (0-100)) must be numbers.\n")
else:
print("Error: For pulse you must indicate hue (0-360), saturation (0-100) and brightness (0-100))\n")
#except:
#print ("\nError: Selection must be a number.\n")
else:
try:
if int(lov[0]) > 0:
if int(lov[0]) <=len(MyBulbs.bulbs):
MyBulbs.boi=MyBulbs.bulbs[int(lov[0])-1]
else:
print("\nError: Not a valid selection.\n")
except:
print ("\nError: Selection must be a number.\n")
if MyBulbs.boi:
print("Select Function for {}:".format(MyBulbs.boi.label))
print("\t[1]\tPower (0 or 1)")
print("\t[2]\tWhite (Brigthness Temperature)")
print("\t[3]\tColour (Hue Saturation Brightness)")
print("\t[4]\tInfo")
print("\t[5]\tFirmware")
print("\t[6]\tWifi")
print("\t[7]\tUptime")
print("\t[8]\tPulse")
print("")
print("\t[0]\tBack to bulb selection")
else:
idx=1
print("Select Bulb:")
for x in MyBulbs.bulbs:
print("\t[{}]\t{}".format(idx,x.label or x.mac_addr))
idx+=1
print("")
print("Your choice: ", end='',flush=True)
parser = argparse.ArgumentParser(description="Track and interact with Lifx light bulbs.")
parser.add_argument("-6", "--ipv6prefix", default=None,
help="Connect to Lifx using IPv6 with given /64 prefix (Do not end with colon unless you have less than 64bits).")
parser.add_argument("-x","--extra", action='store_true', default=False,
help="Print unexpected messages.")
try:
opts = parser.parse_args()
except Exception as e:
parser.error("Error: " + str(e))
MyBulbs= bulbs()
loop = aio.get_event_loop()
discovery = alix.LifxDiscovery(loop, MyBulbs)
try:
loop.add_reader(sys.stdin,readin)
discovery.start()
print("Hit \"Enter\" to start")
print("Use Ctrl-C to quit")
loop.run_forever()
except:
pass
finally:
discovery.cleanup()
loop.remove_reader(sys.stdin)
loop.close()
|
StarcoderdataPython
|
3248356
|
<filename>Modules/LSTM_Config.py
######################## config ##############################
import tensorflow as tf
from keras.applications import VGG16
from tensorflow import keras
from keras.models import Model
import numpy as np
from random import shuffle
from Modules import PublicModules as lib
from keras.models import Sequential
from keras.layers import LSTM, Dense, Activation, Dropout
import matplotlib.pyplot as plt
import cv2
DIR_ROOT = ''
DIR_INPUT_TRAIN = DIR_ROOT + 'Data/Train'
DIR_INPUT_TEST = DIR_ROOT + 'Data/Test'
DIR_INPUT_TEST1 = DIR_ROOT + 'Data/Test1'
DIR_INPUT_VALIDATION = DIR_ROOT + 'Data/Validation'
DIR_INPUT_SHOW_VIDEO_TEST = DIR_ROOT + 'Data/ShowVideoTest'
DIR_INPUT_SHOW_VIDEO_TRAIN = DIR_ROOT + 'Data/ShowVideoTrain'
DIR_MODEL_LSTM = DIR_ROOT + 'Modules/LSTM_Model_17PL_06_01_2021_01.h5'
DIR_MODEL_CNN = DIR_ROOT + 'Modules/VGG16_Model.h5'
DIR_TRANSFER_VALUES_VGG16_MODEL = DIR_ROOT + 'Modules/TransferValuesVGG16.npy'
SIZE = (224, 224)
NUM_FRAME_INPUT_LSTM = 20
TRANSFER_VALUE_SIZE = 4096
RNN_SIZE = 600
DENSE1 = 1024
DENSE2 = 70
EPOCH = 400
BATCH_SIZE = 300
LEARNING_RATE = 0.0001
# Fraction of data used for validation
VALID_PERCENT = 0.2
# Percentage of data used for testing
TEST_PERCENT = 0.3
# K-fold cross-validation
K_FOLD = 10
VIDEO_NAMES = [
'bc',
'cq',
'da',
'dn',
'kc',
'lg',
'lk',
'na',
'nc',
'ne',
'nt',
'om',
'tc',
'vk',
'xd',
'xt',
'no'
]
VIDEO_NAMES_DETAIL = [
'bc - Bóp Cổ',
'cq - Cởi Quần Áo',
'da - Đá, Đạp',
'dn - Đánh, Tát',
'kc - Kẹp Cổ',
'lg - Lên Gối',
'lk - Lôi Kéo',
'na - Nằm Xuống Sàn',
'nc - Nắm Cổ',
'ne - Ném Đồ Vật',
'nt - Nắm Tóc',
'om - Ôm, Vật Lôn',
'tc - Thủ Thế Võ',
'vk - Vật, Vũ Khí',
'xd - Xô Đẩy',
'xt - Xỉ Tay',
    'no'  # no violent action
]
VIDEO_LABELS = [
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
]
NUM_CLASSIFY = len(VIDEO_NAMES)
######################## end config ##############################
# Define the VGG16 model
def fun_getVGG16Model():
modelCNN = VGG16(include_top=True, weights='imagenet')
modelCNN.summary()
transferLayer = modelCNN.get_layer(name='fc2')
imgModelTransfer = Model(inputs=modelCNN.input, outputs=transferLayer.output)
return imgModelTransfer
# One hot
def fun_onesHotLabel(label: list):
_ones = np.ones([NUM_FRAME_INPUT_LSTM, NUM_CLASSIFY])
_onesHot = label * _ones
return np.array(_onesHot)
# Build the list of video file names and one-hot labels from each class folder
def fun_getVideoLabelNames_EachFolder(path: str):
names = []
labels = []
for fol in VIDEO_NAMES:
folder = path + '/' + fol
fileNames = lib.fun_getFileNames(path=folder)
index = VIDEO_NAMES.index(fol)
for file in fileNames:
names.append('/' + fol + '/' + file)
labels.append(VIDEO_LABELS[index])
c = list(zip(names, labels))
shuffle(c)
names, labels = zip(*c)
return names, labels
# Filter out videos that do not have enough frames
def fun_locVideoDuFrame(path: str):
names, label = fun_getVideoLabelNames_EachFolder(path= path)
incree = 1
max = len(names)
for file in names:
frames = lib.fun_getFramesOfVideo_ALL(path= DIR_INPUT_TRAIN + file)
if len(frames) < 25:
print(file)
        lib.fun_print_process(count= incree, max= max, mess= 'Filter Frame Count Process: ')
incree += 1
# Feed 20 frames of a video through the VGG16 model to get transfer values
def fun_getTransferValue(pathVideoOrListFrame, modelVGG16):
if isinstance(pathVideoOrListFrame, str):
frames = lib.fun_getFramesOfVideo(path=pathVideoOrListFrame, count=NUM_FRAME_INPUT_LSTM)
else:
frames = pathVideoOrListFrame
frames = lib.fun_resizeFrames(frames=frames, size=SIZE)
frames = np.array(frames)
frames = (frames / 255.).astype(np.float16)
transfer = modelVGG16.predict(frames)
return transfer
# Feed 20 frames of a video through the VGG16 model to get transfer values (edited variant)
def fun_getTransferValue_EDIT(pathVideoOrListFrame, modelVGG16):
images = []
if (isinstance(pathVideoOrListFrame, str)):
vidcap = cv2.VideoCapture(pathVideoOrListFrame)
success, image = vidcap.read()
count = 0
while count < NUM_FRAME_INPUT_LSTM:
try:
RGB_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
res = cv2.resize(RGB_img, dsize=SIZE,
interpolation=cv2.INTER_CUBIC)
images.append(res)
success, image = vidcap.read()
count += 1
except:
break
else:
for image in pathVideoOrListFrame:
try:
RGB_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
res = cv2.resize(RGB_img, dsize=SIZE,
interpolation=cv2.INTER_CUBIC)
images.append(res)
except:
break
if len(images) != NUM_FRAME_INPUT_LSTM:
lib.fun_print(name='Frames count: ' + pathVideoOrListFrame, value=len(images))
return None
resul = np.array(images)
resul = (resul / 255.).astype(np.float16)
# # Pre-allocate input-batch-array for images.
# shape = (NUM_FRAME_INPUT_LSTM,) + SIZE + (3,)
# image_batch = np.zeros(shape=shape, dtype=np.float16)
# image_batch = resul
# # Pre-allocate output-array for transfer-values.
# # Note that we use 16-bit floating-points to save memory.
# shape = (NUM_FRAME_INPUT_LSTM, TRANSFER_VALUE_SIZE)
# transfer_values = np.zeros(shape=shape, dtype=np.float16)
transfer_values = modelVGG16.predict(resul)
return transfer_values
# Prepare the dataset and labels to train the LSTM
def fun_getTrainSet_LabelSet(pathVideoOrListFrame: str, numItem: int, modelVGG16, names, labels, mess: str= 'Train'):
count = 0
trainSet = []
labelSet = []
while count < numItem:
itemTrain = fun_getTransferValue_EDIT(pathVideoOrListFrame=pathVideoOrListFrame + names[count], modelVGG16= modelVGG16)
itemLable = fun_onesHotLabel(label=labels[count])
trainSet.append(itemTrain)
labelSet.append(itemLable[0])
lib.fun_print_process(count=count, max=numItem, mess='Video frame throw into VGG16 Model Processing {0}: '.format(mess))
count += 1
return trainSet, labelSet
# Prepare the dataset and labels to train the LSTM, saving transfer values to file
def fun_getTrainSet_LabelSet_SaveFile(pathVideoOrListFrame: str, numItem: int, modelVGG16, names, labels, mess: str= 'Train'):
count = 0
trainSet = []
labelSet = []
with open(file= DIR_TRANSFER_VALUES_VGG16_MODEL, mode= 'wb') as f:
        # first, write the dataset length
        np.save(f, np.array(numItem))
        # then write each record of the dataset
while count < numItem:
itemTrain = fun_getTransferValue_EDIT(pathVideoOrListFrame=pathVideoOrListFrame + names[count], modelVGG16= modelVGG16)
itemLable = fun_onesHotLabel(label=labels[count])
trainSet.append(itemTrain)
labelSet.append(itemLable[0])
np.save(f, itemTrain)
np.save(f, itemLable[0])
lib.fun_print_process(count=count, max=numItem, mess='Video frame throw into VGG16 Model Processing {0}: '.format(mess))
count += 1
return trainSet, labelSet
# Prepare the dataset and labels to train the LSTM, loading transfer values from file
def fun_getTrainSet_LabelSet_LoadFile(numItem: int, mess: str= 'Load File: '):
count = 0
trainSet = []
labelSet = []
with open(file= DIR_TRANSFER_VALUES_VGG16_MODEL, mode= 'rb') as f:
        # first, read (and discard) the dataset length
        np.load(f)
        # then read each record of the dataset
while count < numItem:
itemTrain = np.load(f)
itemLable = np.load(f)
trainSet.append(itemTrain)
labelSet.append(itemLable)
lib.fun_print_process(count=count, max=numItem, mess='Video frame throw into VGG16 Model Processing {0}: '.format(mess))
count += 1
return trainSet, labelSet
# Define the LSTM network
def fun_getModelLSTM(rnn_size: int = RNN_SIZE, input_shape: tuple = (NUM_FRAME_INPUT_LSTM, TRANSFER_VALUE_SIZE), num_classify: int = NUM_CLASSIFY):
modelLSTM = Sequential()
modelLSTM.add(LSTM(rnn_size, input_shape=input_shape))
modelLSTM.add(Dense(DENSE1))
modelLSTM.add(Activation('relu'))
modelLSTM.add(Dense(DENSE2))
modelLSTM.add(Activation('sigmoid'))
modelLSTM.add(Dense(num_classify))
modelLSTM.add(Activation('softmax'))
modelLSTM.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
return modelLSTM
# Define the LSTM network (variant 2, explicit learning rate)
def fun_getModelLSTM_2(rnn_size: int = RNN_SIZE, input_shape: tuple = (NUM_FRAME_INPUT_LSTM, TRANSFER_VALUE_SIZE), num_classify: int = NUM_CLASSIFY):
modelLSTM = Sequential()
modelLSTM.add(LSTM(rnn_size, input_shape= input_shape))
modelLSTM.add(Dense(DENSE1))
modelLSTM.add(Activation('relu'))
modelLSTM.add(Dense(DENSE2))
modelLSTM.add(Activation('sigmoid'))
modelLSTM.add(Dense(num_classify))
modelLSTM.add(Activation('softmax'))
opt = keras.optimizers.Adam(learning_rate= LEARNING_RATE)
modelLSTM.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])
return modelLSTM
# Define the LSTM network (variant 5)
def fun_getModelLSTM_5(rnn_size: int = RNN_SIZE, input_shape: tuple = (NUM_FRAME_INPUT_LSTM, TRANSFER_VALUE_SIZE), num_classify: int = NUM_CLASSIFY):
modelLSTM = Sequential()
modelLSTM.add(LSTM(1024, input_shape=input_shape))
modelLSTM.add(Dense(200))
modelLSTM.add(Activation('relu'))
modelLSTM.add(Dense(50))
modelLSTM.add(Activation('sigmoid'))
modelLSTM.add(Dense(num_classify))
modelLSTM.add(Activation('softmax'))
opt = keras.optimizers.Adam(learning_rate=LEARNING_RATE)
modelLSTM.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])
return modelLSTM
# Define the LSTM network (variant 6)
def fun_getModelLSTM_6(rnn_size: int = RNN_SIZE, input_shape: tuple = (NUM_FRAME_INPUT_LSTM, TRANSFER_VALUE_SIZE), num_classify: int = NUM_CLASSIFY):
modelLSTM = Sequential()
modelLSTM.add(LSTM(120, input_shape= input_shape))
modelLSTM.add(Dense(1024, activation='relu'))
modelLSTM.add(Dropout(.5))
modelLSTM.add(Dense(512, activation='relu'))
modelLSTM.add(Dropout(.5))
modelLSTM.add(Dense(128, activation='relu'))
modelLSTM.add(Dropout(.5))
modelLSTM.add(Dense(64, activation='relu'))
modelLSTM.add(Dense(NUM_CLASSIFY, activation='softmax'))
opt = keras.optimizers.Adam(learning_rate= LEARNING_RATE)
modelLSTM.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])
return modelLSTM
# Start LSTM training with a percentage-based validation split
def fun_START_TRAINT_LSTM_PERCENT(modelLSTM, trainSet, labelSet):
lenValid = int(VALID_PERCENT * len(trainSet))
# Init Valid
valSet = trainSet[0:lenValid]
valLabelSet = labelSet[0:lenValid]
# Init Train
trainSet = trainSet[lenValid:]
labelSet = labelSet[lenValid:]
print('Len Validation: ' + str(len(valSet)))
input('any: ')
print('Len Train: ' + str(len(trainSet)))
input('any: ')
history = modelLSTM.fit(np.array(trainSet), np.array(labelSet), epochs=EPOCH,
validation_data=(np.array(valSet), np.array(valLabelSet)),
batch_size=BATCH_SIZE, verbose=2)
lib.fun_print(name= 'LSTM Train', value= 'Train Finish!')
return history
def get_model_name(k):
return 'Modules/K_model_'+str(k)+'.h5'
def fun_mergeArray(arr1, arr2):
res = []
for x in arr1:
res.append(x)
for x in arr2:
res.append(x)
return res
def fun_START_TRAINT_LSTM_PERCENT_K_Fold(modelLSTM, trainSet, labelSet, testSet, testLabelSet):
history = None
VALIDATION_ACCURACY = []
VALIDATION_LOSS = []
max = len(trainSet)
index = max // K_FOLD
for k in range(0, K_FOLD):
start = index * k
end = start + index
        # Slice out the validation fold
        _valSet = trainSet[start:end]
        _valLabelSet = labelSet[start:end]
        # Use the remaining data for training
_trainLeft = trainSet[0:start]
_trainRight = trainSet[end:max]
_trainFOLD = fun_mergeArray(_trainLeft, _trainRight)
_labelLeft = labelSet[0:start]
_labelRight = labelSet[end:max]
_labelFOLD = fun_mergeArray(_labelLeft, _labelRight)
lib.fun_print(name= 'Train fold {0}'.format(k), value= 'valid: {0}, train: {1}'.format(len(_valSet), len(_trainFOLD)))
        # Start training
        # Create the checkpoint callback
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=get_model_name(k),
monitor='val_accuracy', verbose=1,
save_best_only=True, mode='max')
callbacks_list = [checkpoint]
history = modelLSTM.fit(np.array(_trainFOLD), np.array(_labelFOLD), epochs=EPOCH,
validation_data=(np.array(_valSet), np.array(_valLabelSet)),
callbacks=callbacks_list,
batch_size=BATCH_SIZE, verbose=2)
        '''
        SHOW THE CONVERGENCE PLOTS (ACCURACY / LOSS)
        '''
        fun_showAnalysis(history=history)
        '''
        ESTIMATE THE ACCURACY PERCENTAGE
        - Test folder at: Data/Test/
        '''
        fun_evaluate(modelLSTM=modelLSTM, testSet=testSet, testLabelSet=testLabelSet)
        # Load the best model for this fold and evaluate its performance
modelLSTM.load_weights(get_model_name(k))
# evaluate
results = modelLSTM.evaluate(np.array(_valSet), np.array(_valLabelSet))
results = dict(zip(modelLSTM.metrics_names, results))
VALIDATION_ACCURACY.append(results['accuracy'])
VALIDATION_LOSS.append(results['loss'])
tf.keras.backend.clear_session()
print(VALIDATION_ACCURACY)
print(VALIDATION_LOSS)
return history
# Start the LSTM training job (validation videos loaded from DIR_INPUT_VALIDATION)
def fun_START_TRAINT_LSTM(modelVGG16, modelLSTM, trainSet, labelSet):
valName, valLabel = fun_getVideoLabelNames_EachFolder(path= DIR_INPUT_VALIDATION)
print('len Valid: ', len(valName))
input('any: ')
valSet, valLabelSet = fun_getTrainSet_LabelSet(pathVideoOrListFrame= DIR_INPUT_VALIDATION ,numItem= len(valName),
modelVGG16= modelVGG16,
names= valName, labels= valLabel,
mess= 'Validation')
history = modelLSTM.fit(np.array(trainSet), np.array(labelSet), epochs=EPOCH,
validation_data=(np.array(valSet), np.array(valLabelSet)),
batch_size=BATCH_SIZE, verbose=2)
lib.fun_print(name= 'LSTM Train', value= 'Train Finish!')
return history
# Plot the training convergence (accuracy and loss)
def fun_showAnalysis(history):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig('destination_path.eps', format='eps', dpi=1000)
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig('destination_path1.eps', format='eps', dpi=1000)
plt.show()
def fun_loadModelLSTM():
modelLSTM = fun_getModelLSTM_2(num_classify= NUM_CLASSIFY)
modelLSTM.load_weights(filepath= DIR_MODEL_LSTM)
return modelLSTM
def fun_evaluate(modelLSTM, testSet, testLabelSet):
result = modelLSTM.evaluate(np.array(testSet), np.array(testLabelSet))
for name, value in zip(modelLSTM.metrics_names, result):
print(name, value)
def fun_FilterVideoFitFrameCount(fileName:str ,count: int=25):
frames = lib.fun_getFramesOfVideo_ALL(path= fileName)
if len(frames) < count:
print(fileName)
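# Usage sketch: a minimal example of how the helpers above could be chained together,
# assuming the Data/Train folder layout and the PublicModules helpers referenced above.
# It extracts VGG16 transfer values for the labelled training videos, builds an LSTM,
# trains it with a percentage validation split, plots convergence, and saves the weights.
if __name__ == '__main__':
    modelVGG16 = fun_getVGG16Model()
    names, labels = fun_getVideoLabelNames_EachFolder(path=DIR_INPUT_TRAIN)
    trainSet, labelSet = fun_getTrainSet_LabelSet(pathVideoOrListFrame=DIR_INPUT_TRAIN,
                                                  numItem=len(names),
                                                  modelVGG16=modelVGG16,
                                                  names=names, labels=labels)
    modelLSTM = fun_getModelLSTM_2()
    history = fun_START_TRAINT_LSTM_PERCENT(modelLSTM=modelLSTM, trainSet=trainSet, labelSet=labelSet)
    fun_showAnalysis(history=history)
    modelLSTM.save_weights(DIR_MODEL_LSTM)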
|
StarcoderdataPython
|
114028
|
import sqlite3
db_con = sqlite3.connect("./manga_db.sqlite", detect_types=sqlite3.PARSE_DECLTYPES)
db_con.row_factory = sqlite3.Row
with db_con:
c = db_con.executescript("""
PRAGMA foreign_keys=off;
BEGIN TRANSACTION;
DROP INDEX IF EXISTS id_onpage_on_site;
CREATE INDEX IF NOT EXISTS idx_id_onpage_imported_from ON
ExternalInfo (id_onpage, imported_from);
CREATE UNIQUE INDEX IF NOT EXISTS idx_artist_name ON Artist (name);
CREATE UNIQUE INDEX IF NOT EXISTS idx_category_name ON Category (name);
CREATE UNIQUE INDEX IF NOT EXISTS idx_character_name ON Character (name);
CREATE UNIQUE INDEX IF NOT EXISTS idx_collection_name ON Collection (name);
CREATE UNIQUE INDEX IF NOT EXISTS idx_groups_name ON Groups (name);
CREATE UNIQUE INDEX IF NOT EXISTS idx_list_name ON List (name);
CREATE UNIQUE INDEX IF NOT EXISTS idx_parody_name ON Parody (name);
CREATE UNIQUE INDEX IF NOT EXISTS idx_tag_name ON Tag (name);
DROP TRIGGER IF EXISTS set_last_update_ext_info;
COMMIT;
PRAGMA foreign_keys=on;
""")
|
StarcoderdataPython
|
41658
|
<reponame>Aleksander-Drozd/pycsvw
term_mappings = {
'Cell': 'csvw:Cell',
'Column': 'csvw:Column',
'Datatype': 'csvw:Datatype',
'Dialect': 'csvw:Dialect',
'Direction': 'csvw:Direction',
'ForeignKey': 'csvw:ForeignKey',
'JSON': 'csvw:JSON',
'NCName': 'xsd:NCName',
'NMTOKEN': 'xsd:NMTOKEN',
'Name': 'xsd:Name',
'NumericFormat': 'csvw:NumericFormat',
'QName': 'xsd:QName',
'Row': 'csvw:Row',
'Schema': 'csvw:Schema',
'Table': 'csvw:Table',
'TableGroup': 'csvw:TableGroup',
'TableReference': 'csvw:TableReference',
'Transformation': 'csvw:Transformation',
'aboutUrl': 'csvw:aboutUrl',
'any': 'xsd:anyAtomicType',
'anyAtomicType': 'xsd:anyAtomicType',
'anyURI': 'xsd:anyURI',
'as': 'https://www.w3.org/ns/activitystreams#',
'base': 'csvw:base',
'base64Binary': 'xsd:base64Binary',
'binary': 'xsd:base64Binary',
'boolean': 'xsd:boolean',
'byte': 'xsd:byte',
'cc': 'http://creativecommons.org/ns#',
'columnReference': 'csvw:columnReference',
'columns': 'csvw:column',
'commentPrefix': 'csvw:commentPrefix',
'csvw': 'http://www.w3.org/ns/csvw#',
'ctag': 'http://commontag.org/ns#',
'datatype': 'csvw:datatype',
'date': 'xsd:date',
'dateTime': 'xsd:dateTime',
'dateTimeStamp': 'xsd:dateTimeStamp',
'datetime': 'xsd:dateTime',
'dayTimeDuration': 'xsd:dayTimeDuration',
'dc': 'http://purl.org/dc/terms/',
'dc11': 'http://purl.org/dc/elements/1.1/',
'dcat': 'http://www.w3.org/ns/dcat#',
'dcterms': 'http://purl.org/dc/terms/',
'dctypes': 'http://purl.org/dc/dcmitype/',
'decimal': 'xsd:decimal',
'decimalChar': 'csvw:decimalChar',
'default': 'csvw:default',
'delimiter': 'csvw:delimiter',
'describedby': 'wrds:describedby',
'describes': 'csvw:describes',
'dialect': 'csvw:dialect',
'double': 'xsd:double',
'doubleQuote': 'csvw:doubleQuote',
'dqv': 'http://www.w3.org/ns/dqv#',
'duration': 'xsd:duration',
'duv': 'https://www.w3.org/TR/vocab-duv#',
'encoding': 'csvw:encoding',
'float': 'xsd:float',
'foaf': 'http://xmlns.com/foaf/0.1/',
'foreignKeys': 'csvw:foreignKey',
'format': 'csvw:format',
'gDay': 'xsd:gDay',
'gMonth': 'xsd:gMonth',
'gMonthDay': 'xsd:gMonthDay',
'gYear': 'xsd:gYear',
'gYearMonth': 'xsd:gYearMonth',
'gr': 'http://purl.org/goodrelations/v1#',
'grddl': 'http://www.w3.org/2003/g/data-view#',
'groupChar': 'csvw:groupChar',
'header': 'csvw:header',
'headerRowCount': 'csvw:headerRowCount',
'hexBinary': 'xsd:hexBinary',
'html': 'rdf:HTML',
'ical': 'http://www.w3.org/2002/12/cal/icaltzd#',
'int': 'xsd:int',
'integer': 'xsd:integer',
'json': 'csvw:JSON',
'lang': 'csvw:lang',
'language': 'xsd:language',
'ldp': 'http://www.w3.org/ns/ldp#',
'length': 'csvw:length',
'license': 'xhv:license',
'lineTerminators': 'csvw:lineTerminators',
'long': 'xsd:long',
'ma': 'http://www.w3.org/ns/ma-ont#',
'maxExclusive': 'csvw:maxExclusive',
'maxInclusive': 'csvw:maxInclusive',
'maxLength': 'csvw:maxLength',
'maximum': 'csvw:maxInclusive',
'minExclusive': 'csvw:minExclusive',
'minInclusive': 'csvw:minInclusive',
'minLength': 'csvw:minLength',
'minimum': 'csvw:minInclusive',
'name': 'csvw:name',
'negativeInteger': 'xsd:negativeInteger',
'nonNegativeInteger': 'xsd:nonNegativeInteger',
'nonPositiveInteger': 'xsd:nonPositiveInteger',
'normalizedString': 'xsd:normalizedString',
'notes': 'csvw:note',
'null': 'csvw:null',
'number': 'xsd:double',
'oa': 'http://www.w3.org/ns/oa#',
'og': 'http://ogp.me/ns#',
'ordered': 'csvw:ordered',
'org': 'http://www.w3.org/ns/org#',
'owl': 'http://www.w3.org/2002/07/owl#',
'pattern': 'csvw:pattern',
'positiveInteger': 'xsd:positiveInteger',
'primaryKey': 'csvw:primaryKey',
'propertyUrl': 'csvw:propertyUrl',
'prov': 'http://www.w3.org/ns/prov#',
'qb': 'http://purl.org/linked-data/cube#',
'quoteChar': 'csvw:quoteChar',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdfa': 'http://www.w3.org/ns/rdfa#',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
'reference': 'csvw:reference',
'referencedRows': 'csvw:referencedRow',
'required': 'csvw:required',
'resource': 'csvw:resource',
'rev': 'http://purl.org/stuff/rev#',
'rif': 'http://www.w3.org/2007/rif#',
'role': 'xhv:role',
'row': 'csvw:row',
'rowTitles': 'csvw:rowTitle',
'rownum': 'csvw:rownum',
'rr': 'http://www.w3.org/ns/r2rml#',
'schema': 'http://schema.org/',
'schemaReference': 'csvw:schemaReference',
'scriptFormat': 'csvw:scriptFormat',
'sd': 'http://www.w3.org/ns/sparql-service-description#',
'separator': 'csvw:separator',
'short': 'xsd:short',
'sioc': 'http://rdfs.org/sioc/ns#',
'skipBlankRows': 'csvw:skipBlankRows',
'skipColumns': 'csvw:skipColumns',
'skipInitialSpace': 'csvw:skipInitialSpace',
'skipRows': 'csvw:skipRows',
'skos': 'http://www.w3.org/2004/02/skos/core#',
'skosxl': 'http://www.w3.org/2008/05/skos-xl#',
'source': 'csvw:source',
'string': 'xsd:string',
'suppressOutput': 'csvw:suppressOutput',
'tableDirection': 'csvw:tableDirection',
'tableSchema': 'csvw:tableSchema',
'tables': 'csvw:table',
'targetFormat': 'csvw:targetFormat',
'textDirection': 'csvw:textDirection',
'time': 'xsd:time',
'titles': 'csvw:title',
'token': 'xsd:token',
'transformations': 'csvw:transformations',
'trim': 'csvw:trim',
'unsignedByte': 'xsd:unsignedByte',
'unsignedInt': 'xsd:unsignedInt',
'unsignedLong': 'xsd:unsignedLong',
'unsignedShort': 'xsd:unsignedShort',
'uriTemplate': 'csvw:uriTemplate',
'url': 'csvw:url',
'v': 'http://rdf.data-vocabulary.org/#',
'valueUrl': 'csvw:valueUrl',
'vcard': 'http://www.w3.org/2006/vcard/ns#',
'virtual': 'csvw:virtual',
'void': 'http://rdfs.org/ns/void#',
'wdr': 'http://www.w3.org/2007/05/powder#',
'wrds': 'http://www.w3.org/2007/05/powder-s#',
'xhv': 'http://www.w3.org/1999/xhtml/vocab#',
'xml': 'rdf:XMLLiteral',
'xsd': 'http://www.w3.org/2001/XMLSchema#',
'yearMonthDuration': 'xsd:yearMonthDuration',
}
core_group_of_tables_annotations = ['id', 'notes', 'tables']
core_table_annotations = ['columns', 'tableDirection', 'foreignKeys', 'id', 'notes', 'rows', 'schema',
'suppressOutput', 'transformations', 'url']
core_column_annotations = ['aboutUrl', 'cells', 'datatype', 'default', 'lang', 'name', 'null', 'number', 'ordered',
'propertyUrl', 'required', 'separator', 'sourceNumber', 'suppressOutput', 'table',
'textDirection', 'titles', 'valueUrl', 'virtual']
core_row_annotations = ['cells', 'number', 'primaryKey', 'titles', 'referencedRows', 'sourceNumber', 'table']
schema_description = ['columns', 'foreignKeys', 'primaryKey', 'rowTitles', '@type', '@id']
def is_non_core_annotation(name):
return name not in core_group_of_tables_annotations \
and name not in core_table_annotations \
and name not in core_column_annotations \
and name not in core_row_annotations \
and name not in schema_description
CONST_STANDARD_MODE = 'standard'
CONST_MINIMAL_MODE = 'minimal'
core_group_of_tables_properties = ['tables', 'dialect', 'notes', 'tableDirection', 'tableSchema', 'transformations',
'@id', '@type', '@context']
core_table_properties = ['url', 'dialect', 'notes', 'suppressOutput', 'tableDirection', 'tableSchema',
'transformations', '@id', '@type']
inherited_properties = ['aboutUrl', 'datatype', 'default', 'lang', 'null', 'ordered', 'propertyUrl', 'required',
'separator', 'textDirection', 'valueUrl']
array_properties = ['tables', 'transformations', '@context', 'notes', 'foreignKeys',
'columns', 'lineTerminators']
array_property_item_types = {
'columns': dict,
'foreignKeys': dict,
'lineTerminators': str,
'notes': dict,
'transformations': dict,
'tables': dict,
}
number_datatypes = ['decimal', 'integer', 'long', 'int', 'short', 'byte', 'nonNegativeInteger',
'positiveInteger', 'unsignedLong', 'unsignedInt', 'unsignedShort', 'unsignedByte',
'nonPositiveInteger', 'negativeInteger', 'double', 'number', 'duration', 'dayTimeDuration',
'yearMonthDuration', 'float']
date_datatypes = ['date', 'dateTime', 'datetime', 'dateTimeStamp']
fields_properties = {
'transformations': {'url': True, 'scriptFormat': True, 'targetFormat': True, 'source': False, 'titles': False,
'@id': False, '@type': True},
'tableGroup': {'tables': True, 'dialect': False, 'notes': False, 'tableDirection': False, 'tableSchema': False,
'transformations': False, '@id': False, '@type': False, '@context': True},
'tables': {'url': True, 'dialect': False, 'notes': False, 'suppressOutput': False, 'tableDirection': False,
'transformations': False, 'tableSchema': False, '@id': False, '@type': False},
'columns': {'name': False, 'suppressOutput': False, 'titles': False, 'virtual': False, '@id': False,
'@type': False, }
}
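# Illustrative sketch: expanding a prefixed term against term_mappings and checking
# whether an annotation is non-core. expand_term is a hypothetical helper, not part
# of the CSVW spec or of this module's public interface.
def expand_term(term):
    """Resolve a 'prefix:local' term to a full IRI using term_mappings when possible."""
    mapped = term_mappings.get(term, term)
    if ':' in mapped:
        prefix, local = mapped.split(':', 1)
        base = term_mappings.get(prefix)
        if base and base.startswith('http'):
            return base + local
    return mapped
if __name__ == '__main__':
    print(expand_term('datatype'))             # http://www.w3.org/ns/csvw#datatype
    print(is_non_core_annotation('dc:title'))  # True: not a core annotation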
|
StarcoderdataPython
|
4826343
|
<gh_stars>0
import pandas as pd
import numpy as np
# Store filepath in a variable
path = "Resources/budget_data.csv"
# Read our Data file with the pandas library
df = pd.read_csv(path, encoding="ISO-8859-1")
# Total num of months
months = df["Date"].nunique()
# net P&L
p_and_l = df["Profit/Losses"].sum()
#get number of rows in Profit/Losses column
num_rows = len(df["Profit/Losses"])
#add a new column to data frame with monthly dollar changes.
#there will be a null value for the first month
for i in range(0,num_rows):
if i<(num_rows-1):
df.loc[i+1,"Change"]= (df["Profit/Losses"][i+1]) - (df["Profit/Losses"][i])
#get mean, max and min of monthly dollar changes
mean_change = df["Change"].mean()
min_change = df["Change"].min()
max_change = df["Change"].max()
#get the row of max and min dollar changes
min_change_row = (df["Change"].idxmin())
max_change_row = (df["Change"].idxmax())
# get corresponding month of max and min changes
min_month = df.loc[min_change_row, "Date"]
max_month = df.loc[max_change_row, "Date"]
#print output
print("Financial Analysis")
print("----------------------------")
print(f"Total Months: {months}")
print(f"Total: ${p_and_l}")
print("Average Change: $" + format (mean_change, ".2f"))
print("Greatest Increase in Profits: " + max_month + " ($" + format(max_change, ".0f") + ")")
print("Greatest Decrease in Profits: " + min_month + " ($" + format(min_change, ".0f") + ")")
## Print to text file (open the output file once instead of once per print call)
with open("pybank_output.txt", "a") as out:
    print("Financial Analysis", file=out)
    print("----------------------------", file=out)
    print(f"Total Months: {months}", file=out)
    print(f"Total: ${p_and_l}", file=out)
    print("Average Change: $" + format(mean_change, ".2f"), file=out)
    print("Greatest Increase in Profits: " + max_month + " ($" + format(max_change, ".0f") + ")", file=out)
    print("Greatest Decrease in Profits: " + min_month + " ($" + format(min_change, ".0f") + ")", file=out)
# get min profit
#idmin, axis = 0 returns index of the row
#.loc returns the row of the above index
#min_profit_df = budget_data_df.loc[budget_data_df["Profit/Losses"].idxmin(axis = 0)]
#min_profit_month = min_profit_df[0]
#min_profit_value = min_profit_df[1]
# average change
#average_change = budget_data_df["Profit/Losses"].mean()
# get max profit
#idxmax, axis = 0 returns index of the row
#.loc returns the row of the above index
#max_profit_df = budget_data_df.loc[budget_data_df["Profit/Losses"].idxmax(axis = 0)]
#max_profit_month = max_profit_df[0]
#max_profit_value = max_profit_df[1]
#print(len(df["Profit/Losses"]))
#print(df["Profit/Losses"][1])
#print("----------------------------")
#calc average change
#dollar_change = []
#pandas.DataFrame.append()
#avg_dollar_change = average(dollar_change)
#mean_dollar_change = np.mean(dollar_change)
#max_dollar_change = np.max(dollar_change)
#min_dollar_change = np.min(dollar_change)
#
#print(mean_dollar_change)
#print(max_dollar_change)
#print(min_dollar_change)
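# Alternative sketch: pandas can produce the same month-over-month changes in one
# step with Series.diff(), avoiding the explicit loop above. This reuses the df and
# mean_change values already computed in this script.
alt_change = df["Profit/Losses"].diff()
assert round(alt_change.mean(), 2) == round(mean_change, 2)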
|
StarcoderdataPython
|
3295927
|
<reponame>PortableProgrammer/Status-Light
# https://github.com/portableprogrammer/Status-Light/
# Module imports
import sys
import signal
import os
import time
import logging
from datetime import datetime
# Project imports
import webex
import office365
import tuya
import env
import const
currentStatus = const.Status.unknown
lastStatus = currentStatus
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s', datefmt='[%Y-%m-%d %H:%M:%S]', level=logging.WARNING)
logger = logging.getLogger(__name__)
logger.info('Startup')
print(datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),'Startup')
# Register for SIGHUP, SIGINT, SIGQUIT, SIGTERM
# At the moment, we'll treat them all (but SIGTERM) the same and exit
shouldContinue = True
def receiveSignal(signalNumber, frame):
logger.warning('\nSignal received: %s', signalNumber)
# TODO: Make better choices here, this is really a hack
global shouldContinue
shouldContinue = False
return
# SIGTERM should be handled special
def receiveTerminate(signalNumber, frame):
logger.warning('\nSIGTERM received, terminating immediately')
sys.exit(0)
signals = [signal.SIGHUP, signal.SIGINT, signal.SIGQUIT]
for sig in signals:
signal.signal(sig, receiveSignal)
signal.signal(signal.SIGTERM, receiveTerminate)
# Validate environment variables in a structured way
localEnv = env.Environment()
if False in [localEnv.getSources(), localEnv.getTuya(), localEnv.getColors(), localEnv.getStatus(), localEnv.getSleep(), localEnv.getLogLevel()]:
# We failed to gather some environment variables
logger.warning('Failed to find all environment variables!')
sys.exit(1)
# 23 - Make logging level configurable
logger.info('Setting log level to %s', localEnv.logLevel)
print(datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),'Setting log level to', localEnv.logLevel)
logger.setLevel(localEnv.logLevel)
# Depending on the selected sources, get the environment
webexAPI = None
if const.StatusSource.webex in localEnv.selectedSources:
if localEnv.getWebex():
logger.info('Requested Webex')
webexAPI = webex.WebexAPI()
webexAPI.botID = localEnv.webexBotID
else:
logger.warning('Requested Webex, but could not find all environment variables!')
sys.exit(1)
officeAPI = None
if const.StatusSource.office365 in localEnv.selectedSources:
if localEnv.getOffice():
logger.info('Requested Office 365')
officeAPI = office365.OfficeAPI()
officeAPI.appID = localEnv.officeAppID
officeAPI.appSecret = localEnv.officeAppSecret
officeAPI.tokenStore = localEnv.officeTokenStore
officeAPI.authenticate()
else:
logger.warning('Requested Office 365, but could not find all environment variables!')
sys.exit(1)
# Tuya
light = tuya.TuyaLight()
light.device = eval(localEnv.tuyaDevice)
logger.debug('Retrieved TUYA_DEVICE variable: %s', light.device)
# TODO: Connect to the device and ensure it's available
#light.getCurrentStatus()
while shouldContinue:
try:
webexStatus = const.Status.unknown
officeStatus = const.Status.unknown
# Webex Status
if const.StatusSource.webex in localEnv.selectedSources:
webexStatus = webexAPI.getPersonStatus(localEnv.webexPersonID)
# O365 Status (based on calendar)
if const.StatusSource.office365 in localEnv.selectedSources:
officeStatus = officeAPI.getCurrentStatus()
# Compare statii and pick a winner
logger.debug('Webex: %s | Office: %s', webexStatus, officeStatus)
# Webex status always wins except in specific scenarios
currentStatus = webexStatus
if (webexStatus in localEnv.availableStatus or webexStatus in localEnv.offStatus) and officeStatus not in localEnv.offStatus:
logger.debug('Using officeStatus: %s', officeStatus)
currentStatus = officeStatus
if lastStatus != currentStatus:
lastStatus = currentStatus
print()
print(datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),'Found new status:',currentStatus, end='', flush=True)
logger.info('Transitioning to %s',currentStatus)
light.transitionStatus(currentStatus, localEnv)
else:
print('.', end='', flush=True)
# Sleep for a few seconds
time.sleep(localEnv.sleepSeconds)
except (SystemExit, KeyboardInterrupt) as e:
logger.info('%s received; shutting down...', e.__class__.__name__)
shouldContinue = False
except BaseException as e:
logger.warning('Exception during main loop: %s', e)
logger.debug(e)
print()
print(datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),'Shutdown')
logger.info('Shutdown')
logger.debug('Turning light off')
light.off()
|
StarcoderdataPython
|
3261849
|
<filename>lib/script/who.py
#
# EXAMPLE: A primitive who command
#
__name__ = "who Command"
__author__ = "jh"
__date__ = "Feb 2004"
__version__ = "1.0"
__text__ = "blah blah blah"
__deps__ = []
from mud import *
import server
def __init__() :
register_command("who", \
{
"position" : POS_DEAD,
"function" : do_who,
"level" : 0,
"subcmd" : 0,
"priority" : 25,
"override" : 1
}
)
def __end__() :
deregister_command(do_who)
def class_abbr(num) :
return server.class_abbreviate(num)
#
# Get the flag string of a bottled char
#
def who_flag_str(ch) :
s = []
if ch.prf_flagged(PRF_NOTELL) :
s.append("(notell)")
return " ".join(s)
def do_who(ch,cmd,args,subcmd,rawtext) :
d = server.cvar.descriptor_list
show = []
while d != None :
c = d.character
if c == None :
continue
them = server.py_Bottle_Char(c)
if them == None :
continue
if ch.can_see(them) :
show.append(c)
d = d.next
ch.send("Players")
ch.send("-------")
for them in show :
ch.write(ch.yel(NRM))
        if them.isnpc :
a = "--"
else :
a = class_abbr(them.player.chclass)
ch.write("[%2d %s] %s %s %s" % (them.player.level, a, them.player.name, them.player.title, who_flag_str(server.py_Bottle_Char(them))))
ch.send(ch.nrm(NRM))
ch.send("")
if len(show) != 1 :
ch.send("%d characters shown" % len(show))
else :
ch.send("1 character shown")
|
StarcoderdataPython
|
1793532
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Colony Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Colony Framework.
#
# Hive Colony Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Colony Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Colony Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
class Handler(object):
"""
The abstract handler class to be used for handlers that
are class oriented (not direct function calls).
"""
filters = None
""" The reference to the list that will hold the various
filters to be applied to the message to be sent """
def __init__(self):
self.filters = []
def add_filter(self, filter):
self.filters.append(filter)
def filter(self, message):
# retrieves the (data) type of the current message, and
# in case it's not a map returns immediately, plain messages
# are not "filterable"
message_t = type(message)
if not message_t == dict: return message
# iterates over the complete set of filters registered in the
# handler and runs them in the message, retrieving the filtered
# message as the new message
for filter in self.filters: message = filter(message)
return message
def plain_filter(self, message):
contents = message.get("contents", {})
value = contents.get("value", None)
return value
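# Usage sketch: a Handler that reduces structured messages to their plain value.
# This only exercises the class defined above; the message shape is illustrative.
if __name__ == "__main__":
    handler = Handler()
    handler.add_filter(handler.plain_filter)
    message = {"contents": {"value": "hello world"}}
    print(handler.filter(message))  # -> hello world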
|
StarcoderdataPython
|
52185
|
from RPi import GPIO
from time import sleep
# clk = 17
# dt = 18
sw = 24
clk = 12
dt = 25
GPIO.setmode(GPIO.BCM)
GPIO.setup(clk, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(dt, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(sw, GPIO.IN, pull_up_down=GPIO.PUD_UP)
counter = 0
clkLastState = GPIO.input(clk)
try:
while True:
pushBtn = GPIO.input(sw)
        if pushBtn != 1:
print("button pressed..")
clkState = GPIO.input(clk)
dtState = GPIO.input(dt)
if clkState != clkLastState:
if dtState != clkState:
counter += 1
else:
counter -= 1
print(counter)
clkLastState = clkState
sleep(0.01)
finally:
GPIO.cleanup()
|
StarcoderdataPython
|
1761704
|
"""
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from astropy.utils.misc import NumpyRNGContext
from ..mean_los_velocity_vs_rp import mean_los_velocity_vs_rp
from ...tests.cf_helpers import generate_locus_of_3d_points
__all__ = ('test_mean_los_velocity_vs_rp_correctness1', 'test_mean_los_velocity_vs_rp_correctness2',
'test_mean_los_velocity_vs_rp_correctness3', 'test_mean_los_velocity_vs_rp_correctness4',
'test_mean_los_velocity_vs_rp_parallel', 'test_mean_los_velocity_vs_rp_auto_consistency',
'test_mean_los_velocity_vs_rp_cross_consistency')
fixed_seed = 43
def pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rp_min, rp_max, pi_max, Lbox=None):
""" Brute force pure python function calculating mean los velocities
in a single bin of separation.
"""
if Lbox is None:
xperiod, yperiod, zperiod = np.inf, np.inf, np.inf
else:
xperiod, yperiod, zperiod = Lbox, Lbox, Lbox
npts1, npts2 = len(sample1), len(sample2)
running_tally = []
for i in range(npts1):
for j in range(npts2):
dx = sample1[i, 0] - sample2[j, 0]
dy = sample1[i, 1] - sample2[j, 1]
dz = sample1[i, 2] - sample2[j, 2]
dvz = velocities1[i, 2] - velocities2[j, 2]
if dx > xperiod/2.:
dx = xperiod - dx
elif dx < -xperiod/2.:
dx = -(xperiod + dx)
if dy > yperiod/2.:
dy = yperiod - dy
elif dy < -yperiod/2.:
dy = -(yperiod + dy)
if dz > zperiod/2.:
dz = zperiod - dz
zsign_flip = -1
elif dz < -zperiod/2.:
dz = -(zperiod + dz)
zsign_flip = -1
else:
zsign_flip = 1
d_rp = np.sqrt(dx*dx + dy*dy)
if (d_rp > rp_min) & (d_rp < rp_max) & (abs(dz) < pi_max):
if abs(dz) > 0:
vlos = dvz*dz*zsign_flip/abs(dz)
else:
vlos = dvz
running_tally.append(vlos)
if len(running_tally) > 0:
return np.mean(running_tally)
else:
return 0.
def test_mean_los_velocity_vs_rp_vs_brute_force_pure_python():
    """ This function tests that the
    `~halotools.mock_observables.mean_los_velocity_vs_rp` function returns
results that agree with a brute force pure python implementation
for a random distribution of points, both with and without PBCs.
"""
npts = 99
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
sample2 = np.random.random((npts, 3))
velocities1 = np.random.uniform(-10, 10, npts*3).reshape((npts, 3))
velocities2 = np.random.uniform(-10, 10, npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.2, 0.3]), 0.1
############################################
# Run the test with PBCs turned off
s1s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, do_auto=False)
rmin, rmax = rp_bins[0], rp_bins[1]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[0], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[1], rp_bins[2]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[1], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[2], rp_bins[3]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[2], pure_python_s1s2, rtol=0.01)
# ############################################
# # Run the test with PBCs operative
s1s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, do_auto=False, period=1)
rmin, rmax = rp_bins[0], rp_bins[1]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max, Lbox=1)
assert np.allclose(s1s2[0], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[1], rp_bins[2]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max, Lbox=1)
assert np.allclose(s1s2[1], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[2], rp_bins[3]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max, Lbox=1)
assert np.allclose(s1s2[2], pure_python_s1s2, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness1():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
results for a controlled distribution of points whose mean radial velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
the first at (1, 0, 0.1), the second at (1, 0.2, 0.25).
The first set of points is moving at +50 in the z-direction;
the second set of points is at rest.
PBCs are set to infinity in this test.
So in this configuration, the two sets of points are moving towards each other,
and so the relative z-velocity should be -50 for cross-correlations
in separation bins containing the pair of points. For any separation bin containing only
one set or the other, the auto-correlations should be 0 because each set of
points moves coherently.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = -50
npts = 100
xc1, yc1, zc1 = 1, 0, 0.1
xc2, yc2, zc2 = 1, 0.2, 0.25
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 2] = 50.
rp_bins, pi_max = np.array([0, 0.1, 0.15, 0.21, 0.25]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
assert np.allclose(s1s1[0:2], 0, rtol=0.01)
assert np.allclose(s1s2[0:2], 0, rtol=0.01)
assert np.allclose(s2s2[0:2], 0, rtol=0.01)
assert np.allclose(s1s1[2], 0, rtol=0.01)
assert np.allclose(s1s2[2], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[2], 0, rtol=0.01)
assert np.allclose(s1s1[3], 0, rtol=0.01)
assert np.allclose(s1s2[3], 0, rtol=0.01)
assert np.allclose(s2s2[3], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max)
assert np.allclose(s1s1[0:2], 0, rtol=0.01)
assert np.allclose(s1s1[2], correct_relative_velocity, rtol=0.01)
assert np.allclose(s1s1[3], 0, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness2():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
results for a controlled distribution of points whose mean radial velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
    the first at (0.5, 0.5, 0.1), the second at (0.5, 0.35, 0.25).
The first set of points is moving at -50 in the z-direction;
the second set of points is at rest.
PBCs are set to infinity in this test.
So in this configuration, the two sets of points are moving away from each other,
and so the relative z-velocity should be +50 for cross-correlations
in separation bins containing the pair of points. For any separation bin containing only
one set or the other, the auto-correlations should be 0 because each set of
points moves coherently.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = +50
npts = 100
xc1, yc1, zc1 = 0.5, 0.5, 0.1
xc2, yc2, zc2 = 0.5, 0.35, 0.25
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 2] = -50.
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s2[0], 0, rtol=0.01)
assert np.allclose(s2s2[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], 0, rtol=0.01)
assert np.allclose(s1s2[1], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[1], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], correct_relative_velocity, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness3():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
results for a controlled distribution of points whose mean radial velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
the first at (0.5, 0.55, 0.1), the second at (0.5, 0.4, 0.95).
The first set of points is moving at (-50, -10, +20),
the second set of points is moving at (+25, +10, +40).
So in this configuration, the second set of points is "gaining ground" on
    the first set in the z-direction, and so the relative z-velocity
should be -20 for cross-correlations in separation bins containing the pair of points.
For any separation bin containing only
one set or the other, the auto-correlations should be 0 because each set of
points moves coherently.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = -20
npts = 100
xc1, yc1, zc1 = 0.5, 0.55, 0.1
xc2, yc2, zc2 = 0.5, 0.4, 0.95
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 0] = -50.
velocities1[:, 1] = -10.
velocities1[:, 2] = +20.
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities2[:, 0] = +25.
velocities2[:, 1] = +10.
velocities2[:, 2] = +40.
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s2[0], 0, rtol=0.01)
assert np.allclose(s2s2[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], 0, rtol=0.01)
assert np.allclose(s1s2[1], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[1], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], correct_relative_velocity, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_correctness4():
""" This function tests that the
`~halotools.mock_observables.mean_los_velocity_vs_rp` function returns correct
results for a controlled distribution of points whose mean radial velocity
is analytically calculable.
For this test, the configuration is two tight localizations of points,
the first at (0.05, 0.05, 0.3), the second at (0.95, 0.95, 0.4).
The first set of points is moving at (-50, -10, +20),
the second set of points is moving at (+25, +10, +40).
So in this configuration, the first set of points is "losing ground" on
the second set in the z-direction, and so the relative z-velocity
should be +20 for cross-correlations in separation bins containing the pair of points.
For any separation bin containing only one set or the other,
the auto-correlations should be 0 because each set of
points moves coherently.
Note that in this test, PBCs operate in both x & y directions
to identify pairs of points, but PBCs are irrelevant in the z-direction.
The tests will be run with the two point configurations passed in as
separate ``sample1`` and ``sample2`` distributions, as well as bundled
together into the same distribution.
"""
correct_relative_velocity = +20
npts = 100
xc1, yc1, zc1 = 0.05, 0.05, 0.3
xc2, yc2, zc2 = 0.95, 0.95, 0.4
sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
velocities1 = np.zeros(npts*3).reshape(npts, 3)
velocities1[:, 0] = -50.
velocities1[:, 1] = -10.
velocities1[:, 2] = +20.
velocities2 = np.zeros(npts*3).reshape(npts, 3)
velocities2[:, 0] = +25.
velocities2[:, 1] = +10.
velocities2[:, 2] = +40.
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1, s1s2, s2s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s2[0], 0, rtol=0.01)
assert np.allclose(s2s2[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], 0, rtol=0.01)
assert np.allclose(s1s2[1], correct_relative_velocity, rtol=0.01)
assert np.allclose(s2s2[1], 0, rtol=0.01)
# Now bundle sample2 and sample1 together and only pass in the concatenated sample
sample = np.concatenate((sample1, sample2))
velocities = np.concatenate((velocities1, velocities2))
s1s1 = mean_los_velocity_vs_rp(sample, velocities, rp_bins, pi_max, period=1)
assert np.allclose(s1s1[0], 0, rtol=0.01)
assert np.allclose(s1s1[1], correct_relative_velocity, rtol=0.01)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_parallel():
"""
Verify that the `~halotools.mock_observables.mean_los_velocity_vs_rp` function
returns identical results for a random distribution of points whether the function
runs in parallel or serial.
"""
npts = 101
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
velocities1 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
sample2 = np.random.random((npts, 3))
velocities2 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1_parallel, s1s2_parallel, s2s2_parallel = mean_los_velocity_vs_rp(
sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, num_threads=2, period=1)
s1s1_serial, s1s2_serial, s2s2_serial = mean_los_velocity_vs_rp(
sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, num_threads=1, period=1)
assert np.allclose(s1s1_serial, s1s1_parallel, rtol=0.001)
assert np.allclose(s1s2_serial, s1s2_parallel, rtol=0.001)
assert np.allclose(s2s2_serial, s2s2_parallel, rtol=0.001)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_auto_consistency():
""" Verify that the `~halotools.mock_observables.mean_los_velocity_vs_rp` function
returns self-consistent auto-correlation results
regardless of whether we ask for cross-correlations.
"""
npts = 101
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
velocities1 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
sample2 = np.random.random((npts, 3))
velocities2 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1a, s1s2a, s2s2a = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
s1s1b, s2s2b = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2,
do_cross=False)
assert np.allclose(s1s1a, s1s1b, rtol=0.001)
assert np.allclose(s2s2a, s2s2b, rtol=0.001)
@pytest.mark.slow
def test_mean_los_velocity_vs_rp_cross_consistency():
""" Verify that the `~halotools.mock_observables.mean_los_velocity_vs_rp` function
returns self-consistent cross-correlation results
regardless of whether we ask for auto-correlations.
"""
npts = 101
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
velocities1 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
sample2 = np.random.random((npts, 3))
velocities2 = np.random.normal(loc=0, scale=100, size=npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.3]), 0.2
s1s1a, s1s2a, s2s2a = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2)
s1s2b = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2,
do_auto=False)
assert np.allclose(s1s2a, s1s2b, rtol=0.001)
|
StarcoderdataPython
|
4842858
|
<reponame>SantaSpeen/gitflic
"""
Gitflic authentication wrapper.
"""
import json
import os
import threading
from urllib.parse import quote_plus
import webbrowser
from enum import Enum
from typing import Union
import logging
import requests
from .exceptions import AuthError, GitflicExceptions
from .__version__ import __version__
from gitflic.GitflicOAuthServer import get_server
OAUTH_URL = "https://oauth.gitflic.ru/oauth/authorize?scope={}&clientId={}&redirectUrl={}&state={}"
def _add_enum_values(*args):
string = str()
for arg in args:
if isinstance(arg, Enum):
string += arg.value
else:
string += str(arg)
string += ","
return string[:len(string) - 1]
class GitflicAuthScopes(Enum):
""" Authentication scopes from Gitflic. Doc: https://gitflic.ru/help/api/access-token"""
USER_READ = "USER_READ"
USER_WRITE = "USER_WRITE"
PROJECT_READ = "PROJECT_READ"
PROJECT_WRITE = "PROJECT_WRITE"
PROJECT_EDIT = "PROJECT_EDIT"
# For a hint in the IDE
ALL_READ: "GitflicAuthScopes.ALL_READ"
ALL_WRITE: "GitflicAuthScopes.ALL_WRITE"
ALL: "GitflicAuthScopes.ALL"
GitflicAuthScopes.ALL_READ = _add_enum_values(GitflicAuthScopes.USER_READ,
GitflicAuthScopes.PROJECT_READ)
GitflicAuthScopes.ALL_WRITE = _add_enum_values(GitflicAuthScopes.PROJECT_WRITE,
GitflicAuthScopes.PROJECT_EDIT,
GitflicAuthScopes.PROJECT_WRITE)
GitflicAuthScopes.ALL = _add_enum_values(GitflicAuthScopes.ALL_WRITE,
GitflicAuthScopes.ALL_READ)
class GitflicAuth:
"""
Gitflic authentication wrapper.
"""
# noinspection PyTypeChecker
def __init__(self,
access_token: str = None,
localhost_oauth: bool = False,
scope: Union[GitflicAuthScopes, str] = GitflicAuthScopes.ALL_READ,
client_id: str = "cc2a5d8a-385a-436<PASSWORD>b2b-bb2412e<PASSWORD>",
redirect_url: str = "https://gitflic.ru/settings/oauth/token",
state: str = None):
"""
:param access_token: Raw token for raw AUTH.
:param scope: OAUTH field. Default GitflicAuthScopes.ALL_READ
:param client_id: OAUTH field. Default "cc<PASSWORD>", Simple gitflic app
:param redirect_url: OAUTH field. Default "https://gitflic.ru/settings/oauth/token/"
:param state: OAUTH field. Default "python_user"
"""
# Logging.
self.log: logging.Logger = logging.getLogger(__name__)
# Requests.
self.session: requests.Session = requests.Session()
# Set headers
self.session.headers = {
"User-Agent": f"gitflic-py/{__version__}",
'Accept': "application/*",
'Authorization': "token "
}
# Token fields.
self.access_token: str = access_token
self.refresh_token: str = None
# OAUTH fields.
self.scope: str = scope if not isinstance(scope, GitflicAuthScopes) else scope.value
self._localhost_oauth: bool = localhost_oauth
self.client_id: str = client_id
self.redirect_url: str = redirect_url
self.state: str = state
self._server_thread: threading.Thread = None
self._try_login()
def _try_login(self):
"""
Tries to login user with token or OAUTH.
"""
if self._localhost_oauth:
self.state = self.state or "GitflicOAuthServer"
if not (self.scope and self.client_id):
raise GitflicExceptions(
"Using localhost, you are required to this params: ('scope', 'client_id')! "
)
self._oauth_login()
elif self.access_token:
# Raw authorization.
self._token_login()
else:
if self.scope and self.client_id and self.state:
# OAUTH authorization.
self._oauth_login()
else:
if any((self.scope, self.client_id, self.redirect_url, self.state)):
raise GitflicExceptions(
"Not found one or more of params for OAUTH, you are required to send ALL params from ('scope', 'client_id', 'redirect_url', 'state')! "
"See docs: https://gitflic.ru/help/api/access-token."
)
raise GitflicExceptions(
"Please pass 'token' param for raw auth or ('scope', 'client_id', 'redirect_url', 'state') params for OAUTH "
"See docs: https://gitflic.ru/help/api/access-token."
)
def _oauth_login(self):
"""
Tries to login user with OAUTH.
"""
self.log.debug("Trying to login with OAUTH...")
if self._localhost_oauth:
server, self.redirect_url, self.state = get_server(self)
# OAUTH authorization.
redirect_url = quote_plus(self.redirect_url)
webbrowser.open(OAUTH_URL.format(self.scope, self.client_id, redirect_url, self.state))
if not self._localhost_oauth:
code = input("Paste code: ")
if not code:
raise AuthError("Cannot find code.")
res = self.session.get("https://oauth.gitflic.ru/api/token/access?code=" + code)
if res.status_code == 200:
res = res.json()
jsn = {"request": {"code": code, "state": None}, "response": res}
with open(os.path.join(os.getcwd(), "config.json"), "w") as f:
json.dump(jsn, f, indent=3)
access_token = res['accessToken']
self.access_token = access_token
self.refresh_token = res['refreshToken']
self.session.headers["Authorization"] += access_token
else:
error = None
title_split = res.json()['title'].split(".")
if len(title_split) == 6:
error = title_split[2].title() + " " + title_split[3].title() + ": " + title_split[4]
raise AuthError(error or "Unknown error")
else:
self._server_thread = threading.Thread(target=server.serve_forever)
self._server_thread.start()
print("Waiting server..")
self._server_thread.join()
self.session.headers['Authorization'] += self.access_token
self.check_token()
def _token_login(self):
"""
Tries to login user with given access token.
"""
self.log.debug(f"Trying to login with token={self.access_token}...")
assert isinstance(self.access_token, str)
self.session.headers['Authorization'] += self.access_token
self.check_token()
def check_token(self):
"""
Checks that current auth session token is valid or not (by making request to get current user).
"""
self.log.debug("Checking token....")
r = self.session.get("https://api.gitflic.ru/user/me")
if r.status_code == 403:
e = AuthError("Authentication failed. Invalid token?")
e.response = r
raise e
else:
r = r.json()
self.log.debug(f"Successfully logged as {r.get('username')} {r.get('id')}")
|
StarcoderdataPython
|
1771784
|
#instaspy
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class instaspy:
def __init__(self, username, password, target_username):
self.username = username
self.password = password
self.target_username = target_username
self.browser = webdriver.Firefox()
def login(self):
browser = self.browser
browser.implicitly_wait(5)
#opening instagram.com
browser.get('https://www.instagram.com/')
#-------login process starts
        # finding input boxes for username and password and passing the appropriate values
browser.find_element_by_xpath("//input[@name='username']").send_keys(self.username)
browser.find_element_by_xpath("//input[@name='password']").send_keys(self.password)
        # finding the login button and clicking it
browser.find_element_by_xpath("//button[@type='submit']").click()
#-------login process ends
def target_profile(self):
browser = self.browser
browser.implicitly_wait(5)
#Clicking "Not Now" in pop up just after login
sleep(4)
not_now_button = browser.find_element_by_xpath("//button[text()='Not Now']")
sleep(1)
not_now_button.click()
#-------search for victim's username starts
#click search bar
browser.find_element_by_xpath("//span[text()='Search']").click()
#enter victim's username and clicking Search
browser.find_element_by_xpath("//input[@placeholder='Search']").send_keys(self.target_username)
sleep(3)
#open victim's profile
browser.find_element_by_xpath("//span[text()='"+self.target_username+"'][@class='Ap253']").click()
sleep(2)
#-------search for username stops
def list_followers(self):
browser = self.browser
browser.implicitly_wait(5)
        #getting the total number of followers, then clicking and opening the followers tab
max = int(browser.find_element_by_xpath("/html[1]/body[1]/div[1]/section[1]/main[1]/div[1]/header[1]/section[1]/ul[1]/li[2]/a[1]/span[1]").get_attribute('title').replace(',',''))
browser.find_element_by_xpath("//a[@class='-nal3 '][@href='/"+self.target_username+"/followers/']").click()
sleep(0.5)
followersList = browser.find_element_by_css_selector('div[role=\'dialog\'] ul')
numberOfFollowersInList = len(followersList.find_elements_by_css_selector('li'))
temp = [numberOfFollowersInList, 0]
followersList.click()
actionChain = webdriver.ActionChains(browser)
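        #scroll the followers dialog by repeatedly sending SPACE until no new entries load
        #or the expected number of followers has been reached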
while (numberOfFollowersInList < max):
actionChain.key_down(Keys.SPACE).key_up(Keys.SPACE).perform()
sleep(1)
numberOfFollowersInList = len(followersList.find_elements_by_css_selector('li'))
sleep(1)
if (numberOfFollowersInList == temp[1]):
break
else:
temp[1] = temp[0]
temp[0] = numberOfFollowersInList
followers = []
for user in followersList.find_elements_by_css_selector('li'):
userLink = user.find_element_by_css_selector('a').get_attribute('href')
followers.append(userLink)
if (len(followers) == max):
break
return followers
def list_following(self):
browser = self.browser
browser.implicitly_wait(5)
browser.get("https://www.instagram.com/"+self.target_username+"/")
        #getting the total number of accounts followed, then clicking and opening the following tab
max = int(browser.find_element_by_xpath("/html[1]/body[1]/div[1]/section[1]/main[1]/div[1]/header[1]/section[1]/ul[1]/li[3]/a[1]/span[1]").text.replace(',',''))
browser.find_element_by_xpath("//a[@class='-nal3 '][@href='/"+self.target_username+"/following/']").click()
sleep(0.5)
followingList = browser.find_element_by_css_selector('div[role=\'dialog\'] ul')
numberOfFollowingInList = len(followingList.find_elements_by_css_selector('li'))
temp = [numberOfFollowingInList, 0]
followingList.click()
actionChain = webdriver.ActionChains(browser)
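        #scroll the following dialog by repeatedly sending SPACE until no new entries load
        #or the expected number of followed accounts has been reached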
while (numberOfFollowingInList < max):
actionChain.key_down(Keys.SPACE).key_up(Keys.SPACE).perform()
sleep(1)
numberOfFollowingInList = len(followingList.find_elements_by_css_selector('li'))
sleep(1)
if (numberOfFollowingInList == temp[1]):
break
else:
temp[1] = temp[0]
temp[0] = numberOfFollowingInList
following = []
for user in followingList.find_elements_by_css_selector('li'):
userLink = user.find_element_by_css_selector('a').get_attribute('href')
following.append(userLink)
if (len(following) == max):
break
browser.close()
return following
if __name__ == '__main__':
InstaSpy_bot = instaspy('<Username>', '<Password>', '<target_username>')
InstaSpy_bot.login()
InstaSpy_bot.target_profile()
followers_list = InstaSpy_bot.list_followers()
following_list = InstaSpy_bot.list_following()
with open('followers.csv', 'w') as f1:
f1.write("Followers list:\n")
for l1 in followers_list:
f1.write(l1)
f1.write('\n')
with open('following.csv', 'w') as f2:
f2.write("Following list:\n")
for l2 in following_list:
f2.write(l2)
f2.write("\n")
|
StarcoderdataPython
|
4829050
|
from django import template
register = template.Library()
@register.inclusion_tag('widgets/std_field.html', takes_context=True)
def std_field(context, field, **kwargs):
field.__dict__.update(kwargs)
return {"field": field}
@register.inclusion_tag('widgets/multi_field.html', takes_context=True)
def radio_field(context, field, **kwargs):
kwargs.update({"radiogroup": True})
field.__dict__.update(kwargs)
return {"field": field}
@register.inclusion_tag('widgets/multi_field.html', takes_context=True)
def multi_field(context, field, **kwargs):
field.__dict__.update(kwargs)
return {"field": field}
@register.inclusion_tag('widgets/hidden_field.html', takes_context=True)
def hidden_field(context, field, **kwargs):
field.__dict__.update(kwargs)
return {"field": field}
@register.inclusion_tag("widgets/split_form.html", takes_context=True)
def split_form(context, split_form):
return {"form": split_form}
|
StarcoderdataPython
|
1771732
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""RAMSES RF - a RAMSES-II protocol decoder & analyser.
Schema processor.
"""
# TODO: move max_zones into system-specific location, ?profile
import logging
import re
from types import SimpleNamespace
from typing import Any
import voluptuous as vol
from .const import (
DEFAULT_MAX_ZONES,
DEV_ROLE,
DEV_ROLE_MAP,
DEV_TYPE,
DEV_TYPE_MAP,
DEVICE_ID_REGEX,
DONT_CREATE_MESSAGES,
SZ_ALIAS,
SZ_CLASS,
SZ_FAKED,
SZ_ZONE_IDX,
ZON_ROLE_MAP,
SystemType,
__dev_mode__,
)
from .helpers import shrink
from .protocol import PACKET_LOG, PACKET_LOG_SCHEMA, SERIAL_CONFIG_SCHEMA
from .protocol.const import (
SZ_ACTUATORS,
SZ_DEVICES,
SZ_INBOUND,
SZ_NAME,
SZ_OUTBOUND,
SZ_SENSOR,
SZ_ZONE_TYPE,
SZ_ZONES,
)
from .protocol.transport import DEV_HACK_REGEX, SZ_BLOCK_LIST, SZ_KNOWN_LIST
DEV_MODE = __dev_mode__ and False
_LOGGER = logging.getLogger(__name__)
if DEV_MODE:
_LOGGER.setLevel(logging.DEBUG)
# schema strings
SCHEMA = "schema"
SZ_MAIN_CONTROLLER = "main_controller"
SZ_CONTROLLER = DEV_TYPE_MAP[DEV_TYPE.CTL]
SZ_TCS_SYSTEM = "system"
SZ_APPLIANCE_CONTROL = DEV_ROLE_MAP[DEV_ROLE.APP]
SZ_ORPHANS = "orphans"
SZ_DHW_SYSTEM = "stored_hotwater"
SZ_DHW_SENSOR = DEV_ROLE_MAP[DEV_ROLE.DHW]
SZ_DHW_VALVE = DEV_ROLE_MAP[DEV_ROLE.HTG]
SZ_HTG_VALVE = DEV_ROLE_MAP[DEV_ROLE.HT1]
SZ_SENSOR_FAKED = "sensor_faked"
SZ_UFH_SYSTEM = "underfloor_heating"
SZ_UFH_CTL = DEV_TYPE_MAP[DEV_TYPE.UFC] # ufh_controller
SZ_CIRCUITS = "circuits"
DEV_REGEX_ANY = vol.Match(DEVICE_ID_REGEX.ANY)
DEV_REGEX_SEN = vol.Match(DEVICE_ID_REGEX.SEN)
DEV_REGEX_CTL = vol.Match(DEVICE_ID_REGEX.CTL)
DEV_REGEX_DHW = vol.Match(DEVICE_ID_REGEX.DHW)
DEV_REGEX_HGI = vol.Match(DEVICE_ID_REGEX.HGI)
DEV_REGEX_APP = vol.Match(DEVICE_ID_REGEX.APP)
DEV_REGEX_BDR = vol.Match(DEVICE_ID_REGEX.BDR)
DEV_REGEX_UFC = vol.Match(DEVICE_ID_REGEX.UFC)
HEAT_ZONES_STRS = tuple(ZON_ROLE_MAP[t] for t in ZON_ROLE_MAP.HEAT_ZONES)
DOMAIN_ID = vol.Match(r"^[0-9A-F]{2}$")
UFH_IDX_REGEX = r"^0[0-8]$"
UFH_IDX = vol.Match(UFH_IDX_REGEX)
ZONE_IDX = vol.Match(r"^0[0-9AB]$") # TODO: what if > 12 zones? (e.g. hometronics)
INPUT_FILE = "input_file"
# Config parameters
DEBUG_MODE = "debug_mode"
SZ_CONFIG = "config"
DISABLE_DISCOVERY = "disable_discovery"
DISABLE_SENDING = "disable_sending"
ENABLE_EAVESDROP = "enable_eavesdrop"
ENFORCE_KNOWN_LIST = f"enforce_{SZ_KNOWN_LIST}"
EVOFW_FLAG = "evofw_flag"
SZ_MAX_ZONES = "max_zones"
REDUCE_PROCESSING = "reduce_processing"
SERIAL_CONFIG = "serial_config"
USE_ALIASES = "use_aliases" # use friendly device names from known_list
USE_SCHEMA = "use_schema"
USE_REGEX = "use_regex"
def renamed(new_key):
def func(value):
raise vol.Invalid(f"the key name has changed: rename it to '{new_key}'")
return func
# 1/3: Schemas for Configuration
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DISABLE_DISCOVERY, default=False): bool,
vol.Optional(DISABLE_SENDING, default=False): bool,
vol.Optional(ENABLE_EAVESDROP, default=False): bool,
vol.Optional(REDUCE_PROCESSING, default=0): vol.All(
int, vol.Range(min=0, max=DONT_CREATE_MESSAGES)
),
vol.Optional(SZ_MAX_ZONES, default=DEFAULT_MAX_ZONES): vol.All(
int, vol.Range(min=1, max=16)
),
vol.Optional(USE_SCHEMA, default=True): vol.Any(None, bool),
vol.Optional(ENFORCE_KNOWN_LIST, default=None): vol.Any(None, bool),
vol.Optional(USE_ALIASES, default=None): vol.Any(None, bool),
vol.Optional(EVOFW_FLAG, default=None): vol.Any(None, str),
vol.Optional(USE_REGEX, default={}): dict,
},
extra=vol.ALLOW_EXTRA, # TODO: remove for production
)
SCHEMA_DEV = vol.Any(
{
vol.Optional(SZ_ALIAS, default=None): vol.Any(None, str),
vol.Optional(SZ_CLASS, default=None): vol.Any(
None, *(DEV_TYPE_MAP[s] for s in DEV_TYPE_MAP.slugs())
),
vol.Optional(SZ_FAKED, default=None): vol.Any(None, bool),
vol.Optional("_note"): str, # only a convenience, not used
},
)
_SCHEMA_DEV = vol.Schema(
{vol.Optional(DEV_REGEX_ANY): SCHEMA_DEV},
extra=vol.PREVENT_EXTRA,
)
# 2/3: Schemas for Heating systems
SYSTEM_KLASS = (SystemType.EVOHOME, SystemType.HOMETRONICS, SystemType.SUNDIAL)
SCHEMA_TCS = vol.Schema(
{
vol.Required(SZ_APPLIANCE_CONTROL, default=None): vol.Any(None, DEV_REGEX_APP),
vol.Optional("heating_control"): renamed(SZ_APPLIANCE_CONTROL),
vol.Optional(SZ_CLASS, default=SystemType.EVOHOME): vol.Any(*SYSTEM_KLASS),
},
extra=vol.PREVENT_EXTRA,
)
SCHEMA_DHW = vol.Schema(
{
vol.Optional(SZ_SENSOR, default=None): vol.Any(None, DEV_REGEX_DHW),
vol.Optional(SZ_DHW_VALVE, default=None): vol.Any(None, DEV_REGEX_BDR),
vol.Optional(SZ_HTG_VALVE, default=None): vol.Any(None, DEV_REGEX_BDR),
vol.Optional(SZ_DHW_SENSOR): renamed(SZ_SENSOR),
}
)
UFC_CIRCUIT = vol.Schema(
{
vol.Required(UFH_IDX): vol.Any(
{vol.Optional(SZ_ZONE_IDX): vol.Any(ZONE_IDX)},
),
}
)
SCHEMA_UFH = vol.Schema(
{
vol.Required(DEV_REGEX_UFC): vol.Any(
None, {vol.Optional(SZ_CIRCUITS): vol.Any(None, dict)}
)
}
)
SCHEMA_UFH = vol.All(SCHEMA_UFH, vol.Length(min=1, max=3))
SCHEMA_ZON = vol.Schema( # vol.All([DEV_REGEX_ANY], vol.Length(min=0))(['01:123456'])
{
vol.Optional(SZ_CLASS, default=None): vol.Any(None, *HEAT_ZONES_STRS),
vol.Optional(SZ_SENSOR, default=None): vol.Any(None, DEV_REGEX_SEN),
vol.Optional(SZ_DEVICES): renamed(SZ_ACTUATORS),
vol.Optional(SZ_ACTUATORS, default=[]): vol.All(
[DEV_REGEX_ANY], vol.Length(min=0)
),
vol.Optional(SZ_ZONE_TYPE): renamed(SZ_CLASS),
vol.Optional("zone_sensor"): renamed(SZ_SENSOR),
# vol.Optional(SZ_SENSOR_FAKED): bool,
vol.Optional(f"_{SZ_NAME}"): vol.Any(str, None),
},
extra=vol.PREVENT_EXTRA,
)
# SCHEMA_ZON({SZ_CLASS: None, SZ_DEVICES: None}) # TODO: remove me
SCHEMA_ZONES = vol.All(
vol.Schema({vol.Required(ZONE_IDX): SCHEMA_ZON}),
vol.Length(min=1, max=DEFAULT_MAX_ZONES),
)
SCHEMA_SYS = vol.Schema(
{
# vol.Required(SZ_CONTROLLER): DEV_REGEX_CTL,
vol.Optional(SZ_TCS_SYSTEM, default={}): vol.Any({}, SCHEMA_TCS),
vol.Optional(SZ_DHW_SYSTEM, default={}): vol.Any({}, SCHEMA_DHW),
vol.Optional(SZ_UFH_SYSTEM, default={}): vol.Any({}, SCHEMA_UFH),
vol.Optional(SZ_ORPHANS, default=[]): vol.Any([], [DEV_REGEX_ANY]),
vol.Optional(SZ_ZONES, default={}): vol.Any({}, SCHEMA_ZONES),
},
extra=vol.PREVENT_EXTRA,
)
# 3/3: Global Schemas
SCHEMA_GLOBAL_CONFIG = vol.Schema(
{
vol.Required(SZ_CONFIG): CONFIG_SCHEMA.extend(
{
vol.Optional(SERIAL_CONFIG): SERIAL_CONFIG_SCHEMA,
vol.Optional(PACKET_LOG, default={}): vol.Any({}, PACKET_LOG_SCHEMA),
}
),
vol.Optional(SZ_KNOWN_LIST, default={}): vol.All(
_SCHEMA_DEV, vol.Length(min=0)
),
vol.Optional(SZ_BLOCK_LIST, default={}): vol.All(
_SCHEMA_DEV, vol.Length(min=0)
),
},
extra=vol.REMOVE_EXTRA,
)
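# Example (sketch, not from the original source): a minimal mapping that should pass
# SCHEMA_GLOBAL_CONFIG validation; the device id below is a hypothetical placeholder.
#
#   _example = SCHEMA_GLOBAL_CONFIG(
#       {
#           SZ_CONFIG: {ENFORCE_KNOWN_LIST: True},
#           SZ_KNOWN_LIST: {"01:123456": {SZ_ALIAS: "controller"}},
#       }
#   )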
def load_config(
serial_port, input_file, **kwargs
) -> tuple[SimpleNamespace, dict, dict, dict]:
"""Process the configuration, including any filter lists.
Returns:
- config (includes config.enforce_known_list)
- schema (processed further later on)
- known_list (is a dict)
- block_list (is a dict)
"""
config = SCHEMA_GLOBAL_CONFIG(kwargs)
schema = {k: v for k, v in kwargs.items() if k not in config and k[:1] != "_"}
block_list = config.pop(SZ_BLOCK_LIST)
known_list = config.pop(SZ_KNOWN_LIST)
config = CONFIG_SCHEMA.extend(
{vol.Optional(SERIAL_CONFIG, default={}): SERIAL_CONFIG_SCHEMA}
)(config[SZ_CONFIG])
if serial_port and input_file:
_LOGGER.warning(
"Serial port was specified (%s), so input file (%s) will be ignored",
serial_port,
input_file,
)
elif serial_port is None:
config[DISABLE_SENDING] = True
if config[DISABLE_SENDING]:
config[DISABLE_DISCOVERY] = True
if config[ENABLE_EAVESDROP]:
_LOGGER.warning(
f"{ENABLE_EAVESDROP} enabled: this is discouraged for routine use"
" (there be dragons here)"
)
update_config(config, known_list, block_list)
config = SimpleNamespace(**config)
return (config, schema, known_list, block_list)
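# Example call (sketch, hypothetical serial port and values; not part of the original source):
#
#   config, schema, known_list, block_list = load_config(
#       "/dev/ttyUSB0", None, config={ENFORCE_KNOWN_LIST: False}
#   )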
def update_config(config, known_list, block_list) -> dict:
"""Determine which device filter to use, if any: known_list or block_list."""
if SZ_INBOUND not in config[USE_REGEX]: # TODO: move to voluptuous
config[USE_REGEX][SZ_INBOUND] = {}
if SZ_OUTBOUND not in config[USE_REGEX]:
config[USE_REGEX][SZ_OUTBOUND] = {}
if DEV_HACK_REGEX: # HACK: for DEV/TEST convenience, not for production
config[USE_REGEX][SZ_INBOUND].update(
{
"( 03:.* 03:.* (1060|2389|30C9) 003) ..": "\\1 00",
# "02:153425": "20:153425",
}
)
if config[ENFORCE_KNOWN_LIST] and not known_list:
_LOGGER.warning(
f"An empty {SZ_KNOWN_LIST} was provided, so it cant be used "
f"as a whitelist (device_id filter)"
)
config[ENFORCE_KNOWN_LIST] = False
if config[ENFORCE_KNOWN_LIST]:
_LOGGER.info(
f"The {SZ_KNOWN_LIST} will be used "
f"as a whitelist (device_id filter), length = {len(known_list)}"
)
_LOGGER.debug(f"known_list = {known_list}")
elif block_list:
_LOGGER.info(
f"The {SZ_BLOCK_LIST} will be used "
f"as a blacklist (device_id filter), length = {len(block_list)}"
)
_LOGGER.debug(f"block_list = {block_list}")
elif known_list:
_LOGGER.warning(
f"It is strongly recommended to use the {SZ_KNOWN_LIST} "
f"as a whitelist (device_id filter), configure: {ENFORCE_KNOWN_LIST} = True"
)
_LOGGER.debug(f"known_list = {known_list}")
else:
_LOGGER.warning(
f"It is strongly recommended to provide a {SZ_KNOWN_LIST}, and use it "
f"as a whitelist (device_id filter), configure: {ENFORCE_KNOWN_LIST} = True"
)
def _get_device(gwy, dev_id, **kwargs) -> Any: # Device
"""Raise an LookupError if a device_id is filtered out by a list.
The underlying method is wrapped only to provide a better error message.
"""
def check_filter_lists(dev_id: str) -> None:
"""Raise an LookupError if a device_id is filtered out by a list."""
err_msg = None
if gwy.config.enforce_known_list and dev_id not in gwy._include:
err_msg = f"it is in the {SCHEMA}, but not in the {SZ_KNOWN_LIST}"
if dev_id in gwy._exclude:
err_msg = f"it is in the {SCHEMA}, but also in the {SZ_BLOCK_LIST}"
if err_msg:
raise LookupError(
f"Can't create {dev_id}: {err_msg} (check the lists and the {SCHEMA})"
)
check_filter_lists(dev_id)
return gwy.get_device(dev_id, **kwargs)
def load_schema(gwy, **kwargs) -> dict:
"""Process the schema, and the configuration and return True if it is valid."""
[
load_system(gwy, ctl_id, schema)
for ctl_id, schema in kwargs.items()
if re.match(DEVICE_ID_REGEX.ANY, ctl_id)
]
if kwargs.get(SZ_MAIN_CONTROLLER):
gwy._tcs = gwy.system_by_id.get(kwargs[SZ_MAIN_CONTROLLER])
[
_get_device(gwy, device_id, disable_warning=True)
for device_id in kwargs.pop(SZ_ORPHANS, [])
]
def load_system(gwy, ctl_id, schema) -> Any: # System
"""Create a system using its schema."""
# print(schema)
# schema = SCHEMA_ZON(schema)
ctl = _get_device(gwy, ctl_id)
ctl.tcs._update_schema(**schema) # TODO
for dev_id in schema.get(SZ_UFH_SYSTEM, {}).keys(): # UFH controllers
_get_device(gwy, dev_id, parent=ctl) # , **_schema)
for dev_id in schema.get(SZ_ORPHANS, []):
_get_device(gwy, dev_id, parent=ctl)
if False and DEV_MODE:
import json
src = json.dumps(shrink(schema), sort_keys=True)
dst = json.dumps(shrink(gwy.system_by_id[ctl.id].schema), sort_keys=True)
# assert dst == src, "They don't match!"
print(src)
print(dst)
return ctl.tcs
|
StarcoderdataPython
|
77913
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------------------
# Connect to MongoDB and return the client -
# ---------------------------------------------------------------------------------------
import pymongo, logging, config, time
from logging.config import fileConfig
class Mongodb:
client = ""
def __init__(self) -> None:
# set up logging
fileConfig(config.env.LOGGING_CONFIG_FILE)
def connect(self, DB_URI):
logging.info("Connecting to DB...")
while(True):
try:
self.client = pymongo.MongoClient(DB_URI)
db = self.client.binance
status = db.command("serverStatus")
logging.info("Running MongoDB version {}".format(status.get('version')))
return self.client
except Exception as error:
logging.error(error)
time.sleep(1)
def close(self):
self.client.close()
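# Hypothetical usage sketch (not part of the original module); assumes a reachable
# MongoDB instance and a valid connection string:
#
#   mongo = Mongodb()
#   client = mongo.connect("mongodb://localhost:27017")
#   print(client.list_database_names())
#   mongo.close()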
|
StarcoderdataPython
|
1707494
|
# Copyright 2021 <NAME> (KRR-Oxford). All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import datetime
import time
# subclass of logging.Formatter
class RuntimeFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start_time = time.time()
def formatTime(self, record, datefmt=None):
"""Record relative runtime in hr:min:sec format
"""
duration = datetime.datetime.utcfromtimestamp(record.created - self.start_time)
elapsed = duration.strftime("%H:%M:%S")
return "{}".format(elapsed)
def create_logger(model_name: str, saved_path: str):
"""Create logger for both console info and saved info
"""
logger = logging.getLogger(model_name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(f"{saved_path}/{model_name}.log")
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = RuntimeFormatter('[Time: %(asctime)s] - [PID: %(process)d] - [Model: %(name)s] \n%(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
return logger
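# Hypothetical usage sketch (not part of the original module); assumes the "./logs"
# directory already exists and "bertmap" is just a placeholder model name:
#
#   logger = create_logger(model_name="bertmap", saved_path="./logs")
#   logger.info("training started")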
##################################################################################
### pretty print ###
##################################################################################
def banner_msg(message: str, banner_len: int = 70, sym="#"):
"""Print banner message:
######################################################################
### example ###
######################################################################
"""
print()
print(sym * banner_len)
message = sym * 3 + " " * ((banner_len - len(message)) // 2 - 3) + message
message = message + " " * (banner_len - len(message) - 3) + sym * 3
print(message)
print(sym * banner_len)
print()
|
StarcoderdataPython
|
4815952
|
<gh_stars>1-10
# Generated by Django 3.2.6 on 2021-08-22 02:30
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('committees', '0028_auto_20210809_0310'),
]
operations = [
migrations.AlterField(
model_name='resourcespage',
name='resources',
field=wagtail.core.fields.StreamField([('resource', wagtail.core.blocks.StreamBlock([('resource_name', wagtail.core.blocks.CharBlock()), ('information', wagtail.core.blocks.RichTextBlock()), ('structured_info_block', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.RichTextBlock())]))]))]),
),
]
|
StarcoderdataPython
|
133220
|
<reponame>Sketos/PyAutoArray<filename>test_autoarray/unit/structures/test_arrays.py
import os
import numpy as np
import pytest
import shutil
import autoarray as aa
from autoarray.structures import arrays
from autoarray import exc
test_data_dir = "{}/../test_files/array/".format(
os.path.dirname(os.path.realpath(__file__))
)
class TestArrayAPI:
class TestManual:
def test__array__makes_array_without_other_inputs(self):
arr = aa.array.manual_2d(array=[[1.0, 2.0], [3.0, 4.0]])
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0])).all()
arr = aa.array.manual_1d(array=[1.0, 2.0, 3.0, 4.0], shape_2d=(2, 2))
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0])).all()
arr = aa.array.manual_1d(
array=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape_2d=(2, 3), store_in_1d=True
)
assert type(arr) == arrays.Array
assert (arr == np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])).all()
assert (arr.in_2d == np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])).all()
arr = aa.array.manual_1d(
array=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape_2d=(2, 3), store_in_1d=False
)
assert type(arr) == arrays.Array
assert (arr == np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])).all()
assert (arr.in_2d == np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])).all()
def test__array__makes_array_with_pixel_scale(self):
arr = aa.array.manual_2d(array=[[1.0, 2.0], [3.0, 4.0]], pixel_scales=1.0)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
arr = aa.array.manual_1d(
array=[1.0, 2.0, 3.0, 4.0],
shape_2d=(2, 2),
pixel_scales=1.0,
origin=(0.0, 1.0),
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
arr = aa.array.manual_1d(
array=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
shape_2d=(2, 3),
pixel_scales=(2.0, 3.0),
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])).all()
assert arr.pixel_scales == (2.0, 3.0)
assert arr.geometry.origin == (0.0, 0.0)
def test__array__makes_with_pixel_scale_and_sub_size(self):
arr = aa.array.manual_2d(
array=[[1.0, 2.0], [3.0, 4.0]], pixel_scales=1.0, sub_size=1
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 1
arr = aa.array.manual_1d(
array=[1.0, 2.0, 3.0, 4.0],
shape_2d=(1, 1),
pixel_scales=1.0,
sub_size=2,
origin=(0.0, 1.0),
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
assert arr.mask.sub_size == 2
arr = aa.array.manual_1d(
array=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
shape_2d=(2, 1),
pixel_scales=2.0,
sub_size=2,
)
assert type(arr) == arrays.Array
assert (
arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (
arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
).all()
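            # with sub_size=2 each pixel holds a 2x2 sub-grid, so mean binning gives
            # (1+2+3+4)/4 = 2.5 and (5+6+7+8)/4 = 6.5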
assert (arr.in_2d_binned == np.array([[2.5], [6.5]])).all()
assert (arr.in_1d_binned == np.array([2.5, 6.5])).all()
assert arr.pixel_scales == (2.0, 2.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 2
class TestFull:
def test__array__makes_array_without_other_inputs(self):
arr = aa.array.full(fill_value=1.0, shape_2d=(2, 2))
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 1.0], [1.0, 1.0]])).all()
assert (arr.in_1d == np.array([1.0, 1.0, 1.0, 1.0])).all()
arr = aa.array.full(fill_value=2.0, shape_2d=(2, 2), store_in_1d=True)
assert type(arr) == arrays.Array
assert (arr == np.array([2.0, 2.0, 2.0, 2.0])).all()
assert (arr.in_2d == np.array([[2.0, 2.0], [2.0, 2.0]])).all()
assert (arr.in_1d == np.array([2.0, 2.0, 2.0, 2.0])).all()
arr = aa.array.full(fill_value=2.0, shape_2d=(2, 2), store_in_1d=False)
assert type(arr) == arrays.Array
assert (arr == np.array([[2.0, 2.0], [2.0, 2.0]])).all()
assert (arr.in_2d == np.array([[2.0, 2.0], [2.0, 2.0]])).all()
assert (arr.in_1d == np.array([2.0, 2.0, 2.0, 2.0])).all()
def test__array__makes_scaled_array_with_pixel_scale(self):
arr = aa.array.full(fill_value=1.0, shape_2d=(2, 2), pixel_scales=1.0)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 1.0], [1.0, 1.0]])).all()
assert (arr.in_1d == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
arr = aa.array.full(
fill_value=2.0, shape_2d=(2, 2), pixel_scales=1.0, origin=(0.0, 1.0)
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[2.0, 2.0], [2.0, 2.0]])).all()
assert (arr.in_1d == np.array([2.0, 2.0, 2.0, 2.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
def test__array__makes_scaled_sub_array_with_pixel_scale_and_sub_size(self):
arr = aa.array.full(
fill_value=1.0, shape_2d=(1, 4), pixel_scales=1.0, sub_size=1
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 1.0, 1.0, 1.0]])).all()
assert (arr.in_1d == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 1
arr = aa.array.full(
fill_value=2.0,
shape_2d=(1, 1),
pixel_scales=1.0,
sub_size=2,
origin=(0.0, 1.0),
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[2.0, 2.0], [2.0, 2.0]])).all()
assert (arr.in_1d == np.array([2.0, 2.0, 2.0, 2.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
assert arr.mask.sub_size == 2
class TestOnesZeros:
def test__array__makes_array_without_other_inputs(self):
arr = aa.array.ones(shape_2d=(2, 2))
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 1.0], [1.0, 1.0]])).all()
assert (arr.in_1d == np.array([1.0, 1.0, 1.0, 1.0])).all()
arr = aa.array.zeros(shape_2d=(2, 2), store_in_1d=True)
assert type(arr) == arrays.Array
assert (arr == np.array([0.0, 0.0, 0.0, 0.0])).all()
assert (arr.in_2d == np.array([[0.0, 0.0], [0.0, 0.0]])).all()
assert (arr.in_1d == np.array([0.0, 0.0, 0.0, 0.0])).all()
arr = aa.array.zeros(shape_2d=(2, 2), store_in_1d=False)
assert type(arr) == arrays.Array
assert (arr == np.array([[0.0, 0.0], [0.0, 0.0]])).all()
assert (arr.in_2d == np.array([[0.0, 0.0], [0.0, 0.0]])).all()
assert (arr.in_1d == np.array([0.0, 0.0, 0.0, 0.0])).all()
def test__array__makes_scaled_array_with_pixel_scale(self):
arr = aa.array.ones(shape_2d=(2, 2), pixel_scales=1.0)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 1.0], [1.0, 1.0]])).all()
assert (arr.in_1d == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
arr = aa.array.zeros(shape_2d=(2, 2), pixel_scales=1.0, origin=(0.0, 1.0))
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[0.0, 0.0], [0.0, 0.0]])).all()
assert (arr.in_1d == np.array([0.0, 0.0, 0.0, 0.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
def test__array__makes_scaled_sub_array_with_pixel_scale_and_sub_size(self):
arr = aa.array.ones(shape_2d=(1, 4), pixel_scales=1.0, sub_size=1)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 1.0, 1.0, 1.0]])).all()
assert (arr.in_1d == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 1
arr = aa.array.zeros(
shape_2d=(1, 1), pixel_scales=1.0, sub_size=2, origin=(0.0, 1.0)
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[0.0, 0.0], [0.0, 0.0]])).all()
assert (arr.in_1d == np.array([0.0, 0.0, 0.0, 0.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
assert arr.mask.sub_size == 2
class TestFromFits:
def test__array__makes_array_without_other_inputs(self):
arr = aa.array.from_fits(file_path=test_data_dir + "3x3_ones.fits", hdu=0)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.ones((3, 3))).all()
assert (arr.in_1d == np.ones(9)).all()
arr = aa.array.from_fits(
file_path=test_data_dir + "4x3_ones.fits", hdu=0, store_in_1d=True
)
assert type(arr) == arrays.Array
assert (arr == np.ones((12,))).all()
assert (arr.in_2d == np.ones((4, 3))).all()
assert (arr.in_1d == np.ones((12,))).all()
arr = aa.array.from_fits(
file_path=test_data_dir + "4x3_ones.fits", hdu=0, store_in_1d=False
)
assert type(arr) == arrays.Array
assert (arr == np.ones((4, 3))).all()
assert (arr.in_2d == np.ones((4, 3))).all()
assert (arr.in_1d == np.ones((12,))).all()
def test__array__makes_scaled_array_with_pixel_scale(self):
arr = aa.array.from_fits(
file_path=test_data_dir + "3x3_ones.fits", hdu=0, pixel_scales=1.0
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.ones((3, 3))).all()
assert (arr.in_1d == np.ones(9)).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
arr = aa.array.from_fits(
file_path=test_data_dir + "4x3_ones.fits",
hdu=0,
pixel_scales=1.0,
origin=(0.0, 1.0),
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.ones((4, 3))).all()
assert (arr.in_1d == np.ones((12,))).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
def test__array__makes_scaled_sub_array_with_pixel_scale_and_sub_size(self):
arr = aa.array.from_fits(
file_path=test_data_dir + "3x3_ones.fits",
hdu=0,
pixel_scales=1.0,
sub_size=1,
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.ones((3, 3))).all()
assert (arr.in_1d == np.ones(9)).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 1
arr = aa.array.from_fits(
file_path=test_data_dir + "4x3_ones.fits",
hdu=0,
pixel_scales=1.0,
sub_size=1,
origin=(0.0, 1.0),
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.ones((4, 3))).all()
assert (arr.in_1d == np.ones(12)).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
assert arr.mask.sub_size == 1
class TestMaskedArrayAPI:
class TestManual:
def test__array__makes_array_with_pixel_scale(self):
mask = aa.mask.unmasked(shape_2d=(2, 2), pixel_scales=1.0)
arr = aa.masked.array.manual_2d(array=[[1.0, 2.0], [3.0, 4.0]], mask=mask)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
mask = aa.mask.manual(
mask_2d=[[False, False], [True, False]],
pixel_scales=1.0,
origin=(0.0, 1.0),
)
arr = aa.masked.array.manual_1d(array=[1.0, 2.0, 4.0], mask=mask)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 2.0], [0.0, 4.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 4.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
mask = aa.mask.manual(
mask_2d=[[False, False], [True, False]],
pixel_scales=1.0,
origin=(0.0, 1.0),
)
arr = aa.masked.array.manual_2d(array=[[1.0, 2.0], [3.0, 4.0]], mask=mask)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 2.0], [0.0, 4.0]])).all()
assert (arr.in_1d == np.array([1.0, 2.0, 4.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
mask = aa.mask.manual(
mask_2d=[[False], [True]], pixel_scales=2.0, sub_size=2
)
arr = aa.masked.array.manual_1d(
array=[1.0, 2.0, 3.0, 4.0], mask=mask, store_in_1d=True
)
assert type(arr) == arrays.Array
assert (arr == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (
arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0], [0.0, 0.0], [0.0, 0.0]])
).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (arr.in_2d_binned == np.array([[2.5], [0.0]])).all()
assert (arr.in_1d_binned == np.array([2.5])).all()
assert arr.pixel_scales == (2.0, 2.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 2
arr = aa.masked.array.manual_1d(
array=[1.0, 2.0, 3.0, 4.0], mask=mask, store_in_1d=False
)
assert type(arr) == arrays.Array
assert (
arr == np.array([[1.0, 2.0], [3.0, 4.0], [0.0, 0.0], [0.0, 0.0]])
).all()
assert (
arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0], [0.0, 0.0], [0.0, 0.0]])
).all()
assert (arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0])).all()
assert (arr.in_2d_binned == np.array([[2.5], [0.0]])).all()
assert (arr.in_1d_binned == np.array([2.5])).all()
assert arr.pixel_scales == (2.0, 2.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 2
def test__manual_2d__exception_raised_if_input_array_is_2d_and_not_sub_shape_of_mask(
self
):
with pytest.raises(exc.ArrayException):
mask = aa.mask.unmasked(shape_2d=(2, 2), pixel_scales=1.0, sub_size=1)
aa.masked.array.manual_2d(array=[[1.0], [3.0]], mask=mask)
with pytest.raises(exc.ArrayException):
mask = aa.mask.unmasked(shape_2d=(2, 2), pixel_scales=1.0, sub_size=2)
aa.masked.array.manual_2d(array=[[1.0, 2.0], [3.0, 4.0]], mask=mask)
with pytest.raises(exc.ArrayException):
mask = aa.mask.unmasked(shape_2d=(2, 2), pixel_scales=1.0, sub_size=2)
aa.masked.array.manual_2d(
array=[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], mask=mask
)
def test__exception_raised_if_input_array_is_1d_and_not_number_of_masked_sub_pixels(
self
):
with pytest.raises(exc.ArrayException):
mask = aa.mask.manual(
mask_2d=[[False, False], [True, False]], sub_size=1
)
aa.masked.array.manual_1d(array=[1.0, 2.0, 3.0, 4.0], mask=mask)
with pytest.raises(exc.ArrayException):
mask = aa.mask.manual(
mask_2d=[[False, False], [True, False]], sub_size=1
)
aa.masked.array.manual_1d(array=[1.0, 2.0], mask=mask)
with pytest.raises(exc.ArrayException):
mask = aa.mask.manual(mask_2d=[[False, True], [True, True]], sub_size=2)
aa.masked.array.manual_1d(array=[1.0, 2.0, 4.0], mask=mask)
with pytest.raises(exc.ArrayException):
mask = aa.mask.manual(mask_2d=[[False, True], [True, True]], sub_size=2)
aa.masked.array.manual_1d(array=[1.0, 2.0, 3.0, 4.0, 5.0], mask=mask)
class TestFull:
def test__makes_array_using_mask(self):
mask = aa.mask.unmasked(shape_2d=(2, 2), pixel_scales=1.0)
arr = aa.masked.array.full(fill_value=1.0, mask=mask)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 1.0], [1.0, 1.0]])).all()
assert (arr.in_1d == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
mask = aa.mask.manual(
mask_2d=[[False, False], [True, False]],
pixel_scales=1.0,
origin=(0.0, 1.0),
)
arr = aa.masked.array.full(fill_value=2.0, mask=mask)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[2.0, 2.0], [0.0, 2.0]])).all()
assert (arr.in_1d == np.array([2.0, 2.0, 2.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
mask = aa.mask.manual(
mask_2d=[[False], [True]], pixel_scales=2.0, sub_size=2
)
arr = aa.masked.array.full(fill_value=3.0, mask=mask, store_in_1d=True)
assert type(arr) == arrays.Array
assert (arr == np.array([3.0, 3.0, 3.0, 3.0])).all()
assert (
arr.in_2d == np.array([[3.0, 3.0], [3.0, 3.0], [0.0, 0.0], [0.0, 0.0]])
).all()
assert (arr.in_1d == np.array([3.0, 3.0, 3.0, 3.0])).all()
assert (arr.in_2d_binned == np.array([[3.0], [0.0]])).all()
assert (arr.in_1d_binned == np.array([3.0])).all()
assert arr.pixel_scales == (2.0, 2.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 2
arr = aa.masked.array.full(fill_value=3.0, mask=mask, store_in_1d=False)
assert type(arr) == arrays.Array
assert (
arr == np.array([[3.0, 3.0], [3.0, 3.0], [0.0, 0.0], [0.0, 0.0]])
).all()
assert (
arr.in_2d == np.array([[3.0, 3.0], [3.0, 3.0], [0.0, 0.0], [0.0, 0.0]])
).all()
assert (arr.in_1d == np.array([3.0, 3.0, 3.0, 3.0])).all()
assert (arr.in_2d_binned == np.array([[3.0], [0.0]])).all()
assert (arr.in_1d_binned == np.array([3.0])).all()
assert arr.pixel_scales == (2.0, 2.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 2
class TestOnesZeros:
def test__makes_array_using_mask(self):
mask = aa.mask.unmasked(shape_2d=(2, 2), pixel_scales=1.0)
arr = aa.masked.array.ones(mask=mask)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[1.0, 1.0], [1.0, 1.0]])).all()
assert (arr.in_1d == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
mask = aa.mask.manual(
mask_2d=[[False, False], [True, False]],
pixel_scales=1.0,
origin=(0.0, 1.0),
)
arr = aa.masked.array.zeros(mask=mask)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.array([[0.0, 0.0], [0.0, 0.0]])).all()
assert (arr.in_1d == np.array([0.0, 0.0, 0.0])).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
mask = aa.mask.manual(
mask_2d=[[False], [True]], pixel_scales=2.0, sub_size=2
)
arr = aa.masked.array.ones(mask=mask, store_in_1d=True)
assert type(arr) == arrays.Array
assert (arr == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert (
arr.in_2d == np.array([[1.0, 1.0], [1.0, 1.0], [0.0, 0.0], [0.0, 0.0]])
).all()
assert (arr.in_1d == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert (arr.in_2d_binned == np.array([[1.0], [0.0]])).all()
assert (arr.in_1d_binned == np.array([1.0])).all()
assert arr.pixel_scales == (2.0, 2.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 2
arr = aa.masked.array.ones(mask=mask, store_in_1d=False)
assert type(arr) == arrays.Array
assert (
arr == np.array([[1.0, 1.0], [1.0, 1.0], [0.0, 0.0], [0.0, 0.0]])
).all()
assert (
arr.in_2d == np.array([[1.0, 1.0], [1.0, 1.0], [0.0, 0.0], [0.0, 0.0]])
).all()
assert (arr.in_1d == np.array([1.0, 1.0, 1.0, 1.0])).all()
assert (arr.in_2d_binned == np.array([[1.0], [0.0]])).all()
assert (arr.in_1d_binned == np.array([1.0])).all()
assert arr.pixel_scales == (2.0, 2.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 2
class TestFromFits:
def test__array_from_fits_uses_mask(self):
mask = aa.mask.unmasked(shape_2d=(3, 3), pixel_scales=1.0)
arr = aa.masked.array.from_fits(
file_path=test_data_dir + "3x3_ones.fits", hdu=0, mask=mask
)
assert type(arr) == arrays.Array
assert (arr.in_2d == np.ones((3, 3))).all()
assert (arr.in_1d == np.ones(9)).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 0.0)
assert arr.mask.sub_size == 1
mask = aa.mask.manual(
[
[False, False, False],
[False, False, False],
[True, False, True],
[False, False, False],
],
pixel_scales=1.0,
origin=(0.0, 1.0),
)
arr = aa.masked.array.from_fits(
file_path=test_data_dir + "4x3_ones.fits",
hdu=0,
mask=mask,
store_in_1d=True,
)
assert type(arr) == arrays.Array
assert (
arr == np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
).all()
assert (
arr.in_2d
== np.array(
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0], [1.0, 1.0, 1.0]]
)
).all()
assert (
arr.in_1d
== np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
assert arr.mask.sub_size == 1
arr = aa.masked.array.from_fits(
file_path=test_data_dir + "4x3_ones.fits",
hdu=0,
mask=mask,
store_in_1d=False,
)
assert type(arr) == arrays.Array
assert (
arr
== np.array(
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0], [1.0, 1.0, 1.0]]
)
).all()
assert (
arr.in_2d
== np.array(
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0], [1.0, 1.0, 1.0]]
)
).all()
assert (
arr.in_1d
== np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.origin == (0.0, 1.0)
assert arr.mask.sub_size == 1
class TestArray:
class TestConstructorMethods:
def test__constructor_class_method_in_2d__store_in_1d(self):
arr = arrays.Array.manual_2d(
array=np.ones((3, 3)),
sub_size=1,
pixel_scales=(1.0, 1.0),
store_in_1d=True,
)
assert (arr == np.ones((9,))).all()
assert (arr.in_1d == np.ones((9,))).all()
assert (arr.in_2d == np.ones((3, 3))).all()
assert (arr.in_1d_binned == np.ones((9,))).all()
assert (arr.in_2d_binned == np.ones((3, 3))).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.central_pixel_coordinates == (1.0, 1.0)
assert arr.geometry.shape_2d_scaled == pytest.approx((3.0, 3.0))
assert arr.geometry.scaled_maxima == (1.5, 1.5)
assert arr.geometry.scaled_minima == (-1.5, -1.5)
arr = arrays.Array.manual_2d(
array=np.ones((4, 4)),
sub_size=2,
pixel_scales=(0.1, 0.1),
store_in_1d=True,
)
assert (arr == np.ones((16,))).all()
assert (arr.in_1d == np.ones((16,))).all()
assert (arr.in_2d == np.ones((4, 4))).all()
assert (arr.in_1d_binned == np.ones((4,))).all()
assert (arr.in_2d_binned == np.ones((2, 2))).all()
assert arr.pixel_scales == (0.1, 0.1)
assert arr.geometry.central_pixel_coordinates == (0.5, 0.5)
assert arr.geometry.shape_2d_scaled == pytest.approx((0.2, 0.2))
assert arr.geometry.scaled_maxima == pytest.approx((0.1, 0.1), 1e-4)
assert arr.geometry.scaled_minima == pytest.approx((-0.1, -0.1), 1e-4)
arr = arrays.Array.manual_2d(
array=np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]),
pixel_scales=(0.1, 0.1),
sub_size=2,
origin=(1.0, 1.0),
store_in_1d=True,
)
assert (arr == np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])).all()
assert arr.shape_2d == (2, 1)
assert arr.sub_shape_2d == (4, 2)
assert (
arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
).all()
assert (
arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert arr.in_2d_binned.shape == (2, 1)
assert (arr.in_1d_binned == np.array([2.5, 6.5])).all()
assert (arr.in_2d_binned == np.array([[2.5], [6.5]])).all()
assert arr.pixel_scales == (0.1, 0.1)
assert arr.geometry.central_pixel_coordinates == (0.5, 0.0)
assert arr.geometry.shape_2d_scaled == pytest.approx((0.2, 0.1))
assert arr.geometry.scaled_maxima == pytest.approx((1.1, 1.05), 1e-4)
assert arr.geometry.scaled_minima == pytest.approx((0.9, 0.95), 1e-4)
arr = arrays.Array.manual_2d(
array=np.ones((3, 3)),
pixel_scales=(2.0, 1.0),
sub_size=1,
origin=(-1.0, -2.0),
store_in_1d=True,
)
assert arr == pytest.approx(np.ones((9,)), 1e-4)
assert arr.in_1d == pytest.approx(np.ones((9,)), 1e-4)
assert arr.in_2d == pytest.approx(np.ones((3, 3)), 1e-4)
assert arr.pixel_scales == (2.0, 1.0)
assert arr.geometry.central_pixel_coordinates == (1.0, 1.0)
assert arr.geometry.shape_2d_scaled == pytest.approx((6.0, 3.0))
assert arr.geometry.origin == (-1.0, -2.0)
assert arr.geometry.scaled_maxima == pytest.approx((2.0, -0.5), 1e-4)
assert arr.geometry.scaled_minima == pytest.approx((-4.0, -3.5), 1e-4)
def test__constructor_class_method_in_1d__store_in_1d(self):
arr = arrays.Array.manual_1d(
array=np.ones((9,)),
shape_2d=(3, 3),
pixel_scales=(2.0, 1.0),
sub_size=1,
origin=(-1.0, -2.0),
store_in_1d=True,
)
assert arr == pytest.approx(np.ones((9,)), 1e-4)
assert arr.in_1d == pytest.approx(np.ones((9,)), 1e-4)
assert arr.in_2d == pytest.approx(np.ones((3, 3)), 1e-4)
assert arr.pixel_scales == (2.0, 1.0)
assert arr.geometry.central_pixel_coordinates == (1.0, 1.0)
assert arr.geometry.shape_2d_scaled == pytest.approx((6.0, 3.0))
assert arr.geometry.origin == (-1.0, -2.0)
assert arr.geometry.scaled_maxima == pytest.approx((2.0, -0.5), 1e-4)
assert arr.geometry.scaled_minima == pytest.approx((-4.0, -3.5), 1e-4)
def test__constructor_class_method_in_2d__store_in_2d(self):
arr = arrays.Array.manual_2d(
array=np.ones((3, 3)),
sub_size=1,
pixel_scales=(1.0, 1.0),
store_in_1d=False,
)
assert (arr == np.ones((3, 3))).all()
assert (arr.in_1d == np.ones((9,))).all()
assert (arr.in_2d == np.ones((3, 3))).all()
assert (arr.in_1d_binned == np.ones((9,))).all()
assert (arr.in_2d_binned == np.ones((3, 3))).all()
assert arr.pixel_scales == (1.0, 1.0)
assert arr.geometry.central_pixel_coordinates == (1.0, 1.0)
assert arr.geometry.shape_2d_scaled == pytest.approx((3.0, 3.0))
assert arr.geometry.scaled_maxima == (1.5, 1.5)
assert arr.geometry.scaled_minima == (-1.5, -1.5)
arr = arrays.Array.manual_2d(
array=np.ones((4, 4)),
sub_size=2,
pixel_scales=(0.1, 0.1),
store_in_1d=False,
)
assert (arr == np.ones((4, 4))).all()
assert (arr.in_1d == np.ones((16,))).all()
assert (arr.in_2d == np.ones((4, 4))).all()
assert (arr.in_1d_binned == np.ones((4,))).all()
assert (arr.in_2d_binned == np.ones((2, 2))).all()
assert arr.pixel_scales == (0.1, 0.1)
assert arr.geometry.central_pixel_coordinates == (0.5, 0.5)
assert arr.geometry.shape_2d_scaled == pytest.approx((0.2, 0.2))
assert arr.geometry.scaled_maxima == pytest.approx((0.1, 0.1), 1e-4)
assert arr.geometry.scaled_minima == pytest.approx((-0.1, -0.1), 1e-4)
arr = arrays.Array.manual_2d(
array=np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]),
pixel_scales=(0.1, 0.1),
sub_size=2,
origin=(1.0, 1.0),
store_in_1d=False,
)
assert (
arr == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert arr.shape_2d == (2, 1)
assert arr.sub_shape_2d == (4, 2)
assert (
arr.in_1d == np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
).all()
assert (
arr.in_2d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert arr.in_2d_binned.shape == (2, 1)
assert (arr.in_1d_binned == np.array([2.5, 6.5])).all()
assert (arr.in_2d_binned == np.array([[2.5], [6.5]])).all()
assert arr.pixel_scales == (0.1, 0.1)
assert arr.geometry.central_pixel_coordinates == (0.5, 0.0)
assert arr.geometry.shape_2d_scaled == pytest.approx((0.2, 0.1))
assert arr.geometry.scaled_maxima == pytest.approx((1.1, 1.05), 1e-4)
assert arr.geometry.scaled_minima == pytest.approx((0.9, 0.95), 1e-4)
arr = arrays.Array.manual_2d(
array=np.ones((3, 3)),
pixel_scales=(2.0, 1.0),
sub_size=1,
origin=(-1.0, -2.0),
store_in_1d=False,
)
assert arr == pytest.approx(np.ones((3, 3)), 1e-4)
assert arr.in_1d == pytest.approx(np.ones((9,)), 1e-4)
assert arr.in_2d == pytest.approx(np.ones((3, 3)), 1e-4)
assert arr.pixel_scales == (2.0, 1.0)
assert arr.geometry.central_pixel_coordinates == (1.0, 1.0)
assert arr.geometry.shape_2d_scaled == pytest.approx((6.0, 3.0))
assert arr.geometry.origin == (-1.0, -2.0)
assert arr.geometry.scaled_maxima == pytest.approx((2.0, -0.5), 1e-4)
assert arr.geometry.scaled_minima == pytest.approx((-4.0, -3.5), 1e-4)
def test__constructor_class_method_in_1d__store_in_2d(self):
arr = arrays.Array.manual_1d(
array=np.ones((9,)),
shape_2d=(3, 3),
pixel_scales=(2.0, 1.0),
sub_size=1,
origin=(-1.0, -2.0),
store_in_1d=False,
)
assert arr == pytest.approx(np.ones((3, 3)), 1e-4)
assert arr.in_1d == pytest.approx(np.ones((9,)), 1e-4)
assert arr.in_2d == pytest.approx(np.ones((3, 3)), 1e-4)
assert arr.pixel_scales == (2.0, 1.0)
assert arr.geometry.central_pixel_coordinates == (1.0, 1.0)
assert arr.geometry.shape_2d_scaled == pytest.approx((6.0, 3.0))
assert arr.geometry.origin == (-1.0, -2.0)
assert arr.geometry.scaled_maxima == pytest.approx((2.0, -0.5), 1e-4)
assert arr.geometry.scaled_minima == pytest.approx((-4.0, -3.5), 1e-4)
class TestNewArrays:
def test__pad__compare_to_array_util(self):
array_2d = np.ones((5, 5))
array_2d[2, 2] = 2.0
arr = arrays.Array.manual_2d(
array=array_2d, sub_size=1, pixel_scales=(1.0, 1.0)
)
arr = arr.resized_from_new_shape(new_shape=(7, 7))
arr_resized_manual = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 2.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
)
assert type(arr) == arrays.Array
assert (arr.in_2d == arr_resized_manual).all()
assert arr.mask.pixel_scales == (1.0, 1.0)
def test__trim__compare_to_array_util(self):
array_2d = np.ones((5, 5))
array_2d[2, 2] = 2.0
arr = arrays.Array.manual_2d(
array=array_2d, sub_size=1, pixel_scales=(1.0, 1.0)
)
arr = arr.resized_from_new_shape(new_shape=(3, 3))
arr_resized_manual = np.array(
[[1.0, 1.0, 1.0], [1.0, 2.0, 1.0], [1.0, 1.0, 1.0]]
)
assert type(arr) == arrays.Array
assert (arr.in_2d == arr_resized_manual).all()
assert arr.mask.pixel_scales == (1.0, 1.0)
def test__kernel_trim__trim_edges_where_extra_psf_blurring_is_performed(self):
array_2d = np.ones((5, 5))
array_2d[2, 2] = 2.0
arr = arrays.Array.manual_2d(
array=array_2d, sub_size=1, pixel_scales=(1.0, 1.0)
)
new_arr = arr.trimmed_from_kernel_shape(kernel_shape_2d=(3, 3))
assert type(new_arr) == arrays.Array
assert (
new_arr.in_2d
== np.array([[1.0, 1.0, 1.0], [1.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
).all()
assert new_arr.mask.pixel_scales == (1.0, 1.0)
new_arr = arr.trimmed_from_kernel_shape(kernel_shape_2d=(5, 5))
assert type(new_arr) == arrays.Array
assert (new_arr.in_2d == np.array([[2.0]])).all()
assert new_arr.mask.pixel_scales == (1.0, 1.0)
array_2d = np.ones((9, 9))
array_2d[4, 4] = 2.0
arr = arrays.Array.manual_2d(
array=array_2d, sub_size=1, pixel_scales=(1.0, 1.0)
)
new_arr = arr.trimmed_from_kernel_shape(kernel_shape_2d=(7, 7))
assert type(new_arr) == arrays.Array
assert (
new_arr.in_2d
== np.array([[1.0, 1.0, 1.0], [1.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
).all()
assert new_arr.mask.pixel_scales == (1.0, 1.0)
def test__zoomed__2d_array_zoomed__uses_the_limits_of_the_mask(self):
array_2d = np.array(
[
[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
[13.0, 14.0, 15.0, 16.0],
]
)
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, True, True, True],
[True, False, False, True],
[True, False, False, True],
[True, True, True, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
arr_masked = aa.masked.array.manual_2d(array=array_2d, mask=mask)
arr_zoomed = arr_masked.zoomed_around_mask(buffer=0)
assert (arr_zoomed.in_2d == np.array([[6.0, 7.0], [10.0, 11.0]])).all()
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, True, True, True],
[True, False, False, False],
[True, False, False, False],
[True, True, True, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
arr_masked = aa.masked.array.manual_2d(array=array_2d, mask=mask)
arr_zoomed = arr_masked.zoomed_around_mask(buffer=0)
assert (
arr_zoomed.in_2d == np.array([[6.0, 7.0, 8.0], [10.0, 11.0, 12.0]])
).all()
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, True, True, True],
[True, False, False, True],
[True, False, False, True],
[True, False, False, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
arr_masked = aa.masked.array.manual_2d(array=array_2d, mask=mask)
arr_zoomed = arr_masked.zoomed_around_mask(buffer=0)
assert (
arr_zoomed.in_2d == np.array([[6.0, 7.0], [10.0, 11.0], [14.0, 15.0]])
).all()
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, True, True, True],
[True, False, False, True],
[False, False, False, True],
[True, True, True, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
arr_masked = aa.masked.array.manual_2d(array=array_2d, mask=mask)
arr_zoomed = arr_masked.zoomed_around_mask(buffer=0)
assert (
arr_zoomed.in_2d == np.array([[0.0, 6.0, 7.0], [9.0, 10.0, 11.0]])
).all()
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, False, True, True],
[True, False, False, True],
[True, False, False, True],
[True, True, True, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
arr_masked = aa.masked.array.manual_2d(array=array_2d, mask=mask)
arr_zoomed = arr_masked.zoomed_around_mask(buffer=0)
assert (
arr_zoomed.in_2d == np.array([[2.0, 0.0], [6.0, 7.0], [10.0, 11.0]])
).all()
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, True, True, True],
[True, False, False, True],
[True, False, False, True],
[True, True, True, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
arr_masked = aa.masked.array.manual_2d(array=array_2d, mask=mask)
arr_zoomed = arr_masked.zoomed_around_mask(buffer=1)
assert (
arr_zoomed.in_2d
== np.array(
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 6.0, 7.0, 0.0],
[0.0, 10.0, 11.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
]
)
).all()
def test__zoomed__array_extent__uses_the_limits_of_the_unzoomed_mask(self):
array_2d = np.array(
[
[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
[13.0, 14.0, 15.0, 16.0],
]
)
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, True, True, False],
[True, False, False, True],
[True, False, False, True],
[True, True, True, True],
]
),
pixel_scales=(1.0, 2.0),
sub_size=1,
)
arr_masked = aa.masked.array.manual_2d(array=array_2d, mask=mask)
extent = arr_masked.extent_of_zoomed_array(buffer=1)
assert extent == pytest.approx(np.array([-4.0, 6.0, -2.0, 3.0]), 1.0e-4)
def test__binned_up__compare_all_extract_methods_to_array_util(self):
array_2d = np.array(
[
[1.0, 6.0, 3.0, 7.0, 3.0, 2.0],
[2.0, 5.0, 3.0, 7.0, 7.0, 7.0],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
]
)
arr = arrays.Array.manual_2d(
array=array_2d, sub_size=1, pixel_scales=(0.1, 0.1)
)
arr_binned_util = aa.util.binning.bin_array_2d_via_mean(
array_2d=array_2d, bin_up_factor=4
)
arr_binned = arr.binned_from_bin_up_factor(bin_up_factor=4, method="mean")
assert (arr_binned.in_2d == arr_binned_util).all()
assert arr_binned.pixel_scales == (0.4, 0.4)
arr_binned_util = aa.util.binning.bin_array_2d_via_quadrature(
array_2d=array_2d, bin_up_factor=4
)
arr_binned = arr.binned_from_bin_up_factor(
bin_up_factor=4, method="quadrature"
)
assert (arr_binned.in_2d == arr_binned_util).all()
assert arr_binned.pixel_scales == (0.4, 0.4)
arr_binned_util = aa.util.binning.bin_array_2d_via_sum(
array_2d=array_2d, bin_up_factor=4
)
arr_binned = arr.binned_from_bin_up_factor(bin_up_factor=4, method="sum")
assert (arr_binned.in_2d == arr_binned_util).all()
assert arr_binned.pixel_scales == (0.4, 0.4)
def test__binned_up__invalid_method__raises_exception(self):
array_2d = np.array(
[
[1.0, 6.0, 3.0, 7.0, 3.0, 2.0],
[2.0, 5.0, 3.0, 7.0, 7.0, 7.0],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
]
)
array_2d = arrays.Array.manual_2d(
array=array_2d, sub_size=1, pixel_scales=(0.1, 0.1)
)
with pytest.raises(exc.ArrayException):
array_2d.binned_from_bin_up_factor(bin_up_factor=4, method="wrong")
class TestOutputToFits:
def test__output_to_fits(self):
arr = aa.array.from_fits(file_path=test_data_dir + "3x3_ones.fits", hdu=0)
output_data_dir = "{}/../test_files/array/output_test/".format(
os.path.dirname(os.path.realpath(__file__))
)
if os.path.exists(output_data_dir):
shutil.rmtree(output_data_dir)
os.makedirs(output_data_dir)
arr.output_to_fits(file_path=output_data_dir + "array.fits")
array_from_out = aa.array.from_fits(
file_path=output_data_dir + "array.fits", hdu=0
)
assert (array_from_out.in_2d == np.ones((3, 3))).all()
def test__output_to_fits__shapes_of_arrays_are_2d(self):
arr = aa.array.from_fits(file_path=test_data_dir + "3x3_ones.fits", hdu=0)
output_data_dir = "{}/../test_files/array/output_test/".format(
os.path.dirname(os.path.realpath(__file__))
)
if os.path.exists(output_data_dir):
shutil.rmtree(output_data_dir)
os.makedirs(output_data_dir)
arr.output_to_fits(file_path=output_data_dir + "array.fits")
array_from_out = aa.util.array.numpy_array_2d_from_fits(
file_path=output_data_dir + "array.fits", hdu=0
)
assert (array_from_out == np.ones((3, 3))).all()
mask = aa.mask.unmasked(shape_2d=(3, 3), pixel_scales=0.1)
masked_array = aa.masked.array(array=arr, mask=mask)
masked_array.output_to_fits(file_path=output_data_dir + "masked_array.fits")
masked_array_from_out = aa.util.array.numpy_array_2d_from_fits(
file_path=output_data_dir + "masked_array.fits", hdu=0
)
assert (masked_array_from_out == np.ones((3, 3))).all()
|
StarcoderdataPython
|
1724039
|
# Copyright 2019-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# HPCTools Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import sys
import reframe as rfm
import reframe.utility.sanity as sn
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'../common'))) # noqa: E402
import sphexa.hooks as hooks
# {{{ class SphExa_STAT_Check
@rfm.simple_test
class SphExa_STAT_Check(rfm.RegressionTest, hooks.setup_pe, hooks.setup_code):
# {{{
'''
This class runs the test code with Cray stat
'''
# }}}
steps = parameter([2]) # will hang rank0 at step0
compute_node = parameter([1])
np_per_c = parameter([1e3])
debug_flags = variable(bool, value=True)
def __init__(self):
# {{{ pe
self.descr = 'Tool validation'
self.valid_prog_environs = [
'PrgEnv-gnu', 'cpeGNU'
]
self.valid_systems = [
'dom:mc', 'dom:gpu', 'daint:mc', 'daint:gpu',
'eiger:mc', 'pilatus:mc'
]
self.tool = 'stat-cl'
self.modules = ['cray-stat', 'cray-cti']
self.maintainers = ['JG']
self.tags = {'sph', 'hpctools', 'cpu', 'craype', 'debugging'}
# }}}
# {{{ compile
self.testname = 'sedov'
self.executable = 'mpi+omp'
# re_ver_1 = 'STAT_VERSION1=$'
# re_ver_2 = 'STAT_VERSION2=$'
version_rpt = 'version.rpt'
which_rpt = 'which.rpt'
cs = self.current_system.name
if cs not in {'pilatus', 'eiger'}:
self.prebuild_cmds += [
                # --- check tool version
f'echo STAT_VERSION1=$STAT_VERSION > {version_rpt}',
f'echo STAT_VERSION2=`STATbin --version` >> {version_rpt}',
]
else:
self.prebuild_cmds += [
                # --- check tool version
f'echo STAT_VERSION1=$STAT_LEVEL > {version_rpt}',
f'echo STAT_VERSION2=`STATbin --version` >> {version_rpt}',
]
self.prebuild_cmds += [
f'STATbin -V >> {version_rpt}',
f'which {self.tool} > {which_rpt}',
]
# {{{ run
self.time_limit = '10m'
# }}}
# {{{ sanity
# TODO: regex = (
# r'function="(?P<fun>.*)", source="(?P<filename>.*)",'
# r' line=":(?P<ll>\d+)"'
# )
# + cuda
# + largescale
self.sanity_patterns = sn.all([
# check the job output:
sn.assert_found(r'Starting main loop', self.stdout),
# check the tool output:
sn.assert_not_found('not found', which_rpt),
sn.assert_not_found('ERROR', self.stdout),
# <LMON FE API> (ERROR): read_lmonp_msgheader failed while
# attempting to receive continue launch message from back end
sn.assert_found(r'STAT started', self.stdout),
sn.assert_found(r'Attaching to job launcher', self.stdout),
sn.assert_found(r'launching tool daemons', self.stdout),
sn.assert_found(r'Results written to', self.stdout),
])
# }}}
# {{{ performance
# see common/sphexa/hooks.py
# }}}
# {{{ hooks
# {{{ set_hang
@run_before('compile')
def set_hang(self):
source_file = 'include/sph/totalEnergy.hpp'
die = '60'
sed_header = (r'"s@USE_MPI@USE_MPI\n#include <thread>@"')
sed_hang = (
r'"/MPI_Allreduce(MPI_IN_PLACE, &einttmp, 1, MPI_DOUBLE, MPI_SUM, '
r'MPI_COMM_WORLD);/a'
r'int mpirank; MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);'
r'if (mpirank == 0 && d.iteration == 0) '
r'{std::this_thread::sleep_for(std::chrono::seconds('f'{die}));'
r'}"'
)
self.prebuild_cmds += [
f'# --- make the code hang:',
f'sed -i {sed_header} {source_file}',
f'sed -i {sed_hang} {source_file}',
]
# }}}
# {{{ set_tool
@run_before('run')
def set_tool(self):
self.executable_opts += ['& #']
# FIXME: will default to: '& # -n 50 -s 2'
ssh_cmd = 'ssh -o "StrictHostKeyChecking no"'
self.postrun_cmds = [
# slurm only
'nid=`SQUEUE_FORMAT=%.9B squeue --noheader -j $SLURM_JOBID`',
f'pidsrun=`{ssh_cmd} $nid "ps -hxo pid,cmd |grep -m1 srun" `',
'pid=${pidsrun% srun*}',
'echo "# nid=$nid pid=$pid"',
'sleep 1',
f'tool=`which {self.tool}`',
'echo "tool=$tool"',
f'{ssh_cmd} $nid "cd {self.stagedir} ;'
f'$tool -i -j $SLURM_JOBID $pid"',
'wait',
'echo done'
]
# }}}
# }}}
# }}}
|
StarcoderdataPython
|
3363129
|
<reponame>Rayckey/motion_imitation<gh_stars>1-10
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from envs import locomotion_gym_env
from envs import locomotion_gym_config
from envs.env_wrappers import imitation_wrapper_env
from envs.env_wrappers import observation_dictionary_to_array_wrapper
from envs.env_wrappers import trajectory_generator_wrapper_env
from envs.env_wrappers import simple_openloop
from envs.env_wrappers import simple_TG_group
from envs.env_wrappers import imitation_task
from envs.sensors import environment_sensors
from envs.sensors import sensor_wrappers
from envs.sensors import robot_sensors
from envs.utilities import controllable_env_randomizer_from_config
from robots import laikago
import numpy as np
def build_imitation_env(motion_files, num_parallel_envs, mode,
enable_randomizer, enable_rendering,
action_lim = 0.2, hist = 3,
curr_steps = 30000000, path = 0):
assert len(motion_files) > 0
curriculum_episode_length_start = 20
curriculum_episode_length_end = 600
sim_params = locomotion_gym_config.SimulationParameters()
sim_params.enable_rendering = enable_rendering
gym_config = locomotion_gym_config.LocomotionGymConfig(simulation_parameters=sim_params)
robot_class = laikago.Laikago
# sensors = [
# sensor_wrappers.HistoricSensorWrapper(
# wrapped_sensor=robot_sensors.MotorAngleSensor(num_motors=laikago.NUM_MOTORS), num_history=3),
# sensor_wrappers.HistoricSensorWrapper(wrapped_sensor=robot_sensors.IMUSensor(), num_history=3),
# sensor_wrappers.HistoricSensorWrapper(
# wrapped_sensor=environment_sensors.LastActionSensor(num_actions=laikago.NUM_MOTORS), num_history=3)
# ]
sensors = [
sensor_wrappers.HistoricSensorWrapper(wrapped_sensor=robot_sensors.BasePositionSensor(), num_history=hist),
sensor_wrappers.HistoricSensorWrapper(wrapped_sensor=robot_sensors.IMUSensor(['Y', 'R', 'dR', 'P', 'dP']), num_history=hist),
sensor_wrappers.HistoricSensorWrapper(wrapped_sensor=robot_sensors.MotorAngleSensor(num_motors=laikago.NUM_MOTORS), num_history=hist),
sensor_wrappers.HistoricSensorWrapper(wrapped_sensor=environment_sensors.LastActionSensor(num_actions=laikago.NUM_MOTORS), num_history=hist)
]
# Trajectory generator (TG), passed to TrajectoryGeneratorWrapperEnv below
trajectory_generator = simple_TG_group.SimpleTGGroup(
action_limit=action_lim,
init_lg_param=None, is_touting=2, init_f_tg=2)
init_lg_param = trajectory_generator.init_lg_param
# print(" initial tg parameters is this")
# print(init_lg_param)
init_lg_param = np.concatenate([np.zeros([12]), init_lg_param[1:]])
tg_init_position = trajectory_generator.get_action(current_time=0, input_action=init_lg_param)
task = imitation_task.ImitationTask(ref_motion_filenames=motion_files,
enable_cycle_sync=True,
tar_frame_steps=[1, 2, 10, 30],
ref_state_init_prob=0.9,
warmup_time=0.25,tg_init_position=tg_init_position,
path = path)
randomizers = []
if enable_randomizer:
randomizer = controllable_env_randomizer_from_config.ControllableEnvRandomizerFromConfig(verbose=False)
randomizers.append(randomizer)
env = locomotion_gym_env.LocomotionGymEnv(gym_config=gym_config, robot_class=robot_class,
env_randomizers=randomizers, robot_sensors=sensors, task=task)
env = observation_dictionary_to_array_wrapper.ObservationDictionaryToArrayWrapper(env)
env = trajectory_generator_wrapper_env.TrajectoryGeneratorWrapperEnv(env,
trajectory_generator=trajectory_generator)
if mode == "test":
curriculum_episode_length_start = curriculum_episode_length_end
env = imitation_wrapper_env.ImitationWrapperEnv(env,
episode_length_start=curriculum_episode_length_start,
episode_length_end=curriculum_episode_length_end,
curriculum_steps=curr_steps,
num_parallel_envs=num_parallel_envs)
return env
def build_other_env(motion_files, num_parallel_envs, mode,
enable_randomizer, enable_rendering, action_lim = 0.2, curr_steps = 30000000):
assert len(motion_files) > 0
curriculum_episode_length_start = 20
curriculum_episode_length_end = 600
sim_params = locomotion_gym_config.SimulationParameters()
sim_params.enable_rendering = enable_rendering
gym_config = locomotion_gym_config.LocomotionGymConfig(simulation_parameters=sim_params)
robot_class = laikago.Laikago
# sensors = [
# sensor_wrappers.HistoricSensorWrapper(
# wrapped_sensor=robot_sensors.MotorAngleSensor(num_motors=laikago.NUM_MOTORS), num_history=3),
# sensor_wrappers.HistoricSensorWrapper(wrapped_sensor=robot_sensors.IMUSensor(), num_history=3),
# sensor_wrappers.HistoricSensorWrapper(
# wrapped_sensor=environment_sensors.LastActionSensor(num_actions=laikago.NUM_MOTORS), num_history=3)
# ]
sensors = [
sensor_wrappers.HistoricSensorWrapper(wrapped_sensor=robot_sensors.BasePositionSensor(), num_history=3),
sensor_wrappers.HistoricSensorWrapper(wrapped_sensor=robot_sensors.IMUSensor(), num_history=3),
sensor_wrappers.HistoricSensorWrapper(
wrapped_sensor=robot_sensors.MotorAngleSensor(num_motors=laikago.NUM_MOTORS), num_history=3),
sensor_wrappers.HistoricSensorWrapper(
wrapped_sensor=environment_sensors.LastActionSensor(num_actions=laikago.NUM_MOTORS), num_history=3)
]
# Trajectory generator (TG), passed to TrajectoryGeneratorWrapperEnv below
trajectory_generator = simple_TG_group.SimpleTGGroup(
action_limit=action_lim,
init_lg_param=None, is_touting=2, init_f_tg=2)
init_lg_param = trajectory_generator.init_lg_param
# print(" initial tg parameters is this")
# print(init_lg_param)
init_lg_param = np.concatenate([np.zeros([12]), init_lg_param[1:]])
tg_init_position = trajectory_generator.get_action(current_time=0, input_action=init_lg_param)
task = imitation_task.ImitationTask(ref_motion_filenames=motion_files,
enable_cycle_sync=True,
tar_frame_steps=[1, 2, 10, 30],
ref_state_init_prob=0.9,
warmup_time=0.25,tg_init_position=tg_init_position)
randomizers = []
if enable_randomizer:
randomizer = controllable_env_randomizer_from_config.ControllableEnvRandomizerFromConfig(verbose=False)
randomizers.append(randomizer)
env = locomotion_gym_env.LocomotionGymEnv(gym_config=gym_config, robot_class=robot_class,
env_randomizers=randomizers, robot_sensors=sensors, task=task)
env = observation_dictionary_to_array_wrapper.ObservationDictionaryToArrayWrapper(env)
env = trajectory_generator_wrapper_env.TrajectoryGeneratorWrapperEnv(env,
trajectory_generator=trajectory_generator)
if mode == "test":
curriculum_episode_length_start = curriculum_episode_length_end
env = imitation_wrapper_env.ImitationWrapperEnv(env,
episode_length_start=curriculum_episode_length_start,
episode_length_end=curriculum_episode_length_end,
curriculum_steps=curr_steps,
num_parallel_envs=num_parallel_envs)
return env
|
StarcoderdataPython
|
74020
|
import time
import dweepy
import RPi.GPIO as GPIO
KEY = 'tweet_about_me'
OUTPUT_PIN = 18
OUTPUT_DURATION = 10
GPIO.setmode(GPIO.BCM)
GPIO.setup(OUTPUT_PIN, GPIO.OUT)
while True:
try:
for dweet in dweepy.listen_for_dweets_from(KEY):
print('Tweet: ' + dweet['content']['text'])
GPIO.output(OUTPUT_PIN, True)
time.sleep(OUTPUT_DURATION)
GPIO.output(OUTPUT_PIN, False)
except Exception:
pass
|
StarcoderdataPython
|
86706
|
from setuptools import setup, find_packages
setup(
name='GitSpammer',
version='0.1.0',
packages=find_packages(),
install_requires=[
'Click'
],
entry_points={
'console_scripts': [
'gitspam = gitspammer.entry:cli'
],
},
description="Experiment with Git's features. Create as many test commits as necessary. And branches too."
)
|
StarcoderdataPython
|
1619394
|
<gh_stars>0
class GiteeUser:
def __init__(self):
self.id = None
self.giteeLogin = None
self.userLogin = None
def from_record(self, record):
self.id = record[0]
self.giteeLogin = record[1]
self.userLogin = record[2]
|
StarcoderdataPython
|
4808377
|
<reponame>drevicko/senpy
import random
from senpy.plugins import EmotionPlugin
from senpy.models import EmotionSet, Emotion
class RmoRandPlugin(EmotionPlugin):
def analyse_entry(self, entry, params):
category = "emoml:big6happiness"
number = max(-1, min(1, random.gauss(0, 0.5)))
if number > 0:
category = "emoml:big6anger"
emotionSet = EmotionSet()
emotion = Emotion({"onyx:hasEmotionCategory": category})
emotionSet.onyx__hasEmotion.append(emotion)
emotionSet.prov__wasGeneratedBy = self.id
entry.emotions.append(emotionSet)
yield entry
|
StarcoderdataPython
|
4830547
|
from django.contrib import admin
from aditamento.militaries.models import Military
class MilitaryModelAdmin(admin.ModelAdmin):
list_display = ('name', 'cpf')
search_fields = ('name', 'cpf')
admin.site.register(Military, MilitaryModelAdmin)
|
StarcoderdataPython
|
1723432
|
import urllib.request
import json
#from PIL import Image
from Kaspa.modules.abstract_modules.abstractModule import AbstractModule
from Kaspa.modules.extension_modules.knowledge_module.knowledgeModuleDe import KnowledgeModuleDe
from Kaspa.modules.extension_modules.knowledge_module.knowledgeModuleEn import KnowledgeModuleEn
from Kaspa.config import Config
from Kaspa.modules.exceptions.impossibleActionError import ImpossibleActionError
from Kaspa.modules.exceptions.moduleError import ModuleError
import Kaspa.modules.extension_modules.helper.comandOps as Co
import wikipedia as wiki
from wikipedia.exceptions import DisambiguationError
class KnowledgeModuleMain(AbstractModule):
module_name = "Wolfram Alpha"
config_parameters = {"api_key" : "This is the Wolfram Alpha Api Key. You can get it for free from here: \n"
"http://products.wolframalpha.com/api/"}
api_key = None
url = "http://api.wolframalpha.com/v1/conversation.jsp"
conversationID = None
s = None
def __init__(self):
super(KnowledgeModuleMain, self).__init__()
self.add_submodule(KnowledgeModuleDe())
self.add_submodule(KnowledgeModuleEn())
def configure(self):
self.api_key = Config.get_instance().get('wolfram alpha', 'api_key')
@staticmethod
def convert_query(orig_query):
new_query = orig_query.replace(" ", "+")
return new_query
def get_answer(self, query):
query = self.convert_query(query)
try:
url = "http://api.wolframalpha.com/v1/result?appid=" + self.api_key + "&i=" + query
response = urllib.request.urlopen(url).read().decode('utf-8')
except:
# TODO localize
raise ImpossibleActionError("nope.")
return response
def get_wolfram_alpha_answer(self, query):
query = self.convert_query(query)
if self.conversationID is None:
url = self.url + "?appid=" + self.api_key + "&i=" + query
else:
url = self.url + "?appid=" + self.api_key + "&conversationid=" + self.conversationID + "&i=" + query
if self.s is not None:
url = url + "&s=" + self.s
response = urllib.request.urlopen(url).read().decode('utf-8')
result = json.loads(response)
if "error" in result.keys():
# reset values
self.url = "http://api.wolframalpha.com/v1/conversation.jsp"
self.conversationID = None
self.s = None
raise ImpossibleActionError("even Wolfram Alpha cannot help")
self.url = "http://" + result["host"] + "/api/v1/conversation.jsp"
self.conversationID = result["conversationID"]
if "s" in result.keys():
self.s = result["s"]
else:
self.s = None
return Co.get_sentences(result["result"], 1)
def get_picture(self, query):
query = self.convert_query(query)
url = "https://api.wolframalpha.com/v1/simple?i=" + query + "%3F&appid=" + self.api_key
img = urllib.request.urlopen(url).read()
f = open('/tmp/output', 'wb')
f.write(img)
f.close()
return '/tmp/output'
def get_wikipedia_description(self, query_text, language):
try:
wiki.set_lang(language)
# query_text = wiki.search(query_text)[0] # search for query and take title of first result
ret = wiki.summary(query_text)
# TODO improve sentence detection
return Co.get_sentences(ret, 1) # better than the built in function of the wikipedia module
except DisambiguationError as disambigError:
query_text = disambigError.options[0] # take first guess of meaning
ret = wiki.summary(query_text)
return Co.get_sentences(ret, 1)
except Exception as e:
raise ModuleError(self.module_name, str(e))
|
StarcoderdataPython
|
1765017
|
<gh_stars>0
import sys
import numpy as np
import math
fname = sys.argv[1]
quantums = []
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
count = 0
is_quantum = False
quantum = []
for line in content:
if line.startswith("-----------"):
quantum.append(line)
is_quantum = True
elif line.startswith("thread id :"):
quantum.append(line)
quantums.append(quantum)
quantum = []
is_quantum = False
elif is_quantum:
quantum.append(line)
threads = []
for thread in quantums[0][4:7]:
threads.append(thread.split()[1:3])
threads.append(quantums[0][4].split()[1:3])
quantums = quantums[1:]
big_core_errors = []
big_core_predicted_error = []
big_core_next_quantum_error = []
small_core_errors = []
small_core_predicted_error = []
small_core_next_quantum_error = []
system_error = []
system_predicted_error = []
system_next_quantum_error = []
system_ipc = []
quantum_count = 1
s_ipc = 0
for quantum in quantums:
temp = []
for line in quantum:
if line[0].isdigit():
temp.append(line.split()[1:3])
small_errors = []
small_predicted_error = []
small_next_quantum_error =[]
s_predicted = 0
s_real = 0
s_ml_predicted = 0
s_ml_predicted_real = 0
s_next_quantum = 0
s_next_quantum_real = 0
for prev_t, t in zip(threads, temp):
if not t[0].startswith("*") and not t[0].startswith("?"):
big_core_errors.append([t[0], prev_t[0]])
s_predicted += float(prev_t[0][1:] if prev_t[0].startswith("*") or prev_t[0].startswith("?") else prev_t[0])
s_real += float(t[0][1:] if t[0].startswith("*") or t[0].startswith("?") else t[0])
if prev_t[0].startswith("*") and not prev_t[0].startswith("?"):
big_core_predicted_error.append([t[0], prev_t[0][1:]])
s_ml_predicted += float(prev_t[0][1:])
s_ml_predicted_real += float(t[0])
if not prev_t[0].startswith("*") and not prev_t[0].startswith("?"):
big_core_next_quantum_error.append([t[0], prev_t[0]])
s_next_quantum += float(prev_t[0])
s_next_quantum_real += float(t[0])
s_ipc += s_real
if not t[1].startswith("*") and not t[1].startswith("?"):
small_errors.append([t[1], prev_t[1]])
s_predicted += float(prev_t[1][1:] if prev_t[1].startswith("*") or prev_t[1].startswith("?") else prev_t[1])
s_real += float(t[1][1:] if t[1].startswith("*") or t[1].startswith("?") else t[1])
s_ipc += s_real
if prev_t[1].startswith("*") and not prev_t[1].startswith("?"):
small_predicted_error.append([t[1], prev_t[1][1:]])
s_ml_predicted += float(prev_t[1][1:])
s_ml_predicted_real += float(t[1])
if not prev_t[1].startswith("*") and not prev_t[1].startswith("?"):
small_next_quantum_error.append([t[1], prev_t[1]])
s_next_quantum += float(prev_t[1])
s_next_quantum_real += float(t[1])
quantum_count += 1
system_ipc.append([quantum_count, s_real])
quantum_count += 1
system_error.append([s_real, s_predicted])
threads = temp
small_core_errors.append(small_errors)
small_core_predicted_error.append(small_predicted_error)
small_core_next_quantum_error.append(small_next_quantum_error)
system_predicted_error.append([s_ml_predicted_real, s_ml_predicted])
system_next_quantum_error.append([s_next_quantum_real, s_next_quantum])
big_mean_square_error = 0
small_mean_square_error = 0
big_percentage_error = []
small_percentage_error = []
diff_small = []
diff_big = []
system_mean_square_error = 0
system_percentage_error = []
diff_system = []
system_predicted_mean_square_error = 0
system_next_quantum_mean_square_error = 0
small_core_predicted_mean_square_error = 0
small_core_next_quantum_mean_square_error = 0
big_core_predicted_mean_square_error = 0
big_core_next_quantum_mean_square_error = 0
big_err = []
for big_error in big_core_errors:
if big_error[1].startswith("*") or big_error[1].startswith("?"):
big_error[1] = big_error[1][1:]
big_error = np.array(big_error)
big_error = big_error.astype(np.float)
if big_error[0] == 0:
continue
big_err.append(((abs(big_error[1] - big_error[0])) / big_error[0]))
big_percentage_error.append((big_error[1] - big_error[0]) / big_error[0])
diff_big.append(big_error[1] - big_error[0])
big_uerr = sum(big_err) / len(big_err)
big_std_err = 0
for err in big_err:
big_std_err += pow((err - big_uerr), 2)
big_std_err = math.sqrt(big_std_err / len(big_err))
#=================
big_err_pred = []
for big_error in big_core_predicted_error:
if big_error[1].startswith("*") or big_error[1].startswith("?"):
big_error[1] = big_error[1][1:]
big_error = np.array(big_error)
big_error = big_error.astype(np.float)
if big_error[0] == 0:
continue
big_err_pred.append(((abs(big_error[1] - big_error[0])) / big_error[0]))
big_uerr_pred = sum(big_err_pred) / len(big_err_pred)
big_core_predicted_mean_square_error = 0
for err in big_err_pred:
big_core_predicted_mean_square_error += pow((err - big_uerr_pred), 2)
big_core_predicted_mean_square_error = math.sqrt(big_core_predicted_mean_square_error / len(big_err_pred))
#=================
#=================
big_err_next = []
for big_error in big_core_next_quantum_error:
if big_error[1].startswith("*") or big_error[1].startswith("?"):
big_error[1] = big_error[1][1:]
big_error = np.array(big_error)
big_error = big_error.astype(np.float)
if big_error[0] == 0:
continue
big_err_next.append(((abs(big_error[1] - big_error[0])) / big_error[0]))
big_uerr_next = sum(big_err_next) / len(big_err_next)
big_core_next_quantum_mean_square_error = 0
for err in big_err_next:
big_core_next_quantum_mean_square_error += pow((err - big_uerr_next), 2)
big_core_next_quantum_mean_square_error = math.sqrt(big_core_next_quantum_mean_square_error / len(big_err_next))
#=================
#=================
small_error_pred = []
for small_error_quantum in small_core_predicted_error:
s_p = 0
s_r = 0
for small_error in small_error_quantum:
if small_error[1].startswith("*") or small_error[1].startswith("?"):
small_error[1] = small_error[1][1:]
small_error = np.array(small_error)
small_error = small_error.astype(np.float)
if small_error[0] == 0:
continue
s_p += small_error[1]
s_r += small_error[0]
if len(small_error_quantum) == 0:
continue
s_p = s_p / len(small_error_quantum)
s_r = s_r / len(small_error_quantum)
small_error_pred.append(abs(s_p - s_r) / s_r)
small_uerror_pred = sum(small_error_pred) / len(small_error_pred)
small_core_predicted_mean_square_error = 0
for err in small_error_pred:
small_core_predicted_mean_square_error += pow((err - small_uerror_pred), 2)
small_core_predicted_mean_square_error = math.sqrt(small_core_predicted_mean_square_error / len(small_error_pred))
#=================
#=================
small_error_next = []
for small_error_quantum in small_core_next_quantum_error:
s_p = 0
s_r = 0
for small_error in small_error_quantum:
if small_error[1].startswith("*") or small_error[1].startswith("?"):
small_error[1] = small_error[1][1:]
small_error = np.array(small_error)
small_error = small_error.astype(np.float)
if small_error[0] == 0:
continue
s_p += small_error[1]
s_r += small_error[0]
s_p = s_p / len(small_error_quantum)
s_r = s_r / len(small_error_quantum)
small_error_next.append(abs(s_p - s_r) / s_r)
small_uerror_next = sum(small_error_next) / len(small_error_next)
small_core_next_quantum_mean_square_error = 0
for err in small_error_next:
small_core_next_quantum_mean_square_error += pow((err - small_uerror_next), 2)
small_core_next_quantum_mean_square_error = math.sqrt(small_core_next_quantum_mean_square_error / len(small_error_next))
#=================
small_error_ = []
for small_error_quantum in small_core_errors:
small_error_quantum_percentage = []
small_error_quantum_meansquare = []
s_p = 0
s_r = 0
for small_error in small_error_quantum:
if small_error[1].startswith("*") or small_error[1].startswith("?"):
small_error[1] = small_error[1][1:]
small_error = np.array(small_error)
small_error = small_error.astype(np.float)
if small_error[0] == 0:
continue
s_p += small_error[1]
s_r += small_error[0]
small_error_quantum_meansquare.append(pow((small_error[1] - small_error[0]), 2))
small_error_quantum_percentage.append((small_error[1] - small_error[0]) / small_error[0])
s_p = s_p / len(small_error_quantum)
s_r = s_r / len(small_error_quantum)
small_error_.append(abs(s_p - s_r) / s_r)
small_percentage_error.append(sum(small_error_quantum_percentage) / len(small_error_quantum_percentage))
small_uerror = sum(small_error_) / len(small_error_)
small_std_error = 0
for err in small_error_:
small_std_error += pow((err - small_uerror), 2)
small_std_error = math.sqrt(small_std_error / len(small_error_))
#=======
system_error_pred = []
for s_error in system_predicted_error:
if s_error[0] == 0:
continue
system_error_pred.append(((abs(s_error[1] - s_error[0])) / s_error[0]))
system_uerror_pred = sum(system_error_pred) / len(system_error_pred)
system_predicted_mean_square_error = 0
for err in system_error_pred:
system_predicted_mean_square_error += pow((err - system_uerror_pred), 2)
system_predicted_mean_square_error = math.sqrt((system_predicted_mean_square_error / len(system_error_pred)))
#=======
#=======
system_error_next = []
for s_error in system_next_quantum_error:
if s_error[0] == 0:
continue
system_error_next.append(((abs(s_error[1] - s_error[0])) / s_error[0]))
system_uerror_next = sum(system_error_next) / len(system_error_next)
system_next_quantum_mean_square_error = 0
for err in system_error_next:
system_next_quantum_mean_square_error += pow((err - system_uerror_next), 2)
system_next_quantum_mean_square_error = math.sqrt((system_next_quantum_mean_square_error / len(system_error_next)))
#=======
system_error_ = []
for s_error in system_error:
if s_error[0] == 0:
continue
system_error_.append(((abs(s_error[1] - s_error[0])) / s_error[0]))
system_mean_square_error += pow((s_error[1] - s_error[0]), 2)
system_percentage_error.append((s_error[1] - s_error[0]) / s_error[0])
diff_system.append(s_error[1] - s_error[0])
system_uerror = sum(system_error_) / len(system_error_)
system_std_error = 0
for err in system_error_:
system_std_error += pow((err - system_uerror), 2)
system_std_error = math.sqrt((system_std_error / len(system_error_)))
big_mean_square_error = big_mean_square_error / len(big_core_errors)
small_mean_square_error = small_mean_square_error / len(small_core_errors)
system_mean_square_error = system_mean_square_error / len(system_error)
big_std = np.std(big_percentage_error)
small_std = np.std(small_percentage_error)
system_std = np.std(system_percentage_error)
big_percentage_error = sum(big_percentage_error) / len(big_core_errors)
small_percentage_error = sum(small_percentage_error) / len(small_core_errors)
system_percentage_error = sum(system_percentage_error) / len(system_error)
# diff_big = np.array(diff_big)
# diff_small = np.array(diff_small)
# diff_system = np.array(diff_system)
s_ipc = s_ipc / quantum_count
with open("IpcError.txt", "a") as myfile:
myfile.write(fname + "\nbig_std_error_whole : " + str(big_std_err) + "\nbig_std_error_pred : " + str(big_core_predicted_mean_square_error) + "\nbig_std_error_next_quantum : " + str(big_core_next_quantum_mean_square_error) +"\nsmall_std_error_whole : " + str(small_std_error) + "\nsmall_std_error_pred : " + str(small_core_predicted_mean_square_error) + "\nsmall_std_error_next : " + str(small_core_next_quantum_mean_square_error) + "\nsystem_std_whole : " + str(system_std_error) + "\nsystem_std_pred: " + str(system_predicted_mean_square_error) + "\nsystem_std_next : " + str(system_next_quantum_mean_square_error) + "\n\n")
|
StarcoderdataPython
|
1786373
|
<gh_stars>0
import json
import torch
import PIL
import argparse
import matplotlib
import numpy as np
import torchvision as tv
import matplotlib.pyplot as plt
from torch import nn
from collections import OrderedDict
from train import setup_nn
def main():
# Parse Arguments
parser = argparse.ArgumentParser()
parser.add_argument('image_path', type=str)
parser.add_argument('checkpoint_path', type=str)
parser.add_argument('--top', dest="top", type=int, default=5)
parser.add_argument('--category_names', dest="category_names_path", type=str, default="./cat_to_name.json")
parser.add_argument('--gpu', dest="gpu", type=bool, default=True)
args = parser.parse_args()
print('Chosen configuration:')
for arg in vars(args):
print("{} = {}".format(arg, getattr(args, arg)))
image_path = args.image_path
checkpoint_path = args.checkpoint_path
top = args.top
category_names_path = args.category_names_path
# load neural network
model = load_nn(checkpoint_path)
# load label mapping
with open(category_names_path, 'r') as f:
cat_to_name = json.load(f)
# predict image and display probabilities
predicted_probabilities, predicted_labels = predict(image_path, model)
image = process_image(image_path)
predicted_probabilities = np.array(predicted_probabilities[0])
predicted_labels = np.array(predicted_labels[0])
print(predicted_probabilities)
print(predicted_labels)
# Show image
ax1 = imshow(image, ax = plt)
ax1.axis('off')
ax1.show()
# Do assignments
assigned_probabilities = np.array(predicted_probabilities)
assigned_labels = [cat_to_name[str(label+1)] for label in predicted_labels]
print(assigned_probabilities)
print(assigned_labels)
# Show Assignments
_,ax2 = plt.subplots()
ticks = np.arange(len(assigned_labels))
ax2.bar(ticks, assigned_probabilities)
ax2.set_xticks(ticks = ticks)
ax2.set_xticklabels(assigned_labels)
ax2.yaxis.grid(True)
plt.show()
def load_nn(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
model,_,_ = setup_nn(checkpoint['input_size'],
checkpoint['hidden_sizes'],
checkpoint['output_size'],
checkpoint['drop_p'],
checkpoint['learning_rate'])
model.load_state_dict(checkpoint['state_dict'])
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns a Numpy array
'''
pil_image = PIL.Image.open(image)
image_transforms = tv.transforms.Compose([tv.transforms.Resize(255),
tv.transforms.CenterCrop(224),
tv.transforms.ToTensor(),
tv.transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
])
tensor_image = image_transforms(pil_image)
np_image = tensor_image.numpy()
return np_image
def imshow(image, ax=None, title=None):
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes it is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
def predict(image_path, model, topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
# TODO: Implement the code to predict the class from an image file
model.to('cuda')
image = process_image(image_path)
image = torch.from_numpy(image)
image = image.unsqueeze_(0)
image = image.to('cuda')
# Calculate the class probabilities (softmax) for img
with torch.no_grad():
output = model.forward(image)
output_softmaxed = torch.nn.functional.softmax(output.data, dim=1)
return output_softmaxed.topk(topk)
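# Example call (sketch): assumes a checkpoint-loaded model and a CUDA device;
# 'flower.jpg' is a placeholder path, not a file shipped with this script.
#     probs, labels = predict('flower.jpg', model, topk=5)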
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1712303
|
from django.conf.urls import patterns, include, url
from qa.views import QuestionListView, QuestionView, QuestionNewView, \
QuestionUpdateView, QuestionDeleteView, vote_question
urlpatterns = [
url(r'^$', QuestionListView.as_view(), name='questions'),
url(r'^q/(?P<slug_title>[\w-]+)$', QuestionView.as_view(), name='question'),
url(r'^new$', QuestionNewView.as_view(), name='new'),
url(r'^edit/(?P<slug_title>[\w-]+)$', QuestionUpdateView.as_view(),
name='edit'),
url(r'^answer/(?P<slug_title>[\w-]+)$', QuestionView.as_view(),
name='answer'),
url(r'^delete/(?P<slug_title>[\w-]+)$', QuestionDeleteView.as_view(),
name='delete'),
url(r'^vote/(?P<slug_title>[\w-]+)$', vote_question,
name='vote')
]
|
StarcoderdataPython
|
3228876
|
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Optional
from rich import print
from cookietemple.common.version import load_ct_template_version
from cookietemple.config.config import ConfigCommand
from cookietemple.create.domains.cookietemple_template_struct import CookietempleTemplateStruct
from cookietemple.create.github_support import load_github_username, prompt_github_repo
from cookietemple.create.template_creator import TemplateCreator
from cookietemple.custom_cli.questionary import cookietemple_questionary_or_dot_cookietemple
@dataclass
class TemplateStructPub(CookietempleTemplateStruct):
"""
This class contains all attributes specific for PUB projects
"""
"""
This section contains some PUB-domain specific attributes
"""
pubtype: str = ""
author: str = ""
title: str = ""
university: str = ""
department: str = ""
degree: str = ""
github_username = ""
class PubCreator(TemplateCreator):
def __init__(self):
self.pub_struct = TemplateStructPub(domain="pub", language="latex")
super().__init__(self.pub_struct)
self.WD_Path = Path(os.path.dirname(__file__))
self.TEMPLATES_PUB_PATH = f"{self.WD_Path.parent}/templates/pub"
'"" TEMPLATE VERSIONS ""'
self.PUB_LATEX_TEMPLATE_VERSION = load_ct_template_version("pub-thesis-latex", self.AVAILABLE_TEMPLATES_PATH)
def create_template(self, path: Path, dot_cookietemple: Optional[dict]):
"""
Prompts the user for the publication type and forwards to subsequent prompts.
Creates the pub template.
"""
# latex is default language
self.pub_struct.pubtype = cookietemple_questionary_or_dot_cookietemple(
function="select",
question="Choose between the following publication types",
choices=["thesis"],
dot_cookietemple=dot_cookietemple,
to_get_property="pubtype",
)
if not os.path.exists(ConfigCommand.CONF_FILE_PATH):
print("[bold red]Cannot find a Cookietemple config file! Is this your first time with Cookietemple?\n")
print("[bold blue]Lets set your configs for Cookietemple and you are ready to go!\n")
ConfigCommand.all_settings()
# switch case statement to prompt the user to fetch template specific configurations
switcher: Dict[str, Any] = {
"latex": self.common_latex_options,
}
switcher.get(self.pub_struct.language.lower(), lambda: "Invalid language!")(dot_cookietemple) # type: ignore
self.handle_pub_type(dot_cookietemple)
(
self.pub_struct.is_github_repo,
self.pub_struct.is_repo_private,
self.pub_struct.is_github_orga,
self.pub_struct.github_orga,
) = prompt_github_repo(dot_cookietemple)
if self.pub_struct.is_github_orga:
self.pub_struct.github_username = self.pub_struct.github_orga
# create the pub template
super().create_template_with_subdomain(self.TEMPLATES_PUB_PATH, self.pub_struct.pubtype) # type: ignore
# switch case statement to fetch the template version
switcher_version = {
"latex": self.PUB_LATEX_TEMPLATE_VERSION,
}
self.pub_struct.template_version = switcher_version.get(
self.pub_struct.language.lower(), lambda: "Invalid language!"
)
self.pub_struct.template_version, self.pub_struct.template_handle = (
switcher_version.get(self.pub_struct.language.lower(), lambda: "Invalid language!"),
f"pub-{self.pub_struct.pubtype}-{self.pub_struct.language.lower()}",
)
# perform general operations like creating a GitHub repository and general linting, but skip common_files copying and rst linting
super().process_common_operations(
path=Path(path).resolve(),
skip_common_files=True,
skip_fix_underline=True,
domain="pub",
subdomain=self.pub_struct.pubtype,
language=self.pub_struct.language,
dot_cookietemple=dot_cookietemple,
)
def handle_pub_type(self, dot_cookietemple: Optional[dict]) -> None:
"""
Determine the type of publication and handle it further.
"""
switcher = {
"thesis": self.handle_thesis_latex,
}
switcher.get(self.pub_struct.pubtype.lower(), lambda: "Invalid Pub Project Type!")(dot_cookietemple) # type: ignore
def handle_thesis_latex(self, dot_cookietemple: Optional[dict]) -> None:
self.pub_struct.degree = cookietemple_questionary_or_dot_cookietemple(
function="text",
question="Degree",
default="PhD",
dot_cookietemple=dot_cookietemple,
to_get_property="degree",
)
def common_latex_options(self, dot_cookietemple: Optional[dict]) -> None:
"""
Prompt the user for common thesis/paper data
"""
self.pub_struct.author = cookietemple_questionary_or_dot_cookietemple(
function="text",
question="Author",
default="<NAME>",
dot_cookietemple=dot_cookietemple,
to_get_property="author",
)
self.pub_struct.project_slug = cookietemple_questionary_or_dot_cookietemple(
function="text",
question="Project name",
default="PhD Thesis",
dot_cookietemple=dot_cookietemple,
to_get_property="project_name",
).replace( # type: ignore
" ", "_"
)
self.pub_struct.project_slug_no_hyphen = self.pub_struct.project_slug.replace("-", "_")
self.pub_struct.title = cookietemple_questionary_or_dot_cookietemple(
function="text",
question="Publication title",
default="On how Springfield exploded",
dot_cookietemple=dot_cookietemple,
to_get_property="title",
)
self.pub_struct.university = cookietemple_questionary_or_dot_cookietemple(
function="text",
question="University",
default="<NAME> University",
dot_cookietemple=dot_cookietemple,
to_get_property="university",
)
self.pub_struct.department = cookietemple_questionary_or_dot_cookietemple(
function="text",
question="Department",
default="Department of Nuclear Physics",
dot_cookietemple=dot_cookietemple,
to_get_property="department",
)
self.pub_struct.github_username = load_github_username() # Required for Github support
|
StarcoderdataPython
|
1688528
|
__author__ = 'justinarmstrong'
import pygame as pg
from .. import setup
from .. import constants as c
from . import powerups
from . import coin
class Coin_box(pg.sprite.Sprite):
"""Coin box sprite"""
def __init__(self, x, y, contents='coin', group=None):
pg.sprite.Sprite.__init__(self)
self.sprite_sheet = setup.GFX['tile_set']
self.frames = []
self.setup_frames()
self.frame_index = 0
self.image = self.frames[self.frame_index]
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
self.mask = pg.mask.from_surface(self.image)
self.animation_timer = 0
self.first_half = True # First half of animation cycle
self.state = c.RESTING
self.rest_height = y
self.gravity = 1.2
self.y_vel = 0
self.contents = contents
self.group = group
def get_image(self, x, y, width, height):
"""Extract image from sprite sheet"""
image = pg.Surface([width, height]).convert()
rect = image.get_rect()
image.blit(self.sprite_sheet, (0, 0), (x, y, width, height))
image.set_colorkey(c.BLACK)
image = pg.transform.scale(image,
(int(rect.width*c.BRICK_SIZE_MULTIPLIER),
int(rect.height*c.BRICK_SIZE_MULTIPLIER)))
return image
def setup_frames(self):
"""Create frame list"""
self.frames.append(
self.get_image(384, 0, 16, 16))
self.frames.append(
self.get_image(400, 0, 16, 16))
self.frames.append(
self.get_image(416, 0, 16, 16))
self.frames.append(
self.get_image(432, 0, 16, 16))
def update(self, game_info):
"""Update coin box behavior"""
self.current_time = game_info[c.CURRENT_TIME]
self.handle_states()
def handle_states(self):
"""Determine action based on RESTING, BUMPED or OPENED
state"""
if self.state == c.RESTING:
self.resting()
elif self.state == c.BUMPED:
self.bumped()
elif self.state == c.OPENED:
self.opened()
def resting(self):
"""Action when in the RESTING state"""
if self.first_half:
if self.frame_index == 0:
if (self.current_time - self.animation_timer) > 375:
self.frame_index += 1
self.animation_timer = self.current_time
elif self.frame_index < 2:
if (self.current_time - self.animation_timer) > 125:
self.frame_index += 1
self.animation_timer = self.current_time
elif self.frame_index == 2:
if (self.current_time - self.animation_timer) > 125:
self.frame_index -= 1
self.first_half = False
self.animation_timer = self.current_time
else:
if self.frame_index == 1:
if (self.current_time - self.animation_timer) > 125:
self.frame_index -= 1
self.first_half = True
self.animation_timer = self.current_time
self.image = self.frames[self.frame_index]
def bumped(self):
"""Action after Mario has bumped the box from below"""
self.rect.y += self.y_vel
self.y_vel += self.gravity
if self.rect.y > self.rest_height + 5:
self.rect.y = self.rest_height
self.state = c.OPENED
if self.contents == 'mushroom':
self.group.add(powerups.Mushroom(self.rect.centerx, self.rect.y))
elif self.contents == 'fireflower':
self.group.add(powerups.FireFlower(self.rect.centerx, self.rect.y))
elif self.contents == '1up_mushroom':
self.group.add(powerups.DeathMushroom(self.rect.centerx, self.rect.y))
elif self.contents == 'death_mushroom':
self.group.add(powerups.DeathMushroom(self.rect.centerx, self.rect.y))
self.frame_index = 3
self.image = self.frames[self.frame_index]
def start_bump(self, score_group):
"""Transitions box into BUMPED state"""
self.y_vel = -6
self.state = c.BUMPED
if self.contents == 'coin':
self.group.add(coin.Coin(self.rect.centerx,
self.rect.y,
score_group))
setup.SFX['coin'].play()
else:
setup.SFX['powerup_appears'].play()
def opened(self):
"""Placeholder for OPENED state"""
pass
|
StarcoderdataPython
|
4838374
|
class ClassNames:
RECURRENCE_EVENTS = "recurrence-events"
NO_RECURRENCE_EVENTS = "no-recurrence-events"
CLASSES = [
ClassNames.RECURRENCE_EVENTS,
ClassNames.NO_RECURRENCE_EVENTS
]
FEATURES = [
"age",
"menopause",
"tumor-size",
"inv-nodes",
"node-caps",
"deg-malig",
"breast",
"breast-quad",
"irradiat"
]
FEATURE_VALUES = {
"age": ["10-19", "20-29", "30-39", "40-49", "50-59", "60-69", "70-79", "80-89", "90-99"],
"menopause": ["lt40", "ge40", "premeno"],
"tumor-size": ["0-4", "5-9", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40-44",
"45-49", "50-54", "55-59"],
"inv-nodes": ["0-2", "3-5", "6-8", "9-11", "12-14", "15-17", "18-20", "21-23", "24-26",
"27-29", "30-32", "33-35", "36-39"],
"node-caps": ["yes", "no"],
"deg-malig": ["1", "2", "3"],
"breast": ["left", "right"],
"breast-quad": ["left_up", "left_low", "right_up", "right_low", "central"],
"irradiat": ["yes", "no"]
}
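# Example (sketch): the allowed categorical values can be looked up per feature,
# e.g. FEATURE_VALUES["deg-malig"] -> ["1", "2", "3"].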
|
StarcoderdataPython
|
3271052
|
<reponame>ahmedkhalf/SimpleShell
from subprocess import check_call, CalledProcessError
import readline
while True:
try:
command = input("$ ")
except EOFError:
break
except KeyboardInterrupt:
print()
continue
if command.strip() == "exit":
break
else:
try:
check_call(command, shell=True)
except CalledProcessError as e:
pass
|
StarcoderdataPython
|
3362924
|
<gh_stars>0
#!/usr/bin/env python3
from .arguments import arguments
from . import capture, cleanup
from .compileVideo import compileVideo
from .interface import banner, print_statusline
from .logger import logging
from time import sleep
def main():
# print program banner if verbose is set
if arguments["--verbose"]:
banner()
if arguments["compile"]:
compileVideo(
workDir=arguments["--save-dir"],
videoName=arguments["--output-video"],
fps=int(arguments["--fps"]),
)
exit()
elif arguments["clean"]:
cleanup.clean()
exit()
camera = capture.camera(
workdir=arguments["--save-dir"],
length=int(arguments["--length"]),
width=int(arguments["--width"]),
)
if arguments["capture"]:
camera.capture(
imageName=arguments["--image-name"], addTime=not arguments["--no-time"]
)
logging.success("Image successfully captured.")
elif arguments["record"]:
try:
waitingTime = int(arguments["--shutter-wait"])
if waitingTime != 0:
for sec in range(0, waitingTime + 1):
print_statusline(f"Countdown before record: {waitingTime - sec}")
sleep(1)
print()
camera.record(
duration=int(arguments["--duration"]),
frequency=int(arguments["--frequency"]),
continuous=arguments["--continuous"],
)
if arguments["--auto-compile"]:
compileVideo(
workDir=arguments["--save-dir"],
videoName=arguments["--output-video"],
fps=int(arguments["--fps"]),
)
except KeyboardInterrupt:
if (
arguments["--auto-compile"]
and input("[prompt] Continue compiling video? [y/n] ") == "y"
):
compileVideo(
workDir=arguments["--save-dir"],
videoName=arguments["--output-video"],
fps=int(arguments["--fps"]),
)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
58436
|
<reponame>smarie/python-doit-api<filename>doit_api/main.py
import sys
from inspect import isgeneratorfunction
from os.path import exists
import platform
try:
from typing import Callable, Union, List, Tuple, Dict, Optional, Type, Any
from pathlib import Path
DoitAction = Union[str, List, Callable, Tuple[Callable, Tuple, Dict]]
DoitTask = Union[str, Callable, 'task', 'taskgen']
DoitPath = Union[str, Path]
except ImportError:
pass
from doit.action import CmdAction
# --- configuration
def doit_config(
# execution related
default_tasks=None, # type: List[str]
single=None, # type: bool
continue_=None, # type: bool
always=None, # type: bool
cleanforget=None, # type: bool
cleandep=None, # type: bool
dryrun=None, # type: bool
# database
db_file=None, # type: Union[str, Path]
dep_file=None, # type: Union[str, Path]
backend=None, # type: str
# verbosities
verbosity=None, # type: int
failure_verbosity=None, # type: int
# output and working dir
outfile=None, # type: Union[str, Path]
reporter=None, # type: Union[str, Type]
dir=None, # type: Union[str, Path]
# parallel processing
num_process=None, # type: int
parallel_type=None, # type: str
# misc
check_file_uptodate=None, # type: str
pdb=None, # type: bool
codec_cls=None, # type: Type
minversion=None, # type: Union[str, Tuple[int, int, int]]
auto_delayed_regex=None, # type: bool
action_string_formatting=None, # type: str
):
"""
Generates a valid DOIT_CONFIG dictionary, that can contain GLOBAL options. You can use it at the beginning of your
`dodo.py` file:
```python
from doit_api import doit_config
DOIT_CONFIG = doit_config(verbosity=2)
```
Almost all command line options can be changed here.
See https://pydoit.org/configuration.html#configuration-at-dodo-py
:param default_tasks: The list of tasks to run when no task names are specified in the commandline. By default
all tasks are run. See https://pydoit.org/tasks.html#task-selection
:param single: set this to true to execute only specified tasks ignoring their task_dep. Default: False
:param continue_: by default the execution of tasks is halted on the first task failure or error. You can force it
to continue execution by setting this to True. See https://pydoit.org/cmd_run.html#continue
:param always: set this to True to always execute tasks even if up-to-date (default: False)
:param cleanforget: a boolean, set this to True (default: False) if you would like doit to also forget the
previous execution of cleaned tasks. See https://pydoit.org/cmd_other.html#clean
:param cleandep: By default if a task contains task-dependencies those are not automatically cleaned too. Set this
flag to True to do it. Note that if you execute the default tasks, this is set to True by default.
See https://pydoit.org/cmd_other.html#clean
:param dryrun: a boolean (default False), telling doit to print actions without really executing them.
See https://pydoit.org/cmd_other.html#dry-run
:param db_file: an alias for dep_file
:param dep_file: sets the name of the file to save the "DB", default is .doit.db. Note that DBM backends might save
more than one file, in this case the specified name is used as a base name.
See https://pydoit.org/cmd_run.html#db-file
:param backend: The backend used by pydoit to store the execution states and results. A string that can be any of
'dbm' (default), 'json' (slow but good for debugging), 'sqlite3' (supports concurrent access).
Other choices may be available if you install doit plugins adding backends (e.g. redis...).
See https://pydoit.org/cmd_run.html#db-backend
:param verbosity: An integer defining the verbosity level:
0 capture (do not print) stdout/stderr from task,
1 capture stdout only,
2 do not capture anything (print everything immediately).
Default is 1. See https://pydoit.org/tasks.html#verbosity
:param failure_verbosity: Option to control if stdout/stderr should be re-displayed in the end of of report. This
is useful when used in conjunction with the `continue` option. Default: 0
0 do not show re-display
1 re-display stderr only
2 re-display both stderr/stdout
See https://pydoit.org/cmd_run.html#failure-verbosity
:param outfile: output file where to write the results to. Default is stdout.
See https://pydoit.org/cmd_run.html#output-file
:param reporter: choice of reporter for the console. Can be a string indicating a reporter included in doit, or
a class. Supported string values are
'console' (default),
'executed-only' (Produces zero output if no task is executed),
'json' (Output results in JSON format)
'zero' (display only error messages (does not display info on tasks being executed/skipped). This is used when
you only want to see the output generated by the tasks execution.)
see https://pydoit.org/cmd_run.html#reporter and https://pydoit.org/cmd_run.html#custom-reporter
:param dir: By default the directory of the dodo file is used as the "current working directory" on python
execution. You can specify a different cwd with this argument. See https://pydoit.org/cmd_run.html#dir-cwd
:param num_process: the number of parallel execution processes to use. Default 1. See
https://pydoit.org/cmd_run.html#parallel-execution
:param parallel_type: the type of parallelism mechanism used when process is set to a number larger than 1. A string
one of 'thread' (uses threads) and 'process' (uses python multiprocessing module, default).
:param check_file_uptodate: a string indicating how to check if files have been modified. 'md5': use the md5sum
(default), 'timestamp': use the timestamp. See https://pydoit.org/cmd_run.html#check-file-uptodate
:param pdb: set this to True to get into PDB (python debugger) post-mortem in case of unhandled exception.
Default: False. See https://pydoit.org/cmd_run.html#pdb
:param codec_cls: a class used to serialize and deserialize values returned by python-actions. Default `JSONCodec`.
See https://pydoit.org/cmd_run.html#codec-cls
:param minversion: an optional string or a 3-element tuple with integer values indicating the minimum/oldest doit
version that can be used with a dodo.py file. If specified as a string any part that is not
a number i.e.(dev0, a2, b4) will be converted to -1. See https://pydoit.org/cmd_run.html#minversion
:param auto_delayed_regex: set this to True (default False) to use the default regex ".*" for every delayed task
loader for which no regex was explicitly defined.
See https://pydoit.org/cmd_run.html#automatic-regex-for-delayed-task-loaders
:param action_string_formatting: Defines the templating style used by your cmd action strings for automatic variable
substitution. It is a string that can be 'old' (default), 'new', or 'both'.
See https://pydoit.org/tasks.html#keywords-on-cmd-action-string
:return: a configuration dictionary that you can use as the DOIT_CONFIG variable in your dodo.py file
"""
config_dict = dict()
# execution related
if default_tasks is not None:
# note: yes, not a dash here but an underscore
config_dict.update(default_tasks=default_tasks)
if single is not None:
config_dict.update(single=single)
if continue_ is not None:
config_dict['continue'] = continue_
if always is not None:
config_dict.update(always=always)
if cleanforget is not None:
config_dict.update(cleanforget=cleanforget)
if cleandep is not None:
config_dict.update(cleandep=cleandep)
if dryrun is not None:
config_dict.update(dryrun=dryrun)
# database
if db_file is not None:
assert dep_file is None, "db_file and dep_file are equivalent, you should not specify both"
dep_file = db_file
if dep_file is not None:
# note: yes, not a dash here but an underscore
config_dict.update(dep_file=dep_file)
if backend is not None:
config_dict.update(backend=backend)
# verbosities
if verbosity is not None:
config_dict.update(verbosity=verbosity)
if failure_verbosity is not None:
# confirmed
config_dict.update(failure_verbosity=failure_verbosity)
# output, reporter and working dir
if outfile is not None:
# yes, short name
config_dict.update(outfile=outfile)
if reporter is not None:
config_dict.update(reporter=reporter)
if dir is not None:
config_dict.update(dir=dir)
# parallel processing
if num_process is not None:
config_dict.update(num_process=num_process)
if parallel_type is not None:
config_dict.update(par_type=parallel_type)
# misc
if check_file_uptodate is not None:
config_dict.update(check_file_uptodate=check_file_uptodate)
if pdb is not None:
config_dict.update(pdb=pdb)
if codec_cls is not None:
config_dict.update(codec_cls=codec_cls)
if minversion is not None:
config_dict.update(minversion=minversion)
if auto_delayed_regex is not None:
config_dict.update(auto_delayed_regex=auto_delayed_regex)
if action_string_formatting is not None:
config_dict.update(action_string_formatting=action_string_formatting)
return config_dict
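# A minimal usage sketch (assumption: placed at module level of a dodo.py; the option
# values below are illustrative, not defaults):
#
#     DOIT_CONFIG = doit_config(verbosity=2, continue_=True, backend='sqlite3')
#     # -> {'verbosity': 2, 'continue': True, 'backend': 'sqlite3'}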
# --- task utilities
def why_am_i_running(task, changed):
"""
Goodie: a python action that you can use in any `doit` task, to print the reason why the task is running if the
task declared a `file_dep`, `task_dep`, `uptodate` or `targets`. Useful for debugging.
See [this doit conversation](https://github.com/pydoit/doit/issues/277).
"""
for t in task.targets:
if not exists(t):
print("Running %s because one of its targets does not exist: %r" % (task, t))
return
if changed is None or len(changed) == 0:
if len(task.targets) > 0:
print("Running %s because even though it declares at least a target, it does not have"
" explicit `uptodate=True`." % task)
else:
# silence
# print("Running %s because it declares no mechanism (file_dep or target) to avoid useless executions." % task)
pass
else:
print("Running %s because the following changed: %r" % (task, changed))
def title_with_actions(task):
"""
Goodie: an automatic title for doit tasks.
Same than `doit.title_with_actions` but removes `why_am_i_running` actions if any is present.
"""
if task.actions:
title = "\n\t".join([str(action) for action in task.actions
if not hasattr(action, 'py_callable') or action.py_callable is not why_am_i_running])
# A task that contains no actions at all
# is used as group task
else:
title = "Group: %s" % ", ".join(task.task_dep)
return "%s => %s" % (task.name, title)
# ----------- tasks creators
def validate_action(a):
"""
Internal helper to validate an action. Validates the conventions in https://pydoit.org/tasks.html#actions
- a command action = A string (command to be executed with the shell) or a list of strings or pathlib Paths
(command to be executed without the shell).
See https://pydoit.org/tasks.html#cmd-action
- a python action = a python callable or a tuple (callable, *args, **kwargs).
See https://pydoit.org/tasks.html#python-action
:param a:
:return:
"""
if isinstance(a, str):
# command action with the shell (Popen argument shell=True)
pass
elif isinstance(a, list):
# command action without the shell (Popen argument shell=False)
pass
elif isinstance(a, tuple):
# python callable with args and kwargs
# assert len(a) == 3
# assert callable(a[0])
pass
elif callable(a):
pass
elif isinstance(a, CmdAction):
pass
else:
raise ValueError("Action %r is not a valid action" % a)
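# For reference, a few action shapes that pass the validation above (sketch with
# hypothetical commands and callables):
#
#     validate_action("echo hello")                     # cmd-action, run through the shell
#     validate_action(["python", "-c", "print('hi')"])  # cmd-action, no shell
#     validate_action((print, ("hi",), {}))             # python-action: (callable, args, kwargs)
#     validate_action(print)                            # bare python callable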
def replace_task_names(list_of_tasks):
"""internal helper to replace tasks with their names in a list"""
def gen_all():
for o in list_of_tasks:
if isinstance(o, task):
yield o.name
elif isinstance(o, taskgen):
yield o.name
elif callable(o):
yield o.__name__.replace('task_', '')
else:
# a string task name
yield o
return list(gen_all())
class taskbase(object):
""" Base class for `task` and `taskgen`. """
def __init__(self,
name, # type: str
doc, # type: str
title # type: Union[str, Callable]
):
"""
:param name: an alternate base name for the task. By default the name of the decorated function is used.
See https://pydoit.org/tasks.html#task-name
:param title: an optional message string or callable generating a message, to print when the task is run. If
nothing is provided, by default the task name is printed. If a string is provided, the task name will
automatically be printed before it. If a callable is provided it should receive a single `task` argument
and return a string. See https://pydoit.org/tasks.html#title
:param doc: an optional documentation string for the task. If `@task` is used as a decorator, the decorated
function docstring will be used. See https://pydoit.org/tasks.html#doc
"""
self.name = name
self.doc = doc
self.title=title
self.actions = None
def add_default_desc_from_fun(self, func):
"""
Uses the information from `func` to fill the blanks in name and doc
:param func:
:return:
"""
if self.name is None:
self.name = func.__name__
if self.doc is None:
self.doc = func.__doc__
def get_base_desc(self, is_subtask=False, **additional_meta):
task_dict = dict()
# base name
if is_subtask:
task_dict.update(name=self.name)
else:
task_dict.update(basename=self.name)
# doc
if self.doc is not None:
task_dict.update(doc=self.doc)
# title
if self.title is not None:
if isinstance(self.title, str):
# a string: doit does not support this, so create a callable with a simple format.
def make_title(task):
return "%s => %s" % (task.name, self.title)
task_dict.update(title=make_title)
else:
# a callable already
task_dict.update(title=self.title)
# update with additional meta if provided
task_dict.update(additional_meta)
return task_dict
class task(taskbase):
"""
Creates a doit task.
```python
from doit_api import task
echoer = task(name="echoer", actions=["echo hi"], doc="the doc for echoer")
```
Its signature regroups all options that you can usually set on a `doit` task, with sensible defaults. See the constructor
for details.
Note: this relies on the `create_doit_tasks` hook, see https://pydoit.org/task_creation.html#custom-task-definition
"""
def __init__(self,
# *, (support for python 2: no kw only args)
# -- task information + what the task is doing when run
name, # type: str
actions, # type: List[DoitAction]
doc=None, # type: str
title=title_with_actions, # type: Union[str, Callable]
tell_why_am_i_running=True, # type: bool
# -- preventing useless runs and selecting order
targets=None, # type: List[DoitPath]
clean=None, # type: Union[bool, List[DoitAction]]
file_dep=None, # type: List[DoitPath]
task_dep=None, # type: List[DoitTask]
uptodate=None, # type: List[Optional[Union[bool, Callable, str]]]
# -- advanced
setup=None, # type: List[DoitTask]
teardown=None, # type: List[DoitAction]
getargs=None, # type: Dict[str, Tuple[str, str]]
calc_dep=None, # type: List[DoitTask]
# -- misc
verbosity=None, # type: int
):
"""
A minimal `doit` task consists of one or several actions. You must provide at least one action in `actions`.
If `tell_why_am_i_running=True` (default), an additional action will be prepended to print the reason why the
task is running if the task declared a `file_dep`, `task_dep`, `uptodate` or `targets`.
All other parameters match those in `doit` conventions (See docstrings below), except
- `name` that is an intelligent placeholder for `basename` (if a task is a simple task) or `name` (if the task
is a subtask in a `@taskgen` generator),
- `title` that adds support for plain strings and by default is `title_with_actions`
- `task_dep`, `setup` and `calc_dep` where if a task callable (decorated with `@task` or not) is provided, the
corresponding name will be used.
Note: the `watch` parameter (Linux and Mac only) is not yet supported.
See https://pydoit.org/cmd_other.html?highlight=watch#auto-watch
:param name: a mandatory name for the task. Note that this parameter will intelligently set 'basename' for
normal tasks or 'name' for subtasks in a task generator (`@taskgen`).
See https://pydoit.org/tasks.html#task-name
:param actions: a mandatory list of actions that this task should execute. There are 2 basic kinds of actions:
cmd-action and python-action. See https://pydoit.org/tasks.html#actions
:param doc: an optional documentation string for the task. See https://pydoit.org/tasks.html#doc
:param title: an optional message string or callable generating a message, to print when the task is run. If
nothing is provided, by default the task name is printed. If a string is provided, the task name will
automatically be printed before it. If a callable is provided it should receive a single `task` argument
and return a string. See https://pydoit.org/tasks.html#title
:param tell_why_am_i_running: if True (default), an additional `why_am_i_running` action is prepended to the
list of actions
:param file_dep: an optional list of strings or instances of any pathlib Path class indicating the files
required for this task to run. When none of these files are modified, the task will be skipped if already
run. See https://pydoit.org/tasks.html#file-dep-file-dependency
:param task_dep: an optional list of tasks (names or callables) that should be run *before* this task. Note
that this is also a convenient way to create a group of tasks.
See https://pydoit.org/dependencies.html#task-dependency
:param uptodate: an optional list where each element can be True (up to date), False (not up to date),
None (ignored), a callable or a command (string). Many pre-baked callables from `doit.tools` can be used:
`result_dep` to depend on the result of another task, `run_once` to run only once, `timeout` for time-based
expiration, `config_changed` for changes in a "configuration" string or dictionary, and more...
See https://pydoit.org/dependencies.html#uptodate
:param targets: an optional list of strings or instances of any pathlib Path class indicating the files created
by the task. They can be any file path (a file or folder). If a target does not exist the task will be
executed. Two different tasks *can not* have the same target. See https://pydoit.org/tasks.html#targets
:param clean: an optional boolean or list of tasks indicating if the task should perform some cleaning when
`doit clean` is executed. `True` means "delete all targets". If there is a folder as a target it will be
removed if the folder is empty, otherwise it will display a warning message. If you want to clean the
targets and add some custom clean actions, you can include `doit.task.clean_targets` in the list.
See https://pydoit.org/cmd_other.html#clean
:param setup: tasks to be run before this task but only when it is run.
See https://pydoit.org/dependencies.html#setup-task
:param teardown: actions to run once all tasks are completed.
See https://pydoit.org/dependencies.html#setup-task
:param getargs: an optional dictionary where the key is the argument name used on actions, and the value is a
tuple with 2 strings: task name, "value name". getargs provides a way to use values computed from one task
in another task. See https://pydoit.org/dependencies.html#getargs
:param calc_dep: See https://pydoit.org/dependencies.html#calculated-dependencies
:param verbosity: an optional custom verbosity level (0, 1, or 2) for this task:
0 capture (do not print) stdout/stderr from task,
1 capture stdout only,
2 do not capture anything (print everything immediately).
Default is 1. See https://pydoit.org/tasks.html#verbosity
"""
# base
super(task, self).__init__(name=name, doc=doc, title=title)
# validate all actions
if actions is None or not isinstance(actions, list):
raise TypeError("actions should be a list, found: %r" % actions)
# for a in actions:
# validate_action(a)
self.actions = actions
self.tell_why_am_i_running = tell_why_am_i_running
# store other attributes
self.file_dep = file_dep
self.task_dep = task_dep
self.uptodate = uptodate
self.targets = targets
self.clean = clean
# advanced ones
self.setup = setup
self.teardown = teardown
self.getargs = getargs
self.calc_dep = calc_dep
self.verbosity = verbosity
# finally attach the `create_doit_tasks` hook if needed
self.create_doit_tasks = self._create_doit_tasks_noargs
def _create_doit_tasks_noargs(self):
return self._create_doit_tasks()
def _create_doit_tasks(self, is_subtask=False):
"""Called by doit to know this task's definition, or by `@taskgen`"""
# first get the base description
task_dict = self.get_base_desc(is_subtask=is_subtask)
# actions
if self.tell_why_am_i_running:
actions = [why_am_i_running] + self.actions
else:
actions = self.actions
task_dict.update(actions=actions)
# task dep, setup, calc dep: support direct link
if self.task_dep is not None:
task_dict.update(task_dep=replace_task_names(self.task_dep))
if self.setup is not None:
task_dict.update(setup=replace_task_names(self.setup))
if self.calc_dep is not None:
task_dict.update(calc_dep=replace_task_names(self.calc_dep))
# others: simply use if not none
if self.file_dep is not None:
task_dict.update(file_dep=self.file_dep)
if self.uptodate is not None:
task_dict.update(uptodate=self.uptodate)
if self.targets is not None:
task_dict.update(targets=self.targets)
if self.clean is not None:
task_dict.update(clean=self.clean)
if self.teardown is not None:
task_dict.update(teardown=self.teardown)
if self.getargs is not None:
task_dict.update(getargs=self.getargs)
if self.verbosity is not None:
task_dict.update(verbosity=self.verbosity)
return task_dict
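# Hedged usage sketch (not part of the library): assuming the `echoer` task from the class
# docstring above lives in a dodo.py, another task can depend on it by passing the task
# object directly; its name is resolved through `replace_task_names` when the task dict is
# built.
#
#     from doit_api import task
#     echoer = task(name="echoer", actions=["echo hi"], doc="the doc for echoer")
#     builder = task(name="builder", actions=["echo building"], task_dep=[echoer])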
class taskgen(taskbase):
"""
A decorator to create a doit task generator (See https://pydoit.org/tasks.html#sub-tasks).
Similar to `@task`, you can use it without arguments and it will capture the name and docstring of the decorated
function. This function needs to be a generator, meaning that it should `yield` tasks. Such tasks can be plain old
dictionaries as in `doit`, or can be created with `task`.
For example this is a task group named `mygroup` with two tasks `mygroup:echo0` and `mygroup:echo1`
```python
from doit_api import taskgen, task
@taskgen
def mygroup():
''' hey!!! '''
for i in range(2):
yield task(name="echo%s" % i, actions=["echo hi > hoho%s.txt" % i], targets=["hoho%s.txt" % i])
```
And this is one with two python subtasks:
```python
from doit_api import taskgen, task
@taskgen
def mygroup():
''' hey!!! '''
for i in range(2):
@task(name="subtask %i" % i,
doc="a subtask %s" % i,
title="this is %s running" % i)
def c_():
print("hello sub")
yield c_
```
`@taskgen` only accepts three optional arguments: `name` (that will be used for the base group name), doc, and
title.
"""
def __init__(self,
_func=None,
# *, (support for python 2: no kw only args)
# -- task information
name=None, # type: str
doc=None, # type: str
# -- what the task is doing when run
title=None # type: Union[str, Callable]
):
"""
:param name: an alternate base name for the task group. By default the name of the decorated function is used.
See https://pydoit.org/tasks.html#sub-tasks
:param doc: an optional documentation string for the task group. By default the decorated
function docstring will be used. See https://pydoit.org/tasks.html#doc
:param title: an optional message string or callable generating a message, to print when this task group is run.
If nothing is provided, by default the task name is printed. If a string is provided, the task name will
automatically be printed before it. If a callable is provided it should receive a single `task` argument
and return a string. See https://pydoit.org/tasks.html#title
"""
# base
super(taskgen, self).__init__(name=name, doc=doc, title=title)
# this will be non-None if @taskgen is used as a decorator without arguments
self.func = _func
# late-rename so that doit doesn't try to call the unbound method.
self.create_doit_tasks = self._create_doit_tasks
def __call__(self, func):
self.func = func # When instantiated with kwargs & used as a decorator
return self
def _create_doit_tasks(self):
"""Called by doit to know this task's definition"""
# validate decorated function - a generator
if self.func is None:
raise TypeError("No task generator function is provided")
if not isgeneratorfunction(self.func):
raise TypeError("The decorated function should be a generator")
# Protect against empty subtasks by yielding a first def with name None, see https://pydoit.org/tasks.html#sub-tasks
self.add_default_desc_from_fun(self.func)
yield self.get_base_desc(name=None)
for f in self.func():
if isinstance(f, dict):
yield f
else:
yield f._create_doit_tasks(is_subtask=True)
# class TaskBase(object):
# todo we could wish to provide the same level of functionality than this letsdoit class, but with fields listed.
# """Subclass this to define tasks."""
# @classmethod
# def create_doit_tasks(cls):
# if cls is TaskBase:
# return # avoid create tasks from base class 'Task'
# instance = cls()
# kw = dict((a, getattr(instance, a)) \
# for a in dir(instance) if not a.startswith('_'))
#
# kw.pop('create_doit_tasks')
# if 'actions' not in kw:
# kw['actions'] = [kw.pop('run')]
# if 'doc' not in kw and (cls.__doc__ != TaskBase.__doc__):
# kw['doc'] = cls.__doc__
# return kw
def cmdtask(
# -- task information
name=None, # type: Union[str, Any]
doc=None, # type: str
# -- what the task is doing when run
title=title_with_actions, # type: Union[str, Callable]
pre_actions=None, # type: List[DoitAction]
post_actions=None, # type: List[DoitAction]
tell_why_am_i_running=True, # type: bool
# -- preventing useless runs and selecting order
targets=None, # type: List[DoitPath]
clean=None, # type: Union[bool, List[DoitAction]]
file_dep=None, # type: List[DoitPath]
task_dep=None, # type: List[DoitTask]
uptodate=None, # type: List[Optional[Union[bool, Callable, str]]]
# -- advanced
setup=None, # type: List[DoitTask]
teardown=None, # type: List[DoitAction]
getargs=None, # type: Dict[str, Tuple[str, str]]
calc_dep=None, # type: List[DoitTask]
# -- misc
verbosity=None # type: int
):
"""
A decorator to create a task containing a shell command action (returned by the decorated function), and
optional additional actions.
```python
from doit_api import cmdtask
@cmdtask
def a():
''' the doc for a '''
return "echo hi"
@cmdtask(targets='foo.txt', file_dep=..., ...)
def b():
''' the doc for b '''
return '''
echo about to create file
echo hi > foo.txt
'''
@cmdtask
def c():
''' the doc for c '''
return [
"echo hi",
("echo", "hi")
]
```
A minimal `doit` task consists of one or several actions. Here, the main action is a shell command or sequence
of shell commands, returned by the decorated function. In addition to supporting all ways to express a command
action in doit, this also supports multiline strings to easily concatenate several commands into one (see rejected
[feature request](https://github.com/pydoit/doit/issues/314)), and plain string or tuple (not in a list). Your
function can therefore return:
- A string (command to be executed with the shell).
- A multiline string (commands to be executed with the shell. Blank lines are automatically trimmed, and
all lines are concatenated into a single shell command using '&' (Windows) or ';' (Linux) before
execution). This allows several commands to leverage each other, for example `conda activate` + some
python execution.
- A tuple (not a list!) of strings or pathlib Paths (command to be executed without the shell).
- A list of strings or tuples. Note that in this case strings can not be multiline.
See [doit cmd-action](https://pydoit.org/tasks.html#cmd-action).
You can specify actions to be done before and after that/these `actions` in `pre_actions` and `post_actions`.
If `tell_why_am_i_running=True` (default) an additional action will be prepended to print the reason why the
task is running if the task declared a `file_dep`, `task_dep`, `uptodate` or `targets`.
All other parameters match those in `doit` conventions (See docstrings below), except
- `name` that is an intelligent placeholder for `basename` (if a task is a simple task) or `name` (if the task
is a subtask in a `@taskgen` generator),
- `title` that adds support for plain strings and by default is `title_with_actions`
- `task_dep`, `setup` and `calc_dep` where if a task callable (decorated with `@task` or not) is provided, the
corresponding name will be used.
Note: the `watch` parameter (Linux and Mac only) is not yet supported.
See https://pydoit.org/cmd_other.html?highlight=watch#auto-watch
:param name: an alternate name for the task. By default the name of the decorated function is used. Note that
this parameter will intelligently set 'basename' for normal tasks or 'name' for subtasks in a task
generator (`@taskgen`). See https://pydoit.org/tasks.html#task-name
:param doc: an optional documentation string for the task. By default, the decorated function docstring will
be used. See https://pydoit.org/tasks.html#doc
:param title: an optional message string or callable generating a message, to print when the task is run. If
nothing is provided, by default the task name is printed. If a string is provided, the task name will
automatically be printed before it. If a callable is provided it should receive a single `task` argument
and return a string. See https://pydoit.org/tasks.html#title
:param pre_actions: an optional list of actions to be executed before the main python action.
There are 2 basic kinds of actions: cmd-action and python-action. See https://pydoit.org/tasks.html#actions
:param post_actions: an optional list of actions to be executed after the main python action.
There are 2 basic kinds of actions: cmd-action and python-action. See https://pydoit.org/tasks.html#actions
:param tell_why_am_i_running: if True (default), an additional `why_am_i_running` action is prepended to the
list of actions
:param file_dep: an optional list of strings or instances of any pathlib Path class indicating the files
required for this task to run. When none of these files are modified, the task will be skipped if already
run. See https://pydoit.org/tasks.html#file-dep-file-dependency
:param task_dep: an optional list of tasks (names or callables) that should be run *before* this task. Note
that this is also a convenient way to create a group of tasks.
See https://pydoit.org/dependencies.html#task-dependency
:param uptodate: an optional list where each element can be True (up to date), False (not up to date),
None (ignored), a callable or a command (string). Many pre-baked callables from `doit.tools` can be used:
`result_dep` to depend on the result of another task, `run_once` to run only once, `timeout` for time-based
expiration, `config_changed` for changes in a "configuration" string or dictionary, and more...
See https://pydoit.org/dependencies.html#uptodate
:param targets: an optional list of strings or instances of any pathlib Path class indicating the files created
by the task. They can be any file path (a file or folder). If a target does not exist the task will be
executed. Two different tasks *can not* have the same target. See https://pydoit.org/tasks.html#targets
:param clean: an optional boolean or list of tasks indicating if the task should perform some cleaning when
`doit clean` is executed. `True` means "delete all targets". If there is a folder as a target it will be
removed if the folder is empty, otherwise it will display a warning message. If you want to clean the
targets and add some custom clean actions, you can include `doit.task.clean_targets` in the list.
See https://pydoit.org/cmd_other.html#clean
:param setup: tasks to be run before this task but only when it is run.
See https://pydoit.org/dependencies.html#setup-task
:param teardown: actions to run once all tasks are completed.
See https://pydoit.org/dependencies.html#setup-task
:param getargs: an optional dictionary where the key is the argument name used on actions, and the value is a
tuple with 2 strings: task name, "value name". getargs provides a way to use values computed from one task
in another task. See https://pydoit.org/dependencies.html#getargs
:param calc_dep: See https://pydoit.org/dependencies.html#calculated-dependencies
:param verbosity: an optional custom verbosity level (0, 1, or 2) for this task:
0 capture (do not print) stdout/stderr from task,
1 capture stdout only,
2 do not capture anything (print everything immediately).
Default is 1. See https://pydoit.org/tasks.html#verbosity
"""
# our decorator
def _decorate(f # type: Callable
):
# checks on the decorated function name
if f.__name__.startswith("task_"):
raise ValueError("You can not decorate a function named `task_xxx` with `@pytask` ; please remove the "
"`task_` prefix.")
# call the function to get the list of actions
f_actions = f()
if isinstance(f_actions, str):
# by default multiline strings are executed in the same shell command
f_actions = [join_cmds(get_multiline_actions(f_actions))]
elif isinstance(f_actions, tuple):
f_actions = [f_actions]
elif isinstance(f_actions, list):
# just convert the possible tuples inside, into lists.
f_actions = [list(a) if isinstance(a, tuple) else a for a in f_actions]
else:
raise TypeError("Unsupported return type for @cmdtask '%s': returned %r" % (f.__name__, f_actions))
# create the actions: pre + [fun] + post
actions = []
for _actions in (pre_actions, f_actions, post_actions):
if _actions is None:
continue
if not isinstance(_actions, list):
raise TypeError("pre_actions and post_actions should be lists")
# for a in _actions:
# validate_action(a)
actions += _actions
# create the task object
f_task = task(name=name, doc=doc,
title=title, actions=actions,
tell_why_am_i_running=tell_why_am_i_running,
targets=targets, clean=clean, file_dep=file_dep, task_dep=task_dep, uptodate=uptodate,
setup=setup, teardown=teardown, getargs=getargs, calc_dep=calc_dep,
verbosity=verbosity)
# declare the fun
f_task.add_default_desc_from_fun(f)
# move the hooks from f_task to f
f.create_doit_tasks = f_task.create_doit_tasks
del f_task.create_doit_tasks
f._create_doit_tasks = f_task._create_doit_tasks
return f
if name is not None and callable(name):
# used without arguments: we have to return a function, not a task! Otherwise pickle won't work
f = name
name = None # important ! indeed it is used in _decorate
return _decorate(f)
else:
# used with arguments: return a decorator
return _decorate
def pytask(
# -- task information
name=None, # type: Union[str, Any]
doc=None, # type: str
# -- what the task is doing when run
title=title_with_actions, # type: Union[str, Callable]
pre_actions=None, # type: List[DoitAction]
post_actions=None, # type: List[DoitAction]
tell_why_am_i_running=True, # type: bool
# -- preventing useless runs and selecting order
targets=None, # type: List[DoitPath]
clean=None, # type: Union[bool, List[DoitAction]]
file_dep=None, # type: List[DoitPath]
task_dep=None, # type: List[DoitTask]
uptodate=None, # type: List[Optional[Union[bool, Callable, str]]]
# -- advanced
setup=None, # type: List[DoitTask]
teardown=None, # type: List[DoitAction]
getargs=None, # type: Dict[str, Tuple[str, str]]
calc_dep=None, # type: List[DoitTask]
# -- misc
verbosity=None # type: int
):
"""
A decorator to create a task containing a python action (the decorated function), and optional additional actions.
```python
from doit_api import pytask
@pytask
def a():
''' the doc for a '''
print("hi")
@pytask(targets=..., file_dep=..., ...)
def b():
print("hi")
```
A minimal `doit` task consists of one or several actions. Here, the main action is a call to the decorated function.
You can specify actions to be done before and after that/these `actions` in `pre_actions` and `post_actions`.
If `tell_why_am_i_running=True` (default) an additional action will be prepended to print the reason why the
task is running if the task declared a `file_dep`, `task_dep`, `uptodate` or `targets`.
All other parameters match those in `doit` conventions (See docstrings below), except
- `name` that is an intelligent placeholder for `basename` (if a task is a simple task) or `name` (if the task
is a subtask in a `@taskgen` generator),
- `title` that adds support for plain strings and by default is `title_with_actions`
- `task_dep`, `setup` and `calc_dep` where if a task callable (decorated with `@task` or not) is provided, the
corresponding name will be used.
Note: the `watch` parameter (Linux and Mac only) is not yet supported.
See https://pydoit.org/cmd_other.html?highlight=watch#auto-watch
:param name: an alternate name for the task. By default the name of the decorated function is used. Note that
this parameter will intelligently set 'basename' for normal tasks or 'name' for subtasks in a task
generator (`@taskgen`). See https://pydoit.org/tasks.html#task-name
:param doc: an optional documentation string for the task. By default, the decorated function docstring will
be used. See https://pydoit.org/tasks.html#doc
:param title: an optional message string or callable generating a message, to print when the task is run. If
nothing is provided, by default the task name is printed. If a string is provided, the task name will
automatically be printed before it. If a callable is provided it should receive a single `task` argument
and return a string. See https://pydoit.org/tasks.html#title
:param pre_actions: an optional list of actions to be executed before the main python action.
There are 2 basic kinds of actions: cmd-action and python-action. See https://pydoit.org/tasks.html#actions
:param post_actions: an optional list of actions to be executed after the main python action.
There are 2 basic kinds of actions: cmd-action and python-action. See https://pydoit.org/tasks.html#actions
:param tell_why_am_i_running: if True (default), an additional `why_am_i_running` action is prepended to the
list of actions
:param file_dep: an optional list of strings or instances of any pathlib Path class indicating the files
required for this task to run. When none of these files are modified, the task will be skipped if already
run. See https://pydoit.org/tasks.html#file-dep-file-dependency
:param task_dep: an optional list of tasks (names or callables) that should be run *before* this task. Note
that this is also a convenient way to create a group of tasks.
See https://pydoit.org/dependencies.html#task-dependency
:param uptodate: an optional list where each element can be True (up to date), False (not up to date),
None (ignored), a callable or a command (string). Many pre-baked callables from `doit.tools` can be used:
`result_dep` to depend on the result of another task, `run_once` to run only once, `timeout` for time-based
expiration, `config_changed` for changes in a "configuration" string or dictionary, and more...
See https://pydoit.org/dependencies.html#uptodate
:param targets: an optional list of strings or instances of any pathlib Path class indicating the files created
by the task. They can be any file path (a file or folder). If a target does not exist the task will be
executed. Two different tasks *can not* have the same target. See https://pydoit.org/tasks.html#targets
:param clean: an optional boolean or list of tasks indicating if the task should perform some cleaning when
`doit clean` is executed. `True` means "delete all targets". If there is a folder as a target it will be
removed if the folder is empty, otherwise it will display a warning message. If you want to clean the
targets and add some custom clean actions, you can include `doit.task.clean_targets` in the list.
See https://pydoit.org/cmd_other.html#clean
:param setup: tasks to be run before this task but only when it is run.
See https://pydoit.org/dependencies.html#setup-task
:param teardown: actions to run once all tasks are completed.
See https://pydoit.org/dependencies.html#setup-task
:param getargs: an optional dictionary where the key is the argument name used on actions, and the value is a
tuple with 2 strings: task name, "value name". getargs provides a way to use values computed from one task
in another task. See https://pydoit.org/dependencies.html#getargs
:param calc_dep: See https://pydoit.org/dependencies.html#calculated-dependencies
:param verbosity: an optional custom verbosity level (0, 1, or 2) for this task:
0 capture (do not print) stdout/stderr from task,
1 capture stdout only,
2 do not capture anything (print everything immediately).
Default is 1. See https://pydoit.org/tasks.html#verbosity
"""
# our decorator
def _decorate(f # type: Callable
):
# checks on the decorated function name
if f.__name__.startswith("task_"):
raise ValueError("You can not decorate a function named `task_xxx` with `@pytask` ; please remove the "
"`task_` prefix.")
# create the actions: pre + [fun] + post
actions = []
for _actions in (pre_actions, [f], post_actions):
if _actions is None:
continue
if not isinstance(_actions, list):
raise TypeError("pre_actions and post_actions should be lists")
# for a in _actions:
# validate_action(a)
actions += _actions
# create the task object
f_task = task(name=name, doc=doc,
title=title, actions=actions,
tell_why_am_i_running=tell_why_am_i_running,
targets=targets, clean=clean, file_dep=file_dep, task_dep=task_dep, uptodate=uptodate,
setup=setup, teardown=teardown, getargs=getargs, calc_dep=calc_dep,
verbosity=verbosity)
# declare the fun
f_task.add_default_desc_from_fun(f)
# move the hooks from f_task to f
f.create_doit_tasks = f_task.create_doit_tasks
del f_task.create_doit_tasks
f._create_doit_tasks = f_task._create_doit_tasks
return f
if name is not None and callable(name):
# used without arguments: we have to return a function, not a task! Otherwise pickle won't work
f = name
name = None # important ! indeed it is used in _decorate
return _decorate(f)
else:
# used with arguments: return a decorator
return _decorate
# python 2 -specific hack to enable pickling task and taskgen objects
# without getting TypeError: can't pickle instancemethod objects
# see https://stackoverflow.com/a/25161919/7262247
if sys.version_info < (3, 0):
import copy_reg
import types
def _pickle_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
copy_reg.pickle(types.MethodType, _pickle_method)
def get_multiline_actions(a_string):
"""
Transforms the multiline command string provided into a list of single-line commands.
:param a_string:
:return: a list of action strings
"""
def _procline(l):
# remove spaces on the left
l = l.strip()
# remove comments
try:
cmt_idx = l.index('#')
l = l[:cmt_idx]
except ValueError:  # no comment marker in this line
pass
# finally remove trailing spaces
return l.rstrip()
lines = [_procline(l) for l in a_string.splitlines()]
return [l for l in lines if len(l) > 0]
OS_CMD_SEP = '& ' if platform.system() == 'Windows' else '; '
def join_cmds(cmds_list):
"""
Joins all commands in cmds_list into a single-line command, with the appropriate OS-dependent separator.
See https://github.com/pydoit/doit/issues/314 (rejected :( )
:param cmds_list: a list of shell commands (string)
:return: a single string containing a shell command
"""
def _procline(l):
# remove spaces on the left
l = l.strip()
# remove comments
try:
cmt_idx = l.index('#')
l = l[:cmt_idx]
except ValueError:  # no comment marker in this line
pass
# finally remove trailing spaces
return l.rstrip()
lines = [_procline(l) for l in cmds_list]
lines = [l for l in lines if len(l) > 0]
return OS_CMD_SEP.join(lines)
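# Hedged illustration of how `@cmdtask` processes a multiline return value with the two
# helpers above (separator shown is the Linux one; '& ' is used on Windows):
#
#     script = """
#     echo about to create file   # inline comments are stripped
#     echo hi > foo.txt
#     """
#     get_multiline_actions(script)             # ['echo about to create file', 'echo hi > foo.txt']
#     join_cmds(get_multiline_actions(script))  # 'echo about to create file; echo hi > foo.txt'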
|
StarcoderdataPython
|
83231
|
#
# elkme - the command-line sms utility
# see main.py for the main entry-point
#
__version__ = '0.6.0'
__release_date__ = '2017-07-17'
|
StarcoderdataPython
|
1758703
|
<filename>test-rst2.py
import io
import os
import select
import socket
import time
import utils
# The following is commented out because, perhaps due to creating a new network namespace, changes such as tcp_fin_timeout are not inherited
# utils.new_ns()
port = 1
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.bind(('127.0.0.1', port))
s.listen(16)
tcpdump = utils.tcpdump_start(port)
c = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
time.sleep(1)
utils.ss(port)
c.connect(('127.0.0.1', port))
time.sleep(1)
print("[c] client connect")
utils.ss(port)
x, _ = s.accept()
time.sleep(1)
print("[x] server accept")
utils.ss(port)
for num in range(3):
print("[x] send")
c.send(b"hello world")
x.shutdown(socket.SHUT_WR)
time.sleep(1)
print("[x] server shutdown(SHUT_WR)")
utils.ss(port)
x.close()
time.sleep(1)
print("[x] server close socket")
utils.ss(port)
for num in range(3):
print("[x] send")
c.send(b"hello world")
time.sleep(1)
utils.ss(port)
print("#####just ss outputs from now#####")
time.sleep(1)
utils.ss(port)
time.sleep(1)
utils.ss(port)
time.sleep(1)
utils.ss(port)
|
StarcoderdataPython
|
96489
|
"""MPC Algorithms."""
import torch
from torch.distributions import MultivariateNormal
from rllib.util.parameter_decay import Constant, ParameterDecay
from .abstract_solver import MPCSolver
class MPPIShooting(MPCSolver):
"""Solve MPC using Model Predictive Path Integral control.
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2016).
Aggressive driving with model predictive path integral control. ICRA.
<NAME>., <NAME>., & <NAME>. (2015).
Model predictive path integral control using covariance variable importance
sampling. arXiv.
<NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Deep Dynamics Models for Learning Dexterous Manipulation. arXiv.
"""
def __init__(self, kappa=1.0, filter_coefficients=(0.25, 0.8, 0), *args, **kwargs):
super().__init__(*args, **kwargs)
if not isinstance(kappa, ParameterDecay):
kappa = Constant(kappa)
self.kappa = kappa
self.filter_coefficients = torch.tensor(filter_coefficients)
self.filter_coefficients /= torch.sum(self.filter_coefficients)
def get_candidate_action_sequence(self):
"""Get candidate actions by sampling from a multivariate normal."""
noise_dist = MultivariateNormal(torch.zeros_like(self.mean), self.covariance)
noise = noise_dist.sample((self.num_samples,))
lag = len(self.filter_coefficients)
for i in range(self.horizon):
weights = self.filter_coefficients[: min(i + 1, lag)]
aux = torch.einsum(
"i, ki...j-> k...j",
weights.flip(0),
noise[:, max(0, i - lag + 1) : i + 1, ..., :],
)
noise[:, i, ..., :] = aux / torch.sum(weights)
action_sequence = self.mean.unsqueeze(0).repeat_interleave(self.num_samples, 0)
action_sequence += noise
action_sequence = action_sequence.permute(
tuple(torch.arange(1, action_sequence.dim() - 1)) + (0, -1)
)
if self.clamp:
return action_sequence.clamp(-1.0, 1.0)
return action_sequence
def get_best_action(self, action_sequence, returns):
"""Get best action by a weighted average of e^kappa returns."""
returns = self.kappa() * returns
weights = torch.exp(returns - torch.max(returns))
normalization = weights.sum()
weights = weights.unsqueeze(0).unsqueeze(-1)
weights = weights.repeat_interleave(self.horizon, 0).repeat_interleave(
self.dim_action, -1
)
return (weights * action_sequence).sum(dim=-2) / normalization
def update_sequence_generation(self, elite_actions):
"""Update distribution by the fitting the elite_actions to the mean."""
self.mean = elite_actions
self.kappa.update()
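# Hedged standalone sketch (illustrative names, not part of the solver API): the loop in
# `get_candidate_action_sequence` smooths the sampled noise along the horizon by blending
# each step with its predecessors using the normalized filter coefficients, as mimicked
# below on a toy tensor.
if __name__ == "__main__":
    coeffs = torch.tensor((0.25, 0.8, 0.0))
    coeffs = coeffs / coeffs.sum()
    toy_noise = torch.randn(4, 5, 2)  # (num_samples, horizon, dim_action)
    lag = len(coeffs)
    for i in range(toy_noise.shape[1]):
        weights = coeffs[: min(i + 1, lag)]
        window = toy_noise[:, max(0, i - lag + 1): i + 1, :]
        toy_noise[:, i, :] = torch.einsum("i,kij->kj", weights.flip(0), window) / weights.sum()
    print(toy_noise.shape)  # shape is unchanged, values are smoother along the horizon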
|
StarcoderdataPython
|
3393912
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-23 21:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('snorna', '0002_clinical_genomic_analysis_snorna_expression'),
]
operations = [
migrations.RenameField(
model_name='dataset',
old_name='read_length',
new_name='average_mappable_reads',
),
migrations.RemoveField(
model_name='dataset',
name='sequencing_strategy',
),
migrations.AddField(
model_name='dataset',
name='snorna_n',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='dataset',
name='snorna_rpkm_n',
field=models.IntegerField(null=True),
),
]
|
StarcoderdataPython
|
184287
|
<gh_stars>1-10
#!/usr/bin/env python
import pytest
"""
Test 1805. Number of Different Integers in a String
"""
@pytest.fixture(scope="session")
def init_variables_1805():
from src.leetcode_1805_number_of_different_integers_in_a_string import Solution
solution = Solution()
def _init_variables_1805():
return solution
yield _init_variables_1805
class TestClass1805:
def test_solution_0(self, init_variables_1805):
assert init_variables_1805().numDifferentIntegers("a123bc34d8ef34") == 3
def test_solution_1(self, init_variables_1805):
assert init_variables_1805().numDifferentIntegers("leet1234code234") == 2
def test_solution_2(self, init_variables_1805):
assert init_variables_1805().numDifferentIntegers("a1b01c001") == 1
|
StarcoderdataPython
|
42994
|
<reponame>josborne-noaa/PyFerret<gh_stars>10-100
'''
Template for creating a PyFerret Python External Function (PyEF).
The names of the functions provided should not be changed. By
default, PyFerret uses the name of the module as the function name.
Copy this file using a name that you would like to be the function
name, then modify the contents of these functions and comments as
desired.
'''
import numpy
def ferret_init(efid):
'''
Initialization function for this PyFerret PyEF. Returns
a dictionary describing the features of this PyFerret PyEF.
At a minimum, assigns the number of arguments expected and
a descriptions of the functions. May also provide
descriptions of the arguments and specifications for a
non-standard result grid.
'''
init_dict = { }
init_dict["numargs"] = 1
init_dict["descript"] = "Pass through"
return init_dict
def ferret_result_limits(efid):
'''
Defines the index limits for all abstract axes in the result grid.
Returns an (X,Y,Z,T,E,F)-axis six-tuple of either (low,high) pairs,
for an abstract axis, or None, for a non-abstract axis. The low
and high values are integer values. If the result grid has no
abstract axes, this function will not be called and can be deleted.
'''
axis_limits = (None, None, None, None, None, None)
return axis_limits
def ferret_custom_axes(efid):
'''
Defines all custom axes in the result grid. Returns an (X,Y,Z,T,E,F)-
axis six-tuple of either a (low, high, delta, unit_name, is_modulo)
tuple, for a custom axis, or None, for a non-custom axis. The low,
high, and delta values are floating-point values in units of the axis
coordinate ("world coordinates"). If the result grid has no custom
axes, this function will not be called and can be deleted.
'''
axis_info = (None, None, None, None, None, None)
return axis_info
def ferret_compute(efid, result, result_bdf, inputs, input_bdfs):
'''
Computation function for this PyFerret PyEF. Assign values to the
elements of result; do not reassign result itself. In other words,
assign values using notation similar to 'result[...] = ...'; do not
use notation similar to 'result = ...' as this will simply define
a new local variable called result, hiding the variable passed into
this function.
If an error is detected, raise an appropriate exception. ValueError
is commonly used for unexpected values. IndexError is commonly used
for unexpected array sizes.
Arguments:
result - numpy float array to be assigned
result_bdf - numpy read-only float array of one element giving the
missing-data value to be used when assigning result
inputs - tuple of numpy read-only float arrays giving the input
values provided by the caller
input_bdfs - numpy read-only float arrays of one element giving the
missing-data value for the corresponding inputs array
'''
# Create masks of values that are undefined and that are defined
bad_mask = ( inputs[0] == input_bdfs[0] )
good_mask = numpy.logical_not(bad_mask)
result[good_mask] = inputs[0][good_mask]
result[bad_mask] = result_bdf
return
|
StarcoderdataPython
|
3334883
|
from .pytorch_helpers import cuda_to_numpy
import numpy as np
from .costum_loss_functions import CostumMetric, INetworkLossFunction
import torch
import torch.nn.functional as F
class Accuracy(CostumMetric):
def __init__(self):
self.mode = 'max'
self.__name__ = 'acc'
def __call__(self, y_pred, y_true):
y_pred = cuda_to_numpy(y_pred)
y_pred = np.argmax(y_pred, axis=-1)
y_true = cuda_to_numpy(y_true)
y_true = y_true[..., 0]
return np.mean(y_pred == y_true)
class DiceLoss(INetworkLossFunction):
def __init__(self, class_weights=None):
self.__name__ = 'dice_loss'
self.mode = 'min'
if class_weights is not None:
self.class_weights = torch.tensor(
class_weights,
requires_grad=False).view(1,
len(class_weights),
1,
1)
else:
self.class_weights = None
def __call__(self, logits, true, eps=1e-7):
"""Computes the Sørensen–Dice loss.
Note that PyTorch optimizers minimize a loss. In this
case, we would like to maximize the dice coefficient, so we
return one minus the dice coefficient.
Args:
true: a tensor of shape [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
dice_loss: the Sørensen–Dice loss.
"""
num_classes = logits.shape[1]
if num_classes == 1:
true_1_hot = torch.eye(num_classes + 1)[true]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
true_1_hot_f = true_1_hot[:, 0:1, :, :]
true_1_hot_s = true_1_hot[:, 1:2, :, :]
true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
pos_prob = torch.sigmoid(logits)
neg_prob = 1 - pos_prob
probas = torch.cat([pos_prob, neg_prob], dim=1)
else:
true_1_hot = torch.eye(num_classes)[true]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
probas = F.softmax(logits, dim=1)
dims = (0,) + tuple(range(2, true.ndimension()))
probas = probas.cuda()
# apply weights
if self.class_weights is not None:
probas = probas * self.class_weights
true_1_hot = true_1_hot.cuda()
intersection = torch.sum(probas * true_1_hot, dims)
cardinality = torch.sum(probas + true_1_hot, dims)
dice_loss = (2. * intersection / (cardinality + eps)).mean()
return (1.0 - dice_loss.cuda())
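# Hedged usage sketch (assumes a CUDA device, since this implementation moves tensors to
# the GPU internally; shapes are illustrative): logits are [B, C, H, W] and targets are
# integer class indices of shape [B, H, W].
if __name__ == "__main__" and torch.cuda.is_available():
    example_logits = torch.randn(2, 3, 8, 8)
    example_targets = torch.randint(0, 3, (2, 8, 8))
    print(DiceLoss()(example_logits, example_targets))  # scalar tensor, lower is better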
|
StarcoderdataPython
|
1636277
|
from aioazstorage import TableClient
from os import environ
from datetime import datetime
from uuid import uuid1
from time import time
from asyncio import set_event_loop_policy, Task, gather
try:
from uvloop import get_event_loop, EventLoopPolicy
set_event_loop_policy(EventLoopPolicy())
except ImportError:
from asyncio import get_event_loop
# TODO: add SAS token support, reference:
# https://github.com/yokawasa/azure-functions-python-samples/blob/master/blob-sas-token-generator/function/run.py
STORAGE_ACCOUNT=environ['STORAGE_ACCOUNT']
STORAGE_KEY=environ['STORAGE_KEY']
OPERATION_COUNT=int(environ.get('OPERATION_COUNT',100))
async def main():
t = TableClient(STORAGE_ACCOUNT, STORAGE_KEY)
#print("Table Deletion", end=" ")
#print((await t.deleteTable('aiotest')).status)
print("Table Creation", end=" ")
print((await t.createTable('aiotest')).status)
print("Table Query", end=" ")
async for item in t.getTables({"$filter": "TableName eq 'aiotest'"}):
print(item['TableName'], end=" ")
print("\nInsertion:", end=" ")
tasks = []
for i in range(OPERATION_COUNT):
tasks.append(Task(t.insertEntity('aiotest', {
"Address":"Mountain View",
"Age":23 + i,
"AmountDue":200.23,
"CustomerCode": str(uuid1()), # send this as string intentionally
"CustomerSince":datetime.now(),
"IsActive": True,
"NumberOfOrders": 255,
"PartitionKey":"mypartitionkey",
"RowKey": "Customer%d" % i
})))
start = time()
res = await gather(*tasks)
print("{} operations/s".format(OPERATION_COUNT/(time()-start)))
#print([r.status for r in res])
print("Deletion:")
tasks = []
for i in range(OPERATION_COUNT):
tasks.append(Task(t.deleteEntity('aiotest', {
"PartitionKey":"mypartitionkey",
"RowKey": "Customer%d" % i
})))
start = time()
res = await gather(*tasks)
print("{} operations/s".format(OPERATION_COUNT/(time()-start)))
#print([r.status for r in res])
print("Upsert:")
tasks = []
for i in range(OPERATION_COUNT):
tasks.append(Task(t.insertOrReplaceEntity('aiotest', {
"Address":"Mountain View",
"Age": 23 - i,
"AmountDue": 0,
"CustomerCode": uuid1(), # this updates the entry schema as well
"CustomerSince":datetime.now(),
"IsActive": True,
"NumberOfOrders": 0,
"PartitionKey":"mypartitionkey",
"RowKey": "Customer%d" % i
})))
start = time()
res = await gather(*tasks)
print("{} operations/s".format(OPERATION_COUNT/(time()-start)))
#print([r.status for r in res])
print("Query")
async for item in t.queryEntities('aiotest', {"$filter": "Age gt 0"}):
print(item['RowKey'], end= " ")
print()
entities = []
for i in range(OPERATION_COUNT):
entities.append({
"Address":"Mountain View",
"Age":23 + i,
"AmountDue":200.23,
"CustomerCode": str(uuid1()), # send this as string intentionally
"<EMAIL>": "Edm.DateTime",
"CustomerSince":datetime.now(),
"IsActive": True,
"NumberOfOrders": 255,
"PartitionKey":"mypartitionkey",
"RowKey": "Customer%d" % i
})
start = time()
res = await t.batchUpdate('aiotest', entities)
print("{} operations/s".format(OPERATION_COUNT/(time()-start)))
print(res.status)
print(res.headers)
print(await res.text())
print()
await t.close()
if __name__ == '__main__':
loop = get_event_loop()
loop.run_until_complete(main())
|
StarcoderdataPython
|
4814952
|
import datetime
from database.database_schemas import Schemas
from database.dsstox.generic_substances import GenericSubstances
from database.dsstox.source_substances import SourceSubstances
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
from sqlalchemy.dialects.mysql import FLOAT
from sqlalchemy.orm import relationship
from database.base import Base
class SourceGenericSubstanceMappings(Base):
"""Maps to source_generic_substance_mappings table in dsstox databases."""
__tablename__ = 'source_generic_substance_mappings'
__table_args__ = {'schema': Schemas.dsstox_schema}
id = Column(Integer, primary_key=True, nullable=False)
fk_source_substance_id = Column(ForeignKey(SourceSubstances.id))
fk_generic_substance_id = Column(ForeignKey(GenericSubstances.id))
connection_reason = Column(String(255))
linkage_score = Column(FLOAT)
curator_validated = Column(Integer)
qc_notes = Column(String(1024))
created_by = Column(String(255), nullable=False)
updated_by = Column(String(255), nullable=False)
created_at = Column(DateTime, default=datetime.datetime.now, nullable=False)
updated_at = Column(DateTime, default=datetime.datetime.now, nullable=False)
source_substance = relationship("SourceSubstances")
generic_substance = relationship("GenericSubstances")
|
StarcoderdataPython
|
3252573
|
"""String utils module"""
import re
def replace_ascii(str):
# Substitute swedish characters for sensible counterparts
str = str.replace(u'\xc5','A')
str = str.replace(u'\xe5','a')
str = str.replace(u'\xc4','A')
str = str.replace(u'\xe4','a')
str = str.replace(u'\xd6','O')
str = str.replace(u'\xf6','o')
return str.encode('ascii','replace')
def hamming_distance(s1, s2):
"""Calculate the Hamming distance between two strings of equal lengths.
Raise ValueError if strings are of unequal length.
"""
if len(s1) != len(s2): raise ValueError('strings of unequal length')
return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))
def strip_extensions(fn, ext=[]):
"""Strip extensions from a filename.
:param fn: filename
:param ext: list of extensions to strip
:returns: stripped version of fn and extension
"""
pattern = "|".join(".*({})$".format(x) for x in ext)
if not pattern:
return (fn, None)
m = re.search(pattern, fn)
if not m:
return (fn, None)
# with alternation only one group matches, so pick it via lastindex instead of
# hard-coding group 1 (which is None whenever a later extension matches)
matched_ext = m.group(m.lastindex)
return (fn.replace(matched_ext, ""), matched_ext)
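# Hedged examples (doctest-style comments, not executed):
#
#     hamming_distance("GATTACA", "GACTATA")                    # -> 2
#     strip_extensions("sample_1.fastq.gz", ext=[".fastq.gz"])  # -> ("sample_1", ".fastq.gz")
#     strip_extensions("reads.fastq", ext=[".fastq.gz", ".fastq"])
#         # -> ("reads", ".fastq"); this case relies on the lastindex fix above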
|
StarcoderdataPython
|
3369657
|
"""
Unique usernames
You get an list of names and need to return a list of unique usernames.
For a duplicate name, add the next integer after the name
For example:
Input
['Julie', 'Emma', 'Zoe', 'Liam', 'Emma']
Output
['Julie', 'Emma', 'Zoe', 'Liam', 'Emma1']
Input
['Julie', 'Zoe', 'Zoe', 'Liam', 'Emma', 'Zoe']
Output
['Julie', 'Zoe', 'Zoe1', 'Liam', 'Emma', 'Zoe2']
"""
def users(names):
usernames = []
for name in names:  # iterate over every name; slicing off the first entry contradicted the docstring examples
if name not in usernames:
usernames.append(name)
else:
count = sum(name in user for user in usernames)
usernames.append(name + str(count))
return usernames
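# Quick self-check against the examples in the module docstring (illustrative only; the
# substring-based count above is good enough for these inputs):
if __name__ == "__main__":
    assert users(['Julie', 'Emma', 'Zoe', 'Liam', 'Emma']) == ['Julie', 'Emma', 'Zoe', 'Liam', 'Emma1']
    assert users(['Julie', 'Zoe', 'Zoe', 'Liam', 'Emma', 'Zoe']) == ['Julie', 'Zoe', 'Zoe1', 'Liam', 'Emma', 'Zoe2']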
|
StarcoderdataPython
|
4803189
|
<reponame>atamraka/PythonExamples<gh_stars>0
'''
Created on May 14, 2017
@author: rujina
'''
'''
select any 6 columns out of the columns inside csv file attached to this email and write it in other csv files
with the same column names.(do not select adjacent coloumns)
'''
# with open('write.csv', 'wb') as csv_write_file:
# writer=csv.writer(csv_write_file)
# with open('FL_insurance_sample.csv','rb') as csv_data:
# read_data=csv.reader(csv_data)
# for rowdata in read_data:
# writer.writerow([rowdata[0], rowdata[2], rowdata[4], rowdata[6], rowdata[8], rowdata[10]])
'''
Write a program that accepts sequence of lines as input and prints the lines after making all characters in
the sentence capitalized.
Suppose the following input is supplied to the program:
Hello world
Practice makes perfect
Then, the output should be:
HELLO WORLD
PRACTICE MAKES PERFECT
'''
# print "Enter/Paste your content. Ctrl-D to save it."
# contents = ""
#
# while True:
# line = raw_input("")
# if(line==""):
# break
# contents=contents+"\n"+line
# print contents.upper()
'''
3)-Define a function that can accept two strings as input and print the string with maximum length in console.
If two strings have the same length,
then the function should print all strings line by line.
'''
# no_of_lines = 2
# lines = []
# for i in xrange(no_of_lines):
# lines.append(raw_input())
# maxLineLen = max(lines, key=len)
# for st in lines:
# if(len(st) == len(maxLineLen)):
# print st
'''
4)-With a given list [12,24,35,24,88,120,155,88,120,155], write a program to print this list after removing all
duplicate values with original order preserved.
'''
# t=[12,24,35,24,88,120,155,88,120,155]
# list_suplicateRemoved=list(set(t))
# print list_suplicateRemoved
# list_suplicateRemoved.reverse()
# print list_suplicateRemoved
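# Hedged alternative sketch for exercise 4: set() does not keep insertion order, so an
# order-preserving pass is needed (kept commented out like the solutions above):
# t = [12,24,35,24,88,120,155,88,120,155]
# seen = set()
# deduped = [x for x in t if not (x in seen or seen.add(x))]
# print deduped # [12, 24, 35, 88, 120, 155]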
'''
5)- create html page consisting of all points of the course from w3schools.
'''
|
StarcoderdataPython
|
3305447
|
<reponame>nacknime-official/freelancehunt-api<gh_stars>1-10
#!usr/bin/python3
"""`Freelancehunt Documentation - Profiles API <https://apidocs.freelancehunt.com/?version=latest#7dfb1bc1-4d54-46d8-9c01-75b7a32f3db6>`_."""
from typing import List, Optional, Tuple, Union
from ..core import FreelancehuntObject
from ..models.user import Profile, Freelancer, Employer
__all__ = ('Profiles',)
class Profiles(FreelancehuntObject):
"""Provide operations with Profiles API part.
.. warning:: For directly usage please set `token` argument.
:param str token: your API token, optional
"""
def __init__(self, token: Optional[str] = None, **kwargs):
"""Create object to provide operations with Profiles API part.
:param token: your API token (only for directly usage, not inside Client class), defaults to None
"""
super().__init__(token, **kwargs)
@property
def my_profile(self) -> Union[Employer, Freelancer]:
"""Get my profile information.
:return: information of your account
"""
responce = self._get('/my/profile')
return Profile.de_json(**responce)
def get_freelancers_list(
self,
country_id: Optional[int] = None,
city_id: Optional[int] = None,
skill_id: Optional[int] = None,
login: Optional[str] = None,
pages: Optional[Union[int, Tuple[int], List[int]]] = 1
) -> List[Freelancer]:
"""Get filtered freelancer profiles.
:param country_id: freelancer from country (API-related Country identifier), defaults to None
:param city_id: freelancer from city (API-related City identifier), defaults to None
:param skill_id: freelancer skill (API-related Skill identifier), defaults to None
:param login: with the desired login, defaults to None
:param pages: number of pages, defaults to 1
:return: list of filtered freelancer profiles
"""
filters = {
'country_id': country_id,
'city_id': city_id,
'skill_id': skill_id,
'login': login
}
responce = self._multi_page_get('/freelancers', filters, pages)
return [Freelancer.de_json(**data) for data in responce]
def get_employers_list(
self,
country_id: Optional[int] = None,
city_id: Optional[int] = None,
login: Optional[str] = None,
pages: Optional[Union[int, Tuple[int], List[int]]] = 1
) -> List[Employer]:
"""Get filtered employer profiles.
:param country_id: employer from country (API-related Country identifier), defaults to None
:param city_id: employer from city (API-related City identifier), defaults to None
:param login: with the desired login, defaults to None
:param pages: number of pages, defaults to 1
:return: list of filtered employer profiles
"""
filters = {
'country_id': country_id,
'city_id': city_id,
'login': login
}
responce = self._multi_page_get('/employers', filters, pages)
return [Employer.de_json(**data) for data in responce]
def get_freelancer_datails(self, profile_id: int) -> Freelancer:
"""Get information about freelancer by identifier.
:param profile_id: the desired profile identifier
"""
responce = self._get(f'/freelancers/{profile_id}')
return Freelancer.de_json(**responce)
def get_employer_datails(self, profile_id: int) -> Employer:
"""Get information about employer by identifier.
:param profile_id: the desired profile identifier
"""
responce = self._get(f'/employers/{profile_id}')
return Employer.de_json(**responce)
|
StarcoderdataPython
|
20539
|
from abaqusConstants import *
from .OdbPart import OdbPart
from .OdbStep import OdbStep
from .SectionCategory import SectionCategory
from ..Amplitude.AmplitudeOdb import AmplitudeOdb
from ..BeamSectionProfile.BeamSectionProfileOdb import BeamSectionProfileOdb
from ..Filter.FilterOdb import FilterOdb
from ..Material.MaterialOdb import MaterialOdb
class Odb(AmplitudeOdb,
FilterOdb,
MaterialOdb,
BeamSectionProfileOdb):
"""The Odb object is the in-memory representation of an output database (ODB) file.
Attributes
----------
isReadOnly: Boolean
A Boolean specifying whether the output database was opened with read-only access.
amplitudes: dict[str, Amplitude]
A repository of :py:class:`~abaqus.Amplitude.Amplitude.Amplitude` objects.
filters: dict[str, Filter]
A repository of :py:class:`~abaqus.Filter.Filter.Filter` objects.
rootAssembly: OdbAssembly
An :py:class:`~abaqus.Odb.OdbAssembly.OdbAssembly` object.
jobData: JobData
A :py:class:`~abaqus.Odb.JobData.JobData` object.
parts: dict[str, OdbPart]
A repository of :py:class:`~abaqus.Odb.OdbPart.OdbPart` objects.
materials: dict[str, Material]
A repository of :py:class:`~abaqus.Material.Material.Material` objects.
steps: dict[str, OdbStep]
A repository of :py:class:`~abaqus.Odb.OdbStep.OdbStep` objects.
sections: dict[str, Section]
A repository of :py:class:`~abaqus.Section.Section.Section` objects.
sectionCategories: dict[str, SectionCategory]
A repository of :py:class:`~abaqus.Odb.SectionCategory.SectionCategory` objects.
sectorDefinition: SectorDefinition
A :py:class:`~abaqus.Odb.SectorDefinition.SectorDefinition` object.
userData: UserData
A :py:class:`~abaqus.Odb.UserData.UserData` object.
customData: RepositorySupport
A :py:class:`~abaqus.CustomKernel.RepositorySupport.RepositorySupport` object.
profiles: dict[str, Profile]
A repository of :py:class:`~abaqus.BeamSectionProfile.Profile.Profile` objects.
Notes
-----
This object can be accessed by:
.. code-block:: python
import odbAccess
session.odbs[name]
"""
def Part(self, name: str, embeddedSpace: SymbolicConstant, type: SymbolicConstant) -> OdbPart:
"""This method creates an OdbPart object. Nodes and elements are added to this object at a
later stage.
Notes
-----
This function can be accessed by:
.. code-block:: python
session.odbs[name].Part
Parameters
----------
name
A String specifying the part name.
embeddedSpace
A SymbolicConstant specifying the dimensionality of the Part object. Possible values are
THREE_D, TWO_D_PLANAR, and AXISYMMETRIC.
type
A SymbolicConstant specifying the type of the Part object. Possible values are
DEFORMABLE_BODY and ANALYTIC_RIGID_SURFACE.
Returns
-------
An OdbPart object.
"""
self.parts[name] = odbPart = OdbPart(name, embeddedSpace, type)
return odbPart
def Step(self, name: str, description: str, domain: SymbolicConstant, timePeriod: float = 0,
previousStepName: str = '', procedure: str = '', totalTime: float = None) -> OdbStep:
"""This method creates an OdbStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
session.odbs[name].Step
Parameters
----------
name
A String specifying the repository key.
description
A String specifying the step description.
domain
A SymbolicConstant specifying the domain of the step. Possible values are TIME,
FREQUENCY, ARC_LENGTH, and MODAL.The type of OdbFrame object that can be created for
this step is based on the value of the *domain* argument.
timePeriod
A Float specifying the time period of the step. *timePeriod* is required if
*domain*=TIME; otherwise, this argument is not applicable. The default value is 0.0.
previousStepName
A String specifying the preceding step. If *previousStepName* is the empty string, the
last step in the repository is used. If *previousStepName* is not the last step, this
will result in a change to the *previousStepName* member of the step that was in that
position. A special value 'Initial' refers to the internal initial model step and may be
used exclusively for inserting a new step at the first position before any other
existing steps. The default value is an empty string.
procedure
A String specifying the step procedure. The default value is an empty string. The
following is the list of valid procedures:
```
*ANNEAL
*BUCKLE
*COMPLEX FREQUENCY
*COUPLED TEMPERATURE-DISPLACEMENT
*COUPLED TEMPERATURE-DISPLACEMENT, CETOL
*COUPLED TEMPERATURE-DISPLACEMENT, STEADY STATE
*COUPLED THERMAL-ELECTRICAL, STEADY STATE
*COUPLED THERMAL-ELECTRICAL
*COUPLED THERMAL-ELECTRICAL, DELTMX
*DYNAMIC
*DYNAMIC, DIRECT
*DYNAMIC, EXPLICIT
*DYNAMIC, SUBSPACE
*DYNAMIC TEMPERATURE-DISPLACEMENT, EXPLICT
*ELECTROMAGNETIC, HIGH FREQUENCY, TIME HARMONIC
*ELECTROMAGNETIC, LOW FREQUENCY, TIME DOMAIN
*ELECTROMAGNETIC, LOW FREQUENCY, TIME DOMAIN, DIRECT
*ELECTROMAGNETIC, LOW FREQUENCY, TIME HARMONIC
*FREQUENCY
*GEOSTATIC
*HEAT TRANSFER
*HEAT TRANSFER, DELTMX=__
*HEAT TRANSFER, STEADY STATE
*MAGNETOSTATIC
*MAGNETOSTATIC, DIRECT
*MASS DIFFUSION
*MASS DIFFUSION, DCMAX=
*MASS DIFFUSION, STEADY STATE
*MODAL DYNAMIC
*RANDOM RESPONSE
*RESPONSE SPECTRUM
*SOILS
*SOILS, CETOL/UTOL
*SOILS, CONSOLIDATION
*SOILS, CONSOLIDATION, CETOL/UTOL
*STATIC
*STATIC, DIRECT
*STATIC, RIKS
*STEADY STATE DYNAMICS
*STEADY STATE TRANSPORT
*STEADY STATE TRANSPORT, DIRECT
*STEP PERTURBATION, *STATIC
*SUBSTRUCTURE GENERATE
*USA ADDED MASS GENERATION
*VISCO
```
totalTime
A Float specifying the analysis time spend in all the steps previous to this step. The
default value is −1.0.
Returns
-------
An OdbStep object.
Raises
------
- If *previousStepName* is invalid:
ValueError: previousStepName is invalid
"""
self.steps[name] = odbStep = OdbStep(name, description, domain, timePeriod, previousStepName, procedure,
totalTime)
return odbStep
def SectionCategory(self, name: str, description: str) -> SectionCategory:
"""This method creates a SectionCategory object.
Notes
-----
This function can be accessed by:
.. code-block:: python
session.odbs[*name*].SectionCategory
Parameters
----------
name
A String specifying the name of the category.
description
A String specifying the description of the category.
Returns
-------
A SectionCategory object.
"""
self.sectionCategories[name] = sectionCategory = SectionCategory(name, description)
return sectionCategory
|
StarcoderdataPython
|
138714
|
import hydra
from hydra.core.config_store import ConfigStore
from omegaconf import OmegaConf
from configs import TrainConfig
from jerex import model, util
cs = ConfigStore.instance()
cs.store(name="train", node=TrainConfig)
@hydra.main(config_name='train', config_path='configs/docred_joint')
def train(cfg: TrainConfig) -> None:
print(OmegaConf.to_yaml(cfg))
util.config_to_abs_paths(cfg.datasets, 'train_path', 'valid_path', 'test_path', 'types_path')
util.config_to_abs_paths(cfg.model, 'tokenizer_path', 'encoder_path')
util.config_to_abs_paths(cfg.misc, 'cache_path')
model.train(cfg)
if __name__ == '__main__':
train()
|
StarcoderdataPython
|
197832
|
<gh_stars>10-100
from apiwrapper.endpoints.endpoint import Endpoint
from apiwrapper.endpoints.monetary_account import MonetaryAccount
class DraftPayment(Endpoint):
__endpoint_draft_payment = "draft-payment"
@classmethod
def _get_base_endpoint(cls, user_id, account_id):
endpoint = MonetaryAccount._get_base_endpoint(user_id)
endpoint += "/%d/%s" % (
account_id,
cls.__endpoint_draft_payment
)
return endpoint
def get_all_draft_payments_for_account(self, user_id, account_id):
endpoint = self._get_base_endpoint(user_id, account_id)
return self._make_get_request(endpoint)
def get_draft_payment_by_id(self, user_id, account_id, draft_id):
endpoint = self._get_base_endpoint(user_id, account_id)
endpoint += "/%d" % draft_id
return self._make_get_request(endpoint)
|
StarcoderdataPython
|
3203919
|
<reponame>PoncinMatthieu/skrm
from __future__ import print_function
import os
import getopt
import sys
import subprocess
import re
def exit_with_usage(error=0, msg=""):
if error != 0:
print("Error: " + msg)
print("usage: ./skrm [OPTIONS] [COMMANDS] [TAGS]")
print("skrm stands for simple keyring manager, it stores keys with tags into a file encrypted using gpg.")
print("skrm will ask for the master password to encrypt/decrypt the storing file.")
print("OPTIONS:")
print("\t-h, --help: Print usage.")
print("\t-g, --get: Return keyrings matching strictly the given tags. This option is used by default. If a keyId is selected, a get or a search return only the keyring matching the keyId.")
print("\t-s, --search: Return keyrings matching the given tags (tags are interpreted as a regex expression).")
print("\t-c, --clip: Copy the key of the last matched keyring from a get or a search into the clipboard using xclip. Nothing will be printed out to the shell.")
print("COMMANDS:")
print("\t--file=[FILENAME]: use the given file to read/store keyrings.")
print("\t--recipient=[USER_ID_NAME]: set the user id name for gpg to get the key and encrypt the file.")
print("\t--pass=[MASTER_PASS]: set the master pass to use when encrypting or decrypting the file.")
print("\t--add=[KEY]: add a key to the file with the specified tags.")
print("\t--select=[KEYID]: select a keyring using its key id. To use with a command like \"remove\" or \"update\".")
print("\t--remove: remove the selected key.")
print("\t--update=[KEY]: update the selected key.")
print("\t--backup=[HOSTDEST]: scp the bdd file to the given host destination.")
print("TAGS:")
print("\tA list of strings to define tags you want to use for any commands keyring related management.")
sys.exit(error)
class KeyringManager:
def __init__(self, user_pref_path, bdd_path, argv):
self.read_user_prefs(user_pref_path, bdd_path)
try:
opts, args = getopt.getopt(argv, "hgsc", ["help", "file=", "get", "search", "pass=", "add=", "select=", "remove", "update=", "recipient=", "backup=", "clip"])
except getopt.GetoptError:
exit_with_usage(1, "Bad arguments.")
for opt, arg in opts:
if opt in ("-h", "--help"):
exit_with_usage()
elif opt == "--file":
self.filename = os.path.expanduser(arg)
elif opt in ("-g", "--get"):
self.command = "get"
elif opt in ("-s", "--search"):
self.command = "search"
elif opt == "--add":
self.command = "add"
self.key = arg
elif opt == "--select":
if arg.isdigit():
self.keyId = int(arg)
else:
exit_with_usage(1, "The given keyid is not a number.")
elif opt == "--remove":
self.command = "remove"
elif opt == "--update":
self.command = "update"
self.key = arg
elif opt == "--pass":
self.passphrase = arg
elif opt == "--recipient":
self.recipient = arg
elif opt == "--backup":
self.command = "backup"
self.hostdest = arg
elif opt in ("-c", "--clip"):
self.clip = 1
for arg in args:
self.tags.append(arg)
def read_user_prefs(self, user_pref_path, bdd_path):
user_pref_file = user_pref_path
self.filename = bdd_path
self.command = "get"
self.passphrase = ""
self.tags = []
self.key = ""
self.keyId = -1
self.recipient = ""
self.clip = 0
try:
with open(user_pref_file, "r") as f:
for line in f:
option = line.split("=")
option[1] = option[1].rstrip('\n')
if option[0][0] != '#':
if option[0] == "file":
self.filename = option[1]
elif option[0] == "recipient":
self.recipient = option[1]
except IOError: # user prefs not found, do nothing; args must be defined as command line arguments.
pass
def load_raw_bdd(self):
""" Decript gpg file and return the content """
args = ["gpg", "-dq"]
if self.passphrase:
args.append("--no-use-agent")
args.append("--passphrase")
args.append(self.passphrase)
args.append(self.filename)
p = subprocess.Popen(args, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True)
stdout, stderr = p.communicate(None)
if stdout == "" and stdout != "":
print(stderr)
exit(1)
return stdout.rstrip()
def save_raw_bdd(self, raw):
""" Encript gpg file """
args = ["gpg", "--yes", "-e", "-r", self.recipient, "-o", self.filename]
p = subprocess.Popen(args, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True)
stdout, stderr = p.communicate(raw)
stdout = stdout.rstrip()
stderr = stderr.rstrip()
if stdout != "":
print(stdout)
if stderr != "":
print(stderr)
def parse_raw(self, raw):
bdd = []
if raw:
keyrings = raw.split(b"\x03")
for keyring in keyrings:
bdd.append(keyring.split(b"\x02"))
return bdd
def parse_bdd(self, bdd):
raw = b""
bddLen = len(bdd)
for i, keyring in enumerate(bdd):
keyringLen = len(keyring)
for j, tag in enumerate(keyring):
if isinstance(tag, str):
tag = bytes(tag, 'utf8')
raw += tag
if j < (keyringLen - 1):
raw += b"\x02"
if i < (bddLen - 1):
raw += b"\x03"
return raw
def save_bdd(self, bdd):
raw = self.parse_bdd(bdd)
self.save_raw_bdd(raw)
def get_fonctor(self, keyring, tag):
keyringLen = len(keyring)
for i, t in enumerate(keyring):
if i < (keyringLen - 1):
if tag.upper() == t.upper().decode('utf8'):
return 1
return 0
def search_fonctor(self, keyring, tag):
keyringLen = len(keyring)
p = re.compile(tag.upper())
for i, t in enumerate(keyring):
if i < (keyringLen - 1):
if p.search(t.upper().decode('utf8')) != None:
return 1
return 0
def print_keyring(self, i, keyring):
if self.clip == 0: # print the keyring
print(i, end='')
print(":", end='')
print(keyring)
else: # copy the keyring to the clipboard
from sys import platform as _platform
if _platform == "linux" or _platform == "linux2": # linux
args = ["xclip"]
p = subprocess.Popen(args, stdin = subprocess.PIPE)
p.communicate(keyring[len(keyring) - 1])
elif _platform == "darwin": # OS X
args = ["pbcopy"]
p = subprocess.Popen(args, stdin = subprocess.PIPE)
p.communicate(keyring[len(keyring) - 1])
elif _platform == "win32": # Windows
print("Can't copy on clipboard under windows, method not implemented!")
def print_matching_keyrings(self, bdd, Functor):
if self.keyId >= 0:
print(self.keyId, end='')
print(":", end='')
print(bdd[self.keyId])
else:
for i, keyring in enumerate(bdd):
if len(self.tags) == 0:
print(i, end='')
print(":", end='')
print(keyring)
else:
foundAll = 1
for tag in self.tags:
if Functor(keyring, tag) == 0:
foundAll = 0
if foundAll == 1:
self.print_keyring(i, keyring)
def command_get(self, bdd):
print("GET")
self.print_matching_keyrings(bdd, self.get_fonctor)
def command_search(self, bdd):
print("SEARCH")
self.print_matching_keyrings(bdd, self.search_fonctor)
def command_add(self, bdd):
newKeyring = self.tags
newKeyring.append(self.key)
bdd.append(newKeyring)
self.save_bdd(bdd)
print("Add OK")
def command_remove(self, bdd):
if (self.keyId < 0 or self.keyId >= len(bdd)):
exit_with_usage(1, "Wrong argument, the given key id must be a valid number.")
print("Removing: ", end='')
print(bdd[self.keyId])
del bdd[self.keyId];
self.save_bdd(bdd)
print("Remove OK")
def command_update(self, bdd):
if (self.keyId < 0 or self.keyId >= len(bdd)):
exit_with_usage(1, "Wrong argument, the given key id must be a valid number.")
bdd[self.keyId][len(bdd[self.keyId]) - 1] = self.key;
print("New keyring: ", end='')
print(bdd[self.keyId])
self.save_bdd(bdd)
print("Update OK")
def command_backup(self):
args = ["scp", self.filename, self.hostdest]
p = subprocess.Popen(args, stdin = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True)
stdout, stderr = p.communicate(None)
stderr = stderr.rstrip()
if stderr != "":
print(stderr)
print("Backup Failed!")
exit(1)
print("Backup OK")
def run(self):
if self.command == "backup":
self.command_backup()
else:
raw_bdd = self.load_raw_bdd()
bdd = self.parse_raw(raw_bdd)
if self.command == "get":
self.command_get(bdd)
elif self.command == "search":
self.command_search(bdd)
elif self.command == "add":
self.command_add(bdd)
elif self.command == "remove":
self.command_remove(bdd)
elif self.command == "update":
self.command_update(bdd)
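# Hedged usage sketch (not part of the original file): the paths below are
# hypothetical; a real entry point would pass the user's pref file, the
# encrypted storage file and the command line arguments, matching the
# KeyringManager(user_pref_path, bdd_path, argv) constructor above.
# manager = KeyringManager("~/.skrm/user_prefs", "~/.skrm/bdd.gpg", sys.argv[1:])
# manager.run()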
|
StarcoderdataPython
|
1757447
|
import os.path
import sys
from download import download
import numpy as np
import pandas as pd
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + (os.path.sep + '..')*2)
import Coberny as cyb
from Coberny.url import *
def test_indice1():
url = 'https://raw.githubusercontent.com/ABernard27/PROJET-groupe-3/master/Document/Distance.csv'
path_target = "./Distance.csv"
download(url, path_target, replace = True)
#download(url_dist, path_dist, replace=True)
DISTANCE=pd.read_csv("Distance.csv", sep=',')
assert cyb.ind('Agde Pezenas', DISTANCE) == 2
def test_indice2():
url = 'https://raw.githubusercontent.com/ABernard27/PROJET-groupe-3/master/Document/Distance.csv'
path_target = "./Distance.csv"
download(url, path_target, replace = True)
#download(url_dist, path_dist, replace=True)
DISTANCE=pd.read_csv("Distance.csv", sep=',')
assert cyb.ind('Castelnaudary', DISTANCE) == 17
def test_indice3():
url = 'https://raw.githubusercontent.com/ABernard27/PROJET-groupe-3/master/Document/Distance.csv'
path_target = "./Distance.csv"
download(url, path_target, replace = True)
#download(url_dist, path_dist, replace=True)
DISTANCE=pd.read_csv("Distance.csv", sep=',')
assert cyb.ind('Montgiscard', DISTANCE) == 22
def KMaxConstraint():
download(url_prix , path_prix , replace=True)
assert cyb.GetKMaxConstraint(df_price, 'Sete', 'Narbonne est ') == 4
def cheaperPath():
download(url_prix , path_prix , replace=True)
couple = cyb.FindBestPathForPrice(df_price, 'St-Jean-de-Vedas', 'Montgiscard', 3)
cheaper_path = couple[0]
assert cheaper_path == ['St-Jean-de-Vedas', 'Sete', '<NAME>', '<NAME>',
'Montgiscard']
def PricecheaperPath():
download(url_prix , path_prix , replace=True)
couple = cyb.FindBestPathForPrice(df_price, 'St-Jean-de-Vedas', 'Bram', 3)
PriceOfCheaperPath = couple[1]
assert PriceOfCheaperPath == 14.7
|
StarcoderdataPython
|
1748594
|
<gh_stars>1-10
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from optparse import make_option
import os
import sys
import glob
import shutil
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
# Based on the collectmedia management command by <NAME> (exogen)
# http://blog.brianbeck.com/post/50940622/collectmedia
class Command(BaseCommand):
media_dirs = ['media']
ignore_apps = ['django.contrib.admin']
exclude = ['CVS', '.*', '*~']
option_list = BaseCommand.option_list + (
make_option('--media-root', default=settings.MEDIA_ROOT, dest='media_root', metavar='DIR',
help="Specifies the root directory in which to collect media files."),
make_option('-n', '--dry-run', action='store_true', dest='dry_run',
help="Do everything except modify the filesystem."),
make_option('-d', '--dir', action='append', default=media_dirs, dest='media_dirs', metavar='NAME',
help="Specifies the name of the media directory to look for in each app."),
make_option('-e', '--exclude', action='append', default=exclude, dest='exclude', metavar='PATTERNS',
help="A space-delimited list of glob-style patterns to ignore. Use multiple times to add more."),
make_option('-l', '--link', action='store_true', dest='link',
help="Create a symbolic link to each file instead of copying."),
make_option('-i', '--interactive', action='store_true', dest='interactive',
help="Ask before modifying files and selecting from multiple sources."),
make_option('-t', '--theme', default=settings.PINAX_THEME, dest='theme', metavar='DIR',
help="Use this Pinax theme as a the basis."
)
)
help = 'Collect media files from installed apps, Pinax and project in a single media directory.'
args = '[appname ...]'
def handle(self, *app_labels, **options):
if not app_labels:
app_labels = settings.INSTALLED_APPS
short_app_labels = [label.split('.')[-1] for label in app_labels]
interactive = options.get('interactive', False)
dry_run = options.get('dry_run', False)
exclude = options.get('exclude')
theme = options.get('theme', settings.PINAX_THEME)
media_root = options.get('media_root', settings.MEDIA_ROOT)
pinax_media_root = os.path.join(settings.PINAX_ROOT, 'media', theme)
project_media_root = os.path.join(settings.PROJECT_ROOT, 'media')
if dry_run:
print "\n DRY RUN! NO FILES WILL BE MODIFIED."
# This mapping collects files that may be copied. Keys are what the
# file's path relative to `media_root` will be when copied. Values
# are a list of 2-tuples containing the name of the app providing
# the file and the file's absolute path. The list will have a length
# greater than 1 if multiple apps provide a media file with the same
# relative path.
media_files = {}
for app in app_labels:
if app not in self.ignore_apps:
for rel_path, abs_path in self.handle_app(app, **options):
media_files.setdefault(rel_path, []).append((app, abs_path))
for location in (pinax_media_root, project_media_root):
if not os.path.isdir(location):
continue
app_labels = []
app_labels[:] = self.filter_names(os.listdir(location), exclude=exclude)
for app in app_labels:
if app in short_app_labels and app not in self.ignore_apps:
for rel_path, abs_path in self.handle_override(app, location, **options):
media_files.setdefault(rel_path, []).append((app, abs_path))
# Forget the unused versions of a media file
for f in media_files:
media_files[f] = dict(media_files[f]).items()
# Stop if no media files were found
if not media_files:
print "\nNo media found."
return
# Try to copy in some predictable order.
destinations = list(media_files)
destinations.sort()
for destination in destinations:
sources = media_files[destination]
first_source, other_sources = sources[0], sources[1:]
if interactive and other_sources:
first_app = first_source[0]
app_sources = dict(sources)
print "\nThe file %r is provided by multiple apps:" % destination
print "\n".join([" %s" % app for (app, source) in sources])
message = "Enter the app that should provide this file [%s]: " % first_app
while True:
app = raw_input(message)
if not app:
app, source = first_source
break
elif app in app_sources:
source = app_sources[app]
break
else:
print "The app %r does not provide this file." % app
else:
app, source = first_source
print "\nSelected %r provided by %r." % (destination, app)
# Move in site_media/<app_label>/<file>
destination = os.path.join(app.split('.')[-1], destination)
self.process_file(source, destination, media_root, **options)
def handle_override(self, app, location, **options):
media_dirs = options.get('media_dirs')
exclude = options.get('exclude')
for media_dir in media_dirs:
app_media = os.path.join(location, app)
if os.path.isdir(app_media):
prefix_length = len(app_media) + len(os.sep)
for root, dirs, files in os.walk(app_media):
# Filter `dirs` and `files` based on the exclusion pattern.
dirs[:] = self.filter_names(dirs, exclude=exclude)
files[:] = self.filter_names(files, exclude=exclude)
for filename in files:
absolute_path = os.path.join(root, filename)
relative_path = absolute_path[prefix_length:]
yield (relative_path, absolute_path)
def handle_app(self, app, **options):
if isinstance(app, basestring):
app = __import__(app, {}, {}, [''])
media_dirs = options.get('media_dirs')
exclude = options.get('exclude')
app_root = os.path.dirname(app.__file__)
for media_dir in media_dirs:
app_media = os.path.join(app_root, media_dir)
if os.path.isdir(app_media):
prefix_length = len(app_media) + len(os.sep)
for root, dirs, files in os.walk(app_media):
# Filter `dirs` and `files` based on the exclusion pattern.
dirs[:] = self.filter_names(dirs, exclude=exclude)
files[:] = self.filter_names(files, exclude=exclude)
for filename in files:
absolute_path = os.path.join(root, filename)
relative_path = absolute_path[prefix_length:]
yield (relative_path, absolute_path)
def process_file(self, source, destination, root, link=False, **options):
dry_run = options.get('dry_run', False)
interactive = options.get('interactive', False)
destination = os.path.abspath(os.path.join(root, destination))
if not dry_run:
# Get permission bits and ownership of `root`.
try:
root_stat = os.stat(root)
except os.error, e:
mode = 0777 # Default for `os.makedirs` anyway.
uid = gid = None
else:
mode = root_stat.st_mode
uid, gid = root_stat.st_uid, root_stat.st_gid
destination_dir = os.path.dirname(destination)
try:
# Recursively create all the required directories, attempting
# to use the same mode as `root`.
os.makedirs(destination_dir, mode)
except os.error, e:
# This probably just means the leaf directory already exists,
# but if not, we'll find out when copying or linking anyway.
pass
else:
os.lchown(destination_dir, uid, gid)
if link:
success = self.link_file(source, destination, interactive, dry_run)
else:
success = self.copy_file(source, destination, interactive, dry_run)
if success and None not in (uid, gid):
# Try to use the same ownership as `root`.
os.lchown(destination, uid, gid)
def copy_file(self, source, destination, interactive=False, dry_run=False):
"Attempt to copy `source` to `destination` and return True if successful."
if interactive:
exists = os.path.exists(destination) or os.path.islink(destination)
if exists:
print "The file %r already exists." % destination
if not self.prompt_overwrite(destination):
return False
print "Copying %r to %r." % (source, destination)
if not dry_run:
try:
os.remove(destination)
except os.error, e:
pass
shutil.copy2(source, destination)
return True
return False
def link_file(self, source, destination, interactive=False, dry_run=False):
"Attempt to link to `source` from `destination` and return True if successful."
if sys.platform == 'win32':
message = "Linking is not supported by this platform (%s)."
raise os.error(message % sys.platform)
if interactive:
exists = os.path.exists(destination) or os.path.islink(destination)
if exists:
print "The file %r already exists." % destination
if not self.prompt_overwrite(destination):
return False
if not dry_run:
try:
os.remove(destination)
except os.error, e:
pass
print "Linking to %r from %r." % (source, destination)
if not dry_run:
os.symlink(source, destination)
return True
return False
def prompt_overwrite(self, filename, default=True):
"Prompt the user to overwrite and return their selection as True or False."
yes_values = ['Y']
no_values = ['N']
if default:
prompt = "Overwrite? [Y/n]: "
yes_values.append('')
else:
prompt = "Overwrite? [y/N]: "
no_values.append('')
while True:
overwrite = raw_input(prompt).strip().upper()
if overwrite in yes_values:
return True
elif overwrite in no_values:
return False
else:
print "Select 'Y' or 'N'."
def filter_names(self, names, exclude=None, func=glob.fnmatch.filter):
if exclude is None:
exclude = []
elif isinstance(exclude, basestring):
exclude = exclude.split()
else:
exclude = [pattern for patterns in exclude for pattern in patterns.split()]
excluded_names = set(
[name for pattern in exclude for name in func(names, pattern)]
)
return set(names) - excluded_names
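# Hedged usage note (not part of the original file): Django names a management
# command after its module file under <app>/management/commands/, so this class
# would be invoked as "python manage.py <module_name> [appname ...]" together
# with the options declared above (e.g. --link, --dry-run, --media-root=DIR).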
|
StarcoderdataPython
|
111468
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
version = "0.0.25"
setuptools.setup(
name="commondtools",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="Common D-tools.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/M0Rph3U56031769/commondtools",
packages=setuptools.find_packages(),
include_package_data=True,
install_requires=[
"astroid==2.4.2",
"bleach==3.3.0",
"bump2version==1.0.1",
"bumpversion==0.6.0",
"certifi==2020.12.5",
"cffi==1.14.4",
"chardet==3.0.4",
"colorama==0.4.4",
"coverage==5.3",
"cryptography==3.3.2",
"docutils==0.16",
"idna>=2.5",
"importlib-metadata==3.1.0",
"isort==5.6.4",
"jeepney==0.6.0",
"keyring==21.5.0",
"lazy-object-proxy==1.4.3",
"mccabe==0.6.1",
"numpy==1.19.4",
"packaging==20.7",
"pandas==1.1.4",
"pkginfo==1.6.1",
"pycparser==2.20",
"Pygments~>2.7.4",
"pylint==2.6.0",
"pyparsing==2.4.7",
"python-dateutil==2.8.1",
"pytz==2020.4",
"pywin32-ctypes==0.2.0",
"readme-renderer==28.0",
"requests==2.25.0",
"requests-toolbelt==0.9.1",
"rfc3986==1.4.0",
"SecretStorage==3.3.0",
"selenium==3.141.0",
"six==1.15.0",
"toml==0.10.2",
"tqdm==4.54.0",
"twine==3.2.0",
"urllib3~>1.26.5",
"webencodings==0.5.1",
"wrapt==1.12.1",
"zipp==3.4.0"
],
classifiers=[
"Programming Language :: Python :: 3.9",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3',
)
|
StarcoderdataPython
|
3204141
|
<filename>algorithms/sorting/selection_sort.py<gh_stars>1-10
"""
Selection sort always outperforms bubble sort. It is unstable.
Selection sort also does fewer swap operations, and by extension memory writes,
than bubble sort. In the worst case it does n-1 swap operations.
The core principle of selection sort is to divide the list to be sorted into a
sorted and an unsorted part. With each iteration the unsorted part is traversed and manipulated.
This is repeated until there is no unsorted part left. How is this achieved?
With each iteration the smallest item in the unsorted part is spotted and swapped with the
leftmost item in the unsorted part. The smallest item, now in place of the leftmost item,
then becomes part of the sorted part.
The number of elements to be traversed decreases with each iteration.
For example, given an array [1,5,2,7,0,11]:
For iteration one we consider elements from index 0 to the last index. The smallest element
is 0, so it is swapped with 1, giving [0,5,2,7,1,11]. Our sorted part is now [0] and the unsorted part is
[5,2,7,1,11].
For iteration 2, we again find the smallest item in the unsorted part and swap it with the leftmost item.
This entails swapping 1 with 5, which yields [1,2,7,5,11]. Our sorted part is now [0, 1], and the unsorted part
is [2,7,5,11].
We repeat this cycle until there is no unsorted part left.
Assumption: sort in ascending order
Run time complexity: O(n^2)
"""
class SelectionSort():
def __init__(self, nums) -> None:
self.nums = nums
def sort(self) -> None:
for i in range(len(self.nums)-1):
index = i # index of the smallest item in the unsorted part
for j in range(i, len(self.nums)):
if self.nums[j] < self.nums[index]:
index = j
if index != i: # To avoid swapping an element with itself
self._swap(i, index)
def _swap(self, i, j) -> None:
self.nums[i], self.nums[j] = self.nums[j], self.nums[i]
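# Minimal usage sketch (not part of the original file), reusing the example
# array from the docstring above; sort() works in place on self.nums.
if __name__ == "__main__":
    sorter = SelectionSort([1, 5, 2, 7, 0, 11])
    sorter.sort()
    print(sorter.nums)  # expected output: [0, 1, 2, 5, 7, 11]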
|
StarcoderdataPython
|
167733
|
<filename>Sem5SLLPYTHON/FINALS/partb/1b/1.py
def remdup(a):
return list(set(a))
l=[]
for i in range(0,5):
l.append(input())
print(l)
rev = l[::-1]
print(rev)
print(remdup(l))
print([i for i in range(0,10) if i%2==0])
|
StarcoderdataPython
|
4836244
|
<reponame>chiragmatkar/testplan<gh_stars>0
"""
This file is based on difflib from the Python standard library (version: 2.7.9).
It provides diff (context/unified) functions with more options, like GNU diff,
including: --ignore-space-change, --ignore-whitespace, --ignore-blank-lines
Due to the different algorithm, its output might be slightly different
compared with that of gnu diff or windiff, but it won't be confusing.
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function diff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
"""
import os
import re
import heapq
import six
from collections import namedtuple as _namedtuple
from functools import reduce
from datetime import datetime
__all__ = [
"Match",
"SequenceMatcher",
"get_close_matches",
"Differ",
"IS_CHARACTER_JUNK",
"IS_LINE_JUNK",
"diff",
"context_diff",
"unified_diff",
]
Match = _namedtuple("Match", "a b size")
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
class SequenceMatcher(object):
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by <NAME> Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print(round(s.ratio(), 3))
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print("a[%d] and b[%d] match for %d elements" % block)
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a="", b="", autojunk=False):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use isbjunk.
# isbjunk
# for x in b, isbjunk(x) == isjunk(x) but much faster;
# it's really the __contains__ method of a hidden dict.
# DOES NOT WORK for x in a!
# isbpopular
# for x in b, isbpopular(x) is true iff b is reasonably long
# (at least 200 elements) and x accounts for more than 1 + 1% of
# its elements (when autojunk is enabled).
# DOES NOT WORK for x in a!
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.a_real_content = (
self.a.real_content()
if isinstance(self.a, FuzzyMatchingString)
else None
)
self.matching_blocks = self.opcodes = self._ratio = None
# cache the data for self.find_longest_match()
ajunk = (
{elt for elt in self.a if self.isjunk(elt)}
if self.isjunk
else set()
)
self.isajunk = ajunk.__contains__
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.b_real_content = (
self.b.real_content()
if isinstance(self.b, FuzzyMatchingString)
else None
)
self.matching_blocks = self.opcodes = self._ratio = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# also creates the fast isbjunk function ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1 + 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# <NAME>, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
for i, elt in enumerate(b):
indices = b2j.setdefault(elt, [])
indices.append(i)
# Purge junk elements
bjunk = set()
isjunk = self.isjunk
if isjunk:
for elt in list(b2j.keys()): # using list() since b2j is modified
if isjunk(elt):
bjunk.add(elt)
del b2j[elt]
# Purge popular elements that are not junk
popular = set()
n = len(b)
if self.autojunk and n >= 200:
ntest = n // 100 + 1
for elt, idxs in list(b2j.items()):
if len(idxs) > ntest:
popular.add(elt)
del b2j[elt]
# Now for x in b, isjunk(x) == x in junk, but the latter is much faster.
# Since the number of *unique* junk elements is probably small, the
# memory burden of keeping this set alive is likely trivial compared to
# the size of b2j.
self.isbjunk = bjunk.__contains__
self.isbpopular = popular.__contains__
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
j <= j'
and if j == j', i <= i'
In other words, of all maximal matching blocks, return one that
starts earliest in b, and of all those maximal matching blocks that
start earliest in b, return the one that starts earliest in a.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j = self.a, self.b, self.b2j
isajunk, isbjunk = self.isajunk, self.isbjunk
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
for i in range(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
indices = []
# TODO: (needs improvement) this loop is time-consuming. For immutable
# strings we could get indices from b2j directly, but we may compare
# mutable objects here, i.e. instances of FuzzyMatchingString.
for key, val in b2j.items():
if a[i] == key:
indices = val
break
for j in indices:
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j - 1, 0) + 1
# with condition 'k == bestsize and j-k+1 < bestj', the longest
# substring starts earliest in b will be selected with priority
if k > bestsize or k == bestsize and j - k + 1 < bestj:
besti, bestj, bestsize = i - k + 1, j - k + 1, k
j2len = newj2len
# Extend the best by non-junk and junk elements repeatedly until
# no element can be appended to the front or back of the best match.
prev_bestsize = 0
while prev_bestsize != bestsize:
prev_bestsize = bestsize
# Extend the best by non-junk elements on each end. In particular,
# "popular" non-junk elements aren't in b2j, which greatly speeds
# the inner loop above, but also means "the best" match so far
# doesn't contain any junk *or* popular non-junk elements.
while (
besti > alo
and bestj > blo
and not isajunk(a[besti - 1])
and not isbjunk(b[bestj - 1])
and a[besti - 1] == b[bestj - 1]
):
besti, bestj, bestsize = besti - 1, bestj - 1, bestsize + 1
while (
besti + bestsize < ahi
and bestj + bestsize < bhi
and not isajunk(a[besti + bestsize])
and not isbjunk(b[bestj + bestsize])
and a[besti + bestsize] == b[bestj + bestsize]
):
bestsize += 1
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while (
besti > alo
and bestj > blo
and isajunk(a[besti - 1])
and isbjunk(b[bestj - 1])
and a[besti - 1] == b[bestj - 1]
):
besti, bestj, bestsize = besti - 1, bestj - 1, bestsize + 1
while (
besti + bestsize < ahi
and bestj + bestsize < bhi
and isajunk(a[besti + bestsize])
and isbjunk(b[bestj + bestsize])
and a[besti + bestsize] == b[bestj + bestsize]
):
bestsize += 1
return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> s.get_matching_blocks()
[Match(a=0, b=0, size=2),
Match(a=3, b=2, size=2),
Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
# ('queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i + k < ahi and j + k < bhi:
queue.append((i + k, ahi, j + k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append((la, lb, 0))
self.matching_blocks = list(map(Match._make, non_adjacent))
return self.matching_blocks
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ""
if i < ai and j < bj:
tag = "replace"
elif i < ai:
tag = "delete"
elif j < bj:
tag = "insert"
if tag:
answer.append((tag, i, ai, j, bj))
i, j = ai + size, bj + size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append(("equal", ai, i, bj, j))
return answer
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None, a, b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8),
('insert', 8, 8, 8, 9),
('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == "equal":
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2 - n), i2, max(j1, j2 - n), j2
if codes[-1][0] == "equal":
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == "equal" and i2 - i1 > nn:
group.append((tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)))
yield group
group = []
i1, j1 = max(i1, i2 - n), max(j1, j2 - n)
group.append((tag, i1, i2, j1, j2))
if group and not (len(group) == 1 and group[0][0] == "equal"):
yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
if self._ratio:
return self._ratio
a = self.a if self.a_real_content is None else self.a_real_content
b = self.b if self.b_real_content is None else self.b_real_content
# compute the similar ratio and cache it
matches = reduce(
lambda count, triple: count + triple[-1],
SequenceMatcher(self.isjunk, a, b).get_matching_blocks(),
0,
)
self._ratio = _calculate_ratio(matches, len(a) + len(b))
return self._ratio
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
a = self.a if self.a_real_content is None else self.a_real_content
b = self.b if self.b_real_content is None else self.b_real_content
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(a) + len(b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la = len(
self.a if self.a_real_content is None else self.a_real_content
)
lb = len(
self.b if self.b_real_content is None else self.b_real_content
)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if (
s.real_quick_ratio() >= cutoff
and s.quick_ratio() >= cutoff
and s.ratio() >= cutoff
):
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = heapq.nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
class FuzzyMatchingString(str):
"""
Inherits the built-in str, but two strings can be considered equal when
their content is in accord with the defined rules.
"""
def __new__(cls, value, *args, **kwargs):
return super(FuzzyMatchingString, cls).__new__(cls, value)
def __init__(self, value, *args, **kwargs):
raise NotImplementedError("FuzzyMatchingString: Not implemented!")
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.real_content() == other.real_content()
else:
return self.real_content() == str(other)
def __ne__(self, other):
return not self.__eq__(other)
def real_content(self):
return None
class SpaceIgnoredString(FuzzyMatchingString):
"""
Inherits FuzzyMatchingString; ignores whitespace and space changes
when compared with other strings.
"""
def __init__(
self, value, ignore_space_change=False, ignore_whitespaces=False
):
self.ignore_space_change = ignore_space_change
self.ignore_whitespaces = ignore_whitespaces
def real_content(self):
if self.ignore_whitespaces:
return re.sub(r"\s+", "", self)
elif self.ignore_space_change:
# gnu diff ignores all whitespace (including line-feed) on the
# right side when comparing with -b or --ignore-space-change,
# just simulate that behavior
return re.sub(r"\s+", " ", self).rstrip()
else:
return str(self)
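# Illustration (not part of the original module): with ignore_space_change=True,
# real_content() collapses runs of whitespace to a single space and strips the
# trailing space, so the following two instances compare equal.
# >>> SpaceIgnoredString("a  b \n", ignore_space_change=True) == \
# ...     SpaceIgnoredString("a b\n", ignore_space_change=True)
# True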
class Differ(object):
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Use get_opcodes() and get_merged_opcodes() to get a list of 5-tuples
describing how to turn a into b. The former can give detailed
transformation steps, especially the 'replace' operation for similar
lines will be recorded; it is useful for ndiff (not implemented in
this file), the latter gives concise transformation steps by merging
adjacent op items if they can be merged. get_grouped_opcodes() is used
for context_diff() and unified_diff().
Example: Comparing two texts.
>>> a = ['aaa\n', 'bbb\n', 'c\n', 'cc\n', 'ccc\n', '\n', 'ddd\n',
... 'eee\n', 'ggg\n']
>>> b = ['aaaa\n', 'bbbb\n', 'c\n', 'cc\n', 'ccc\n', 'dddd\n', 'hhh\n',
... 'fff\n', '\n', 'ggg\n']
>>> d = Differ()
>>> for op in d.get_opcodes(a, b): print(op)
...
('replace', 0, 1, 0, 1)
('replace', 1, 2, 1, 2)
('equal', 2, 5, 2, 5)
('delete', 5, 6, 5, 5)
('replace', 6, 7, 5, 6)
('replace', 7, 8, 6, 9)
('equal', 8, 9, 9, 10)
>>> for op in d.get_merged_opcodes(a, b): print(op)
...
('replace', 0, 2, 0, 2)
('equal', 2, 5, 2, 5)
('replace', 5, 8, 5, 9)
('equal', 8, 9, 9, 10)
>>> for op in d.get_grouped_opcodes(a, b, 1): print(op)
...
[('replace', 0, 2, 0, 2), ('equal', 2, 3, 2, 3)]
[('equal', 4, 5, 4, 5), ('replace', 5, 8, 5, 9), ('equal', 8, 9, 9, 10)]
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
"""
def __init__(
self,
linejunk=None,
charjunk=None,
ignore_space_change=False,
ignore_whitespaces=False,
ignore_blank_lines=False,
):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; as of Python 2.3, the underlying
SequenceMatcher class has grown an adaptive notion of "noise" lines
that's better than any static definition the author has ever been
able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
- `ignore_space_change`: refer to gnu diff option -b
- `ignore_whitespaces`: refer to gnu diff option -w
- `ignore_blank_lines`: refer to gnu diff option -B
"""
self.linejunk = linejunk
self.charjunk = charjunk
self.ignore_space_change = ignore_space_change
self.ignore_whitespaces = ignore_whitespaces
self.ignore_blank_lines = ignore_blank_lines
def get_opcodes(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
terminated strings, ready to be printed as-is via the writeline()
method of a file-like object.
Example:
>>> for op in Differ().get_opcodes('one\ntwo\nthree\n',
... 'ore\nthree\nemu\n'):
... print(op)
...
('replace', 0, 2, 0, 1)
('equal', 2, 3, 1, 2)
('insert', 3, 3, 2, 3)
"""
assert all(str(i) != "" for i in a) and all(str(j) != "" for j in b)
if self.ignore_space_change or self.ignore_whitespaces:
new_a, new_b = [], []
for i in a:
new_a.append(
SpaceIgnoredString(
i, self.ignore_space_change, self.ignore_whitespaces
)
)
for j in b:
new_b.append(
SpaceIgnoredString(
j, self.ignore_space_change, self.ignore_whitespaces
)
)
a, b = new_a, new_b
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == "replace":
# `_fancy_replace` can give us a more specific result, for
# example, it can recognize completely equal lines among
# a block of line junks. it is also useful when we want to
# show exact difference line by line like what ndiff does.
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag in ("equal", "delete", "insert"):
g = ((tag, alo, ahi, blo, bhi),)
else:
raise ValueError("unknown tag %r" % (tag,))
for tag, alo, ahi, blo, bhi in g:
yield (tag, alo, ahi, blo, bhi)
def get_merged_opcodes(self, a, b):
r"""
Similar to get_opcodes(), but adjacent items might be merged
into one, for example:
('equal', 2, 9, 0, 7)
('equal', 9, 11, 7, 9)
('replace', 11, 12, 9, 10)
('replace', 12, 13, 10, 11)
('replace', 13, 14, 11, 12)
will be merged as:
('equal', 2, 11, 0, 9)
('replace', 11, 14, 9, 12)
Another example:
('delete', 11, 12, 10, 10)
('replace', 12, 13, 10, 11)
will be merged as:
('replace', 11, 13, 10, 11)
"""
g = self._merge_opcodes(self.get_opcodes(a, b))
if self.ignore_blank_lines:
g = self._merge_opcodes(self._verify_blank_lines(a, b, g))
for tag, alo, ahi, blo, bhi in g:
yield (tag, alo, ahi, blo, bhi)
def get_grouped_opcodes(self, a, b, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8),
('insert', 8, 8, 8, 9),
('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
def _is_blank_block(code):
"Check if the opcode represents blank lines"
return code[0] == "equal" and (
code[1] == code[2] or code[3] == code[4]
)
def _check_adjacent_blank_block(codes, i):
"""Make change to the nearest opcode of blank lines if there are
no more than `n` lines between them."""
if i >= 0 and i < len(codes) and _is_blank_block(codes[i]):
# code[i-1][0] or code[i+1][0] MUST BE 'equal' if it exists
if (
i < len(codes) - 2
and codes[i + 1][2] - codes[i + 1][1] < n
and codes[i + 2][0] != "equal"
or i > 1
and codes[i - 1][2] - codes[i - 1][1] < n
and codes[i - 2][0] != "equal"
):
tag, i1, i2, j1, j2 = codes[i]
codes[i] = (
"insert" if i1 == i2 else "delete",
i1,
i2,
j1,
j2,
)
_check_adjacent_blank_block(codes, i - 2)
_check_adjacent_blank_block(codes, i + 2)
g = self._merge_opcodes(self.get_opcodes(a, b))
if self.ignore_blank_lines:
g = self._verify_blank_lines(a, b, g)
codes = list(g)
        # Blocks composed of blank lines have already had their tag changed to
        # 'equal', but in a unified or context diff we might have to output them.
if self.ignore_blank_lines:
for i in range(len(codes)):
_check_adjacent_blank_block(codes, i)
codes = list(self._merge_opcodes(codes))
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == "equal":
tag, i1, i2, j1, j2 = codes[0]
codes[0] = (tag, max(i1, i2 - n), i2, max(j1, j2 - n), j2)
if codes[-1][0] == "equal":
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = (tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n))
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == "equal" and i2 - i1 > nn:
group.append((tag, i1, min(i2, i1 + n), j1, min(j2, j1 + n)))
yield group
group = []
i1, j1 = max(i1, i2 - n), max(j1, j2 - n)
group.append((tag, i1, i2, j1, j2))
if group and not (len(group) == 1 and group[0][0] == "equal"):
yield group
def _merge_opcodes(self, generator):
"Algorithm for merging opcode"
prev_tag = ""
prev_alo, prev_ahi, prev_blo, prev_bhi = 0, 0, 0, 0
for tag, alo, ahi, blo, bhi in generator:
assert prev_ahi == alo and prev_bhi == blo
if prev_tag == tag or not prev_tag:
prev_tag, prev_ahi, prev_bhi = tag, ahi, bhi
elif tag == "equal" or prev_tag == "equal":
yield (prev_tag, prev_alo, prev_ahi, prev_blo, prev_bhi)
prev_tag = tag
prev_alo, prev_ahi, prev_blo, prev_bhi = alo, ahi, blo, bhi
else:
prev_tag, prev_ahi, prev_bhi = "replace", ahi, bhi
if prev_tag:
yield (prev_tag, prev_alo, prev_ahi, prev_blo, prev_bhi)
def _verify_blank_lines(self, a, b, g):
"Modify tag if all lines in a deletion or insertion block are blank"
for tag, alo, ahi, blo, bhi in g:
if (
tag == "delete"
and all(
str(a[i]) in ("\n", "\r\n", "\r") for i in range(alo, ahi)
)
or tag == "insert"
and all(
str(b[j]) in ("\n", "\r\n", "\r") for j in range(blo, bhi)
)
):
yield ("equal", alo, ahi, blo, bhi)
else:
yield (tag, alo, ahi, blo, bhi)
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> for op in Differ()._fancy_replace('abcDefghiJkl\n', 3, 12,
... 'abcdefGhijkl\n', 3, 12):
... print(op)
...
('replace', 3, 4, 3, 4)
('equal', 4, 5, 4, 5)
('equal', 5, 6, 5, 6)
('replace', 6, 7, 6, 7)
('equal', 7, 8, 7, 8)
('equal', 8, 9, 8, 9)
('replace', 9, 10, 9, 10)
('equal', 10, 11, 10, 11)
('equal', 11, 12, 11, 12)
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, junk_line_best_ratio, cutoff = 0.74, 0.74, 0.75
best_i, best_j, junk_line_best_i, junk_line_best_j = -1, -1, -1, -1
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in range(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in range(alo, ahi):
ai = a[i]
if ai == bj:
                    # here, at least one of ai and bj MUST BE a line junk
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if (
cruncher.real_quick_ratio() > best_ratio
and cruncher.quick_ratio() > best_ratio
and cruncher.ratio() > best_ratio
):
                    # a junk line should not be considered very similar to
                    # a normal line or another junk line unless no other
                    # similar normal lines are found
if self.linejunk and (
self.linejunk(ai) or self.linejunk(bj)
):
if cruncher.ratio() > junk_line_best_ratio:
junk_line_best_ratio = cruncher.ratio()
junk_line_best_i, junk_line_best_j = i, j
else:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
# priority: normal lines (pretty close) > line junks (identical)
# > line junks (pretty close)
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
if junk_line_best_ratio < cutoff:
                    # not even a non-identical "pretty close" junk line found;
                    # treat it as a straight replace
yield ("replace", alo, ahi, blo, bhi)
return
else:
# at least we can find junk lines that are pretty close
best_i, best_j = junk_line_best_i, junk_line_best_j
best_ratio = junk_line_best_ratio
else:
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None if they're not
# identical
# pump out diffs from before the synch point
for opcode in self._fancy_helper(a, alo, best_i, b, blo, best_j):
yield opcode
# generate op code on the synch pair
if eqi is None:
# the synch pair is similar
yield ("replace", best_i, best_i + 1, best_j, best_j + 1)
else:
# the synch pair is identical
yield ("equal", best_i, best_i + 1, best_j, best_j + 1)
# pump out diffs from after the synch point
for opcode in self._fancy_helper(
a, best_i + 1, ahi, b, best_j + 1, bhi
):
yield opcode
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = [("delete", alo, ahi, blo, bhi)]
elif blo < bhi:
g = [("insert", alo, ahi, blo, bhi)]
for opcode in g:
yield opcode
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
r"""
Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
Examples:
>>> IS_LINE_JUNK('\n')
True
>>> IS_LINE_JUNK(' # \n')
True
>>> IS_LINE_JUNK('hello\n')
False
"""
return pat(line) is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
r"""
Return 1 for ignorable character: iff `ch` is a space or tab.
Examples:
>>> IS_CHARACTER_JUNK(' ')
True
>>> IS_CHARACTER_JUNK('\t')
True
>>> IS_CHARACTER_JUNK('\n')
False
>>> IS_CHARACTER_JUNK('x')
False
"""
return ch in ws
########################################################################
### Diff
########################################################################
def diff(
a,
b,
ignore_space_change=False,
ignore_whitespaces=False,
ignore_blank_lines=False,
unified=False,
context=False,
):
r"""
Compare two blocks of text or two sequences of lines and generate delta
    as a normal/unified/context diff. Lines that contain only whitespace
    have lower priority when being matched.
    If a or b is a string, it will be split into a list of strings separated
    by '\n', '\r\n' or '\r', keeping the line terminators.
    By default the function generates the delta as a normal diff, but a
    unified or context diff can be requested instead. The parameter unified
    or context may also be set directly to a positive integer giving the
    number of context lines, which defaults to three.
- `ignore_space_change`: refer to gnu diff option -b
- `ignore_whitespaces`: refer to gnu diff option -w
- `ignore_blank_lines`: refer to gnu diff option -B
- `unified`: if True, output in unified format, default False
- `context`: if True, output in context format, default False
"""
a = a.splitlines(True) if isinstance(a, six.string_types) else a
b = b.splitlines(True) if isinstance(b, six.string_types) else b
assert isinstance(a, list) and isinstance(b, list)
if unified:
n = 3 if isinstance(unified, bool) else int(unified)
assert n > 0
g = unified_diff(
a,
b,
n,
ignore_space_change,
ignore_whitespaces,
ignore_blank_lines,
)
elif context:
        n = 3 if isinstance(context, bool) else int(context)
assert n > 0
g = context_diff(
a,
b,
n,
ignore_space_change,
ignore_whitespaces,
ignore_blank_lines,
)
else:
g = _diff(
a, b, ignore_space_change, ignore_whitespaces, ignore_blank_lines
)
return g
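# A minimal usage sketch (illustrative values, not part of the API contract):
#
#     old = 'one\ntwo\nthree\n'
#     new = 'ore\nthree\nemu\n'
#     print(''.join(diff(old, new)))             # normal diff
#     print(''.join(diff(old, new, unified=1)))  # unified diff, 1 context line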
########################################################################
### Basic Diff
########################################################################
def _dump_line(prefix, content):
"Add a prefix in front, also add line break at tail if there is not one"
if not content.endswith("\n"):
yield prefix + content + os.linesep
yield "\\ No newline at end of file" + os.linesep
else:
yield prefix + content
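# For example, _dump_line("> ", "emu") yields "> emu" followed by the
# "\ No newline at end of file" marker, while _dump_line("> ", "emu\n")
# yields only the prefixed line "> emu\n".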
def _diff(
a,
b,
ignore_space_change=False,
ignore_whitespaces=False,
ignore_blank_lines=False,
):
r"""
    Compare `a` and `b` (lists of strings); return a delta in GNU diff style.
- `ignore_space_change`: refer to gnu diff option -b
- `ignore_whitespaces`: refer to gnu diff option -w
- `ignore_blank_lines`: refer to gnu diff option -B
Example:
>>> difference = _diff('one\ntwo\nthree\n'.splitlines(True),
'ore\nthree\nemu\n'.splitlines(True))
>>> print(''.join(difference))
1,2c1
< one
< two
---
> ore
3a3
> emu
"""
for tag, alo, ahi, blo, bhi in Differ(
linejunk=IS_LINE_JUNK,
charjunk=None,
ignore_space_change=ignore_space_change,
ignore_whitespaces=ignore_whitespaces,
ignore_blank_lines=ignore_blank_lines,
).get_merged_opcodes(a, b):
if tag == "replace":
head_a = (
"{},{}".format(alo + 1, ahi) if ahi - alo > 1 else str(alo + 1)
)
head_b = (
"{},{}".format(blo + 1, bhi) if bhi - blo > 1 else str(blo + 1)
)
yield "{}c{}{}".format(head_a, head_b, os.linesep)
for line in a[alo:ahi]:
for text in _dump_line("< ", line):
yield text
yield "---" + os.linesep
for line in b[blo:bhi]:
for text in _dump_line("> ", line):
yield text
elif tag == "delete":
head_a = (
"{},{}".format(alo + 1, ahi) if ahi - alo > 1 else str(alo + 1)
)
yield "{}d{}{}".format(head_a, bhi, os.linesep)
for line in a[alo:ahi]:
for text in _dump_line("< ", line):
yield text
elif tag == "insert":
head_b = (
"{},{}".format(blo + 1, bhi) if bhi - blo > 1 else str(blo + 1)
)
yield "{}a{}{}".format(alo, head_b, os.linesep)
for line in b[blo:bhi]:
for text in _dump_line("> ", line):
yield text
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return "{}".format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return "{},{}".format(beginning, length)
def unified_diff(
a,
b,
n=3,
ignore_space_change=False,
ignore_whitespaces=False,
ignore_blank_lines=False,
):
r"""
Compare two sequences of lines; generate the delta as a unified diff.
Unified diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with ---, +++, or @@) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
The unidiff format normally has a header for filenames and modification
times. Here, "a.text" and "b.text" are displayed instead of file names,
and current UTC time instead of the modification time.
- `ignore_space_change`: refer to gnu diff option -b
- `ignore_whitespaces`: refer to gnu diff option -w
- `ignore_blank_lines`: refer to gnu diff option -B
Example:
>>> difference = unified_diff('one\ntwo\nthree\n'.splitlines(True),
'ore\nthree\nemu\n'.splitlines(True))
>>> print(''.join(difference))
--- a.text 2018-08-28 11:36:46 UTC
+++ b.text 2018-08-28 11:36:46 UTC
@@ -1,3 +1,3 @@
-one
-two
+ore
three
+emu
"""
started = False
for group in Differ(
linejunk=IS_LINE_JUNK,
charjunk=None,
ignore_space_change=ignore_space_change,
ignore_whitespaces=ignore_whitespaces,
ignore_blank_lines=ignore_blank_lines,
).get_grouped_opcodes(a, b, n):
if not started:
started = True
utc_time_str = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
fromdate = todate = "\t{}".format(utc_time_str)
yield "--- {}{}{}".format("a.text", fromdate, os.linesep)
yield "+++ {}{}{}".format("b.text", todate, os.linesep)
first, last = group[0], group[-1]
file1_range = _format_range_unified(first[1], last[2])
file2_range = _format_range_unified(first[3], last[4])
yield "@@ -{} +{} @@{}".format(file1_range, file2_range, os.linesep)
for tag, i1, i2, j1, j2 in group:
if tag == "equal":
for line in a[i1:i2]:
for text in _dump_line(" ", line):
yield text
continue
if tag in ("replace", "delete"):
for line in a[i1:i2]:
for text in _dump_line("-", line):
yield text
if tag in ("replace", "insert"):
for line in b[j1:j2]:
for text in _dump_line("+", line):
yield text
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return "{}".format(beginning)
return "{},{}".format(beginning, beginning + length - 1)
# See http://www.unix.org/single_unix_specification/
def context_diff(
a,
b,
n=3,
ignore_space_change=False,
ignore_whitespaces=False,
ignore_blank_lines=False,
):
r"""
Compare two sequences of lines; generate the delta as a context diff.
Context diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with *** or ---) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
The context diff format normally has a header for filenames and
modification times. Here, "a.text" and "b.text" are displayed instead of
file names, and current UTC time instead of the modification time.
- `ignore_space_change`: refer to gnu diff option -b
- `ignore_whitespaces`: refer to gnu diff option -w
- `ignore_blank_lines`: refer to gnu diff option -B
Example:
>>> difference = context_diff('one\ntwo\nthree\n'.splitlines(True),
'ore\nthree\nemu\n'.splitlines(True))
>>> print(''.join(difference))
--- a.text 2018-08-28 11:40:17 UTC
+++ b.text 2018-08-28 11:40:17 UTC
***************
*** 1,3 ****
! one
! two
three
--- 1,3 ----
! ore
three
+ emu
"""
prefix = dict(insert="+ ", delete="- ", replace="! ", equal=" ")
started = False
for group in Differ(
linejunk=IS_LINE_JUNK,
charjunk=None,
ignore_space_change=ignore_space_change,
ignore_whitespaces=ignore_whitespaces,
ignore_blank_lines=ignore_blank_lines,
).get_grouped_opcodes(a, b, n):
if not started:
started = True
utc_time_str = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
fromdate = todate = "\t{}".format(utc_time_str)
yield "--- {}{}{}".format("a.text", fromdate, os.linesep)
yield "+++ {}{}{}".format("b.text", todate, os.linesep)
first, last = group[0], group[-1]
yield "***************" + os.linesep
file1_range = _format_range_context(first[1], last[2])
yield "*** {} ****{}".format(file1_range, os.linesep)
if any(tag in ("replace", "delete") for tag, _, _, _, _ in group):
for tag, i1, i2, _, _ in group:
if tag != "insert":
for line in a[i1:i2]:
for text in _dump_line(prefix[tag], line):
yield text
file2_range = _format_range_context(first[3], last[4])
yield "--- {} ----{}".format(file2_range, os.linesep)
if any(tag in ("replace", "insert") for tag, _, _, _, _ in group):
for tag, _, _, j1, j2 in group:
if tag != "delete":
for line in b[j1:j2]:
for text in _dump_line(prefix[tag], line):
yield text
|
StarcoderdataPython
|
3269048
|
#!/usr/bin/python
# Can enable debug output by uncommenting:
#import logging
#logging.basicConfig(level=logging.DEBUG)
import math
import Turbo_I2C.MPU6050 as MPU6050
sensor = MPU6050.MPU6050()
sensor.read_raw_data()
print 'Temp = {0:0.2f} *C'.format(sensor.read_temp())
print 'Pitch = {0:0.2f} degrees '.format(sensor.read_pitch()/ math.pi *180)
print 'Roll = {0:0.2f} degrees '.format(sensor.read_roll()/ math.pi *180)
|
StarcoderdataPython
|
83060
|
from django.db import models
from django.db.models import Case, F, Q, Value, When
from psqlextra.expressions import HStoreRef
from psqlextra.fields import HStoreField
from .fake_model import get_fake_model
def test_query_annotate_hstore_key_ref():
"""Tests whether annotating using a :see:HStoreRef expression works
correctly.
This allows you to select an individual hstore key.
"""
model_fk = get_fake_model({"title": HStoreField()})
model = get_fake_model(
{"fk": models.ForeignKey(model_fk, on_delete=models.CASCADE)}
)
fk = model_fk.objects.create(title={"en": "english", "ar": "arabic"})
model.objects.create(fk=fk)
queryset = (
model.objects.annotate(english_title=HStoreRef("fk__title", "en"))
.values("english_title")
.first()
)
assert queryset["english_title"] == "english"
def test_query_annotate_rename():
"""Tests whether field names can be overwritten with a annotated field."""
model = get_fake_model({"title": models.CharField(max_length=12)})
model.objects.create(title="swen")
obj = model.objects.annotate(title=F("title")).first()
assert obj.title == "swen"
def test_query_annotate_rename_chain():
"""Tests whether annotations are behaving correctly after a QuerySet
chain."""
model = get_fake_model(
{
"name": models.CharField(max_length=10),
"value": models.IntegerField(),
}
)
model.objects.create(name="test", value=23)
obj = model.objects.values("name").annotate(value=F("value"))[:1]
assert "value" in obj[0]
assert obj[0]["value"] == 23
def test_query_annotate_rename_order():
"""Tests whether annotation order is preserved after a rename."""
model = get_fake_model(
{
"name": models.CharField(max_length=10),
"value": models.IntegerField(),
}
)
qs = model.objects.annotate(value=F("value"), value_2=F("value"))
assert list(qs.query.annotations.keys()) == ["value", "value_2"]
def test_query_annotate_in_expression():
"""Tests whether annotations can be used in expressions."""
model = get_fake_model({"name": models.CharField(max_length=10)})
model.objects.create(name="henk")
result = model.objects.annotate(
real_name=F("name"),
is_he_henk=Case(
When(Q(real_name="henk"), then=Value("really henk")),
default=Value("definitely not henk"),
output_field=models.CharField(),
),
).first()
assert result.real_name == "henk"
assert result.is_he_henk == "really henk"
def test_query_hstore_value_update_f_ref():
"""Tests whether F(..) expressions can be used in hstore values when
performing update queries."""
model = get_fake_model(
{"name": models.CharField(max_length=255), "name_new": HStoreField()}
)
model.objects.create(name="waqas", name_new=dict(en="swen"))
model.objects.update(name_new=dict(en=models.F("name")))
inst = model.objects.all().first()
assert inst.name_new.get("en") == "waqas"
def test_query_hstore_value_update_cast():
"""Tests whether values in a HStore field are automatically cast to strings
when doing updates."""
model = get_fake_model({"title": HStoreField()})
model.objects.create(title=dict(en="test"))
model.objects.update(title=dict(en=2))
inst = model.objects.all().first()
assert inst.title.get("en") == "2"
def test_query_hstore_value_update_escape():
"""Tests whether values in a HStore field are properly escaped using
prepared statement values."""
model = get_fake_model({"title": HStoreField()})
model.objects.create(title=dict(en="test"))
model.objects.update(title=dict(en="console.log('test')"))
inst = model.objects.all().first()
assert inst.title.get("en") == "console.log('test')"
|
StarcoderdataPython
|
110143
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as ET
import xml.dom.minidom as pxml
import os
def convert(tree,fileName=None):
"""
Converts input files to be compatible with merge request #460:
- Removes "all" node
- Sets default variable names
@ In, tree, xml.etree.ElementTree.ElementTree object, the contents of a RAVEN input file
@ In, fileName, the name for the raven input file
@Out, tree, xml.etree.ElementTree.ElementTree object, the modified RAVEN input file
"""
simulation = tree.getroot()
opts = simulation.find('Optimizers')
if opts is not None:
for child in opts:
if child.tag == 'FiniteDifferenceGradientOptimizer':
child.tag = 'FiniteDifference'
return tree
if __name__=='__main__':
import convert_utils
import sys
convert_utils.standardMain(sys.argv,convert)
|
StarcoderdataPython
|
1798595
|
<reponame>paulmassen/kibitzr
import os
import logging
import pkgutil
import importlib
logger = logging.getLogger(__name__)
def dummy_notify_factory(notify_func):
def factory(conf, value):
return notify_func
return factory
def load_notifiers():
path = os.path.dirname(os.path.abspath(__file__))
before, sep, _ = __name__.rpartition('.')
prefix = before + sep
registry = {}
for _, modname, _ in pkgutil.walk_packages([path], prefix):
submodule = importlib.import_module(modname, __name__)
if hasattr(submodule, 'register'):
submodule.register(registry)
else:
key = getattr(submodule, 'NAME', modname.split('.')[-1])
if hasattr(submodule, 'notify_factory'):
registry[key] = submodule.notify_factory
elif hasattr(submodule, 'notify'):
registry[key] = dummy_notify_factory(submodule.notify)
return registry
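# Sketch of the plugin contract load_notifiers() expects from a submodule of
# this package (a hypothetical module, shown for illustration only):
#
#     NAME = "mynotifier"              # optional; defaults to the module name
#
#     def notify(report):              # simplest form, wrapped automatically
#         print(report)
#
#     # or, for configurable notifiers:
#     def notify_factory(conf, value):
#         def notify(report):
#             print(value, report)
#         return notify
#
#     # or take full control of registration:
#     def register(registry):
#         registry["mynotifier"] = notify_factory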
class CompositeNotifier(object):
REGISTRY = load_notifiers()
def __init__(self, conf):
self.conf = conf
notifiers_conf = conf.get('notify', [])
if not notifiers_conf:
logger.warning(
"No notifications configured for %r",
conf['name'],
)
self.notifiers = []
for notifier_conf in notifiers_conf:
self.add_notifier(notifier_conf)
def add_notifier(self, notifier_conf):
try:
name, value = next(iter(notifier_conf.items()))
except AttributeError:
name, value = notifier_conf, None
try:
notify_factory = self.REGISTRY[name]
except KeyError:
logger.error("Unknown notifier %r", name)
else:
self.notifiers.append(
notify_factory(conf=self.conf, value=value)
)
def notify(self, report):
if report:
logger.debug('Sending report: %r', report)
for notifier in self.notifiers:
try:
notifier(report=report)
                except Exception:
logger.exception(
"Exception occurred during sending notification"
)
else:
logger.debug('Report is empty, skipping notification')
__call__ = notify
|
StarcoderdataPython
|
21436
|
from time import time
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
from singly_linkedlist.singly_linkedlist import SinglyLinkedList
start = time()
linked_list = SinglyLinkedList()
for i in range(100000):
linked_list.insert_head(111111111111)
end = time()
print("Took {0} seconds".format(start-end))
# linked_list.print_elements()
|
StarcoderdataPython
|
189873
|
<reponame>yoshihikosuzuki/pbcore<gh_stars>0
from nose.tools import assert_equal, assert_true, assert_false
from numpy.testing import assert_array_equal
from StringIO import StringIO
from pbcore.io import BasH5Collection
from pbcore import data
def lookupSomeReadsByName(bc):
pass
def test():
for fofn in data.getFofns():
bc = BasH5Collection(fofn)
for zmw in bc:
zmwAgain = bc[zmw.zmwName]
assert_equal(zmw.zmwName, zmwAgain.zmwName)
def test_read_iterators():
for fofn in data.getFofns():
bc = BasH5Collection(fofn)
# TODO Add some meaningful tests here
list(bc.subreads())
list(bc.reads())
list(bc.ccsReads())
|
StarcoderdataPython
|
3221949
|
<filename>vsts/vsts/work/v4_0/models/capacity_patch.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class CapacityPatch(Model):
"""CapacityPatch.
:param activities:
:type activities: list of :class:`Activity <work.v4_0.models.Activity>`
:param days_off:
:type days_off: list of :class:`DateRange <work.v4_0.models.DateRange>`
"""
_attribute_map = {
'activities': {'key': 'activities', 'type': '[Activity]'},
'days_off': {'key': 'daysOff', 'type': '[DateRange]'}
}
def __init__(self, activities=None, days_off=None):
super(CapacityPatch, self).__init__()
self.activities = activities
self.days_off = days_off
|
StarcoderdataPython
|
184119
|
<reponame>kilinger/marathon-rocketchat-hubot
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0002_auto_20160122_0305'),
]
operations = [
migrations.AddField(
model_name='projectport',
name='container_port',
field=models.IntegerField(default=5000),
preserve_default=False,
),
]
|
StarcoderdataPython
|
54499
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Name: sorting.py
# Purpose: Music21 class for sorting
#
# Authors: <NAME>
#
# Copyright: Copyright © 2014-2015 <NAME> and the music21
# Project
# License: BSD, see license.txt
# -----------------------------------------------------------------------------
'''
This module defines a single class, SortTuple, which is a named tuple that can
sort against bare offsets and other SortTuples.
This is a performance-critical object.
It also defines three singleton instances of the SortTuple class, named
ZeroSortTupleDefault, ZeroSortTupleLow and ZeroSortTupleHigh, which are
SortTuples at offset 0.0 with priority 0, -inf and inf respectively:
>>> sorting.ZeroSortTupleDefault
SortTuple(atEnd=0, offset=0.0, priority=0, classSortOrder=0, isNotGrace=1, insertIndex=0)
>>> sorting.ZeroSortTupleLow
SortTuple(atEnd=0, offset=0.0, priority=-inf, classSortOrder=0, isNotGrace=1, insertIndex=0)
>>> sorting.ZeroSortTupleHigh
SortTuple(atEnd=0, offset=0.0, priority=inf, classSortOrder=0, isNotGrace=1, insertIndex=0)
'''
from collections import namedtuple
from math import inf as INFINITY
from music21 import exceptions21
_attrList = ['atEnd', 'offset', 'priority', 'classSortOrder', 'isNotGrace', 'insertIndex']
class SortingException(exceptions21.Music21Exception):
pass
class SortTuple(namedtuple('SortTuple', _attrList)):
'''
Derived class of namedTuple which allows for comparisons with pure ints/fractions.
>>> n = note.Note()
>>> s = stream.Stream()
>>> s.insert(4, n)
>>> st = n.sortTuple()
>>> st
SortTuple(atEnd=0, offset=4.0, priority=0, classSortOrder=20, isNotGrace=1, insertIndex=...)
>>> st.shortRepr()
'4.0 <0.20...>'
>>> st.atEnd
0
>>> st.offset
4.0
>>> st < 5.0
True
>>> 5.0 > st
True
>>> st > 3.0
True
>>> 3.0 < st
True
>>> st == 4.0
True
>>> ts = bar.Barline('double')
>>> t = stream.Stream()
>>> t.storeAtEnd(ts)
>>> ts_st = ts.sortTuple()
>>> ts_st
SortTuple(atEnd=1, offset=0.0, priority=0, classSortOrder=-5, isNotGrace=1, insertIndex=...)
>>> st < ts_st
True
>>> ts_st > 999999
True
>>> import math
>>> ts_st == math.inf
True
Construct one w/ keywords:
>>> st = sorting.SortTuple(atEnd=0, offset=1.0, priority=0, classSortOrder=20,
... isNotGrace=1, insertIndex=323)
>>> st.shortRepr()
'1.0 <0.20.323>'
or as tuple:
>>> st = sorting.SortTuple(0, 1.0, 0, 20, 1, 323)
>>> st.shortRepr()
'1.0 <0.20.323>'
'''
def __new__(cls, *tupEls, **kw):
# noinspection PyTypeChecker
return super(SortTuple, cls).__new__(cls, *tupEls, **kw)
def __eq__(self, other):
if isinstance(other, tuple):
return super().__eq__(other)
try:
if self.atEnd == 1 and other != INFINITY:
return False
elif self.atEnd == 1:
return True
else:
return self.offset == other
except ValueError:
return NotImplemented
def __lt__(self, other):
if isinstance(other, tuple):
return super().__lt__(other)
try:
if self.atEnd == 1:
return False
else:
return self.offset < other
except ValueError:
return NotImplemented
def __gt__(self, other):
if isinstance(other, tuple):
return super().__gt__(other)
try:
if self.atEnd == 1 and other != INFINITY:
return True
elif self.atEnd == 1:
return False
else:
return self.offset > other
except ValueError:
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
def shortRepr(self):
'''
Returns a nice representation of a SortTuple
>>> st = sorting.SortTuple(atEnd=0, offset=1.0, priority=0, classSortOrder=20,
... isNotGrace=1, insertIndex=323)
>>> st.shortRepr()
'1.0 <0.20.323>'
>>> st = sorting.SortTuple(atEnd=1, offset=1.0, priority=4, classSortOrder=7,
... isNotGrace=0, insertIndex=200)
>>> st.shortRepr()
'End <4.7.[Grace].200>'
'''
reprParts = []
if self.atEnd:
reprParts.append('End')
else:
reprParts.append(str(self.offset))
reprParts.append(' <')
reprParts.append(str(self.priority))
reprParts.append('.')
reprParts.append(str(self.classSortOrder))
if self.isNotGrace == 0:
reprParts.append('.[Grace]')
reprParts.append('.')
reprParts.append(str(self.insertIndex))
reprParts.append('>')
return ''.join(reprParts)
def modify(self, **kw):
'''
return a new SortTuple identical to the previous, except with
the given keyword modified. Works only with keywords.
>>> st = sorting.SortTuple(atEnd=0, offset=1.0, priority=0, classSortOrder=20,
... isNotGrace=1, insertIndex=32)
>>> st2 = st.modify(offset=2.0)
>>> st2.shortRepr()
'2.0 <0.20.32>'
>>> st2
SortTuple(atEnd=0, offset=2.0, priority=0, classSortOrder=20, isNotGrace=1, insertIndex=32)
>>> st3 = st2.modify(atEnd=1, isNotGrace=0)
>>> st3.shortRepr()
'End <0.20.[Grace].32>'
The original tuple is never modified (hence tuple):
>>> st.offset
1.0
Changing offset, but nothing else, helps in creating .flatten() positions.
'''
outList = [kw.get(attr, getattr(self, attr)) for attr in _attrList]
return self.__class__(*outList)
def add(self, other):
'''
Add all attributes from one sortTuple to another,
returning a new one.
>>> n = note.Note()
>>> n.offset = 10
>>> s = stream.Stream()
>>> s.offset = 10
>>> n.sortTuple()
SortTuple(atEnd=0, offset=10.0, priority=0, classSortOrder=20, isNotGrace=1, insertIndex=0)
>>> s.sortTuple()
SortTuple(atEnd=0, offset=10.0, priority=0, classSortOrder=-20, isNotGrace=1, insertIndex=0)
>>> s.sortTuple().add(n.sortTuple())
SortTuple(atEnd=0, offset=20.0, priority=0, classSortOrder=0, isNotGrace=1, insertIndex=0)
        Note that atEnd and isNotGrace are upper bounded at 1 and
        take the max value of either.
'''
if not isinstance(other, self.__class__):
raise SortingException('Cannot add attributes from a different class')
outList = [max(getattr(self, attr), getattr(other, attr))
if attr in ('atEnd', 'isNotGrace')
else (getattr(self, attr) + getattr(other, attr))
for attr in _attrList]
return self.__class__(*outList)
def sub(self, other):
'''
        Subtract the attributes of another sortTuple from this one. atEnd and isNotGrace take the min value of either.
>>> n = note.Note()
>>> n.offset = 10
>>> s = stream.Stream()
>>> s.offset = 10
>>> n.sortTuple()
SortTuple(atEnd=0, offset=10.0, priority=0, classSortOrder=20, isNotGrace=1, insertIndex=0)
>>> s.sortTuple()
SortTuple(atEnd=0, offset=10.0, priority=0, classSortOrder=-20, isNotGrace=1, insertIndex=0)
>>> s.sortTuple().sub(n.sortTuple())
SortTuple(atEnd=0, offset=0.0, priority=0, classSortOrder=-40, isNotGrace=1, insertIndex=0)
Note that atEnd and isNotGrace are lower bounded at 0.
'''
if not isinstance(other, self.__class__):
            raise SortingException('Cannot subtract attributes from a different class')
outList = [min(getattr(self, attr), getattr(other, attr))
if attr in ('atEnd', 'isNotGrace')
else (getattr(self, attr) - getattr(other, attr))
for attr in _attrList]
return self.__class__(*outList)
ZeroSortTupleDefault = SortTuple(atEnd=0, offset=0.0, priority=0, classSortOrder=0,
isNotGrace=1, insertIndex=0)
ZeroSortTupleLow = SortTuple(atEnd=0, offset=0.0, priority=-INFINITY, classSortOrder=0,
isNotGrace=1, insertIndex=0)
ZeroSortTupleHigh = SortTuple(atEnd=0, offset=0.0, priority=INFINITY, classSortOrder=0,
isNotGrace=1, insertIndex=0)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
import music21
music21.mainTest()
|
StarcoderdataPython
|
1793038
|
<reponame>GmZhang3/data-science-ipython-notebooks<filename>python/python101/basis/distince_test.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
from sklearn.cluster import DBSCAN
def distince(vect1,vect2):
dist = (vect1-vect2)*((vect1-vect2).T)
return dist[0,0]
if __name__ == "__main__":
v1 = np.array([1,2])
v2 = np.array([1,1])
print v1 -v2
print (v1 -v2).T
vv = (v1 -v2)*((v1 -v2).T)
print vv
# print type(v1)
# print v2
# print distince(v1,v2)
|
StarcoderdataPython
|
30198
|
from enum import Enum
from typing import Tuple, Type, Optional
class Mode(Enum):
SIMPLE = "s"
EXTENDED = "e"
def __str__(self) -> str:
return self.value
class Header(Enum):
PATH = "Path"
NAME = "Name"
SIZE = "Size"
MODIFIED = "Modified"
ACCESSED = "Accessed"
INPUT = "Input"
OUTPUT = "Output"
def __str__(self) -> str:
return self.value
Header_ref = Tuple[Header, Type, Optional[str]]
|
StarcoderdataPython
|
1762400
|
# +--------------------------------------------------------------------------+
# | Licensed Materials - Property of IBM |
# | |
# | (C) Copyright IBM Corporation 2009-2014. |
# +--------------------------------------------------------------------------+
# | This module complies with Django 1.0 and is |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable |
# | law or agreed to in writing, software distributed under the License is |
# | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
# | KIND, either express or implied. See the License for the specific |
# | language governing permissions and limitations under the License. |
# +--------------------------------------------------------------------------+
# | Authors: <NAME>, <NAME>, <NAME> |
# +--------------------------------------------------------------------------+
import sys
_IS_JYTHON = sys.platform.startswith( 'java' )
if not _IS_JYTHON:
try:
# Import IBM_DB wrapper ibm_db_dbi
import ibm_db_dbi as Database
#from Database import DatabaseError
except ImportError, e:
raise ImportError( "ibm_db module not found. Install ibm_db module from http://code.google.com/p/ibm-db/. Error: %s" % e )
else:
from com.ziclix.python.sql import zxJDBC
from django.db.backends import BaseDatabaseIntrospection, FieldInfo
from django import VERSION as djangoVersion
class DatabaseIntrospection( BaseDatabaseIntrospection ):
"""
This is the class where database metadata information can be generated.
"""
if not _IS_JYTHON:
data_types_reverse = {
Database.STRING : "CharField",
Database.TEXT : "TextField",
Database.XML : "XMLField",
Database.NUMBER : "IntegerField",
Database.FLOAT : "FloatField",
Database.DECIMAL : "DecimalField",
Database.DATE : "DateField",
Database.TIME : "TimeField",
Database.DATETIME : "DateTimeField",
}
if(djangoVersion[0:2] > (1, 1)):
data_types_reverse[Database.BINARY] = "BinaryField"
data_types_reverse[Database.BIGINT] = "BigIntegerField"
else:
data_types_reverse[Database.BIGINT] = "IntegerField"
else:
data_types_reverse = {
zxJDBC.CHAR: "CharField",
zxJDBC.BIGINT: "BigIntegerField",
zxJDBC.BINARY: "BinaryField",
zxJDBC.BIT: "SmallIntegerField",
zxJDBC.BLOB: "BinaryField",
zxJDBC.CLOB: "TextField",
zxJDBC.DATE: "DateField",
zxJDBC.DECIMAL: "DecimalField",
zxJDBC.DOUBLE: "FloatField",
zxJDBC.FLOAT: "FloatField",
zxJDBC.INTEGER: "IntegerField",
zxJDBC.LONGVARCHAR: "TextField",
zxJDBC.LONGVARBINARY: "ImageField",
zxJDBC.NUMERIC: "DecimalField",
zxJDBC.REAL: "FloatField",
zxJDBC.SMALLINT: "SmallIntegerField",
zxJDBC.VARCHAR: "CharField",
zxJDBC.TIMESTAMP: "DateTimeField",
zxJDBC.TIME: "TimeField",
}
def get_field_type(self, data_type, description):
if not _IS_JYTHON:
if data_type == Database.NUMBER:
if description.precision == 5:
return 'SmallIntegerField'
return super(DatabaseIntrospection, self).get_field_type(data_type, description)
# Converting table name to lower case.
def table_name_converter ( self, name ):
return name.lower()
# Getting the list of all tables, which are present under current schema.
def get_table_list ( self, cursor ):
table_list = []
if not _IS_JYTHON:
for table in cursor.connection.tables( cursor.connection.get_current_schema() ):
table_list.append( table['TABLE_NAME'].lower() )
else:
cursor.execute( "select current_schema from sysibm.sysdummy1" )
schema = cursor.fetchone()[0]
# tables(String catalog, String schemaPattern, String tableNamePattern, String[] types) gives a description of tables available in a catalog
cursor.tables( None, schema, None, ( "TABLE", ) )
for table in cursor.fetchall():
# table[2] is table name
table_list.append( table[2].lower() )
return table_list
# Generating a dictionary for foreign key details, which are present under current schema.
def get_relations( self, cursor, table_name ):
relations = {}
if not _IS_JYTHON:
schema = cursor.connection.get_current_schema()
for fk in cursor.connection.foreign_keys( True, schema, table_name ):
relations[self.__get_col_index( cursor, schema, table_name, fk['FKCOLUMN_NAME'] )] = ( self.__get_col_index( cursor, schema, fk['PKTABLE_NAME'], fk['PKCOLUMN_NAME'] ), fk['PKTABLE_NAME'].lower() )
else:
cursor.execute( "select current_schema from sysibm.sysdummy1" )
schema = cursor.fetchone()[0]
# foreign_keys(String primaryCatalog, String primarySchema, String primaryTable, String foreignCatalog, String foreignSchema, String foreignTable)
# gives a description of the foreign key columns in the foreign key table that reference the primary key columns
# of the primary key table (describe how one table imports another's key.) This should normally return a single foreign key/primary key pair
# (most tables only import a foreign key from a table once.) They are ordered by FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, and KEY_SEQ
cursor.foreignkeys( None, schema, table_name, None, '%', '%' )
for fk in cursor.fetchall():
# fk[2] is primary key table name, fk[3] is primary key column name, fk[7] is foreign key column name being exported
relations[self.__get_col_index( cursor, schema, table_name, fk[7] )] = ( self.__get_col_index( cursor, schema, fk[2], fk[3] ), fk[3], fk[2] )
return relations
# Private method. Getting Index position of column by its name
def __get_col_index ( self, cursor, schema, table_name, col_name ):
if not _IS_JYTHON:
for col in cursor.connection.columns( schema, table_name, [col_name] ):
return col['ORDINAL_POSITION'] - 1
else:
cursor.execute( "select current_schema from sysibm.sysdummy1" )
schema = cursor.fetchone()[0]
# columns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) gives a description of table columns available in the specified catalog
cursor.columns( None, schema, table_name, col_name )
for col in cursor.fetchall():
#col[16] is index of column in table
return col[16] - 1
def get_key_columns(self, cursor, table_name):
relations = []
if not _IS_JYTHON:
schema = cursor.connection.get_current_schema()
for fk in cursor.connection.foreign_keys( True, schema, table_name ):
relations.append( (fk['FKCOLUMN_NAME'].lower(), fk['PKTABLE_NAME'].lower(), fk['PKCOLUMN_NAME'].lower()) )
else:
cursor.execute( "select current_schema from sysibm.sysdummy1" )
schema = cursor.fetchone()[0]
# foreign_keys(String primaryCatalog, String primarySchema, String primaryTable, String foreignCatalog, String foreignSchema, String foreignTable)
# gives a description of the foreign key columns in the foreign key table that reference the primary key columns
# of the primary key table (describe how one table imports another's key.) This should normally return a single foreign key/primary key pair
# (most tables only import a foreign key from a table once.) They are ordered by FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, and KEY_SEQ
cursor.foreignkeys( None, schema, table_name, None, '%', '%' )
for fk in cursor.fetchall():
# fk[2] is primary key table name, fk[3] is primary key column name, fk[7] is foreign key column name being exported
relations.append( (fk[7], fk[2], fk[3]) )
return relations
# Getting list of indexes associated with the table provided.
def get_indexes( self, cursor, table_name ):
indexes = {}
# To skip indexes across multiple fields
multifield_indexSet = set()
if not _IS_JYTHON:
schema = cursor.connection.get_current_schema()
all_indexes = cursor.connection.indexes( True, schema, table_name )
for index in all_indexes:
if (index['ORDINAL_POSITION'] is not None) and (index['ORDINAL_POSITION']== 2):
multifield_indexSet.add(index['INDEX_NAME'])
for index in all_indexes:
temp = {}
if index['INDEX_NAME'] in multifield_indexSet:
continue
if ( index['NON_UNIQUE'] ):
temp['unique'] = False
else:
temp['unique'] = True
temp['primary_key'] = False
indexes[index['COLUMN_NAME'].lower()] = temp
for index in cursor.connection.primary_keys( True, schema, table_name ):
indexes[index['COLUMN_NAME'].lower()]['primary_key'] = True
else:
cursor.execute( "select current_schema from sysibm.sysdummy1" )
schema = cursor.fetchone()[0]
# statistics(String catalog, String schema, String table, boolean unique, boolean approximate) returns a description of a table's indices and statistics.
cursor.statistics( None, schema, table_name, 0, 0 )
all_indexes = cursor.fetchall()
for index in all_indexes:
#index[7] indicate ORDINAL_POSITION within index, and index[5] is index name
if index[7] == 2:
multifield_indexSet.add(index[5])
for index in all_indexes:
temp = {}
if index[5] in multifield_indexSet:
continue
# index[3] indicate non-uniqueness of column
if ( index[3] != None ):
if ( index[3] ) == 1:
temp['unique'] = False
else:
temp['unique'] = True
temp['primary_key'] = False
# index[8] is column name
indexes[index[8].lower()] = temp
# primarykeys(String catalog, String schema, String table) gives a description of a table's primary key columns
cursor.primarykeys( None, schema, table_name )
for index in cursor.fetchall():
#index[3] is column name
indexes[index[3].lower()]['primary_key'] = True
return indexes
# Getting the description of the table.
def get_table_description( self, cursor, table_name ):
qn = self.connection.ops.quote_name
cursor.execute( "SELECT * FROM %s FETCH FIRST 1 ROWS ONLY" % qn( table_name ) )
description = []
if djangoVersion < (1, 6):
for desc in cursor.description:
description.append( [ desc[0].lower(), ] + desc[1:] )
else:
for desc in cursor.description:
description.append(FieldInfo(*[desc[0].lower(), ] + desc[1:]))
return description
def get_constraints(self, cursor, table_name):
constraints = {}
if not _IS_JYTHON:
schema = cursor.connection.get_current_schema()
sql = "SELECT CONSTNAME, COLNAME FROM SYSCAT.COLCHECKS WHERE TABSCHEMA='%(schema)s' AND TABNAME='%(table)s'" % {'schema': schema.upper(), 'table': table_name.upper()}
cursor.execute(sql)
for constname, colname in cursor.fetchall():
if constname not in constraints:
constraints[constname] = {
'columns': [],
'primary_key': False,
'unique': False,
'foreign_key': None,
'check': True,
'index': False
}
constraints[constname]['columns'].append(colname.lower())
sql = "SELECT KEYCOL.CONSTNAME, KEYCOL.COLNAME FROM SYSCAT.KEYCOLUSE KEYCOL INNER JOIN SYSCAT.TABCONST TABCONST ON KEYCOL.CONSTNAME=TABCONST.CONSTNAME WHERE TABCONST.TABSCHEMA='%(schema)s' and TABCONST.TABNAME='%(table)s' and TABCONST.TYPE='U'" % {'schema': schema.upper(), 'table': table_name.upper()}
cursor.execute(sql)
for constname, colname in cursor.fetchall():
if constname not in constraints:
constraints[constname] = {
'columns': [],
'primary_key': False,
'unique': True,
'foreign_key': None,
'check': False,
'index': True
}
constraints[constname]['columns'].append(colname.lower())
for pkey in cursor.connection.primary_keys(None, schema, table_name):
if pkey['PK_NAME'] not in constraints:
constraints[pkey['PK_NAME']] = {
'columns': [],
'primary_key': True,
'unique': False,
'foreign_key': None,
'check': False,
'index': True
}
constraints[pkey['PK_NAME']]['columns'].append(pkey['COLUMN_NAME'].lower())
for fk in cursor.connection.foreign_keys( True, schema, table_name ):
if fk['FK_NAME'] not in constraints:
constraints[fk['FK_NAME']] = {
'columns': [],
'primary_key': False,
'unique': False,
'foreign_key': (fk['PKTABLE_NAME'].lower(), fk['PKCOLUMN_NAME'].lower()),
'check': False,
'index': False
}
constraints[fk['FK_NAME']]['columns'].append(fk['FKCOLUMN_NAME'].lower())
if fk['PKCOLUMN_NAME'].lower() not in constraints[fk['FK_NAME']]['foreign_key']:
fkeylist = list(constraints[fk['FK_NAME']]['foreign_key'])
fkeylist.append(fk['PKCOLUMN_NAME'].lower())
constraints[fk['FK_NAME']]['foreign_key'] = tuple(fkeylist)
for index in cursor.connection.indexes( True, schema, table_name ):
if index['INDEX_NAME'] not in constraints:
constraints[index['INDEX_NAME']] = {
'columns': [],
'primary_key': False,
'unique': False,
'foreign_key': None,
'check': False,
'index': True
}
elif constraints[index['INDEX_NAME']]['unique'] :
continue
elif constraints[index['INDEX_NAME']]['primary_key']:
continue
constraints[index['INDEX_NAME']]['columns'].append(index['COLUMN_NAME'].lower())
return constraints
|
StarcoderdataPython
|
1620730
|
import sys
import os
import collections
import nltk.data
import string
import math
import features
import traceback
import time
import argparse
import nltk.corpus
import nltk.stem.porter
import textClasses as tc
import cluster
import fuzzy
import rules
CUE_PHRASE_FILE = 'bonus_words'
STIGMA_WORDS_FILE = 'stigma_words'
def pre_process_text(text):
while text[0] == "\n":
text = text[1:]
text = text.split('\n', 1)
title = tc.Title(text[0], [])
text = text[1].replace(u"\u2018", '\'').replace(u"\u2019", '\'').replace(u"\u201c",'"').replace(u"\u201d", '"')
words = dict()
sentences = []
sentence_detector = nltk.data.load('tokenizers/punkt/english.pickle')
detected_sentences = sentence_detector.tokenize(text.strip())
stopwords_list = nltk.corpus.stopwords.words('english')
stemmer = nltk.stem.porter.PorterStemmer()
#Pre-process title
tokens = nltk.word_tokenize(title.original)
tokens = [token for token in tokens if token not in stopwords_list]
part_of_speech = nltk.pos_tag(tokens)
for (token, word_pos) in zip(tokens, part_of_speech):
token = token.lower()
if (token not in words) and (token not in list(string.punctuation) and (token not in stopwords_list)):
words[token] = tc.Word(stemmer.stem(token), word_pos, [(lemma, stemmer.stem(lemma)) for synset in nltk.corpus.wordnet.synsets(token) for lemma in synset.lemma_names()])
title.bag_of_words.append(token)
#Pre-process text
for detected_sentence in detected_sentences:
tokens = nltk.word_tokenize(detected_sentence)
tokens = [token for token in tokens if token not in stopwords_list]
if tokens:
part_of_speech = nltk.pos_tag(tokens)
bag_of_words = []
stemmed_bag_of_words = []
for (token, word_pos) in zip(tokens, part_of_speech):
token = token.lower()
if (token not in list(string.punctuation) and (token not in stopwords_list)):
if (token not in words):
words[token] = tc.Word(stemmer.stem(token), word_pos, [(lemma, stemmer.stem(lemma)) for synset in nltk.corpus.wordnet.synsets(token) for lemma in synset.lemma_names()])
elif token in words:
words[token].increment_abs_frequency()
bag_of_words.append(token)
stemmed_bag_of_words.append(stemmer.stem(token))
if (len(bag_of_words) != 0 or len(stemmed_bag_of_words) != 0):
sentences.append(tc.Sentence(detected_sentence, len(sentences) + 1, [], [], None))
sentences[-1].bag_of_words = list(bag_of_words)
sentences[-1].stemmed_bag_of_words = list(stemmed_bag_of_words)
return [title, sentences, words]
def process_input(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--text-file", dest="text_file", help="the file containing the text tom be summarized", required=True)
parser.add_argument("-p", "--percent", dest="percentage", help="the compression rate as percentage", required=True)
parser.add_argument("-t", "--threads", dest="threads", help="the number of threads", required=True)
# Process arguments
args = parser.parse_args()
threads = args.threads
percentage = args.percentage
text_file = args.text_file
with open(text_file, 'r') as f:
text = f.read()
f.closed
return {"text": text, "percentage": percentage, "threads": threads}
def resource_loader():
resources = dict()
path = os.path.dirname(os.path.realpath(__file__)) + '\\resources\\'
resource_files = [file.split()[0] for file in os.listdir(path)]
for resource_file_name in resource_files:
with open(path + "/"+resource_file_name, 'r') as f:
text = f.read()
f.closed
resources[resource_file_name.split('.')[0]] = set(list(text.split('\n')))
return resources
def print_stuff(sentences, sentences_features):
data = sentences_features
for i in range(0, len(data)):
print("******************************")
print("Sentence: ", end="")
print(sentences[i].original)
print_sentence_info(data[i])
print("Rules: ")
rules.print_rules_results(data[i])
def filter_using_clusters(sentences, percentage, clusters):
number_sentences = math.floor(percentage * len(sentences))
sentences = sorted(sentences, key=lambda x: x.rank, reverse=True)
clusters_counter = [0] * len(clusters)
    sentence_counter = 0
chosen_sentences = []
while len(chosen_sentences) < number_sentences:
sentence_counter = 0
for i in range(0, len(clusters)):
for j in range(0, len(sentences)):
if (clusters_counter[i] == min(clusters_counter) and clusters[i].count(sentences[j].position) == 1):
chosen_sentences.append(sentences[j])
clusters[i].remove(sentences[j].position)
if (len(clusters[i]) == 0):
clusters_counter[i] = sys.maxsize
else:
clusters_counter[i] += 1
                    break
            if (len(chosen_sentences) >= number_sentences):
                break
chosen_sentences = sorted(chosen_sentences, key=lambda x: x.position)
return chosen_sentences
def print_based_on_fuzzy(angels_objects, p):
print("***** RESULTS BASED ONLY ON FUZZY *****")
number_sentences = math.floor(p * len(angels_objects))
sorted_by_rank = [element for element in sorted(angels_objects,
key=lambda x: x.rank, reverse=True)][0:number_sentences]
vukans_list = sorted(sorted_by_rank, key=lambda x: x.position, reverse=False)
for sentence in vukans_list:
print(sentence.original)
print("")
def main():
try:
start_time = time.time()
processed_input = process_input()
text = processed_input['text']
percentage = processed_input['percentage']
threads = int(processed_input['threads'])
resources = resource_loader()
preprocessed_text = pre_process_text(text)
preprocessed_text[1] = sorted(preprocessed_text[1], key=lambda x: x.position)
keyword_feature_value = features.keyword_feature(preprocessed_text[1], preprocessed_text[2])
title_word_feature_value = features.title_word_feature(preprocessed_text[0], preprocessed_text[1])
sentence_location_feature_value = features.sentence_location_feature(preprocessed_text[1])
sentence_length_feature_value = features.sentence_length_feature(preprocessed_text[1])
proper_noun_feature_value = features.pos_tag_feature(preprocessed_text[1], preprocessed_text[2], 'NNP')
cue_phrase_feature_value = features.phrase_feature(preprocessed_text[1], resources[CUE_PHRASE_FILE])
stigma_phrase_feature_value = features.phrase_feature(preprocessed_text[1], resources[STIGMA_WORDS_FILE])
numerical_data_feature_value = features.pos_tag_feature(preprocessed_text[1], preprocessed_text[2], 'CD')
k_means_result = cluster.k_means(preprocessed_text[1], preprocessed_text[2], percentage, threads)
# summary = cluster.cluster_based_summary(preprocessed_text[1], k_means_result[0], k_means_result[1])
sentences_feature_list = []
for (
keyword_value,
title_word_value,
sentence_location_value,
sentence_lenght_value,
proper_noun_value,
cue_phase_value,
stigma_word_value,
numerical_data_value,
) in zip(
keyword_feature_value,
title_word_feature_value,
sentence_location_feature_value,
sentence_length_feature_value,
proper_noun_feature_value,
cue_phrase_feature_value,
stigma_phrase_feature_value,
numerical_data_feature_value,
):
sentences_feature_list.append({
'keyword': keyword_value,
'title_word': title_word_value,
'sentence_location': sentence_location_value,
'sentence_length': sentence_lenght_value,
'proper_noun': proper_noun_value,
'cue_phrase': cue_phase_value,
'nonessential': stigma_word_value,
'numerical_data': numerical_data_value,
})
#fuzzy.print_everything(preprocessed_text[1], sentences_feature_list)
fuzzy.set_fuzzy_ranks(preprocessed_text[1], sentences_feature_list)
chosen_sentences = filter_using_clusters(preprocessed_text[1], float(percentage)/100, k_means_result[1])
all_sentences_information = []
for sentence in preprocessed_text[1]:
chosen = 0
if (sentence in chosen_sentences):
chosen = 1
all_sentences_information.append([sentence.position, sentence.rank, chosen])
print(all_sentences_information)
print([keyword_feature_value,
title_word_feature_value,
sentence_location_feature_value,
sentence_length_feature_value,
proper_noun_feature_value,
numerical_data_feature_value,
cue_phrase_feature_value,
stigma_phrase_feature_value])
print(preprocessed_text[0].original)
for sentence in chosen_sentences:
print(sentence.original)
return 0
except KeyboardInterrupt:
### handle keyboard interrupt ###
return 0
except Exception as e:
sys.stderr.write(repr(e))
traceback.print_tb(e.__traceback__)
return 2
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
3328285
|
__author__ = '1988'
import drawtree
class node:
def __init__(self,parent,isroot=False):
self.parent=parent
self.root=isroot
self.children=[]
def show(self,i=0,drawtool=drawtree.drawtree().showdraw):
if not drawtool:
return
elif drawtool=='print':
if self.root:
print(self.getnodetext())
else:
indent=" "*i
print(indent+self.getnodetext())
i+=1
for child in self.children:
child.show(i=i)
else:
drawtool(self)
def getnodetext(self):
return 'node'
def isleaf(self):
if not self.children:return True
return len(self.children)==0
def getwidth(self,step=1):
if self.isleaf():
return step
else:
return sum([child.getwidth(step=step) for child in self.children])
def getheight(self):
if self.isleaf():
return 1
else:
return 1+max([child.getheight() for child in self.children])
    # get all descendant nodes
def getallchildren(self):
if self.isleaf():
return []
return self.getallnodeslist()[1:]
    # get all descendant nodes plus the node itself
def getallnodeslist(self):
if self.isleaf():
return [self]
return [self]+[child.getallchildren() for child in self.children]
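# Illustrative usage (children must be attached manually, since the constructor
# does not register a node with its parent):
#
#     root = node(None, isroot=True)
#     child = node(root)
#     root.children.append(child)
#     print(root.getheight())   # -> 2
#     print(root.getwidth())    # -> 1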
|
StarcoderdataPython
|
1668931
|
<reponame>zianke/cmdnote
from unittest import TestCase
from unittest.mock import patch
from cmdnote.main import main
from .utils import *
class Test(TestCase):
def test_main(self):
with patch('sys.argv', ['cmdnote']):
with captured_output() as (out, err):
main()
output = out.getvalue().strip()
self.assertTrue(output.startswith('usage'))
|
StarcoderdataPython
|
1760837
|
# from contextlib import redirect_stdout, redirect_stderr
# from io import StringIO
from unittest.mock import patch, call
from ..base import IntegrationTests
from ..util import fork
class TestNoProfile(IntegrationTests):
""" Integration tests for no profile. """
@fork()
def test_logout_via_awscli(self):
""" Logging out should throw AlreadyLoggedOut when not logged in. """
mesg = 'Already logged out!\n'
self.assertAwsCliReturns('logout', stderr=mesg, code=3)
# TODO FIXME NO HARDCODE
@fork()
@patch('builtins.input', return_value='')
def test_save_credentials_default_profile(self, mock_input):
""" Creates a default entry in ~/.aws/credentials """
calls = [
call('ECP Endpoint URL [None]: '),
call('Username [None]: '),
call('Enable Keyring [False]: '),
call('Duo Factor [None]: '),
call('Role ARN [None]: '),
]
self.assertAwsCliReturns('login', 'configure', code=0)
mock_input.assert_has_calls(calls)
self.assertEqual(len(mock_input.call_args_list), len(calls))
class TestDefaultProfile(IntegrationTests):
""" Integration tests for default profile. """
@fork()
def test_logout_via_awscli(self):
""" Logging out should throw AlreadyLoggedOut when not logged in. """
self.profile = 'default'
self.aws_credentials = """
[default]
aws_access_key_id = abc
aws_secret_access_key = def
aws_session_token = ghi
"""
mesg = 'Already logged out!\n'
self.assertAwsCliReturns('logout', stderr=mesg, code=3)
# TODO FIXME NO HARDCODE ERROR CODES!
|
StarcoderdataPython
|
51757
|
from datetime import timedelta, datetime
from typing import Optional
from fastapi import HTTPException, Depends
from fastapi.security import OAuth2PasswordBearer
from jose import jwt, JWTError
from passlib.context import CryptContext
from pydantic import BaseModel
from db import database as adb
from usermanagement.models import users
from usermanagement.schema import UserCreate, User
from starlette import status
SECRET_KEY = "<KEY>"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
class Token(BaseModel):
access_token: str
token_type: str
class TokenData(BaseModel):
username: Optional[str] = None
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def verify_password(plain_password, hashed_password) -> bool:
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password) -> str:
return pwd_context.hash(password)
async def get_user(username: Optional[str]) -> Optional[UserCreate]:
    query = users.select()
    user_list = await adb.fetch_all(query)
    for user in user_list:
        if user["username"] == username:
            return UserCreate(**user)
    # No matching user; callers treat None as "user not found".
    return None
"""authenticating user"""
async def authenticate_user(username: str, password: str):
user = await get_user(username)
if not user:
return False
if not verify_password(password, user.password):
return False
return user
"""code to create the access token"""
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
to_encode = data.copy()
if expires_delta:
        expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
"""getting the current user details"""
async def get_current_user(token: str = Depends(oauth2_scheme)) -> User:
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except JWTError:
raise credentials_exception
user = await get_user(username=token_data.username)
if not user:
raise credentials_exception
return user
"""Checking users if they are active or not"""
async def get_current_active_user(
current_user: User = Depends(get_current_user)
) -> User:
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
def testFunc():
return "Hello"
|
StarcoderdataPython
|
1636476
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AGNewsmodelWrapper(nn.Module):
def __init__(self, model):
super(AGNewsmodelWrapper, self).__init__()
self.model = model
def compute_bert_outputs( # pylint: disable=no-self-use
self, model_bert, embedding_input, attention_mask=None, head_mask=None
):
"""Computes Bert Outputs.
Args:
model_bert : the bert model
embedding_input : input for bert embeddings.
attention_mask : attention mask
head_mask : head mask
Returns:
output : the bert output
"""
if attention_mask is None:
attention_mask = torch.ones( # pylint: disable=no-member
embedding_input.shape[0], embedding_input.shape[1]
).to(embedding_input)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(model_bert.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(model_bert.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(model_bert.parameters()).dtype
            ) # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * model_bert.config.num_hidden_layers
encoder_outputs = model_bert.encoder(
embedding_input, extended_attention_mask, head_mask=head_mask
)
sequence_output = encoder_outputs[0]
pooled_output = model_bert.pooler(sequence_output)
outputs = (
sequence_output,
pooled_output,
) + encoder_outputs[1:]
return outputs
def forward(self, embeddings, attention_mask=None):
"""Forward function.
Args:
embeddings : bert embeddings.
attention_mask: Attention mask value
"""
outputs = self.compute_bert_outputs(self.model.bert_model, embeddings, attention_mask)
pooled_output = outputs[1]
output = F.relu(self.model.fc1(pooled_output))
output = self.model.drop(output)
output = self.model.out(output)
return output
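# --- Hypothetical usage sketch (not part of the original module) ---
# The wrapper only needs a backbone exposing `bert_model`, `fc1`, `drop` and
# `out`; the tiny model below is an illustrative assumption that mirrors those
# names and uses a randomly initialised transformers BertModel, so no
# checkpoint download is required.
if __name__ == "__main__":
    from transformers import BertConfig, BertModel

    class TinyAGNewsModel(nn.Module):
        def __init__(self, num_classes=4):
            super().__init__()
            config = BertConfig(hidden_size=128, num_hidden_layers=2,
                                num_attention_heads=2, intermediate_size=256)
            self.bert_model = BertModel(config)
            self.fc1 = nn.Linear(config.hidden_size, 64)
            self.drop = nn.Dropout(0.3)
            self.out = nn.Linear(64, num_classes)

    wrapper = AGNewsmodelWrapper(TinyAGNewsModel())
    input_ids = torch.randint(0, 100, (2, 16))
    embeddings = wrapper.model.bert_model.embeddings(input_ids)
    logits = wrapper(embeddings)
    print(logits.shape)  # expected: torch.Size([2, 4])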
|
StarcoderdataPython
|
16919
|
<filename>tests/ut/python/nn/test_activation.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test Activations """
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _cell_graph_executor
from ..ut_filter import non_graph_engine
class SoftmaxNet(nn.Cell):
def __init__(self, dim):
super(SoftmaxNet, self).__init__()
self.softmax = nn.Softmax(dim)
def construct(self, x):
return self.softmax(x)
@non_graph_engine
def test_compile():
net = SoftmaxNet(0)
input_tensor = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
net(input_tensor)
@non_graph_engine
def test_compile_axis():
net = SoftmaxNet(-1)
prob = 355
input_data = np.random.randn(4, 16, 1, 1).astype(np.float32) * prob
input_tensor = Tensor(input_data)
net(input_tensor)
class LogSoftmaxNet(nn.Cell):
def __init__(self, dim):
super(LogSoftmaxNet, self).__init__()
self.logsoftmax = nn.LogSoftmax(dim)
def construct(self, x):
return self.logsoftmax(x)
@non_graph_engine
def test_compile_logsoftmax():
net = LogSoftmaxNet(0)
input_tensor = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
net(input_tensor)
class Net1(nn.Cell):
def __init__(self):
super(Net1, self).__init__()
self.relu = nn.ReLU()
def construct(self, x):
return self.relu(x)
def test_compile_relu():
net = Net1()
input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
_cell_graph_executor.compile(net, input_data)
class Net_gelu(nn.Cell):
def __init__(self):
super(Net_gelu, self).__init__()
self.gelu = nn.GELU()
def construct(self, x):
return self.gelu(x)
def test_compile_gelu():
net = Net_gelu()
input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
_cell_graph_executor.compile(net, input_data)
class NetLeakyReLU(nn.Cell):
def __init__(self, alpha):
super(NetLeakyReLU, self).__init__()
self.leaky_relu = nn.LeakyReLU(alpha)
def construct(self, x):
return self.leaky_relu(x)
def test_compile_leaky_relu():
net = NetLeakyReLU(alpha=0.1)
input_data = Tensor(np.array([[1.6, 0, 0.6], [6, 0, -6]], dtype=np.float32))
_cell_graph_executor.compile(net, input_data)
|
StarcoderdataPython
|