column            type            range
query             stringlengths   9 to 9.05k
document          stringlengths   10 to 222k
metadata          dict
negatives         listlengths     30 to 30
negative_scores   listlengths     30 to 30
document_score    stringlengths   4 to 10
document_rank     stringclasses   2 values
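The columns above describe a code-retrieval corpus: a natural-language query, its positive code document, 30 mined negative snippets with their scores, plus the positive document's own score and rank. Below is a minimal sketch of loading and inspecting one row with the Hugging Face datasets library; the repository path "org/code-retrieval-triplets" is a placeholder, not the real dataset name.

# Sketch only: the dataset path below is hypothetical.
from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")
row = ds[0]

print(row["query"])                 # e.g. "Entry point for ansible girder client module"
print(len(row["negatives"]))        # 30 negative code snippets
print(len(row["negative_scores"]))  # 30 matching similarity scores
print(row["document_score"], row["document_rank"])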
Entry point for ansible girder client module
def main():
    # Default spec for initializing and authenticating
    argument_spec = {
        # __init__
        'host': dict(),
        'port': dict(),
        'apiRoot': dict(),
        'apiUrl': dict(),
        'scheme': dict(),
        'dryrun': dict(),
        'blacklist': dict(),

        # authenticate
        'username': dict(),
        'password': dict(no_log=True),
        'token': dict(no_log=True),
        'apiKey': dict(no_log=True),

        # General
        'state': dict(default='present', choices=['present', 'absent'])
    }

    gcm = GirderClientModule()

    for method in gcm.required_one_of:
        argument_spec[method] = dict(type=gcm.spec[method]['type'])

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[gcm.required_one_of,
                         ['token', 'username', 'user', 'apiKey']],
        required_together=[['username', 'password']],
        mutually_exclusive=gcm.required_one_of,
        supports_check_mode=False)

    if not HAS_GIRDER_CLIENT:
        module.fail_json(msg='Could not import GirderClient!')

    try:
        gcm(module)
    except requests.HTTPError as e:
        import traceback
        module.fail_json(msg='%s:%s\n%s\n%s' % (e.__class__, str(e),
                                                e.response.text,
                                                traceback.format_exc()))
    except Exception as e:
        import traceback
        # exc_type, exc_obj, exec_tb = sys.exc_info()
        module.fail_json(msg='%s: %s\n\n%s' % (e.__class__, str(e),
                                               traceback.format_exc()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n module = AnsibleModule(\n argument_spec=dict(\n passphrase=dict(type='str', required=False,\n default=None, no_log=True),\n key=dict(type='str', required=False,\n default=None),\n rpms=dict(type='list', required=True),\n state=dict(type='str', required=False,\n default='present', choices=['present', 'absent']),\n macros=dict(type='dict', required=False, default=None)\n ),\n supports_check_mode=True\n )\n\n if not HAS_RPM:\n module.fail_json(rc=1, msg='Error: python2 rpm module is needed for this ansible module')\n\n if module.params['state'] == \"present\":\n sign(module)\n else:\n del_sign(module)", "def main():\n argument_spec = dict(\n command=dict(required=True),\n )\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n command = module.params['command']\n\n connection = Connection(module._socket_path)\n output = connection.get(command=command)\n\n result = {\n 'changed': False,\n 'text': output\n }\n\n module.exit_json(**result)", "def main():\n argument_spec = {\n 'gather_subset': dict(default=['software_info', 'software_images',\n 'host_name', 'platform_name',\n 'management_interface',\n 'software_version', 'fans',\n 'power_supplies', 'product_info',\n 'physical_interfaces',\n 'resource_utilization', 'domain_name'],\n type='list',\n choices=['software_info', 'software_images',\n 'host_name', 'platform_name',\n 'management_interface',\n 'software_version',\n 'config', 'fans', 'power_supplies',\n 'product_info', 'physical_interfaces',\n 'resource_utilization', 'domain_name']),\n 'gather_network_resources': dict(type='list',\n choices=['interfaces', 'vlans',\n 'vrfs'])\n }\n\n argument_spec.update(aoscx_http_argument_spec)\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n module._connection = get_connection(module) # noqa\n\n warnings = []\n if module.params[\"gather_subset\"] == \"!config\":\n warnings.append(\n 'default value for `gather_subset` will be changed '\n 'to `min` from `!config` v2.11 onwards')\n\n result = Facts(module).get_facts()\n\n ansible_facts, additional_warnings = result\n warnings.extend(additional_warnings)\n\n module.exit_json(ansible_facts=ansible_facts, warnings=warnings)", "def main():\n module = AnsibleModule(\n argument_spec=dict(\n session=dict(required=True, type='dict'),\n blueprint_id=dict(required=True,),\n name=dict(required=False),\n id=dict(required=False),\n state=dict(required=False,\n choices=['present', 'absent'],\n default=\"present\",),\n vni_id=dict(required=False),\n vlan_id=dict(required=False),\n ),\n mutually_exclusive=[('name', 'id')],\n required_one_of=[('name', 'id')],\n supports_check_mode=True\n )\n\n sec_zone(module)", "def main() -> None:\n return AnsibleModule(\n argument_spec={\n \"data\": {\"default\": None},\n \"path\": {\"default\": None},\n \"file\": {\"default\": None},\n },\n )", "def client():", "def main():\n module = AnsibleModule(\n argument_spec=dict(\n pn_loopback_ip=dict(required=False, type='str', default='109.109.109.3/32'),\n pn_vrrp_id=dict(required=False, type='str', default='18'),\n pn_current_switch=dict(required=False, type='str'),\n pn_spine_list=dict(required=True, type='list'),\n pn_leaf_list=dict(required=True, type='list'),\n pn_pim_ssm=dict(required=False, type='bool'),\n pn_ospf_redistribute=dict(required=False, type='str',\n choices=['none', 'static', 'connected',\n 'rip', 'ospf'],\n default='none'),\n pn_bgp_redistribute=dict(required=False, type='str',\n choices=['none', 'static', 'connected',\n 'rip', 
'ospf']),\n pn_bgp_as=dict(required=False, type='str'),\n pn_loopback_ip_v6=dict(required=False, type='str'),\n )\n )\n\n global CHANGED_FLAG\n global task\n global msg\n\n task = 'Vrouter creation'\n msg = 'Vrouter creation failed'\n\n results = []\n message = ''\n loopback_address = module.params['pn_loopback_ip']\n current_switch = module.params['pn_current_switch']\n vrrp_id = module.params['pn_vrrp_id']\n ospf_redistribute = module.params['pn_ospf_redistribute']\n pim_ssm = module.params['pn_pim_ssm']\n bgp_redistribute = module.params['pn_bgp_redistribute']\n bgp_as = module.params['pn_bgp_as']\n\n # Create vrouters\n change_flag = list()\n change_flag, output = create_vrouter(module, current_switch, change_flag, task, msg,\n vrrp_id, ospf_redistribute, pim_ssm, bgp_redistribute, bgp_as)\n message += output\n if True in change_flag:\n CHANGED_FLAG.append(True)\n\n # Assign loopback ip to vrouters\n output = ''\n change_flag = list()\n change_flag, output = assign_loopback_and_router_id(module, loopback_address, current_switch,\n change_flag, task, msg)\n if output:\n message += output\n if True in change_flag:\n CHANGED_FLAG.append(True)\n\n replace_string = current_switch + ': '\n for line in message.splitlines():\n if replace_string in line:\n results.append({\n 'switch': current_switch,\n 'output': (line.replace(replace_string, '')).strip()\n })\n\n # Exit the module and return the required JSON.\n module.exit_json(\n unreachable=False,\n msg='Vrouter creation succeeded',\n summary=results,\n exception='',\n failed=False,\n changed=True if True in CHANGED_FLAG else False,\n task='Create vrouter'\n )", "def main():\n\n # the AnsibleModule object will be our abstraction for working with Ansible.\n # This includes instantiation, a couple of common attr that will be the\n # args/params passed to the execution, as well as if the module\n # supports check mode\n module = AnsibleModule(\n argument_spec=dict(\n hostvars=dict(type='raw', required=True),\n report_timestamp=dict(type=str, required=False, default=''),\n registered_dict_name=dict(type=str, required=False, default=\"get_sas_host_details_results\"),\n include_hotfix_report=dict(type=bool, required=False, default=True),\n hotfix_url = dict(type=str, required=True),\n hotfix_master_file = dict(type=str, required=True)\n ),\n supports_check_mode=True\n )\n\n # get module parameters\n hostvars = module.params['hostvars']\n report_timestamp = module.params['report_timestamp']\n registered_dict_name = module.params['registered_dict_name']\n include_hotfix_report = module.params['include_hotfix_report']\n hotfix_url = module.params['hotfix_url']\n hotfix_master_file = module.params['hotfix_master_file']\n\n # Starting in Ansible 2.8.1, there is the potential for hostvars\n # to be passed as a byte string, if the dict is too large\n # This will convert the str back to a dict before proceeding\n if isinstance(hostvars, str):\n hostvars = ast.literal_eval(hostvars.decode())\n\n results = dict()\n results['sas_hosts'] = dict()\n results['created'] = report_timestamp\n\n for inventory_hostname, host_vars in hostvars.items():\n\n # set up returnable values\n unreachable = True\n failed = True\n failure_details = dict(\n msg=\"\",\n rc=0,\n stderr=\"\",\n stdout=\"\",\n )\n\n # get the host details dict\n host_details = host_vars.get(registered_dict_name)\n\n # check if the host has the registered dict\n if host_details is not None:\n\n # host details exist, so host was reachable\n unreachable = False\n\n # check if the host failed\n failed = 
host_details['failed']\n\n # if the module reported a failure, collect details\n if failed:\n failure_details['msg'] = host_details['msg']\n failure_details['rc'] = host_details['rc']\n failure_details['stderr'] = host_details['module_stderr']\n failure_details['stdout'] = host_details['module_stdout']\n else:\n # get module results\n host_results = host_details.get('sas_host_details')\n\n if host_results is not None:\n results['sas_hosts'].update(host_results)\n else:\n failed = True\n\n # if the results dict could not be found, mark the host as unreachable\n if failed or unreachable:\n host_groups = host_vars.get('group_names')\n\n if host_groups is not None and 'sas_all' in host_groups:\n hostname = host_vars.get('ansible_fqdn')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('ansible_hostname')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('ansible_host')\n if hostname is None or hostname == \"\":\n hostname = host_vars.get('inventory_hostname')\n if hostname is None or hostname == \"\":\n hostname = inventory_hostname\n\n try:\n host_groups.remove('sas_all')\n host_groups.remove('sas-all')\n except ValueError:\n pass # do nothing\n\n results['sas_hosts'][hostname] = dict(\n _id=hostname.replace('.', '-'),\n _unreachable=unreachable,\n _failed=failed,\n _failure_details=failure_details,\n ansible_host_groups=host_groups\n )\n else:\n pass # this host isn't in sas_all so there's no need to try and report on it\n\n ##################################################################################\n # This section will find all of the hotfixes available and add them to the report.\n ##################################################################################\n\n # There are a few data structures that are complicated enough to warrant a description:\n # fullReport\n # This will hold all of the data in a format condusive to printing it out in the final report. 
This is how\n # It is structured:\n # fullReport (dict):\n # key=Hot Fix Name, point to another dict:\n # key=\"released\", points to a string containing the release date of the hotfix.\n # key= \"installed\", points to a boolean that will reflect whether any of the packages used by this hotfix are installed on any of the machines in the deployment.\n # key=\"upToDate\", point to a boolean that will reflest whether ALL of the packages used by this hotfix are up to date on ALL of the machines in the deployment.\n # key=\"sasnote\", points to another dict:\n # key=SASNote number, points to the description of the SASNote.\n # key=\"package\", points to another dict:\n # key=\"platform\" , points to another dict:\n # key=OS, points to another dict:\n # key=\"version\", points to the string of the version of the package.\n # key=\"installed\", points to a boolean which reflects whether this package is installed on any machine in the deployment.\n # key=\"upToDate\", points to a boolean which reflects whether this package is up to data on ALL of the machines in the deployment.\n # key=\"os\", points to the fully qualified name of the operating system.\n # key=\"arch\", points to the architecture of the OS (NOTE: This does not exist on Windows systems.)\n # key=\"alreadyUpdated\", points to a boolean, which is used to keep track of whether the upToDate has already been set.\n # key=\"installedVersions\", points to another dict:\n # key=machineName, points to a 2 element list:\n # [0]=string containing package version that is currently installed.\n # [1]=boolean reflecting whether this version is at or above the package delevered in this hotfix.\n #\n ###########################################################################\n #\n # packageToHotFix\n # This will hold a dict of lists:\n # key: package name, pointing to a 2 element list:\n # [0] OS\n # [1] The Hotfix that this package is associated with.\n #\n ###########################################################################\n #\n # environmentReportDict\n # This is inherited from the environment report, but it's probably worth documenting what it looks like.\n # There is a lot of data inerherited, and I'm only describing what is used in this script.\n # environmentReportDict\n # key=hostname (for each machine in the deployment), pointing to another dict:\n # key=\"OS\", pointing to string for the OS family.\n # key=\"arch\", pointing to the string for the architecture of the host.\n # key=\"sas_packages\", pointing to another dict:\n # key=package number, pointing to another dict:\n # key=\"attributes\", pointing to another dict:\n # key=\"version\", pointing to a string of the package versions currently installed on the host.\n ############################################################################\n\n results[\"include_hotfix_report\"] = include_hotfix_report\n if include_hotfix_report:\n # This is the URL from which to pull the hotfix files.\n if hotfix_url[-1:] == '/':\n baseURL = hotfix_url\n else:\n baseURL = hotfix_url + '/'\n # This is the master file that lists which other files should be examined for the actual hotfixes themselves.\n masterFile = hotfix_master_file\n # This is the top level object to store the hotfix report information (see above).\n fullReport = {}\n # This is a dict of package to hotfixes (see above).\n packageToHotfix = {}\n # This boolean will help with debugging.\n debug = False\n\n try:\n # Parse the master file to obtain where the hotfix files are.\n masterFileXML = urllib2.urlopen(baseURL + 
masterFile)\n\n # Parse the master file and build a list of all files.\n allFilesRoot = ET.fromstring(masterFileXML.read())\n results[\"contact_hotfix_website\"] = True\n except urllib2.URLError :\n results[\"contact_hotfix_website\"] = False\n results[\"master_website\"] = baseURL + masterFile\n if debug:\n print(\"***** Error parsing \" + baseURL + masterFile)\n print(traceback.format_exc())\n print(\"***** No hot fix information obtained. Skipping hot fix report.\\n\\n\")\n\n if results[\"contact_hotfix_website\"]:\n # Loop through the files discoverd in the master file\n if debug:\n print(\"Building hot fix report, based on master file input.\")\n for file_tag in allFilesRoot.findall('File'):\n currentFile = file_tag.get('fileName')\n fileToParse = baseURL + currentFile\n # Retrieve each file.\n # Inside of each file, the lines are keyed by the hot fix id. There are three types of lines, in order:\n # 1) id and release date\n # 2) id, sasnote, sasnotetitle\n # 3) id, OS, package.\n # This script loops through to build a dictionary of dictonaries with the basic structure:\n # ID\n # Release Date\n # SASNotes\n # SASNote and Title\n # ...\n # Packages\n # Package Name, Version, and OS\n try:\n currentFileXML = urllib2.urlopen(fileToParse)\n currentFileRoot = ET.fromstring(currentFileXML.read())\n updateID = \"\"\n for update_tag in currentFileRoot.findall('update'):\n currentUpdate = update_tag.get('id')\n releaseDate = update_tag.get('released')\n # To get the top level Dictionary seeded with the hot fix Name and release date.\n if releaseDate is not None:\n if currentUpdate in fullReport:\n if debug:\n print(\"WARNING! Hot Fix \" + currentUpdate + \" already discovered. Skipping\")\n updateID = \"DUPLICATE-SKIP\"\n else:\n # The SCXXXX hot fixes are special. The package files are only included in\n # Viya_<version>_<platform>_home.xml files. So, the entries in the\n # scheduled_update_<platform>_<shipevent>.xml files can be skipped.\n if currentUpdate.startswith(\"SC\") and currentFile.find(\"scheduled_update_\") < 0:\n continue\n updateID = currentUpdate\n fullReport[updateID] = {}\n fullReport[updateID][\"release_date\"] = releaseDate\n fullReport[updateID][\"installed\"] = False\n fullReport[updateID][\"upToDate\"] = False\n # To get the SASNote information under the hot fix\n else:\n if updateID == \"DUPLICATE-SKIP\":\n continue\n sasNote = update_tag.get('sasnote')\n sasNoteTitle = update_tag.get('sasnoteTitle')\n if sasNote is not None:\n if \"sasnote\" not in fullReport[updateID]:\n fullReport[updateID][\"sasnote\"] = {}\n # This string needs to be encoded because some non-ASCII characters are\n # in some of the titles.\n fullReport[updateID][\"sasnote\"][sasNote] = sasNoteTitle.encode('utf-8')\n # To get the Package information under the hot fix.\n else:\n os = update_tag.get(\"os\")\n fullPackage = update_tag.get(\"package\")\n if fullPackage is not None:\n if \"package\" not in fullReport[updateID]:\n fullReport[updateID][\"package\"] = {}\n\n lastPeriodIndex = fullPackage.rfind(\".\")\n # Format the package information.\n # Windows does not have a dash in the version; Linux does. So, we need to break differently,\n # depending on the OS.\n if os.lower().find(\"windows\") > -1:\n versionStartIndex = fullPackage.rfind(\"-\")\n achitectureStartIndex = -1\n versionEndIndex = lastPeriodIndex\n osFamily = \"Windows\"\n else:\n versionStartIndex = fullPackage.rfind(\"-\", 0, fullPackage.rfind(\"-\"))\n # Linux has architecture in the package. 
This will be stored in its own key.\n achitectureStartIndex = fullPackage.rfind(\".\", 0, lastPeriodIndex)\n # SLES has the string 'suse' in its package. This will strip it out (as well as an extra .).\n if os.lower().find(\"suse\") > -1:\n versionEndIndex = achitectureStartIndex - 5\n osFamily = \"Suse\"\n else:\n if os.lower().find(\"yocto\") > -1:\n versionEndIndex = achitectureStartIndex - 6\n osFamily = \"Yocto\"\n else:\n if os.lower().find(\"ubuntu\") > -1:\n versionStartIndex = fullPackage.rfind(\"_\", 0, fullPackage.rfind(\"_\"))\n versionEndIndex = fullPackage.rfind(\"_\")\n achitectureStartIndex = versionEndIndex\n osFamily = \"Ubuntu\"\n else:\n if os.lower().find(\"red hat enterprise linux 7\") > -1:\n versionStartIndex = versionStartIndex = fullPackage.rfind(\":\")\n versionEndIndex = len(fullPackage)\n achitectureStartIndex = -1\n osFamily = \"RedHat\"\n else:\n versionEndIndex = achitectureStartIndex\n osFamily = \"RedHat\"\n package = fullPackage[:versionStartIndex]\n packageVersion = fullPackage[versionStartIndex + 1:versionEndIndex]\n architecture = fullPackage[achitectureStartIndex + 1:lastPeriodIndex]\n\n if package not in fullReport[updateID][\"package\"]:\n fullReport[updateID][\"package\"][package] = {}\n if \"platform\" not in fullReport[updateID][\"package\"][package]:\n fullReport[updateID][\"package\"][package][\"platform\"] = {}\n if osFamily not in fullReport[updateID][\"package\"][package][\"platform\"]:\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily] = {}\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"version\"] = packageVersion\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"installed\"] = False\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"upToDate\"] = False\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"os\"] = os\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"installedVersions\"] = {}\n if achitectureStartIndex != -1:\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"arch\"] = architecture\n # This property is used to make sure that when evaluating the installed packages,\n # the upToDate=false does not get overridden by a True at the end.\n fullReport[updateID][\"package\"][package][\"platform\"][osFamily][\"alreadyUpdated\"] = False\n\n # Add to the package to hot fix dict.\n if package not in packageToHotfix:\n packageToHotfix[package] = []\n packageToHotfix[package].append([osFamily, updateID])\n\n except ET.ParseError:\n if debug:\n print(\"***** Error parsing \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping file.\\n\\n\")\n except urllib2.HTTPError:\n if debug:\n print(\"***** Cannot access \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping the file.\\n\\n\")\n except:\n if debug:\n print(\"***** Error encountered with \" + fileToParse)\n print(traceback.format_exc())\n print(\"***** Skipping the file.\\n\\n\")\n\n if debug:\n print(\"**** Build complete. 
Here are the hot fixes:\")\n print_Full_Report(fullReport)\n print(\"***********************************************************************************\")\n print(\"**** Here is the package to hot fix dict:\")\n print(\"***********************************************************************************\")\n for current_package in packageToHotfix:\n print(\" \" + current_package)\n for machine_list in packageToHotfix[current_package]:\n print(\" \" + machine_list[0] + \" @ \" + machine_list[1] + \".\")\n print(\"***********************************************************************************\")\n print(\"Report built.\")\n print(\"Accessing environment Data.\")\n\n for currentMachine in results['sas_hosts']:\n if not results['sas_hosts'][currentMachine][\"_unreachable\"] and not results['sas_hosts'][currentMachine][\"_failed\"]:\n currentOS = results['sas_hosts'][currentMachine]['os']['family']\n for currentPackage in results['sas_hosts'][currentMachine]['sas_packages']:\n if currentPackage in packageToHotfix:\n for osHotfix in packageToHotfix[currentPackage]:\n if osHotfix[0] == currentOS:\n currentHotfix = osHotfix[1]\n installedVersion = \\\n results['sas_hosts'][currentMachine]['sas_packages'][currentPackage]['attributes']['version']\n if installedVersion.endswith('.suse'):\n installedVersion = installedVersion[:-5]\n else:\n if installedVersion.endswith('.yocto'):\n installedVersion = installedVersion[:-6]\n else:\n if '_' in installedVersion:\n installedVersion = installedVersion[0:installedVersion.rfind(\"_\")]\n hotfixVersion = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"version\"]\n upToDate = compare_versions(installedVersion, hotfixVersion) >= 0\n fullReport[currentHotfix][\"installed\"] = True\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installed\"] = True\n # If a previous pacakage marked updateToDate=True, it can still be pulled back to false if another package isn't\n # up to date. 
If the previous package was marked upToDate=false, the hotfix cannot be marked true.\n if not fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] or \\\n (fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] and\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"upToDate\"]):\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"upToDate\"] = upToDate\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"alreadyUpdated\"] = True\n fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentMachine] = [installedVersion, upToDate]\n\n if debug:\n print(\"Comparing evironment data to hotfix data.\")\n for currentHotFix in fullReport:\n cumulativeOverallUpToDate = True\n # This will only allow the top level \"upToDate\" property to be set, if there is a package installed on this OS.\n allowTopLevelUpdate = False\n for currentPackage in fullReport[currentHotFix][\"package\"]:\n cumulativeOSUpToDate = True\n for currentOS in fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"]:\n if len(fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"]) > 0:\n cumulativeOSUpToDate = cumulativeOSUpToDate and \\\n fullReport[currentHotFix][\"package\"][currentPackage][\"platform\"][currentOS][\n \"upToDate\"]\n allowTopLevelUpdate = True\n\n cumulativeOverallUpToDate = cumulativeOverallUpToDate and cumulativeOSUpToDate\n if allowTopLevelUpdate:\n fullReport[currentHotFix][\"upToDate\"] = cumulativeOverallUpToDate\n\n # Now that the fullReport has been updated, go back and add to results, for the final report.\n results[\"available_hotfixes\"] = {}\n results[\"installed_hotfixes\"] = {}\n\n for currentHotfix in fullReport:\n if not fullReport[currentHotfix][\"installed\"]:\n continue\n if fullReport[currentHotfix][\"upToDate\"]:\n hotfix_dict_to_use = \"installed_hotfixes\"\n else:\n hotfix_dict_to_use = \"available_hotfixes\"\n results[hotfix_dict_to_use][currentHotfix] = {}\n results[hotfix_dict_to_use][currentHotfix][\"release_date\"] = fullReport[currentHotfix][\"release_date\"]\n results[hotfix_dict_to_use][currentHotfix][\"packages\"] = []\n for currentPackage in fullReport[currentHotfix][\"package\"]:\n for currentOS in fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"]:\n if not fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installed\"]:\n continue\n for currentHost in fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"]:\n temp_dict = {}\n temp_dict[\"hostname\"] = currentHost\n temp_dict[\"package\"] = currentPackage\n temp_dict[\"installed_version\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentHost][0]\n temp_dict[\"hotfix_version\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"version\"]\n temp_dict[\"up_to_date\"] = fullReport[currentHotfix][\"package\"][currentPackage][\"platform\"][currentOS][\"installedVersions\"][currentHost][1]\n results[hotfix_dict_to_use][currentHotfix][\"packages\"].append(temp_dict)\n # Format the SAS Note description so that we can respect any HTML tags that are included in the text.\n results[hotfix_dict_to_use][currentHotfix][\"sas_notes\"] = {}\n for current_number 
in fullReport[currentHotfix][\"sasnote\"]:\n # Honor any html that is coming through.\n temp_sasnote_description = fullReport[currentHotfix][\"sasnote\"][current_number]\n temp_sasnote_description = temp_sasnote_description.replace(\"&lt;\", \"<\")\n temp_sasnote_description = temp_sasnote_description.replace(\"&gt;\", \">\")\n # Build a link to the URL for the SAS Note.\n hot_fix_prefix = current_number[:2]\n hot_fix_postfix = current_number[2:]\n sas_note_url = \"http://support.sas.com/kb/\" + hot_fix_prefix + \"/\" + hot_fix_postfix + \".html\"\n sas_note_html_link = \"<a href=\\\"\" + sas_note_url + \"\\\"\\>\" + current_number + \"</a>\"\n results[hotfix_dict_to_use][currentHotfix][\"sas_notes\"][current_number] = {\"sas_note_link\":sas_note_html_link, \"description\":temp_sasnote_description}\n\n # in the event of a successful module execution, you will want to\n # simple AnsibleModule.exit_json(), passing the key/value results\n #\n # changed will always be 'False' since we'll never alter state on a host\n module.exit_json(changed=False, processed_host_details=results)", "def main():\n module = AnsibleModule(\n argument_spec=dict(\n host=dict(type='str', required=True),\n destination=dict(type='str', required=True),\n repeat_count=dict(type='int', default=5),\n vrf_name=dict(type='str'),\n min_success_rate=dict(type='int', default=100)\n ),\n supports_check_mode=True\n )\n\n if module.check_mode:\n module.exit_json(changed=False)\n\n try:\n retvals = ping(module.params['host'],\n module.params['destination'],\n module.params['repeat_count'],\n module.params['vrf_name'])\n except Exception as exc:\n module.fail_json(msg='Reachability validation failed ({})'.format(exc))\n\n retvals['changed'] = False\n\n if retvals['success_rate'] >= module.params['min_success_rate']:\n module.exit_json(**retvals)\n else:\n module.fail_json(msg=('Success rate lower than expected ({}<{})').\n format(retvals['success_rate'],\n module.params['min_success_rate']))", "def client():\n\n gcomics_scrape.APP.config['TESTING'] = True\n test_client = gcomics_scrape.APP.test_client()\n\n yield test_client", "def init():\n\n @click.group()\n def aws():\n \"\"\"Manage treadmill on AWS\"\"\"\n pass\n\n @aws.command(name='init')\n def init():\n \"\"\"Initialise ansible files for AWS deployment\"\"\"\n pass\n # destination_dir = os.getcwd() + '/deploy'\n # try:\n # os.makedirs(destination_dir)\n # except OSError as e:\n # if e.errno == errno.EEXIST:\n # print('''AWS \"deploy\" directory already exists in this folder\n # \\n''', destination_dir)\n # copy_tree(deploy_path_join('../deploy'), destination_dir)\n\n @aws.command(name='cell')\n @click.option('--create', required=False, is_flag=True,\n help='Create a new treadmill cell on AWS',)\n @click.option('--destroy', required=False, is_flag=True,\n help='Destroy treadmill cell on AWS',)\n @click.option('--playbook', help='Playbok file',)\n @click.option('--inventory',\n 'controller.inventory',\n help='Inventory file',)\n @click.option('--key-file',\n default='key.pem',\n help='AWS ssh pem file',)\n @click.option('--aws-config',\n 'config/aws.yml',\n help='AWS config file',)\n @click.option('--with-freeipa/--no-freeipa',\n default=False,\n help='Create Cell with freeIPA',)\n def cell(create, destroy, playbook,\n inventory, key_file,\n aws_config, with_freeipa):\n \"\"\"Manage treadmill cell on AWS\"\"\"\n pass\n # playbook_args = [\n # 'ansible-playbook',\n # '-i',\n # inventory,\n # '-e',\n # 'aws_config={}'.format(aws_config) +\n # ' 
freeipa={}'.format(with_freeipa),\n # ]\n # if create:\n # playbook_args.extend([\n # playbook or deploy_path_join('cell.yml'),\n # '--key-file',\n # key_file,\n # ])\n # elif destroy:\n # playbook_args.append(\n # playbook or deploy_path_join('destroy-cell.yml')\n # )\n # else:\n # return\n\n # playbook_cli = PlaybookCLI(playbook_args)\n # playbook_cli.parse()\n # playbook_cli.run()\n\n @aws.command(name='node')\n @click.option('--create',\n required=False,\n is_flag=True,\n help='Create a new treadmill node',)\n @click.option('--playbook',\n 'node.yml',\n help='Playbok file',)\n @click.option('--inventory',\n 'controller.inventory',\n help='Inventory file',)\n @click.option('--key-file',\n default='key.pem',\n help='AWS ssh pem file',)\n @click.option('--aws-config',\n 'config/aws.yml',\n help='AWS config file',)\n def node(create, playbook, inventory, key_file, aws_config):\n \"\"\"Manage treadmill node\"\"\"\n pass\n # if create:\n # playbook_cli = PlaybookCLI([\n # 'ansible-playbook',\n # '-i',\n # inventory,\n # playbook,\n # '--key-file',\n # key_file,\n # '-e',\n # 'aws_config={}'.format(aws_config),\n # ])\n # playbook_cli.parse()\n # playbook_cli.run()\n\n del cell\n del node\n\n return aws", "def main():\n kernel_params = _parse_kernel_cmdline()\n api_url = kernel_params.get('ironic_api_url')\n deployment_id = kernel_params.get('deployment_id')\n inspect = kernel_params.get('inspect')\n # TODO(aarefiev): change ssh driver\n ironic_driver = kernel_params.get('callback-driver-name', 'ansible_ssh')\n if inspect and api_url is None:\n _process_error('Ironic ansible callback: Mandatory parameter '\n '\"ironic_api_url\" is missing.')\n if api_url is None or deployment_id is None:\n _process_error('Mandatory parameter (\"ironic_api_url\" or '\n '\"deployment_id\") is missing.')\n\n boot_mac = kernel_params.get('BOOTIF')\n if boot_mac is None:\n _process_error('Cannot define boot interface, \"BOOTIF\" parameter is '\n 'missing.')\n\n # There is a difference in syntax in BOOTIF variable between pxe and ipxe\n # boot with Ironic. 
For pxe boot the the leading `01-' denotes the device type\n # (Ethernet) and is not a part of the MAC address\n if boot_mac.startswith('01-'):\n boot_mac = boot_mac[3:].replace('-', ':')\n\n for n in range(_GET_ADDR_MAX_ITERATION):\n boot_ip = _get_interface_ip(boot_mac)\n if boot_ip is not None:\n break\n time.sleep(_RETRY_INTERVAL)\n else:\n _process_error('Cannot find IP address of boot interface.')\n\n data = {\"callback_url\": \"ssh://\" + boot_ip}\n\n if inspect:\n passthru = ('%(api-url)s/v1/drivers/%(driver)s/vendor_passthru'\n '/inspect' % {'api-url': api_url,\n 'driver': ironic_driver}\n else:\n passthru = '%(api-url)s/v1/nodes/%(deployment_id)s/vendor_passthru' \\\n '/heartbeat' % {'api-url': api_url,\n 'deployment_id': deployment_id}\n\n for attempt in range(_POST_CALLBACK_MAX_ITERATION):\n try:\n resp = requests.post(passthru, data=json.dumps(data),\n headers={'Content-Type': 'application/json',\n 'Accept': 'application/json'})\n except Exception as e:\n error = str(e)\n else:\n if resp.status_code != 202:\n error= ('Wrong status code %d returned from Ironic API' %\n resp.status_code)\n else:\n break\n\n if attempt == (_POST_CALLBACK_MAX_ITERATION - 1):\n _process_error(error)\n\n time.sleep(_RETRY_INTERVAL)", "def main():\n module = AnsibleModule(\n argument_spec=dict(\n pn_cliusername=dict(required=False, type='str'),\n pn_clipassword=dict(required=False, type='str', no_log=True),\n pn_switch_list=dict(required=False, type='list', default=[]),\n pn_vrrp_id=dict(required=False, type='str', default='18'),\n pn_vrrp_data=dict(required=False, type='str', default=''),\n )\n )\n\n global CHANGED_FLAG\n results = []\n message = ''\n switch_list = module.params['pn_switch_list']\n\n # Configure vrrp\n vrrp_data = module.params['pn_vrrp_data']\n if vrrp_data:\n vrrp_data = vrrp_data.replace(' ', '')\n vrrp_data_list = vrrp_data.split('\\n')\n for row in vrrp_data_list:\n if row.startswith('#'):\n continue\n else:\n elements = row.split(',')\n vlan_id = elements.pop(0).strip()\n gateway_ip = elements.pop(0).strip()\n primary_ip = elements.pop(0).strip()\n secondary_ip = elements.pop(0).strip()\n active_switch = elements.pop(0).strip()\n\n message += create_vlan(module, switch_list[0], vlan_id)\n\n for switch in switch_list:\n if switch == active_switch:\n vrrp_priority = '110'\n vrrp_ip = primary_ip\n else:\n vrrp_priority = '109'\n vrrp_ip = secondary_ip\n\n message += create_vrouter_interface(module, switch, vrrp_ip,\n gateway_ip, vlan_id,\n vrrp_priority)\n\n for switch in switch_list:\n replace_string = switch + ': '\n for line in message.splitlines():\n if replace_string in line:\n results.append({\n 'switch': switch,\n 'output': (line.replace(replace_string, '')).strip()\n })\n\n # Exit the module and return the required JSON.\n module.exit_json(\n unreachable=False,\n msg='Vrrp configuration succeeded',\n summary=results,\n exception='',\n failed=False,\n changed=True if True in CHANGED_FLAG else False,\n task='Configure vrrp'\n )", "def main():\n\n\n fab_list = get_fabric_list(SANNAV_IP_ADDRESS, SANNAV_FOS_USERNAME, SANNAV_FOS_PASSWORD)\n\n # Print all known facts about the fabrics and the switches\n # Comment out this print statement if this code will be used to generate\n # an Ansible Tower inventory.\n print(json.dumps(fab_list))\n\n # This section of code formats the results to be in a format acceptable to Ansible Tower (awx).\n # To use it, unblock the following block of code and comment out the preceeding print statement.\n\n _ = \"\"\"\n toAwx = {'_meta': {'hostvars': 
{}}}\n\n for fabric in fab_list[\"Fabrics\"]:\n toAwx[fabric[\"name\"]] = { 'hosts': []}\n for switch in fabric[\"Switches\"]:\n toAwx[fabric[\"name\"]]['hosts'].append(switch['ipAddress'])\n print(json.dumps(toAwx));\n \"\"\"", "def main():\n\n required_if = [\n (\"state\", \"merged\", (\"config\",)),\n (\"state\", \"replaced\", (\"config\",)),\n (\"state\", \"overridden\", (\"config\",)),\n (\"state\", \"rendered\", (\"config\",)),\n (\"state\", \"parsed\", (\"running_config\",)),\n ]\n\n mutually_exclusive = [(\"config\", \"running_config\")]\n\n module = AnsibleModule(\n argument_spec=OGsArgs.argument_spec,\n required_if=required_if,\n mutually_exclusive=mutually_exclusive,\n supports_check_mode=True,\n )\n result = OGs(module).execute_module()\n\n module.exit_json(**result)", "def main():\n demisto.info('Command being called is ' + demisto.command())\n\n \"\"\"\n PARSE AND VALIDATE INTEGRATION PARAMS\n \"\"\"\n\n rest_client = RestClient(\n base_url=BASE_URL,\n verify=VERIFY_CERT,\n )\n\n try:\n if demisto.command() == 'test-module':\n test_module(rest_client)\n demisto.results('ok')\n\n elif demisto.command() == 'fetch-incidents':\n # get all tenant ids\n next_run, incidents = fetch_incidents(rest_client, demisto.getLastRun())\n demisto.setLastRun(next_run)\n demisto.incidents(incidents)\n\n elif demisto.command() == 'mad-close-incident':\n return_outputs(close_incident_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-assign-user':\n return_outputs(assign_user_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-remove-user':\n return_outputs(remove_user_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-get-incident':\n return_results(get_incident_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'update-remote-system':\n return_results(update_remote_system_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'get-mapping-fields':\n return_results(get_mapping_fields_command())\n\n elif demisto.command() == 'get-remote-data':\n return_results(get_remote_data_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-get-escalations':\n return_results(get_escalations_command(rest_client, demisto.args()))\n\n else:\n raise NotImplementedError('Command not implemented')\n\n except NotImplementedError:\n raise\n except Exception as err:\n demisto.error(traceback.format_exc()) # print the traceback\n return_error(f'Failed to execute {demisto.command()} command.\\nError:\\n{str(err)}')", "def main() -> None:\n\n user = demisto.params().get(\"credentials\", {}).get(\"identifier\")\n password = demisto.params().get(\"credentials\", {}).get(\"password\")\n\n base_url = urljoin(demisto.params()[\"url\"], \"/Konfigurator/REST\")\n verify_certificate = not demisto.params().get(\"insecure\", False)\n proxy = demisto.params().get(\"proxy\", False)\n headers = {\"Content-Type\": \"application/mwg+xml\"}\n\n command = demisto.command()\n\n demisto.debug(f\"Command being called is {command}\")\n\n try:\n with Client(\n username=user,\n password=password,\n base_url=base_url,\n verify=verify_certificate,\n headers=headers,\n proxy=proxy,\n ) as client:\n commands = {\n \"test-module\": test_module,\n \"swg-get-available-lists\": get_lists_command,\n \"swg-get-list\": get_list_command,\n \"swg-get-list-entry\": get_list_entry_command,\n \"swg-modify-list\": modify_list_command,\n \"swg-insert-entry\": insert_entry_command,\n \"swg-delete-entry\": delete_entry_command,\n \"swg-create-list\": 
create_list_command,\n \"swg-delete-list\": delete_list_command,\n }\n if command not in commands:\n raise NotImplementedError(f'Command {command} was not implemented.')\n return_results(commands[command](client, demisto.args()))\n except Exception as e:\n return_error(f'Failed to execute {command} command.\\nError:\\n{str(e)}')", "def main():\n # Creating resources/clients for all needed infrastructure: EC2, IAM, Redshift\n ec2 = create_client('ec2', boto3.resource)\n iam = create_client('iam', boto3.client)\n redshift = create_client('redshift', boto3.client)\n \n # Create needed IAM / ARN roles for Redshift\n create_iam_role(iam)\n arn_role = create_arn_role(iam)\n \n # Create cluster and await its completion\n create_redshift_cluster(redshift, arn_role)\n cluster_props = query_redshift_status(redshift)\n \n # Get endpoint into to allow querying\n info = get_redshift_endpoint_info(redshift, cluster_props)\n print(info)\n # TODO: Save info to aws.cfg\n \n # Update security groups to ACTUALLY allow querying\n update_cluster_security_group(ec2, cluster_props)\n \n # Test connection to see that everything (hopefully) went well\n test_connection()\n \n # End of main\n return", "def main(args=None):\n\n program = Program(\n name='Ansible Customer Invoke taks to run \"ansible-galaxy\" commands',\n namespace=Collection.from_module(ansible_galaxy_tasks),\n version='0.1.0-alpha+001')\n\n program.run(args)", "async def run():\n # Get the arguments from the parser\n args = client.arguments\n\n # If the help argument was used, return\n if hasattr(args, \"help\"):\n return\n # Otherwise, check the correct command and invoke the respective function\n # BUILD\n if args.command == \"build\":\n if args.action == \"delete\":\n await client.delete_build(args.build)\n elif args.action == \"download\":\n await client.download_build(args.build, args.force)\n elif args.action == \"info\":\n await client.show_build(args.build)\n # BUILDS\n elif args.command == \"builds\":\n if args.refresh:\n await client.update_builds()\n await client.show_builds(args.ready_only)\n # FOLDER\n elif args.command == \"folder\":\n if args.action == \"create\":\n await client.create_folder(args.folder, args.no_resources)\n elif args.action == \"info\":\n await client.get_folder(args.folder)\n elif args.action == \"resources\":\n await client.get_resources(args.folder)\n elif args.action == \"delete\":\n await client.delete_folder(args.folder)\n # FOLDERS\n elif args.command == \"folders\":\n if args.refresh:\n await client.post(\"/folders\")\n await client.show_folders()\n # SERVER\n elif args.command == \"server\":\n if args.action == \"start\":\n await client.start_server(args.server, args.build)\n elif args.action == \"info\":\n await client.get_server(args.server)\n elif args.action == \"stop\":\n await client.stop_server(args.server)\n # SERVERS\n elif args.command == \"servers\":\n await client.print_servers()\n # INFO\n else:\n await client.show_info()", "def main():\n local = salt.client.LocalClient()\n\n if len(sys.argv) == 2 and sys.argv[1] == '--list':\n print json.dumps(local.cmd('*', 'grains.items'), indent=4, sort_keys=True)\n elif len(sys.argv) == 3 and sys.argv[1] == '--host':\n print json.dumps(local.cmd(sys.argv[2], 'grains.items'), indent=4, sort_keys=True)\n else:\n print \"Need an argument, either --list or --host <host>\"", "def main():\n module = AnsibleModule(\n argument_spec=dict(\n pn_cliusername=dict(required=False, type='str'),\n pn_clipassword=dict(required=False, type='str', no_log=True),\n 
pn_switch_list=dict(required=False, type='list', default=[]),\n )\n )\n\n global CHANGED_FLAG\n results = []\n message = ''\n switch_list = module.params['pn_switch_list']\n\n # Create cluster\n if len(switch_list) == 2:\n message += create_cluster(module, switch_list)\n\n for switch in switch_list:\n replace_string = switch + ': '\n for line in message.splitlines():\n if replace_string in line:\n results.append({\n 'switch': switch,\n 'output': (line.replace(replace_string, '')).strip()\n })\n\n # Exit the module and return the required JSON.\n module.exit_json(\n unreachable=False,\n msg='cluster creation succeeded',\n summary=results,\n exception='',\n failed=False,\n changed=True if True in CHANGED_FLAG else False,\n task='Create clusters'\n )", "def main():\n argument_spec = infinibox_argument_spec()\n null_list = list()\n argument_spec.update(\n dict(\n host=dict(required=True),\n state=dict(default='present', choices=['stat', 'present', 'absent']),\n wwns=dict(type='list', default=list()),\n iqns=dict(type='list', default=list()),\n )\n )\n\n module = AnsibleModule(argument_spec, supports_check_mode=True)\n\n if not HAS_INFINISDK:\n module.fail_json(msg=missing_required_lib('infinisdk'))\n\n check_options(module)\n execute_state(module)", "def main():\n argument_spec = dict(\n devices=dict(type='dict', required=True),\n cvp_facts=dict(type='dict', required=True),\n device_filter=dict(type='list', default='none'),\n state=dict(type='str',\n choices=['present', 'absent'],\n default='present',\n required=False))\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n result = dict(changed=False, data={})\n messages = dict(issues=False)\n # Connect to CVP instance\n module.client = connect(module)\n\n # if 'state' not in module.params:\n # module.params['state']=present\n\n if module.params['state'] == 'present':\n # Configure devices on CVP\n # Pass module params to configlet_action to act on configlet\n result['changed'], result['data'] = device_action(module)\n elif module.params['state'] == 'absent':\n # Reset devices when user configured state=absent\n result['changed'] = True\n result['data'] = devices_reset(module)\n\n module.exit_json(**result)", "def main(sys_args=None):\n\n parser = AnsibleRunnerArgumentParser(\n prog='ansible-runner',\n description=\"Use 'ansible-runner' (with no arguments) to see basic usage\"\n )\n subparser = parser.add_subparsers(\n help=\"Command to invoke\",\n dest='command',\n description=\"COMMAND PRIVATE_DATA_DIR [ARGS]\"\n )\n add_args_to_parser(parser, DEFAULT_CLI_ARGS['generic_args'])\n subparser.required = True\n\n # positional options\n run_subparser = subparser.add_parser(\n 'run',\n help=\"Run ansible-runner in the foreground\"\n )\n add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['positional_args'])\n add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['playbook_group'])\n start_subparser = subparser.add_parser(\n 'start',\n help=\"Start an ansible-runner process in the background\"\n )\n add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['positional_args'])\n add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['playbook_group'])\n stop_subparser = subparser.add_parser(\n 'stop',\n help=\"Stop an ansible-runner process that's running in the background\"\n )\n add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['positional_args'])\n isalive_subparser = subparser.add_parser(\n 'is-alive',\n help=\"Check if a an ansible-runner process in the background is still running.\"\n )\n 
add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['positional_args'])\n\n # streaming commands\n transmit_subparser = subparser.add_parser(\n 'transmit',\n help=\"Send a job to a remote ansible-runner process\"\n )\n add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['positional_args'])\n\n worker_subparser = subparser.add_parser(\n 'worker',\n help=\"Execute work streamed from a controlling instance\"\n )\n worker_subcommands = worker_subparser.add_subparsers(\n help=\"Sub-sub command to invoke\",\n dest='worker_subcommand',\n description=\"ansible-runner worker [sub-sub-command]\",\n )\n cleanup_command = worker_subcommands.add_parser(\n 'cleanup',\n help=\"Cleanup private_data_dir patterns from prior jobs and supporting temporary folders.\",\n )\n cleanup.add_cleanup_args(cleanup_command)\n\n worker_subparser.add_argument(\n \"--private-data-dir\",\n help=\"base directory containing the ansible-runner metadata \"\n \"(project, inventory, env, etc)\",\n )\n\n worker_subparser.add_argument(\n \"--worker-info\",\n dest=\"worker_info\",\n action=\"store_true\",\n help=\"show the execution node's Ansible Runner version along with its memory and CPU capacities\"\n )\n worker_subparser.add_argument(\n \"--delete\",\n dest=\"delete_directory\",\n action=\"store_true\",\n default=False,\n help=(\n \"Delete existing folder (and everything in it) in the location specified by --private-data-dir. \"\n \"The directory will be re-populated when the streamed data is unpacked. \"\n \"Using this will also assure that the directory is deleted when the job finishes.\"\n )\n )\n worker_subparser.add_argument(\n \"--keepalive-seconds\",\n dest=\"keepalive_seconds\",\n default=None,\n type=int,\n help=(\n \"Emit a synthetic keepalive event every N seconds of idle. (default=0, disabled)\"\n )\n )\n process_subparser = subparser.add_parser(\n 'process',\n help=\"Receive the output of remote ansible-runner work and distribute the results\"\n )\n add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['positional_args'])\n process_subparser.add_argument(\n \"-i\", \"--ident\",\n default=None,\n help=(\n \"An identifier to use as a subdirectory when saving artifacts. 
\"\n \"Generally intended to match the --ident passed to the transmit command.\"\n )\n )\n\n # generic args for all subparsers\n add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['generic_args'])\n add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['generic_args'])\n add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['generic_args'])\n add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['generic_args'])\n add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['generic_args'])\n add_args_to_parser(worker_subparser, DEFAULT_CLI_ARGS['generic_args'])\n add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['generic_args'])\n\n # runner group\n ansible_runner_group_options = (\n \"Ansible Runner Options\",\n \"configuration options for controlling the ansible-runner \"\n \"runtime environment.\",\n )\n base_runner_group = parser.add_argument_group(*ansible_runner_group_options)\n run_runner_group = run_subparser.add_argument_group(*ansible_runner_group_options)\n start_runner_group = start_subparser.add_argument_group(*ansible_runner_group_options)\n stop_runner_group = stop_subparser.add_argument_group(*ansible_runner_group_options)\n isalive_runner_group = isalive_subparser.add_argument_group(*ansible_runner_group_options)\n transmit_runner_group = transmit_subparser.add_argument_group(*ansible_runner_group_options)\n add_args_to_parser(base_runner_group, DEFAULT_CLI_ARGS['runner_group'])\n add_args_to_parser(run_runner_group, DEFAULT_CLI_ARGS['runner_group'])\n add_args_to_parser(start_runner_group, DEFAULT_CLI_ARGS['runner_group'])\n add_args_to_parser(stop_runner_group, DEFAULT_CLI_ARGS['runner_group'])\n add_args_to_parser(isalive_runner_group, DEFAULT_CLI_ARGS['runner_group'])\n add_args_to_parser(transmit_runner_group, DEFAULT_CLI_ARGS['runner_group'])\n\n # mutually exclusive group\n run_mutually_exclusive_group = run_subparser.add_mutually_exclusive_group()\n start_mutually_exclusive_group = start_subparser.add_mutually_exclusive_group()\n stop_mutually_exclusive_group = stop_subparser.add_mutually_exclusive_group()\n isalive_mutually_exclusive_group = isalive_subparser.add_mutually_exclusive_group()\n transmit_mutually_exclusive_group = transmit_subparser.add_mutually_exclusive_group()\n add_args_to_parser(run_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])\n add_args_to_parser(start_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])\n add_args_to_parser(stop_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])\n add_args_to_parser(isalive_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])\n add_args_to_parser(transmit_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])\n\n # ansible options\n ansible_options = (\n \"Ansible Options\",\n \"control the ansible[-playbook] execution environment\",\n )\n run_ansible_group = run_subparser.add_argument_group(*ansible_options)\n start_ansible_group = start_subparser.add_argument_group(*ansible_options)\n stop_ansible_group = stop_subparser.add_argument_group(*ansible_options)\n isalive_ansible_group = isalive_subparser.add_argument_group(*ansible_options)\n transmit_ansible_group = transmit_subparser.add_argument_group(*ansible_options)\n add_args_to_parser(run_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])\n add_args_to_parser(start_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])\n add_args_to_parser(stop_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])\n add_args_to_parser(isalive_ansible_group, 
DEFAULT_CLI_ARGS['ansible_group'])\n add_args_to_parser(transmit_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])\n\n # roles group\n roles_group_options = (\n \"Ansible Role Options\",\n \"configuration options for directly executing Ansible roles\",\n )\n run_roles_group = run_subparser.add_argument_group(*roles_group_options)\n start_roles_group = start_subparser.add_argument_group(*roles_group_options)\n stop_roles_group = stop_subparser.add_argument_group(*roles_group_options)\n isalive_roles_group = isalive_subparser.add_argument_group(*roles_group_options)\n transmit_roles_group = transmit_subparser.add_argument_group(*roles_group_options)\n add_args_to_parser(run_roles_group, DEFAULT_CLI_ARGS['roles_group'])\n add_args_to_parser(start_roles_group, DEFAULT_CLI_ARGS['roles_group'])\n add_args_to_parser(stop_roles_group, DEFAULT_CLI_ARGS['roles_group'])\n add_args_to_parser(isalive_roles_group, DEFAULT_CLI_ARGS['roles_group'])\n add_args_to_parser(transmit_roles_group, DEFAULT_CLI_ARGS['roles_group'])\n\n # modules groups\n\n modules_group_options = (\n \"Ansible Module Options\",\n \"configuration options for directly executing Ansible modules\",\n )\n run_modules_group = run_subparser.add_argument_group(*modules_group_options)\n start_modules_group = start_subparser.add_argument_group(*modules_group_options)\n stop_modules_group = stop_subparser.add_argument_group(*modules_group_options)\n isalive_modules_group = isalive_subparser.add_argument_group(*modules_group_options)\n transmit_modules_group = transmit_subparser.add_argument_group(*modules_group_options)\n add_args_to_parser(run_modules_group, DEFAULT_CLI_ARGS['modules_group'])\n add_args_to_parser(start_modules_group, DEFAULT_CLI_ARGS['modules_group'])\n add_args_to_parser(stop_modules_group, DEFAULT_CLI_ARGS['modules_group'])\n add_args_to_parser(isalive_modules_group, DEFAULT_CLI_ARGS['modules_group'])\n add_args_to_parser(transmit_modules_group, DEFAULT_CLI_ARGS['modules_group'])\n\n # container group\n container_group_options = (\n \"Ansible Container Options\",\n \"configuration options for executing Ansible playbooks\",\n )\n run_container_group = run_subparser.add_argument_group(*container_group_options)\n start_container_group = start_subparser.add_argument_group(*container_group_options)\n stop_container_group = stop_subparser.add_argument_group(*container_group_options)\n isalive_container_group = isalive_subparser.add_argument_group(*container_group_options)\n transmit_container_group = transmit_subparser.add_argument_group(*container_group_options)\n add_args_to_parser(run_container_group, DEFAULT_CLI_ARGS['container_group'])\n add_args_to_parser(start_container_group, DEFAULT_CLI_ARGS['container_group'])\n add_args_to_parser(stop_container_group, DEFAULT_CLI_ARGS['container_group'])\n add_args_to_parser(isalive_container_group, DEFAULT_CLI_ARGS['container_group'])\n add_args_to_parser(transmit_container_group, DEFAULT_CLI_ARGS['container_group'])\n\n args = parser.parse_args(sys_args)\n\n vargs = vars(args)\n\n if vargs.get('command') == 'worker':\n if vargs.get('worker_subcommand') == 'cleanup':\n cleanup.run_cleanup(vargs)\n parser.exit(0)\n if vargs.get('worker_info'):\n cpu = get_cpu_count()\n mem = get_mem_in_bytes()\n errors = []\n uuid = ensure_uuid()\n if not isinstance(mem, int):\n errors.append(mem)\n mem = None\n if \"Could not find\" in uuid:\n errors.append(uuid)\n uuid = None\n info = {'errors': errors,\n 'mem_in_bytes': mem,\n 'cpu_count': cpu,\n 'runner_version': VERSION,\n 'uuid': uuid,\n }\n 
print(safe_dump(info, default_flow_style=True))\n parser.exit(0)\n\n private_data_dir = vargs.get('private_data_dir')\n delete_directory = vargs.get('delete_directory', False)\n if private_data_dir and delete_directory:\n shutil.rmtree(private_data_dir, ignore_errors=True)\n register_for_cleanup(private_data_dir)\n elif private_data_dir is None:\n temp_private_dir = tempfile.mkdtemp()\n vargs['private_data_dir'] = temp_private_dir\n register_for_cleanup(temp_private_dir)\n\n if vargs.get('command') == 'process':\n # the process command is the final destination of artifacts, user expects private_data_dir to not be cleaned up\n if not vargs.get('private_data_dir'):\n temp_private_dir = tempfile.mkdtemp()\n vargs['private_data_dir'] = temp_private_dir\n\n if vargs.get('command') in ('start', 'run', 'transmit'):\n if vargs.get('hosts') and not (vargs.get('module') or vargs.get('role')):\n parser.exit(status=1, message=\"The --hosts option can only be used with -m or -r\\n\")\n if not (vargs.get('module') or vargs.get('role')) and not vargs.get('playbook'):\n parser.exit(status=1, message=\"The -p option must be specified when not using -m or -r\\n\")\n\n output.configure()\n\n # enable or disable debug mode\n output.set_debug('enable' if vargs.get('debug') else 'disable')\n\n # set the output logfile\n if ('logfile' in args) and vargs.get('logfile'):\n output.set_logfile(vargs.get('logfile'))\n\n output.debug('starting debug logging')\n\n # get the absolute path for start since it is a daemon\n vargs['private_data_dir'] = os.path.abspath(vargs.get('private_data_dir'))\n\n pidfile = os.path.join(vargs.get('private_data_dir'), 'pid')\n\n try:\n os.makedirs(vargs.get('private_data_dir'), mode=0o700)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(vargs.get('private_data_dir')):\n pass\n else:\n raise\n\n stderr_path = None\n context = None\n if vargs.get('command') not in ('run', 'transmit', 'worker'):\n stderr_path = os.path.join(vargs.get('private_data_dir'), 'daemon.log')\n if not os.path.exists(stderr_path):\n os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))\n\n if vargs.get('command') in ('start', 'run', 'transmit', 'worker', 'process'):\n\n if vargs.get('command') == 'start':\n context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))\n else:\n context = threading.Lock()\n\n streamer = None\n if vargs.get('command') in ('transmit', 'worker', 'process'):\n streamer = vargs.get('command')\n\n with context:\n with role_manager(vargs) as vargs:\n run_options = {\n \"private_data_dir\": vargs.get('private_data_dir'),\n \"ident\": vargs.get('ident'),\n \"binary\": vargs.get('binary'),\n \"playbook\": vargs.get('playbook'),\n \"module\": vargs.get('module'),\n \"module_args\": vargs.get('module_args'),\n \"host_pattern\": vargs.get('hosts'),\n \"verbosity\": vargs.get('v'),\n \"quiet\": vargs.get('quiet'),\n \"rotate_artifacts\": vargs.get('rotate_artifacts'),\n \"ignore_logging\": False,\n \"json_mode\": vargs.get('json'),\n \"omit_event_data\": vargs.get('omit_event_data'),\n \"only_failed_event_data\": vargs.get('only_failed_event_data'),\n \"inventory\": vargs.get('inventory'),\n \"forks\": vargs.get('forks'),\n \"project_dir\": vargs.get('project_dir'),\n \"artifact_dir\": vargs.get('artifact_dir'),\n \"roles_path\": [vargs.get('roles_path')] if vargs.get('roles_path') else None,\n \"process_isolation\": vargs.get('process_isolation'),\n \"process_isolation_executable\": vargs.get('process_isolation_executable'),\n 
\"process_isolation_path\": vargs.get('process_isolation_path'),\n \"process_isolation_hide_paths\": vargs.get('process_isolation_hide_paths'),\n \"process_isolation_show_paths\": vargs.get('process_isolation_show_paths'),\n \"process_isolation_ro_paths\": vargs.get('process_isolation_ro_paths'),\n \"container_image\": vargs.get('container_image'),\n \"container_volume_mounts\": vargs.get('container_volume_mounts'),\n \"container_options\": vargs.get('container_options'),\n \"directory_isolation_base_path\": vargs.get('directory_isolation_base_path'),\n \"cmdline\": vargs.get('cmdline'),\n \"limit\": vargs.get('limit'),\n \"streamer\": streamer,\n \"suppress_env_files\": vargs.get(\"suppress_env_files\"),\n \"keepalive_seconds\": vargs.get(\"keepalive_seconds\"),\n }\n try:\n res = run(**run_options)\n except Exception:\n e = traceback.format_exc()\n if stderr_path:\n with open(stderr_path, 'w+') as ep:\n ep.write(e)\n else:\n sys.stderr.write(e)\n return 1\n return res.rc\n\n try:\n with open(pidfile, 'r') as f:\n pid = int(f.readline())\n except IOError:\n return 1\n\n if vargs.get('command') == 'stop':\n Runner.handle_termination(pid, pidfile=pidfile)\n return 0\n\n if vargs.get('command') == 'is-alive':\n try:\n os.kill(pid, signal.SIG_DFL)\n return 0\n except OSError:\n return 1\n\n return 0", "def main() -> None:\n commands: Dict[str, Callable] = {\n 'swis-alert-list': swis_alert_list_command,\n 'swis-event-list': swis_event_list_command,\n 'swis-query': swis_query_command\n }\n command = demisto.command()\n demisto.debug(f'Command being called is {command}')\n try:\n params = demisto.params()\n args = demisto.args()\n server = params['server']\n credentials = params.get('credentials', {})\n\n verify_certificate = not params.get('insecure', False)\n proxy = params.get('proxy', False)\n\n client = Client(\n server=server,\n credentials=credentials,\n verify=verify_certificate,\n proxy=proxy)\n\n for key, value in args.items():\n if isinstance(value, str):\n args[key] = value.strip()\n\n remove_nulls_from_dictionary(args)\n\n if command == 'test-module':\n # This is the call made when pressing the integration Test button.\n result = test_module(client, params)\n return_results(result)\n\n elif command in commands:\n return_results(commands[command](client, args))\n\n elif command == 'fetch-incidents':\n last_run = demisto.getLastRun()\n next_run, incidents = fetch_incidents(client, last_run, params)\n demisto.incidents(incidents)\n demisto.setLastRun(next_run)\n\n # Log exceptions and return errors\n except Exception as e:\n return_error(f'Failed to execute {demisto.command()} command.\\nError:\\n{str(e)}')", "def main():\n rospy.init_node(\"rsdk_ik_service_client\")\n \n test()", "def cli(ctx):\n pass", "def cli(ctx):\n pass", "def __call__(self):\n\n try:\n results = self.run()\n if results and isinstance(results, dict):\n self.ansible.exit_json(**results)\n\n except Exception as e:\n self.ansible.fail_json(msg=str(e))" ]
[ "0.64768285", "0.64604247", "0.6410106", "0.63225335", "0.6256897", "0.60829186", "0.6063106", "0.60240316", "0.60228515", "0.5964343", "0.59607345", "0.5950873", "0.59385973", "0.5934552", "0.5917794", "0.5884459", "0.586978", "0.58608925", "0.5838486", "0.5816458", "0.57575464", "0.575652", "0.5723714", "0.5722114", "0.5710298", "0.57024914", "0.5700625", "0.56796175", "0.56796175", "0.56762576" ]
0.8046157
0
Resize an image stored as an np.array
def _np_resize_image(image, size, dtype='int'): if dtype == 'int': _size = (size[1], size[0]) # (H,W) to (W,H) return cv2.resize(image.astype('uint8'), _size, interpolation=cv2.INTER_LINEAR) elif dtype == 'float': return skresize(image, size, order=0, mode='constant', preserve_range=True) else: raise NotImplementedError(f"'{dtype}' is not a valid dtype.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resize(image, size):\n return np.array(Image.fromarray(image).resize(size))", "def image_resize(image: Image, size: (int, int)) -> array:\n image = Image.fromarray(uint8(image))\n\n return array(image.resize(size))", "def np_resize(img, input_shape):\n height, width = input_shape\n return cv2.resize(img, (width, height))", "def np_resize(img, input_shape):\n height, width = input_shape\n return cv2.resize(img, (width, height))", "def resize_image(arr, img_height, img_width):\n arr_pil = Image.fromarray((arr * 255).astype(np.uint8)) # Convert to a PIL Python Image Library format\n out = arr_pil.resize((img_height, img_width))\n a = image.img_to_array(out)\n return a", "def resize_image(image, size):\n return skimage.transform.resize(image, size, mode='reflect', preserve_range=True).astype(image.dtype)", "def rescale_image(image: np.ndarray, scale: float) -> np.ndarray:\n (height, width) = image.shape[:2]\n new_dims = (int(width * scale), int(height * scale))\n return cv2.resize(image, new_dims, interpolation=cv2.INTER_CUBIC)", "def np_resize(img,input_shape,graystyle=False):\n height,width = input_shape\n resized_img = cv2.resize(img,(width,height))\n\n if graystyle:\n resized_img = resized_img[...,None]\n return resized_img", "def imresize(im,sz):\n pil_im = Image.fromarray(uint8(im))\n return np.array(pil_im.resize(sz))", "def img_resize(img, img_height):\n\n\timg_width = int(img_height*img.shape[1]/img.shape[0])\n\n\timg_pil = Image.fromarray(img) # convert array back to image\n\n\timg_pil_resize = img_pil.resize((img_width, img_height), Image.LANCZOS) # resize\n\n\treturn np.array(img_pil_resize)", "def scale_down(image:np.array)->np.array:\n src = image\n scale_percent = 25\n width = int(src.shape[1] * scale_percent / 100)\n height = int(src.shape[0] * scale_percent / 100)\n dsize = (width, height)\n output = cv2.resize(src, dsize)\n return output", "def _resize(img, max_dim=128):\n if max(img.shape[:3]) <= max_dim:\n return img\n else:\n new_size = [max_dim / s if s >= max_dim else 1.0 for s in img.shape[:3]]\n new_size.append(1.0) # for channel\n return scipy.ndimage.zoom(img, new_size, order=2)", "def resize_arr(arr, W, H, mode=cv2.INTER_LINEAR):\n arr = arr.transpose(1,2,0)\n arr = cv2.resize(arr, (W, H), mode)\n if len(arr.shape) < 3:\n arr = np.expand_dims(arr, 2)\n arr = arr.transpose(2,0,1)\n return arr", "def imresize(im, sz):\n pil_im = Image.fromarray(np.uint8(im))\n return np.array(pil_im.resize(sz))", "def rescale_image(image, scale=0.50):\r\n \r\n wi = int(image.shape[1]*scale)\r\n hei = int(image.shape[0]*scale)\r\n dimension = (wi, hei)\r\n return cv.resize(image, dimension, interpolation = cv.INTER_AREA)", "def preprocess(img: 'np.ndarray') -> 'np.ndarray':\n return cv2.resize(img, (45, 45), interpolation=cv2.INTER_AREA)[:,:,:3]", "def resize(image):\r\n return cv2.resize(image, (200, 66), interpolation=cv2.INTER_AREA)", "def __resize_image(self, img):\n return cv2.resize(img, self.g.img_size, \n interpolation = cv2.INTER_CUBIC)", "def resizeImage(IMG,IMAGE_SIZE):\n\n RESCALED_IMAGE = skimage.transform.resize(IMG,[IMG.shape[0],IMAGE_SIZE,IMAGE_SIZE])\n return RESCALED_IMAGE", "def resize(img, size):\n img = tf.expand_dims(img, 0)\n return tf.image.resize_bilinear(img, size)[0,:,:,:]", "def resizeImages(img_path,size=(50,50)):\n image = cv2.imread(img_path)\n resized = cv2.resize(image, size, interpolation=cv2.INTER_CUBIC)\n img=np.array([resized.flatten()])\n #print('resized image')\n return img", "def resize(img, size):\n img = cv2.resize(img, 
tuple(size[::-1]))\n return img", "def resizeImage(image, newSize):\n\n # scale image\n scaledImage = cv2.resize(image, newSize)\n return scaledImage", "def resize_array(images, dim=None):\n size = images.shape[0]\n imgs = np.zeros((size, dim, dim))\n\n for i in range(size):\n imgs[i, :, :] = skimage_resize(images[i, :, :], (dim, dim))\n\n return imgs", "def resize_128(img): \n return cv2.resize(img,(128,128))", "def resize_image(img: npt.ArrayLike, target_size: tuple(int, int)) -> npt.ArrayLike:\n\n ht, wt = target_size\n h, w = img.shape\n # scaling coefficient\n sc = min(wt / w, ht / h)\n tx = (wt - w * sc) / 2\n ty = (ht - h * sc) / 2\n \n # M = [[x_scale, x_shear, X_up_left], \n # [y_shear, y_scale, Y_up_left]]\n M = np.float32([[sc, 0, tx], [0, sc, ty]])\n img = cv2.warpAffine(img, M, dsize=(wt, ht), borderValue=0)\n\n return img / 255.", "def resize(img, size, interpolation=Image.BILINEAR):\n\n if not _is_numpy_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n if not (isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)):\n raise TypeError('Got inappropriate size arg: {}'.format(size))\n\n if isinstance(size, int):\n size = (size, size)\n \n if img.dtype == np.int32:\n resized = img.astype(np.uint16)\n resized = cv2.resize(resized, size , interpolation)\n else:\n resized = cv2.resize(img, size , interpolation)\n resized = resized.astype(img.dtype)\n return resized", "def resized_crop(img, i, j, h, w, size, interpolation=cv2.INTER_LINEAR):\n assert _is_numpy_image(img), 'img should be nparray Image'\n img = crop(img, i, j, h, w)\n img = resize(img, size, interpolation)\n return img", "def resize_function(input):\n\n from keras.backend import tf as ktf\n return ktf.image.resize_images(input, (64, 64))", "def resize_frame(\n frame: numpy.ndarray, width: int, height: int, mode: str = \"RGB\"\n) -> numpy.ndarray:\n from PIL import Image\n\n frame = Image.fromarray(frame)\n frame = frame.convert(mode).resize(size=(width, height))\n return numpy.array(frame)" ]
[ "0.80775344", "0.75024486", "0.73508173", "0.73508173", "0.7299563", "0.7239478", "0.723774", "0.6936804", "0.69231737", "0.6879766", "0.68316567", "0.67977756", "0.6782235", "0.6751836", "0.6731369", "0.67199236", "0.67041594", "0.66855896", "0.66484493", "0.66157305", "0.6612449", "0.66063964", "0.65848213", "0.654496", "0.65412223", "0.65386325", "0.65281314", "0.6516968", "0.6508454", "0.65036315" ]
0.76700854
1
Binarize probability map into mask.
def _np_get_mask(prob_map, prob_thresh=0.5): mask = (prob_map > prob_thresh) * 255 return mask.astype(np.uint8)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bin_binarise(self):\n pass", "def apply_mask(binary, mask_dict):\n result = \"\"\n for i, val in enumerate(binary):\n if mask_dict[i] in ('X', '1'):\n result += mask_dict[i]\n else:\n result += binary[i]\n return result", "def remap_histogram_key(original_key: int, bit_map: Dict[int, int]) -> int:\n result = 0\n for k, v in bit_map.items():\n result = result | (((original_key >> k) & 1) << v)\n return result", "def _mask(self, map_):\n return None", "def apply_mask(binmask, num):\n binrep_masked = ''\n binrep = num2binrep(num)\n for bit, m in zip(binrep, binmask):\n if m == '1':\n binrep_masked += '1'\n elif m == '0':\n binrep_masked += '0'\n else:\n binrep_masked += bit\n return binrep2num(binrep_masked)", "def encode_segmap(self, mask):\n mask = mask.astype(int)\n label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)\n for ii, label in enumerate(self.get_pascal_labels()):\n label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii\n label_mask = label_mask.astype(int)\n return label_mask", "def apply_mask_v2(binmask, memaddress):\n binrep_masked = ''\n binrep = num2binrep(memaddress)\n for bit, m in zip(binrep, binmask):\n if m == '1':\n binrep_masked += '1'\n elif m == 'X':\n binrep_masked += 'X'\n elif m == '0':\n binrep_masked += bit\n return binrep_masked", "def binarize_labels(labels):\n labels = np.where(labels == 0, labels, 1)\n\n return labels", "def map_binary_values(x) -> int:\n return _bm.get(x, -1)", "def mask(mask_key, data):\r\n _m = array.array(\"B\", mask_key)\r\n _d = array.array(\"B\", data)\r\n for i in xrange(len(_d)):\r\n _d[i] ^= _m[i % 4]\r\n return _d.tostring()", "def binarize(self, image, threshold):\n bin_img = image.copy()\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n if image[i, j] >= threshold:\n bin_img[i, j] = 0\n else:\n bin_img[i, j] = 255\n return bin_img", "def preprocess_mask(y):\n y[y <= 255./2] = 0 # Needs to be in this order, otherwise 1 gets overwritten\n y[y > 255./2] = 1\n binary_mask = y.astype(np.uint8)\n\n return binary_mask", "def _binarize_predictions(self, input):\r\n _, max_index = torch.max(input, dim=0, keepdim=True)\r\n return torch.zeros_like(input, dtype=torch.uint8).scatter_(0, max_index, 1)", "def create_binary_mask(self, type='negative'):\n if not self.thresh_map_name:\n return None\n mode = self.thresh_mode\n limits = self.thresh_limits\n map = self.map_scalars\n if mode=='mask lower':\n m = (map < limits[0]) if type=='negative' else (map >= limits[0])\n elif mode=='mask higher':\n m = (map > limits[1]) if type=='negative' else (map <= limits[1])\n elif mode=='mask between':\n m = ( (map > limits[0]) & (map < limits[1]) ) \\\n if type=='negative' \\\n else ( (map <= limits[0]) | (map >= limits[1]) )\n else: # mask outside\n m = ( (map < limits[0]) | (map > limits[1]) ) \\\n if type=='negative' \\\n else ( (map >= limits[0]) & (map <= limits[1]) )\n return m", "def graytobin(x: int) -> int:\n assert x >= 0\n mask = x >> 1\n while(mask != 0):\n x = x ^ mask\n mask = mask >> 1\n return x", "def get_binary_mask(self,index):\n mask = self.load_mask_png(index)\n (rows,cols) = np.where(mask>0)[0:2] #pixels in mask disregarding the color\n new_mask = np.zeros(shape=mask.shape[0:2], dtype=np.uint8)\n new_mask[(rows,cols)] = 255\n return new_mask", "def pred_from_prob(a,threshold):\n bin_preds = np.zeros((np.size(a,0),))\n bin_preds[np.where(a[:,1]>threshold)]=1.0\n return bin_preds", "def _get_elements_by_bin_map(self, bin_map):\n result = []\n indexes = range(len(bin_map))\n for index, 
m_str in zip(indexes, bin_map):\n if int(m_str) == 1:\n result.append(self._elements[index])\n return result", "def apply_binning_to_image(binmap_file, image_file, root=None, clobber=False):\n if root == None:\n root = os.path.splitext(image_file)[0] + '_binned'\n\n # Check if min bin is negative or starts or ends on the image boundary.\n # If so, assume it is not wanted (e.g., for wvt bin maps).\n import pyfits\n binmap = pyfits.open(binmap_file)\n binimage = binmap[0].data\n minbin = int(binimage.min())\n maxbin = int(binimage.max())\n if minbin < 0:\n minbin = 0\n inbin = numpy.where(binimage == minbin)\n if 0 in inbin[0] or numpy.size(binimage,0)-1 in inbin[0]:\n minbin += 1\n nbins = maxbin - minbin + 1\n image = pyfits.open(image_file)\n im = image[0].data\n\n # Check that the binmap and image have the same shape\n if binimage.shape != im.shape:\n sys.exit('ERROR: Input binmap and image must have the same shape.')\n\n # make copy of the binmap\n binimage_out = binimage.astype(float)\n\n for i in range(nbins):\n inbin = numpy.where(binimage == i + minbin)\n binimage_out[inbin] = numpy.mean(im[inbin])\n\n binmap[0].data = binimage_out\n binmap.writeto(root+'.fits', clobber=clobber)", "def preprocess_mask(mask):\n # Project values interval on [0.0; 1.0]\n if mask.max() > 1:\n mask[mask <= 127.5] = 0.\n mask[mask > 127.5] = 1.\n else:\n mask[mask <= .5] = 0.\n mask[mask > .5] = 1.\n return mask", "def binarize(self, image, threshold):\n\n bin_img = image.copy()\n [h, w] = bin_img.shape\n opt_threshold = threshold\n print(opt_threshold)\n for row in range(h):\n for col in range(w):\n if bin_img[row, col] > opt_threshold: #greater than threshld white(general)\n bin_img[row, col] = 255 #0 instead of 1\n else: #less than threshold black(general)\n bin_img[row, col] = 0 #0 instead of 1\n\n\n #reverse the cases\n\n return bin_img", "def calculate_binaries(dict_data):\n list_all_preprocessed_binaries = []\n for index_patient, patient in enumerate(dict_data):\n # pick and convert image\n image = dict_data[patient][1]\n image = image.astype(\"uint8\")\n # blur image\n image_blurred = cv2.medianBlur(image, 29)\n # segment image using k-means segmentation\n image_segmented = run_kmean_on_single_image(image_blurred, k=10,\n precision=10000, max_iterations=1000)\n # find lower threshold for binarizing images\n \"\"\" the idea i had here was that all the electrodes always occupy the same area on each picture.\n this function basically returns the pixel value, at which we need to threshold in our binary\n function, so that all pixels that have a higher intensity will collectively make up at least \n \"fraction_of_image_threshold\" percent of the picture - electrodes seem to take up about 5-10% of each\n image\"\"\"\n lower_threshold = intelligent_get_threshold(image_segmented,\n fraction_of_image_threshold=0.08)\n # binarize image\n image_binary = binarize_image(image_segmented, \n lower_threshold=lower_threshold, upper_threshold=255)\n list_all_preprocessed_binaries.append(image_binary)\n return list_all_preprocessed_binaries", "def binarize(X, *, threshold=..., copy=...):\n ...", "def binary_converter(probs):\n return np.array([[1 - p, p] for p in probs])", "def bin_random_mat(m,n,p_0 = 0.5):\n\treturn np.array((np.random.randn(m,n) >= p_0), dtype = np.float)", "def test_binary_mapping(load_database):\n dbf = load_database()\n my_phases = ['LIQUID', 'FCC_A1', 'HCP_A3', 'AL5FE2',\n 'AL2FE', 'AL13FE4', 'AL5FE4']\n comps = ['AL', 'FE', 'VA']\n conds = {v.T: (1200, 1300, 50), v.P: 101325, v.X('AL'): (0, 
1, 0.2)}\n zpf_boundaries = map_binary(dbf, comps, my_phases, conds)\n num_boundaries = len(zpf_boundaries.all_compsets)\n assert num_boundaries > 0\n # calling binplot again can add more boundaries\n map_binary(dbf, comps, my_phases, conds, boundary_sets=zpf_boundaries)\n assert len(zpf_boundaries.all_compsets) == 2*num_boundaries", "def binarize(feature_vectors, class_id):\n for key in feature_vectors:\n if feature_vectors[key][-1] == class_id:\n feature_vectors[key][-1] = 1\n else:\n feature_vectors[key][-1] = 0", "def encode_segmap(self, mask):\n for voidc in self.void_labels:\n mask[mask == voidc] = self.ignore_index\n for validc in self.valid_labels:\n mask[mask == validc] = self.class_map[validc]\n # remove extra idxs from updated dataset\n mask[mask > 33] = self.ignore_index\n return mask", "def get_biomass(binary_mask):\n\n white_pixels = cv2.countNonZero(binary_mask)\n return white_pixels", "def isolate_key_ponits(mask, keypoints):\n new_mask = np.zeros(mask.shape) * 255\n for y, x in keypoints[0].convert(keypoints):\n x = int(x)\n y = int(y)\n print(x, y)\n new_mask[x - 100: x + 100, y - 100: y + 100] = mask[x - 100: x + 100, y - 100: y + 100]\n return new_mask" ]
[ "0.642523", "0.6356742", "0.62461436", "0.60823035", "0.60694367", "0.59785223", "0.5968543", "0.591298", "0.5908036", "0.5904688", "0.58887273", "0.5858042", "0.58433956", "0.5843204", "0.5840894", "0.5832284", "0.5817506", "0.57884526", "0.5750518", "0.5745882", "0.57396334", "0.57093555", "0.5706599", "0.5706491", "0.56932527", "0.5682301", "0.56623316", "0.5650882", "0.5632971", "0.5624188" ]
0.68136746
0
Transform original points list into flipped points and concatenate these two lists.
def _points_transform(clicks_lists, image_width): clicks_lists_flipped = [] for clicks_list in clicks_lists: clicks_list_flipped = [] for click in clicks_list: # Horizontal flip _y = image_width - click.coords[1] - 1 _click = clicker.Click(is_positive=click.is_positive, coords=(click.coords[0], _y)) clicks_list_flipped.append(_click) clicks_lists_flipped.append(clicks_list_flipped) clicks_lists = clicks_lists + clicks_lists_flipped return clicks_lists
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mirror_points_point(points, mirror):\n return [mirror_point_point(point, mirror) for point in points]", "def flip_points(a):\n a = np.array(a)\n return np.flip(a, 1)", "def _reversePoints(points):\n # copy the points\n points = _copyPoints(points)\n # find the first on curve type and recycle\n # it for the last on curve type\n firstOnCurve = None\n for index, point in enumerate(points):\n if point.segmentType is not None:\n firstOnCurve = index\n break\n lastSegmentType = points[firstOnCurve].segmentType\n # reverse the points\n points = reversed(points)\n # work through the reversed remaining points\n final = []\n for point in points:\n segmentType = point.segmentType\n if segmentType is not None:\n point.segmentType = lastSegmentType\n lastSegmentType = segmentType\n final.append(point)\n # move any offcurves at the end of the points\n # to the start of the points\n _prepPointsForSegments(final)\n # done\n return final", "def _copyPoints(points):\n copied = [point.copy() for point in points]\n return copied", "def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:\n return [[1- x for x in reversed(row)] for row in A]", "def transform_points(points, T):\n\n homo_points = np.array([(x, y, 1) for (y, x) in points])\n t_points = np.array([T.dot(v) for v in homo_points ])\n swap = np.array([(x,y) for (y,x,z) in t_points])\n return swap", "def transform_points(transf_matrix, points):\n if(type(points)==list):\n temp_pts = [np.array([x[0],x[1],x[2],1]) for x in points]\n newpts = []\n for pt in temp_pts:\n newpts.append((transf_matrix@pt)[:3])\n else:\n temp_pts = np.array([points[0],points[1],points[2],1])\n newpts=(transf_matrix@temp_pts)[:3]\n return newpts", "def reorder(pts):\r\n # pts is a numpy array tha looks like [ [[numX numY]] [[num num]] [[num num]] [[num num]] ]\r\n pts = pts.reshape((4, 2)) # make it look like [ [numX numY] [num num] [num num] [num num] ]\r\n pts_new = np.zeros((4, 2), np.float32)\r\n\r\n add = pts.sum(1) # array like [ numX+numY num+num num+num num+num ]\r\n pts_new[0] = pts[np.argmin(add)] # the dot that is the nearest to the (0, 0)\r\n pts_new[2] = pts[np.argmax(add)] # the remotest one\r\n\r\n diff = np.diff(pts, 1) # array like [ [numY-numX] [num-num] [num-num] [num-num] ]\r\n pts_new[1] = pts[np.argmin(diff)]\r\n pts_new[3] = pts[np.argmax(diff)]\r\n\r\n return pts_new", "def _rotate(self):\n new_points = list()\n for i in range(len(self._points)):\n x = self._points[i][0]\n y = self._points[i][1]\n new_x = self._center[0] + self._center[1] - y\n new_y = self._center[1] - self._center[0] + x\n new_points.append((new_x, new_y))\n return new_points", "def augment_data(X, y):\n X_flip = np.copy(X)\n X_flip[:,:,::2] = -X_flip[:,:,::2]\n X = np.concatenate((X, X_flip))\n y = np.concatenate((y, y))\n \n return X, y", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def flip(pancakes):\r\n res = ''\r\n for pancake in pancakes:\r\n if pancake == '+':\r\n res += '-'\r\n else:\r\n res += '+'\r\n return res", "def flip_back(output_flipped, flip_pairs, target_type='GaussianHeatmap'):\n assert output_flipped.ndim == 4, 'output_flipped should be [batch_size, num_keypoints, height, width]'\n shape_ori = output_flipped.shape\n channels = 1\n if target_type.lower() == 'CombinedTarget'.lower():\n channels = 3\n output_flipped[:, 1::3, ...] 
= -output_flipped[:, 1::3, ...]\n output_flipped = output_flipped.reshape(shape_ori[0], -1, channels, shape_ori[2], shape_ori[3])\n output_flipped_back = output_flipped.copy()\n for left, right in flip_pairs:\n output_flipped_back[:, left, ...] = output_flipped[:, right, ...]\n output_flipped_back[:, right, ...] = output_flipped[:, left, ...]\n output_flipped_back = output_flipped_back.reshape(shape_ori)\n output_flipped_back = output_flipped_back[..., ::-1]\n return output_flipped_back", "def _swapxy(data):\n return [(y, x) for (x, y) in data]", "def applyToPoints(self, points):\n return [point + self for point in points]", "def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:\n return [list(map(lambda x: 0 if x else 1, row[::-1])) for row in A]", "def reverseCurves(self):\n self.data.reverse()\n return True", "def _flip_adjacent_fluxes(l: Lattice, bonds: np.ndarray, fluxes: np.ndarray):\n for edge_index, (p_a, p_b) in enumerate(l.edges.adjacent_plaquettes):\n if (p_a == INVALID) or (p_b == INVALID):\n break\n if (fluxes[p_a] == -1) and (fluxes[p_b] == -1):\n bonds[edge_index] *= -1\n fluxes[p_a] *= -1\n fluxes[p_b] *= -1\n\n #attempt at vectorising, check this at somepoint\n #adj_fluxes = fluxes[l.edges.adjacent_plaquettes]\n #to_flip = np.where((adj_fluxes[:, 0] == -1) & (adj_fluxes[:, 1] == -1))\n #bonds[to_flip] *= -1\n #fluxes_to_flip = l.edges.adjacent_plaquettes[to_flip].flatten()\n #fluxes[fluxes_to_flip] *= -1\n\n return bonds, fluxes", "def transform(self,points):\n new_points = []\n for p in points:\n new_coordinates=p.coordinates\n new_coordinates = [(new_coordinates[i] - self.min_coordinate[i]) /\n (self.max_coordinate[i]-self.min_coordinate[i]) for i in range(len(p.coordinates))]\n new_points.append(Point(p.name, new_coordinates, p.label))\n return new_points", "def flipAndInvertImage2(self, A: List[List[int]]) -> List[List[int]]:\n def invert(x):\n if x:\n return 0\n else:\n return 1\n\n for row in A:\n left, right = 0, len(row)-1\n while left <= right:\n row[left], row[right] = invert(row[right]), invert(row[left])\n left += 1\n right -= 1\n\n return A", "def flipAndInvertImage(A: List[List[int]]) -> List[List[int]]:\n # 1'st pass: reverse every row\n for r in range(len(A)):\n reverse_row(r, A)\n # 2'nd pass: flip every row\n for r in range(len(A)):\n flip_row(r, A)\n return A", "def flipXYZ(oldXYZ): # This is an example of a nice Modular function.\n coordList = oldXYZ.split()\n x = int(coordList[0]) * -1\n y = int(coordList[1]) * -1\n xyz = ' '.join([str(x), str(y), coordList[2]])\n return xyz", "def flipXYZ(oldXYZ): # This is an example of a nice Modular function.\n coordList = oldXYZ.split()\n x = int(coordList[0]) * -1\n y = int(coordList[1]) * -1\n xyz = ' '.join([str(x), str(y), coordList[2]])\n return xyz", "def inverseCoordinates(coords):\n newlist = []\n if isPoint(coords):\n return [coords[1], coords[0]]\n elif not isinstance(coords, list) and not isinstance(coords, tuple):\n raise ValueError('coordinates to inverse must be minimum a point')\n for i, it in enumerate(coords):\n p = isPoint(it)\n if not p and (isinstance(it, list) or isinstance(it, tuple)):\n newlist.append(inverseCoordinates(it))\n else:\n newp = [it[1],it[0]]\n newlist.append(newp)\n return newlist", "def __get_points_lips(self, lips_points):\n uol = []\n uil = []\n lol = []\n lil = []\n for i in range(0, 14, 2):\n uol.append([int(lips_points[i]), int(lips_points[i + 1])])\n for i in range(12, 24, 2):\n lol.append([int(lips_points[i]), int(lips_points[i + 1])])\n 
lol.append([int(lips_points[0]), int(lips_points[1])])\n for i in range(24, 34, 2):\n uil.append([int(lips_points[i]), int(lips_points[i + 1])])\n for i in range(32, 40, 2):\n lil.append([int(lips_points[i]), int(lips_points[i + 1])])\n lil.append([int(lips_points[24]), int(lips_points[25])])\n return uol, uil, lol, lil", "def flip(self, bev_direction='horizontal', points=None):\n assert bev_direction in ('horizontal', 'vertical')\n if bev_direction == 'horizontal':\n self.tensor[:, 0::7] = -self.tensor[:, 0::7]\n if self.with_yaw:\n self.tensor[:, 6] = -self.tensor[:, 6] + np.pi\n elif bev_direction == 'vertical':\n self.tensor[:, 2::7] = -self.tensor[:, 2::7]\n if self.with_yaw:\n self.tensor[:, 6] = -self.tensor[:, 6]\n\n if points is not None:\n assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints))\n if isinstance(points, (torch.Tensor, np.ndarray)):\n if bev_direction == 'horizontal':\n points[:, 0] = -points[:, 0]\n elif bev_direction == 'vertical':\n points[:, 2] = -points[:, 2]\n elif isinstance(points, BasePoints):\n points.flip(bev_direction)\n return points", "def rescale(self, points, inplace=True):\n if inplace == False:\n points = points.copy()\n points *= self.scale_factor\n points += self.origin\n return points", "def mirror_point_point(point, mirror):\n return add_vectors(mirror, subtract_vectors(mirror, point))", "def transform_points(Points,R,t):\r\n return [transform_point(p,R,t) for p in Points]", "def flip(self):\n self._start, self._end = self._end, self._start" ]
[ "0.6846456", "0.62525344", "0.62357557", "0.60575736", "0.57666177", "0.575885", "0.5750715", "0.5610349", "0.5597628", "0.55869466", "0.55826664", "0.5582364", "0.5554964", "0.5532528", "0.5522956", "0.54966503", "0.54485905", "0.5446705", "0.5442356", "0.54414785", "0.54312885", "0.5428488", "0.5428488", "0.5404245", "0.53860486", "0.53466475", "0.5326534", "0.53215504", "0.53193456", "0.5304299" ]
0.63959026
1
Preprocess the user clicks into a points array
def _preprocessing(self): if self.resize: self.click_list = self._remapping_coord(self.click_list, self.input_size, self.orig_size) clickers = self._get_clickers(self.click_list) clicks_list = clickers.get_clicks() clicks_lists = self._points_transform([clicks_list], self.image_width) points_nd = self._get_points_nd(clicks_lists, self.net_clicks_limit) return points_nd
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_mouse():\n\timport matplotlib.pyplot as plt\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, xlim=(-1,2), ylim=(-1,2))\n\tX = np.zeros( (0,2) )\n\tY = np.zeros( (0,) )\n\tcol = ['bs','gx','ro']\n\t\n\tdef on_click(event):\n\t\tX.resize( (X.shape[0]+1,X.shape[1]) )\n\t\tX[-1,:] = [event.xdata,event.ydata]\n\t\tY.resize( (Y.shape[0]+1,) )\n\t\tY[-1] = event.button\n\t\tax.plot( event.xdata, event.ydata, col[event.button-1])\n\t\tfig.canvas.draw()\n\n\tfig.canvas.mpl_connect('button_press_event',on_click)\n inter=plt.isinteractive(); hld=plt.ishold();\n plt.ioff(); plt.hold(True); plt.show();\n if inter: plt.ion();\n if not hld: plt.hold(False);\n\treturn X,Y", "def onclick(event):\n\t#~ cords = [] #This is an empty list which will store the x and y coordinates of each click on the graph\n\t#It's fine to keep this as a list because we won't be operating on it\n\tglobal ix, iy\n\tix,iy = event.xdata, event.ydata\n\tprint 'x = %.5f, y = %.2e' %(ix,iy) #This will print out the x and y values so you can check that no shifting occured\n\n\tglobal cords\n\tcords.append((ix,iy)) #Stores the x and y click in the array\n\n\treturn", "def click_action(event, ax):\n global newcoords, oldcoords, count\n\n if count % 2 == 0:\n newcoords.append((event.xdata, event.ydata))\n print('NEW', event.xdata, event.ydata)\n else:\n oldcoords.append((event.xdata, event.ydata))\n print('OLD', event.xdata, event.ydata)\n # update count\n count += 1", "def point_from_rays(self):\n print \"generating the 3d point from given clicked points\"\n \n #gather cams and points clicked \n uvs = []\n cams = []\n for iFrame in self.frames:\n if iFrame.lastClick : \n uv = numpy.multiply(iFrame.lastClick,self.reduceFactor)\n uvs.append(uv)\n cam = load_perspective_camera(self.camList[iFrame.currImg])\n cams.append(cam)\n point = get_3d_from_cams(cams, uvs)\n self.point3d = point;\n self.pointLabel.set(\"3d Point: \" + str(self.point3d))\n\n # project 3d point into each image, and gather intensities \n values = []\n ims = []\n for idx, img in enumerate(self.imgList):\n cam = load_perspective_camera(self.camList[idx])\n imgPoint = project_point(cam, point[0], point[1], point[2])\n imgPoint = numpy.divide(imgPoint, self.reduceFactor)\n self.allUVs.append(imgPoint)\n \n #grab float intensity value at this point \n imgView,ni,nj = load_image(img)\n val = pixel(imgView, imgPoint)\n if val > 0.0:\n values.append(val)\n ims.append(idx)\n \n #cleanup\n remove_from_db([imgView, cam])\n \n\n #write mean/std of intensities \n self.meanLabel.set(\"Mean: \" + str(numpy.mean(values)) )\n self.stdLabel.set(\"Std Dev: \" + str(numpy.std(values)) )\n #plot the intensities by image number \n self.f.clf();\n self.a = self.f.add_subplot(311)\n self.a.set_xlabel(\"img #\")\n self.a.set_ylabel(\"intensity\")\n self.a.plot(ims, values)\n #plot the histogram of intensities by image number \n pdf, bins, patches = plt.hist(values)\n self.b = self.f.add_subplot(313)\n self.b.set_xlabel(\"bin val\")\n self.b.set_ylabel(\"freq\")\n self.b.hist(values, 15, normed=1, facecolor=\"green\" )\n self.canvas.show();", "def __on_click(self,event, x, y, p1, p2): \r\n \r\n # global variables of the class with mouse click position\r\n global mouse_click_pos, mouse_click_list \r\n \r\n mouse_click_list = []\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n \r\n mouse_click_pos = (x,y)\r\n print(mouse_click_pos)\r\n mouse_click_list.append((x, y))", "def figure_mouse_press(self, event):\n \n # Add/remove an additional point?\n if event.dblclick:\n\n if event.button == 
1:\n # Add a point.\n points = np.vstack([\n self.ax_order.collections[0].get_offsets(),\n [event.xdata, event.ydata]\n ])\n # TODO: set size by their weight?\n self.ax_order.collections[0].set_offsets(points)\n\n else:\n # Are we within <tolerance of a point?\n points = self.ax_order.collections[0].get_offsets()\n\n # Need to scale x-distance to convert to pixels.\n idx = self.current_order.dispersion.searchsorted(event.xdata)\n xscale = np.nanmean(\n np.diff(self.current_order.dispersion[idx-5:idx+5]))\n\n \"\"\"\n bbox = self.ax_order.get_window_extent().transformed(\n self.norm_plot.dpi_scale_trans.inverted())\n width = bbox.width * self.norm_plot.dpi\n height = bbox.height * self.norm_plot.dpi\n print(width, height)\n \"\"\"\n # TODO: Fix this distance thing.\n\n distance = np.sqrt(\n ((points[:, 0] - event.xdata)/xscale)**2 \\\n + (points[:, 1] - event.ydata)**2)\n \n if distance.size > 0:\n\n index = np.argmin(distance)\n if distance[index] < PIXEL_PICKER_TOLERANCE:\n # Remove that point.\n keep = np.ones(points.shape[0], dtype=bool)\n keep[index] = False\n self.ax_order.collections[0].set_offsets(points[keep])\n\n else:\n print(\"Closest point {} px away\".format(distance[index]))\n\n # Update the cache.\n idx = self.current_order_index\n N = points.shape[0]\n # TODO: adhere to the knot weights\n self._cache[\"input\"][\"additional_points\"] \\\n = np.hstack((points, 100 * np.ones(N).reshape((N, 1))))\n self.fit_continuum(clobber=True)\n self.draw_continuum(refresh=True)\n\n return None\n \n if event.button != 1: return None\n # Single click.\n # Set up/update the excluded region.\n xmin, xmax, ymin, ymax = (event.xdata, np.nan, -1e8, +1e8)\n try:\n self._exclude_selected_region\n except AttributeError:\n self._exclude_selected_region = self.ax_order.axvspan(**{\n \"xmin\": xmin,\n \"xmax\": xmax,\n \"ymin\": ymin,\n \"ymax\": ymax,\n \"facecolor\": \"r\",\n \"edgecolor\": \"none\",\n \"alpha\": 0.25,\n \"zorder\": -1\n })\n\n else:\n self._exclude_selected_region.set_xy([\n [xmin, ymin],\n [xmin, ymax],\n [xmax, ymax],\n [xmax, ymin],\n [xmin, ymin]\n ])\n\n # Set the signal and the time.\n self._exclude_selected_region_signal = (\n time(),\n self.norm_plot.mpl_connect(\n \"motion_notify_event\", self.update_exclude_selected_region)\n )\n return None", "def getMouseClicks(plotcoords = 0):\n nmax = 1000\n xlist, ylist = [-92171]*nmax,[-92171]*nmax\n nclicks = dislin.csrpts(xlist, ylist, nmax)\n xlist, ylist = xlist[:nclicks], ylist[:nclicks]\n if plotcoords:\n return xlist, ylist\n else:\n x = [dislin.xinvrs(i) for i in xlist]\n y = [dislin.yinvrs(i) for i in ylist]\n return x,y", "def handle_press( self, x, y ):\n self.pressed_flag = True\n self.first_point = (x, y)", "def _collect_points(self, image, point_value=0):\n return zip(*np.where(image == point_value))", "def _points_transform(clicks_lists, image_width):\n clicks_lists_flipped = []\n for clicks_list in clicks_lists:\n clicks_list_flipped = []\n for click in clicks_list:\n # Horizontal flip\n _y = image_width - click.coords[1] - 1\n _click = clicker.Click(is_positive=click.is_positive,\n coords=(click.coords[0], _y))\n clicks_list_flipped.append(_click)\n clicks_lists_flipped.append(clicks_list_flipped)\n clicks_lists = clicks_lists + clicks_lists_flipped\n return clicks_lists", "def predict(self, predPoints=None):", "def _correct_clicks(self, clicked, timestamp, relative_to):\n if len(clicked):\n clicked = [(b, x, y, s + self.time_correction) for\n b, x, y, s in clicked]\n self.log_clicks(clicked)\n buttons = [(b, x, y) 
for b, x, y, _ in clicked]\n self._check_force_quit()\n if timestamp:\n clicked = [(b, x, y, s - relative_to) for\n b, x, y, s in clicked]\n else:\n clicked = buttons\n return clicked", "def on_press(self, event):\n if event.inaxes is None:\n return\n mX = event.xdata\n mY = event.ydata\n index = None\n for i in range(len(self.x)):\n if self.is_inside(mX, mY, (self.x[i], self.y[i])):\n index = i\n break\n self.current_point = index", "def on_mouse_press(self, x, y, button):\n\n pass", "def _press(self, event):\n # Check for selection of a tool handle.\n if ((self._selection_completed or 'move_vertex' in self._state)\n and len(self._xys) > 0):\n h_idx, h_dist = self._polygon_handles.closest(event.x, event.y)\n if h_dist < self.grab_range:\n self._active_handle_idx = h_idx\n # Save the vertex positions at the time of the press event (needed to\n # support the 'move_all' state modifier).\n self._xys_at_press = self._xys.copy()", "def handle_mouse_press(self, event):", "def user_click(self):\n\n\t\tif len(self.drawn_list):\n\t\t\tfor s in self.drawn_list:\n\t\t\t\tshape = self.coord_list[s[0]:s[1]]\n\t\t\t\tx, y = self.calc_centroid(shape)\n\n\t\t\tself.canvas.create_line(0, y, 600, y, fill=\"blue\")\n\t\t\tself.canvas.create_line(x, 0, x, 500, fill=\"blue\")\n\t\telse:\n\t\t\tprint(\"There's no contained shapes\")", "def getMouseClick(plotcoords = 0):\n coords = dislin.csrpt1()\n if plotcoords:\n return coords\n else:\n return dislin.xinvrs(coords[0]), dislin.yinvrs(coords[1])", "def Canvas_onclick(event):\n global ix, iy\n ix, iy = event.xdata, event.ydata\n print 'x = %f -> i = %d, y = %f' % (ix,ix/0.5*fig.Fulllength, iy)\n\n global coords\n coords = [ix, iy]\n\n return coords", "def on_trace_click(click_data,date,freq):\n p = click_data['points'][0]\n # here, use 'customdata' property of clicked point, \n # could also use 'curveNumber', 'pointIndex', etc.\n key=pd.to_datetime(0)\n if 'x' in p:\n key = pd.to_datetime(p['x'])\n df_f = get_corresponding_rows(df, key,date,freq)\n return df_f.to_dict('records')", "def on_trace_click(click_data,date,freq):\n p = click_data['points'][0]\n # here, use 'customdata' property of clicked point, \n # could also use 'curveNumber', 'pointIndex', etc.\n key=pd.to_datetime(0)\n if 'x' in p:\n key = pd.to_datetime(p['x'])\n df_f = get_corresponding_rows(df, key,date,freq)\n return df_f.to_dict('records')", "def scatter_array(self, U, copy=True):\n pass", "def get_img_reference_points():\n # The following line is just for test.\n input('Enter to capture image.')\n image = baxter.getImageFromRightHandCamera()\n cvimage = baxter.getLastCvImage()\n while n_clicks <= tot_clicks-1:\n # displays the image\n cv2.imshow(\"Click\", cvimage)\n #cv.ShowImage(\"Click\", cvimage)\n #calls the callback function \"on_mouse_click'when mouse is clicked inside window\n cv2.setMouseCallback(\"Click\", on_mouse_click, param=1)\n #cv.SetMouseCallback(\"Click\", on_mouse_click, param=1)\n #cv.WaitKey(1000)\n cv2.waitKey(1000)\n \n #print points\n cv2.destroyAllWindows() \n return points", "def on_click(name: str, fig, event):\n if name not in punts:\n punts[name] = []\n punts[name].append([event.xdata, event.ydata])\n plt.scatter(event.xdata, event.ydata)\n fig.canvas.draw()", "def handle_mouse_data(data):\n pass", "def xy(event):\n return map(int, event.get_coords())", "def test_processed_points_calculation(self):\n\n assert self.test_shape.processed_points == [\n (1030.0, 525.0, \"straight\"),\n (1030.0, 475.0, \"straight\"),\n (970.0, 475.0, \"straight\"),\n (970.0, 525.0, 
\"straight\"),\n (1030.0, 525.0, \"straight\"),\n ]", "def handleClick(self, event):\n\n # filter for events inside image:\n pos = event.pos()\n mappedPos = self.img.mapFromScene(pos)\n xmp = int(mappedPos.x())\n ymp = int(mappedPos.y())\n\n if xmp < 0 or \\\n xmp > self.dat3d.shape[1] or \\\n ymp < 0 or \\\n ymp > self.dat3d.shape[0]:\n return # discard click events originating outside the image\n\n pw = pqg.plot(self.elist, self.dat3d[ymp, xmp, :], title=\"LEEM-I(V)\")\n pw.setLabel('bottom', 'Energy', units='eV')\n pw.setLabel('left', 'Intensity', units='a.u.')\n pw.show()", "def onpick(cls, event):\n if cls.rate_limiting():\n return True\n\n if len(event.ind) != 1:\n print(\"Two or more points are too close! Please zoom in.\")\n print(\"Showing the one with higher fitness score\")\n\n cloud_plot = gs.canvas2cloud_plot[event.canvas]\n artist = event.artist\n ind = event.ind[-1]\n button = event.mouseevent.button\n\n if button == 1:\n cls.button_1(cloud_plot, artist, ind)\n elif button == 3:\n cls.button_3(cloud_plot, artist, ind)", "def data_preprocessing(points):\n mean_coords = points.mean(0)\n points -= mean_coords\n \n max_norm = np.max(np.linalg.norm(points, axis = 1))\n points /= max_norm\n\n return points, mean_coords, max_norm" ]
[ "0.6028115", "0.5976332", "0.5928237", "0.588722", "0.5848616", "0.56758183", "0.5582898", "0.5569146", "0.5567416", "0.55399483", "0.55177397", "0.5503829", "0.55011344", "0.54957795", "0.5451826", "0.54001564", "0.5395018", "0.5391376", "0.53872997", "0.53311664", "0.53311664", "0.5316275", "0.53156483", "0.52991986", "0.52941215", "0.5247954", "0.5194414", "0.519049", "0.51790833", "0.51685655" ]
0.65283585
0
Delete the empty related analytic account
def unlink(self): analytic_accounts_to_delete = self.env['account.analytic.account'] for project in self: if project.analytic_account_id and not project.analytic_account_id.line_ids: analytic_accounts_to_delete |= project.analytic_account_id result = super(Project, self).unlink() analytic_accounts_to_delete.unlink() return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_account(self, account):\n \n pass", "def delete_account(self):\n Credential.account_list.remove(self)", "def test_duo_account_delete(self):\n pass", "def delete(account):\n account.stripe_account.delete()\n account.delete()", "def delete_non_activated_account():\r\n trans = transaction.begin()\r\n UserMgr.delete_non_activated_account()\r\n trans.commit()", "def delete_my_account():\n # Remove user ownerships\n for p in current_user.projects:\n p.user_id = None\n p.save()\n # Delete user posts\n [ a.delete() for a in current_user.activities ]\n # Delete user account\n current_user.delete()\n logout_user()\n flash('We are sorry to see you go. Your profile has been deleted.', 'info')\n return redirect(url_for('public.home'))", "def delete_account(self):\n signals.before_gameaccount_deleted.send(gameaccount=self.gameaccount)\n db.delete(self.gameaccount)", "def delete(self):\n with sqlite3.connect(self.dbpath) as connection: \n cursor = connection.cursor()\n DELETESQL = \"\"\"DELETE FROM accounts WHERE id=:id \"\"\"\n cursor.execute(DELETESQL, {\"id\": self.id})\n self.id = None", "def delete_account(request):\n ubanks = request.user.userbank.all()\n for ubank in ubanks:\n ubank.delete()\n user = request.user\n log_out(request)\n user.delete()\n return HttpResponse(\"Account succesfully deleted\")", "def delete_account():\n print(\"\\n\")\n print(messages.delete_account)\n u_id = pyip.inputInt(\"User Id: \", greaterThan=0)\n\n credentials = {\"id\":u_id}\n result = BankOperationsBackend.delete_account(credentials)\n start_again() if result else BankOperationsUi.delete_account()", "def test_delete_account(self):\n id = Account.objects.first().id\n url = reverse('account:accounts-detail', kwargs={\"id\":id})\n data = {}\n response = self.client.delete(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Account.objects.count(), 0)", "def delete():", "def delete_wim_account(self, uuid):\n # Since we have foreign keys configured with ON CASCADE, we can rely\n # on the database engine to guarantee consistency, deleting the\n # dependant records\n return self.db.delete_row_by_id('wim_accounts', uuid)", "def delete_account(user):\n\n # first delete all owned categories and all the items in those\n # categories, including items that other users added to the category.\n for category in user.categories:\n for item in category.items:\n db.session.delete(item)\n db.session.delete(category)\n db.session.commit()\n\n # then delete all remaining owned items\n for item in user.items:\n db.session.delete(item)\n db.session.commit()\n\n # finally, delete the user\n db.session.delete(user)\n db.session.commit()", "def unlink(self):\n if self._context.get('is_landlord_rent'):\n rent_ids = []\n for tenancy_rec in self:\n analytic_ids = self.env['account.analytic.line'].search(\n [('account_id', '=', tenancy_rec.id)])\n if analytic_ids and analytic_ids.ids:\n analytic_ids.unlink()\n rent_ids = self.env['tenancy.rent.schedule'].search(\n [('tenancy_id', '=', tenancy_rec.id)])\n post_rent = [x.id for x in rent_ids if x.move_check is True]\n if post_rent:\n raise Warning(\n _('''You cannot delete Tenancy record, if any related Rent'''\n '''Schedule entries are in posted.'''))\n else:\n rent_ids.unlink()\n return super(AccountAnalyticAccount, self).unlink()", "def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))", "def delete(self):\n self.deleted = True\n # Deactivate the user to 
disallow authentication and also\n # to let the user verify the email again after recovery.\n self.is_active = False\n self.save()\n self.history.create(user_id=self.pk, action=user_history.DELETION)", "def _delete_all_acls(self):\n for acl_ref in self.created_entities['acl']:\n entity_ref = acl_ref.replace(\"/acl\", \"\")\n blank_acl_entity = self.barbicanclient.acls.create(\n entity_ref=entity_ref)\n blank_acl_entity.remove()", "def delete(self, account_id):\n self.client.delete_account(account_id)", "def delete_account(self):\n print('-=' * 12 + \" Delete Account \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n delete_flag = self.auth.delete_account(mob_num, password)\n if delete_flag:\n print(\"The account is permently deleted\")\n self.logging_page()\n else:\n print(\"Mobile Number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.delete_account, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)", "def delete_account_id(account_id):\n conn = get_connect()\n conn.execute(\"DELETE from account WHERE accountId = ?\", [account_id])\n conn.commit()\n conn.close()\n return", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def test_delete_grading_period_accounts(self):\r\n account_id = None # Change me!!\r\n id = None # Change me!!\r\n\r\n r = self.client.delete_grading_period_accounts(id, account_id)", "def delete_account(self) -> None:\n\n msg = QtWidgets.QMessageBox()\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/newPrefix/new.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n msg.setWindowIcon(QtGui.QIcon(icon))\n msg.setIcon(QtWidgets.QMessageBox.Warning)\n password = self.lineEdit_9.text()\n self.lineEdit_9.clear()\n if not password:\n msg.setWindowTitle(\"Delete account\")\n msg.setText(\"Please fill all fields.\")\n msg.exec_()\n else:\n if validate_mp(self.email, password):\n msg.setWindowTitle(\"Delete account\")\n msg.setText(\"Are you sure you want delete your account?\")\n msg.setInformativeText(\"Deleting your account cannot be undone-you will no longer have access to any data you have stored in Vault Plus.\")\n msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n msg.setDefaultButton(QtWidgets.QMessageBox.No)\n reply = msg.exec()\n if reply == QtWidgets.QMessageBox.Yes:\n vaultplusDB.delete_user(self.email)\n adminDB.delete_user(self.email)\n path = Path(\"users\", self.uid[1:])\n if path.exists():\n shutil.rmtree(path)\n return True", "def delete(self):\n ...", "def destroy(self):\n\t\tos.remove(self.account_file)", "def delete_user():", "def delete(self, data):\n url = self.base_url + '/v2/account/delete/'\n return self._call_vendasta(url, data)", "def delete(self):\n pass", "def delete(self):\n pass" ]
[ "0.7033174", "0.67283726", "0.6711199", "0.6671805", "0.65966976", "0.6531173", "0.6522223", "0.6496321", "0.64545316", "0.64010954", "0.62627757", "0.61838365", "0.6163211", "0.6157718", "0.6136624", "0.6132036", "0.60669327", "0.6063105", "0.6054789", "0.6036722", "0.6034808", "0.60317385", "0.60230535", "0.6022009", "0.60210574", "0.6019024", "0.6017054", "0.6013707", "0.5976995", "0.5976995" ]
0.68331313
1
Given two particles, calculate the number of ticks before they collide for the first time. Return None if they never collide.
def calculate_collision(p1: Particle, p2: Particle): # First find all the tick numbers, when one coordinate matches solutions = [] solutions.append( _collision_one_coord(p1.ax, p1.vx, p1.px, p2.ax, p2.vx, p2.px) ) solutions.append( _collision_one_coord(p1.ay, p1.vy, p1.py, p2.ay, p2.vy, p2.py) ) solutions.append( _collision_one_coord(p1.az, p1.vz, p1.pz, p2.az, p2.vz, p2.pz) ) if not all(solutions): return None # We need to intersect tick numbers for all coordinates. # If one coordinate is `ANY_TIME` - we don't care about it. solutions = [item for item in solutions if item != ANY_TIME] if not solutions: # All coordinates ever match return 0 intersection = set(solutions[0]) for i in range(1, len(solutions)): intersection &= set(solutions[i]) if not intersection: return None return int(min(intersection))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def particleCollisionTime(self, first_element, second_element):\n\n # Quantities required in following formula\n r = self.measure.distance(first_element, second_element)\n r2 = np.dot(r, r)\n #v = relativeVelocity(first_element, second_element, self.vel)\n v = self.measure.relativeVelocity(first_element, second_element)\n v2 = np.dot(v, v)\n b = np.dot(r, v)\n d = 2*self.particle_radius\n # We name 'inner_term' everything that will fall under the square root \n inner_term = b*b - v2*(r2 - d*d)\n \n # We need to filter non-valid results\n if (inner_term<0 or v2<=0 or first_element==second_element):\n t = 'inf'\n else:\n # The following formula has been taken from Eq: 14.2.2 in\n # 'The Art of Molecular Dynamics Simulations', D. Rapaport.\n t = (-b - np.sqrt(inner_term))/v2\n t = infIfNegative(t) # The collision ocurred in the past \n return t", "def collide(self, p1, p2):\n distance = p1.pos.distance(p2.pos) # distance between to particles\n if distance.length() < (p1.size + p2.size):\n pass", "def time_to_collision(self, other) -> float:\n rad = self.radius + other.radius\n w = self.pos - other.pos\n c = w.dot(w) - pow(rad, 2)\n if c < 0:\n return 0\n v = self.vel - other.vel\n a = v.dot(v)\n b = w.dot(v)\n if b > 0:\n return float(\"inf\")\n\n discr = pow(b, 2) - a * c\n if discr <= 0:\n return float(\"inf\")\n\n tau = c / (-b + sqrt(discr))\n if tau < 0:\n return float(\"inf\")\n\n return tau", "def collide(b1, b2):\n dx = b1.x - b2.x\n dy = b1.y - b2.y\n\n distance = math.hypot(dx, dy)\n \n if distance < b1.size + b2.size: # If they have collided\n tangent = math.atan2(dy, dx) # Find the tangent of the point\n angle = 0.5 * math.pi + tangent # We use this later on\n b1.angle = 2*tangent - b1.angle # Alter angles\n b2.angle = 2*tangent - b2.angle\n (b1.speed, b2.speed) = (b2.speed, b1.speed) # Swap speeds\n b1.speed *= elasticity # Reduce speed due to elasticity\n b2.speed *= elasticity\n\n b1.x += math.sin(angle) # Move particles away from each other\n b1.y -= math.cos(angle)\n b2.x -= math.sin(angle)\n b2.y += math.cos(angle)", "def wallCollisionTime(self, first_element, second_element):\n x = self.pos[first_element, 0]\n y = self.pos[first_element, 1]\n vx = self.vel[first_element, 0]\n vy = self.vel[first_element, 1]\n \n # Calculate collision times with walls\n\n if second_element == 'leftWall':\n if vx>=0:\n t = 'inf'\n else:\n x_leftWall = 0\n t = infIfNegative((self.particle_radius + x_leftWall - x)/vx)\n elif second_element == 'rightWall':\n if vx<=0:\n t = 'inf'\n else:\n x_rightWall = self.size_X\n t = infIfNegative((-self.particle_radius + x_rightWall - x)/vx)\n elif second_element == 'topWall':\n if vy<=0:\n t = 'inf'\n else:\n y_topWall = self.size_Y\n t = infIfNegative((-self.particle_radius + y_topWall - y)/vy)\n elif second_element == 'bottomWall':\n if vy>=0:\n t = 'inf'\n else:\n y_bottomWall = 0\n t = infIfNegative((self.particle_radius + y_bottomWall - y)/vy) \n \n return t", "def time_at_collision_particles(self, particle_number, simulation_time):\n # difference from particle particle_number to all other particles\n delta_x = self.positions - np.tile(self.positions[particle_number, :], reps=(len(self.positions), 1))\n # difference in velocity from particle particle_number to all other particles\n delta_v = self.velocities - np.tile(self.velocities[particle_number, :], reps=(len(self.velocities), 1))\n r_squared = (self.radii[particle_number] + self.radii) ** 2 # array of center to center distances\n dvdx = np.sum(delta_v*delta_x, axis=1) # dot product between 
delta_v and delta_x\n dvdv = np.sum(delta_v*delta_v, axis=1) # dot product between delta_v and delta_v\n d = dvdx ** 2 - dvdv * (norm(delta_x, axis=1) ** 2 - r_squared) # help array quantity\n time_until_collisions = np.ones(self.N)*np.inf # assume no particles is going to collide\n boolean = np.logical_and(dvdx < 0, d > 0) # both these conditions must be valid particle-particle collision\n # check if there exist some valid particle-particle collisions for particle particle_number\n if np.sum(boolean) > 0:\n # compute time until collision\n time_until_collisions[boolean] = -1 * ((dvdx[boolean] + np.sqrt(d[boolean])) / (dvdv[boolean]))\n return time_until_collisions + simulation_time", "def get_collisions(self) -> int:\n return 0 # no obstacles are spawned for Circle tasks", "def collision_particles(self, particle_one, particle_two, restitution_coefficient, box_particle_two):\n pos_particle_two = self.positions[particle_two, :].copy() # get position of second particle\n # check if the box of the second particle is not 13, which is system box (offset=(0, 0, 0)).\n if box_particle_two != 13:\n # collision is through a wall and one must use offset to correct collision position to get correct dx\n # meaning that particle_two is in one of the copied systems due to pbc\n pos_particle_two += [self.offsets[box_particle_two][0], self.offsets[box_particle_two][1],\n self.offsets[box_particle_two][2]]\n\n mass_particle_one, mass_particle_two = self.masses[particle_one], self.masses[particle_two] # get masses\n delta_x = pos_particle_two - self.positions[particle_one, :] # difference in position\n delta_v = self.velocities[particle_two, :] - self.velocities[particle_one, :] # difference in velocity\n r_squared = (self.radii[particle_one] + self.radii[particle_two]) ** 2 # distance from center to center\n\n # update velocities of the particles\n self.velocities[particle_one, :] += delta_x*((1+restitution_coefficient)*mass_particle_two*np.dot(delta_v, delta_x)/((mass_particle_one+mass_particle_two)*r_squared))\n self.velocities[particle_two, :] -= delta_x*((1+restitution_coefficient)*mass_particle_one*np.dot(delta_v, delta_x)/((mass_particle_one+mass_particle_two)*r_squared))", "def num_particles(self) -> Optional[Tuple[int, int]]:\n return None", "def ifCollide( ball1, ball2 ):\n\t\n\tb1_x, b1_y = ball1.position.xy\n\tb2_x, b2_y = ball2.position.xy\n\t\n\t#vector connect center of particles\n\tdistant = Vector.from_points((b2_x, b2_y), (b1_x, b1_y))\n\t\n\t#if lenght of vector above is less( equal ) than sum of radius ( they overlapping )\n\tif ( ball1.r + ball2.r ) ** 2 >= distant.norm():\n\t\treturn True\n\telse:\n\t\treturn False", "def overlap_cost(track_a, track_b):\n return 1 - overlap(track_a.bbox, track_b.bbox)", "def handle_collisions(self):\n\n def change_velocities(p1, p2):\n \"\"\"\n persons p1 and p2 have collided elastically: update their\n velocities.\n\n \"\"\"\n\n m1, m2 = p1.radius**2, p2.radius**2\n M = m1 + m2\n r1, r2 = p1.r, p2.r\n d = np.linalg.norm(r1 - r2)**2\n v1, v2 = p1.v, p2.v\n u1 = v1 - 2*m2 / M * np.dot(v1-v2, r1-r2) / d * (r1 - r2)\n u2 = v2 - 2*m1 / M * np.dot(v2-v1, r2-r1) / d * (r2 - r1)\n if p1.health == -100 or p1.social_dist == 1:\n p2.v *= -1\n elif p2.health == -100 or p2.social_dist == 1:\n p1.v *= -1\n else:\n p1.v = u1\n p2.v = u2\n\n\n def update_health(p1, p2):\n '''\n If collision between two persons, change their health status depending on health of both\n the persons that collided\n '''\n if p1.health == -1 and p2.health == 1:\n p2.health = -1\n elif 
p2.health == -1 and p1.health == 1:\n p1.health = -1\n\n # We're going to need a sequence of all of the pairs of persons when\n # we are detecting collisions. combinations generates pairs of indexes\n # into the self.persons list of persons on the fly.\n pairs = combinations(range(self.n), 2)\n\n for i,j in pairs:\n if self.persons[i].overlaps(self.persons[j]):\n change_velocities(self.persons[i], self.persons[j])\n update_health(self.persons[i], self.persons[j])\n self.collisions += 1", "def collide(b1,b2):\n if mag(b1.pos-b2.pos) < (b1.radius + b2.radius - .05):\n return True", "def time_at_collision_horizontal_wall(self, particle_number, simulation_time):\n velocity_y = self.velocities[particle_number, 1] # velocity in the y-direction of the particle\n position_y = self.positions[particle_number, 1] # y position of the particle\n # check for pbc\n if self.pbc:\n # if pbc: set radius equal to 0 in order to get the time when the center touches the wall\n radius = 0\n else:\n radius = self.radii[particle_number] # radius of the particle\n # compute time until collision based on the sign of the velocity\n if velocity_y > 0:\n time_until_collision = (1 - radius - position_y) / velocity_y\n elif velocity_y < 0:\n time_until_collision = (radius - position_y) / velocity_y\n else:\n time_until_collision = np.inf\n return time_until_collision + simulation_time", "def collide(self, actor, other, current, collisions, floatx, floaty):\n return floatx, floaty", "def time_at_collision_particles_pbc(self, particle_number, simulation_time):\n positions = np.zeros((len(self.positions)*27, 3)) # need 27 boxes/copies of the system\n # set correct positions of the particles in all boxes with all 27 offsets\n for i, offset in enumerate(self.offsets):\n # position of the particles in box i is given as 'positions + offset[i]'\n positions[i*len(self.positions):(i+1)*len(self.positions)] = \\\n self.positions + np.array([offset[0], offset[1], offset[2]])\n # difference from particle particle_number to all other particles\n delta_x = positions - np.tile(self.positions[particle_number, :], reps=(len(positions), 1))\n # difference in velocity from particle particle_number to all other particles\n delta_v = self.velocities - np.tile(self.velocities[particle_number, :], reps=(len(self.velocities), 1))\n delta_v = np.tile(delta_v, reps=(27, 1)) # all copies have the same velocity as the original particles\n r_squared = (self.radii[particle_number] + self.radii) ** 2 # array of center to center distance\n r_squared = np.tile(r_squared, reps=(27, )) # r_squares is the same for all copies\n dvdx = np.sum(delta_v * delta_x, axis=1) # dot product between delta_v and delta_x\n dvdv = np.sum(delta_v * delta_v, axis=1) # dot product between delta_v and delta_v\n d = dvdx ** 2 - dvdv * (norm(delta_x, axis=1) ** 2 - r_squared) # help array quantity\n time_until_collisions = np.ones(self.N*27) * np.inf # assume no particles is going to collide\n boolean = np.logical_and(dvdx < 0, d > 0) # both these conditions must be valid for particle-particle collision\n # check if there exist some valid particle-particle collisions for particle particle_number\n if np.sum(boolean) > 0:\n # compute time until collision\n time_until_collisions[boolean] = -1 * ((dvdx[boolean] + np.sqrt(d[boolean])) / (dvdv[boolean]))\n return time_until_collisions + simulation_time", "def count_overlap(self, time, other_object, other_time):\n ti = np.where(time == self.times)[0][0]\n ma = np.where(self.masks[ti].ravel() == 1)\n oti = np.where(other_time == 
other_object.times)[0]\n obj_coords = np.zeros(self.masks[ti].sum(), dtype=[('x', int), ('y', int)])\n other_obj_coords = np.zeros(other_object.masks[oti].sum(), dtype=[('x', int), ('y', int)])\n obj_coords['x'] = self.i[ti].ravel()[ma]\n obj_coords['y'] = self.j[ti].ravel()[ma]\n other_obj_coords['x'] = other_object.i[oti][other_object.masks[oti] == 1]\n other_obj_coords['y'] = other_object.j[oti][other_object.masks[oti] == 1]\n return float(np.intersect1d(obj_coords,\n other_obj_coords).size) / np.maximum(self.masks[ti].sum(),\n other_object.masks[oti].sum())", "def collide(obj1, obj2):\n offset_x = obj2.x - obj1.x #The difference between obj1 and obj 2\n offset_y = obj2.y - obj1.y \n return obj1.mask.overlap(obj2.mask, (int(offset_x), int(offset_y))) != None # (x,y)", "def secondsCount(timestamp1, timestamp2):\n return timestamp1 - timestamp2", "def entity_relatedness(self, a, b):\n occ_a = self.occurrences(a)\n occ_b = self.occurrences(b)\n occ_common = occ_a.intersection(occ_b)\n\n try:\n logmax = max(len(occ_a), len(occ_b))\n logmin = min(len(occ_a), len(occ_b))\n logint = len(occ_common)\n return (logmax - logint) / (self.LOGW - logmin)\n except ValueError:\n return 0.0", "def percentOverlap(x1, x2):\n nonZeroX1 = np.count_nonzero(x1)\n nonZeroX2 = np.count_nonzero(x2)\n minX1X2 = min(nonZeroX1, nonZeroX2)\n percentOverlap = 0\n if minX1X2 > 0:\n percentOverlap = float(np.dot(x1.T, x2)) / float(minX1X2)\n return percentOverlap", "def get_collisions(self) -> int:\n if len(self.obstacles) == 0:\n return 0\n collision_list = [ob.detect_collision(self.agent)\n for ob in self.obstacles]\n return sum(collision_list)", "def overlap(g, node_1, node_2):\n inter = len(set(nx.neighbors(g, node_1)).intersection(set(nx.neighbors(g, node_2))))\n return float(inter)", "def time_at_collision_tb_wall(self, particle_number, simulation_time):\n velocity_z = self.velocities[particle_number, 2] # velocity in the z-direction of the particle\n position_z = self.positions[particle_number, 2] # z position of the particle\n # check for pbc\n if self.pbc:\n # if pbc: set radius equal to 0 in order to get the time when the center touches the wall\n radius = 0\n else:\n radius = self.radii[particle_number] # radius of the particle\n # compute time until collision based on the sign of the velocity\n if velocity_z > 0:\n time_until_collision = (1 - radius - position_z) / velocity_z\n elif velocity_z < 0:\n time_until_collision = (radius - position_z) / velocity_z\n else:\n time_until_collision = np.inf\n return time_until_collision + simulation_time", "def iOverlapLen (a1, a2, b1, b2):\n if a1<=b1 and b2<=a2: # if b1 and b2 is between a1 and a2\n return float( (a2-a1) - ((b1-a1)+(a2-b2)) )\n elif b1<=a1 and a2<=b2: # if a1 and a2 is between b1 and b2\n return float( (b2-b1) - ((a1-b1)+(b2-a2)) )\n elif (a1>=b1 and a1<=b2) or (a1<=b2 and b2<=a2):\n # # if a1 is between b1 and b2 OR if b1 is between a1 and a2\n return float(b2-a1)\n elif (b1<=a2 and a2<=b2) or (b1>=a1 and b1<=a2):\n # if a2 is between b1 and b2 OR if b2 is between a1 and a2\n return float(a2-b1)\n else:\n return float(0)", "def get_collisions(self) -> int:\n if len(self.obstacles) == 0:\n return 0\n collision_list = [ob.detect_collision(self.agent)\n for ob in self.obstacles]\n\n return sum(collision_list)", "def collisions(x1,y1,x2,y2):\n collisions = 0\n m = (y2 - y1) / (x2 - x1)\n y = lambda x: m*(x-x1) + y1\n \n under = None\n for obstacle in obstacles:\n rx = obstacle.get_x()\n ry = obstacle.get_y()\n rh = obstacle.get_height()\n rw = 
obstacle.get_width()\n \n intersects = False \n if y(x) < ry + rh and y(rx) > ry:\n intersects = True\n if y(rx) > ry + rh and y(rx+rw) < ry+rh:\n intersects = True\n if y(rx) < ry + rh and y(rx+rw) > ry+rh:\n intersects = True\n if y(rx) > ry and y(rx+rw) < ry:\n intersects = True\n if y(rx) < ry and y(rx+rw) > ry:\n intersects = True \n \n if intersects:\n collisions += 1\n \n return collisions", "def same_water_present(x, y):\n if (x == None) or (y == None): return 0.0\n if len(x.intersection(y)) > 0: return 1.0\n return 0.0", "def count_points(p1,p2):\n\tif p1 > p2:\n\t\tdrawWinner(1)\n\t\treturn 1\n\telif p2 > p1:\n\t\tdrawWinner(2)\n\t\treturn 2\n\telse:\n\t\tdrawWinner(3)\n\t\treturn 3", "def get_overlap_time(begin_at_infected, end_at_infected, begin_at_contact, end_at_contact):\n\n\tbegin_at_infected = begin_at_infected\n\tbegin_at_contact = begin_at_contact\n\tend_at_infected = end_at_infected\n\tend_at_contact = end_at_contact\n\treturn (min(end_at_infected, end_at_contact) - max(begin_at_infected, begin_at_contact))" ]
[ "0.7132978", "0.66651785", "0.63942224", "0.6128394", "0.60699826", "0.59047437", "0.5892432", "0.5862217", "0.58521605", "0.5847946", "0.583856", "0.56933767", "0.5688538", "0.5648261", "0.5645475", "0.5642052", "0.5617919", "0.5595813", "0.55799955", "0.55546814", "0.55213296", "0.5512043", "0.55100185", "0.5470565", "0.54661775", "0.54626316", "0.54582196", "0.54569036", "0.5405961", "0.53957415" ]
0.7031036
1
Return participant documents with encoded names.
def get_encoded_participants(case, error_handler): log = parse_file.get_logger() for participant in case['participants']: for encoder_name, encoder in encodings.ENCODERS.items(): result = {'encoding': encoder_name, 'case': case['_id'], } result.update(participant) for field in FIELDS_TO_ENCODE: try: orig = participant[field] encoded = encoder(orig) if orig else [''] result[field] = encoded except Exception as err: msg = 'Error encoding %s to %s for %s/%s "%s" (%s)' % \ (field, encoder_name, case['book'], case['number'], participant[field], err) log.error(msg) error_handler(msg) yield result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def documents(pmid_23982599, civic_aid6_document):\n return [pmid_23982599, civic_aid6_document]", "def get_documents(self, value, key='name'):\n documents = []\n for doc in value:\n if doc.endswith('.json'):\n key = 'filename'\n documents.append([x for x in self.vocab if x[key] == doc])\n return documents", "def all_participants_data(study_name: str):\n # get all participants' name-ids\n participants = CC_driver.get_all_participants(study_name)\n\n if len(participants) > 0:\n participants_rdd = CC_driver.sc.parallelize(participants)\n results = participants_rdd.map(\n lambda participant: diagnose_pipeline(participant[\"identifier\"], CC_worker, config))\n results.count()\n else:\n print(study_name, \"- Study contains no participant.\")", "def retrieveDocuments(self):\n documents = {}\n for reuter in self.REUTERS_FILES:\n print(reuter)\n reuter_stream = open(self.BASEPATH + reuter, encoding=\"latin-1\")\n reuter_content = reuter_stream.read()\n soup = BeautifulSoup(reuter_content, \"html.parser\")\n articles = soup.find_all('reuters')\n for article in articles:\n body = \"\"\n title = \"\"\n words = \"\"\n newid = article['newid']\n if not article.title is None:\n title = article.title.string\n if not article.body is None:\n body = article.body.string\n words = title + \" \" + body\n documents[newid] = words\n print(f\"Retrieval Complete! - Total Documents: {len(documents)}\")\n return documents", "def researchbyname():\n if request.method == 'GET':\n user = request.args.get('newb')\n data = {}\n data = Beers.find({\"Nom\":user}, {\"_id\":0})\n return fct.returning(data)", "def organize():\n\n corpusNames = []\n for keyword in range(1, 10):\n keyword = str(keyword)\n corpusNames.append(organize_files_by_regex(keyword))\n # Calls the helper function 'organize_files_by_regex' for each participant number keyword\n # Appends the returned corpus name to the 'corpusNames' list\n return corpusNames\n # Returns the list of corpus names created based on participant numbers", "def get_exercise_recording_full_names(self):\n full_names = set()\n for er in self.exercise_recordings:\n full_names.add(er.full_name)\n return full_names", "def get_filenames(self):\n return [doc['filename'] for doc in self.vocab]", "def query_by_person_many(self, names: list): #-> cursor object\n if not self.client:\n self.connect()\n query = templates.query_titles_by_person_many(names)\n return self.db.find(query).limit(25)", "def get_file_by_name(self, file_name: list, projection={'_id': 0}, collation=None) -> list:\n result = []\n if collation is None:\n collation = self.collation\n query = {'file_name': {'$in': file_name}}\n projection = projection\n docs = self.collection.find(filter=query, projection=projection)\n for doc in docs:\n result.append(doc)\n return result", "def names():\n\n SamplesFirstRow = session.query(Samples).first()\n results = SamplesFirstRow.__dict__ \n\n names = []\n for aName in results:\n namesDict = {}\n # namesDict[\"Name\"] = \"Sample ID\"\n namesDict[\"Value\"] = aName\n # names.append(namesDict)\n names.append(aName)\n\n return jsonify(names)", "def participant_list(request):\n if request.method == 'GET':\n if request.GET.get('search'):\n request_terms = request.GET.get('search')\n search_terms_array = request_terms.split()\n\n initial_term = search_terms_array[0]\n participant_list = Participant.objects.filter(\n Q(fullname__icontains=initial_term) |\n Q(email__icontains=initial_term))\n\n if len(search_terms_array) > 1:\n for term in range(1, len(search_terms_array)):\n 
participant_list = participant_list.filter(Q(fullname__icontains=search_terms_array[term]) |\n Q(email__icontains=search_terms_array[term]))\n else:\n participant_list = get_list_or_404(Participant)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(participant_list, request)\n serializer = ParticipantSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def _mixed_documents(self, raw_documents) -> List[str]:\n if not self._ner:\n return raw_documents\n\n mixed_documents = []\n for doc in raw_documents:\n entities = [\n \"{\" + entity[\"type\"] + \"}\" for entity in self._ner.process(doc)]\n if entities:\n mixed_documents.append(\" \".join(entities) + \" \" + doc)\n else:\n mixed_documents.append(doc)\n\n return mixed_documents", "def f1results():\n\n FIELDS = {'_id': False, }\n\n with MongoClient(MONGO_URI) as conn:\n collection = conn[DBS_NAME][COLLECTION_NAME]\n results = collection.find(projection=FIELDS)\n return json.dumps(list(results))", "def extract_documents():\n client = MongoClient()\n conn = client.data\n coll = conn.germanwings\n\n query = {'text': {'$exists': 1}, 'exc': {'$exists': 0}}\n selection = {'text': 1, 'short_url': 1}\n for i, doc in enumerate(coll.find(query, selection)):\n short_url, text = tuple(doc[x] for x in (\"short_url\", \"text\"))\n print(\"Extracting {0} {1}\".format(i, short_url), file=stderr)\n filename = os.path.join(RAW_DIR, short_url)\n with open(filename, \"w\") as f:\n ascii = text.encode('ascii', 'ignore')\n f.write(ascii)", "def get_cites_species():\n mongo_db = mongo_client_db()\n cursor = mongo_db[CITES_COLLECTION].find({'full_name': {'$ne': None}}, {'full_name':1})\n return [r['full_name'].encode('utf8') for r in cursor]", "def return_artistnames(): \n\n names = [] #list for artist names\n rows = db.session.query(Artist.name).all()\n for row in rows: \n names.append(row[0])\n\n return jsonify(names)", "def documents(self) -> list[str]:\n return self._documents", "def load_train_subjects_names(self):\n for tf in self.train_db_batches:\n files = ns.natsorted(os.listdir(os.path.join(self.db_path, tf)))\n for f in files:\n if f.startswith('volume'):\n s_name = str.split(str.split(f, '.')[0], '-')[-1]\n self.training_subjects.append(s_name)\n np.random.seed(1)\n np.random.shuffle(self.training_subjects)\n self.n_train = len(self.training_subjects)", "def get_doc_id_titles(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id, title FROM documents\")\n results = [(r[0], r[1]) for r in cursor.fetchall()]\n cursor.close()\n return results", "def speaker_vocab(filenames, target_speaker):\n return unique_ngrams(filenames, target_speaker, gram_size=1)", "def all_names():\n for x in RecordIOShard.all(keys_only=True).filter(\"index =\", True):\n yield RecordIOShard.get_name(x.name())", "def get_participants(reactome_id):\n react_url = 'http://www.reactome.org/ContentService/data/event/' \\\n + reactome_id + '/participatingReferenceEntities'\n headers = {'Accept': 'application/json'}\n res = requests.get(react_url, headers=headers)\n if not res.status_code == 200:\n return []\n json = res.json()\n up_ids = []\n for res in json:\n if not res.get('databaseName') == 'UniProt':\n continue\n up_id = res.get('identifier')\n if up_id is not None:\n up_ids.append(up_id)\n return up_ids", "def get_documents(path_to_dir, qn_id):\n files_to_question = []\n directory = os.path.join(path_to_dir, str(qn_id))\n filenames = os.listdir(directory)\n filenames.sort(key=int)\n\n for filename in 
filenames:\n doc_name, _ = os.path.splitext(filename)\n document_filepath = os.path.join(directory, filename)\n with open(document_filepath, 'rb') as subfile:\n subfile_data = subfile.readlines()\n\n # Use tried-and-tested tokenizer code from P1...\n tokenized_sentences = string_to_tokens(subfile_data)\n if not tokenized_sentences:\n continue\n\n doc = Document(doc_id=int(doc_name), qn_id=qn_id, content=tokenized_sentences)\n files_to_question.append(doc)\n\n return files_to_question", "def fetch_candidate_name(self):\r\n # variable to save possible matches\r\n possible_names = []\r\n\r\n # source text is input document in text format\r\n nlp_text = self.doc # := nlp(self.stringtext)\r\n\r\n # Add patterns to match proper names\r\n patterns = [[{'POS': 'PROPN'}]]\r\n self.matcher.add('NAME', patterns) \r\n matches = self.matcher(nlp_text) \r\n\r\n # fetch the matches\r\n for match_id, start, end in matches:\r\n span = nlp_text[start:end] \r\n possible_names += [span.text] \r\n if len(possible_names) >= 2: \r\n break\r\n\r\n # Extract candidates\r\n doc_entities = self.doc.ents\r\n\r\n # Subset to person type entities\r\n doc_persons = filter(lambda x: x.label_ == 'PERSON', doc_entities)\r\n doc_persons = filter(lambda x: len(\r\n x.text.strip().split()) >= 2, doc_persons)\r\n doc_persons = map(lambda x: x.text.strip(), doc_persons)\r\n doc_persons = list(doc_persons)\r\n\r\n # Assume the first Person entity with more than two tokens is the candidate's name\r\n if len(doc_persons) > 0:\r\n return possible_names + [doc_persons[0]]\r\n\r\n return \"NOT FOUND\"", "def get_play_names(corpus):\n play_names = []\n request_url = \"https://dracor.org/api/corpora/{}\".format(corpus)\n response = requests.get(request_url)\n if response:\n all_plays = response.json()[\"dramas\"]\n for play in all_plays:\n play_names.append(play[\"name\"])\n return play_names", "def search():\n results = []\n for row in db.session.query(DBcorpus):\n serialized = fix_corpus_format(CorpusSchema().dump(row).data)\n results.append(serialized)\n return results, 200", "async def query_txt_record(\n self, resolver: dns.asyncresolver.Resolver, name: str\n ) -> typing.Set[str]:\n txt_records = []\n\n with contextlib.suppress(\n dns.asyncresolver.NXDOMAIN, dns.asyncresolver.NoAnswer\n ):\n resp = await resolver.resolve(name, \"TXT\")\n\n for records in resp.rrset.items.keys():\n txt_records.extend([record.decode() for record in records.strings])\n\n return set(txt_records)", "def get_gene_names(outdir, confidence, validate_sg, verbose):\n \n gene_file = _get_gene_fname(outdir, confidence, validate_sg)\n gene_name_file = re.sub(r'.pickle$', '', gene_file) + '.names.pickle'\n if not os.path.exists(gene_name_file):\n if verbose:\n print('Generating list of gene names for easy access')\n tmp_genes = load_genes(outdir, confidence, validate_sg, verbose)\n gene_names = sp.array([x.name.split('.')[0] for x in tmp_genes])\n pickle.dump(gene_names, open(gene_name_file, 'wb'), -1)\n else:\n gene_names = pickle.load(open(gene_name_file, 'rb'), encoding='latin1')\n\n return gene_names", "def get_documents(self):\n documents = self.tree.execute(\"$.documents\")\n for doc in documents:\n sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}\n self.document_dict[doc['@id']] = {'sentences': sentences,\n 'location': doc['location']}\n return" ]
[ "0.5037065", "0.5026983", "0.4952229", "0.4952091", "0.49487412", "0.49179173", "0.49000576", "0.48417312", "0.48328254", "0.482964", "0.47789705", "0.47716552", "0.47558188", "0.47499445", "0.47499314", "0.4745078", "0.47300333", "0.47276202", "0.470836", "0.47082222", "0.4707869", "0.4700531", "0.46846554", "0.4672982", "0.46681517", "0.46570948", "0.46569106", "0.4655831", "0.46513835", "0.46367678" ]
0.50994873
0
Samples skills and users
def create_samples(self, skills_sample_fraction=1.0, users_sample_fraction=1.0): # Sampling self.sample_skills_to_be_covered(skills_sample_fraction) self.sample_users(users_sample_fraction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_skills_to_be_covered_controlled(self, num_sampled_skills=50, rare_sample_fraction=0.33,\n popular_sample_fraction=0.33, rare_threshold=0.33,\n popular_threshold=0.33, user_sample_fraction=1.0):\n print('In freelancer.')\n self.sample_users(user_sample_fraction)\n df_users = pd.DataFrame(self.users)\n df_users_sampled = df_users[df_users['user_id'].isin(self.E)]\n\n # Get categorized skills\n r, c, p = self.categorize_skills(df_users_sampled, rare_threshold, popular_threshold)\n\n # Sample skills from each category\n num_rare_skills = int(num_sampled_skills * rare_sample_fraction)\n num_popular_skills = int(num_sampled_skills * popular_sample_fraction)\n num_common_skills = num_sampled_skills - num_rare_skills - num_popular_skills\n\n # Ensure that skills to be sampled in each category is >= number of skills in that category\n if num_rare_skills > len(r):\n num_rare_skills = len(r)\n if num_common_skills > len(c):\n num_common_skills = len(c)\n if num_common_skills < 0:\n num_common_skills = 0\n if num_popular_skills > len(p):\n num_popular_skills = len(p)\n\n sampled_rare_skills = np.random.choice(r, size=num_rare_skills, replace=False)\n sampled_common_skills = np.random.choice(c, size=num_common_skills, replace=False)\n sampled_popular_skills = np.random.choice(p, size=num_popular_skills, replace=False)\n\n # Merge indices of all sampled skills\n sampled_skills = np.concatenate((sampled_rare_skills, sampled_common_skills, sampled_popular_skills))\n\n # Create final skills sample\n self.skills_covered = np.zeros(self.num_skills)\n \n for skill_id in range(self.num_skills):\n if skill_id not in sampled_skills:\n self.skills_covered[skill_id] = 1 # Mark unsampled skills as already covered\n\n self.skills_covered = self.skills_covered.astype(bool)\n self.num_rare_skills = num_rare_skills\n self.num_common_skills = num_common_skills\n self.num_popular_skills = num_popular_skills", "def sample_user(self):", "def test_skills(\n self, mock_get_ai_details, mock_get_ai, mock_get_categories\n ):\n\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n\n mock_get_ai_details.return_value['skills'] = [\n {'name': 'bot 1'},\n {'name': 'bot 2'},\n {'name': 'bot 3'},\n {'name': 'bot 4'},\n {'name': 'bot 5'},\n {'name': 'bot 6'},\n ]\n\n response = self.client.get(reverse(\n 'studio:edit_bot',\n kwargs={'aiid': self.ai['aiid']}\n ))\n\n self.assertContains(response, 'bot 1')\n self.assertContains(response, 'bot 2')\n self.assertContains(response, 'bot 3')\n self.assertContains(response, 'bot 4')\n self.assertContains(response, 'bot 5')\n self.assertNotContains(response, 'bot 6')\n self.assertNotContains(response, 'Speed up your bot building process by '\n 'starting with one of our Templates from the store.')", "def skills():\n with app.app_context():\n results = Skill.query.all()\n return SkillsResponse(skills=results).json(), 200", "def skills():\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]", "def prepareSamples(username, testType):\r\n \r\n #userSamps = DBController().getUserSamples(username, testType)", "def test_search_your_skills(self):\n self.client.login(username='[email protected]', password='testpass')\n\n resp = self.client.get(reverse('profiles:search_your_skills'))\n\n # self.user2 has a profile with the Django developer skill\n # self.project has one open position 
for Django developer, so\n # we should find one result\n self.assertContains(\n resp,\n '1 results were found with: Your Skills'\n )\n self.assertContains(resp, str(self.project))\n\n # various page information\n self.assertContains(resp, 'Test Project')\n self.assertContains(resp, 'All Needs')\n self.assertContains(resp, 'Projects')\n\n self.assertTemplateUsed('homepage.html')", "def endpoint_skills():\n q = \"\"\"\n SELECT ?localName\n WHERE { ?entity rdfs:subClassOf* cogrobtut:Skill .\n\t bind( strafter(str(?entity), \"#\") as ?localName) .\n }\n \"\"\"\n res = utils.kb.query(q, initNs=utils.namespaces)\n res_rows = [x for x in res]\n individuals=[]\n for row in res_rows:\n for elem in row:\n individuals.append(elem)\n return jsonify({\"result\" : individuals})", "def test_get_skill_with_questions(self):\n\n # map a skill to two questions\n skill_graph = SkillGraph.load()\n skill = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))\n q1 = self._create_mc_question('description 1')\n q2 = self._create_mc_question('description 2')\n q1.dict[SKILLS_KEY] = [skill.id]\n q2.dict[SKILLS_KEY] = [skill.id]\n models.QuestionDAO.save_all([q1, q2])\n\n # get skills\n actions.login(ADMIN_EMAIL)\n response = transforms.loads(self.get(self.URL).body)\n self.assertEqual(200, response['status'])\n skills = transforms.loads(response['payload'])['skills']\n self.assertEqual(1, len(skills))\n\n # assert that it's linked to two questions\n self.assertEqual(2, len(skills[0]['questions']))", "def sample(self):", "def test_get_skills_multiple_lessons(self):\n skill_graph = SkillGraph.load()\n\n skill_1 = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))\n unit = self.course.add_unit()\n unit.title = 'Test Unit'\n lesson1 = self.course.add_lesson(unit)\n lesson1.title = 'Test Lesson 1'\n lesson2 = self.course.add_lesson(unit)\n lesson2.title = 'Test Lesson 2'\n self.course.save()\n lesson1.properties[SKILLS_KEY] = [skill_1.id]\n lesson2.properties[SKILLS_KEY] = [skill_1.id]\n self.course.save()\n\n actions.login(ADMIN_EMAIL)\n response = transforms.loads(self.get(self.URL).body)\n self.assertEqual(200, response['status'])\n\n skills = transforms.loads(response['payload'])['skills']\n self.assertEqual(1, len(skills))\n # All lessons listed\n self.assertEqual(2, len(skills[0]['lessons']))", "def sample_survey(self, **kwargs):", "def test_skills(self):\n yield self.nodes[0].overlay.trustchain.add_skill('test')\n yield self.deliver_messages()\n peer1_pub_key = self.nodes[0].overlay.trustchain.my_peer.public_key.key_to_bin()\n self.assertTrue(self.nodes[0].overlay.trustchain.persistence.get_skills(peer1_pub_key))\n\n skills = self.nodes[1].overlay.trustchain.persistence.get_skills(peer1_pub_key)\n self.assertTrue(skills)\n\n # Peer 2 endorses peer 1 now\n block, _ = yield self.nodes[1].overlay.trustchain.endorse_skill(peer1_pub_key, skills[0]['block_num'])\n yield self.deliver_messages()\n self.assertTrue(self.nodes[1].overlay.trustchain.persistence.did_endorse_skill(block))\n\n skills = self.nodes[0].overlay.trustchain.persistence.get_skills(peer1_pub_key)\n self.assertEqual(skills[0]['endorsements'], 1)", "def cli_run_samples(log_level, endpoint, email, password, org_name, grp_name):\n logging.basicConfig(\n level=log_level,\n format='%(levelname)s: %(message)s',\n )\n knex = Knex(endpoint)\n User(knex, email, password).login()\n org = Organization(knex, org_name).get()\n grp = org.sample_group(grp_name).get()\n for sample in grp.get_samples():\n try:\n run_sample(sample)\n except Exception as e:\n 
click.echo(f'Sample {sample.name} failed with exception: {e}')", "def sample(self, context: Context) -> T:\n ...", "def test_personalized_skill_map_w_measures(self):\n\n self._build_sample_graph()\n skill_map = SkillMap.load(self.course, self.user_id)\n assert skill_map.personalized()\n skills = skill_map.skills()\n self.assertEqual(6, len(skills))\n for skill in skills:\n self.assertEqual(0.0, skill.score)\n self.assertEqual(\n competency.SuccessRateCompetencyMeasure.NOT_STARTED,\n skill.score_level)\n assert not skill.proficient", "def sample(self, *args, **kwargs):", "def add_skill(skill_list, skill): #inputs the skill dictionary and skill\r\n\tif skill==\"Gun Combat\":\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in guns:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\t\telse:\r\n\t\t\t\t\tskill=stellagama.random_choice(guns)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(guns)\r\n\telif skill in [\"Blade Combat\", \"Blade Cbt\"]:\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in melee:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\t\telse:\r\n\t\t\t\t\tskill=stellagama.random_choice(melee)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(melee)\r\n\telif skill==\"Vehicle\":\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in vehicles:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\telse:\r\n\t\t\t\tskill=stellagama.random_choice(vehicles)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(vehicles)\r\n\tif skill in skill_list:\r\n\t\tskill_list[skill] += 1\r\n\telif skill not in skill_list:\r\n\t\tskill_list[skill] = 1\r\n\treturn skill_list #outputs the skill dictionary\r", "def search(query):\n has_some = []\n # list of all users\n users = database_controller.get_all_users()\n # has_all starts off with every user, removes those who don't have one of the required skills\n has_all = users.copy()\n # iterate through every user and look up their skills\n for user in users:\n for skillpath, min_level in query.items():\n # get the skill_id from the database\n skill_id = database_controller.get_skill(skillpath).id\n # get the association of the user with current skill and according minimum level\n skill_assoc = database_controller.get_assocs(users_id=user.id,\n skill_id=skill_id,\n level=min_level,\n type=\"first\")\n # skill_assoc will be None if the user does not have the skill on the desired level or higher\n if skill_assoc is not None:\n # adds user to has_some, in case he does not have other skills\n if user not in has_some:\n has_some.append(user)\n # if the user does not have the current skill at the required level, he gets removed from has_all\n else:\n if user in has_all:\n has_all.remove(user)\n # remove intersection of has_all and has_some\n for user in has_all:\n if user in has_some:\n has_some.remove(user)\n # extract ProfileModels from the results and return them in a dictionary\n has_all = database_controller.get_profile_models(has_all)\n has_some = database_controller.get_profile_models(has_some)\n # sort the results by descending order of sum of queried skills\n has_all.sort(key=lambda p: database_controller.sum_relevant_skills(p, list(query.keys())), reverse=True)\n has_some.sort(key=lambda p: database_controller.sum_relevant_skills(p, list(query.keys())), reverse=True)\n return dict(has_all=has_all, has_some=has_some)", "def test_sample_users():\n ratings = lktu.ml_test.ratings\n ratings = ratings.set_index('user') ##forces non-unique index\n with pytest.raises(ValueError):\n for 
split in xf.sample_users(ratings, 5, 100, xf.SampleN(5)):\n pass", "def getSkills(self,number):\n skills = ['java','javascript','nodejs','css','scss','angular',\n 'express','sql','mongodb','spark','python','opencv',\n 'native-script','reactjs','backbone-js','docker','unix']\n returnSkills = []\n\n if number< skills.__len__():\n for item in range(0,number):\n tempSkill = skills[randint(0,skills.__len__()-1)]\n if tempSkill not in returnSkills:\n returnSkills.append(tempSkill)\n else:\n continue\n return returnSkills\n else:\n return skills", "def test_intent_classifier_get_testing_samples(self):\n pass", "def samples(self):\n pass", "def sample(self):\r\n raise NotImplementedError", "def test_intent_classifier_add_testing_samples(self):\n pass", "def test_can_create_many_skills(self):\n\t\tskill2 = self.skill\n\t\tskill2.tag = 'Test Driven Development'\n\t\tself.skill.save()\n\t\tskill2.save()\n\t\tself.assertEqual(\n\t\t\tSkill.objects.first().user,\n\t\t\tSkill.objects.last().user,\n\t\t\t'Skill instances don\\'t belong to the same user.'\n\t\t)", "def sample(self, like_params):\n\t\traise NotImplementedError", "def insert_skills(cursor):\n # Get the class of every skill\n skills_classes = dict()\n with open(CLASSES_PATH, encoding='UTF-8') as classes_file:\n classes_dict = ujson.load(classes_file)\n for class_id, _class in classes_dict.items():\n class_skills = _class.get(\"skills\", list())\n for class_skill in class_skills:\n skills_classes[class_skill.lower()] = class_id\n\n with open(SKILLS_PATH, encoding='UTF-8') as skills_file:\n skills_dict = ujson.load(skills_file)\n skills = list()\n # Get list of sorted skills\n sorted_skills_ids = list()\n for skill_id, skill in skills_dict.items():\n if skill_id:\n sorted_skills_ids.append((skill_id, int(skill.get(\"id\", 0))))\n else:\n sorted_skills_ids.append((skill_id, 0))\n sorted_skills_ids.sort(key=lambda tup: tup[1])\n # Start processing them\n for skill_id, _ in sorted_skills_ids:\n skill = skills_dict[skill_id]\n skill_info = list()\n # Get Skill Id\n skill_info.append(int(get_value(skill, \"Skill\", \"id\", str)))\n # Get Skill Name\n skill_info.append(get_value(skill, \"Skill\", \"name\", str))\n # Get Skill Identifier\n identifier = get_value(skill, \"Skill\", \"ident\", str).lower()\n skill_info.append(identifier)\n # Get Skill Icon\n skill_info.append(format_icon(get_value(skill, \"Skill\", \"icon\", str)))\n # Get Skill Circle\n skill_info.append(int(get_value(skill, \"Skill\", \"circle\", str)))\n # Get Skill Rank Level\n skill_info.append(int(get_value(skill, \"Skill\", \"rankLevel\", str)))\n # Get Skill Max Level\n skill_info.append(int(get_value(skill, \"Skill\", \"maxLevel\", str)))\n # Get Skill Video\n skill_info.append(get_value(skill, \"Skill\", \"video\", str))\n # Get Skill Desc\n skill_info.append(get_value(skill, \"Skill\", \"desc\", str))\n # Get Skill Details\n skill_info.append(get_value(skill, \"Skill\", \"desc2\", str))\n # Get Skill Type 1\n skill_info.append(get_value(skill, \"Skill\", \"type1\", str))\n # Get Skill Type 2\n skill_info.append(get_value(skill, \"Skill\", \"type2\", str))\n # Get Skill Cooldown\n skill_info.append(get_value(skill, \"Skill\", \"cooldown\", int))\n # Get Skill Element\n skill_info.append(get_value(skill, \"Skill\", \"element\", str))\n # Get Skill Required Stance\n skill_info.append(get_value(skill, \"Skill\", \"reqStance\", str))\n # Get Skill Level List\n skill_info.append(ujson.dumps(get_value(skill, \"Skill\", \"levelList\", dict)))\n # Get Skill Use Overheat\n 
skill_info.append(get_value(skill, \"Skill\", \"useOverHeat\", int))\n # Get Skill Class\n skill_info.append(get_skill_class(cursor, skills_classes.get(identifier, '')))\n\n\n skills.append(tuple(skill_info))\n\n skills = tuple(skills)\n\n cursor.executemany(\"INSERT INTO skills (id, name, identifier, icon, circle, rank_level, max_level, video, \"\n \"desc, details, type1, type2, cooldown, element, req_stance, level_list, use_overheat, \"\n \"class) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", skills)", "def test_recommender(self):\n\n self._build_sample_graph()\n\n # set skill sa score to 1.0 and skill sb score to 0.5\n measure_sa = competency.SuccessRateCompetencyMeasure.load(\n self.user_id, self.sa.id)\n measure_sa.add_score(1.0)\n measure_sa.save()\n measure_sb = competency.SuccessRateCompetencyMeasure.load(\n self.user_id, self.sb.id)\n measure_sb.add_score(0.0)\n measure_sb.add_score(1.0)\n measure_sb.save()\n\n # verify that the proficient skill list equals [sa]\n # verify that the recommended skill list equals [sb, sc]\n skill_map = SkillMap.load(self.course, self.user_id)\n recommender = SkillRecommender.instance(skill_map)\n recommended, learned = recommender.recommend()\n self.assertEqual(1, len(learned))\n self.assertEqual(2, len(recommended))\n self.assertEqual(self.sb.id, recommended[0].id)\n self.assertEqual(self.sc.id, recommended[1].id)\n assert learned[0].competency_measure.last_modified\n\n # add second successful attempt for skill b and:\n # verify that the proficient skill list equals [sa, sb]\n # verify that the recommended skill list equals [sc, sd]\n measure_sb = competency.SuccessRateCompetencyMeasure.load(\n self.user_id, self.sb.id)\n measure_sb.add_score(1.0)\n assert measure_sb.proficient\n measure_sb.save()\n skill_map = SkillMap.load(self.course, self.user_id)\n recommender = SkillRecommender.instance(skill_map)\n recommended, proficient = recommender.recommend()\n self.assertEqual(2, len(proficient))\n self.assertEqual(2, len(recommended))\n self.assertEqual(self.sc.id, recommended[0].id)\n self.assertEqual(self.sd.id, recommended[1].id)", "def sample(self):\n raise NotImplementedError(\"Override me!\")" ]
[ "0.7143267", "0.67321247", "0.6420536", "0.62872666", "0.62241715", "0.62220424", "0.6190365", "0.6159566", "0.61140394", "0.61028713", "0.6063896", "0.5995409", "0.5967122", "0.5901189", "0.58642733", "0.5856139", "0.5851461", "0.582702", "0.5779618", "0.57707846", "0.5762338", "0.5661979", "0.56357276", "0.56237644", "0.56214607", "0.5583483", "0.55757594", "0.5575507", "0.5501091", "0.54926544" ]
0.6959373
1
Samples a fraction of skills that need to be covered instead of all the skills based on the sampling scheme.
def sample_skills_to_be_covered(self, fraction=1.0): self.skills_covered = np.zeros(self.num_skills) if fraction < 1.0: num_sampled_skills = int(fraction * self.num_skills) sampled_skills = np.random.choice(self.num_skills, size=num_sampled_skills, replace=False) for skill_id in range(self.num_skills): if skill_id not in sampled_skills: self.skills_covered[skill_id] = 1 # Mark unsampled skills as already covered self.skills_covered = self.skills_covered.astype(bool)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_skills_to_be_covered_controlled(self, num_sampled_skills=50, rare_sample_fraction=0.33,\n popular_sample_fraction=0.33, rare_threshold=0.33,\n popular_threshold=0.33, user_sample_fraction=1.0):\n print('In freelancer.')\n self.sample_users(user_sample_fraction)\n df_users = pd.DataFrame(self.users)\n df_users_sampled = df_users[df_users['user_id'].isin(self.E)]\n\n # Get categorized skills\n r, c, p = self.categorize_skills(df_users_sampled, rare_threshold, popular_threshold)\n\n # Sample skills from each category\n num_rare_skills = int(num_sampled_skills * rare_sample_fraction)\n num_popular_skills = int(num_sampled_skills * popular_sample_fraction)\n num_common_skills = num_sampled_skills - num_rare_skills - num_popular_skills\n\n # Ensure that skills to be sampled in each category is >= number of skills in that category\n if num_rare_skills > len(r):\n num_rare_skills = len(r)\n if num_common_skills > len(c):\n num_common_skills = len(c)\n if num_common_skills < 0:\n num_common_skills = 0\n if num_popular_skills > len(p):\n num_popular_skills = len(p)\n\n sampled_rare_skills = np.random.choice(r, size=num_rare_skills, replace=False)\n sampled_common_skills = np.random.choice(c, size=num_common_skills, replace=False)\n sampled_popular_skills = np.random.choice(p, size=num_popular_skills, replace=False)\n\n # Merge indices of all sampled skills\n sampled_skills = np.concatenate((sampled_rare_skills, sampled_common_skills, sampled_popular_skills))\n\n # Create final skills sample\n self.skills_covered = np.zeros(self.num_skills)\n \n for skill_id in range(self.num_skills):\n if skill_id not in sampled_skills:\n self.skills_covered[skill_id] = 1 # Mark unsampled skills as already covered\n\n self.skills_covered = self.skills_covered.astype(bool)\n self.num_rare_skills = num_rare_skills\n self.num_common_skills = num_common_skills\n self.num_popular_skills = num_popular_skills", "def create_samples(self, skills_sample_fraction=1.0, users_sample_fraction=1.0):\n # Sampling\n self.sample_skills_to_be_covered(skills_sample_fraction)\n self.sample_users(users_sample_fraction)", "def sample(self):\r\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError", "def sample(self, n):\n raise NotImplementedError", "def sample(self, n=1):\n raise NotImplementedError", "def easy_sample(self, num, **kwargs):\n return self.preprocess(self.sample(num, **kwargs), **kwargs)", "def biased_sample(self):\n return self.STARS", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def scenario(self, resistance, sample_size=100000):\n success = []\n for _ in range(sample_size):\n roll = [random.choice(colors) for _ in range(self.dice)]\n if self._success(roll, resistance):\n success.append(roll)\n else:\n if 'Second Wind' in self.special_powers:\n roll = [random.choice(colors) for _ in range(self.dice)]\n if self._success(roll, resistance):\n success.append(roll)\n return len(success) / sample_size", "def sample_count(self):", "def should_sample(self, span_context):\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError(\"Override me!\")", "def sample(self):", "def initial_sampling(y):\n samples = list(np.random.randint(0, len(y), 2))\n while len(np.unique(y[samples] > 0.5)) != 2:\n samples = list(np.random.randint(0, len(y), 2))\n return samples", "def sample(self, n, include=True):\n return [self(t / n) for t in range(n + int(include))]", "def sample(self, size=1):\n pass", "def 
samples(self):\n pass", "def test_checks_population_size(self):\n with pm.Model() as model:\n n = pm.Normal(\"n\", mu=0, sigma=1)\n for stepper in TestPopulationSamplers.steppers:\n step = stepper()\n with pytest.raises(ValueError, match=\"requires at least 3 chains\"):\n pm.sample(draws=10, tune=10, chains=1, cores=1, step=step)\n # don't parallelize to make test faster\n pm.sample(\n draws=10,\n tune=10,\n chains=4,\n cores=1,\n step=step,\n compute_convergence_checks=False,\n )", "def sample(self, like_params):\n\t\traise NotImplementedError", "def sampleWeight(self):\r\n x=random.random()\r\n i = 0\r\n n = len(self.weights)-1\r\n cummulativeWeight = 0\r\n #Distribute the exploration weight evenly among all the actions that have been\r\n #taken up to this point in time by any of the users\r\n if len(self.sampledActions) == 0:\r\n explorationWeight = 0\r\n else:\r\n explorationWeight = self.explorationFund / len(self.sampledActions)\r\n #Compute the normalization factor. If no action has been sampled by this user yet,\r\n #then each action k has weight eta*pi_k, where pi_k is the weight of k in the\r\n #prior distribution. Then, the normalization factor is the sum(eta*pi_k) for all k,\r\n #which is equal to eta*sum(pi_k), which is just eta, since the sum of the previous\r\n #weights has to add up to 1.\r\n #If one or more actions have been already sampled, the normalization factor is the\r\n #sum of 1) the weights already in self.weights, 2) the exploration fund, and 3) the\r\n #weights of the actions that are not yet in self.weights. Each one of these actions\r\n #has weight eta*pi_k (because it hasn't been sampled yet), so the total weight of the\r\n #mass of actions not yet in self.weights is eta*(1-sum(pi_l)), where the sum is over all\r\n #the weights already in self.weights\r\n if n < 0:\r\n normalizationFactor = self.priorBelief\r\n else:\r\n normalizationFactor = sum(self.weights) + self.explorationFund + \\\r\n self.priorBelief*(1-self.priorTopicDistr.cummulative[n])\r\n #Keep getting the next weight until the combined mass of the weights is less than the\r\n #random number x\r\n while True:\r\n w = self.__getitem__(i)\r\n if i in self.sampledActions:\r\n w += explorationWeight\r\n cummulativeWeight += w\r\n if x <= cummulativeWeight/normalizationFactor:\r\n if i not in self.sampledActions:\r\n self.sampledActions.append(i)\r\n return w\r\n i += 1", "def sample(self):\n\n # pick sample type according to probability\n samplers = [\"unif\", \"geo\", \"diverse\"]\n sample_idx = np.random.multinomial(\n 1, [self.unif_prob, self.geo_prob, self.diverse_prob])\n idx = np.argmax(sample_idx)\n sampler = samplers[idx]\n\n if sampler == \"unif\":\n return self.unif_sampler()\n if sampler == \"geo\":\n return self.geo_sampler()\n if sampler == \"diverse\":\n return self.diverse_sampler()", "def sampling_ratio(self):\n return self.coincidences / self.n", "def get_sample_rate(self):\n return 1", "def sample_selection(points, gap_fraction):\n if gap_fraction == 0:\n return points\n n = len(points)\n k = int(n * (1. 
- gap_fraction / 100.))\n return sample(points, k)", "def sample(self, observation):\n raise NotImplementedError", "def sample(self, probabilities):\n return self.sample_bernoulli(probabilities)", "def sampling_algorithm(self, X, y):\r\n\r\n n_to_sample = self.det_n_to_sample(self.proportion)\r\n\r\n if n_to_sample == 0:\r\n return self.return_copies(X, y, \"Sampling is not needed.\")\r\n\r\n # standardization is needed to make the range of the propensity scores\r\n # similar to that of the features\r\n mms = MinMaxScaler()\r\n X_trans = mms.fit_transform(X) # pylint: disable=invalid-name\r\n\r\n X_min = X_trans[y == self.min_label]\r\n\r\n # adding propensity scores as a new feature\r\n X_new = np.column_stack([X_trans, self.propensity_scores(X_trans, y)])\r\n X_min_new = X_new[y == self.min_label] # pylint: disable=invalid-name\r\n\r\n # finding nearest neighbors of minority samples\r\n n_neighbors = min([len(X_new), self.n_neighbors+1])\r\n\r\n ind = self.neighborhood_structure(X_new, y, n_neighbors, X_min_new)\r\n\r\n # noise removal\r\n t_hat = np.sum(y[ind[:, 1:]] == self.min_label, axis=1)\r\n to_remove = np.where(t_hat < self.t * n_neighbors)[0]\r\n\r\n if len(to_remove) >= len(X_min) - 1:\r\n return self.return_copies(X, y,\r\n \"most minority samples indentified as noise\")\r\n\r\n n_to_sample = n_to_sample + to_remove.shape[0]\r\n\r\n samples = self.generate_samples(X_min=X_min,\r\n to_remove=to_remove,\r\n X_trans=X_trans,\r\n y=y,\r\n ind=ind,\r\n n_to_sample=n_to_sample)\r\n\r\n X_min = np.delete(X_min, to_remove, axis=0)\r\n\r\n # do the sampling\r\n #samples = []\r\n #while len(samples) < n_to_sample:\r\n # idx = self.random_state.randint(len(X_min))\r\n # # finding the number of minority neighbors\r\n # t_hat = np.sum(y[ind[idx][1:]] == self.min_label)\r\n # if t_hat < self.t*n_neighbors:\r\n # # removing the minority point if the number of minority\r\n # # neighbors is less then the threshold\r\n # # to_remove indexes X_min\r\n # if idx not in to_remove:\r\n # to_remove.append(idx)\r\n # # compensating the removal of the minority point\r\n # n_to_sample = n_to_sample + 1\r\n #\r\n # if len(to_remove) == len(X_min):\r\n # _logger.warning(self.__class__.__name__ + \": \" +\r\n # \"all minority samples identified as noise\")\r\n # return X.copy(), y.copy()\r\n # else:\r\n # # otherwise do the sampling\r\n # X_b = X_trans[self.random_state.choice(ind[idx][1:])]\r\n # samples.append(self.sample_between_points(X_min[idx], X_b))\r\n\r\n return (mms.inverse_transform(np.vstack([X_trans[y == self.maj_label],\r\n X_min,\r\n samples])),\r\n np.hstack([np.repeat(self.maj_label,\r\n np.sum(y == self.maj_label)),\r\n np.repeat(self.min_label, len(X_min)),\r\n np.repeat(self.min_label, len(samples))]))" ]
[ "0.78624916", "0.7106903", "0.64429855", "0.6430922", "0.6430922", "0.62151647", "0.6180233", "0.61468625", "0.61047155", "0.60914946", "0.60914946", "0.6078987", "0.60781646", "0.606959", "0.6035406", "0.6023221", "0.60061115", "0.6001572", "0.5997152", "0.5982087", "0.5957968", "0.59363973", "0.5927024", "0.5883289", "0.5881622", "0.5881051", "0.58749163", "0.58521473", "0.5841312", "0.5826327" ]
0.7834701
1
Categorizes skills of sampled users into three categories based on frequency histogram 1. rare skills (e.g., bottom 33% frequencies) 2. common skills (rest of the skills) 3. popular skills (e.g., top 33% frequencies)
def categorize_skills(self, df_sampled_users, rare_threshold=0.33, popular_threshold=0.33): # Get frequency of each skills skills_array = np.array(df_sampled_users['skills_array'].values) freq = np.sum(skills_array, axis=0) freq_skills_available = freq[freq > 0] num_skills_available = freq_skills_available.shape[0] # Get indices of ascending order sorted frequencies sorted_idx = np.argsort(freq_skills_available) rare_threshold_idx = int(num_skills_available * rare_threshold) popular_threshold_idx = int(num_skills_available * (1 - popular_threshold)) # Split the sampled skills into categories using frequencies rare_skills = sorted_idx[:rare_threshold_idx] common_skills = sorted_idx[rare_threshold_idx: popular_threshold_idx] popular_skills = sorted_idx[popular_threshold_idx:] return (rare_skills, common_skills, popular_skills)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_skills_to_be_covered_controlled(self, num_sampled_skills=50, rare_sample_fraction=0.33,\n popular_sample_fraction=0.33, rare_threshold=0.33,\n popular_threshold=0.33, user_sample_fraction=1.0):\n print('In freelancer.')\n self.sample_users(user_sample_fraction)\n df_users = pd.DataFrame(self.users)\n df_users_sampled = df_users[df_users['user_id'].isin(self.E)]\n\n # Get categorized skills\n r, c, p = self.categorize_skills(df_users_sampled, rare_threshold, popular_threshold)\n\n # Sample skills from each category\n num_rare_skills = int(num_sampled_skills * rare_sample_fraction)\n num_popular_skills = int(num_sampled_skills * popular_sample_fraction)\n num_common_skills = num_sampled_skills - num_rare_skills - num_popular_skills\n\n # Ensure that skills to be sampled in each category is >= number of skills in that category\n if num_rare_skills > len(r):\n num_rare_skills = len(r)\n if num_common_skills > len(c):\n num_common_skills = len(c)\n if num_common_skills < 0:\n num_common_skills = 0\n if num_popular_skills > len(p):\n num_popular_skills = len(p)\n\n sampled_rare_skills = np.random.choice(r, size=num_rare_skills, replace=False)\n sampled_common_skills = np.random.choice(c, size=num_common_skills, replace=False)\n sampled_popular_skills = np.random.choice(p, size=num_popular_skills, replace=False)\n\n # Merge indices of all sampled skills\n sampled_skills = np.concatenate((sampled_rare_skills, sampled_common_skills, sampled_popular_skills))\n\n # Create final skills sample\n self.skills_covered = np.zeros(self.num_skills)\n \n for skill_id in range(self.num_skills):\n if skill_id not in sampled_skills:\n self.skills_covered[skill_id] = 1 # Mark unsampled skills as already covered\n\n self.skills_covered = self.skills_covered.astype(bool)\n self.num_rare_skills = num_rare_skills\n self.num_common_skills = num_common_skills\n self.num_popular_skills = num_popular_skills", "def get_frequency_based_priors(n_common=3000, width_under_sigmoid=10):\n freq_map = get_word_frequencies()\n words = np.array(list(freq_map.keys()))\n freqs = np.array([freq_map[w] for w in words])\n arg_sort = freqs.argsort()\n sorted_words = words[arg_sort]\n\n # We want to imagine taking this sorted list, and putting it on a number\n # line so that it's length is 10, situating it so that the n_common most common\n # words are positive, then applying a sigmoid\n x_width = width_under_sigmoid\n c = x_width * (-0.5 + n_common / len(words))\n xs = np.linspace(c - x_width / 2, c + x_width / 2, len(words))\n priors = dict()\n for word, x in zip(sorted_words, xs):\n priors[word] = sigmoid(x)\n return priors", "def gender_word_counts(data):\n\n # We use the stopwords package from NLTK corpus.\n stop_words = set(stopwords.words('english'))\n data['tweet_words'] = data['text_cleaned'].str.split()\n # Ignoring all the stop words\n data['tweet_words'] = data['tweet_words'].apply(lambda tweet: [word for word in tweet if word not in stop_words])\n\n # Separating Male, Female and Brand profiles.\n male_profiles = data[data['gender'] == 'male']\n female_profiles = data[data['gender'] == 'female']\n brand_profiles = data[data['gender'] == 'brand']\n\n print(\"Top 20 most frequent words used by Men\")\n all_male_tweets = ' '.join(male_profiles['tweet_words'].astype(str))\n Male_words = pd.Series(all_male_tweets.split(\" \")).value_counts()[:20]\n print(Male_words)\n print()\n\n print(\"Top 20 most frequent words used by Women\")\n all_female_tweets = ' '.join(female_profiles['tweet_words'].astype(str))\n 
Female_words = pd.Series(all_female_tweets.split(\" \")).value_counts()[:20]\n print(Female_words)\n print()\n\n print(\"Top 20 most frequent words used by Brands\")\n all_brand_tweets = ' '.join(brand_profiles['tweet_words'].astype(str))\n Brand_words = pd.Series(all_brand_tweets.split(\" \")).value_counts()[:20]\n print(Brand_words)\n\n # Plotting horizontal bar graphs showing Top 20 tweet words used Vs. the word frequency.\n mp = Male_words.plot(kind='barh', stacked=True, colormap='plasma', title=\"Top 20 most frequently words used by Men\")\n mp.set_ylabel(\"Tweet words used by Males\")\n mp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n fp = Female_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Women\")\n fp.set_ylabel(\"Tweet words used by Females\")\n fp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n bp = Brand_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Brands\")\n bp.set_ylabel(\"Tweet words used by Brands\")\n bp.set_xlabel(\"Word Frequency\")\n plt.show()", "def rank_skill_support():\n import collections\n score_dict = collections.defaultdict(int)\n for item in classes.Crusader.items:\n for name in classes.Crusader.skill_names:\n if name in inspect.getsource(item):\n score_dict[name] += 1\n\n for name, freq in sorted(score_dict.items(), key=lambda x: -x[1]):\n print(name, freq)", "def count_words(self,top_only=True):\n if top_only:\n self.top_skill_list()\n else:\n self.all_skill_list()\n word_counts = Counter(self.skill_list)\n top_n = word_counts.most_common(len(word_counts))\n self.feature = []\n proportion = []\n for i in top_n:\n self.feature.append(i[0])\n proportion.append(i[1])\n self.coff = 1./(np.log(proportion)+1)\n return", "def add_skill(skill_list, skill): #inputs the skill dictionary and skill\r\n\tif skill==\"Gun Combat\":\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in guns:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\t\telse:\r\n\t\t\t\t\tskill=stellagama.random_choice(guns)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(guns)\r\n\telif skill in [\"Blade Combat\", \"Blade Cbt\"]:\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in melee:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\t\telse:\r\n\t\t\t\t\tskill=stellagama.random_choice(melee)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(melee)\r\n\telif skill==\"Vehicle\":\r\n\t\tif stellagama.dice(1,6)>=3:\r\n\t\t\tfor item in vehicles:\r\n\t\t\t\tif item in skill_list:\r\n\t\t\t\t\tskill=item\r\n\t\t\telse:\r\n\t\t\t\tskill=stellagama.random_choice(vehicles)\r\n\t\telse:\r\n\t\t\tskill=stellagama.random_choice(vehicles)\r\n\tif skill in skill_list:\r\n\t\tskill_list[skill] += 1\r\n\telif skill not in skill_list:\r\n\t\tskill_list[skill] = 1\r\n\treturn skill_list #outputs the skill dictionary\r", "def concept_categorization(self):\n dataset = pd.read_csv(\"data/Categorization data set.csv\", sep=\";\", header=None)\n dataset.columns = ['concept','word']\n\n cti = {}\n for i,c in enumerate(np.unique(dataset.concept.values)):\n cti[c] = i\n y_true = dataset.concept.apply(lambda x: cti[x]).values\n vs = []\n preds = [''] * dataset.shape[0]\n for ind,w in enumerate(dataset.word.values):\n try:\n vs.append(self.embeddings_index[w])\n except:\n preds[ind] = 0 \n km = KMeans(n_clusters=22, random_state=0)\n km.fit(np.array(vs).astype(np.float32))\n for ind,w in enumerate(dataset.word.values):\n if preds[ind] == '':\n preds[ind] = 
km.predict(np.array([self.embeddings_index[w]]))[0]\n contingency_matrix = metrics.cluster.contingency_matrix(y_true, preds)\n #purity score\n return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)", "def scoreSkills(self, skills, work_hist_skills, req_skills):\n\n if work_hist_skills:\n score = len(set(work_hist_skills).intersection(req_skills))\n else:\n score = len(set(skills).intersection(req_skills))\n\n req_skills_len = len(req_skills)\n\n return score/req_skills_len if score != 0 else 0", "def _generate_interaction_histogram(interactions, num_users, num_items):\n histogram = np.zeros(num_items)\n np.add.at(histogram, interactions, 1)\n # Check that there's one interaction per user\n if histogram.sum() != num_users:\n raise ValueError(\"The sum of interactions must be equal to the number of users\")\n return histogram", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def most_frequent_train(train_data):\n ### YOUR CODE HERE\n tags_counts_for_each_word = {}\n # Filling a dictionary from words and tag tags to their counters\n # Going over the words and counting their tags appearances\n for sentance in train_data:\n for word, tag in sentance:\n # If first time seeing word, adding it's tags count dictionary\n if word not in tags_counts_for_each_word:\n tags_counts_for_each_word[word] = {}\n # Fetching word tags count dictionary\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n # If tag not in word's tags dictionary, initializing the counter\n if tag not in word_tags_count_dictionary:\n word_tags_count_dictionary[tag] = 0\n # Incrementing word tag counter\n word_tags_count_dictionary[tag] += 1\n \n words_maximal_tags = {}\n # Going over each word and finding it's maximal tag\n for word in tags_counts_for_each_word:\n # Fetching all word tags counts\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n \n maximal_tag, maximal_tag_counter = '', 0\n # Finding word tag with maximal tag counter\n for curent_tag, current_counter in word_tags_count_dictionary.items():\n if current_counter > maximal_tag_counter:\n maximal_tag, maximal_tag_counter = curent_tag, current_counter\n \n # Setting the maximal tag for current word\n words_maximal_tags[word] = maximal_tag\n \n return words_maximal_tags\n ### END CODE HERE", "def hog_histograms(*args, **kwargs): # real signature unknown\n pass", "def get_feature_set_SC2(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total 
objectivity score, number of subjective words, number of objective words, e\n sub_score = sentimentvalues[0]+sentimentvalues[1]\n obj_score = sentimentvalues[2]\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def income_cat(housing):\n logging.info(\"Creating Income Category.....\")\n housing[\"income_cat\"] = pd.cut(\n housing[\"median_income\"],\n bins=[0.0, 1.5, 3.0, 4.5, 6.0, np.inf],\n labels=[1, 2, 3, 4, 5],\n )\n return housing", "def build_all_skill_frame(self, binary=True, top_effect=10):\n self.count_words(top_only=False)\n array = scipy.sparse.lil_matrix((len(self.data_profile), len(self.feature)))\n effect = 1\n for i in tqdm(range(len(self.data_profile))):\n rang = np.zeros(len(self.feature))\n if 'skills' in self.data_profile[i].keys():\n for skills in self.data_profile[i]['skills']:\n for j in range(len(self.data_profile[i]['skills'])):\n if self.data_profile[i]['skills'][j]['title'] == 'Top Skills':\n effect = top_effect\n else:\n effect = 1\n for skill in self.data_profile[i]['skills'][j]['skills']:\n if skill['title'] in self.feature:\n if 'endoresementCount' in skill.keys():\n if '+' in skill['endoresementCount']:\n count = 100\n else:\n count = int(skill['endoresementCount'])\n index = self.feature.index(skill['title'])\n array[i,index] = count * self.coff[index] * effect\n self.df = pd.DataFrame(data=array.A, columns=self.feature)\n if binary:\n self.df = (self.df != 0).astype('int')\n return self.df", "def search_for_skill(self, search, skill_list):\n\n def extract_tokens(s, tokens):\n s = s.lower()\n extracted = []\n for token in tokens:\n extracted += token * s.count(token)\n s = s.replace(token, '')\n s = ' '.join(i for i in s.split(' ') if i)\n return s, extracted\n\n def compare_seq(a, b):\n return SequenceMatcher(a=a, b=b).ratio()\n\n for close_word in self.SKILL_WORDS:\n search = search.replace(close_word, 'skill')\n search, search_common = extract_tokens(search, self.COMMON_TOKENS)\n search_tokens = [i for i in search.split(' ') if i]\n\n confidences = {}\n for skill in skill_list:\n skill = skill.replace('[installed]', '').strip()\n if not skill:\n continue\n full_name = skill\n skill = skill.replace('-', ' ')\n skill, skill_common = extract_tokens(skill, self.COMMON_TOKENS)\n\n char_conf = compare_seq(skill, search)\n word_conf = compare_seq(skill.split(' '), search_tokens)\n common_conf = compare_seq(skill_common, search_common)\n conf = (0.45 * char_conf + 0.45 * word_conf + 0.1 * common_conf)\n\n confidences[full_name] = conf\n\n best_skill, best_conf = max(confidences.items(), key=lambda x: x[1])\n best_skills = \\\n [s for s, c in confidences.items() if c > best_conf - 0.1]\n\n self.log.info('Highest Confidence Skill: ' + best_skill)\n self.log.info('Highest Confidence: ' + str(best_conf))\n\n if best_conf < 0.4:\n return []\n elif best_conf == 1.0:\n return [best_skill]\n else:\n return best_skills", "def top_10_features(df):\n df = df.groupby(\"role\").tail(10).reset_index(drop=True)\n df[\"i\"] = df.index.tolist()\n categories = CategoricalDtype(categories=df[\"i\"].tolist(), ordered=True)\n df[\"i\"] = df[\"i\"].astype(categories)\n\n def convert_label(labels):\n return OrderedDict([\n (df[df.i == l[0]].feature.tolist()[0], l[1])\n for l in list(labels.items())\n ])\n\n return (\n ggplot(df, aes(\"i\", \"value\", group=\"category\"))\n + 
geom_segment(\n aes(x=\"i\", xend=\"i\", y=\"min(value)\",\n yend=\"max(value)\"),\n linetype=\"dashed\",\n size=1,\n color=\"grey\"\n )\n + geom_point(aes(color=\"category\", shape=\"category\"), size=7)\n + scale_x_discrete(labels=convert_label)\n + scale_y_continuous(labels=lambda x: [\"%d%%\" % (v * 100) for v in x])\n + scale_color_brewer(type=\"qual\", palette=7)\n + guides(\n color=guide_legend(title=\"Category\"),\n shape=guide_legend(title=\"Category\")\n )\n + labs(y=\"% Relevance\", x=\"Features\", color=\"category\",\n shape=\"category\")\n + theme_matplotlib()\n + theme(strip_text=element_text(size=18),\n axis_title=element_text(size=18),\n axis_text=element_text(size=16),\n axis_text_x=element_text(size=16),\n legend_position=\"top\",\n legend_text=element_text(size=16),\n legend_title=element_text(size=18, margin={\"b\": 10}),\n legend_title_align=\"center\",\n aspect_ratio=1.4,\n panel_spacing_y=0.5,\n panel_spacing_x=2.8,\n figure_size=(14, 9))\n + coord_flip()\n + facet_wrap(\"~ role\", ncol=3, scales=\"free\",\n labeller=as_labeller({\n \"Backend\": \"Backend\",\n \"Frontend\": \"Frontend\",\n \"Mobile\": \"Mobile\"\n })\n )\n )", "def document_skill_counts(self, source_object: Dict):\n skill_counts = Counter()\n for candidate_skill in self.candidate_skills(source_object):\n skill_counts[self.nlp.lowercase_strip_punc(candidate_skill.skill_name).lstrip().rstrip()] += 1\n return skill_counts", "def histogram_quartiles(cur,variable1, variable2, table):\n print(\"Sentiment distribution based on position of sentence in description\")\n sentence_scores = select(cur,variable1, table) # multiple list of strings\n sentence_mags = select(cur,variable2, table) # multiple list of strings\n \n quartileBottom_score = []\n quartileBottom_mag = []\n halfMiddle_score = []\n halfMiddle_mag = []\n quartileTop_score = []\n quartileTop_mag = []\n \n for i in range(len(sentence_scores)):\n sentence_score = eval(sentence_scores[i]) # simple list of floats\n sentence_mag = eval(sentence_mags[i])\n for i in range(len(sentence_score)):\n if i < round((0.25*len(sentence_score))):\n quartileBottom_score.append(sentence_score[i])\n quartileBottom_mag.append(sentence_mag[i])\n if i > round((0.75*len(sentence_score))):\n quartileTop_score.append(sentence_score[i])\n quartileTop_mag.append(sentence_mag[i])\n else:\n halfMiddle_score.append(sentence_score[i])\n halfMiddle_mag.append(sentence_mag[i])\n \n n_groups = 3\n means_score = (np.average(quartileBottom_score), np.average(halfMiddle_score), np.average(quartileTop_score))\n# std_score = (np.std(quartileBottom_score), np.std(halfMiddle_score), np.std(quartileTop_score))\n\n means_mag = (np.average(quartileBottom_mag), np.average(quartileTop_mag), np.average(quartileTop_mag))\n# std_mag = (np.std(quartileBottom_mag), np.std(quartileTop_mag), np.std(quartileTop_mag))\n fig, ax = plt.subplots()\n \n print(\"Means Sentiment Score: \", means_score)\n print(\"Means Magnitude: \", means_mag)\n \n index = np.arange(n_groups)\n bar_width = 0.35\n \n opacity = 0.4\n error_config = {'ecolor': '0.3'}\n \n rects1 = ax.bar(index, means_score, bar_width,\n alpha=opacity, color='b',\n error_kw=error_config,\n label='Sentiment')\n \n rects2 = ax.bar(index + bar_width, means_mag, bar_width,\n alpha=opacity, color='r',\n error_kw=error_config,\n label='Magnitude')\n \n# ax.set_xlabel('Quartiles')\n ax.set_ylabel('Scores')\n ax.set_title('Scores by sentiment and magnitude')\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(('Bottom quartile', 'Middle half', 
'Top quartile')) \n ax.legend((rects1[0], rects2[0]), ('Sentiment', 'Magnitude'))\n \n \n fig.tight_layout()\n plt.show()", "def create_user_bar_chart(content):\n conten_count = Counter(content).most_common()\n conten_sorted = sorted(conten_count, key=lambda x: x[0])\n\n for county in conten_sorted:\n\n print('{:<21}| {}'.format(county[0], '+' * county[1]))", "def generate_statistics_for_recommends(mat,k=20):\r\n dict_userid_to_recommends = create_dict_user_id_to_recommends_from_mat(mat)\r\n dict_userid_to_moviesliked = load_or_create('/DICT/UserIdToLikedMovies.dict', create_dict_user_id_to_liked_items)\r\n dict_ecc = load_or_create('/DICT/MovieIdToItemEccentricity.dict', create_dict_ecc)\r\n user_to_ecc = load_or_create('/DICT/UserIdToUserEccentricity.dict',createDictUserIdToUserEccentricity)\r\n\r\n top_items_ecc_all=[]\r\n user_ecc=[]\r\n user_avg_rec_ecc=[]\r\n to_iter=[i for i in dict_userid_to_recommends]\r\n print(\"starting to calculate plot data...\")\r\n counter_ecc=0\r\n counter_none_ecc=0\r\n print(\"length dict:\",len(dict_userid_to_recommends))\r\n for user in tqdm(to_iter):\r\n\r\n #delete vals which user alreay liked\r\n list_recommends_not_liked_yet=[[i,j]for i,j in dict_userid_to_recommends[user] if i not in dict_userid_to_moviesliked[user]]\r\n list_recommends_not_liked_yet=sorted(list_recommends_not_liked_yet, key=itemgetter(1))\r\n #only take top k\r\n top_items=list_recommends_not_liked_yet[-20:]\r\n top_items_ecc=[dict_ecc[item] for item,val in top_items]\r\n #append ecc vals to plot list\r\n counter_ignored=0\r\n if len(top_items_ecc) > 0:\r\n user_ecc.append(user_to_ecc[user])\r\n if user_to_ecc[user]>0:\r\n counter_ecc+=1\r\n else:\r\n counter_none_ecc+=1\r\n user_avg_rec_ecc.append(mean(top_items_ecc))\r\n else:\r\n print('ignored')\r\n counter_ignored+=1\r\n for i in top_items_ecc:\r\n top_items_ecc_all.append(i)\r\n if user==0:\r\n print(50*\"THIS SHOULD NOT HAPPEN\\n\")\r\n regr = linear_model.LinearRegression()\r\n a=np.array(user_ecc).reshape((len(user_ecc),1))\r\n b=np.array(user_avg_rec_ecc)\r\n print(a.shape,b.shape)\r\n user_ecc_np=np.array(user_ecc).reshape((len(user_ecc),1))\r\n user_avg_rec_ecc_np=np.array(user_avg_rec_ecc)\r\n print(len(user_ecc_np),len(user_avg_rec_ecc_np))\r\n print(user_ecc_np.shape,user_avg_rec_ecc_np.shape)\r\n regr.fit(user_ecc_np, user_avg_rec_ecc_np)\r\n y_pred = regr.predict(user_ecc_np)\r\n print(y_pred[:],user_avg_rec_ecc[:10])\r\n print('Coefficients: \\n', regr.coef_)\r\n # The mean squared error\r\n print(\"Mean squared error: %.2f\"\r\n % mean_squared_error(user_ecc_np, y_pred))\r\n # Explained variance score: 1 is perfect prediction\r\n print('Variance score: %.2f' % r2_score(user_avg_rec_ecc_np, y_pred))\r\n print(\"Pearson relation:\",stats.pearsonr(np.array(user_ecc), np.array(user_avg_rec_ecc)))\r\n # Plot outputs\r\n print('Starting to plot:')\r\n print(\"ecc users:\",counter_ecc)\r\n print(\"none ecc users:\",counter_none_ecc)\r\n print(\"ignored users:\",counter_ignored)\r\n #Now plot box plot of all ecc\r\n print(user_ecc_np.shape, y_pred.shape)\r\n plt.scatter(x=user_ecc,y=user_avg_rec_ecc,s=0.3)\r\n plt.text(-2.9, 1.3, \"Mean squared error: %.2f\"\r\n % mean_squared_error(user_avg_rec_ecc_np, y_pred),\r\n color='black', fontsize=12)\r\n plt.text(-2.9, 1.6, \"Correlation:\"+str(stats.pearsonr(np.array(user_ecc), np.array(user_avg_rec_ecc))),\r\n color='black', fontsize=12)\r\n plt.plot(user_ecc_np.tolist(), y_pred.tolist(), color='red')\r\n\r\n plt.ylim([-3, +3])\r\n plt.xlim([-3, +3])\r\n plt.xlabel(\"User 
Eccentricity\")\r\n plt.ylabel(\"Avg. Item Eccentricity in top-20 recommendations\")\r\n plt.show()\r\n print('Overall avg ecc of users in box:',mean(user_ecc))\r\n plt.boxplot(top_items_ecc_all)\r\n plt.show()", "def recommend(user_id):\n\n df = pd.read_sql(DATABASE_URL, index_col=\"id\", columns=[\"sex\", \"age\", \"haversine_distance\"])\n\n k = 5\n similarity = get_demographic_similarity(df, user_id)\n similarity = similarity.sort()[::-1]\n\n users = similarity[1:1 + k]\n\n # Get the charities then select the most common\n charity_counts = {}\n for user in users:\n charity_counts.ad", "def get_mutual_information(filename):\n categories = {} #{category: speakers of this category}\n features = {} #{feat: speakers who use this feature}\n pos_categories_features = {} #{category: {feat: speakers of category who use this feat}}\n neg_categories_features = {} #{category: {feat: speakers of category who do not use this feat}}\n users = set() #set of all users in data\n \n for line in open(filename):\n userid, c, date, statusid, rawtweet, toktweet, tagtweet = line.split('\\t')\n users.add(userid)\n \n if c not in categories:\n categories[c] = set()\n pos_categories_features[c] = {}\n categories[c].add(userid)\n \n feats = set(toktweet.lower().split()) #lowercase tweet and split into words\n\n for feat in feats:\n if feat not in pos_categories_features[c]:\n pos_categories_features[c][feat] = set()\n pos_categories_features[c][feat].add(userid)\n \n if feat not in features:\n features[feat] = set()\n features[feat].add(userid)\n\n print \"Parsed data\"\n\n numfeats = len(features) #num of features\n print numfeats, \"features\"\n numusers = len(users) #num of users \n print numusers, \"users\"\n\n #keep sizes of sets, not sets themselves\n for feat in features:\n features[feat] = len(features[feat])\n for c in categories:\n categories[c] = len(categories[c])\n for c in pos_categories_features:\n for feat in features:\n if feat in pos_categories_features[c]:\n pos_categories_features[c][feat] = len(pos_categories_features[c][feat])\n else:\n pos_categories_features[c][feat] = 0\n\n for c in categories:\n print c, categories[c], \"users\"\n\n print \"Computed counts\"\n \n mi = {}\n for feat in features:\n mi[feat] = 0.0\n for c in categories:\n #print c, feat, features[feat], pos_categories_features[c][feat]\n \n catprob = categories[c]/numusers\n\n #prob of speakers of category c using feat\n featprob = features[feat]/numusers\n jointprob = pos_categories_features[c][feat]/numusers\n if jointprob > 0 and featprob > 0:\n mi[feat] += jointprob * log2(jointprob/(catprob * featprob))\n \n #prob of speakers of category c NOT using feat\n featprob = 1 - featprob\n jointprob = (categories[c] - pos_categories_features[c][feat])/numusers\n if jointprob > 0 and featprob > 0:\n mi[feat] += jointprob * log2(jointprob/(catprob * featprob))\n\n print \"Computed mutual information\"\n\n feature_scores = sorted(mi.items(), key=lambda x:x[1], reverse=True)\n refcat = categories.keys()[0] #pick one of the categories\n print 'Feature\\tMI\\tP({0}|Feature)\\tNum. 
users'.format(refcat)\n for feat, score in feature_scores[:200]:\n prob = pos_categories_features[refcat][feat]/features[feat]\n print '{0}\\t{1:.3f}\\t{2:.3f}\\t{3}'.format(feat, score, prob, features[feat])", "def build_histogram_data(X, Y, df, role):\n df = df.groupby(\"role\").tail(10).reset_index(drop=True)\n df[\"i\"] = df.index.tolist()\n categories = CategoricalDtype(categories=df[\"i\"].tolist(), ordered=True)\n df[\"i\"] = df[\"i\"].astype(categories)\n\n features = df.loc[df[\"role\"] == role].feature.values\n devs = []\n for i, row in pd.concat([X, Y], axis=1).iterrows():\n for f in features:\n devs.append({\n \"feature\": f,\n \"value\": row[f],\n role: \"Yes\" if row[role] else \"No\"\n })\n dataframe = pd.DataFrame(devs, columns=[\"feature\", \"value\", role])\n dataframe[role] = dataframe[role].astype(\"category\")\n return dataframe", "def skills():\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]", "def keyword_classifier(utterance):\n categories = {\n 'hello': ['hi ', 'greetings', 'hello', 'what\\'s up', 'hey ', 'how are you?', 'good morning', 'good night',\n 'good evening', 'good day', 'howdy', 'hi-ya', 'hey ya'],\n 'bye': ['bye', 'cheerio', 'adios', 'sayonara', 'peace out', 'see ya', 'see you', 'c ya', 'c you', 'ciao'],\n 'ack': ['okay', 'whatever', 'ok ', 'o.k. ', 'kay ', 'fine '],\n 'confirm': ['is it', 'is that', 'make sure', 'confirm', 'double check', 'check again', 'does it'],\n 'deny': ['dont want', 'don\\'t want', 'wrong', 'dont like', 'don\\'t like'],\n 'inform': ['dont care', 'don\\'t care', 'whatever', 'bakery', 'bar', 'cafe', 'coffeeshop', 'pub', 'restaurants',\n 'roadhouse', 'african',\n 'american', 'arabian', 'asian', 'international', 'european', 'central american', 'middle eastern',\n 'world', 'vegan', 'vegetarian', 'free', 'kosher', 'traditional', 'fusion', 'modern', 'afghan',\n 'algerian', 'angolan', 'argentine',\n 'austrian', 'australian', 'bangladeshi', 'belarusian', 'belgian', 'bolivian', 'bosnian',\n 'herzegovinian', 'brazilian', 'british', 'bulgarian', 'cambodian',\n 'cameroonian', 'canadian', 'cantonese', 'catalan', 'caribbean', 'chadian', 'chinese', 'colombian',\n 'costa rican', 'czech', 'congolese', 'cuban', 'danish', 'ecuadorian', 'salvadoran', 'emirati',\n 'english', 'eritrean',\n 'estonian',\n 'ethiopian', 'finnish', 'french', 'german', 'ghanaian', 'greek', 'guatemalan', 'dutch', 'honduran',\n 'hungarian', 'icelandic',\n 'indian', 'indonesian', 'iranian', 'iraqi', 'irish', 'israeli', 'italian', 'ivorian', 'jamaican',\n 'japanese',\n 'jordanian', 'kazakh', 'kenyan', 'korean', 'lao', 'latvian', 'lebanese', 'libyan', 'lithuanian',\n 'malagasy', 'malaysian',\n 'mali', 'mauritanian', 'mediterranean', 'mexican', 'moroccan', 'namibian', 'new zealand',\n 'nicaraguan',\n 'nigerien', 'nigerian', 'norwegian', 'omani', 'pakistani', 'panamanian', 'paraguayan', 'peruvian',\n 'persian', 'philippine', 'polynesian', 'polish', 'portuguese', 'romanian', 'russian', 'scottish',\n 'senegalese', 'serbian',\n 'singaporean', 'slovak', 'somalian', 'spanish', 'sudanese', 'swedish', 'swiss', 'syrian', 'thai',\n 'tunisian', 'turkish',\n 'ukranian', 'uruguayan', 'vietnamese', 'welsh', 'zambian', 'zimbabwean', 'west', 'north', 'south',\n 'east', 'part of town', 'moderate', 'expensive', 'cheap', 'any ', 'priced', 'barbecue', 'burger',\n 'chicken',\n 'doughnut', 'fast food',\n 'fish and chips', 
'hamburger', 'hot dog', 'ice cream', 'noodles', 'pasta', 'pancake', 'pizza',\n 'ramen', 'restaurant', 'seafood', 'steak',\n 'sandwich', 'sushi'],\n 'negate': ['no ', 'false', 'nope'],\n 'repeat': ['repeat', 'say again', 'what was that'],\n 'reqalts': ['how about', 'what about', 'anything else'],\n 'reqmore': ['more', 'additional information'],\n 'request': ['what', 'whats' 'what\\'s', 'why', 'where', 'when', 'how much', 'may', 'address', 'post code',\n 'location', 'phone number'],\n 'restart': ['reset', 'start over', 'restart'],\n 'thankyou': ['thank you', 'cheers', 'thanks'],\n 'affirm': ['ye ', 'yes', 'right ']\n }\n\n classification = []\n sentence_to_classify = utterance.lower()\n for category, keywords in categories.items():\n keywords_found = [keyword for keyword in keywords if keyword in sentence_to_classify]\n if len(keywords_found) > 0: classification.append(category)\n\n return classification if len(classification) > 0 else ['null']", "def personalization(prediction, n):\n # prediction\n # n top n recommendation\n\n top_n = get_top_n(prediction, n)\n\n rec_dict = {}\n for uid, user_ratings in top_n.items():\n rec_dict[uid] = [iid for (iid, _) in user_ratings]\n\n rec_user_ls = [pred[0] for pred in prediction]\n rec_item_ls = [pred[1] for pred in prediction]\n\n unique_rec_user_ls = np.unique(rec_user_ls)\n unique_rec_item_ls = np.unique(rec_item_ls)\n\n # assign each item with index number\n unique_rec_item_dict = {item: ind for ind,\n item in enumerate(unique_rec_item_ls)}\n\n n_unique_rec_user = len(unique_rec_user_ls)\n n_unique_rec_item = len(unique_rec_item_ls)\n\n # recommended user item matrix\n rec_matrix = np.zeros(shape=(n_unique_rec_user, n_unique_rec_item))\n\n # represent recommended item for each user as binary 0/1\n for user in range(n_unique_rec_user):\n # get userid\n user_id = unique_rec_user_ls[user]\n # get rec item list\n item_ls = rec_dict[user_id]\n\n for item_id in item_ls:\n # get item index\n item = unique_rec_item_dict[item_id]\n rec_matrix[user, item] = 1\n\n # calculate cosine similarity matrix across all user recommendations\n similarity = cosine_similarity(X=rec_matrix, dense_output=False)\n # calculate average of upper triangle of cosine matrix\n upper_right = np.triu_indices(similarity.shape[0], k=1)\n # personalization is 1-average cosine similarity\n score = 1 - np.mean(similarity[upper_right])\n return score", "def get_feature_set_SC(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of 
emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = 0.0\n obj_score = 0.0\n nrof_subwords = 0\n nrof_objwords = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n sub_score = sub_score + sentimentvalues[word][0]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][1]>0:\n sub_score = sub_score + sentimentvalues[word][1]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][2]>0:\n obj_score = obj_score + sentimentvalues[word][2]\n nrof_objwords = nrof_objwords + 1\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n if nrof_subwords>0:\n additional_freq[\"subjective_words\"] = nrof_subwords*1.0\n if nrof_objwords>0:\n additional_freq[\"objective_words\"] = nrof_objwords*1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def generate_counthist(counts, label, view_lim=[1e-6,1e0,1e0,1e5]):\n max_size = max(counts.values())\n num_chains = sum(counts.values())\n sizes = np.arange(1,max_size+1)\n freqs = np.float_(sizes) / num_chains\n (hist,garbage) = np.histogram(counts.values(),bins=sizes)\n idxs = hist > 0\n \n fig = plt.figure()\n \n ax = fig.add_subplot(111)\n ax2 = ax.twiny()\n \n ax.spines['top'].set_position(('outward',5))\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward',5))\n ax.spines['left'].set_position(('outward',5))\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.plot(freqs[idxs],hist[idxs],marker='o',linestyle='None',color='#e31a1c',markeredgewidth=0,markersize=4,clip_on=False,label=label)\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim(view_lim[:2])\n ax.set_ylim(view_lim[2:])\n \n ax2.spines['top'].set_position(('outward',5))\n ax2.spines['right'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.xaxis.set_ticks_position('top')\n ax2.yaxis.set_ticks_position('none')\n ax2.set_xscale('log')\n ax2.set_xlim([view_lim[0]*num_chains,view_lim[1]*num_chains])\n \n ax.set_xlabel('junction frequency (bottom) or count (top)')\n ax.set_ylabel('number of junctions')\n \n leg = ax.legend(loc=0,numpoints=1,prop=mpl.font_manager.FontProperties(size='small'))\n leg.get_frame().set_visible(False)\n \n return fig", "def entropyCategorical(attr, X, y):\n uniques = X[attr].unique().tolist()\n idxLists = []\n entropies = []\n weights = []\n for u in uniques:\n idxLists.append(X.index[X[attr] == u].tolist())\n entropies.append(entropy(y, idxLists[-1]))\n weights.append(len(idxLists[-1]))\n\n entropies = np.array(entropies).reshape(1, -1)\n weights = np.array(weights).reshape(-1, 1).astype(np.float32)\n weights /= np.sum(weights)\n\n return (uniques, idxLists, (entropies @ weights)[0, 0])" ]
[ "0.5880891", "0.57333535", "0.54879767", "0.5449425", "0.54090947", "0.539615", "0.536603", "0.5337189", "0.5275776", "0.51925325", "0.5185017", "0.51759356", "0.5171471", "0.5154309", "0.51523465", "0.51490986", "0.5146992", "0.51279056", "0.51225543", "0.5111562", "0.5107161", "0.51063454", "0.510286", "0.50896895", "0.5080412", "0.5049142", "0.5043476", "0.50322133", "0.5014929", "0.4999854" ]
0.7330615
0
Creates a sample of skills of size 'num_skills'. In this sample, 'rare_sample_fraction' of them are from the rare skills category and 'popular_sample_fraction' of them are from the popular skills category.
def sample_skills_to_be_covered_controlled(self, num_sampled_skills=50, rare_sample_fraction=0.33, popular_sample_fraction=0.33, rare_threshold=0.33, popular_threshold=0.33, user_sample_fraction=1.0): print('In freelancer.') self.sample_users(user_sample_fraction) df_users = pd.DataFrame(self.users) df_users_sampled = df_users[df_users['user_id'].isin(self.E)] # Get categorized skills r, c, p = self.categorize_skills(df_users_sampled, rare_threshold, popular_threshold) # Sample skills from each category num_rare_skills = int(num_sampled_skills * rare_sample_fraction) num_popular_skills = int(num_sampled_skills * popular_sample_fraction) num_common_skills = num_sampled_skills - num_rare_skills - num_popular_skills # Ensure that skills to be sampled in each category is >= number of skills in that category if num_rare_skills > len(r): num_rare_skills = len(r) if num_common_skills > len(c): num_common_skills = len(c) if num_common_skills < 0: num_common_skills = 0 if num_popular_skills > len(p): num_popular_skills = len(p) sampled_rare_skills = np.random.choice(r, size=num_rare_skills, replace=False) sampled_common_skills = np.random.choice(c, size=num_common_skills, replace=False) sampled_popular_skills = np.random.choice(p, size=num_popular_skills, replace=False) # Merge indices of all sampled skills sampled_skills = np.concatenate((sampled_rare_skills, sampled_common_skills, sampled_popular_skills)) # Create final skills sample self.skills_covered = np.zeros(self.num_skills) for skill_id in range(self.num_skills): if skill_id not in sampled_skills: self.skills_covered[skill_id] = 1 # Mark unsampled skills as already covered self.skills_covered = self.skills_covered.astype(bool) self.num_rare_skills = num_rare_skills self.num_common_skills = num_common_skills self.num_popular_skills = num_popular_skills
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_samples(self, skills_sample_fraction=1.0, users_sample_fraction=1.0):\n # Sampling\n self.sample_skills_to_be_covered(skills_sample_fraction)\n self.sample_users(users_sample_fraction)", "def sample_skills_to_be_covered(self, fraction=1.0):\n self.skills_covered = np.zeros(self.num_skills)\n if fraction < 1.0:\n num_sampled_skills = int(fraction * self.num_skills)\n sampled_skills = np.random.choice(self.num_skills, size=num_sampled_skills, replace=False)\n\n for skill_id in range(self.num_skills):\n if skill_id not in sampled_skills:\n self.skills_covered[skill_id] = 1 # Mark unsampled skills as already covered\n\n self.skills_covered = self.skills_covered.astype(bool)", "def easy_sample(self, num, **kwargs):\n return self.preprocess(self.sample(num, **kwargs), **kwargs)", "def sample(self, num):\n state, action, reward, next_state, not_terminal = [], [], [], [], []\n for exp in self.experiences.sample(num):\n state.append(exp.state)\n action.append(exp.action)\n reward.append(exp.reward)\n next_state.append(exp.next_state)\n not_terminal.append(exp.not_terminal)\n\n return np.asarray(state), np.asarray(action), np.asarray(reward), \\\n np.asarray(next_state), np.asarray(not_terminal)", "def getSkills(self,number):\n skills = ['java','javascript','nodejs','css','scss','angular',\n 'express','sql','mongodb','spark','python','opencv',\n 'native-script','reactjs','backbone-js','docker','unix']\n returnSkills = []\n\n if number< skills.__len__():\n for item in range(0,number):\n tempSkill = skills[randint(0,skills.__len__()-1)]\n if tempSkill not in returnSkills:\n returnSkills.append(tempSkill)\n else:\n continue\n return returnSkills\n else:\n return skills", "def categorize_skills(self, df_sampled_users, rare_threshold=0.33, popular_threshold=0.33):\n # Get frequency of each skills\n skills_array = np.array(df_sampled_users['skills_array'].values)\n freq = np.sum(skills_array, axis=0)\n freq_skills_available = freq[freq > 0]\n num_skills_available = freq_skills_available.shape[0]\n\n # Get indices of ascending order sorted frequencies\n sorted_idx = np.argsort(freq_skills_available)\n\n rare_threshold_idx = int(num_skills_available * rare_threshold)\n popular_threshold_idx = int(num_skills_available * (1 - popular_threshold))\n\n # Split the sampled skills into categories using frequencies\n rare_skills = sorted_idx[:rare_threshold_idx]\n common_skills = sorted_idx[rare_threshold_idx: popular_threshold_idx]\n popular_skills = sorted_idx[popular_threshold_idx:]\n\n return (rare_skills, common_skills, popular_skills)", "def skills():\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]", "def sample(self, num_samples, **kwargs):\n pass", "def sample_from_mixture(x, pred_weights, pred_means, pred_std, amount):\n samples = np.zeros((amount, 2))\n n_mix = len(pred_weights[0])\n to_choose_from = np.arange(n_mix)\n for j, (weights, means, std_devs) in enumerate(\n zip(pred_weights, pred_means, pred_std)):\n index = np.random.choice(to_choose_from, p=weights)\n samples[j, 1] = np.random.normal(means[index], std_devs[index], size=1)\n samples[j, 0] = x[j]\n\n if j == amount - 1:\n break\n return samples", "def small_sample(num):\n sample = [0] * num\n for i in range(num):\n u = random.randint(0, 3)\n if u == 3:\n sample[i] = -1\n if u == 2:\n sample[i] = 1\n return sample", "def sample(self):\n\n # pick sample type 
according to probability\n samplers = [\"unif\", \"geo\", \"diverse\"]\n sample_idx = np.random.multinomial(\n 1, [self.unif_prob, self.geo_prob, self.diverse_prob])\n idx = np.argmax(sample_idx)\n sampler = samplers[idx]\n\n if sampler == \"unif\":\n return self.unif_sampler()\n if sampler == \"geo\":\n return self.geo_sampler()\n if sampler == \"diverse\":\n return self.diverse_sampler()", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def generate_samples(self, num_samples=100):\n samples = np.random.random_sample((num_samples, self.input_size))\n samples[:, self.attribute_size:] *= (self.sample_range[1] - self.sample_range[0]) + self.sample_range[0]\n # convert the attribute vector part to 0/1 representation\n samples[:, :self.attribute_size] = np.rint(samples[:, :self.attribute_size])\n return samples", "def sample(self, size=1):\n raise NotImplementedError", "def sample(self, size=1):\n raise NotImplementedError", "def sample(self, size=1):\n raise NotImplementedError", "def sample(self, size=1):\n raise NotImplementedError", "def sample(self, sample_size: int = 20):\n return self._adapter.sample(sample_size)", "def sample(self, n, include=True):\n return [self(t / n) for t in range(n + int(include))]", "def random_sampling(predictions, number):\n return random.sample(range(len(predictions)), number)", "def sample(self, n):\n raise NotImplementedError", "def grow_sample(smp, num):\n\tsmp = np.asarray(smp)\n\tsmp_len = len(smp)\n\tif smp_len > num:\n\t\treturn np.array(random.sample(list(smp), num))\n\n\tdiv = int(np.floor(num / smp_len))\n\trem = num - div * smp_len\n\trem_el = np.array(random.sample(list(smp), rem))\n\tbase_el = np.tile(smp, div)\n\treturn np.concatenate((base_el, rem_el))", "def sample(self, n=1):\n raise NotImplementedError", "def sample(self, n, items_per=1, weight=False):\n if weight:\n item_count = self.item_count()\n p = self.n_per_item()\n p = p / p.sum()\n return np.array([np.random.choice(item_count, size=items_per, replace=False, p=p) for _ in range(n)])\n else:\n return self.fast_sample(n, items_per)", "def sample(self, batch_size):\n max_IS_weight = 0\n assert len(self) >= batch_size, \"Can't sample \" + str(batch_size) +\\\n \" unique elements if there aren't that many elements.\"\n ids = set()\n IS_weights = dict()\n while len(ids) < batch_size and len(self.unplayed_experiences) > 0:\n # Make sure to replay new experiences before sampling. This\n # guarantees all experiences get 1 replay. Which is important so\n # that the network gets a variety of experiences, and also so that\n # the experience can be given a legitimate weight.\n idx = self.unplayed_experiences.popleft()\n ids.add(idx)\n # On first replay the experience is of full importance.\n IS_weights[idx] = 1\n\n if len(ids) < batch_size:\n assert self.total_weight > 0,\\\n \"Attempting to re-sample without updating weights.\"\n # Precompute for Importance Sampling.\n P_min = (self.min_weight / self.total_weight)\n max_IS_weight = (self.capacity * P_min) ** -self.beta\n\n # sample from the weighted buffer, but make sure to exclude the ids\n # that were chosen from the unplayed ones. Break the tree up into\n # ranges of weights so that we sample from a range of different\n # episodes.\n ids |= self.experiences.sample_n_subsets(batch_size - len(ids),\n exclude=ids)\n assert len(ids) == batch_size,\\\n \"Internal Error: incorrect sample size. 
len(ids)=\" + str(len(ids))\n\n state, action, reward, next_state, not_terminal = [], [], [], [], []\n for idx, exp in zip(ids, self.experiences[ids]):\n state.append(exp.state)\n action.append(exp.action)\n reward.append(exp.reward)\n next_state.append(exp.next_state)\n not_terminal.append(exp.not_terminal)\n if idx not in IS_weights.keys():\n # For previously experienced weights, calculate and append\n # the Importance Sampling weight.\n P_i = exp.weight / self.total_weight\n weight_i = (self.capacity * P_i) ** -self.beta\n IS_weights[idx] = weight_i / max_IS_weight\n\n return ids, np.array(state), np.array(action), np.array(reward), \\\n np.array(next_state), np.array(not_terminal),\\\n np.array([IS_weights[idx] for idx in ids])", "def sample(self, number_samples: int = 1) -> List[Any]:\n # if prompt is provided, use it\n if self.prompt:\n item = self.model(batch_size=number_samples, prompt=self.prompt)\n else:\n item = self.model(batch_size=number_samples)\n\n # To support old diffusers versions (<0.6.0)\n if DIFFUSERS_VERSION_LT_0_6_0 or self.model_type in [\"geodiff\"]:\n item = item[\"sample\"]\n else:\n item = item.images\n\n return item", "def sample(self, num_items: int) -> SampleBatchType:\n idxes = [\n random.randint(0,\n len(self._storage) - 1) for _ in range(num_items)\n ]\n self._num_sampled += num_items\n return self._encode_sample(idxes)", "def sample(self, size=1):\n pass", "def take_samples(self, num_samples: int) -> List:\n if num_samples > len(self.samples):\n return random.sample(self.samples, len(self.samples))\n return random.sample(self.samples, num_samples)" ]
[ "0.69684005", "0.6896007", "0.57801753", "0.57077175", "0.5643414", "0.5603849", "0.5503905", "0.5475773", "0.5417366", "0.5333956", "0.5306661", "0.5280854", "0.5280854", "0.52536625", "0.5248631", "0.5248631", "0.5248631", "0.5248631", "0.52307445", "0.5207248", "0.52070117", "0.5203807", "0.5201163", "0.52000093", "0.5190628", "0.5190392", "0.5166584", "0.51574266", "0.51549894", "0.51414335" ]
0.7567981
0
Assigns the ground set elements to partitions uniformly at random
def assign_ground_set_to_random_partitions(self, num_of_partitions, cardinality_constraint): print('In freelancer random partition.') self.partitions = defaultdict(dict,{i:{'users':set(), 'k':cardinality_constraint} for i in range(0,num_of_partitions)}) partition_ids = np.arange(start=0, stop=num_of_partitions, step=1) for user_id in self.E: p_id = np.random.choice(a = partition_ids) self.partitions[p_id]['users'].add(user_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randomize(self):\n \n spins = [np.random.random() > 0.5 for x in range(self.size)]\n self.spins_initial = bitarray.bitarray(spins)", "def randPlace(self):\r\n random.seed(self.seed)\r\n \r\n # Start placement on Partition A\r\n partA = True\r\n for node in self.G.nodes():\r\n \r\n randSite = random.randint(0,int(self.sitesNum/2)-1)\r\n \r\n if partA:\r\n partSite = self.sitesA\r\n self.G.node[node][\"part\"] = 'A'\r\n \r\n else:\r\n partSite = self.sitesB\r\n self.G.node[node][\"part\"] = 'B'\r\n \r\n while (partSite[randSite].isOcp()):\r\n randSite = random.randint(0,int(self.sitesNum/2)-1) \r\n\r\n partSite[randSite].setCell(node)\r\n self.G.node[node][\"site\"] = partSite[randSite]\r\n \r\n # Toggle partition for next placement\r\n partA = not partA", "def setRandom(self):\n pass # define each VarElement family", "def generator(self, random, args):\r\n locations = [i for i in range(len(self.weights))]\r\n random.shuffle(locations)\r\n return locations", "def randomize(self, seed_density):\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (rand.random() <= seed_density):\r\n self.cells[x][y] = 1", "def shuffle(self):\n self.__c_elem().melange()", "def distribute_waterbag(self):\n # Generate particles by creating trials and finding particles with potential less than emittance, then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n while ptclsMade < self.npart:\n ranU = 0.0\n while ranU <= 0:\n ranU = random.random()\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n trialH = np.sqrt(ranU)\n newH = self.emit*trialH\n y0 = np.sqrt(newH)\n #self.emittance = newH\n yMax = newton(self.whatsleft, y0)\n\n #bounding the horizontal coordinate is difficult, but it should not exceed the pole\n xMax = self.c\n #xMax = yMax\n\n trialValue = 1e10\n while trialValue >= newH:\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n\n initialValue = trialValue\n if initialValue < newH:\n pMag = np.sqrt(2*(newH - initialValue))\n pDir = 2*np.pi* random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n else:\n print(\"Initial value generated exceeds limiting H. 
Sampling new value.\")\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)", "def implement_random(self):\n shape = set()\n for coord in INDICES:\n if randint(0, 1):\n shape.add(coord)\n self.implement_shape(shape)", "def totem_random():\n random_head()\n random_head()\n random_head()", "def rnd_pset(self):\n\t\treturn [rnd() * 10, rnd() * 10, rnd() * 12 * 15, rnd() * 12 * 15]", "def random(self, af=False):\n rank = randrange(self.order())\n return self.coset_unrank(rank, af)", "def random(self):\n adj = self.adjacent()\n self.switch(random.choice([pos for pos in adj if self.in_grid(pos) and pos != self.prev]))", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)", "def randomize_components(self):\n # For loop applied to components named (4, 5, 6)\n for i in range(4, 7):\n while True:\n rand_pos = randrange(len(self.tiles)+1)\n if self.tiles[rand_pos] == 0:\n self.tiles[rand_pos] = i\n break", "def random_element(self):\n from sage.graphs.schnyder import minimal_schnyder_wood\n from sage.graphs.generators.random import RandomTriangulation\n n = self._size\n tri = RandomTriangulation(n + 3)\n TIP = TamariIntervalPosets\n schnyder = minimal_schnyder_wood(tri, root_edge=('a', 'b'),\n check=False)\n return TIP.from_minimal_schnyder_wood(schnyder)", "def uniform_random(self) -> None:\n\n size = self.circ_size\n random.seed(self.seed)\n\n gates = [self.h, self.x, self.y, self.z, self.s, self.t, self.cx]\n candidates = set(range(size))\n\n for i in range(size):\n for j in range(size):\n to_apply = random.choice(gates)\n\n num_qubits = 2 if to_apply == self.cx else 1\n targets = random.sample(candidates, num_qubits)\n to_apply(*targets)\n\n if self.meas: self.measure(self.qr, self.cr)", "def shuffle(self):\n for i in range(10):\n random.shuffle(self.set)", "def set_generator(random, args):\n representation = args.get('representation')\n indices = list(range(len(representation)))\n max_size = args.get('max_size', 9)\n variable_size = args.get('variable_size', True)\n if variable_size and max_size > 1:\n size = random.randint(1, max_size)\n else:\n size = max_size\n candidate = random.sample(indices, size)\n return sorted(candidate)", "def generatoze(b):\r\n l = []\r\n for i in range(b):\r\n k = random.randint(0, 100)\r\n l.append(k)\r\n return l", "def mutate(individual):\n for idx, q in enumerate(individual):\n rng = random.random()\n if rng < 1 / (len(individual)):\n pos = random.randint(1, len(individual))\n individual[idx] = pos\n return individual", "def generateSupplies(self):\n typePart = ['wrench','resistor','bulb','mushroom','coin']\n chosenPart = []\n for i in range(3):\n randomPart = choice(typePart)\n chosenPart.append(randomPart)\n typePart.remove(randomPart)\n for part in chosenPart:\n amount = randint(1,3)\n 
self._supplies.append(Node(part,amount))", "def generate_with_antichain(set_sizes, n_sets, antichain_sizes, seed):\n np.random.seed(seed)\n\n for n in set_sizes:\n for k, size in zip(range(n_sets), antichain_sizes):\n antichain_size = int(np.round(size*n))\n # Generiramo nakljucni permutaciji (zadnjih antichain_size elementov predstavlja antiverigo, zato jih ni v permutaciji)\n p1 = np.random.permutation(n-antichain_size)\n p2 = np.random.permutation(n-antichain_size)\n # Potrebujemo razlicni permutaciji, sicer imamo delno urejenost reda 1\n while np.all(p1 == p2):\n p2 = np.random.permutation(n-antichain_size)\n # Slovarja, ki za vsako stevilo vrneta mesto v posamezni permutaciji\n inverse1 = {i: p1[i] for i in range(n-antichain_size)}\n inverse2 = {i: p2[i] for i in range(n-antichain_size)}\n\n # Gremo cez vse kombinacije, ce je eno stevilo manjse od drugega v obeh primerih, ju dodamo v delno urejenost\n partial_order = []\n for u in range(n-antichain_size):\n for v in range(n-antichain_size):\n if inverse1[u] < inverse1[v] and inverse2[u] < inverse2[v]:\n partial_order.append((u, v))\n\n # Izpisemo delno urejenost v datoteko\n f = open(f'../data/vhod_antichain_{n}_{k+1}.txt', 'w')\n # Prva vrstica\n f.write(f'{n} {len(partial_order)}\\n')\n # Primerljivi elementi\n for u, v in partial_order:\n # Dodamo +1, da ni indeksiranja z 0\n f.write(f'{u+1} {v+1}\\n')\n f.close()", "def __generate_random_nodes(self,k=3):\n if k < 3:\n k = 3\n\n k = min(k,len(self.G.nodes()))\n self.__logger.info(\"RANDOM_NODES: try to generate a set of {} nodes sampled with uniform distribution\".format(k))\n \n return random.sample(self.G.nodes(),k)", "def new_tile(self):\n random.shuffle(self.tiles) # shuffle the list of tiles tuples\n count = 0\n while self.get_tile(self.tiles[0][0], self.tiles[0][1]) != 0 and count < self.grid_height*self.grid_width: \n self.tiles.append(self.tiles.pop(0)) \n \n # next, select value as 2 with a 90% probability (percentage) and 4 with 10%\n percentage = random.random() \n if percentage > 0.1:\n value = 2\n else:\n value = 4\n row = self.tiles[0][0]\n col = self.tiles[0][1]\n self.set_tile(row , col,value)", "def generator(self, random, args):\r\n if self.duplicates:\r\n max_count = [self.capacity // item[0] for item in self.items]\r\n return [random.randint(0, m) for m in max_count]\r\n else:\r\n return [random.choice([0, 1]) for _ in range(len(self.items))]", "def linear_set_generator(random, args):\n bounder = args.get(\"_ec\").bounder\n representation = args.get('representation')\n max_size = args.get('max_size', 9)\n variable_size = args.get('variable_size', True)\n if variable_size:\n size = random.randint(1, max_size)\n else:\n size = max_size\n\n indices = random.sample(range(len(representation)), size)\n values = random.uniform(next(bounder.lower_bound), next(bounder.upper_bound), len(indices))\n return OrderedDict({i: v for i, v in zip(indices, values)})", "def randomSolution(self):\n # seed the random number generator\n random.seed()\n # loop through all the features\n for feature in self.features:\n # pick a random number based on the size of the feature's domain\n domainIndex = random.randint(0, len(feature.domain) - 1)\n # assign the value from the domain\n feature.value = feature.domain[domainIndex]", "def growPopulation(P,G):\n population = []\n for i in range(P):\n basicPerm = range(1,G)\n random.shuffle(basicPerm)\n population.append([0]+basicPerm)\n return population" ]
[ "0.6404953", "0.6304296", "0.62770027", "0.6260475", "0.62467414", "0.6226484", "0.6215011", "0.62098956", "0.6190984", "0.61088425", "0.608441", "0.6081554", "0.60681367", "0.6066866", "0.60614324", "0.6058705", "0.6013652", "0.6003242", "0.59814346", "0.59783477", "0.59728765", "0.59334373", "0.5932814", "0.5916872", "0.59140193", "0.58876264", "0.58614314", "0.5850711", "0.58472776", "0.5839463" ]
0.73680335
0
Assigns the ground set elements to partitions based on their salary
def assign_ground_set_to_equi_salary_partitions(self, num_of_partitions, cardinality_constraint): print('In freelancer salary partition.') costs = set() for user_id in self.E: costs.add(self.cost_vector[user_id]) sorted_costs = sorted(list(costs)) # each cost is a partition if len(sorted_costs) <= num_of_partitions: self.partitions = defaultdict(dict,{i:{'users':set(), 'k':cardinality_constraint} for i in sorted_costs}) else: partition_costs = list(self.split(sorted_costs, num_of_partitions)) self.partitions = defaultdict(dict,{i[-1]:{'users':set(), 'k':cardinality_constraint} for i in partition_costs}) for user_id in self.E: user_cost = self.cost_vector[user_id] min_val = 0 for cost, users in self.partitions.items(): max_val = cost if user_cost > min_val and user_cost <= max_val: self.partitions[max_val]['users'].add(user_id) break min_val = max_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assign_ground_set_to_random_partitions(self, num_of_partitions, cardinality_constraint):\n print('In freelancer random partition.')\n self.partitions = defaultdict(dict,{i:{'users':set(), 'k':cardinality_constraint} for i in range(0,num_of_partitions)})\n partition_ids = np.arange(start=0, stop=num_of_partitions, step=1)\n\n for user_id in self.E:\n p_id = np.random.choice(a = partition_ids)\n self.partitions[p_id]['users'].add(user_id)", "def test_employees_by_salary_index(self):\n key=\"employees-by-salary\"\n emps_by_salary = {50000: ['5'], 75000: ['4'], 80000: ['3'], 120000: ['2'],\n 100000: ['1']}\n self.mapper.map(\"select id, salary from redmate.employees\") \\\n .to_sorted_set(key_pattern=key, score=\"salary\")\n self.mapper.run()\n\n for sal in emps_by_salary.items():\n self.assertEqual(sal[1],\n self.redis.zrangebyscore(key, sal[0] - 1, sal[0] + 1))", "def test_greedy_partition(self):\r\n\r\n #(non) partition into one bucket\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 1)\r\n self.assertEquals(obs_levels, [6])\r\n self.assertEquals(obs_part, [['3', '1', '2']])\r\n\r\n # two buckets\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 2)\r\n\r\n self.assertEquals(obs_levels, [3, 3])\r\n self.assertEquals(obs_part, [['3'], ['1', '2']])\r\n\r\n # larger input\r\n obs_part, obs_levels = greedy_partition({'1': 1, '2': 2, '3': 3,\r\n '4': 4, '5': 5, '6': 6}, 2)\r\n self.assertEquals(obs_levels, [11, 10])\r\n self.assertEquals(obs_part, [['6', '3', '2'], ['5', '4', '1']])", "def partition_book(self):\n ...", "def perform_partition(rows, partition_attribute):\n data_partitions = defaultdict(list)\n for row in rows:\n data_partitions[row[partition_attribute]].append(row)\n return data_partitions", "def populate_homes(self, breakdown):\n #check!#\n\n ###your code here###\n tot=self.nx*self.ny\n for n in range(len(breakdown)):\n breakdown[n]=int(round(tot*breakdown[n]))\n for i in range(breakdown[n]):\n new_home=self.empty_homes.pop(random.randrange(len(self.empty_homes)))\n home_address=(new_home.x,new_home.y)\n x=Person(group=n,home=self.homes[home_address])\n self.homes[home_address].occupant=x\n self.people.append(x)", "def assign_fields(x, l, group, binner):\n rows = x.shape[0]\n x = x.assign(binner=pd.Series([binner] * rows, index=x.index))\n x = x.assign(min_contig_length=pd.Series([l] * rows, index=x.index))\n x = x.assign(assembly=pd.Series([group] * rows, index=x.index))\n return x", "def _assign_by_bfs(self, start, worker, initial_sizes, spread_limits,\n keys_to_assign, assigned_record, graph=None):\n if initial_sizes[worker] <= 0:\n return\n\n graph = graph or self._graph\n if self._assign_graph is None:\n undigraph = self._assign_graph = graph.build_undirected()\n else:\n undigraph = self._assign_graph\n\n assigned = 0\n spread_range = 0\n for v in undigraph.bfs(start=start, visit_predicate='all'):\n op_key = v.op.key\n if op_key in assigned_record:\n continue\n spread_range += 1\n if op_key not in keys_to_assign:\n continue\n assigned_record[op_key] = worker\n assigned += 1\n if spread_range >= spread_limits[worker] \\\n or assigned >= initial_sizes[worker]:\n break\n initial_sizes[worker] -= assigned", "def computePartitions(S):\n # gets unique list of cases which are used as keys for dictionaries\n a = list(set([tuple(i) for i in S]))\n partitionset = dict.fromkeys(a, [])\n for i in range(len(a)):\n # Creating a dictionary with all unique cases\n partitionset[a[i]] = []\n for i in range(len(a)):\n for j in 
range(len(S)):\n if(a[i] == tuple(S[j])):\n partitionset[a[i]].append(j)\n partset = []\n concept = []\n for keys in partitionset:\n concept.append(keys)\n partset.append(set(partitionset[keys]))\n return partset,concept", "def construct_assignments(priest_list, group_list):\n priest_list = copy(priest_list)\n group_list = copy(group_list)\n buff_assignments = []\n if len(priest_list) == len(group_list):\n \"\"\" 1 priest per group \"\"\"\n priest_group = zip(priest_list, group_list)\n for priest_assign in priest_group:\n priest, group = priest_assign\n buff_assignments.append({\"priest\": priest, \"groups_assigned\": [group]})\n elif len(priest_list) < len(group_list):\n \"\"\" Fewer priests than groups, some will have more than 1 group assigned. \n Function will attempt to give consecutive group assignments in these cases. \"\"\"\n priest_parties_each, priest_additionals = divmod(len(group_list), len(priest_list))\n for priest in priest_list:\n buff_allocation = {\"priest\": priest, \"groups_assigned\": []}\n if priest_additionals > 0:\n for x in range(priest_parties_each+1):\n group_pop = group_list.pop(0)\n buff_allocation[\"groups_assigned\"].append(group_pop)\n priest_additionals -= 1\n else:\n for x in range(priest_parties_each):\n group_pop = group_list.pop(0)\n buff_allocation[\"groups_assigned\"].append(group_pop)\n buff_assignments.append(buff_allocation)\n print(\"Outcome: \", buff_assignments)\n return buff_assignments", "def metis_partition(G):\n partition_list = partition(G, 2)[1]\n for i in range(2):\n for username in partition_list[i]:\n G.add_node(username, cluster=i)\n \n return G", "def __init__(self, my_partition: List[int]):\n self.my_partition = my_partition\n self.my_partition.sort(reverse=True)\n if self.my_partition[-1]==0:\n first_zero = self.my_partition.index(0)\n self.my_partitition = self.my_partition[0:first_zero]\n self.my_n = sum(self.my_partition)", "def get_partitions(self, persistence=None):\n if persistence is None:\n persistence = self.persistence\n partitions = {}\n # TODO: Possibly cache at the critical persistence values,\n # previously caching was done at every query level, but that\n # does not make sense as the partitions will only change once\n # the next value in self.persistences is attained. Honestly,\n # this is probably not a necessary optimization that needs to\n # be made. 
Consider instead, Yarden's way of storing the points\n # such that merged arrays will be adjacent.\n for key, items in self.base_partitions.items():\n min_index = key[0]\n max_index = key[1]\n while (\n self.merge_sequence[min_index][0] < persistence\n and self.merge_sequence[min_index][1] != min_index\n ):\n min_index = self.merge_sequence[min_index][1]\n while (\n self.merge_sequence[max_index][0] < persistence\n and self.merge_sequence[max_index][1] != max_index\n ):\n max_index = self.merge_sequence[max_index][1]\n new_key = (min_index, max_index)\n if new_key not in partitions:\n partitions[new_key] = []\n partitions[new_key].extend(items.tolist())\n\n for key in partitions:\n partitions[key] = sorted(list(set(partitions[key])))\n return partitions", "def Partitioning(self, *args):\n return _hypre.HypreParVector_Partitioning(self, *args)", "def occupation_distribution(data):", "def partition_by_eigenvector(graph):\n ###TODO\n pass", "def partition(self, data, labels):\n\t\traise Exception(\"Not implmented\")", "def make_salaried(self,salary,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"2\"\n print(\"{}{}\".format(name,\" was successfully changed to be a salaried employee\"))\n self.emp_dict[id][7] = salary\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def __partition1(self, lst, n):\n \n if lst is None:\n lst = []\n \n # Create a dictionary for the parted spws:\n rdict = {}\n division = len(lst)/float(n)\n for i in xrange(int(n)):\n part = lst[int(round(division * i)):int(round(division * (i+1)))]\n rdict[i] = part\n \n return rdict", "def squeeze_accept(partition):\n Write a function that\n - Sort districts by most Democratic heavy and most Republican heavy\n\n - Assign a base value of competitiveness for each district\n - Run chain, accept only if districts satisfy values under or order\n \"\"\"\n\n#--- CONSTRAINTS\n\n\"\"\"", "def employer_contribution_initialization(self, salary, employee_contribution, employer_match_tier_match_list):\r\n self.salary = salary\r\n self.employee_contribution = employee_contribution\r\n self.employer_match_tier_match_list = employer_match_tier_match_list\r\n self.limit_salary()", "def partitionData(data, labels, partition):\n\treturn [s[partition] for s in data], labels[partition]", "def __iter__(self):\n for partition in self._partition_set:\n yield partition", "def get_stable_manifolds(self, persistence=None):\n if persistence is None:\n persistence = self.persistence\n partitions = {}\n for key, items in self.base_partitions.items():\n max_index = key[1]\n while (\n self.merge_sequence[max_index][0] < persistence\n and self.merge_sequence[max_index][1] != max_index\n ):\n max_index = self.merge_sequence[max_index][1]\n new_key = max_index\n if new_key not in partitions:\n partitions[new_key] = []\n partitions[new_key].extend(items.tolist())\n\n for key in partitions:\n partitions[key] = sorted(list(set(partitions[key])))\n\n return partitions", "def create_partition(mesh,polygons,enforce_exact=False):", "def split():\n flag = 0\n for chromosome in region:\n for inf in region[chromosome]:\n if flag == 0:\n if chromosome not in test_set:\n test_set[chromosome] = [inf]\n else:\n test_set[chromosome].append(inf)\n else:\n if chromosome not in train_set:\n train_set[chromosome] = [inf]\n else:\n train_set[chromosome].append(inf)\n\n flag += 1\n flag %= 10", "def test_employees_in_departments_set_mapping(self):\n emps_by_dep = {'1': 
{'1', '2'}, '2': {'3', '4'}, '3': {'5'}}\n self.mapper.map(query=\"select id, department_id from redmate.employees\") \\\n .to_set(key_pattern=\"dep:{department_id}:employees\")\n self.mapper.run()\n for entry in emps_by_dep.items():\n self.assertEqual(entry[1],\n self.redis.smembers(\"dep:{0[0]}:employees\".format(entry)))", "def set_particle_IDs_partition(index, iterator):\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n yield arr", "def final_seat_assignment():\n parties = get_sorted_parties()\n allocated_seats = get_sorted_allocated_seats() \n #list(zip(parties, allocated_seats))\n #pandas.concat([parties, allocated_seats], axis=1)\n distributed_seats = []\n for i in range(0, len(parties)):\n list_votes2 = get_sorted_votes2([\"state\"], parties[i]) \n list_min_seats = get_sorted_min_seats([\"state\"], parties[i]) \n list_ueberhang = get_sorted_ueberhang([\"state\"], parties[i])\n seats2dist = allocated_seats[i] - sum(list_ueberhang)\n print(parties[i])\n distributed_seats.append((parties[i]\n , max(distributeSeats(seats2dist, list_votes2, False, 100) , list_min_seats)\n )) # adding tuples\n \n return distributed_seats", "def rpartition(self, x):\n pass" ]
[ "0.5667653", "0.55090714", "0.53606564", "0.5289445", "0.5275607", "0.52265185", "0.5104114", "0.5090907", "0.507885", "0.5077779", "0.50641537", "0.50145876", "0.5004258", "0.49790886", "0.4966237", "0.49300385", "0.492593", "0.48953396", "0.48378688", "0.4824251", "0.47849864", "0.47757387", "0.47739175", "0.47670373", "0.47633338", "0.475789", "0.47427133", "0.473054", "0.47293076", "0.4724463" ]
0.72019356
0
Reads n lines from f with an offset of offset lines.
def tail(f, n, offset=0): avg_line_length = 74 to_read = n + offset while 1: try: f.seek(-(avg_line_length * to_read), 2) except IOError: # woops. apparently file is smaller than what we want # to step back, go to the beginning instead f.seek(0) pos = f.tell() lines = f.read().splitlines() if len(lines) >= to_read or pos == 0: return lines[-to_read:offset and -offset or None] avg_line_length *= 1.3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _seek_to_n_lines_from_end(f, numlines=10):\n\tbuf = \"\"\n\tbuf_pos = 0\n\tf.seek(0, 2) # seek to the end of the file\n\tline_count = 0\n\n\twhile line_count < numlines:\n\t\tnewline_pos = buf.rfind(\"\\n\", 0, buf_pos)\n\t\tfile_pos = f.tell()\n\n\t\tif newline_pos == -1:\n\t\t\tif file_pos == 0:\n\t\t\t\t# start of file\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttoread = min(1024, file_pos)\n\t\t\t\tf.seek(-toread, 1)\n\t\t\t\tbuf = f.read(toread) + buf[:buf_pos]\n\t\t\t\tf.seek(-toread, 1)\n\t\t\t\tbuf_pos = len(buf) - 1\n\t\telse:\n\t\t\t# found a line\n\t\t\tbuf_pos = newline_pos\n\t\t\tline_count += 1\n\n\tif line_count == numlines:\n\t\tf.seek(buf_pos + 1, 1)", "def _seek_to_n_lines_from_end_ng(f, numlines=10):\n\tline_count = 0;\n\n\tfor line in f:\n\t\tline_count += 1;\n\tpos = line_count - numlines;\n\tif (pos >= 0):\n\t\tf.seek(pos, 0);\n\telse:\n\t\tf.seek(0, 0);", "def newtail(f, n, offset=0):\n for i, line in enumerate(f):\n print(\"newtail stats\", i, n, line, )\n if i == n:\n return line", "def tail(f, lines=1, _buffer=4098):\n # place holder for the lines found\n lines_found = []\n\n # block counter will be multiplied by buffer\n # to get the block size from the end\n block_counter = -1\n\n # loop until we find X lines\n while len(lines_found) < lines:\n try:\n f.seek(block_counter * _buffer, os.SEEK_END)\n except IOError: # either file is too small, or too many lines requested\n f.seek(0)\n lines_found = f.readlines()\n break\n\n lines_found = f.readlines()\n\n # we found enough lines, get out\n if len(lines_found) > lines:\n break\n\n # decrement the block counter to get the\n # next X bytes\n block_counter -= 1\n\n return lines_found[-lines:]", "def read_lines(filename=\"\", nb_lines=0):\n line_count = 0\n with open(filename, mode='r', encoding='utf-8') as f:\n for line_count, lines in enumerate(f):\n pass\n if nb_lines <= 0 or nb_lines > (line_count + 1):\n f.seek(0)\n print(f.read(), end='')\n else:\n f.seek(0) # return to file beginning\n for line in range(nb_lines):\n print(f.readline(), end='')", "def getlines(self, n, m):\n return self.__contents[n:m]", "def read_list(self, n):\n i = self.pos\n ret = self._buffer[i:i + n]\n if len(ret) < n:\n raise self._eof\n\n self.pos += n\n return ret", "def line_offsets(fname):\n line_offset = []\n offset = 0\n for _, line in enumerate( open(fname) ):\n line_offset.append(offset)\n offset += len(line)\n return line_offset", "def read_lines(filename=\"\", nb_lines=0):\n with open(filename, 'r', encoding=\"utf-8\") as f:\n if nb_lines <= 0:\n read_data = f.read()\n print(read_data, end='')\n else:\n n_lines = 0\n for x in f:\n print(x, end='')\n n_lines += 1\n if n_lines == nb_lines:\n break", "def read_count(f, n):\n buf = ''\n while len(buf) < n:\n nextchunk = f.read(n - len(buf))\n if not nextchunk:\n return ''\n buf += nextchunk\n return buf", "def read_lines(filename=\"\", nb_lines=0):\n\n line_count = 0\n with open(filename, mode='r', encoding='utf-8') as f:\n for lines in f:\n line_count += 1\n with open(filename, mode='r', encoding='utf-8') as f:\n if nb_lines <= 0 or nb_lines >= line_count:\n result = f.read()\n print(\"{:s}\".format(result), end=\"\")\n else:\n for j, line in enumerate(f):\n if j < nb_lines:\n print(\"{:s}\".format(line), end=\"\")", "def all_lines_at_idx(mm, idx_list):\n lines = []\n for idx in idx_list:\n mm.seek(idx)\n # row back to beginning of line\n ibegin = mm.rfind(b'\\n', 0, idx)\n if ibegin == -1:\n ibegin = 0\n mm.seek(ibegin)\n mm.readline()\n # read desired line\n line = mm.readline()\n 
lines.append(line.decode())\n return lines", "def read_lines(filename=\"\", nb_lines=0):\n with open(filename) as file:\n n_lines = 0\n for line in file:\n n_lines += 1\n if nb_lines <= 0 or nb_lines >= n_lines:\n file.seek(0)\n for line in file:\n print(line, end=\"\")\n else:\n file.seek(0)\n for line in range(nb_lines):\n print(file.readline(), end=\"\")", "def read_lines(filename=\"\", nb_lines=0):\n\n line_counter = 0\n with open(filename, 'r', encoding='utf-8') as my_file:\n for lines in my_file:\n line_counter += 1\n my_file.seek(0)\n if nb_lines <= 0 or nb_lines >= line_counter:\n print(my_file.read(), end=\"\")\n else:\n for i in range(nb_lines):\n print(my_file.readline(), end=\"\")", "def read_lines(filename=\"\", nb_lines=0):\n\n num = 0\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n num += 1\n\n with open(filename, encoding=\"utf-8\") as f:\n if nb_lines <= 0 or nb_lines >= num:\n for line in f:\n print(line, end='')\n else:\n for line in range(nb_lines):\n print(f.readline(), end='')", "def read_lines(filename=\"\", nb_lines=0):\n\n with open(filename, encoding=\"UTF8\") as f:\n if nb_lines <= 0:\n print(f.read(), end=\"\")\n i = 0\n while i < nb_lines:\n print(f.readline(), end=\"\")\n i += 1", "def read_lines(filename=\"\", nb_lines=0):\n read_buffer = []\n\n with open(filename, \"r\", encoding=\"utf-8\") as fd:\n count = 0\n for count, line in enumerate(fd):\n if (count < nb_lines or nb_lines == 0):\n read_buffer.append(line)\n else:\n break\n\n print(\"{}\".format(\"\".join(read_buffer)), end=\"\")", "def read_lines(filename=\"\", nb_lines=0):\n with open(filename, encoding=\"utf-8\") as myFile:\n if nb_lines <= 0:\n print(myFile.read(), end=\"\")\n for i in range(nb_lines):\n print(myFile.readline(), end=\"\")", "def read(self, n):\n return self.file.read(n)", "def read_lines(filename=\"\", nb_lines=0):\n with open(filename, 'r', encoding='utf8') as f:\n if nb_lines <= 0:\n print(f.read(), end=\"\")\n else:\n for line in f:\n if nb_lines == 0:\n break\n print(line, end=\"\")\n nb_lines -= 1", "def tail(filepath, n):\n with open(filepath) as file_fd:\n lines = ''.join(file_fd.readlines())\n lines = lines.splitlines()[-n:]\n return lines", "def print_a_line(line_count, f):\n\tprint line_count, f.readline()", "def read_block(f, offset, length, delimiter=None):\n if delimiter:\n f.seek(offset)\n seek_delimiter(f, delimiter, 2**16)\n start = f.tell()\n length -= start - offset\n\n f.seek(start + length)\n seek_delimiter(f, delimiter, 2**16)\n end = f.tell()\n\n offset = start\n length = end - start\n\n f.seek(offset)\n b = f.read(length)\n return b", "def read_lines(filename=\"\", nb_lines=0):\n number_of_lines = __import__('1-number_of_lines').number_of_lines\n count = 0\n num_file_lines = number_of_lines(filename)\n with open(filename, mode='r', encoding=\"utf=8\") as myFile:\n if nb_lines <= 0 or nb_lines >= num_file_lines:\n print(myFile.read(), end='')\n else:\n while count != nb_lines:\n print(myFile.readline(), end='')\n count += 1", "def getRow(self, n, offset=0):\n return self._c[n + offset*self.height::self.height]", "def tailNLinesFromFile(file, n):\n\n if not os.path.isfile(file):\n return None\n\n command = ['tail', '-n', str(n), file]\n\n output = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0]\n\n return output.split('\\n')", "def readMultipleFileLinesAndPositions(filePath,startPosition=None, bytesToRead=1): \n \n f = open(filePath, 'rb') \n \n if not startPosition is None: \n f.seek(startPosition) \n \n lines = 
f.readlines(bytesToRead) \n position = f.tell() \n \n f.close() \n \n return lines, position", "def tail(filep, n=10):\n with open(filep) as f:\n return list(deque(f, maxlen=n))", "def print_a_line(line_count, f):\n print (line_count, f.readline())", "def FileRead(offset, bytes):\r\n return _hiew.HiewGate_FileRead(offset, bytes)" ]
[ "0.6995713", "0.69830865", "0.6557249", "0.6479133", "0.6015872", "0.6004799", "0.5924873", "0.5825385", "0.58244157", "0.58227545", "0.5806487", "0.5802106", "0.5796131", "0.5785858", "0.577081", "0.5706994", "0.56648135", "0.5641311", "0.5623573", "0.5615154", "0.56085676", "0.5586863", "0.55723643", "0.5567044", "0.55544454", "0.5546624", "0.5536878", "0.55067647", "0.54938257", "0.5475843" ]
0.75444746
0
Returns number of subnets, given the breakdown; or -1 if breakdown doesn't work.
def calculate_subnets(total, breakdown):
    sanity_percent = 0  # if this isn't 100% by the end, we got issues.
    subnets = 0
    for nodep, netp in breakdown:
        sanity_percent += nodep
        if (sanity_percent > 100):
            return -1
        subtotal = int(total * .01 * nodep)
        groupby = int(254 * .01 * netp)
        subnets += math.ceil(subtotal/groupby)
    if (sanity_percent < 100):
        return -1
    return subnets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_country_count():\n numbers=country_populations.split('\\n')\n count_numbers= len(numbers)-1\n return count_numbers", "def getStackCountWith(b):\n options = [t for t in boxes if canStack(b,t)]\n _,height,_ = b\n if not options:\n return height\n return height + max(getStackCountWith(t) for t in options)", "def test_unordered_subnets(self):\n\n self._test_find_next_subnet(\n network=\"172.31.0.0/16\",\n subnets=[\"172.31.48.0/20\", \"172.31.0.0/20\", \"172.31.16.0/20\", \"172.31.32.0/20\"],\n requests=[24],\n expected=[\"172.31.64.0/24\"],\n )", "def total_height_blocks(validator):\n res = 0\n for bhash, b in validator.processed.items():\n if isinstance(b, Block):\n res += 1\n return res", "def part_2(ranges: 'RangeSet', total_ips_count: int = 1 << 32) -> int:\n\n allowed_count = total_ips_count - len(ranges)\n print(f\"part 2: there are total {allowed_count} allowed IPs\")\n return allowed_count", "def test_one_subnet(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/16\", \n subnets=[\"10.0.0.0/24\"],\n requests=[24],\n expected=[\"10.0.1.0/24\"],\n )", "def subnets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"subnets\")", "def get_sshuttle_args_count(proc_name):\n procs = subprocess.check_output(['ps', '-eo', 'comm,args']).splitlines()\n name_procs = [proc for proc in procs if proc_name.encode() in proc]\n\n if len(name_procs) > 1:\n return -1\n elif len(name_procs) == 0:\n return 0\n else:\n nets = re.split('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}/\\d{1,3}', name_procs[0])\n return len(nets)-1", "def get_load_balancer_managed_outbound_ipv6_count(self) -> Union[int, None]:\n count_ipv6 = self.raw_param.get('load_balancer_managed_outbound_ipv6_count')\n\n if self.decorator_mode == DecoratorMode.CREATE:\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None\n ):\n count_ipv6 = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if (\n not self.get_load_balancer_outbound_ips() and\n not self.get_load_balancer_outbound_ip_prefixes() and\n count_ipv6 is None\n ):\n if (\n self.mc and\n self.mc.network_profile and\n self.mc.network_profile.load_balancer_profile and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None\n ):\n count_ipv6 = (\n self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6\n )\n\n return count_ipv6", "def split_count(self) -> int:\n return int(self.graph_tuple_stats.split_count or 0)", "def get_contracts_count():\n url = 'http://www.base.gov.pt/base2/rest/contratos'\n\n response = requests.get(url, headers={'Range': 'items=0-24'})\n\n # should be \"items 0-%d/%d\", we want the second %d that represents the\n # total\n results_range = response.headers['content-range']\n _, count = results_range.split('/')\n\n return int(count)", "def get_num_bags():\n with open('adventofcode/twentytwenty/static_data/day7.txt', 'r') as f:\n lines = f.readlines()\n\n rules, bags = format_data(lines)\n\n total = 0\n included_bags, num_bags = run_recursion([BAG_TYPE], bags, rules, total)\n # print(included_bags)\n\n return num_bags", "def _count_occupied_seats(grid: List[List[str]]) -> int:\n 
total = 0\n for row in grid:\n total += row.count('#')\n return total", "def street_segment_count(Gu):\n if nx.is_directed(Gu): # pragma: no cover\n msg = \"`Gu` must be undirected\"\n raise ValueError(msg)\n return len(Gu.edges)", "def GetNumberCodeBlocks(separators):\n num_blocks = len(separators) + 1\n assert num_blocks >= 2\n return num_blocks", "def getSegmentCount(self) -> int:\n ...", "def _cv_len(cv, X, y):\n return len(cv) if not SK18 else cv.get_n_splits(X, y)", "def subnets(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"subnets\")", "def subnets(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"subnets\")", "def count_containers(lines: Lines) -> int:\n rules = parse_rules(lines)\n allowed_containers = containers(\"shiny gold\", rules)\n assert allowed_containers is not None\n return len(allowed_containers) - 1", "def subnets(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"subnets\")", "def get_neighbours_count(self, cell: Position) -> int:\n possible_neighbours = self.get_neighbours(cell)\n return sum(self.is_alive(n) for n in possible_neighbours)", "def find_total_numbeats(nb1, nb2, nb3, nb4, nb5, nb6):\n numbeats = nb1 + nb2 + nb3 + nb4 + nb5 + nb6\n\n logging.info('Calculated total number of beats: %s', numbeats)\n return numbeats", "def Test_NumSegments(Daten):\n N_Leitungen = len(Daten.PipeSegments)\n\n return N_Leitungen", "def subnets(self) -> Sequence[str]:\n return pulumi.get(self, \"subnets\")", "def get_net_adapters_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetNetAdaptersCount', self.handle)", "def subnet_prefix_length(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"subnet_prefix_length\")", "def get_number_of_soil_levels(description):\n if description == \"none\":\n return \"0\"\n else:\n return \"5\"", "def countSubDomain(subdomain):\r\n if not subdomain:\r\n return 0\r\n else:\r\n return len(subdomain.split('.'))", "def number_of_containing_bags(self) -> int:\n\n bag_count = 0\n for sub_bag_count, sub_bag_color in self.containing_bags:\n bag_count += sub_bag_count\n bag_count += (\n sub_bag_count * bag_registry[sub_bag_color].number_of_containing_bags\n )\n return bag_count" ]
[ "0.55934674", "0.5563919", "0.5528462", "0.5488039", "0.54619956", "0.5447456", "0.53395575", "0.5290311", "0.52890843", "0.52565587", "0.52516896", "0.5245732", "0.5244731", "0.5234548", "0.5229551", "0.522947", "0.5148768", "0.51222277", "0.51222277", "0.5111655", "0.5100949", "0.5090096", "0.5058145", "0.5044439", "0.5036617", "0.5029722", "0.5029219", "0.50194985", "0.5017681", "0.50088906" ]
0.7305358
0
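A worked example of the `calculate_subnets` entry above may help; the node total and breakdown percentages below are made-up values, and the 254 hosts-per-subnet figure comes straight from the function body.

import math

# Hypothetical inputs: total = 500 nodes, breakdown = [(60, 50), (40, 25)]
#   group 1: 60% of nodes -> 300 hosts, grouped int(254 * 0.50) = 127 per subnet -> ceil(300/127) = 3
#   group 2: 40% of nodes -> 200 hosts, grouped int(254 * 0.25) =  63 per subnet -> ceil(200/63)  = 4
# The node percentages sum to exactly 100, so calculate_subnets(500, [(60, 50), (40, 25)]) returns 3 + 4 = 7.
print(math.ceil(300 / 127) + math.ceil(200 / 63))  # -> 7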
Method to pad PIL Image on all sides with constant `fill` value.
def pad(img, padding, fill=0, mode='constant'):
    check_type(img)

    if not isinstance(padding, (numbers.Number, tuple)):
        raise TypeError('Got inappropriate padding arg')
    if not isinstance(fill, (numbers.Number, str, tuple)):
        raise TypeError('Got inappropriate fill arg')

    if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:
        raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))

    assert mode in ['constant', ], 'Padding mode should be constant'
    return ImageOps.expand(img, border=padding, fill=fill)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad(img, padding, fill=0, padding_mode='constant'):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if not isinstance(padding, (numbers.Number, tuple)):\n raise TypeError('Got inappropriate padding arg')\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError('Got inappropriate fill arg')\n if not isinstance(padding_mode, str):\n raise TypeError('Got inappropriate padding_mode arg')\n\n if isinstance(padding, Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \\\n 'Padding mode should be either constant, edge, reflect or symmetric'\n\n # if padding_mode == 'constant':\n # aug = iaa.Pad(px=padding, pad_mode=padding_mode, pad_cval=fill, keep_size=False)\n # return aug.augment_image(img)\n # else:\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, Sequence) and len(padding) == 2:\n pad_top = pad_bottom = padding[0]\n pad_left = pad_right = padding[1]\n if isinstance(padding, Sequence) and len(padding) == 4:\n pad_top = padding[0]\n pad_left = padding[1]\n pad_bottom = padding[2]\n pad_right = padding[3]\n\n aug = iaa.CropAndPad(px=(pad_top, pad_right, pad_bottom, pad_left), pad_mode=padding_mode, pad_cval=fill,\n keep_size=False)\n # aug = iaa.CropAndPad(px=(pad_top, pad_right, pad_bottom, pad_left), pad_mode=padding_mode, keep_size=False)\n return aug.augment_image(img)\n\n # # RGB image\n # if len(img.shape) == 3:\n # aug = iaa.Pad(px=((pad_top, pad_bottom), (pad_left, pad_right)),\n # pad_mode=padding_mode, keep_size=False)\n # return aug.augment_image(img)\n # # Grayscale image\n # if len(img.shape) == 2:\n # aug = iaa.Pad(px=((pad_top, pad_bottom), (pad_left, pad_right)),\n # pad_mode=padding_mode, keep_size=False)\n # return aug.augment_image(img)\n\n # return img", "def pad(img, padding, fill=0, padding_mode='constant', data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n if not isinstance(padding, (numbers.Number, list, tuple)):\n raise TypeError('Got inappropriate padding arg')\n if not isinstance(fill, (numbers.Number, str, list, tuple)):\n raise TypeError('Got inappropriate fill arg')\n if not isinstance(padding_mode, str):\n raise TypeError('Got inappropriate padding_mode arg')\n\n if isinstance(padding, (list, tuple)) and len(padding) not in [2, 4]:\n raise ValueError(\n \"Padding must be an int or a 2, or 4 element tuple, not a \"\n + f\"{len(padding)} element tuple\"\n )\n\n assert padding_mode in [\n 'constant',\n 'edge',\n 'reflect',\n 'symmetric',\n ], 'Padding mode should be either constant, edge, reflect or symmetric'\n\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n elif len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n else:\n pad_left = padding[0]\n pad_top = padding[1]\n pad_right = padding[2]\n pad_bottom = padding[3]\n\n padding = [pad_left, pad_right, pad_top, pad_bottom]\n\n if padding_mode == 'edge':\n padding_mode = 'replicate'\n elif padding_mode == 'symmetric':\n raise ValueError('Do not support symmetric mode')\n\n img = img.unsqueeze(0)\n # 'constant', 'reflect', 'replicate', 'circular'\n img = F.pad(\n img,\n pad=padding,\n mode=padding_mode,\n value=float(fill),\n data_format='N' + data_format,\n )\n\n return img.squeeze(0)", "def 
pad(img, padding, fill=0, padding_mode='constant'):\n if not _is_numpy_image(img):\n raise TypeError('img should be nparray Image. Got {}'.format(type(img)))\n\n if not isinstance(padding, (numbers.Number, tuple)):\n raise TypeError('Got inappropriate padding arg')\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError('Got inappropriate fill arg')\n if not isinstance(padding_mode, str):\n raise TypeError('Got inappropriate padding_mode arg')\n\n if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \\\n 'Padding mode should be either constant, edge, reflect or symmetric'\n\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, collections.Sequence) and len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n if isinstance(padding, collections.Sequence) and len(padding) == 4:\n pad_left = padding[0]\n pad_top = padding[1]\n pad_right = padding[2]\n pad_bottom = padding[3]\n\n if padding_mode == 'constant':\n if isinstance(fill, int):\n fill_R = fill_G = fill_B = fill\n else:\n fill_R = fill[0]\n fill_G = fill[1]\n fill_B = fill[2]\n img_b, img_g, img_r = cv2.split(img)\n img_b = np.pad(img_b, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode, constant_values=((fill_B, fill_B),(fill_B, fill_B)))\n img_g = np.pad(img_g, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode, constant_values=((fill_G, fill_G),(fill_G, fill_G)))\n img_r = np.pad(img_r, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode, constant_values=((fill_R, fill_R),(fill_R, fill_R)))\n img = cv2.merge([img_b,img_g,img_r])\n else:\n if len(img.shape) == 3:\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)\n\n return img", "def _pad_image(self, img: ndarray, pad_width: int = 10) -> ndarray:\n self.padded_img = np.zeros(\n (img.shape[0] + pad_width*2, img.shape[1]+pad_width*2))\n self.padded_img[pad_width:-pad_width, pad_width:-pad_width] = img\n return self.padded_img", "def _pad_simple(array, pad_width, fill_value=None):\n # Allocate grown array\n new_shape = tuple(\n left + size + right\n for size, (left, right) in zip(array.shape, pad_width)\n )\n order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order\n padded = np.empty(new_shape, dtype=array.dtype, order=order)\n\n if fill_value is not None:\n padded.fill(fill_value)\n\n # Copy old array into correct space\n original_area_slice = tuple(\n slice(left, left + size)\n for size, (left, right) in zip(array.shape, pad_width)\n )\n padded[original_area_slice] = array\n\n return padded, original_area_slice", "def pad(self, *args, **kwargs):\n return _image.image_pad(self, *args, **kwargs)", "def pad(x, padding, fill_value=0):\n input_shape = x.shape\n output_shape = []\n indices = []\n\n for dim, pad in enumerate(padding):\n try:\n left_pad, right_pad = pad\n except TypeError:\n left_pad = right_pad = pad\n output_shape.append(left_pad + input_shape[dim] + right_pad)\n indices.append(slice(left_pad, left_pad + input_shape[dim]))\n\n if fill_value:\n out = T.ones(output_shape) * fill_value\n else:\n out = T.zeros(output_shape)\n return T.set_subtensor(out[tuple(indices)], x)", "def padImage(image, padList):\r\n\r\n #pad along far x:<---->\r\n padFarX= 
np.zeros((image.shape[0], image.shape[1], padList[0]))\r\n image= np.concatenate((image, padFarX), axis=2)\r\n\r\n #pad along far y\r\n padFarY= np.zeros((image.shape[0], padList[1], image.shape[2]))\r\n image= np.concatenate((image, padFarY), axis=1)\r\n\r\n #pad along far z\r\n padFarZ= np.zeros((padList[2], image.shape[1], image.shape[2]))\r\n image= np.concatenate((image, padFarZ), axis=0)\r\n\r\n #pad along close x, adjust center\r\n padCloseX= np.zeros((image.shape[0], image.shape[1], padList[3]))\r\n image= np.concatenate((padCloseX, image), axis=2)\r\n\r\n #pad along close y adjust center\r\n padCloseY= np.zeros((image.shape[0], padList[4], image.shape[2]))\r\n image= np.concatenate((padCloseY, image), axis=1)\r\n\r\n #pad along close z, adjust center\r\n padCloseZ= np.zeros((padList[5], image.shape[1], image.shape[2]))\r\n image= np.concatenate((padCloseZ, image), axis=0)\r\n\r\n\r\n #print \"PADDED IMAGE SHAPE: \" + str(image.shape)\r\n return image", "def pil_pad_image(img, v_pad_before, v_pad_after, h_pad_before, h_pad_after, cval=None):\n # type: (PImage.Image, int, int, int, int, tuple) -> PImage.Image\n\n width = img.width + h_pad_before + h_pad_after\n height = img.height + v_pad_before + v_pad_after\n mode = img.mode\n\n if width == img.width and height == img.height:\n return img\n\n # Make sure the cval is in the correct format if None default to black\n if cval is not None:\n if isinstance(cval, float):\n cval = int(round(cval))\n elif isinstance(cval, int):\n cval = cval\n else:\n cval = np.round(cval).astype(dtype=np.int32)\n cval = tuple(cval)\n else:\n cval = 0\n\n try:\n padded_img = PImage.new(mode=mode, size=(width, height), color=cval)\n padded_img.paste(img, box=(h_pad_before, v_pad_before))\n except TypeError as e:\n print 'ERROR: Could not create new PIL image PImage.new(mode={}, size={}, color={}), error: {}'.format(mode, (width, height), cval, e.message)\n raise e\n\n return padded_img", "def fill_image(im):\n width, height = im.size\n # Select the larger value of the length and width of the original picture\n # as the radius of the nine palace grid of the new picture\n new_image_len = width if width > height else height\n # Create a white canvas\n new_image = Image.new(im.mode, (new_image_len, new_image_len), color=\"white\")\n # Paste the original image on the canvas at the center\n if width > height:\n new_image.paste(im, (0, int((new_image_len - height) / 2)))\n else:\n new_image.paste(im, (int((new_image_len - width) / 2), 0))\n return new_image", "def pad_image(img, output_path, pad_size=[8,8,8,8], buckets=None):\n top, left, bottom, right = pad_size\n old_im = Image.open(img)\n old_size = (old_im.size[0] + left + right, old_im.size[1] + top + bottom)\n new_size = get_new_size(old_size, buckets)\n new_im = Image.new(\"RGB\", new_size, (255,255,255))\n new_im.paste(old_im, (left, top))\n new_im.save(output_path)", "def pad(img, pad_size=32):\n\n if pad_size == 0:\n return img\n\n height, width = img.shape[:2]\n\n if height % pad_size == 0:\n y_min_pad = 0\n y_max_pad = 0\n else:\n y_pad = pad_size - height % pad_size\n y_min_pad = int(y_pad / 2)\n y_max_pad = y_pad - y_min_pad\n\n if width % pad_size == 0:\n x_min_pad = 0\n x_max_pad = 0\n else:\n x_pad = pad_size - width % pad_size\n x_min_pad = int(x_pad / 2)\n x_max_pad = x_pad - x_min_pad\n\n img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_REFLECT_101)\n\n return img, (x_min_pad, y_min_pad, x_max_pad, y_max_pad)", "def pad_image(img, target_size):\r\n 
rows_missing = target_size[0] - img.shape[2]\r\n cols_missing = target_size[1] - img.shape[3]\r\n padded_img = np.pad(img, ((0, 0), (0, 0), (0, rows_missing), (0, cols_missing)), 'constant')\r\n return padded_img", "def add_padding(im, pad):\n\n return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric')", "def padding(image, padded_size):\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n\n padded_image = np.zeros((image_row + padded_size*2, image_col + padded_size*2)) #matriz de imagen con padding en zeros\n print(\"Padded image zeros:\")\n print(padded_image)\n\n padded_image[padded_size:padded_size + image_row, padded_size:padded_size + image_col] = image #matriz de imagen con padding\n print(\"Padded image:\")\n print(padded_image)\n\n \n return padded_image", "def autofit_fill(image, dest_w, dest_h, fill=None):\n x0, y0, x1, y1 = max_bounds(dest_w, dest_h, image.size[0], image.size[1])\n\n if fill is None:\n fill_color = (255, 255, 255, 0) # transparency\n else:\n fill_color = fill + (255, ) # solid\n\n canvas = Image.new(mode='RGBA', size=(dest_w, dest_h), color=fill_color)\n canvas.paste(image.resize((x1 - x0, y1 - y0), Image.ANTIALIAS), (x0, y0))\n\n return canvas", "def run_padding(self):\n\n image_padded, mask, self.pad_to_right, self.pad_to_bottom = gen_padded_image_and_mask (os.path.join('utils_dfn/temp', self.file_name_with_ext),\n self.new_height, self.new_width)\n cv2.imwrite(os.path.join('utils_dfn/img', self.file_name + '_padded_resized.png'), image_padded)\n cv2.imwrite(os.path.join('utils_dfn/mask', self.file_name + '_mask.png'), mask)", "def padding(img, n):\n img = np.pad(img, [(n, n), (n, n)], mode='constant', constant_values=0)\n\n return img", "def image_pad(image, pad_width=None, axis=0, mode='symmetric'):\n hei, wid = image.shape[0], image.shape[1]\n\n if pad_width is None:\n th = hei // 10\n tw = wid // 10\n pad_width = ((th, th), (tw, tw), (0, 0))\n if axis == 0:\n if type(pad_width[0]) == tuple:\n pad_width = (pad_width[0], (0, 0), (0, 0))\n else:\n pad_width = (pad_width, (0, 0), (0, 0))\n if axis == 1:\n if type(pad_width[0]) == tuple:\n pad_width = ((0, 0), pad_width[1], (0, 0))\n else:\n pad_width = ((0, 0), pad_width, (0, 0))\n if len(image.shape) == 3:\n newimage = np.pad(image, pad_width, mode)\n elif len(image.shape) == 2:\n newimage = np.squeeze(np.pad(image[:, :, np.newaxis], pad_width, mode))\n\n return cv2.resize(newimage, (wid, hei), interpolation=cv2.INTER_NEAREST)", "def pad_to(image,w,h):\n iw,ih = image.shape\n wd = int(w-iw)\n assert wd>=0\n w0 = wd/2\n w1 = wd-w0\n hd = int(h-ih)\n assert hd>=0\n h0 = hd/2\n h1 = hd-h0\n result = zeros((w,h))\n result[w0:w0+iw,h0:h0+ih] = image\n return result", "def pad_to_square(image, min_size, **pad_kwargs):\n\n h, w = image.shape[:2]\n\n if h >= min_size and w >= min_size:\n return image\n\n top = bottom = left = right = 0\n\n if h < min_size:\n top = (min_size - h) // 2\n bottom = min_size - h - top\n if w < min_size:\n left = (min_size - w) // 2\n right = min_size - w - left\n\n return np.pad(image,\n ((top, bottom),\n (left, right),\n (0, 0)), **pad_kwargs)", "def pad(self, pad_width, mode=\"constant\", constant_values=0):\r\n destination = np.zeros(\r\n (\r\n self.dataset.count,\r\n self.__arr.shape[1] + 2 * pad_width,\r\n self.__arr.shape[2] + 2 * pad_width,\r\n ),\r\n self.__arr.dtype,\r\n )\r\n\r\n for i in range(0, self.dataset.count):\r\n destination[i], transform = rasterio.pad(\r\n self.__arr[i],\r\n self.dataset.transform,\r\n pad_width,\r\n 
mode,\r\n constant_values=constant_values,\r\n )\r\n\r\n self.__arr = destination\r\n self.__update_dataset(self.dataset.crs, transform, nodata=self.dataset.nodata)", "def pad_images(_input_image_paths : list[str], _output_image_dir : str, \\\n _pad_colour : tuple[int,int,int]) -> None:\n for image in _input_image_paths:\n with Image.open(image) as image_object:\n\n #Rotate the image based on the EXIF data's orientation tag.\n #Ensures that images taller than they are wide are kept as such when padding\n image_object = PIL.ImageOps.exif_transpose(image_object)\n\n old_x,old_y = image_object.size\n bigger_dimension = max(old_x,old_y)\n\n #Figure out how much extra should be added to each of the four sides\n x_additive = y_additive = 0\n if old_x > old_y:\n y_additive = (old_x - old_y)//2\n\n elif old_y > old_x:\n x_additive = (old_y - old_x)//2\n\n #Create a new, larger image with the requested padding colour,\n # and then paste the original image overtop in the correct position\n new_canvas = Image.new(\"RGB\", (bigger_dimension,bigger_dimension), _pad_colour)\n new_canvas.paste(image_object, (x_additive, y_additive))\n new_canvas.save(_output_image_dir + os.path.basename(image))", "def wrap(img, padding):\n if not transforms.functional._is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n if not isinstance(padding, (int, tuple)):\n raise TypeError('Got inappropriate padding arg')\n \n\n if isinstance(padding, tuple) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, tuple) and len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n if isinstance(padding, tuple) and len(padding) == 4:\n pad_left = padding[0]\n pad_top = padding[1]\n pad_right = padding[2]\n pad_bottom = padding[3]\n\n if img.mode == 'P':\n palette = img.getpalette()\n img = np.asarray(img)\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), 'wrap')\n img = Image.fromarray(img)\n img.putpalette(palette)\n return img\n\n img = np.asarray(img)\n # RGB image\n if len(img.shape) == 3:\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), 'wrap')\n # Grayscale image\n if len(img.shape) == 2:\n img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), 'wrap')\n\n return Image.fromarray(img)", "def pad(img, shape):#pad_size=32):\n\n if shape == 0:\n return img\n pad_shape = np.int16(np.ceil((np.array(shape) - np.array(img.shape[:2]))))\n height, width = img.shape[:2]\n\n # if height % shape == 0:\n # y_min_pad = 0\n # y_max_pad = 0\n # else:\n y_pad = pad_shape[0]\n y_min_pad = int(y_pad / 2)\n y_max_pad = y_pad - y_min_pad\n\n # if width % pad_size == 0:\n # x_min_pad = 0\n # x_max_pad = 0\n # else:\n x_pad = pad_shape[1]\n x_min_pad = int(x_pad / 2)\n x_max_pad = x_pad - x_min_pad\n\n # img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_REFLECT_101)\n img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_CONSTANT, value=0)\n\n return img, (x_min_pad, y_min_pad, x_max_pad, y_max_pad)", "def insert_padding(img, pad_h, pad_w):\n global frame_height, frame_width\n padding_3_dims = ((pad_h, pad_h), (pad_w, pad_w), (0, 0))\n # apply padding in the above dimensions with values 0\n padded_img = numpy.pad(img, padding_3_dims, 
'constant', constant_values=0)\n return padded_img", "def impad_to_multiple(self, img, divisor, pad_val=0):\n pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor\n pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor\n # (Channel, Channel, Left, Right, Top, Bottom)\n padding = (0, 0, 0, pad_w - img.shape[1], 0, pad_h - img.shape[0])\n img = F.pad(input=img, pad=padding, mode='constant', value=pad_val)\n return img", "def fillImg(img, fill_colour=(26, 26, 27, 255), size=(1920, 1080)):\n w, h = img.size\n if len(fill_colour) == 3:\n A = 255\n fill_colour = fill_colour + (A,)\n fd_img = Image.new(\"RGBA\", size, fill_colour)\n fd_img.paste(img, ((int((size[0] - w) / 2), int((size[1] - h) / 2))))\n return fd_img", "def _fill_and_warp(src: Tensor, grid: Tensor, mode: str, align_corners: bool, fill_value: Tensor) -> Tensor:\n ones_mask = torch.ones_like(src)\n fill_value = fill_value.to(ones_mask)[None, :, None, None] # cast and add dimensions for broadcasting\n inv_ones_mask = 1 - F.grid_sample(ones_mask, grid, align_corners=align_corners, mode=mode, padding_mode=\"zeros\")\n inv_color_mask = inv_ones_mask * fill_value\n return F.grid_sample(src, grid, align_corners=align_corners, mode=mode, padding_mode=\"zeros\") + inv_color_mask", "def add_white_padding(img, width=WIDTH, height=HEIGHT):\n top = max(0, height)\n right = max(0, width)\n \n result = np.full((top, right), 255)\n\n result[result.shape[0]-img.shape[0]:result.shape[0],:img.shape[1]] = img\n \n return result" ]
[ "0.7385898", "0.73481995", "0.72555906", "0.6851695", "0.6816186", "0.66814405", "0.6671225", "0.66600376", "0.66376644", "0.6515069", "0.65061873", "0.6502234", "0.6499006", "0.64707226", "0.64594185", "0.6350127", "0.63446885", "0.63444966", "0.63180697", "0.6223735", "0.61909693", "0.61769384", "0.61491334", "0.61291015", "0.607934", "0.6071848", "0.6034498", "0.59725434", "0.5919631", "0.59098536" ]
0.7467693
0
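As an illustration of the padding entry above, here is a minimal stand-alone sketch. It calls `PIL.ImageOps.expand` directly, since the `check_type` helper used by the wrapper is defined elsewhere in that codebase; the image size, border width, and fill colour are arbitrary.

from PIL import Image, ImageOps

img = Image.new('RGB', (32, 32), (255, 0, 0))    # dummy 32x32 red image
padded = ImageOps.expand(img, border=4, fill=0)  # constant 4px black border on all sides
print(padded.size)                               # -> (40, 40)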
Adjust brightness of an Image.
def adjust_brightness(img, brightness_factor):
    check_type(img)

    enhancer = ImageEnhance.Brightness(img)
    img = enhancer.enhance(brightness_factor)
    return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjust_brightness(image, delta):\r\n return _clip(image + delta * 255)", "def adjust_brightness(img, brightness_factor):\n _assert_image_tensor(img, 'CHW')\n assert brightness_factor >= 0, \"brightness_factor should be non-negative.\"\n assert _get_image_num_channels(img, 'CHW') in [\n 1,\n 3,\n ], \"channels of input should be either 1 or 3.\"\n\n extreme_target = paddle.zeros_like(img, img.dtype)\n return _blend_images(img, extreme_target, brightness_factor)", "def increase_brightness(image, value=18):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n\n final_hsv = cv2.merge((h, s, v))\n image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return image", "def _adjust_brightness_img(self, results, factor=1.0):\n for key in results.get('img_fields', ['image']):\n img = results[key]\n results[key] = mmcv.adjust_brightness(img,\n factor).astype(img.dtype)", "def change_brightness(image, value):\n\n return change_light(image, value, \"v\")", "def adjust_brightness(img, brightness_factor):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if brightness_factor < 1 or brightness_factor > 5:\n raise ValueError(\n f'brightness_factor({brightness_factor}) is outside of the expected value range (1 <= x <= 5)')\n\n aug = iaa.imgcorruptlike.Brightness(severity=brightness_factor)\n img = aug.augment_image(img)\n return img", "def adjust_brightness_contrast(img, brightness=0., contrast=0.):\n beta = 0\n return cv2.addWeighted(img, 1 + float(contrast) / 100., img, beta, float(brightness))", "def augment_brightness(image):\n rand_brightness = .25 + np.random.uniform()\n image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n image[:, :, 2] = image[:, :, 2] * rand_brightness\n image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)\n return image", "def change_brightness(img,k=0):\n\n img_copy = np.copy(img)\n img_copy = img_copy.astype(int)\n img_copy += k\n\n return img_copy", "def adjust_brightness(img, brightness_factor):\n if not _is_numpy_image(img):\n raise TypeError('img should be CV Image. 
Got {}'.format(type(img)))\n\n im = img.astype(np.float32) * brightness_factor\n im = im.clip(min=0, max=255)\n return im.astype(img.dtype)", "def adjustBrightness(img, fac):\n img2 = np.float32(img) * fac\n img2 = img2.clip(min=0, max=255)\n return np.uint8(img2)", "def change_brightness_conv(image, value):\n image = rescale_image_0255(image)\n image = change_brightness(image, value)\n return rescale_image_01(image)", "def image_brightness(new_bright=0):\n # Scale brightness value\n bright = int(map_range(new_bright, 0, 15, 0x00, 0xFF))\n # Recombine and return a composite RGB888 value\n return (bright << 16) + (bright << 8) + bright", "def adjust_brightness(brightness_factor: float) -> Callable:\n return lambda img: TF.adjust_brightness(img, brightness_factor)", "def change_brightness(image, max_delta):\n return tf.image.adjust_brightness(image, max_delta)", "def _update_brightness(self):\n while self.current_brightness != self.brightness:\n next_color = RGB(r=int(self.color.r * (self.current_brightness/100.0)),\n g=int(self.color.g * (self.current_brightness/100.0)),\n b=int(self.color.b * (self.current_brightness/100.0)))\n self._update_color(next_color)\n diff = self.brightness - self.current_brightness\n # adjust current brightness to +/- 1\n self.current_brightness = self.current_brightness + \\\n (diff) / abs(diff)\n time.sleep(.05)\n # Final update to exact brightness and default if no change in brightness setting\n final_color = RGB(r=int(self.color.r * (self.brightness/100.0)),\n g=int(self.color.g * (self.brightness/100.0)),\n b=int(self.color.b * (self.brightness/100.0)))\n self._update_color(final_color)", "def adjust_brightness(image, mask, gamma):\r\n\r\n\tassert image.shape[:2] == mask.shape and gamma > 0\r\n\r\n\t## to increase the number of channel of the mask to three so that we can apply the masks\r\n\t## to image\r\n\tmasks = np.stack([mask, mask, mask], axis = -1)\r\n\r\n\tscale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])\r\n\r\n\toutput = np.where(masks == 1,\r\n\t\t\t\t\t\t(image / scale) ** (1 / gamma) * scale,\r\n\t\t\t\t\t\timage)\r\n\r\n\t## to make sure the pixel intensity is within the range of uint8\r\n\toutput = np.clip(output, 0, 255).astype(np.uint8)\r\n\r\n\treturn output", "def update(self):\n self._brightness = self._lj.get_load_level(self._index) / 99 * 255", "def adjust_brightness(img, tmp, roi):\n tmp = tmp[roi[1]:roi[1] + roi[3], roi[0]:roi[0] + roi[2]]\n img = img[roi[1]:roi[1] + roi[3], roi[0]:roi[0] + roi[2]]\n tmp_mean = np.full(img.shape, np.mean(tmp))\n img_mean = np.full(img.shape, np.mean(img))\n std_ = np.std(img)\n z_score = np.true_divide((img - tmp_mean), std_)\n d_mean = np.mean(img) - np.mean(tmp)\n if d_mean < 0.1:\n shifted_img = (z_score * std_) + img_mean\n else:\n shifted_img = -(z_score * std_) + img_mean\n\n return shifted_img.astype(dtype=np.uint8)", "def enhance_brightness(self, delta, p=None):\n if self._max_aug_nums>0:\n if self._nums>self._max_aug_nums:\n return self\n self._nums += 1\n if p is None:\n p = self._p\n self.image = enhance_brightness(self.image, delta, p)\n return self", "def brightness(self, factor):\n\n channels = [\"r\", \"g\", \"b\"]\n total_lumes = clamp(self.get_luminance() + (255.0 * factor) - 255.0, 0.0, 255.0)\n\n if total_lumes == 255.0:\n # white\n self.r, self.g, self.b = 0xFF, 0xFF, 0xFF\n elif total_lumes == 0.0:\n # black\n self.r, self.g, self.b = 0x00, 0x00, 0x00\n else:\n # Adjust Brightness\n pts = (total_lumes - 0.299 * self.r - 0.587 * self.g - 0.114 * self.b)\n 
slots = set(channels)\n components = [float(self.r) + pts, float(self.g) + pts, float(self.b) + pts]\n count = 0\n for c in channels:\n overage, components[count] = self._get_overage(components[count])\n if overage:\n slots.remove(c)\n components = list(self._distribute_overage(components, overage, slots))\n count += 1\n\n self.r = clamp(round_int(components[0]), 0, 255) & 0xFF\n self.g = clamp(round_int(components[1]), 0, 255) & 0xFF\n self.b = clamp(round_int(components[2]), 0, 255) & 0xFF", "def brighten(image_path, factor=1.5):\n BasicTransform.darken(image_path, factor)", "def darken(image_path, factor=0.5):\n BasicTransform.convert_image(image_path)\n\n with Image.open(image_path) as img:\n filter = ImageEnhance.Brightness(img)\n new_image = filter.enhance(factor)\n new_image.save(image_path)", "def shift_brightness_contrast(image, brightness=-100, contrast=300): \n\tdef vect(a):\n\t\tc = contrast\n\t\tb = 100 * brightness\n\t\tres = ((a - 127.5) * c + 127.5) + b\n\t\tif res <0 :\n\t\t\treturn 0\n\t\tif res > 255:\n\t\t\treturn 255\n\t\treturn res\n\t\n\ttransform = np.vectorize(vect)\n\tdata = transform(fromimage(image)).astype(np.uint8)\n\treturn toimage(data)", "def print_brightness(image):\n target = image.copy()\n for y in range(len(image)):\n for x in range(len(image[y])):\n rgb = image[y, x]\n target[y, x] = brightness(rgb)\n\n return target", "def setBrightness(self, brightness):\n raise NotImplementedError", "def augment_brightness_camera_images(image):\n\n # The HSV - Hue Saturation Value representation converts the image from RGB space to HSV space\n # where the Value(brightness) represents the brightness that is randomly increased\n\n image1 = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n random_bright = .25+np.random.uniform()\n #print(random_bright)\n image1[:,:,2] = image1[:,:,2]*random_bright\n image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)\n return image1", "def adjust(im, col, startcol=None):\n\tif startcol is None:\n\t\tstartcol = meancol(im)\n\trband, gband, bband = im.split()\n\trbri, gbri, bbri = ImageEnhance.Brightness(rband), ImageEnhance.Brightness(gband), ImageEnhance.Brightness(bband)\n\trband = rbri.enhance((float(col[0]) / float(startcol[0])))\n\tgband = gbri.enhance((float(col[1]) / float(startcol[1])))\n\tbband = bbri.enhance((float(col[2]) / float(startcol[2])))\n\tim = Image.merge(\"RGB\",(rband, gband, bband))\n\treturn im", "def adjust_saturation(img, saturation_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Color(img)\n img = enhancer.enhance(saturation_factor)\n return img", "def setBrightness(self, brightness):\n self._logger.debug(\"setBrightness\")" ]
[ "0.82204413", "0.78174996", "0.77475053", "0.7690998", "0.76791644", "0.7669285", "0.76448554", "0.75564486", "0.75472367", "0.7507929", "0.74771297", "0.74748397", "0.74423105", "0.7440186", "0.7276459", "0.70266074", "0.70151705", "0.69986284", "0.698788", "0.6980698", "0.6859017", "0.68559825", "0.68450487", "0.6803455", "0.67671335", "0.6698116", "0.6694176", "0.66893065", "0.6682707", "0.66553515" ]
0.8465688
0
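A small stand-alone sketch of the brightness entry above, again driving `PIL.ImageEnhance.Brightness` directly because the wrapper's `check_type` guard is not part of this dump; the pixel value and factors are illustrative only.

from PIL import Image, ImageEnhance

img = Image.new('RGB', (8, 8), (100, 100, 100))  # flat mid-grey test image
enhancer = ImageEnhance.Brightness(img)
darker = enhancer.enhance(0.5)    # factor < 1 darkens, 0.0 gives a black image
brighter = enhancer.enhance(1.5)  # factor > 1 brightens, 1.0 returns the original
print(darker.getpixel((0, 0)), brighter.getpixel((0, 0)))  # roughly (50, 50, 50) (150, 150, 150)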
Adjust contrast of an Image.
def adjust_contrast(img, contrast_factor):
    check_type(img)

    enhancer = ImageEnhance.Contrast(img)
    img = enhancer.enhance(contrast_factor)
    return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjust_contrast(img, contrast_factor):\n _assert_image_tensor(img, 'chw')\n assert contrast_factor >= 0, \"contrast_factor should be non-negative.\"\n\n channels = _get_image_num_channels(img, 'CHW')\n dtype = img.dtype if paddle.is_floating_point(img) else paddle.float32\n if channels == 1:\n extreme_target = paddle.mean(\n img.astype(dtype), axis=(-3, -2, -1), keepdim=True\n )\n elif channels == 3:\n extreme_target = paddle.mean(\n to_grayscale(img).astype(dtype), axis=(-3, -2, -1), keepdim=True\n )\n else:\n raise ValueError(\"channels of input should be either 1 or 3.\")\n\n return _blend_images(img, extreme_target, contrast_factor)", "def adjust_contrast(img, contrast_factor):\n if not _is_numpy_image(img):\n raise TypeError('img should be CV Image. Got {}'.format(type(img)))\n im = img.astype(np.float32)\n mean = round(cv2.cvtColor(im, cv2.COLOR_RGB2GRAY).mean())\n im = (1-contrast_factor)*mean + contrast_factor * im\n im = im.clip(min=0, max=255)\n return im.astype(img.dtype)", "def adjust_contrast(img, contrast_factor):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if contrast_factor < 1 or contrast_factor > 5:\n raise ValueError(\n f'contrast_factor({contrast_factor}) is outside of the expected value range (1 <= x <= 5)')\n\n aug = iaa.imgcorruptlike.Contrast(severity=contrast_factor)\n img = aug.augment_image(img)\n return img", "def adjust_brightness_contrast(img, brightness=0., contrast=0.):\n beta = 0\n return cv2.addWeighted(img, 1 + float(contrast) / 100., img, beta, float(brightness))", "def IncreaseContrast(self, image, percentage):\n # copyImage = [ [0...0][0...0][0...0][0...0]]\n # copyImage = np.zeros(image.shape, image.dtype) # dtype is the data type ex:int\n # newImg = loop x,y,c (old_image) * a + b\n\n a = 1.5 # [1.0..3.0] Responsible for Contrast #TODO GET PARAMS\n b = 35 # [0.. 
100] Responsible for Brightness #TODO GET PARAMS\n newImg = cv2.convertScaleAbs(image, alpha=a, beta=b)\n return newImg", "def _adjust_contrast_img(self, results, factor=1.0):\n for key in results.get('img_fields', ['image']):\n img = results[key]\n results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype)", "def AutoContrast(img: Image, _: float) -> Image:\n return PIL.ImageOps.autocontrast(img)", "def augment(self, image):\n\n contrast = ImageEnhance.Contrast(image)\n return contrast.enhance(self._contrast)", "def enhance_contrast(img):\n for y in range(frame_height):\n for x in range(frame_width):\n if img[y, x, 1] > 100:\n # range of blues to limit of puppet motion 255/(frame_width - 150)\n img[y][x][0] = x*0.4\n if img[y, x, 1] <= 100:\n img[y][x][2] = img[y][x][2]*0.5\n cv2.imwrite(\"contrasted.png\", img)", "def adjustContrast(img, fac):\n img2 = np.float32(img)\n mean = round(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY).mean())\n img2 = (1 - fac) * mean + fac * img2\n img2 = np.uint8(img2.clip(min=0, max=255))\n return img2", "def adjust_contrast(image, factor):\r\n mean = image.mean(axis=0).mean(axis=0)\r\n return _clip((image - mean) * factor + mean)", "def contrast_adjust(image, alpha=1.3, beta=20):\n newimage = image.astype(np.float32) * alpha + beta\n\n if type(image[0, 0, 0]) == np.uint8:\n newimage[newimage < 0] = 0\n newimage[newimage > 255] = 255\n return np.uint8(newimage)\n else:\n newimage[newimage < 0] = 0\n newimage[newimage > 1] = 1.\n return newimage", "def enhance_contrast(img):\n # CLAHE (Contrast Limited Adaptive Histogram Equalization)\n clahe = cv2.createCLAHE(clipLimit=3., tileGridSize=(8, 8))\n\n lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB) # convert from BGR to LAB color space\n l, a, b = cv2.split(lab) # split on 3 different channels\n\n l2 = clahe.apply(l) # apply CLAHE to the L-channel\n\n lab = cv2.merge((l2, a, b)) # merge channels\n img2 = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR) # convert from LAB to BGR\n\n return img2", "def adjust_contrast(constrast_factor: float) -> Callable:\n return lambda img: TF.adjust_contrast(img, constrast_factor)", "def image_local_enhance_contrast(image: np.ndarray):\n \n #  Resize image to a shape of (48, 48)\n image = image_as_square(image)\n\n image = filters.rank.enhance_contrast(image, morphology.disk(2))\n\n #  Resize the iamge back to a shape of (2304, )\n return image_as_array(image)", "def img_contrast(img):\n\n return img.max()-img.min()", "def shift_brightness_contrast(image, brightness=-100, contrast=300): \n\tdef vect(a):\n\t\tc = contrast\n\t\tb = 100 * brightness\n\t\tres = ((a - 127.5) * c + 127.5) + b\n\t\tif res <0 :\n\t\t\treturn 0\n\t\tif res > 255:\n\t\t\treturn 255\n\t\treturn res\n\t\n\ttransform = np.vectorize(vect)\n\tdata = transform(fromimage(image)).astype(np.uint8)\n\treturn toimage(data)", "def increase_contrast(img, channels=(0, 1, 2)):\n equalized = img.copy()\n\n for k in channels:\n equalized[:, :, k] = cv2.equalizeHist(img[:, :, k])\n\n return equalized", "def adjust_brightness(image, delta):\r\n return _clip(image + delta * 255)", "def adjust_brightness(img, brightness_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Brightness(img)\n img = enhancer.enhance(brightness_factor)\n return img", "def contrast_brightness_demo(img, ctr, bri): # 亮度b 对比度c\n blank = np.zeros(img.shape, img.dtype)\n out = cv.addWeighted(img, ctr, blank, 1 - ctr, bri)\n\n return out", "def estimate_contrast(self):\n\n if self.avg_standard_lum == 0:\n self.estimate_brightness()\n\n intensity = 
self._get_intensity()\n\n self.avg_contrast = math.sqrt(\n (np.sum(intensity ** 2) / (self.img_height * self.img_width)) - (self.avg_standard_lum ** 2))", "def enhanceContrast(image, mask, target_path, name, save=False):\n \n\n \n # Contrast stretching\n p2, p98 = np.percentile(image, (2, 98))\n image_rescale = exposure.rescale_intensity(image, in_range=(p2, p98))\n \n # Equalization\n image_eq = exposure.equalize_hist(image)\n \n # Adaptive Equalization\n image_adapteq = exposure.equalize_adapthist(image, clip_limit=0.03)\n \n # Display results\n fig = plt.figure(figsize=(19, 13))\n axes = np.zeros((2, 4), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 4, 1)\n for i in range(1, 4):\n axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])\n for i in range(0, 4):\n axes[1, i] = fig.add_subplot(2, 4, 5+i)\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image, mask, mask_cmap, img_cmap,\n axes[:, 0])\n ax_image.set_title('Low contrast image')\n \n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_rescale, mask, mask_cmap, img_cmap,\n axes[:, 1])\n ax_image.set_title('Contrast stretching')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_eq, mask, mask_cmap, img_cmap,\n axes[:, 2])\n ax_image.set_title('Histogram equalization')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_adapteq,mask, mask_cmap, img_cmap,\n axes[:, 3])\n ax_image.set_title('Adaptive equalization')\n \n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n \n # prevent overlap of y-axis labels\n fig.tight_layout()\n if save:\n plt.savefig(os.path.join(target_path, name))\n else:\n plt.show()\n plt.close()\n return image_adapteq", "def adjust_saturation(img, saturation_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Color(img)\n img = enhancer.enhance(saturation_factor)\n return img", "def adjust_saturation(img, saturation_factor):\n _assert_image_tensor(img, 'CHW')\n assert saturation_factor >= 0, \"saturation_factor should be non-negative.\"\n channels = _get_image_num_channels(img, 'CHW')\n if channels == 1:\n return img\n elif channels == 3:\n extreme_target = to_grayscale(img)\n else:\n raise ValueError(\"channels of input should be either 1 or 3.\")\n\n return _blend_images(img, extreme_target, saturation_factor)", "def Contrast(img):\r\n factor = 2 * (np.random.rand() - 0.5) * 128\r\n assert (factor <= 128 and factor >= -128), 'contract factor value wrong'\r\n fvalue = 259.0/255.0 * (factor + 255.0)/(259.0-factor)\r\n img = np.round((img - 128.0)*fvalue + 128.0)\r\n img = np.where(img > 255, 255, img)\r\n img = np.where(img < 0, 0, img)\r\n img = np.uint8(img)\r\n return img", "def adjust_brightness(img, brightness_factor):\n _assert_image_tensor(img, 'CHW')\n assert brightness_factor >= 0, \"brightness_factor should be non-negative.\"\n assert _get_image_num_channels(img, 'CHW') in [\n 1,\n 3,\n ], \"channels of input should be either 1 or 3.\"\n\n extreme_target = paddle.zeros_like(img, img.dtype)\n return _blend_images(img, extreme_target, brightness_factor)", "def setContrast(self, contrast):\n raise NotImplementedError", "def OnSetContrast(self, evt=None):\n\t\twith self.playerLock :\n\t\t\t#self.player.video_set_adjust_int( vlc.VideoAdjustOption.Enable, 1 )\n\t\t\tcontrast = self.contrastSlider.GetValue() * 2\n\t\t\tprint( 'new contrast: {}'.format( contrast ) )\n\t\t\tif 
self.player.video_set_adjust_float( vlc.VideoAdjustOption.Contrast, contrast/100.0 ) == -1:\n\t\t\t\tself.errorDialog(\"Failed to set contrast\")\n\t\t\telse:\n\t\t\t\tself.State.contrast = contrast/2\n\t\t\t\tself.SaveState()", "def adjustBrightness(img, fac):\n img2 = np.float32(img) * fac\n img2 = img2.clip(min=0, max=255)\n return np.uint8(img2)" ]
[ "0.80959105", "0.79809284", "0.79799145", "0.7915904", "0.7864022", "0.784802", "0.7763845", "0.7725997", "0.7657669", "0.7654457", "0.75754154", "0.75321746", "0.7374148", "0.73587877", "0.7324188", "0.72749984", "0.72118354", "0.7140224", "0.7107081", "0.709643", "0.70953214", "0.7065547", "0.7064155", "0.7045752", "0.6879955", "0.6828435", "0.6784375", "0.67831326", "0.6781242", "0.6780945" ]
0.8614103
0
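For the contrast entry above, an equivalent sketch with `PIL.ImageEnhance.Contrast`; the two-pixel test image is a made-up fixture chosen so the effect is easy to see.

from PIL import Image, ImageEnhance

img = Image.new('RGB', (2, 1))
img.putpixel((0, 0), (64, 64, 64))     # dark pixel
img.putpixel((1, 0), (192, 192, 192))  # light pixel

low = ImageEnhance.Contrast(img).enhance(0.5)   # pulls both pixels toward the mean grey
high = ImageEnhance.Contrast(img).enhance(2.0)  # pushes them further apart
print(low.getpixel((0, 0)), high.getpixel((0, 0)))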
Adjust color saturation of an image.
def adjust_saturation(img, saturation_factor):
    check_type(img)

    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(saturation_factor)
    return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjust_saturation(img, saturation_factor):\n if not _is_numpy_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n im = img.astype(np.float32)\n degenerate = cv2.cvtColor(cv2.cvtColor(im, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)\n im = (1-saturation_factor) * degenerate + saturation_factor * im\n im = im.clip(min=0, max=255)\n return im.astype(img.dtype)", "def adjust_saturation(image, factor):\r\n image[..., 1] = np.clip(image[..., 1] * factor, 0, 255)\r\n return image", "def change_saturation_conv(image, value):\n image = rescale_image_0255(image)\n image = change_saturation(image, value)\n return rescale_image_01(image)", "def adjust_saturation(img, saturation_factor):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if saturation_factor < 1 or saturation_factor > 5:\n raise ValueError(\n f'saturation_factor({saturation_factor}) is outside of the expected value range (1 <= x <= 5)')\n\n aug = iaa.imgcorruptlike.Saturate(severity=saturation_factor)\n img = aug.augment_image(img)\n return img", "def adjust_saturation(img, saturation_factor):\n _assert_image_tensor(img, 'CHW')\n assert saturation_factor >= 0, \"saturation_factor should be non-negative.\"\n channels = _get_image_num_channels(img, 'CHW')\n if channels == 1:\n return img\n elif channels == 3:\n extreme_target = to_grayscale(img)\n else:\n raise ValueError(\"channels of input should be either 1 or 3.\")\n\n return _blend_images(img, extreme_target, saturation_factor)", "def adjustSaturation(img, fac):\n img2 = np.float32(img)\n tmp = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n tmp = cv2.cvtColor(tmp, cv2.COLOR_GRAY2BGR)\n\n img2 = (1 - fac) * tmp + fac * img2\n img2 = np.uint8(img2.clip(0, 255))\n return img2", "def adjust_saturation(self, amount):\n h, s, v = self.as_hsv()\n s *= 1.0 + amount\n return ScreenColor.from_hsv(h, s, v)", "def shift_hue_saturation(image, hue = -90, saturation = 0.65): \n\tcopy = image.copy()\n\tld = copy.load()\n\twidth, height = copy.size\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\tpixel = ld[x,y]\n\t\t\tr = pixel[0]\n\t\t\tg = pixel[1]\n\t\t\tb = pixel[2]\n\t\t\t\n\t\t\th,s,v = colorsys.rgb_to_hsv(r/255., g/255., b/255.)\n\t\t\th = (h + hue/360.0) % 1.0\n\t\t\ts = s**saturation\n\t\t\tr,g,b = colorsys.hsv_to_rgb(h, s, v)\n\t\t\tld[x,y] = (int(r * 255.9999), int(g * 255.9999), int(b * 255.9999))\n\treturn copy", "def saturation(self):\n raise NotImplementedError", "def saturate(image_path, factor=4):\n BasicTransform.convert_image(image_path)\n\n with Image.open(image_path) as img:\n filter = ImageEnhance.Color(img)\n new_image = filter.enhance(factor)\n new_image.save(image_path)", "def preprocess_image(img):\r\n\r\n hsvImg = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n\r\n hsvImg[...,1] = hsvImg[...,1]*1.75 #increase saturation by 175%\r\n\r\n image_f =cv2.cvtColor(hsvImg,cv2.COLOR_HSV2BGR)\r\n\r\n return image_f", "def enhance_saturation(self, delta, p=None):\n if self._max_aug_nums>0:\n if self._nums>self._max_aug_nums:\n return self\n self._nums += 1\n if p is None:\n p = self._p\n self.image = enhance_saturation(self.image, delta, p)\n return self", "def Saturation(img):\r\n factor = 2 * np.random.rand()\r\n HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n H, S, V = cv2.split(HSV)\r\n S= S* np.float(factor)\r\n S = np.where( S>255, 255,S)\r\n S = np.where( S<0, 0, S)\r\n HSV[:,:,1] = np.uint8(S)\r\n BGR = cv2.cvtColor(HSV, cv2.COLOR_HSV2BGR)\r\n return BGR", "def saturation(self, factor):\n\n 
h, l, s = self.tohls()\n s = clamp(s + factor - 1.0, 0.0, 1.0)\n self.fromhls(h, l, s)", "def adjust_brightness(image, delta):\r\n return _clip(image + delta * 255)", "def saturation(value):\n value = int(value)\n if value < 0 or value > 254:\n raise ValueError('Minimum saturation is 0, to the maximum 254')\n return value", "def adjust_hue(image, delta):\r\n image[..., 0] = np.mod(image[..., 0] + delta * 180, 180)\r\n return image", "def increase_brightness(image, value=18):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n\n final_hsv = cv2.merge((h, s, v))\n image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return image", "def adjustHue(img, fac):\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV_FULL)\n hsv[..., 0] += np.uint8(fac * 255)\n img2 = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB_FULL)\n return img2", "def adjust_brightness(img, brightness_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Brightness(img)\n img = enhancer.enhance(brightness_factor)\n return img", "def adjust_hue(img, hue_factor):\n _assert_image_tensor(img, 'CHW')\n assert (\n hue_factor >= -0.5 and hue_factor <= 0.5\n ), \"hue_factor should be in range [-0.5, 0.5]\"\n channels = _get_image_num_channels(img, 'CHW')\n if channels == 1:\n return img\n elif channels == 3:\n dtype = img.dtype\n if dtype == paddle.uint8:\n img = img.astype(paddle.float32) / 255.0\n\n img_hsv = _rgb_to_hsv(img)\n h, s, v = img_hsv.unbind(axis=-3)\n h = h + hue_factor\n h = h - h.floor()\n img_adjusted = _hsv_to_rgb(paddle.stack([h, s, v], axis=-3))\n\n if dtype == paddle.uint8:\n img_adjusted = (img_adjusted * 255.0).astype(dtype)\n else:\n raise ValueError(\"channels of input should be either 1 or 3.\")\n\n return img_adjusted", "def change_brightness_conv(image, value):\n image = rescale_image_0255(image)\n image = change_brightness(image, value)\n return rescale_image_01(image)", "def darken(image_path, factor=0.5):\n BasicTransform.convert_image(image_path)\n\n with Image.open(image_path) as img:\n filter = ImageEnhance.Brightness(img)\n new_image = filter.enhance(factor)\n new_image.save(image_path)", "def enhance_contrast(img):\n for y in range(frame_height):\n for x in range(frame_width):\n if img[y, x, 1] > 100:\n # range of blues to limit of puppet motion 255/(frame_width - 150)\n img[y][x][0] = x*0.4\n if img[y, x, 1] <= 100:\n img[y][x][2] = img[y][x][2]*0.5\n cv2.imwrite(\"contrasted.png\", img)", "def adjust_contrast(img, contrast_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Contrast(img)\n img = enhancer.enhance(contrast_factor)\n return img", "def adjust_hue(img, hue_factor):\n if not(-0.5 <= hue_factor <= 0.5):\n raise ValueError('hue_factor is not in [-0.5, 0.5].'.format(hue_factor))\n\n if not _is_numpy_image(img):\n raise TypeError('img should be CV Image. 
Got {}'.format(type(img)))\n\n im = img.astype(np.uint8)\n hsv = cv2.cvtColor(im, cv2.COLOR_RGB2HSV_FULL)\n hsv[..., 0] += np.uint8(hue_factor * 255)\n\n im = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB_FULL)\n return im.astype(img.dtype)", "def adjust_brightness_contrast(img, brightness=0., contrast=0.):\n beta = 0\n return cv2.addWeighted(img, 1 + float(contrast) / 100., img, beta, float(brightness))", "def saturate(self, amount):\n h, l, s = colorsys.rgb_to_hls(self.r, self.g, self.b)\n\n s = s + amount\n\n if s < 0.0:\n s = 0.0\n if s > 1.0:\n s = 1.0\n\n r, g, b = colorsys.hls_to_rgb(h, l, s)\n return Color(from_rgba=(c(r), c(g), c(b), c(self.a)))", "def applyHSV(img):\n\treturn applyColorMap(img, \"hsv\")", "def adjustBrightness(img, fac):\n img2 = np.float32(img) * fac\n img2 = img2.clip(min=0, max=255)\n return np.uint8(img2)" ]
[ "0.7918249", "0.78994155", "0.7829689", "0.7721216", "0.7650373", "0.76433337", "0.73215264", "0.71374863", "0.6895094", "0.6852125", "0.67068046", "0.6642146", "0.66122895", "0.658319", "0.65360445", "0.6521743", "0.64246887", "0.641921", "0.639464", "0.6353775", "0.62883955", "0.6257121", "0.62506795", "0.6247128", "0.62370914", "0.62123466", "0.6210423", "0.6207983", "0.61788344", "0.61766183" ]
0.855761
0
Adjust hue of an image. The image hue is adjusted by converting the image to HSV and cyclically shifting the intensities in the hue channel (H). The image is then converted back to the original image mode. `hue_factor` is the amount of shift in the H channel and must be in the interval `[-0.5, 0.5]`. See `Hue`_ for more details.
def adjust_hue(img, hue_factor):
    if not(-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

    check_type(img)

    input_mode = img.mode
    assert img.mode not in {'L', '1', 'I', 'F'}, \
        "Input image mode should not be {'L', '1', 'I', 'F'}"

    h, s, v = img.convert('HSV').split()

    np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
    with np.errstate(over='ignore'):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, 'L')

    img = Image.merge('HSV', (h, s, v)).convert(input_mode)
    return img
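A minimal usage sketch for the function above, assuming it is defined or importable in the current scope. The `check_type` helper is not shown in the snippet, so the stub below is an assumption made only to keep the sketch runnable; Pillow and NumPy are required.

import numpy as np
from PIL import Image

# Assumed stand-in for the snippet's check_type() helper (not shown in the source):
# a plain PIL-Image type check.
def check_type(img):
    if not isinstance(img, Image.Image):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

# Shift the hue of a solid red image by a quarter turn of the color wheel.
red = Image.new('RGB', (4, 4), (255, 0, 0))
shifted = adjust_hue(red, 0.25)      # 0.25 corresponds to roughly a 90 degree hue rotation
print(shifted.getpixel((0, 0)))      # red has moved toward green
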
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjust_hue(img, hue_factor):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if hue_factor < -255 or hue_factor > 255:\n raise ValueError(\n f'hue_factor({hue_factor}) is outside of the expected value range (-255 <= x <= 255)')\n\n aug = iaa.color.AddToHue(value=hue_factor, from_colorspace='RGB')\n img = aug.augment_image(img)\n return img", "def adjust_hue(img, hue_factor):\n if not(-0.5 <= hue_factor <= 0.5):\n raise ValueError('hue_factor is not in [-0.5, 0.5].'.format(hue_factor))\n\n if not _is_numpy_image(img):\n raise TypeError('img should be CV Image. Got {}'.format(type(img)))\n\n im = img.astype(np.uint8)\n hsv = cv2.cvtColor(im, cv2.COLOR_RGB2HSV_FULL)\n hsv[..., 0] += np.uint8(hue_factor * 255)\n\n im = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB_FULL)\n return im.astype(img.dtype)", "def adjust_hue(img, hue_factor):\n _assert_image_tensor(img, 'CHW')\n assert (\n hue_factor >= -0.5 and hue_factor <= 0.5\n ), \"hue_factor should be in range [-0.5, 0.5]\"\n channels = _get_image_num_channels(img, 'CHW')\n if channels == 1:\n return img\n elif channels == 3:\n dtype = img.dtype\n if dtype == paddle.uint8:\n img = img.astype(paddle.float32) / 255.0\n\n img_hsv = _rgb_to_hsv(img)\n h, s, v = img_hsv.unbind(axis=-3)\n h = h + hue_factor\n h = h - h.floor()\n img_adjusted = _hsv_to_rgb(paddle.stack([h, s, v], axis=-3))\n\n if dtype == paddle.uint8:\n img_adjusted = (img_adjusted * 255.0).astype(dtype)\n else:\n raise ValueError(\"channels of input should be either 1 or 3.\")\n\n return img_adjusted", "def adjustHue(img, fac):\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV_FULL)\n hsv[..., 0] += np.uint8(fac * 255)\n img2 = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB_FULL)\n return img2", "def change_hue(image, delta):\n\n imHueChange = tf.image.adjust_hue(image, delta=delta, name=None)\n return imHueChange", "def adjust_hue(image, delta):\r\n image[..., 0] = np.mod(image[..., 0] + delta * 180, 180)\r\n return image", "def shift_hue_saturation(image, hue = -90, saturation = 0.65): \n\tcopy = image.copy()\n\tld = copy.load()\n\twidth, height = copy.size\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\tpixel = ld[x,y]\n\t\t\tr = pixel[0]\n\t\t\tg = pixel[1]\n\t\t\tb = pixel[2]\n\t\t\t\n\t\t\th,s,v = colorsys.rgb_to_hsv(r/255., g/255., b/255.)\n\t\t\th = (h + hue/360.0) % 1.0\n\t\t\ts = s**saturation\n\t\t\tr,g,b = colorsys.hsv_to_rgb(h, s, v)\n\t\t\tld[x,y] = (int(r * 255.9999), int(g * 255.9999), int(b * 255.9999))\n\treturn copy", "def hue_shift(input_image, degrees):\n\timage = input_image if 180 + degrees <= 255 else input_image.astype('uint16')\n\timage[:, :, 0] += degrees\n\timage[:, :, 0] %= 180\n\treturn image", "def adjust_saturation(img, saturation_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Color(img)\n img = enhancer.enhance(saturation_factor)\n return img", "def adjust_contrast(img, contrast_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Contrast(img)\n img = enhancer.enhance(contrast_factor)\n return img", "def adjust_contrast(img, contrast_factor):\n if not _is_numpy_image(img):\n raise TypeError('img should be CV Image. 
Got {}'.format(type(img)))\n im = img.astype(np.float32)\n mean = round(cv2.cvtColor(im, cv2.COLOR_RGB2GRAY).mean())\n im = (1-contrast_factor)*mean + contrast_factor * im\n im = im.clip(min=0, max=255)\n return im.astype(img.dtype)", "def adjust_saturation(img, saturation_factor):\n _assert_image_tensor(img, 'CHW')\n assert saturation_factor >= 0, \"saturation_factor should be non-negative.\"\n channels = _get_image_num_channels(img, 'CHW')\n if channels == 1:\n return img\n elif channels == 3:\n extreme_target = to_grayscale(img)\n else:\n raise ValueError(\"channels of input should be either 1 or 3.\")\n\n return _blend_images(img, extreme_target, saturation_factor)", "def adjust_saturation(img, saturation_factor):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if saturation_factor < 1 or saturation_factor > 5:\n raise ValueError(\n f'saturation_factor({saturation_factor}) is outside of the expected value range (1 <= x <= 5)')\n\n aug = iaa.imgcorruptlike.Saturate(severity=saturation_factor)\n img = aug.augment_image(img)\n return img", "def adjust_contrast(img, contrast_factor):\n _assert_image_tensor(img, 'chw')\n assert contrast_factor >= 0, \"contrast_factor should be non-negative.\"\n\n channels = _get_image_num_channels(img, 'CHW')\n dtype = img.dtype if paddle.is_floating_point(img) else paddle.float32\n if channels == 1:\n extreme_target = paddle.mean(\n img.astype(dtype), axis=(-3, -2, -1), keepdim=True\n )\n elif channels == 3:\n extreme_target = paddle.mean(\n to_grayscale(img).astype(dtype), axis=(-3, -2, -1), keepdim=True\n )\n else:\n raise ValueError(\"channels of input should be either 1 or 3.\")\n\n return _blend_images(img, extreme_target, contrast_factor)", "def rotateHue ( self, hue_inc ):\n if isinstance( hue_inc, int ):\n hue_inc /= 360.0\n newhue = self.h + hue_inc\n if newhue > 1.0:\n newhue, whole = math.modf(newhue) # Keep decimal part\n if newhue < 0.0:\n newhue, whole = math.modf(newhue) # Keep decimal part\n newhue = 1.0 + newhue\n self.h = newhue\n self.hsl[0] = self.h\n self.hsla[0] = self.h\n self.updateFromHsl()", "def setHue ( self, newhue ):\n if isinstance( newhue, int ):\n newhue /= 360.0\n if newhue > 1.0:\n newhue, whole = math.modf(newhue) # Keep decimal part\n self.h = newhue\n self.hsl[0] = newhue\n self.hsla[0] = newhue\n self.updateFromHsl()", "def adjust_contrast(img, contrast_factor):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if contrast_factor < 1 or contrast_factor > 5:\n raise ValueError(\n f'contrast_factor({contrast_factor}) is outside of the expected value range (1 <= x <= 5)')\n\n aug = iaa.imgcorruptlike.Contrast(severity=contrast_factor)\n img = aug.augment_image(img)\n return img", "def adjust_saturation(img, saturation_factor):\n if not _is_numpy_image(img):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n im = img.astype(np.float32)\n degenerate = cv2.cvtColor(cv2.cvtColor(im, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)\n im = (1-saturation_factor) * degenerate + saturation_factor * im\n im = im.clip(min=0, max=255)\n return im.astype(img.dtype)", "def adjust_hsv(image, delta_h = 0, delta_s = 0, delta_v = 0):\r\n\r\n\tassert image.shape[-1] == 3\r\n\tassert 0 <= delta_h <= 1 and 0 <= delta_s <= 1 and 0 <= delta_v <= 1\r\n\r\n\timage = rgb_to_hsv(image / 255.0)\r\n\r\n\timage[:, :, 0] += delta_h\r\n\timage[:, :, 1] += delta_s\r\n\timage[:, :, 2] += delta_v\r\n\r\n\timage = hsv_to_rgb(image) * 255\r\n\r\n\r\n\treturn image.astype(\"uint8\")", "def enhance_hue(self, delta, p=None):\n if self._max_aug_nums>0:\n if self._nums>self._max_aug_nums:\n return self\n self._nums += 1\n if p is None:\n p = self._p\n self.image = enhance_hue(self.image, delta, p)\n return self", "def adjust_brightness(img, brightness_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Brightness(img)\n img = enhancer.enhance(brightness_factor)\n return img", "def adjust_contrast(image, factor):\r\n mean = image.mean(axis=0).mean(axis=0)\r\n return _clip((image - mean) * factor + mean)", "def adjust_brightness(img, brightness_factor):\n _assert_image_tensor(img, 'CHW')\n assert brightness_factor >= 0, \"brightness_factor should be non-negative.\"\n assert _get_image_num_channels(img, 'CHW') in [\n 1,\n 3,\n ], \"channels of input should be either 1 or 3.\"\n\n extreme_target = paddle.zeros_like(img, img.dtype)\n return _blend_images(img, extreme_target, brightness_factor)", "def adjust_brightness(img, brightness_factor):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if brightness_factor < 1 or brightness_factor > 5:\n raise ValueError(\n f'brightness_factor({brightness_factor}) is outside of the expected value range (1 <= x <= 5)')\n\n aug = iaa.imgcorruptlike.Brightness(severity=brightness_factor)\n img = aug.augment_image(img)\n return img", "def saturate(image_path, factor=4):\n BasicTransform.convert_image(image_path)\n\n with Image.open(image_path) as img:\n filter = ImageEnhance.Color(img)\n new_image = filter.enhance(factor)\n new_image.save(image_path)", "def compute_new_hsv(im):\n eps = 1e-10\n r,g,b = np.array(cv2.split(im)) + eps\n traditional_hsv = cv2.cvtColor(im, cv2.COLOR_RGB2HSV)\n numerator = np.log(r) - np.log(g)\n denominator = np.log(r) + np.log(g) - 2*np.log(b) + eps\n new_hue = np.clip(np.round(numerator/denominator).astype(np.uint8), 0, 180)\n new_hsv = np.zeros_like(traditional_hsv).astype(np.uint8)\n new_hsv[:, :, 0] = new_hue\n new_hsv[:, :, 1] = traditional_hsv[:, :, 1]\n new_hsv[:, :, 2] = traditional_hsv[:, :, 2]\n return new_hsv", "def adjust_saturation(image, factor):\r\n image[..., 1] = np.clip(image[..., 1] * factor, 0, 255)\r\n return image", "def increase_brightness(image, value=18):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n\n final_hsv = cv2.merge((h, s, v))\n image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return image", "def adjust_brightness(img, brightness_factor):\n if not _is_numpy_image(img):\n raise TypeError('img should be CV Image. 
Got {}'.format(type(img)))\n\n im = img.astype(np.float32) * brightness_factor\n im = im.clip(min=0, max=255)\n return im.astype(img.dtype)", "def preprocess_image(img):\r\n\r\n hsvImg = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n\r\n hsvImg[...,1] = hsvImg[...,1]*1.75 #increase saturation by 175%\r\n\r\n image_f =cv2.cvtColor(hsvImg,cv2.COLOR_HSV2BGR)\r\n\r\n return image_f" ]
[ "0.85895795", "0.8572448", "0.85619026", "0.7205861", "0.69402057", "0.69187206", "0.62650293", "0.61591804", "0.60515624", "0.60091466", "0.59828484", "0.5952519", "0.5854867", "0.5839377", "0.58338064", "0.58218634", "0.58043385", "0.57956964", "0.5656194", "0.56120825", "0.56045073", "0.55965817", "0.55440885", "0.54897386", "0.5392603", "0.53885597", "0.5342203", "0.52907187", "0.5259001", "0.5144924" ]
0.8744694
0
Display the attributes of the layer.
def display_layer_parameters(self):
    pprint.pprint(vars(self))
    return
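A small sketch of how a method like this behaves; the `Layer` class and its attributes below are invented purely for illustration, only the display method mirrors the snippet above.

import pprint

class Layer:
    def __init__(self, name, thickness):
        self.name = name
        self.thickness = thickness

    def display_layer_parameters(self):
        # pprint the instance's attribute dictionary
        pprint.pprint(vars(self))
        return

Layer('AR coating', 0.00025).display_layer_parameters()
# prints: {'name': 'AR coating', 'thickness': 0.00025}
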
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _print_attribute(self):\n print(vars(self))", "def display(self):\n # type: ()->None\n print('============')\n for key, value in self._ifAttributes.items():\n if isinstance(value, list):\n print(key + ': ')\n for item in value:\n print('\\t' + item)\n elif isinstance(value, dict):\n print(key + ': ')\n for item in value.keys():\n print('\\t' + item + ': ' + value[item])\n else:\n print(key + ': ' + str(value))\n print('============')", "def print_attrs(self):\n for attribute in self.__dict__.keys():\n print(attribute)", "def print_attr(self):\n return \"name : {0}\\nprice : {1}\\ndescription : {2}\".format(\n self.name, self.price, self.description\n )", "def print_attribute(attributes):\n for attribute in attributes:\n print ' ',\n change_color_by_tag(attribute)\n if attribute['ExtAttributes']:\n print_extattributes_of_member(attribute['ExtAttributes'])\n print attribute['Type'],\n print attribute['Name']", "def show_properties(self):\n print(\"L:\", self.L)\n print(\"d:\", self.d)\n print(\"D:\", self.D)\n print(\"dtype:\", self.dtype)\n print(\"R[-1]:\", self.R[-1])\n print(\"F[-1]:\", self.F[-1])\n print(\"Cummulated norm C:\", self.normC)", "def __repr__(self):\n return misc.describe_layer(self, name=\"model\")", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def attributes(self):", "def __repr__(self):\n return '{} (AR layer)'.format(self.name)", "def get_all_attribute(self):\n for attr, value in self.__dict__.items():\n print(attr, value)", "def attributes(self):\n ...", "def print_extattribute(extattributes):\n for extattribute in extattributes:\n print ' ',\n change_color_by_tag(extattribute)\n print '{NAME}'.format(NAME=extattribute['Name'])", "def ListAttributes(self):\n\n print(\"\\n\")\n print(\"Attributes List of: \" + repr(self.__dict__[\"name\"]) + \" - \" + self.__class__.__name__ + \" Instance\\n\")\n self_keys = self.__dict__.keys()\n self_keys.sort()\n for key in self_keys:\n if key != \"name\":\n print(str(key) + \" : \" + repr(self.__dict__[key]))\n # end\n # end\n print(\"\\n\")", "def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))", "def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))", "def print_run_attributes (ins, exp, run) :\n for attr in run_attributes(ins, exp, run) :\n print('class: %s name: %s type (of the value): %s value (optional): %s description (optional):%s' \\\n % (attr['class'].ljust(16), attr['name'].ljust(32), attr['type'].ljust(8), str(attr['val']).ljust(8), attr['descr'].ljust(8)))", "def attributes(self):\n _attrs = []\n if self.name:\n _attrs.append(\"name\")\n if self.label:\n _attrs.append(\"label\")\n if self.confidence:\n _attrs.append(\"confidence\")\n if self.index:\n _attrs.append(\"index\")\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs + [\"points\"]", "def __str__(self) -> str:\n result = \"\"\n for attr in self.ATTRS:\n if len(result) != 0:\n result += \"\\n\"\n result += f\"{attr}: {getattr(self, attr)}\"\n\n return result", "def 
attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def display(self):\n print(\"{}, {}\".format(self.label, self.params))", "def summary(self, verbose=False):\n for i, layer in enumerate(self._layers):\n print('%d: %s' % (i, str(layer)))\n if verbose:\n print('weights:', layer.get_weights())\n if layer._use_bias:\n print('bias:', layer._bias)\n print()", "def __str__(self):\r\n\r\n for att in self.__dict__:\r\n print(\"%s: %r\" % (att, getattr(self, att)))\r\n\r\n return \"Planet Population class object attributes\"", "def describe_fields(self):\n opened_file = self.data\n description = []\n\n if not opened_file:\n opened_file = self.open()\n\n for n in range(0, opened_file.GetLayerCount()):\n layer = opened_file.GetLayer(n)\n layer_description = {'name': layer.GetName(),\n 'feature_count': layer.GetFeatureCount(),\n 'fields': [],\n 'index': n,\n 'geom_type': self.geometry_type(layer.GetGeomType())\n }\n\n layer_definition = layer.GetLayerDefn()\n for i in range(layer_definition.GetFieldCount()):\n field_desc = {}\n field = layer_definition.GetFieldDefn(i)\n field_desc['name'] = field.GetName()\n field_desc['type'] = field.GetFieldTypeName(i)\n layer_description['fields'].append(field_desc)\n\n description.append(layer_description)\n\n return description", "def __repr__(self):\n return \"\\nSprite info: \" + self.name + \"\\nx = {0}\\ny = {1}\\nhealth = {2}\\nstrength = {3}\\nloot = {4}\\n\"\\\n .format(self.x, self.y, self.health, self.strength, self.loot)", "def print_ncattr(key):\r\n try:\r\n print (\"\\t\\ttype:\", repr(nc_fid.variables[key].dtype))\r\n for ncattr in nc_fid.variables[key].ncattrs():\r\n print ('\\t\\t%s:' % ncattr,\\\r\n repr(nc_fid.variables[key].getncattr(ncattr)))\r\n except KeyError:\r\n print (\"\\t\\tWARNING: %s does not contain variable attributes\" % key)", "def printValues(self):\n\n for layer in self.LayerNames:\n print \"-------- {0} --------\".format(layer)\n print \"nWorkingModules: {0}\".format(self.nWorkingModules[layer])\n print \"Pixels per Layer\"\n print \" Pixels hit: {0}\".format(self.hitPix[layer])\n print \" Occupancy: {0}\".format(self.occupancies[layer])\n print \" Pixels hit per Module: {0}\".format(self.hitPixPerModule[layer])\n print \" Pixels hit per Area: {0}\".format(self.hitPixPerArea[layer])\n print \" Pixels hit per Area per sec: {0}\".format(self.hitPixPerAreaSec[layer])\n print \"Pixels per Det\"\n print \" Occupancy (Det): {0}\".format(self.Detoccupancies[layer])\n print \" Pixels hit per Det: {0}\".format(self.hitPixPerDet[layer])\n print \" Pixels hit per DetArea: {0}\".format(self.hitPixPerDetArea[layer])\n print \" Pixels hit per DetArea per sec: {0}\".format(self.hitPixPerDetAreaSec[layer])\n print \"Cluster per Layer\"\n print \" Clusters hit: {0}\".format(self.hitClusters[layer])\n print \" Clusters hit per module: {0}\".format(self.hitClustersPerModule[layer])\n print \" Clusters hit per Area: {0}\".format(self.hitClustersPerArea[layer])\n print \" Clusters hit per Area per sec: {0}\".format(self.hitClustersPerAreaSec[layer])\n print \"Clusters per Det\"\n print \" Clusters hit per Det: {0}\".format(self.hitClustersPerDet[layer])\n print \" Clusters hit per DetArea: {0}\".format(self.hitClustersPerDetArea[layer])\n print \" Clusters hit per DetArea per sec: {0}\".format(self.hitClustersPerDetAreaSec[layer])", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + 
\" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def print_ncattr(key):\n try:\n print(\"\\t\\ttype:\", repr(nc_fid.variables[key].dtype))\n for ncattr in nc_fid.variables[key].ncattrs():\n print('\\t\\t%s:' % ncattr,\\\n repr(nc_fid.variables[key].getncattr(ncattr)))\n except KeyError:\n print(\"\\t\\tWARNING: %s does not contain variable attributes\" % key)", "def print_ncattr(key):\n try:\n print(\"\\t\\ttype:\", repr(nc_fid.variables[key].dtype))\n for ncattr in nc_fid.variables[key].ncattrs():\n print('\\t\\t%s:' % ncattr,\\\n repr(nc_fid.variables[key].getncattr(ncattr)))\n except KeyError:\n print(\"\\t\\tWARNING: %s does not contain variable attributes\" % key)" ]
[ "0.6759068", "0.6757934", "0.6757424", "0.6648813", "0.6640005", "0.65391797", "0.6433737", "0.6322798", "0.63007444", "0.6251851", "0.6192194", "0.61783284", "0.6137543", "0.6119068", "0.6101882", "0.6101882", "0.60906076", "0.6051513", "0.59783787", "0.59731185", "0.5908545", "0.58931035", "0.5888154", "0.58544403", "0.584938", "0.5821529", "0.5814852", "0.58046114", "0.58035594", "0.58035594" ]
0.7359366
0
Return the ideal quarter wavelength thickness of the AR coating layer at a given optimization frequency. Arguments
def ideal_thickness(self, opt_freq=160e9):
    return (1/np.sqrt(self.dielectric)*3e8/(4*opt_freq))
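As a quick sanity check of the quarter-wave formula used above (t = c / (4 * n * f), with index n = sqrt(dielectric constant)), here is a standalone version; the dielectric constant of 2.5 is an assumed example value, not taken from the source.

import numpy as np

def ideal_thickness(dielectric, opt_freq=160e9):
    # quarter-wave coating thickness: t = c / (4 * n * f), with n = sqrt(dielectric)
    return 1 / np.sqrt(dielectric) * 3e8 / (4 * opt_freq)

print(ideal_thickness(2.5))   # ~2.96e-04 m, i.e. roughly 0.3 mm at 160 GHz
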
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_wavelength(period, depth, gravity):\r\n return geometry.gmCalculateWavelength(period, depth, gravity)", "def width(self):\n return (self.norm / max(self.transmit)) * Unit(self.wavelength_unit)", "def wavelength(self,freq):\n return self.phase_velocity()/freq", "def get_wavelength(self):\n E = -self.E0*(1.0/self.n_low**2 - 1.0/self.n_high**2)\n return SI['hc']*1e12/(E*SI['keV'])", "def _frequency_to_wavelength(freq):\n return ifc.SPEED_OF_LIGHT_METRES_PER_SECOND / freq", "def wavelength(self):\n return self.getparam(\"WAVELENGTH\")", "def wavelength(self):\n return self.getparam(\"WAVELENGTH\")", "def fried_parameter_cm(wavelength,arcseconds_of_seeing_500nm=1.,zenith_angle_deg = 0.):\n r0_500nm_cm = (500e-9/(arcseconds_of_seeing_500nm*(np.pi/(180*3600))))*100\n k = r0_500nm_cm/(500e-9)**(6./5)\n r00 = k*wavelength**(6./5.)\n zenith_angle_rad = np.radians(zenith_angle_deg)\n r0z = r00 * np.cos(zenith_angle_rad)**(3/5.) #p60 DFB POI\n return r0z", "def waveform_width(waveform, cutoff=0.75):\n waveform = np.squeeze(waveform)\n if np.ndim(waveform) != 1:\n raise ValueError('Expected 1-dimensional waveform.')\n if len(waveform) < 2:\n raise ValueError('Too short waveform.')\n if not (0 <= cutoff < 1):\n raise ValueError('Cuttoff must be in range [0, 1).')\n\n min_border = max(1, int(len(waveform) * cutoff))\n idx_min = np.argmin(waveform[:min_border])\n idx_max = np.argmax(waveform[idx_min:]) + idx_min\n width = idx_max - idx_min\n\n return width", "def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def calc_thickness(self):\n s = \"::: calculating z-varying thickness :::\"\n print_text(s, cls=self)\n #H = project(self.S - self.x[2], self.Q, annotate=False)\n H = self.vert_integrate(Constant(1.0), d='down')\n Hv = H.vector()\n Hv[Hv < 0] = 0.0\n print_min_max(H, 'H', cls=self)\n return H", "def model_wave(time, period, width) -> float:\n cur_time = time % period\n half_width = width//2\n if cur_time < half_width:\n return float(cur_time) / half_width\n elif cur_time < width:\n return 1 - float(cur_time - half_width) / half_width\n else:\n return 0", "def band_width(self):\n return self._band_width", "def wavelength(refractive_index, omega):\n return 2 * np.pi * cgs.c / (refractive_index * omega)", "def DSS28_beamwidth(freq):\n return 0.54/freq", "def acW(self):\n return self.fuselageLength * self.posFraction", "def wavelength(self):\n return self.get(self._names[\"wavelength\"])", "def get_cw_freq(self):\n return self.get_frequency(self.synth)", "def get_wl_band(radar_frequency):\n return 0 if (30 < radar_frequency < 40) else 1", "def getWidth(self):\n wsum = 0.0\n for quad in self._quadrilaterals:\n wsum = wsum + get_quad_width(quad)\n mwidth = (wsum / len(self._quadrilaterals)) / 1000.0\n return mwidth", "def wavelength(self):\n return wavelength(energy)", "def get_wavelength(self, channel):\n\n wavelength = self.device.query(f':WAVEL{channel}:VAL?')\n return int(float(wavelength))", "def TF_wavelength(self):\n return int(self.ask(self.headStr('TF')+'TWL?'))", "def get_waveform_halfwidth(waveform, sampling_rate=30000.):\n w = resample(waveform,200)#upsample to smooth the data\n time = np.linspace(0,len(waveform)/sampling_rate,200)\n trough = np.where(w==np.min(w))[0][0]\n peak = np.where(w==np.max(w))[0][0]\n \n #dur = time[trough:][np.where(w[trough:]==np.max(w[trough:]))[0][0]] - time[trough]\n if w[peak] > np.abs(w[trough]):\n dur = 
time[peak:][np.where(w[peak:]>=0.5*np.min(w[peak:]))[0][0]] - time[peak] \n else:\n dur = time[trough:][np.where(w[trough:]<=0.5*np.max(w[trough:]))[0][0]] - time[trough] \n if peak<trough:\n dur=-dur\n return dur", "def _waist_from_q(q, wavelength):\n\n return np.sqrt(wavelength / (np.pi * np.imag(-1 / q)))", "def get_cutoff_dim(self):\n return self.circuit._trunc", "def coating_weight(self, thickness, weight=2400):\n return self.__length * self.__width * thickness * weight / 100", "def calc_optical_thickness(self, atmosphere, t_surface):\n self.set_atmospheric_state(atmosphere, t_surface)\n\n self.ws.propmat_clearsky_fieldCalc()\n\n tau = np.trapz(\n y=self.ws.propmat_clearsky_field.value[:, :, 0, 0, :, 0, 0],\n x=self.ws.z_field.value[:, 0, 0],\n axis=-1,\n )\n\n return self.ws.f_grid.value.copy(), tau", "def waveparameterh(L):\r\n return 8.13 - ((250 - 0.7 * L) / 125) ** 3" ]
[ "0.6247497", "0.61737293", "0.6076563", "0.60169905", "0.5970396", "0.5918256", "0.5918256", "0.5891127", "0.58884025", "0.5886617", "0.5878247", "0.5775444", "0.573283", "0.5727211", "0.5720022", "0.5705462", "0.5659742", "0.5655887", "0.5622704", "0.5589557", "0.5573802", "0.5573037", "0.55384386", "0.55155", "0.5481012", "0.54795915", "0.5474284", "0.5462257", "0.5442602", "0.54375577" ]
0.70882714
0
Connect all the AR coating layer objects, ensuring that the source and terminator layers come first and last, respectively.
def _interconnect(self):
    self.clear_structure()
    self.structure.append(self.source)
    for i in range(len(self.stack)):
        self.structure.append(self.stack[i])
    self.structure.append(self.terminator)
    return
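A minimal sketch of the ordering this method enforces. The containing class, its attribute names beyond those used in the snippet, and the plain-string layer objects are assumptions made only to make the source-first / terminator-last ordering visible.

class Coating:
    def __init__(self, source, stack, terminator):
        self.source = source
        self.stack = stack
        self.terminator = terminator
        self.structure = []

    def clear_structure(self):
        self.structure = []

    def _interconnect(self):
        self.clear_structure()
        self.structure.append(self.source)
        for i in range(len(self.stack)):
            self.structure.append(self.stack[i])
        self.structure.append(self.terminator)
        return

c = Coating('source', ['coat_1', 'coat_2'], 'terminator')
c._interconnect()
print(c.structure)   # ['source', 'coat_1', 'coat_2', 'terminator']
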
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return", "def connect_back(simulation_dict, (index_from, blocks_per_dim_from), (index_to, blocks_per_dim_to), square_size, radius, context_factor):\n logging.info(\"Connecting back additional context from index %d to index %d\" % (index_from, index_to))\n logging.info(\"Connecting back additional context from layer size is %d, receiving layer size is %d\" % (blocks_per_dim_from, blocks_per_dim_to))\n logging.info(\"Radius of connectivity %d\" % radius)\n for x in range(blocks_per_dim_from):\n for y in range(blocks_per_dim_from):\n surround = get_fan_in((x, y),\n dim_x_l=blocks_per_dim_to,\n dim_y_l=blocks_per_dim_to,\n dim_x_u=blocks_per_dim_from,\n dim_y_u=blocks_per_dim_from,\n block_x=square_size,\n block_y=square_size,\n radius=radius)\n source = index_from + x * (blocks_per_dim_from) + y # unit in the higher layer\n for xy in surround:\n dest = index_to + xy[0] * blocks_per_dim_to + xy[1] # unit in the lower layer\n context_block = simulation_dict['stage0'][source]['output_block']\n delta_block2 = SharedArray.SharedNumpyArray_like(context_block)\n simulation_dict['stage0'][source]['delta_blocks'].append(delta_block2)\n # Connect the context block to the source\n simulation_dict['stage0'][dest]['context_blocks'].append((context_block, delta_block2, context_factor))", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)", "def connect_forward_and_back(simulation_dict, (index0, blocks_per_dim0, predicted_array), (index1, blocks_per_dim1), square_size, radius, context_factor):\n hidden_size = simulation_dict['hidden_size']\n dx = hidden_size\n dy = hidden_size\n logging.info(\"Connecting from index %d to index %d\" % (index0, index1))\n logging.info(\"Input layer size is %d, receiving layer size is %d\" % (blocks_per_dim0, blocks_per_dim1))\n logging.info(\"Radius of connectivity %d\" % radius)\n for x in range(blocks_per_dim1):\n for y in range(blocks_per_dim1):\n surround = get_fan_in((x, y),\n dim_x_l=blocks_per_dim0,\n dim_y_l=blocks_per_dim0,\n dim_x_u=blocks_per_dim1,\n dim_y_u=blocks_per_dim1,\n block_x=square_size,\n block_y=square_size,\n radius=radius)\n dest = index1 + x * (blocks_per_dim1) + y # destination unit\n for xy in surround:\n source = index0 + xy[0] * blocks_per_dim0 + xy[1] # source unit\n # Prepare the input and corresponding delta block at source\n input_block = simulation_dict['stage0'][source]['output_block']\n delta_block = SharedArray.SharedNumpyArray_like(input_block)\n simulation_dict['stage0'][source]['delta_blocks'].append(delta_block)\n # Prepare the context and corresonding delta block at 
destination\n context_block = simulation_dict['stage0'][dest]['output_block']\n delta_block2 = SharedArray.SharedNumpyArray_like(context_block)\n simulation_dict['stage0'][dest]['delta_blocks'].append(delta_block2)\n # Connect the context block to the source\n simulation_dict['stage0'][source]['context_blocks'].append((context_block, delta_block2, context_factor))\n # Prepare the predicted blocks\n xx = xy[0]*hidden_size\n yy = xy[1]*hidden_size\n assert(predicted_array[xx:xx+dx, yy:yy+dy].shape == context_block.shape)\n predicted_block = SharedArray.DynamicView(predicted_array)[xx:xx+dx, yy:yy+dy]\n if not (predicted_block.shape == (dx, dy)):\n print predicted_block.shape\n raise\n # Connect the input to the destination together with its predicted blocks and so on.\n past_block = SharedArray.SharedNumpyArray_like(input_block)\n derivative_block = SharedArray.SharedNumpyArray_like(input_block)\n integral_block = SharedArray.SharedNumpyArray_like(input_block)\n pred_block_local = SharedArray.SharedNumpyArray_like(input_block)\n simulation_dict['stage0'][dest]['signal_blocks'].append((input_block, delta_block, predicted_block, past_block, derivative_block, integral_block, pred_block_local))", "def connect_corridors(G, all_ml_list, ml_dict, singleGraph):\r\n sensor_rows = run_on_file('d07_stations_2008_11_26.txt')\r\n connector_list, short_lines=find_all_connectors(sensor_rows)\r\n print 'number of connectors parsed in dataset: ', len(connector_list)\r\n for road_i, dir_i, coords, road_j, dir_j in short_lines:\r\n source_list=ml_dict[(road_i, dir_i)]\r\n destination_list=ml_dict[(road_j, dir_j)]\r\n source_line=nearest(coords, source_list, 1)\r\n destination_line=nearest(coords, destination_list, 1)\r\n G.add_edge(source_line[0][1][0], destination_line[0][1][0]) \r\n singleGraph.add_edge(source_line[0][1][0], destination_line[0][1][0]) \r\n G, singleGraph=manual_connections(G, singleGraph, ml_dict) \r\n return G, singleGraph", "def connect_forward_and_back_v1(simulation_dict, (index0, blocks_per_dim0, predicted_array, predicted_array_t2), (index1, blocks_per_dim1), square_size, radius, context_factor):\n hidden_size = simulation_dict['hidden_size']\n dx = hidden_size\n dy = hidden_size\n logging.info(\"Connecting from index %d to index %d\" % (index0, index1))\n logging.info(\"Input layer size is %d, receiving layer size is %d\" % (blocks_per_dim0, blocks_per_dim1))\n logging.info(\"Radius of connectivity %d\" % radius)\n for x in range(blocks_per_dim1):\n for y in range(blocks_per_dim1):\n surround = get_fan_in((x, y),\n dim_x_l=blocks_per_dim0,\n dim_y_l=blocks_per_dim0,\n dim_x_u=blocks_per_dim1,\n dim_y_u=blocks_per_dim1,\n block_x=square_size,\n block_y=square_size,\n radius=radius)\n dest = index1 + x * (blocks_per_dim1) + y # destination unit\n for xy in surround:\n source = index0 + xy[0] * blocks_per_dim0 + xy[1] # source unit\n # Prepare the input and corresponding delta block at source\n input_block = simulation_dict['stage0'][source]['output_block']\n delta_block = SharedArray.SharedNumpyArray_like(input_block)\n simulation_dict['stage0'][source]['delta_blocks'].append(delta_block)\n # Prepare the context and corresonding delta block at destination\n context_block = simulation_dict['stage0'][dest]['output_block']\n delta_block2 = SharedArray.SharedNumpyArray_like(context_block)\n simulation_dict['stage0'][dest]['delta_blocks'].append(delta_block2)\n # Connect the context block to the source\n simulation_dict['stage0'][source]['context_blocks'].append((context_block, delta_block2, 
context_factor))\n # Prepare the predicted blocks\n xx = xy[0]*hidden_size\n yy = xy[1]*hidden_size\n assert(predicted_array[xx:xx+dx, yy:yy+dy].shape == context_block.shape)\n predicted_block = SharedArray.DynamicView(predicted_array)[xx:xx+dx, yy:yy+dy]\n predicted_block2 = SharedArray.DynamicView(predicted_array_t2)[xx:xx+dx, yy:yy+dy]\n if not (predicted_block.shape == (dx, dy)):\n print predicted_block.shape\n raise\n # Connect the input to the destination together with its predicted blocks and so on.\n simulation_dict['stage0'][dest]['signal_blocks'].append((input_block, delta_block, predicted_block, predicted_block2))", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.rpn_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.rpn_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.rpn_cls = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.rpn_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4 * (self.reg_max + 1), 3, padding=1)\n self.rpn_iou = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.anchor_generator.strides])\n\n ##############V2################\n conf_vector = [nn.Conv2d(self.num_anchors * 4 * self.total_dim, self.num_anchors * self.reg_channels, 1)]\n conf_vector += [self.relu]\n conf_vector += [nn.Conv2d(self.num_anchors * self.reg_channels, self.num_anchors, 1), nn.Sigmoid()]\n\n self.reg_conf = nn.Sequential(*conf_vector)\n ##############V2################", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(2)\n ]\n self.norms = ModuleList(norms_list)", "def connect_layers(self, material_dict=None):\n if material_dict is None:\n material_dict = {k: DEFAULT_BEAM for k in range(self.layers)}\n\n for layer in range(self.layers):\n material = material_dict[layer]\n\n for h in range(self.height):\n for c in range(self.ring_n):\n if layer == 0:\n n0 = f'N.{h}.c'\n else:\n n0 = f'R.{layer}.{h}.{c}'\n n1 = f'R.{layer+1}.{h}.{c}'\n name = f'M.{layer}.{h}.{c}'\n\n self.fem.AddMember(name, n0, n1,\n material[0],\n material[1],\n material[2],\n material[3],\n material[4],\n material[5]\n )", "def comp_add_ao(self):\n scene = self.set_as_active()\n scene.use_nodes = True\n tree = scene.node_tree\n tree.nodes.clear()\n\n # creating the nodes\n node_rlayer = tree.nodes.new('CompositorNodeRLayers')\n node_rlayer.location = -300, 100\n node_rlayer.scene = scene\n node_rlayer.layer = w_var.rlname\n\n node_mixcolor = tree.nodes.new('CompositorNodeMixRGB')\n node_mixcolor.location = 0, 50\n node_mixcolor.blend_type = 'MULTIPLY'\n node_mixcolor.inputs[0].default_value = 0.730\n\n node_comp = tree.nodes.new('CompositorNodeComposite')\n node_comp.location = 300, 130\n\n node_viewer = tree.nodes.new('CompositorNodeViewer')\n node_viewer.location = 300, -100\n\n # connecting the nodes\n links = tree.links\n links.new(node_rlayer.outputs[0], node_mixcolor.inputs[1])\n links.new(node_rlayer.outputs[10], node_mixcolor.inputs[2])\n links.new(node_mixcolor.outputs[0], node_comp.inputs[0])\n links.new(node_mixcolor.outputs[0], node_viewer.inputs[0])\n\n for node in tree.nodes:\n node.select = 
False", "def learn_connectome(self):\n episode_nodes = [node for node in self.container.nodes if node.is_episode]\n if len(episode_nodes) < 2:\n return\n connections_counter = {}\n for node in episode_nodes:\n self._collect_episode_callout_stats(node, connections_counter)\n\n pair_list = [(key, connections_counter[key]) for key in connections_counter]\n pair_list.sort(key=lambda item: item[1], reverse=True)\n top_count = pair_list[0][1]\n if top_count < 4:\n return\n # make connections for the top half of pairs\n for pair, cnt in pair_list:\n if cnt > top_count // 2:\n self._make_connection_for_pair(pair)", "def initialize_layers(self, layers_config: dict, inputs=None):\n layers_config = layers_config.copy()\n input_lyrs = []\n initiated_layers = OrderedDict()\n wrp_layer = None # indicator for wrapper layers\n first_layer = True\n\n for lyr, lyr_args in layers_config.items():\n\n lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)\n\n lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)\n\n if K.BACKEND == 'pytorch':\n\n if first_layer:\n first_layer = False\n\n if callable(lyr_config):\n lyr_initiated = lyr_config\n else:\n lyr_initiated = TORCH_LAYERS[lyr_name](**lyr_config)\n setattr(self, lyr, lyr_initiated)\n initiated_layers[lyr] = {\"layer\": lyr_initiated, \"named_outs\": named_outs, 'call_args': call_args,\n 'inputs': lyr_inputs}\n\n else:\n # may be user has defined layers without input layer, in this case add Input layer as first layer\n if first_layer:\n if inputs is not None: # This method was called by providing it inputs.\n assert isinstance(inputs, tf.Tensor)\n # since inputs have been defined, all the layers that will be added will be next to first layer\n first_layer = False\n layer_outputs = inputs\n initiated_layers[layer_outputs.name] = {'layer': layer_outputs, 'tf_name': lyr_name}\n\n elif lyr_name != \"Input\":\n if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer\n initialized_layer = LAYERS[\"Input\"](shape=lyr_config['input_shape'])\n else:\n # for simple dense layer based models, lookback will not be used\n def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)\n initialized_layer = LAYERS[\"Input\"](shape=def_shape)\n\n # first layer is built so next iterations will not be for first layer\n first_layer = False\n # put the first layer in memory to be used for model compilation\n # add th layer which the user had specified as first layer\n initiated_layers[initialized_layer.name] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n\n # The inputs to the layer have not been specified, so either it is an Input layer\n if lyr_inputs is None:\n # or it uses the previous outputs as inputs\n if lyr_name == \"Input\":\n # it is an Input layer, hence should not be called\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n else:\n # it is executable and uses previous outputs as inputs\n if lyr_name in ACTIVATION_LAYERS:\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because 
wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n # lyr_config is serialized lambda layer, which needs to be deserialized\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n # layers_config['lambda']['config'] still contails lambda, so we need to replace the python\n # object (lambda) with the serialized version (lyr_config) so that it can be saved as json file.\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n if lyr_name == \"TemporalFusionTransformer\":\n lyr_config['return_attention_components'] = True\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n else: # The inputs to this layer have been specified so they must exist in lyr_cache.\n # it is an executable\n if lyr_name in ACTIVATION_LAYERS:\n\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n layer_initialized = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': layer_initialized,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n if activation is not None: # put the string back to dictionary to be saved in config file\n lyr_config['activation'] = activation\n\n first_layer = False\n\n self.jsonize_lyr_config(lyr_config)\n\n # inputs = [] todo, indentify input layers\n # for k,v in lyr_cache.items():\n # since the model is not build yet and we have access to only output tensors of each list, this is probably\n # # the only way to know that how many `Input` layers were encountered during the run of this method. 
Each\n # tensor (except TimeDistributed) has .op.inputs attribute, which is empty if a tensor represents output of Input layer.\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if k.upper() != \"TIMEDISTRIBUTED\" and hasattr(v, 'op'):\n # if hasattr(v.op, 'inputs'):\n # _ins = v.op.inputs\n # if len(_ins) == 0:\n # inputs.append(v)\n # else: # not sure if this is the proper way of checking if a layer receives an input or not!\n # if hasattr(v, '_keras_mask'):\n # inputs.append(v)\n\n setattr(self, 'initiated_layers', initiated_layers)\n setattr(self, 'input_lyrs', input_lyrs)\n\n\n # todo,\n # # for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use\n # # {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if len(layer_outputs.op.inputs) < 1:\n # print(\"Warning: the output is of Input tensor class type\")\n # else:\n # if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node\n # print(\"Warning: the output is of Input tensor class type\")\n\n # outs = None\n #if BACKEND == 'tensorflow':\n # outs = self.call(input_lyrs)\n # setattr(self, 'output_lyrs', outs)\n # if BACKEND == 'tensorflow':\n # ## Reinitial\n # super(Model, self).__init__(\n # inputs=input_lyrs,\n # outputs=outs)\n #MODEL.__init__(self, inputs=inputs, outputs=outs)\n\n return input_lyrs # , outs", "def connect_cells(self):\n self.nclist = []\n N = self._N\n for i in range(N):\n src = self.cells[i]\n tgt_syn = self.cells[(i+1)%N].synlist[0]\n nc = src.connect2target(tgt_syn)\n nc.weight[0] = self.syn_w\n nc.delay = self.syn_delay\n\n nc.record(self.t_vec, self.id_vec, i)\n self.nclist.append(nc)", "def connect_poly(self):\n # connect pmos1 poly\n nmos_gate = (self.nmos_position1 \n + self.nmos.poly_positions[0]\n + vector(0.5 * drc[\"minwidth_poly\"], 0))\n for i in range(len(self.pmos.poly_positions)):\n pmos_gate = (self.pmos_position1 \n + self.pmos.poly_positions[i]\n + vector(0.5 * drc[\"minwidth_poly\"], 0))\n mid1 = [pmos_gate.x, pmos_gate.y - drc[\"poly_to_active\"]]\n self.add_path(\"poly\", [nmos_gate, mid1, pmos_gate])\n\n # connect pmos2 poly\n nmos_gate = vector(self.nmos_position2[0] \n + self.nmos.poly_positions[0].x\n + 0.5 * drc[\"minwidth_poly\"], \n self.nmos_position1.y \n + self.nmos.poly_positions[0].y)\n for i in range(len(self.pmos.poly_positions)):\n pmos_gate = (self.pmos_position2\n + self.pmos.poly_positions[i]\n + vector(0.5 * drc[\"minwidth_poly\"], 0))\n mid1 = vector(pmos_gate.x,\n nmos_gate.y + self.nmos.height \n + drc[\"poly_to_active\"])\n self.add_path(\"poly\", [nmos_gate, mid1, pmos_gate])", "def set_up_all_ao(self):\n self.set_as_active()\n \n # sets up ambient occlusion lighting\n self.set_up_world_ao()\n self.comp_add_ao()", "def create_techanim_connections(self):\n self.import_setup()\n input_info = self.techanim_info[techanim_creator_utils.RENDER_INPUT_KEY]\n self._create_input_layer_connections(input_info)\n rigid_info = self.techanim_info[techanim_creator_utils.RIGID_KEY]\n self._create_input_layer_connections(rigid_info)\n\n # output connections to the rig/alembic\n layers = [self._wrap_ns(self.setup_config[\"render_output\"])]\n render_output_nodes = self.get_layer_nodes_info(layers)\n for layer, output_nodes in render_output_nodes.iteritems():\n for oNode in output_nodes:\n src_plug = \"{}.outMesh\".format(oNode)\n render_node 
= oNode.rpartition(self.setup_config[\"output_suffix\"])[0]\n render_node = techanim_creator_utils.removeNS(render_node)\n render_node = \"{}:{}\".format(self.target_namespace,\n render_node)\n dest_plug = \"{}.inMesh\".format(render_node)\n # test if already connected so we do not get the warnings\n if not cmds.isConnected(src_plug, dest_plug):\n try:\n cmds.connectAttr(src_plug, dest_plug, f=True)\n except Exception as e:\n plug_str = \"{} >> {}\".format(src_plug, dest_plug)\n msg = str(e)\n self.potentionally_faulty_connections[plug_str] = msg\n if self.potentionally_faulty_connections:\n self.print_faulty_connections()", "def init_layers(self):\n\n # get caching layers activated\n caching_layers = G3WCachingLayer.objects.all()\n for caching_layer in caching_layers:\n self.add_layer(str(caching_layer), caching_layer)", "def _init_layers(self) -> None:\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])", "def auto_connect_blendshape(oBlendshapes):\r\n if DRYRUN:\r\n print('connecting blendshapes to rig - DRY RUN ONLY')\r\n return False\r\n\r\n hookNodes = pm.ls('*_HOOKS', type='transform')\r\n blendShapeTransfer = []\r\n for oBlendshape in oBlendshapes:\r\n targetBlendshapeNode = oBlendshape.name()\r\n sourceBlends = [x for x in oBlendshape.listHistory(future=False, levels=5) if type(x) == pm.nodetypes.BlendShape] or None\r\n if sourceBlends:\r\n sourceBlend = sourceBlends[0]\r\n for eachWeight in sourceBlend.weight:\r\n blendshapeName = pm.aliasAttr(eachWeight, q=True)\r\n attributeName = blendshapeName.replace('_BS','')\r\n try:\r\n matchHooks = pm.PyNode('{}_override_MLT.outputX'.format(attributeName))\r\n print(matchHooks)\r\n matchHooks.connect(eachWeight, force=True)\r\n except:\r\n pm.warning('{} failed to connect.'.format(eachWeight))", "def build(self):\n\n layers = GiraffeLayer.get_all_structural()\n \n for layer in layers:\n\n self.add_objects_from_layer(layer)\n\n return self", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.retina_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n 3,\n 
padding=1)\n self.retina_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4, 3, padding=1)", "def _init_layers(self):\n self._init_predictor()\n if self.use_edge_fusion:\n self._init_edge_module()", "def connect_data_and_network(self,\n outputs_collector=None,\n gradients_collector=None):\n raise NotImplementedError", "def prepareConnectors(self):\n # Kinematic Connectors require creating node sets\n # These are created and added to the node set collection prior to writing\n\n numConnectors = 1\n\n for connector in self.connectors:\n # Node are created and are an attribute of a Connector\n self._nodeSets.append(connector.nodeset)\n\n numConnectors += 1", "def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)", "def _init_layers(self) -> None:\n self.convs_all_levels = nn.ModuleList()\n for i in range(self.start_level, self.end_level + 1):\n convs_per_level = nn.Sequential()\n convs_per_level.add_module(\n f'conv{i}',\n ConvModule(\n self.in_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n inplace=False,\n bias=False))\n self.convs_all_levels.append(convs_per_level)\n\n conv_branch = []\n for _ in range(self.num_stacked_convs):\n conv_branch.append(\n ConvModule(\n self.feat_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=False))\n self.conv_branch = nn.Sequential(*conv_branch)\n\n self.conv_pred = nn.Conv2d(\n self.feat_channels, self.out_channels, 1, stride=1)", "def connect_well_contacts(self):\n well_tap_length = self.height - self.nwell_contact_position.y\n xoffset = (self.nwell_contact_position.x \n + self.nwell_contact.second_layer_position.x \n - self.nwell_contact.first_layer_position.x)\n offset = [xoffset, self.nwell_contact_position.y]\n self.add_rect(layer=\"metal1\",\n offset=offset,\n width=drc[\"minwidth_metal1\"],\n height=well_tap_length)\n\n offset = (self.pwell_contact_position.scale(1,0)\n + self.pwell_contact.second_layer_position.scale(1,0)\n - self.pwell_contact.first_layer_position.scale(1,0))\n well_tap_length = self.pwell_contact_position.y\n self.add_rect(layer=\"metal1\",\n offset=offset,\n width=drc[\"minwidth_metal1\"],\n height=well_tap_length)", "def __init__(self, features_number, surfaces_dimensions, taus, first_layer_polarities,\n delay_coeff, net_seed = 0, verbose=False):\n self.basis = []\n self.activations = []\n self.taus = taus\n self.layers = len(features_number)\n self.surfaces_dimensions = surfaces_dimensions\n self.features_number = features_number\n self.delay_coeff = delay_coeff\n self.verbose = verbose\n self.polarities = []\n self.polarities.append(first_layer_polarities)\n # attribute containing all surfaces computed in each layer and sublayer\n self.surfaces = []\n # attribute containing all optimization errors computed in each layer \n # and sublayer\n self.errors = []\n #setting the seed\n rng = np.random.RandomState()\n if (net_seed!=0):\n rng.seed(net_seed)\n # In the first layer I am going to process only 2 polarities corresponging\n # to on off events\n num_polarities = 1 \n for layer, nfeatures in enumerate(features_number):\n #basis and activations of a single sublayer\n sublayers_basis = []\n sublayers_activations = []\n self.polarities.append(nfeatures)\n for sublayer in range(2**layer):\n #basis and activations of a single layer\n basis_set = []\n activations_set = []\n for j in range(nfeatures):\n 
basis_set.append(rng.rand(surfaces_dimensions[layer][1], surfaces_dimensions[layer][0]*num_polarities))\n basis_set[j][surfaces_dimensions[layer][1]//2, [surfaces_dimensions[layer][0]//2 + surfaces_dimensions[layer][0]*a for a in range(num_polarities)]] = 1\n #activations, or aj (as in the paper) are set randomly between -1 and 1\n activations_set.append((rng.rand()-0.5)*2)\n sublayers_basis.append(np.array(basis_set))\n sublayers_activations.append(np.array(activations_set))\n self.basis.append(sublayers_basis)\n self.activations.append(sublayers_activations)\n num_polarities = nfeatures", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * self.cls_out_channels,\n 3,\n padding=1)\n self.atss_reg = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n self.atss_iou = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n # we use the global list in loss\n self.cls_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]\n self.reg_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]" ]
[ "0.66146564", "0.58673614", "0.5616601", "0.56008357", "0.5556945", "0.55171543", "0.54460603", "0.53880274", "0.53788203", "0.53654885", "0.5359501", "0.5356802", "0.5317173", "0.53037655", "0.52933365", "0.5251121", "0.5243879", "0.521525", "0.5213942", "0.5212931", "0.5211798", "0.51735294", "0.5158098", "0.51580113", "0.5137086", "0.51092476", "0.51068175", "0.5106817", "0.50969553", "0.50728786" ]
0.6034239
1
Return a 2x2 array quickly. Arguments
def _make_2x2(self, A11, A12, A21, A22, dtype=float):
    array = np.empty((2,2), dtype=dtype)
    array[0,0] = A11
    array[0,1] = A12
    array[1,0] = A21
    array[1,1] = A22
    return array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLongArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def alloc2d(x,y,iv=0):\n return [[iv for j in range(int(x))] for i in range(int(y))]", "def getArray2d(self):\n\t\treturn self.array2d", "def make_2d(x):\n return x.reshape((1, len(x)))", "def getShortArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def array (self, length, width):\n\t\treturn [[0 for i in range(width)] for j in range(length)] #List comprehensions (Works like two for loops)", "def get_2Darray(file,cols='all',nrows='all',verbose='no'):\n if cols=='all':\n #Get the number of columns in the file\n for line in open(file).readlines():\n pieces=split(line)\n if len(pieces)==0: continue\n if line[0]=='#':continue\n nc=len(pieces)\n cols=list(range(nc))\n if verbose=='yes': print('cols=',cols)\n break\n else:\n nc=len(cols)\n \n lista=get_data(file,cols,nrows)\n nl=len(lista[0])\n x=zeros((nl,nc),float)\n for i in range(nc):x[:,i]=lista[i]\n return x", "def createarray(m,n):\n return( np.ones((m,2,n)) )", "def create_array( n ):", "def getByteArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def create2d(row_count, col_count, value=None):\n a = [None] * row_count\n for row in range(row_count):\n a[row] = [value] * col_count\n return a", "def n2m(a):\n if not isinstance(a, np.ndarray): a = np.array(a)\n return multiprocessing.Array(a.dtype.char, a.flat, lock=False), tuple(a.shape), a.dtype.char, isinstance(a, np.matrix)", "def init_two_d_array(dimens, val):\n w, x = dimens\n return [[val for j in range(x)] for i in range(w)]", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def read(self, *args):\n return_values = [[] for _ in range(len(args)+2)]\n for row in self.iter_rows(*args):\n for return_array, value in zip(return_values, row):\n return_array.append(value)\n\n return [np.array(x) for x in return_values]", "def generate_2D(X):\n\n\tno_of_images = len(X)\n\tdata = np.zeros((no_of_images, 28, 28))\n\n\tfor i in xrange(no_of_images):\n\t\tdata[i] = np.copy(X[i].reshape(28, 28))\n\n\treturn data", "def two_dim(a: cython.double[:,:]):\n a[0,0] *= 3\n return a[0,0], a[0,1], a.ndim", "def atleast_2d(x):\n return np.atleast_2d(x).T if x.ndim < 2 else x", "def algi(C):\n return np.array([ C[0,2], C[1,2], C[1,0] ])", "def getBooleanArray2D(self) -> typing.List[typing.List[bool]]:\n ...", "def getFloatArray2D(self) -> typing.List[typing.List[float]]:\n ...", "def getDoubleArray2D(self) -> typing.List[typing.List[float]]:\n ...", "def to_array(X, n=2):\n return np.array([np.eye(n)[x] for x in X])", "def get_field_array(self):\n array_j = []\n array_i = []\n n = 3\n i = self.square_size_y / 2\n while i <= self.field_height:\n if n % 2 == 1:\n j = self.square_size_x / 2\n while j <= self.field_width:\n array_j.append((j, i))\n j += self.square_size_x\n array_i.append(array_j)\n array_j = []\n n += 1\n else:\n j = 0\n while j <= self.field_width:\n array_j.append((j, i))\n j += self.square_size_x\n array_i.append(array_j)\n array_j = []\n n += 1\n i += self.square_size_y\n self.array = array_i\n return array_i", "def transform(self, x: Array2D) -> Array2D:", "def to_2dnp_array(X):\r\n if isinstance(X, np.ndarray):\r\n if X.ndim == 1:\r\n return X.reshape((-1, 1))\r\n if X.ndim == 2:\r\n return X\r\n if isinstance(X, Number):\r\n X = [X]\r\n X = np.array(X)\r\n X = X.reshape([-1, np.prod(X.shape) // X.shape[0]])\r\n return X", "def Array( *args ):\n tArray = ()\n for arg in args:\n tArray += (arg,)\n return 
tArray", "def Array( *args ):\n tArray = ()\n for arg in args:\n tArray += (arg,)\n return tArray", "def vec(x, y):\n return numpy.array([x, y], dtype = numpy.int32)" ]
[ "0.6950673", "0.6948435", "0.67974806", "0.67486435", "0.6719765", "0.6714848", "0.66816825", "0.6673269", "0.6661035", "0.6561734", "0.6459632", "0.6361773", "0.6218368", "0.6169503", "0.61442554", "0.61306435", "0.60949576", "0.60715556", "0.60360205", "0.6027216", "0.60221976", "0.6018267", "0.59812814", "0.59275186", "0.59235597", "0.5894452", "0.5862552", "0.5849557", "0.5849557", "0.5845071" ]
0.75258005
0
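A minimal standalone sketch of the _make_2x2 helper from the document field of the row above, useful as a quick sanity check. The self parameter is dropped because the method body never uses instance state, and the free-function name make_2x2 is chosen only for this illustration; both are assumptions, not part of the source row.

import numpy as np

def make_2x2(A11, A12, A21, A22, dtype=float):
    # Same element-by-element fill as the row's document field,
    # written as a free function so it runs on its own.
    array = np.empty((2, 2), dtype=dtype)
    array[0, 0] = A11
    array[0, 1] = A12
    array[1, 0] = A21
    array[1, 1] = A22
    return array

# Quick check: the layout matches np.array([[1, 2], [3, 4]]) cast to float.
assert np.array_equal(make_2x2(1, 2, 3, 4), np.array([[1.0, 2.0], [3.0, 4.0]]))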
Organize the refractive indices of the layers in the simulation. Returns
def _sort_ns(self):
    n = []
    for layer in self.structure:
        n.append(layer.get_index())
    n = np.asarray(n)
    return n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def numbering_rafts(rafts_loc, rafts_radii, num_of_rafts):\n orbiting_center = np.mean(rafts_loc, axis=0)\n orbiting_dist = np.sqrt((rafts_loc[:, 0] - orbiting_center[0]) ** 2 + (rafts_loc[:, 1] - orbiting_center[1]) ** 2)\n sorted_index = orbiting_dist.argsort()\n dist_sorted = orbiting_dist[sorted_index]\n rafts_loc_sorted = rafts_loc[sorted_index, :]\n rafts_radii_sorted = rafts_radii[sorted_index]\n\n # assign layer\n layer_index = np.ones(num_of_rafts, dtype=int)\n layer_num = 1\n for raft_id in np.arange(1, num_of_rafts):\n if dist_sorted[raft_id] - dist_sorted[raft_id - 1] > rafts_radii_sorted[raft_id]:\n layer_num = layer_num + 1\n layer_index[raft_id] = layer_num\n\n # calculate orbiting angle, note the two negative signs in front of both y- and x- components.\n # For y-component, it is for flipping image axis.\n # For x-component, it is make the counting start at x-axis and go clockwise.\n # Note the value of arctan2 is [-pi, pi]\n orbiting_angles = np.arctan2(-(rafts_loc_sorted[:, 1] - orbiting_center[1]),\n -(rafts_loc_sorted[:, 0] - orbiting_center[0])) * 180 / np.pi\n\n # concatenate and sort\n rafts_loc_radii_dist_angle_layer = \\\n np.column_stack((rafts_loc_sorted[:, 0], rafts_loc_sorted[:, 1],\n rafts_radii_sorted, dist_sorted, orbiting_angles, layer_index))\n\n sorted_index2 = np.lexsort((orbiting_angles, layer_index))\n\n rafts_loc_radii_dist_angle_layer_sorted = rafts_loc_radii_dist_angle_layer[sorted_index2]\n\n rafts_loc_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 0:2].astype(int)\n rafts_radii_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 2].astype(int)\n dist_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 3]\n angles_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 4]\n layer_index_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 5]\n\n return rafts_loc_sorted2, rafts_radii_sorted2, dist_sorted2, angles_sorted2, layer_index_sorted2", "def alt_reps_idxs(self):\n\n idxs_grp = self.h5['{}/{}'.format(SETTINGS, ALT_REPS_IDXS)]\n return {name : ds[:] for name, ds in idxs_grp.items()}", "def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd", "def reference_nodes_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_idx_references", "def makeIndexMap(self):\n\t\tn = self.numRects\n\t\thalfList = [[(j,n-1-i+j) for j in range(i+1)] for i in range(n)]\n\t\tfullList = halfList + [[(j[1],j[0]) for j in i] for i in halfList[n-2::-1]]\n\t\treturn fullList", "def layer_offsets(self):\n ...", "def mainIndices(self):\n return self.i1, self.i2", "def get_final_pruned_indices(self):\n return self.final_pruned_indices", "def _raveled_index(self):\n return np.r_[:self.size]", "def _raveled_index(self):\n return np.r_[:self.size]", "def ret_layer_index(file):\n\t\tnames={}\n\t\tfor i in range(len(file[0])):\n\t\t\tprint(file[0][i][0][0][0])\n\t\t\tnames[file[0][i][0][0][0][0]]=i\n\t\tprint(\"Success layer_index\")\n\t\treturn names", "def _compute_indices(self):\n self.indices = np.arange(len(self.im_filenames))\n np.random.shuffle(self.indices)", "def update_module_indexes(self, generation):\n self.species_module_index_map = {}\n\n if Config.blueprint_nodes_use_representatives:\n # For representatives species_module_index_map becomes: representative -> (species index, member index)\n for rep, module in self.species_module_ref_map.items():\n if module is None:\n continue\n\n for species_index, species in enumerate(generation.module_population.species):\n if module 
in species:\n self.species_module_index_map[rep] = \\\n (species_index, generation.module_population.species[species_index].members.index(module))\n break\n else:\n for spc_index, module in self.species_module_ref_map.items():\n if module is None:\n continue\n\n if spc_index < len(generation.module_population.species) and \\\n module in generation.module_population.species[spc_index]:\n\n self.species_module_index_map[spc_index] = \\\n generation.module_population.species[spc_index].members.index(module)\n\n elif Config.allow_cross_species_mappings:\n for new_species_index, species in enumerate(generation.module_population.species):\n if module in species:\n \"\"\"found module in new species\"\"\"\n self.species_module_index_map[spc_index] = \\\n (new_species_index,\n generation.module_population.species[new_species_index].members.index(module))\n break", "def ordered_indices(self):\n return self.d1.ordered_indices()\n # RETURN BASED ON D1's sizes", "def ordering(self):\n if self.dim_ordering is None:\n return list(range(self.rank))\n\n orig = self.dim_ordering.dims_and_symbols.dims\n return [orig.index(sym) for sym in self.dim_ordering.map.dims]", "def generate_reverse_index(self):", "def generate_index(self):\n begin_o, end_o, begin_a, end_a = 0, 0, 0, 0\n for obs_space, act_space in zip(self.env.observation_space, self.env.action_space):\n end_o = end_o + obs_space.shape[0]\n if isinstance(act_space, Box):\n end_a = act_space.shape[0]\n else:\n end_a = act_space.n\n range_o = (begin_o, end_o)\n range_a = (begin_a, end_a)\n self.observation_index.append(range_o)\n self.action_index.append(range_a)\n begin_o = end_o\n begin_a = end_a", "def rebuild_indexes(self):\n self.cards = sorted(self.name_to_card.values(), key=lambda card: card.name)\n self.card_sets = sorted(\n self.code_to_card_set.values(), key=lambda cset: cset.release_date\n )\n\n self.set_code_to_printings = collections.defaultdict(list)\n self.card_name_to_printings = collections.defaultdict(list)\n self.set_name_num_mv_to_printings = collections.defaultdict(list)\n\n for printing in self.id_to_printing.values():\n self.set_code_to_printings[printing.set_code].append(printing)\n self.card_name_to_printings[printing.card_name].append(printing)\n # snnm == (set, name, number, multiverseid)\n snnm_index_keys = {\n # pylint: disable=line-too-long\n (\n printing.set_code,\n printing.card_name,\n printing.set_number,\n printing.multiverseid,\n ),\n (printing.set_code, printing.card_name, None, printing.multiverseid),\n (printing.set_code, printing.card_name, printing.set_number, None),\n (printing.set_code, printing.card_name, None, None),\n }\n for key in snnm_index_keys:\n self.set_name_num_mv_to_printings[key].append(printing)\n\n for printings in self.set_code_to_printings.values():\n printings.sort(key=set_code_to_printings_key)\n\n for printings in self.card_name_to_printings.values():\n printings.sort(key=card_name_to_printing_key)\n\n # Build ordered indexes\n self.set_code_to_printing_to_row = {}\n for set_code, printings in self.set_code_to_printings.items():\n self.set_code_to_printing_to_row[set_code] = {\n printing: i for i, printing in enumerate(printings)\n }", "def ordered_indices(self):\r\n '''we need random order'''\r\n if self.shuffle:\r\n indices = np.random.permutation(len(self))\r\n else:\r\n indices = np.arange(len(self))\r\n '''\r\n if self.tgt_sizes is not None:\r\n indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]\r\n return indices[np.argsort(self.src_sizes[indices], 
kind='mergesort')]\r\n '''\r\n return indices", "def depth_rendering(ref_view, disparity_map, lf_size = (64, 512, 512, 3)):\n lf_one_way = int(math.floor(math.sqrt(lf_size[0])))\n\n x_indices = np.arange(lf_size[1])\n y_indices = np.arange(lf_size[2])\n b_indices = np.arange(lf_size[0])\n\n #Create a grid of size lf_size[:3] consisting of the pixel co ordinates of each image\n _, x, y = np.meshgrid(b_indices, x_indices, y_indices, indexing= 'ij')\n\n # Create a grid of size (lf_size[0], 2) consiting of the row, col lf positions\n grid = np.meshgrid(np.arange(lf_one_way), np.arange(lf_one_way), indexing= 'ij')\n stacked = np.stack(grid, 2)\n positions = stacked.reshape(-1, 2)\n\n # Compute the distance from each lf position from the reference view\n # Repeat the elements of this to match the size of the disparity map\n ref_pos = np.array(\n [lf_one_way // 2, lf_one_way // 2])\n distance = (np.tile(ref_pos, (lf_size[0], 1)) - positions).T\n dis_repeated = np.repeat(distance, lf_size[1] * lf_size[2], axis = 1)\n dis_repeated = dis_repeated.reshape(2, lf_size[0], lf_size[1], lf_size[2])\n\n\n # Tile the disparity map so that there is one for each lf_position - lf_size[0]\n tiled_map = np.tile(disparity_map, (lf_size[0], 1, 1))\n\n # Compute the shifted pixels\n x_shifted = (x.astype(np.float32) - tiled_map * dis_repeated[0]).flatten()\n y_shifted = (y.astype(np.float32) - tiled_map * dis_repeated[1]).flatten()\n\n #indices for linear interpolation in a square around the central point\n x_low = np.around(x_shifted).astype(int)\n #x_high = x_low + 1\n\n y_low = np.around(y_shifted).astype(int)\n #y_high = y_low + 1\n\n #Place co-ordinates outside the image back into the image\n x_low_clip = np.clip(x_low, 0, ref_view.shape[0] - 1)\n #x_high_clip = np.clip(x_high, 0, ref_view.shape[0] - 1)\n y_low_clip = np.clip(y_low, 0, ref_view.shape[1] - 1)\n #y_high_clip = np.clip(y_high, 0, ref_view.shape[1] - 1)\n\n #Gather the interpolation points\n interp_pts_1 = np.stack((x_low_clip, y_low_clip))\n #interp_pts_2 = np.stack((x_low_clip, y_high_clip))\n #interp_pts_3 = np.stack((x_high_clip, y_low_clip))\n #interp_pts_4 = np.stack((x_high_clip, y_high_clip))\n\n #Index into the images\n desired_shape = lf_size\n res_1 = torch_big_sample(ref_view, interp_pts_1, desired_shape)\n return res_1\n res_2 = torch_big_sample(ref_view, interp_pts_2, desired_shape)\n res_3 = torch_big_sample(ref_view, interp_pts_3, desired_shape)\n res_4 = torch_big_sample(ref_view, interp_pts_4, desired_shape)\n\n #Compute interpolation weights\n x_low_f = x_low.astype(np.float32)\n d_x_low = 1.0 - (x_shifted.astype(np.float32) - x_low_f)\n d_x_high = 1.0 - d_x_low\n y_low_f = y_low.astype(np.float32)\n d_y_low = 1.0 - (y_shifted.astype(np.float32) - y_low_f)\n d_y_high = 1.0 - d_y_low\n\n w1 = torch.from_numpy(d_x_low * d_y_low)\n w2 = torch.from_numpy(d_x_low * d_y_high)\n w3 = torch.from_numpy(d_x_high * d_y_low)\n w4 = torch.from_numpy(d_x_high * d_y_high)\n\n #THEY AGREE AT THIS POINT\n weighted_1 = torch.mul(repeat_weights(w1, desired_shape), res_1)\n weighted_2 = torch.mul(repeat_weights(w2, desired_shape), res_2)\n weighted_3 = torch.mul(repeat_weights(w3, desired_shape), res_3)\n weighted_4 = torch.mul(repeat_weights(w4, desired_shape), res_4)\n\n novel_view = torch.add(torch.add(weighted_1, weighted_2), weighted_3)\n torch.add(novel_view, weighted_4, out=novel_view)\n return novel_view", "def indices(self) -> np.ndarray:\n return self.impl.indices", "def get_layer_ids(\n self,\n ):\n name_to_id = {}\n for n, _ in 
self.named_parameters():\n name_to_id[n] = 0\n return name_to_id", "def resolve_decorations(self, grid, representation_grid):\n j = 0\n inserted_indices = []\n\n for i in range(grid.num_layers):\n layer_operators = _remove_duplicates(grid.layer(i))\n\n decoration_layer = [\"\"] * grid.num_wires\n\n for op in layer_operators:\n if op is None:\n continue\n\n wires = op.wires\n wire_indices = self.active_wires.indices(wires)\n\n if len(wire_indices) > 1:\n min_wire = min(wire_indices)\n max_wire = max(wire_indices)\n\n # If there is a conflict between decorations, we start a new decoration_layer\n if any(\n [decoration_layer[wire] != \"\" for wire in range(min_wire, max_wire + 1)]\n ):\n representation_grid.insert_layer(i + j, decoration_layer)\n inserted_indices.append(i + j)\n j += 1\n\n decoration_layer = [\"\"] * grid.num_wires\n\n self.add_multi_wire_connectors_to_layer(wire_indices, decoration_layer)\n\n representation_grid.insert_layer(i + j, decoration_layer)\n inserted_indices.append(i + j)\n j += 1\n\n return inserted_indices", "def main_rep_idxs(self):\n\n if '{}/{}'.format(SETTINGS, MAIN_REP_IDXS) in self.h5:\n return self.h5['{}/{}'.format(SETTINGS, MAIN_REP_IDXS)][:]\n else:\n return None", "def get_img_indices():\n if K.image_dim_ordering() == 'th':\n return 0, 1, 2, 3\n else:\n return 0, 3, 1, 2", "def _addReferees(self):\n a_ranks = self.division.GetGroupsRanks(['A'])\n b_ranks = self.division.GetGroupsRanks(['B'])\n c_ranks = self.division.GetGroupsRanks(['C'])\n d_ranks = self.division.GetGroupsRanks(['D'])\n\n self._GroupAddReferees('7th', [d_ranks[1]])\n self._GroupAddReferees('5th', [d_ranks[2]])\n\n self._GroupAddReferees('3rd',[d_ranks[0]])\n self._GroupAddReferees('final',[c_ranks[2]])", "def getGlobalIndices( self, indices: list):\n result = indices.copy()\n for i,toAdd in enumerate(self._layout.starts):\n result[self._layout.dims_order[i]]=indices[i]+toAdd\n return result", "def _sort_index(self):\n\n allAltPos = np.array(sorted(list(set(list(self.data['altitude'])))))[::-1]\n allAziPos = np.array(sorted(list(set(list(self.data['azimuth'])))))\n\n indON = [[None for azi in allAziPos] for alt in allAltPos]; indOFF = [[None for azi in allAziPos] for alt in allAltPos]\n\n for i, traceItem in enumerate(self.data):\n alt = traceItem['altitude'];azi = traceItem['azimuth'];sign = traceItem['sign']\n for j, altPos in enumerate(allAltPos):\n for k, aziPos in enumerate(allAziPos):\n if alt==altPos and azi==aziPos:\n if sign==1:\n if indON[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign: 1!')\n else: indON[j][k]=i\n\n if sign==-1:\n if indOFF[j][k] is not None: raise LookupError('Duplication of trace items found at location:'+str([alt, azi])+'; sign:-1!')\n else: indOFF[j][k]=i\n\n indON = np.array([np.array(x) for x in indON]); indOFF = np.array([np.array(x) for x in indOFF])\n\n return indON,indOFF,allAltPos,allAziPos", "def get_layers(self):\n layers = []\n\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(self.get_positions() - self.center, n).max()\n d = self.get_layer_distance(s, 2)\n l = 2 * np.round(r / d).astype(int)\n\n ls = np.arange(l-1,l+2)\n ds = np.array([self.get_layer_distance(s, i) for i in ls])\n\n mask = (np.abs(ds - r) < 1e-10)\n\n layers.append(ls[mask][0])\n\n return np.array(layers, int)", "def index(self):\n return copy.deepcopy(self._innercontainer)" ]
[ "0.5970919", "0.5969071", "0.5868115", "0.58487093", "0.5830703", "0.5737197", "0.5721988", "0.5652723", "0.5637402", "0.5637402", "0.56195587", "0.55844927", "0.5548918", "0.55183476", "0.5512431", "0.5509347", "0.5491557", "0.5490198", "0.5486574", "0.54467595", "0.54252297", "0.5414293", "0.54124254", "0.5410892", "0.54075384", "0.5401575", "0.5389764", "0.5375951", "0.53729564", "0.5359633" ]
0.6599312
0
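A hedged sketch showing one way the _sort_ns method from the document field of the row above could be exercised. Neither self.structure nor a Layer class with get_index() appears in this row, so the stubs below are assumptions made purely so the method runs unchanged; note that despite its name, the method collects the indices in layer order rather than sorting them.

import numpy as np

class Layer:
    # Hypothetical stand-in for whatever objects self.structure holds.
    def __init__(self, index):
        self._index = index

    def get_index(self):
        return self._index

class Structure:
    # Minimal host object so the method from the row above runs verbatim.
    def __init__(self, layers):
        self.structure = layers

    def _sort_ns(self):
        n = []
        for layer in self.structure:
            n.append(layer.get_index())
        n = np.asarray(n)
        return n

s = Structure([Layer(1.0), Layer(1.45), Layer(2.2)])
print(s._sort_ns())  # array of the three refractive indices, in layer order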
Handle the special case of unpolarized light by running the model for both 's' and 'p' polarizations and computing the mean of the two results. Arguments
def _unpolarized_simulation(self, frequency, theta_0=0):
    s_data = self.simulate(frequency, 's', theta_0)
    p_data = self.simulate(frequency, 'p', theta_0)
    T = (s_data + p_data)/2
    return T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
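The document field of this row (above) handles unpolarized light by averaging the 's' and 'p' polarization results, T = (T_s + T_p)/2. Below is a self-contained sketch of that averaging step; the simulate stub returning fixed transmission values is an assumption made only so the example runs, and is not the model referenced by the source.

class Model:
    def simulate(self, frequency, polarization, theta_0=0):
        # Stub: return made-up transmission values so the averaging
        # step can be demonstrated; frequency and theta_0 are ignored here.
        return {'s': 0.80, 'p': 0.90}[polarization]

    def _unpolarized_simulation(self, frequency, theta_0=0):
        s_data = self.simulate(frequency, 's', theta_0)
        p_data = self.simulate(frequency, 'p', theta_0)
        T = (s_data + p_data) / 2
        return T

print(Model()._unpolarized_simulation(150e9))  # 0.85 = (0.80 + 0.90) / 2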
[ "def modelmean(self, model_params, this_data, this_suff_stat):\n pass", "def mt(P_1,V0_1,meanF_1,rho): \n psi = np.arctan2(V0_1[2],-V0_1[0])\n \n # Find swept ares\n idx_zmax = np.argmax(P_1[:,-1,2])\n idx_ymax = np.argmax(P_1[:,-1,1])\n idx_zmin = np.argmin(P_1[:,-1,2])\n \n Ad = np.linalg.norm(P_1[idx_zmax,-1,2]-P_1[idx_zmin,-1,2])*P_1[idx_ymax,-1,1]\n print P_1[idx_zmax,-1,2]\n V0 = np.linalg.norm(V0_1)\n \n Vi_1new = np.zeros_like(V0_1,dtype=float)\n\n while True:\n Vi_1 = Vi_1new\n \n Vi_1new[0] = meanF_1[0] / (2 * rho * Ad * np.sqrt( (V0*np.cos(psi)+Vi_1[0])**2 + (-V0*np.sin(psi)+Vi_1[2])**2 )) \n Vi_1new[2] = meanF_1[2] / (2 * rho * Ad * np.sqrt( (V0*np.cos(psi)+Vi_1[0])**2 + (-V0*np.sin(psi)+Vi_1[2])**2 )) \n \n if np.linalg.norm(Vi_1-Vi_1new) < 0.001:\n break\n\n return -Vi_1", "def meanAdjust(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./zenSpacing)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n postchis = []\n prechis = []\n model_complete = []\n meas_complete = []\n Bvec_complete = []\n Sol_complete = []\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. < 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) )\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n\n if numd < 2:\n continue\n\n Neq = np.eye(numZD,dtype=float) * 0.001\n Apart = np.zeros((numd,numZD))\n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing))\n Apart[i,iz] = 1.\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Neq = np.add(Neq, np.dot(Apart.T,Apart) )\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n for val in Sol:\n Sol_complete.append(val)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n \n prechis.append(np.sqrt(prechi/numd))\n postchis.append(np.sqrt(postchi/numd))\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd))\n model = np.dot(Apart,Sol)\n\n for d in range(0,numd):\n model_complete.append(model[d])\n meas_complete.append(azData[d,3])\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n #overallPrechi = np.dot(data[:,3].T,data[:,3])\n numd = np.size(meas_complete)\n #print(\"OVERALL STATS:\", np.mean(prechis),np.mean(postchis),np.sqrt(overallPrechi/numD))\n #prechi = np.dot(data[:,3].T,data[:,3])\n prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n f = loglikelihood(meas_complete,model_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n stats = {}\n stats['prechi'] = np.sqrt(prechi/numd)\n stats['postchi'] = np.sqrt(postchi/numd)\n stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)\n stats['aic'] = aic\n stats['bic'] = bic\n\n return pwl_All, pwlSig_All,stats", "def pears():\r\n quad_1 = 
x_array - x_mean()\r\n quad_2 = y_array - y_mean()\r\n top = sum(quad_1 * quad_2)\r\n quad_3 = sum(quad_1 ** 2)\r\n quad_4 = sum(quad_2 ** 2)\r\n bottom = np.sqrt(quad_3 * quad_4)\r\n pears = top / bottom\r\n return pears", "def average_ps(self):\n\n self.powerspectrum=np.average(self.powerspectra, axis=0)", "def take_one_averaged(self):\n self.na.set_center_frequency(6.160574e9)\n self.na.set_span(10e6)\n self.na.set_power(-5, 1)\n self.na.set_ifbw(1e3)\n\n self.na.set_query_timeout(40e3)\n set_format = self.na.set_format('polar')\n print \"set_format returned: \", set_format\n self.na.set_trigger_source(\"manual\")\n self.na.set_averages(10)\n self.na.set_trigger_average_mode()\n\n self.na.clear_averages(channel=1)\n self.na.trigger_single(channel=1)\n fpts, xs, ys = self.na.read_data()\n #\n plt.figure()\n plt.plot(fpts, xs)\n plt.plot(fpts, ys)\n plt.show()", "def call(self, x, s):\n \n x = self.base(x)\n \n disp = self.disp_layer(x)\n mean = self.mean_layer(x)\n mean = self.rescale([s, mean])\n \n return mean, disp", "def R_adp(data):\n printer('S_adp = ?')\n printer('R_adp = | (U_iso_xxx - U_iso_obs) / U_iso_obs |')\n printer('mean = sum((U_iso_xxx - U_iso_obs) / U_iso_obs) / n')\n printer('abs = sum(R_adp) / n\\n')\n printer('(geometric mean is used)\\n')\n\n printer(' | ADP_calc / ADP_obs | APD_tls / ADP_obs')\n printer(' |--------------------|-------------------')\n printer(' Atom | S_adp | R_adp | S_adp | R_adp')\n printer(' ===============================================')\n S_sum = []\n R_sum = []\n S_sum_tls = []\n R_sum_tls = []\n for atom in data['exp'].atoms:\n if not atom.element == 'H':\n U_rel_calc = cg.Uiso(atom.adp['cart_sum'])\n U_rel_obs = cg.Uiso(atom.adp['cart_meas'])\n R_adp = (U_rel_calc - U_rel_obs) / U_rel_obs\n R_sum.append(R_adp)\n S_adp = ws06(atom.adp['cart_sum'], atom.adp['cart_meas'])\n S_sum.append(S_adp)\n\n U_rel_tls = cg.Uiso(atom.adp['cart_ext'])\n R_tls = (U_rel_tls - U_rel_obs) / U_rel_obs\n R_sum_tls.append(R_tls)\n\n S_tls = ws06(atom.adp['cart_ext'], atom.adp['cart_meas'])\n S_sum_tls.append(S_tls)\n\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format(atom.name,\n S_adp,\n abs(R_adp),\n S_tls,\n abs(R_tls)))\n\n printer(' ------|----------|---------|----------|--------')\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('mean',\n np.mean(S_sum),\n np.mean(R_sum),\n np.mean(S_sum_tls),\n np.mean(R_sum_tls)))\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('abs',\n np.mean(S_sum),\n np.mean([abs(i) for i in R_sum]),\n np.mean(S_sum_tls),\n np.mean(\n [abs(i) for i in R_sum_tls])))\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('SD',\n np.std(S_sum),\n np.std(R_sum),\n np.std(S_sum_tls),\n np.std(R_sum_tls)))\n if config.arg('correlate'):\n printer('\\n\\'mean R_adp (ADP_calc / ADP_obs)\\' can be\\ninterpreted as the ratio 1 - (ADP_int / ADP_obs).')\n else:\n printer('\\n\\'mean R_adp (ADP_tls / ADP_obs)\\' can be\\ninterpreted as the ratio 1 - (ADP_obs / ADP_int).')", "def apply(self, nn_state, samples):\n\n samples = to_pm1(samples) # convert to +/- 1 format\n L = samples.shape[-1] # length of the spin chain\n if self.periodic_bcs:\n perm_indices = [(i + self.c) % L for i in range(L)]\n interaction_terms = samples * samples[:, perm_indices]\n else:\n interaction_terms = samples[:, : -self.c] * samples[:, self.c :]\n\n # average over spin sites.\n # not using mean bc interaction_terms.shape[-1] < num_spins = L\n return interaction_terms.sum(1).div_(L)", "def 
reprojection_error_mean(*args, **kwargs):\n return np.mean(reprojection_error_vector(*args, **kwargs))", "def _star_pu(rho_l, u_l, p_l, c_l, rho_r, u_r, p_r, c_r, p_guess):\n fl = _flux_fsolve(p_guess, rho_l, c_l, p_l)\n fr = _flux_fsolve(p_guess, rho_r, c_r, p_r)\n f = lambda p: fl(p) + fr(p) + u_r - u_l\n from scipy.optimize import fsolve\n p_star = fsolve(f, 0.0)\n u_star = (\n 0.5 * (u_l + u_r + _flux_fsolve(p_star, rho_r, c_r, p_r)(p_star) -\n _flux_fsolve(p_star, rho_l, c_l, p_l)(p_star))\n )\n return p_star, u_star", "def calculate_mean(cls, sensor):\n try:\n if sensor == 't':\n return cls.calculate_y_pos(sum(cls.mean_t) / len(cls.mean_t), sensor)\n if sensor == 'l':\n return cls.calculate_y_pos(sum(cls.mean_l) / len(cls.mean_l), sensor)\n except ZeroDivisionError:\n return None", "def mean_error(r_t_s, r_t_s_exact):\n \n err = np.mean(np.abs(r_t_s - r_t_s_exact), axis = 0)\n return err", "def _pearson_r(x, y):\n if _allequal(x) or _allequal(y):\n return np.nan\n\n return (np.mean(x * y) - np.mean(x) * np.mean(y)) / np.std(x) / np.std(y)", "def main():\n\n from argparse import ArgumentParser, RawDescriptionHelpFormatter\n from textwrap import dedent\n parser = ArgumentParser(description=dedent(main.__doc__),\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('--version', action='version', version='%(prog)s 1.0')\n args = parser.parse_args()\n\n import itertools\n from numpy import zeros, matrix, linalg, array\n\n # Create a file ('oa' for orientational average)\n fh_pol = open('oa_raman.data', 'w')\n fh_hpol = open('oa_hyperraman.data', 'w')\n fh_2hpol = open('oa_secondhyperraman.data', 'w')\n\n # +++++ Polarizability +++++\n\n # For the polarizability, we are concerned with the average:\n # <alpha_ij^2> = sum_{ab,cd}[ <T_ia*T_jb*T_ic*T_jd> alpha_ab*alpha_cd ]\n #\n # For Raman scattering measured in a perpendicular orientation, we need\n # the averages <alpha_ii^2> and <alpha_ij^2>. For averaging of the 4th\n # rank tensor on the right side of the equation, only two circumstances\n # give nonzero averages:\n # 1. a = b = c = d\n # 2. 
a = b, c = d\n # These are stored in the lists below.\n #laaaa = ['a', 'a', 'a', 'a']\n #laabb = ['a', 'a', 'b', 'b']\n laaaa = [1, 1, 1, 1]\n laabb = [1, 1, 2, 2]\n\n saaaa = set()\n saabb = set()\n\n genaaaa = itertools.permutations(laaaa,4)\n genaabb = itertools.permutations(laabb,4)\n\n txt = 'Polarizability Averaging Indices'\n print(len(txt)*'+', file=fh_pol) \n print(txt, file=fh_pol)\n print(len(txt)*'+', file=fh_pol)\n\n # Size of the basis set and number of linearly independent terms\n r4nn, r4qn = fullpermutations(4)\n print('', file=fh_pol)\n txt = 'For a tensor of rank 4'\n print('*'*2*len(txt), file=fh_pol)\n print(txt, file=fh_pol)\n print('*'*2*len(txt), file=fh_pol)\n txt = 'Size of basis set = ' + str(r4nn)\n print(txt, file=fh_pol)\n txt = 'Number of linearly independent terms = ' + str(r4qn)\n print(txt, file=fh_pol)\n print('', file=fh_pol)\n\n # Terms with aa,aa\n txt = 'Indices for aa,aa terms'\n print(len(txt)*'=', file=fh_pol)\n print(txt, file=fh_pol)\n print(len(txt)*'=', file=fh_pol)\n for i in genaaaa:\n if i not in saaaa:\n print(i, file=fh_pol)\n saaaa.add(i)\n\n print('', file=fh_pol)\n # Terms with aa,bb (all permutations)\n txt = 'Indices for aa,bb terms'\n print(len(txt)*'=', file=fh_pol)\n print(txt, file=fh_pol)\n print(len(txt)*'=', file=fh_pol)\n for i in genaabb:\n if i not in saabb:\n print(i, file=fh_pol)\n saabb.add(i)\n\n print('', file=fh_pol)\n print('~'*30, file=fh_pol)\n print('Number of aa,aa terms', len(saaaa), file=fh_pol)\n print('Number of aa,bb terms', len(saabb), file=fh_pol)\n print('~'*30, file=fh_pol)\n print('', file=fh_pol)\n\n # Terms for Mathematica\n print('%'*30, file=fh_pol)\n print('Mathematica style output', file=fh_pol)\n print('%'*30, file=fh_pol) \n print('', file=fh_pol)\n\n # Basis vectors in the experimental reference frame\n r4exp, r4mol = vectors_exp_mol(4)\n print('Experimental reference frame basis vectors', file=fh_pol)\n for item in r4exp:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n\n # Matrix for generating orientational averages\n smat, vexp, vmol = generate_smat_and_vecs(r4nn,4,False)\n print('S matrix', file=fh_pol)\n print(smat, file=fh_pol)\n print('', file=fh_pol)\n \n # Basis vectors in the molecular reference frame\n print('Molecular reference frame basis vectors', file=fh_pol)\n for item in r4mol:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n\n # Experimental vector containing basis vectors\n print('Experimental total vector', file=fh_pol)\n print(vexp, file=fh_pol)\n print('', file=fh_pol)\n\n # Molecular vector containing basis vectors\n print('Molecular total vector', file=fh_pol)\n print(vmol, file=fh_pol)\n print('', file=fh_pol)\n\n # Index equivalence for molecular reference frame data\n data, avg_alphaii, avg_alphaij = pol_mathematica(saaaa, saabb) \n\n print('Index equivalence for molecular reference frame vectors', file=fh_pol)\n for item in data:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n\n print('Polarizability Average Terms', file=fh_pol)\n print('<alpha_ii^2> term', file=fh_pol)\n for item in avg_alphaii:\n print(item, file=fh_pol)\n print('', file=fh_pol)\n print('<alpha_ij^2> term', file=fh_pol)\n for item in avg_alphaij:\n print(item, file=fh_pol)\n\n # +++++ First Hyperpolarizability +++++\n\n # For the first hyperpolarizability, we are concerned with the average:\n # <beta_ijk^2> \n # = sum_{abc,def}[ <T_ia*T_jb*T_kc*T_id*T_je*T_kf> beta_abc*beta_def ]\n #\n # For hyper-Raman scattering measured in a perpendicular orientation, we need\n # the averages 
<beta_iii^2> and <beta_ijj^2>. For averaging of the 6th\n # rank tensor on the right side of the equation, three circumstances\n # give nonzero averages:\n # 1. a = b = c = d = e = f\n # 2. a = b = c = d, e = f\n # 3. a = b, c = d, e = f\n # These are stored in the lists below.\n #laaaaaa = ['a', 'a', 'a', 'a', 'a', 'a']\n #laaaabb = ['a', 'a', 'a', 'a', 'b', 'b']\n #laabbcc = ['a', 'a', 'b', 'b', 'c', 'c']\n laaaaaa = [1, 1, 1, 1, 1, 1]\n laaaabb = [1, 1, 1, 1, 2, 2]\n laabbcc = [1, 1, 2, 2, 3, 3]\n\n saaaaaa = set()\n saaaabb = set()\n saabbcc = set()\n\n genaaaaaa = itertools.permutations(laaaaaa,6)\n genaaaabb = itertools.permutations(laaaabb,6)\n genaabbcc = itertools.permutations(laabbcc,6)\n\n txt = 'First hyperpolarizability Averaging Indices'\n print(len(txt)*'+', file=fh_hpol) \n print(txt, file=fh_hpol)\n print(len(txt)*'+', file=fh_hpol)\n\n # Size of the basis set and number of linearly independent terms\n r6nn, r6qn = fullpermutations(6)\n print('', file=fh_hpol)\n txt = 'For a tensor of rank 6'\n print('*'*2*len(txt), file=fh_hpol)\n print(txt, file=fh_hpol)\n print('*'*2*len(txt), file=fh_hpol)\n txt = 'Size of basis set = ' + str(r6nn)\n print(txt, file=fh_hpol)\n txt = 'Number of linearly independent terms = ' + str(r6qn)\n print(txt, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Terms with aaa,aaa\n txt = 'Indices for aaa,aaa terms'\n print(len(txt)*'=', file=fh_hpol)\n print(txt, file=fh_hpol)\n print(len(txt)*'=', file=fh_hpol)\n for i in genaaaaaa:\n if i not in saaaaaa:\n print(i, file=fh_hpol)\n saaaaaa.add(i)\n\n print('', file=fh_hpol)\n # Terms with aaa,abb (all permutations)\n txt = 'Indices for aaa,abb terms'\n print(len(txt)*'=', file=fh_hpol)\n print(txt, file=fh_hpol)\n print(len(txt)*'=', file=fh_hpol)\n for i in genaaaabb:\n if i not in saaaabb:\n print(i, file=fh_hpol)\n saaaabb.add(i)\n\n print('', file=fh_hpol)\n # Terms with aab,bcc (all permutations)\n # Here, we need to be careful that we don't overcount terms. It\n # is very easy to come up with an overcomplete basis.\n txt = 'Indices for aab,bcc terms'\n print(len(txt)*'=', file=fh_hpol)\n print(txt, file=fh_hpol)\n print(len(txt)*'=', file=fh_hpol)\n\n # This will generate all combinations of the aab,bcc terms. However,\n # it requires more prior knowledge than I'd like. \n #count1 = 0\n #count2 = 0\n #count3 = 0\n #count4 = 0\n #count5 = 0\n #for i in genaabbcc:\n # if i not in saabbcc:\n # if i[1] == 1:\n # count1 +=1\n # if count1 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[2] == 1:\n # count2 +=1\n # if count2 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[3] == 1:\n # count3 +=1\n # if count3 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[4] == 1:\n # count4 +=1\n # if count4 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # elif i[5] == 1:\n # count5 +=1\n # if count5 <= 3:\n # print(i, file=fh_hpol)\n # saabbcc.add(i)\n # Generate all combinations of aab,bcc terms. 
We remove the redundant\n # elements next.\n for i in genaabbcc:\n if i not in saabbcc:\n saabbcc.add(i)\n\n # Basis functions of Kronecker delta products\n f61m = \"krond(a,b)*krond(c,d)*krond(e,f)\"\n f62m = \"krond(a,b)*krond(c,e)*krond(d,f)\"\n f63m = \"krond(a,b)*krond(c,f)*krond(d,e)\"\n f64m = \"krond(a,c)*krond(b,d)*krond(e,f)\"\n f65m = \"krond(a,c)*krond(b,e)*krond(d,f)\"\n f66m = \"krond(a,c)*krond(b,f)*krond(d,e)\"\n f67m = \"krond(a,d)*krond(b,c)*krond(e,f)\"\n f68m = \"krond(a,d)*krond(b,e)*krond(c,f)\"\n f69m = \"krond(a,d)*krond(b,f)*krond(c,e)\"\n f610m = \"krond(a,e)*krond(b,c)*krond(d,f)\"\n f611m = \"krond(a,e)*krond(b,d)*krond(c,f)\"\n f612m = \"krond(a,e)*krond(b,f)*krond(c,d)\"\n f613m = \"krond(a,f)*krond(b,c)*krond(d,e)\"\n f614m = \"krond(a,f)*krond(b,d)*krond(c,e)\"\n f615m = \"krond(a,f)*krond(b,e)*krond(c,d)\"\n\n lmol = [ f61m, f62m, f63m, f64m, f65m, \n f66m, f67m, f68m, f69m, f610m,\n f611m, f612m, f613m, f614m, f615m ]\n\n # Temporary set for checking uniqueness\n stmp = set()\n # This set stores the elements of saabbcc that are redundant when \n # we insert values of the indices.\n sintersect = set()\n # Loop through the elements of saabbcc\n for item in saabbcc:\n # Assign values to the indices\n a = item[0]\n b = item[1]\n c = item[2]\n d = item[3]\n e = item[4]\n f = item[5]\n # Temporary list for storing vectors with values\n tmp = []\n for vec in lmol:\n # Evaluate the value of the Kronecker delta products\n v = eval_krond(vec, a, b, c, d, e, f, 0, 0)\n tmp.append(v)\n # We need immutable objects to add in a set\n tmp = tuple(tmp)\n # Add to sintersect if the item is in stmp\n if tmp in stmp:\n sintersect.add(item)\n # Add to stmp if it isn't present\n else:\n stmp.add(tmp)\n # This function removes elements of saabbcc that intersect with\n # elements of sintersect. 
The result is a set containing only the \n # unique elements.\n saabbcc.difference_update(sintersect)\n\n # Print elements of saabbcc.\n for i in saabbcc:\n print(i, file=fh_hpol)\n\n print('', file=fh_hpol)\n print('~'*30, file=fh_hpol)\n print('Number of aaa,aaa terms', len(saaaaaa), file=fh_hpol)\n print('Number of aaa,abb terms', len(saaaabb), file=fh_hpol)\n print('Number of aab,bcc terms', len(saabbcc), file=fh_hpol)\n print('~'*30, file=fh_hpol)\n print('', file=fh_hpol)\n\n print('%'*30, file=fh_hpol)\n print('Mathematica style output', file=fh_hpol)\n print('%'*30, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Basis vectors in the experimental reference frame\n r6exp, r6mol = vectors_exp_mol(6)\n print('Experimental reference frame basis vectors', file=fh_hpol)\n for item in r6exp:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Matrix for generating orientational averages\n smat, vexp, vmol = generate_smat_and_vecs(r6nn,6,False)\n print('S matrix', file=fh_hpol)\n print(smat, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Basis vectors in the molecular reference frame\n print('Molecular reference frame basis vectors', file=fh_hpol)\n for item in r6mol:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Experimental vector containing basis vectors\n print('Experimental total vector', file=fh_hpol)\n print(vexp, file=fh_hpol)\n print('', file=fh_hpol)\n\n # Molecular vector containing basis vectors\n print('Molecular total vector', file=fh_hpol)\n print(vmol, file=fh_hpol)\n print('', file=fh_hpol)\n\n data, avg_betaiii, avg_betaijj = hpol_mathematica(saaaaaa, saaaabb, saabbcc)\n\n print('Set up molecular reference frame vectors', file=fh_hpol)\n for item in data:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n\n print('Hyperpolarizability Average Terms', file=fh_hpol)\n print('<beta_iii^2> term', file=fh_hpol)\n for item in avg_betaiii:\n print(item, file=fh_hpol)\n print('', file=fh_hpol)\n print('<beta_ijj^2> term', file=fh_hpol)\n for item in avg_betaijj:\n print(item, file=fh_hpol)\n\n # +++++ Second Hyperpolarizability +++++\n\n # For the second hyperpolarizability, we are concerned with the average:\n # <gamma_ijkl^2> \n # = sum_{abcd,efgh}[ <T_ia*T_jb*T_kc*T_ld*T_ie*T_jf*T_kg*T_lh> gamma_abcd*gamma_efgh ]\n #\n # For second hyper-Raman scattering measured in a perpendicular orientation, we need\n # the averages <gamma_iiii^2> and <gamma_ijjj^2>. For averaging of the 8th\n # rank tensor on the right side of the equation, four circumstances\n # give nonzero averages:\n # 1. a = b = c = d = e = f = g = h\n # 2. a = b = c = d = e = f, g = h\n # 3. a = b = c = d, e = f = g = h\n # 4. a = b = c = d, e = f, g = h\n # These are stored in the lists below.\n #laaaaaaaa = ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']\n #laaaaaabb = ['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b']\n #laaaabbbb = ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b']\n #laaaabbcc = ['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c']\n laaaaaaaa = [1, 1, 1, 1, 1, 1, 1, 1]\n laaaaaabb = [1, 1, 1, 1, 1, 1, 2, 2]\n laaaabbbb = [1, 1, 1, 1, 2, 2, 2, 2]\n laaaabbcc = [1, 1, 1, 1, 2, 2, 3, 3]\n # This type of average is actually equivalent to the fourth term,\n # because the indices can only be x, y, or z. 
\n #laabbccdd = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd']\n\n saaaaaaaa = set()\n saaaaaabb = set()\n saaaabbbb = set()\n saaaabbcc = set()\n #saabbccdd = set()\n\n genaaaaaaaa = itertools.permutations(laaaaaaaa,8)\n genaaaaaabb = itertools.permutations(laaaaaabb,8)\n genaaaabbbb = itertools.permutations(laaaabbbb,8)\n genaaaabbcc = itertools.permutations(laaaabbcc,8)\n #genaabbccdd = itertools.permutations(laabbccdd,8)\n\n txt = 'Second hyperpolarizability Averaging Indices'\n print(len(txt)*'+', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'+', file=fh_2hpol)\n\n # Size of the basis set and number of linearly independent terms\n r8nn, r8qn = fullpermutations(8)\n print('', file=fh_2hpol)\n txt = 'For a tensor of rank 8'\n print('*'*2*len(txt), file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print('*'*2*len(txt), file=fh_2hpol)\n txt = 'Size of basis set = ' + str(r8nn)\n print(txt, file=fh_2hpol)\n txt = 'Number of linearly independent terms = ' + str(r8qn)\n print(txt, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Terms with aaaa,aaaa\n txt = 'Indices for aaaa,aaaa terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n for i in genaaaaaaaa:\n if i not in saaaaaaaa:\n print(i, file=fh_2hpol)\n saaaaaaaa.add(i)\n\n print('', file=fh_2hpol)\n # Terms with aaaa,aabb (all permutations)\n txt = 'Indices for aaaa,aabb terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n for i in genaaaaaabb:\n if i not in saaaaaabb:\n print(i, file=fh_2hpol)\n saaaaaabb.add(i)\n\n print('', file=fh_2hpol)\n # Terms with aaaa,bbbb (all permutations)\n txt = 'Indices for aaaa,bbbb terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n for i in genaaaabbbb:\n if i not in saaaabbbb:\n print(i, file=fh_2hpol)\n saaaabbbb.add(i)\n\n print('', file=fh_2hpol)\n # Terms with aaaa,bbcc (all permutations)\n txt = 'Indices for aaaa,bbcc terms'\n print(len(txt)*'=', file=fh_2hpol)\n print(txt, file=fh_2hpol)\n print(len(txt)*'=', file=fh_2hpol)\n # Temporarily, we store all permutations. There are 420 permutations,\n # but only 210 of them are unique.\n for i in genaaaabbcc:\n if i not in saaaabbcc:\n #print(i, file=fh_2hpol)\n saaaabbcc.add(i)\n\n # Set up the Kronecker delta products as strings. 
\n f81m = 'krond(a,b)*krond(c,d)*krond(e,f)*krond(g,h)'\n f82m = 'krond(a,b)*krond(c,d)*krond(e,g)*krond(f,h)'\n f83m = 'krond(a,b)*krond(c,d)*krond(e,h)*krond(f,g)'\n f84m = 'krond(a,b)*krond(c,e)*krond(d,f)*krond(g,h)'\n f85m = 'krond(a,b)*krond(c,e)*krond(d,g)*krond(f,h)'\n f86m = 'krond(a,b)*krond(c,e)*krond(d,h)*krond(f,g)'\n f87m = 'krond(a,b)*krond(c,f)*krond(d,e)*krond(g,h)'\n f88m = 'krond(a,b)*krond(c,f)*krond(d,g)*krond(e,h)'\n f89m = 'krond(a,b)*krond(c,f)*krond(d,h)*krond(e,g)'\n f810m = 'krond(a,b)*krond(c,g)*krond(d,e)*krond(f,h)'\n f811m = 'krond(a,b)*krond(c,g)*krond(d,f)*krond(e,h)'\n f812m = 'krond(a,b)*krond(c,g)*krond(d,h)*krond(e,f)'\n f813m = 'krond(a,b)*krond(c,h)*krond(d,e)*krond(f,g)'\n f814m = 'krond(a,b)*krond(c,h)*krond(d,f)*krond(e,g)'\n f815m = 'krond(a,b)*krond(c,h)*krond(d,g)*krond(e,f)'\n f816m = 'krond(a,c)*krond(b,d)*krond(e,f)*krond(g,h)'\n f817m = 'krond(a,c)*krond(b,d)*krond(e,g)*krond(f,h)'\n f818m = 'krond(a,c)*krond(b,d)*krond(e,h)*krond(f,g)'\n f819m = 'krond(a,c)*krond(b,e)*krond(d,f)*krond(g,h)'\n f820m = 'krond(a,c)*krond(b,e)*krond(d,g)*krond(f,h)'\n f821m = 'krond(a,c)*krond(b,e)*krond(d,h)*krond(f,g)'\n f822m = 'krond(a,c)*krond(b,f)*krond(d,e)*krond(g,h)'\n f823m = 'krond(a,c)*krond(b,f)*krond(d,g)*krond(e,h)'\n f824m = 'krond(a,c)*krond(b,f)*krond(d,h)*krond(e,g)'\n f825m = 'krond(a,c)*krond(b,g)*krond(d,e)*krond(f,h)'\n f826m = 'krond(a,c)*krond(b,g)*krond(d,f)*krond(e,h)'\n f827m = 'krond(a,c)*krond(b,g)*krond(d,h)*krond(e,f)'\n f828m = 'krond(a,c)*krond(b,h)*krond(d,e)*krond(f,g)'\n f829m = 'krond(a,c)*krond(b,h)*krond(d,f)*krond(e,g)'\n f830m = 'krond(a,c)*krond(b,h)*krond(d,g)*krond(e,f)'\n f831m = 'krond(a,d)*krond(b,c)*krond(e,f)*krond(g,h)'\n f832m = 'krond(a,d)*krond(b,c)*krond(e,g)*krond(f,h)'\n f833m = 'krond(a,d)*krond(b,c)*krond(e,h)*krond(f,g)'\n f834m = 'krond(a,d)*krond(b,e)*krond(c,f)*krond(g,h)'\n f835m = 'krond(a,d)*krond(b,e)*krond(c,g)*krond(f,h)'\n f836m = 'krond(a,d)*krond(b,e)*krond(c,h)*krond(f,g)'\n f837m = 'krond(a,d)*krond(b,f)*krond(c,e)*krond(g,h)'\n f838m = 'krond(a,d)*krond(b,f)*krond(c,g)*krond(e,h)'\n f839m = 'krond(a,d)*krond(b,f)*krond(c,h)*krond(e,g)'\n f840m = 'krond(a,d)*krond(b,g)*krond(c,e)*krond(f,h)'\n f841m = 'krond(a,d)*krond(b,g)*krond(c,f)*krond(e,h)'\n f842m = 'krond(a,d)*krond(b,g)*krond(c,h)*krond(e,f)'\n f843m = 'krond(a,d)*krond(b,h)*krond(c,e)*krond(f,g)'\n f844m = 'krond(a,d)*krond(b,h)*krond(c,f)*krond(e,g)'\n f845m = 'krond(a,d)*krond(b,h)*krond(c,g)*krond(e,f)'\n f846m = 'krond(a,e)*krond(b,c)*krond(d,f)*krond(g,h)'\n f847m = 'krond(a,e)*krond(b,c)*krond(d,g)*krond(f,h)'\n f848m = 'krond(a,e)*krond(b,c)*krond(d,h)*krond(f,g)'\n f849m = 'krond(a,e)*krond(b,d)*krond(c,f)*krond(g,h)'\n f850m = 'krond(a,e)*krond(b,d)*krond(c,g)*krond(f,h)'\n f851m = 'krond(a,e)*krond(b,d)*krond(c,h)*krond(f,g)'\n f852m = 'krond(a,e)*krond(b,f)*krond(c,d)*krond(g,h)'\n f853m = 'krond(a,e)*krond(b,f)*krond(c,g)*krond(d,h)'\n f854m = 'krond(a,e)*krond(b,f)*krond(c,h)*krond(d,g)'\n f855m = 'krond(a,e)*krond(b,g)*krond(c,d)*krond(f,h)'\n f856m = 'krond(a,e)*krond(b,g)*krond(c,f)*krond(d,h)'\n f857m = 'krond(a,e)*krond(b,g)*krond(c,h)*krond(d,f)'\n f858m = 'krond(a,e)*krond(b,h)*krond(c,d)*krond(f,g)'\n f859m = 'krond(a,e)*krond(b,h)*krond(c,f)*krond(d,g)'\n f860m = 'krond(a,e)*krond(b,h)*krond(c,g)*krond(d,f)'\n f861m = 'krond(a,f)*krond(b,c)*krond(d,e)*krond(g,h)'\n f862m = 'krond(a,f)*krond(b,c)*krond(d,g)*krond(e,h)'\n f863m = 'krond(a,f)*krond(b,c)*krond(d,h)*krond(e,g)'\n f864m = 
'krond(a,f)*krond(b,d)*krond(c,e)*krond(g,h)'\n f865m = 'krond(a,f)*krond(b,d)*krond(c,g)*krond(e,h)'\n f866m = 'krond(a,f)*krond(b,d)*krond(c,h)*krond(e,g)'\n f867m = 'krond(a,f)*krond(b,e)*krond(c,d)*krond(g,h)'\n f868m = 'krond(a,f)*krond(b,e)*krond(c,g)*krond(d,h)'\n f869m = 'krond(a,f)*krond(b,e)*krond(c,h)*krond(d,g)'\n f870m = 'krond(a,f)*krond(b,g)*krond(c,d)*krond(e,h)'\n f871m = 'krond(a,f)*krond(b,g)*krond(c,e)*krond(d,h)'\n f872m = 'krond(a,f)*krond(b,g)*krond(c,h)*krond(d,e)'\n f873m = 'krond(a,f)*krond(b,h)*krond(c,d)*krond(e,g)'\n f874m = 'krond(a,f)*krond(b,h)*krond(c,e)*krond(d,g)'\n f875m = 'krond(a,f)*krond(b,h)*krond(c,g)*krond(d,e)'\n f876m = 'krond(a,g)*krond(b,c)*krond(d,e)*krond(f,h)'\n f877m = 'krond(a,g)*krond(b,c)*krond(d,f)*krond(e,h)'\n f878m = 'krond(a,g)*krond(b,c)*krond(d,h)*krond(e,f)'\n f879m = 'krond(a,g)*krond(b,d)*krond(c,e)*krond(f,h)'\n f880m = 'krond(a,g)*krond(b,d)*krond(c,f)*krond(e,h)'\n f881m = 'krond(a,g)*krond(b,d)*krond(c,h)*krond(e,f)'\n f882m = 'krond(a,g)*krond(b,e)*krond(c,d)*krond(f,h)'\n f883m = 'krond(a,g)*krond(b,e)*krond(c,f)*krond(d,h)'\n f884m = 'krond(a,g)*krond(b,e)*krond(c,h)*krond(d,f)'\n f885m = 'krond(a,g)*krond(b,f)*krond(c,d)*krond(e,h)'\n f886m = 'krond(a,g)*krond(b,f)*krond(c,e)*krond(d,h)'\n f887m = 'krond(a,g)*krond(b,f)*krond(c,h)*krond(d,e)'\n f888m = 'krond(a,g)*krond(b,h)*krond(c,d)*krond(e,f)'\n f889m = 'krond(a,g)*krond(b,h)*krond(c,e)*krond(d,f)'\n f890m = 'krond(a,g)*krond(b,h)*krond(c,f)*krond(d,e)'\n f891m = 'krond(a,h)*krond(b,c)*krond(d,e)*krond(f,g)'\n f892m = 'krond(a,h)*krond(b,c)*krond(d,f)*krond(e,g)'\n f893m = 'krond(a,h)*krond(b,c)*krond(d,g)*krond(e,f)'\n f894m = 'krond(a,h)*krond(b,d)*krond(c,e)*krond(f,g)'\n f895m = 'krond(a,h)*krond(b,d)*krond(c,f)*krond(e,g)'\n f896m = 'krond(a,h)*krond(b,d)*krond(c,g)*krond(e,f)'\n f897m = 'krond(a,h)*krond(b,e)*krond(c,d)*krond(f,g)'\n f898m = 'krond(a,h)*krond(b,e)*krond(c,f)*krond(d,g)'\n f899m = 'krond(a,h)*krond(b,e)*krond(c,g)*krond(d,f)'\n f8100m = 'krond(a,h)*krond(b,f)*krond(c,d)*krond(e,g)'\n f8101m = 'krond(a,h)*krond(b,f)*krond(c,e)*krond(d,g)'\n f8102m = 'krond(a,h)*krond(b,f)*krond(c,g)*krond(d,e)'\n f8103m = 'krond(a,h)*krond(b,g)*krond(c,d)*krond(e,f)'\n f8104m = 'krond(a,h)*krond(b,g)*krond(c,e)*krond(d,f)'\n f8105m = 'krond(a,h)*krond(b,g)*krond(c,f)*krond(d,e)'\n\n # Molecular vector of basis functions\n lmol = [ f81m, f82m, f83m, f84m, f85m,\n f86m, f87m, f88m, f89m, f810m,\n f811m, f812m, f813m, f814m, f815m,\n f816m, f817m, f818m, f819m, f820m,\n f821m, f822m, f823m, f824m, f825m,\n f826m, f827m, f828m, f829m, f830m,\n f831m, f832m, f833m, f834m, f835m,\n f836m, f837m, f838m, f839m, f840m,\n f841m, f842m, f843m, f844m, f845m,\n f846m, f847m, f848m, f849m, f850m,\n f851m, f852m, f853m, f854m, f855m,\n f856m, f857m, f858m, f859m, f860m,\n f861m, f862m, f863m, f864m, f865m,\n f866m, f867m, f868m, f869m, f870m,\n f871m, f872m, f873m, f874m, f875m,\n f876m, f877m, f878m, f879m, f880m,\n f881m, f882m, f883m, f884m, f885m,\n f886m, f887m, f888m, f889m, f890m,\n f891m, f892m, f893m, f894m, f895m,\n f896m, f897m, f898m, f899m, f8100m,\n f8101m, f8102m, f8103m, f8104m, f8105m ]\n\n # Temporary set for checking uniqueness\n stmp = set()\n # This set stores the elements of saaaabbcc that are redundant when \n # we insert values of the indices.\n sintersect = set()\n # Loop through the elements of saaaabbcc\n for item in saaaabbcc:\n # Assign values to the indices\n a = item[0]\n b = item[1]\n c = item[2]\n d = item[3]\n e = item[4]\n f = item[5]\n g = 
item[6]\n h = item[7]\n # Temporary list for storing vectors with values\n tmp = []\n for vec in lmol:\n # Evaluate the value of the Kronecker delta products\n v = eval_krond(vec, a, b, c, d, e, f, g, h)\n tmp.append(v)\n # We need immutable objects to add in a set\n tmp = tuple(tmp)\n # Add to sintersect if the item is in stmp\n if tmp in stmp:\n sintersect.add(item)\n # Add to stmp if it isn't present\n else:\n stmp.add(tmp)\n # This function removes elements of saaaabbcc that intersect with\n # elements of sintersect. The result is a set containing only the \n # unique elements.\n saaaabbcc.difference_update(sintersect)\n\n # Print elements of saaaabbcc.\n for i in saaaabbcc:\n print(i, file=fh_2hpol)\n\n print('', file=fh_2hpol)\n print('~'*30, file=fh_2hpol)\n print('Number of aaaa,aaaa terms', len(saaaaaaaa), file=fh_2hpol)\n print('Number of aaaa,aabb terms', len(saaaaaabb), file=fh_2hpol)\n print('Number of aaaa,bbbb terms', len(saaaabbbb), file=fh_2hpol)\n print('Number of aaaa,bbcc terms', len(saaaabbcc), file=fh_2hpol)\n print('~'*30, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n print('%'*30, file=fh_2hpol)\n print('Mathematica style output', file=fh_2hpol)\n print('%'*30, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Basis vectors in the experimental reference frame\n r8exp, r8mol = vectors_exp_mol(8)\n print('Experimental reference frame basis vectors', file=fh_2hpol)\n for item in r8exp:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Matrix for generating orientational averages\n smat, vexp, vmol = generate_smat_and_vecs(r8nn,8,False)\n print('S matrix', file=fh_2hpol)\n print(smat, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Basis vectors in the molecular reference frame\n print('Molecular reference frame basis vectors', file=fh_2hpol)\n for item in r8mol:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Experimental vector containing basis vectors\n print('Experimental total vector', file=fh_2hpol)\n print(vexp, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Molecular vector containing basis vectors\n print('Molecular total vector', file=fh_2hpol)\n print(vmol, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n data, avg_gammaiiii, avg_gammaijjj = secondhpol_mathematica(saaaaaaaa, saaaaaabb, saaaabbbb, saaaabbcc)\n\n print('Set up molecular reference frame vectors', file=fh_2hpol)\n for item in data:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n print('Second Hyperpolarizability Average Terms', file=fh_2hpol)\n print('<gamma_iiii^2> term', file=fh_2hpol)\n for item in avg_gammaiiii:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n print('<gamma_ijjj^2> term', file=fh_2hpol)\n for item in avg_gammaijjj:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Print out the irreducible bases\n red_expbasis, red_molbasis = reduced_basis_2hpol()\n\n print('Irreducible experimental reference frame basis vectors', file=fh_2hpol)\n for item in red_expbasis:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n print('Irreducible molecular reference frame basis vectors', file=fh_2hpol)\n for item in red_molbasis:\n print(item, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Generate the S matrix and total vectors in the irreducible bases\n smat, vexp, vmol = generate_smat_and_vecs(r8qn,8,True)\n \n # Irreducible S matrix\n print('Irreducible S matrix', file=fh_2hpol)\n print(smat, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Irreducible experimental vector containing basis vectors\n print('Irreducible experimental 
total vector', file=fh_2hpol)\n print(vexp, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Irreducible molecular vector containing basis vectors\n print('Irreducible molecular total vector', file=fh_2hpol)\n print(vmol, file=fh_2hpol)\n print('', file=fh_2hpol)\n\n # Close the files\n fh_pol.close()\n fh_hpol.close()\n fh_2hpol.close()", "def results_psavg_sims():\n posterior_means = [[1.18040327516, 7.55106444832, 3.27420103073, 3.51998795534, 0.67212630002],\n [0.619197296326, 6.49420626987, 2.22495505139, 2.27682390376, 0.678172183554],\n [0.856628471666, 5.94732402905, 3.97580346111, 3.85788708662, 0.690090617623],\n [0.774906025167, 7.34275742443, 2.69729821931, 2.97994334746, 0.663015258594]]\n\n\n sgr1900_results.results_psavg_sims(posterior_means, [5,6,8,12], \"sgr1806\")\n\n return", "def estimate_arpu(x):\n arpu = 0\n if x['mean_luminosity_km2'] > 5:\n # #10 year time horizon\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (20*12) / (1 + 0.03) ** i\n # )\n return 20 * 12 * 10#arpu\n elif x['mean_luminosity_km2'] > 1:\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (5*12) / (1 + 0.03) ** i\n # )\n return 5 * 12 * 10#arpu\n else:\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (2*12) / (1 + 0.03) ** i\n # )\n return 2 * 12 * 10#arpu", "def meanRegion(thk_s,thk_p,thk_diff):\n meanp = np.nanmean(np.nanmean(thk_p,axis=1),axis=1)\n means = np.nanmean(np.nanmean(thk_s,axis=1),axis=1)\n \n print '\\n --- [[%s to %s N, %s to %s E]] ---' % (latmin,latmax,lonmin,lonmax)\n print 'Average Thickness (Satellite) == %s meters' % np.nanmean(means)\n print 'Average Thickness (PIOMAS) == %s meters' % np.nanmean(meanp)\n print 'Average Difference == %s meters' % (np.nanmean(means)-np.nanmean(meanp))\n \n yearmin = 2004\n yearmax = 2015\n years = np.arange(yearmin,yearmax+1,1)\n years = np.setdiff1d(years,[2010]) ### no satellite data in 2010\n \n fig = plt.figure()\n ax = plt.subplot(111)\n \n ### Call parameters\n plt.rcParams['text.usetex']=True\n plt.rcParams['font.family'] = 'sans-serif'\n plt.rcParams['font.sans-serif'] = 'Avant Garde'\n \n plt.plot(meanp,color='darkred',linewidth=2,linestyle='-',\n label=r'PIOMAS')\n plt.plot(means,color='forestgreen',linewidth=2,linestyle='-',\n label=r'Satellite')\n plt.axvline(6,color='k',linewidth=3,linestyle='-')\n \n labelsy = map(str,np.arange(0,6,1))\n labelsx = map(str,years)\n plt.xticks(np.arange(len(years)),labelsx)\n plt.yticks(np.arange(0,6,1),labelsy)\n plt.ylabel(r'\\textbf{Thickness (meters)}',fontsize=13)\n \n ### Adjust axes in time series plots \n def adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 10))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n \n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n \n ### Adjust axes spines\n adjust_spines(ax, ['left', 'bottom'])\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n plt.grid(color='b',zorder=1,alpha=0.3)\n \n plt.legend(shadow=False,fontsize=11,loc='upper right',\n fancybox=True)\n \n plt.text(2,-0.8,r'\\textbf{ICESat}',fontsize=13)\n plt.text(7.3,-0.8,r'\\textbf{PIOMAS}',fontsize=13)\n \n fig.suptitle(r'\\textbf{SIT Difference [Satellite - PIOMAS]}',fontsize=16)\n 
plt.savefig(directoryfigure + 'test5_difftseries.png',dpi=300)", "def _r_at_interface(self, polarization, n_1, n_2):\n if polarization == 's':\n return ((n_1-n_2)/(n_1+n_2))\n elif polarization == 'p':\n return ((n_1-n_2)/(n_1+n_2))\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")", "def seasonal_mean(args_file):\n product, start_date, end_date, variable_name, shape_file = Utility.read_yml_params(args_file)\n stat = Statistic.Mean\n time = TimePeriod.Seasonal\n\n ds = get_data_set(product, shape_file)\n\n result = Utility.Apply_stat(ds, start_date, end_date, variable_name, stat, time)\n return result", "def meanTest(li_pre_final,li_post_final): \r\n li_add_A_pre = li_pre_final[0][0]\r\n li_add_B_pre = li_pre_final[0][1] \r\n li_add_C_pre = li_pre_final[0][2] \r\n li_add_D_pre = li_pre_final[0][3]\r\n\r\n li_upd_A_pre = li_pre_final[1][0] \r\n li_upd_B_pre = li_pre_final[1][1]\r\n li_upd_C_pre = li_pre_final[1][2]\r\n li_upd_D_pre = li_pre_final[1][3]\r\n\r\n li_rem_A_pre = li_pre_final[2][0] \r\n li_rem_B_pre = li_pre_final[2][1]\r\n li_rem_C_pre = li_pre_final[2][2]\r\n li_rem_D_pre = li_pre_final[2][3]\r\n\r\n li_add_A_post = li_post_final[0][0]\r\n li_add_B_post = li_post_final[0][1] \r\n li_add_C_post = li_post_final[0][2] \r\n li_add_D_post = li_post_final[0][3] \r\n\r\n li_upd_A_post = li_post_final[1][0] \r\n li_upd_B_post = li_post_final[1][1]\r\n li_upd_C_post = li_post_final[1][2]\r\n li_upd_D_post = li_post_final[1][3] \r\n\r\n li_rem_A_post = li_post_final[2][0] \r\n li_rem_B_post = li_post_final[2][1]\r\n li_rem_C_post = li_post_final[2][2]\r\n li_rem_D_post = li_post_final[2][3] \r\n\r\n li_p_values = [i for i in range(12)]\r\n \r\n U1, li_p_values[0] = mannwhitneyu(li_add_A_pre,li_add_A_post)\r\n U1, li_p_values[1] = mannwhitneyu(li_add_B_pre,li_add_B_post) \r\n U1, li_p_values[2] = mannwhitneyu(li_add_C_pre,li_add_C_post)\r\n U1, li_p_values[3] = mannwhitneyu(li_add_D_pre,li_add_D_post)\r\n\r\n U1, li_p_values[4] = mannwhitneyu(li_upd_A_pre,li_upd_A_post)\r\n U1, li_p_values[5] = mannwhitneyu(li_upd_B_pre,li_upd_B_post)\r\n U1, li_p_values[6] = mannwhitneyu(li_upd_C_pre,li_upd_C_post)\r\n U1, li_p_values[7] = mannwhitneyu(li_upd_D_pre,li_upd_D_post)\r\n\r\n U1, li_p_values[8] = mannwhitneyu(li_rem_A_pre,li_rem_A_post)\r\n U1, li_p_values[9] = mannwhitneyu(li_rem_B_pre,li_rem_B_post)\r\n U1, li_p_values[10] = mannwhitneyu(li_rem_C_pre,li_rem_C_post)\r\n U1, li_p_values[11] = mannwhitneyu(li_rem_D_pre,li_rem_D_post)\r\n\r\n for p_value in li_p_values:\r\n if p_value < 0.05:\r\n print(\"reject H0: statistically different\")\r\n else:\r\n print(\"accept H0: not statistically different\")", "def noisy_sensor_resolution(p1,p2,error_rate=30):\n # set lenslet array parameters\n nu = 27\n nv = 27\n ns = 21\n nt = 21\n ulens_pitch = 125\n ulens_focal_length = 2426\n objective_magnification = 20\n objective_na = 0.5\n medium_index = 1.33\n\n # Construct lenslet array object\n lenslet_array = LensletArray(nu, nv, ns, nt,\n ulens_pitch, ulens_focal_length, ulens_focal_length,\n objective_magnification, objective_na, medium_index,\n ulens_fill_factor = 1.0, pixel_fill_factor = 1.0,\n circular_ulens_profile = False, \n center_wavelength = 509) # Units: nanometers\n\n # Input list with (intensity,x,y,z,num_lenslets_in_psf,lenslet_array,wavelength_nm)\n # to compute_light_field_psf; wavelength currently fixed at 510nm with intensity = 1.0.\n psf0 = compute_light_field_psf( None, 1.0, p1[0], p1[1], p1[2], ns, lenslet_array, 510 )\n psf1 = compute_light_field_psf( None, 1.0, 
p2[0], p2[1], p2[2], ns, lenslet_array, 510 )\n\n # Add gaussian noise (making poisson intensity >30 assumption)\n # The shot noise variance for each nonzero pixel should be linearly \n # related to the mean intensity of the corresponding pixel in p1.\n noise = psf0 * np.random.normal(loc=0.0, scale=1.0, size=psf0.shape)\n signal = psf1 - psf0 + np.random.normal(loc=0.0, scale=1.0, size=psf0.shape)\n\n # log likelihood ratio on continuous data (based on poisson shot noise)\n l0 = 2*psf0\n la = psf1 + psf0\n logL = np.sum( la*(np.log(la) - np.log(l0) - 1.0) + l0 )\n\n # log likelihood ratio on discrete (16-bit) data (based on poisson shot noise)\n psf_max = 2*np.max(psf0)\n psf0_discrete = (65535*(psf0/psf_max)).astype(np.uint16)\n psf1_discrete = (65535*(psf1/psf_max)).astype(np.uint16)\n l0 = 2.0*psf0_discrete\n la = psf1_discrete + psf0_discrete\n log_la = np.log(la); log_la[np.where(log_la==-np.inf)[0]]=0.0\n log_l0 = np.log(l0); log_l0[np.where(log_l0==-np.inf)[0]]=0.0\n logL_discrete = np.sum( la*(log_la - log_l0 - 1.0) + l0 )\n\n # save 16-bit pngs\n save_image('/home/logan/Documents/Results/Resolution/sensor/psf0.png',psf0_discrete)\n save_image('/home/logan/Documents/Results/Resolution/sensor/psf1.png',psf1_discrete)\n \n # KS test\n ks, pval = ks_2samp( signal.flatten(), noise.flatten() )\n print \"KS statistic:\",ks\n print \"KS p-value:\",pval\n print \"log Likelihood ratio:\",logL\n print \"Discrete log Likelihood ratio:\",logL_discrete\n return ks,pval,logL,logL_discrete", "def average(self, u=None, y=None):\n\n saveu = False\n savey = False\n if u is None:\n u = self.u\n saveu = True\n if y is None:\n y = self.y\n savey = True\n um = u.mean(axis=-1) # (npp,m,R)\n ym = y.mean(axis=-1)\n um = um.swapaxes(1, 2).reshape(-1, self.m, order='F') # (npp*R,m)\n ym = ym.swapaxes(1, 2).reshape(-1, self.p, order='F') # (npp*R,p)\n\n if saveu:\n self.um = um\n # number of samples after average over periods\n self.mns = um.shape[0] # mns = npp*R\n if savey:\n self.ym = ym\n\n return um, ym", "def example(self, s, d, s_len, d_len, snr):\n\t\ts, d, x, n_frames = self.mix(s, d, s_len, d_len, snr)\n\t\ts_STMS, _ = self.polar_analysis(s)\n\t\tx_STMS, _ = self.polar_analysis(x)\n\t\tsmm = tf.math.truediv(s_STMS, x_STMS)\n\t\tsmm_bar = tf.clip_by_value(smm, 0.0, 5.0)\n\t\treturn x_STMS, smm_bar, n_frames", "def jam_axi_rms(surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,\n inc, mbh, distance, xbin, ybin, ml=None, normpsf=1., pixang=0.,\n pixsize=0., plot=True, rms=None, erms=None, sigmapsf=0.,\n goodbins=None, quiet=False, beta=None, step=0., nrad=20,\n nang=10, rbh=0.01, tensor='zz', vmin=None, vmax=None, **kwargs):\n if beta is None:\n beta = np.zeros_like(surf_lum) # Anisotropy parameter beta = 1 - (sig_z/sig_R)**2\n if not (surf_lum.size == sigma_lum.size == qobs_lum.size == beta.size):\n raise ValueError(\"The luminous MGE components do not match\")\n if not (surf_pot.size == sigma_pot.size == qobs_pot.size):\n raise ValueError(\"The total mass MGE components do not match\")\n if xbin.size != ybin.size:\n raise ValueError(\"xbin and ybin do not match\")\n if rms is not None:\n if erms is None:\n erms = np.full_like(rms, np.median(rms)*0.05) # Constant ~5% errors\n if goodbins is None:\n goodbins = np.ones_like(rms, dtype=bool)\n elif goodbins.dtype != bool:\n raise ValueError(\"goodbins must be a boolean vector\")\n if not (xbin.size == rms.size == erms.size == goodbins.size):\n raise ValueError(\"(rms, erms, goodbins) and (xbin, ybin) do not match\")\n\n sigmapsf = 
np.atleast_1d(sigmapsf)\n normpsf = np.atleast_1d(normpsf)\n if sigmapsf.size != normpsf.size:\n raise ValueError(\"sigmaPSF and normPSF do not match\")\n\n pc = distance*np.pi/0.648 # Constant factor to convert arcsec --> pc\n\n surf_lum_pc = surf_lum\n surf_pot_pc = surf_pot\n sigma_lum_pc = sigma_lum*pc # Convert from arcsec to pc\n sigma_pot_pc = sigma_pot*pc # Convert from arcsec to pc\n xbin_pc = xbin*pc # Convert all distances to pc\n ybin_pc = ybin*pc\n pixSize_pc = pixsize*pc\n sigmaPsf_pc = sigmapsf*pc\n step_pc = step*pc\n\n # Add a Gaussian with small sigma and the same total mass as the BH.\n # The Gaussian provides an excellent representation of the second moments\n # of a point-like mass, to 1% accuracy out to a radius 2*sigmaBH.\n # The error increses to 14% at 1*sigmaBH, independently of the BH mass.\n #\n if mbh > 0:\n sigmaBH_pc = rbh*pc # Adopt for the BH just a very small size\n surfBH_pc = mbh/(2*np.pi*sigmaBH_pc**2)\n surf_pot_pc = np.append(surfBH_pc, surf_pot_pc) # Add Gaussian to potential only!\n sigma_pot_pc = np.append(sigmaBH_pc, sigma_pot_pc)\n qobs_pot = np.append(1., qobs_pot) # Make sure vectors do not have extra dimensions\n\n qobs_lum = qobs_lum.clip(0, 0.999)\n qobs_pot = qobs_pot.clip(0, 0.999)\n\n t = clock()\n rmsModel = _vrms2(xbin_pc, ybin_pc, inc, surf_lum_pc, sigma_lum_pc,\n qobs_lum, surf_pot_pc, sigma_pot_pc, qobs_pot, beta,\n tensor, sigmaPsf_pc, normpsf, pixSize_pc, pixang,\n step_pc, nrad, nang)\n if not quiet:\n print('jam_axi_rms elapsed time sec: %.2f' % (clock() - t))\n\n if tensor in ('xx', 'yy', 'zz'):\n rmsModel = np.sqrt(rmsModel.clip(0)) # Return SQRT and fix possible rounding errors\n if tensor in ('xy', 'xz'):\n rmsModel *= np.sign(xbin*ybin) # Calculation was done in positive quadrant\n\n # Analytic convolution of the MGE model with an MGE circular PSF\n # using Equations (4,5) of Cappellari (2002, MNRAS, 333, 400)\n #\n lum = surf_lum_pc*qobs_lum*sigma_lum**2 # Luminosity/(2np.pi) of each Gaussian\n flux = np.zeros_like(xbin) # Total MGE surface brightness for plotting\n for sigp, norp in zip(sigmapsf, normpsf): # loop over the PSF Gaussians\n sigmaX = np.sqrt(sigma_lum**2 + sigp**2)\n sigmaY = np.sqrt((sigma_lum*qobs_lum)**2 + sigp**2)\n surfConv = lum / (sigmaX*sigmaY) # PSF-convolved in Lsun/pc**2\n for srf, sx, sy in zip(surfConv, sigmaX, sigmaY): # loop over the galaxy MGE Gaussians\n flux += norp*srf*np.exp(-0.5*((xbin/sx)**2 + (ybin/sy)**2))\n\n if rms is None:\n\n chi2 = None\n if ml is None:\n ml = 1.\n else:\n rmsModel *= np.sqrt(ml)\n\n else:\n\n if (ml is None) or (ml <= 0):\n\n # y1, dy1 = rms, erms # (y1 are the data, y2 the model)\n # scale = sum(y1*y2/dy1**2)/sum(y2**2/dy1**2) # (equation 51)\n #\n ml = (np.sum(rms[goodbins]*rmsModel[goodbins]/erms[goodbins]**2)\n / np.sum((rmsModel[goodbins]/erms[goodbins])**2))**2\n\n rmsModel *= np.sqrt(ml)\n chi2 = np.sum(((rms[goodbins]-rmsModel[goodbins])/erms[goodbins])**2) / goodbins.sum()\n\n if not quiet:\n print('inc=%.1f beta_z=%.2f M/L=%.3g BH=%.2e chi2/DOF=%.3g' % (inc, beta[0], ml, mbh*ml, chi2))\n mass = 2*np.pi*surf_pot_pc*qobs_pot*sigma_pot_pc**2\n print('Total mass MGE: %.4g' % np.sum(mass*ml))\n\n if plot:\n\n rms1 = rms.copy() # Only symmetrize good bins\n rms1[goodbins] = symmetrize_velfield(xbin[goodbins], ybin[goodbins], rms[goodbins])\n\n if (vmin is None) or (vmax is None):\n vmin, vmax = stats.scoreatpercentile(rms1[goodbins], [0.5, 99.5]) # Could use np.percentile in Numpy 1.10\n\n plt.clf()\n plt.subplot(121)\n plot_velfield(xbin, ybin, rms1, 
vmin=vmin, vmax=vmax, flux=flux, **kwargs)\n plt.title(r\"Input $V_{\\rm rms}$\")\n\n plt.subplot(122)\n plot_velfield(xbin, ybin, rmsModel, vmin=vmin, vmax=vmax, flux=flux, **kwargs)\n plt.plot(xbin[~goodbins], ybin[~goodbins], 'ok', mec='white')\n plt.title(r\"Model $V_{\\rm rms}$\")\n plt.tick_params(labelleft='off')\n plt.subplots_adjust(wspace=0.03)\n\n return rmsModel, ml, chi2, flux", "def aperture_phot(self,data,x,y,v):\n r = np.sqrt((x-self.avg_map_fits['Values'][1])**2 + (y-self.avg_map_fits['Values'][3])**2)\n \n inner = (r < 8./60.) & np.isfinite(data) \n outer = (r > 8.5/60.) & (r < 12./60.) & np.isfinite(data)\n\n annu = np.nanmedian(data[outer])\n annu_rms = np.nanstd(data[outer])\n flux = np.sum(data[inner]) - annu*np.sum(inner)\n\n c = 3e8\n kb=1.38e-23\n beam = (1./60.*np.pi/180.)**2\n factor = 2*kb*(v*1e9/c)**2 * beam * 1e26\n return flux*factor, annu_rms*np.sqrt(np.sum(inner))*factor", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def test_mean(self):\n m = self.m\n analytical_mean = 1.5*self.T*self.k\n computed_mean = 0 \n for j in self.v:\n computed_mean += self.meankin() \n computed_mean = computed_mean/self.N\n relative_error = abs(analytical_mean - computed_mean)/analytical_mean\n print(\"----------Kinetic energy----------\")\n print(\"{:<20}{:g}\".format(\"Computed mean:\", computed_mean))\n print(\"{:<20}{:g}\".format(\"Analytical mean:\", analytical_mean))\n print(\"{:<20}{:.2f}%\".format(\"Relative error:\", relative_error * 100))\n print(\"-----------------------------\")\n break\n assert relative_error < 0.02, \"the mean kinetic energy is off\"\n\n print(\"----------Velocity----------\")\n\n\n analytical_vel = np.sqrt(8*self.k*self.T/(np.pi*m))\n computed_vel = 0\n for i in self.v: \n computed_vel += self.meanvel()\n computed_vel = computed_vel/self.N \n relative_error = abs(analytical_vel - computed_vel)/analytical_vel\n 
print(\"{:<20}{:g}\".format(\"Computed velocity:\", computed_vel))\n print(\"{:<20}{:g}\".format(\"Analytical velocity:\", analytical_vel))\n print(\"{:<20}{:.2f}%\".format(\"Relative error:\", relative_error *100))\n print(\"-----------------------------\")\n break\n assert relative_error < 0.02, \"the mean velocity is off\"", "def mean(self, like_params):\n\t\traise NotImplementedError", "def test_compute_spectral_norms(self):\n\t\tdetails = self.watcher.analyze(layers=[self.second_layer], pool=False, randomize=False, plot=False, mp_fit=False, svd_method=ACCURATE_SVD)\n\n\t\t# SLOW method\n\t\ta = details.spectral_norm.to_numpy()\n\t\tself.assertAlmostEqual(a[0],20.2149, places=3)\n\t\tself.assertAlmostEqual(a[1],24.8158, places=3)\n\t\tself.assertAlmostEqual(a[2],19.3795, places=3)" ]
[ "0.59134376", "0.55239034", "0.53342396", "0.5303632", "0.52453595", "0.5169982", "0.5147065", "0.5143282", "0.51396984", "0.5123302", "0.51190704", "0.51146936", "0.5091671", "0.5088502", "0.50615734", "0.50424194", "0.5028437", "0.5017718", "0.5015112", "0.5014439", "0.4998133", "0.49880025", "0.49866158", "0.49563876", "0.49477303", "0.49418133", "0.49413958", "0.49390128", "0.49275333", "0.49234885" ]
0.5633161
1
Create a layer from the set of preprogrammed materials and add it to the AR coating stack Arguments
def add_layer(self, material, thickness=5.0, units='mil', type='layer', \ stack_position=-1): type = type.lower() if type == 'layer': layer = Layer() layer.name = material.lower() layer.thickness = thickness layer.units = units try: # layer.dielectric = mats.Electrical.DIELECTRIC[layer.name] layer.dielectric = mats.Electrical.props[layer.name][0] except: raise KeyError('I don\'t know that material!') try: # layer.losstangent = mats.Electrical.LOSS_TAN[layer.name] layer.losstangent = mats.Electrical.props[layer.name][1] except: layer.losstangent = 0 print('\nI don\'t know this loss tangent. Setting loss to 0!') if (stack_position == -1): self.stack.append(layer) else: self.stack.insert(stack_position, layer) elif type == 'source': self.source = SourceLayer() self.source.name = material.lower() try: # self.source.dielectric = mats.Electrical.DIELECTRIC[self.source.name] self.source.dielectric = mats.Electrical.props[self.source.name][0] except: raise KeyError('I don\'t know that material!') try: # self.source.losstangent = mats.Electrical.LOSS_TAN[self.source.name] self.source.losstangent = mats.Electrical.props[self.source.name][1] except: self.source.losstangent = 0 print('\nI don\'t know this loss tangent. Setting loss to 0!') elif type == 'terminator': self.terminator = TerminatorLayer() self.terminator.name = material.lower() try: # self.terminator.dielectric = mats.Electrical.DIELECTRIC[self.terminator.name] self.terminator.dielectric = mats.Electrical.props[self.terminator.name][0] except: raise KeyError('I don\'t know that material!') try: # self.terminator.losstangent = mats.Electrical.LOSS_TAN[self.terminator.name] self.terminator.losstangent = mats.Electrical.props[self.terminator.name][1] except: self.terminator.losstangent = 0 print('\nI don\'t know this loss tangent. Setting loss to 0!') else: raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR') return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AddDispersionMaterial(GeometryName,RGBData):\n\n r,g,b=RGBData\n onlyR = tuple([r,0,0,1])\n onlyG = tuple([0,g,0,1])\n onlyB = tuple([0,0,b,1])\n\n\n currentMaterial = bpy.data.materials.new(name='TypeA'+GeometryName)\n currentMaterial.use_nodes = True\n nodes = currentMaterial.node_tree.nodes\n\n math01 = nodes.new(\"ShaderNodeMath\")\n math01.operation = \"POWER\"\n\n glassBSDF01 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF01.inputs[0].default_value = onlyR\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF01.inputs[1])\n\n glassBSDF02 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF02.inputs[0].default_value = onlyG\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF02.inputs[1])\n\n glassBSDF03 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF03.inputs[0].default_value = onlyB\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF03.inputs[1])\n\n math02 = nodes.new(\"ShaderNodeMath\")\n currentMaterial.node_tree.links.new(math02.outputs[0],glassBSDF02.inputs[2])\n\n math03 = nodes.new(\"ShaderNodeMath\")\n currentMaterial.node_tree.links.new(math02.outputs[0],math03.inputs[1])\n currentMaterial.node_tree.links.new(math03.outputs[0],glassBSDF01.inputs[2])\n\n addShader01 = nodes.new(\"ShaderNodeAddShader\")\n currentMaterial.node_tree.links.new(glassBSDF01.outputs[0],addShader01.inputs[0])\n currentMaterial.node_tree.links.new(glassBSDF02.outputs[0],addShader01.inputs[1])\n\n addShader02 = nodes.new(\"ShaderNodeAddShader\")\n currentMaterial.node_tree.links.new(addShader01.outputs[0],addShader02.inputs[0])\n currentMaterial.node_tree.links.new(glassBSDF03.outputs[0],addShader02.inputs[1])\n\n volumeAbs = nodes.new(\"ShaderNodeVolumeAbsorption\")\n\n materialOutput=nodes.get(\"Material Output\")\n currentMaterial.node_tree.links.new(addShader02.outputs[0],materialOutput.inputs[0])\n currentMaterial.node_tree.links.new(volumeAbs.outputs[0],materialOutput.inputs[1])\n\n bpy.data.objects[GeometryName].data.materials.append(currentMaterial)", "def add_custom_layer(self, material, thickness, units, dielectric, loss_tangent, stack_position=-1):\n layer = Layer()\n layer.units = units\n layer.thickness = thickness\n layer.dielectric = dielectric\n layer.losstangent = loss_tangent\n if (stack_position == -1):\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n return", "def AddMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_AddMaterial(self, *args)", "def connect_layers(self, material_dict=None):\n if material_dict is None:\n material_dict = {k: DEFAULT_BEAM for k in range(self.layers)}\n\n for layer in range(self.layers):\n material = material_dict[layer]\n\n for h in range(self.height):\n for c in range(self.ring_n):\n if layer == 0:\n n0 = f'N.{h}.c'\n else:\n n0 = f'R.{layer}.{h}.{c}'\n n1 = f'R.{layer+1}.{h}.{c}'\n name = f'M.{layer}.{h}.{c}'\n\n self.fem.AddMember(name, n0, n1,\n material[0],\n material[1],\n material[2],\n material[3],\n material[4],\n material[5]\n )", "def __init__(self, diffuse=RGB(1,1,1), Kd=1.0, specular=RGB(1,1,1), Ks=0.0, \n shininess=8.0, Kt=0.0, ior=1.0, name=None):\n \n if name is None:\n name = \"Material %d\" % Material._num_materials\n \n Material._num_materials += 1\n \n self.name = name\n self.diffuse = diffuse\n self.Kd = Kd\n self.specular = specular\n self.Ks = Ks\n self.shininess = shininess\n self.Kt = Kt\n self.ior = ior", "def make_input_materials(self) :\n # 1 5 1 MATERIAL 1 (arbitrary line, i think) \n # 1.4493e+00 9.9000e-03 7.9000e-03 1. 0. 0. 
7.9000e-03 1.\n # 3.8070e-01 1.0420e-01 1.6920e-01 0 1.5100e-02 0. 1.6920e-01 1.\n self.input_materials = \"\"\n number_mats = len(self.core.pattern)+1\n a = self.core.assemblies\n for i in range(0, number_mats-1) :\n # Row 1: description.\n self.input_materials += \" \" + str(i+1) + \" 5 1 MATERIAL \" + \\\n str(i+1) + \" (\" + \\\n a[i].model + \", \" + \\\n str(a[i].enrichment) + \" w/o, \" + \\\n str(a[i].burnup) + \" MWd/kg)\\n\" \n # Rows 2 and 3.\n D1,D2,A1,A2,F1,F2,S12 = a[i].get_constants()\n d = np.array([[D1,A1,F1,1.0,0.0,0.0,F1,1.0],[D2,A2,F2,0.0,S12,0.0,F2,1.0]])\n for j in range(0, 2) :\n for k in range(0, 8) :\n self.input_materials +='%12.4e' %(d[j,k])\n self.input_materials += '\\n'\n \n a = self.core.reflector\n # Row 1: description.\n self.input_materials += \" \" + str(number_mats) + \" 5 1 MATERIAL \" + \\\n str(number_mats) + \" (REFLECTOR) \\n\" \n # Rows 2 and 3.\n D1,D2,A1,A2,F1,F2,S12 = a.get_constants()\n d = np.array([[D1,A1,F1,1.0,0.0,0.0,F1,1.0],[D2,A2,F2,0.0,S12,0.0,F2,1.0]])\n for i in range(0, 2) :\n for j in range(0, 8) :\n self.input_materials +='%12.4e' %(d[i,j])\n self.input_materials += '\\n'\n self.input_materials += \"WHITE\\n\" + \"BLACK\\n\" + \"END\\n\"", "def __init__(\n self,\n materials: Store,\n optimade: Store,\n **kwargs,\n ):\n self.materials = materials\n self.optimade = optimade\n self.kwargs = kwargs\n\n # Enforce that we key on material_id\n self.materials.key = \"material_id\"\n self.optimade.key = \"material_id\"\n super().__init__(\n source=materials,\n target=optimade,\n projection=[\"structure\"],\n **kwargs,\n )", "def attachSurface(*args, blendBias: Union[float, bool]=0.5, blendKnotInsertion: bool=False,\n caching: bool=True, directionU: bool=True, keepMultipleKnots: bool=True,\n method: Union[int, bool]=0, nodeState: Union[int, bool]=0, parameter:\n Union[float, bool]=0.1, reverse1: bool=False, reverse2: bool=False, swap1:\n bool=False, swap2: bool=False, twist: bool=False, constructionHistory:\n bool=True, name: AnyStr=\"\", object: bool=True, replaceOriginal: bool=True,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def build_layers(self):\n raise NotImplementedError", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def add_face_materials(engine, mesh):\n texture_image = bpy.data.images.load(os.path.join(basepath, settings.TEXTURE_FILE))\n image_texture = bpy.data.textures.new('export_texture', type = 'IMAGE')\n image_texture.image = texture_image\n image_material = bpy.data.materials.new('TextureMaterials')\n image_material.use_shadeless = True\n\n material_texture = image_material.texture_slots.add()\n material_texture.texture = image_texture\n material_texture.texture_coords = 'UV'\n bpy.ops.object.mode_set(mode='OBJECT')\n context_obj = bpy.context.object\n context_obj_data = context_obj.data\n context_obj_data.materials.append(image_material)\n bpy.types.SpaceView3D.show_textured_solid = True", "def __init__(self, weights=[], alphas=[]):\n self._layers = [Layer(w, a) for w, a in zip(weights, alphas)]", "def create_material(name, diffuse, alpha):\n mat = bpy.data.materials.new(name)\n mat.diffuse_color = diffuse\n mat.diffuse_intensity = 1.0\n mat.alpha = alpha\n if alpha:\n mat.use_transparency = True\n return mat", "def add_layer(self, *args):\n\n nm = None\n\n #check to see if we're sending an already formed layer to add - used for data file\n if len(args) == 1 & 
isinstance(args[0], QgsVectorLayer):\n print('Importing {} as a vector'.format(args[0]))\n self.project.addMapLayer(args[0])\n nm = args[0].name()\n\n elif len(args) > 1:\n print('Importing {} as a vector'.format(args[0]))\n print(args)\n self.project.addMapLayer(QgsVectorLayer(*args))\n nm = args[1]\n\n if nm:\n self.get_layer(nm)\n\n else:\n print()\n print('***Bad map layer for {}***'.format(str(args)))\n print()", "def __init__(self, **kwargs):\n\n # Simply hold on to the parameters for now\n self.name = kwargs.get(\"name\", None)\n\n # Placeholder for the resulting layer\n self.layer = None", "def createAnimLayer(layerName, nodesToAdd=None):\n if not cmds.animLayer(layerName ,query=True, ex=True):\n layerName = cmds.animLayer(layerName)\n if nodesToAdd:\n cmds.select(nodesToAdd)\n cmds.animLayer(layerName, e=True, aso=True)\n cmds.animLayer(layerName, e=True, sel=True)", "def createMemoryLayer(self):\n suffix = \"\"\n name = \"Vector Bender\"\n while len(QgsProject.instance().mapLayersByName(name + suffix)) > 0:\n if suffix == \"\":\n suffix = \" 1\"\n else:\n suffix = \" \" + str(int(suffix) + 1)\n newMemoryLayer = QgsVectorLayer(\"Linestring\", name + suffix, \"memory\")\n newMemoryLayer.loadNamedStyle(join(dirname(__file__), \"PairStyle.qml\"), False)\n QgsProject.instance().addMapLayer(newMemoryLayer)\n self.updateLayersComboboxes()\n index = self.comboBox_pairsLayer.findData(newMemoryLayer.id())\n self.comboBox_pairsLayer.setCurrentIndex(index)\n newMemoryLayer.startEditing()", "def build_sample(self, wavelength=None):\n self.init_materials(wavelength)\n\n multi_layer = ba.MultiLayer()\n air_layer = ba.Layer(self.m_air_material)\n avg_layer = ba.Layer(self.m_air_material, self.m_average_layer_thickness)\n substrate_layer = ba.Layer(self.m_substrate_material)\n\n for layout_factory in self.m_layouts:\n avg_layer.addLayout(layout_factory.create_layout(self.m_particle_material))\n\n roughness = ba.LayerRoughness(self.m_roughness, 0.3, 500.0*nm)\n multi_layer.addLayer(air_layer)\n multi_layer.addLayer(avg_layer)\n multi_layer.addLayerWithTopRoughness(substrate_layer, roughness)\n\n return multi_layer", "def new_layer(self, nodes, inputs, alpha=0.1):\n weights = [[random.uniform(-0.1, 0.1) for _ in range(inputs)] for i in range(nodes)]\n alphas = [alpha for _ in range(nodes)]\n self._layers.append(Layer(weights, alphas))", "def materials_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n view = context.space_data\r\n thumbnails_path = get_directory('icons')\r\n library_path = get_library_path()\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n if AM.as_mat_scene:\r\n thumb_list = thumb + [\"AM_Cloth\", \"AM_Sphere\"]\r\n else: \r\n thumb_list = thumb\r\n\r\n cam_is_valid = False\r\n obj_is_valid = False\r\n \r\n \r\n if not AM.as_mat_scene and not bpy.context.object:\r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n box.label(\"No active_object in the scene\", icon='ERROR')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n elif not AM.as_mat_scene and not bpy.context.active_object.active_material:\r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n box.label(\"The object have no material\", icon='ERROR')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n if AM.as_mat_scene and 
not isdir(join(library_path, 'materials', \"Render Scenes\")):\r\n box.operator(\"object.create_rder_scn_lib\", text = \"Create render scene library\", icon = 'FILESEL')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n \r\n if AM.as_mat_scene:\r\n asset_name = AM.scene_name\r\n else:\r\n active_mat = context.active_object.active_material\r\n asset_name = active_mat.name\r\n \r\n if len(bpy.context.active_object.material_slots) == 1:\r\n AM.multi_materials = False\r\n \r\n if AM.as_mat_scene and (not asset_name in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace') or\\\r\n not AM.as_mat_scene and (AM.multi_materials and get_valid_materials() or not AM.multi_materials and asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace'): \r\n if not AM.multi_materials:\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2 and AM.replace_rename == 'rename':\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if AM.as_mat_scene:\r\n box.prop(AM, \"scene_name\", text = \"\")\r\n else:\r\n box.prop(AM, \"rename_mat\", text=\"\")\r\n \r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n if not AM.as_mat_scene and len(bpy.context.active_object.material_slots) >= 2:\r\n if len(get_valid_materials()) != len(bpy.context.active_object.material_slots) and AM.multi_materials:\r\n box.label(\"Some materials wont be added\", icon = 'ERROR')\r\n box.label(\" because there already exist\")\r\n row = box.row()\r\n row.prop(AM, \"multi_materials\", text = \"All materials\")\r\n if AM.as_mat_scene:\r\n row = box.row(align = True)\r\n row.label(\"Scene name:\")\r\n row.prop(AM, \"scene_name\", text = \"\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n if AM.as_mat_scene:\r\n for obj in context.scene.objects:\r\n if obj.type == 'CAMERA':\r\n cam_is_valid = True\r\n \r\n if len([obj for obj in context.selected_objects if obj.type != 'CAMERA' and bpy.context.active_object == obj]) == 1:\r\n obj_is_valid = True\r\n \r\n row = box.row()\r\n row.label(\"Selected object rendering\", icon = 'FILE_TICK' if obj_is_valid else 'CANCEL')\r\n row = box.row()\r\n row.label(\"Camera in the scene\", icon = 'FILE_TICK' if cam_is_valid else 'CANCEL')\r\n if not cam_is_valid:\r\n row = box.row()\r\n row.operator(\"object.camera_add\", text = \"Add camera\", icon = 'OUTLINER_OB_CAMERA')\r\n \r\n if not AM.as_mat_scene:\r\n # --------------------- # \r\n # RENDER THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'render':\r\n row = box.row(align = True)\r\n row.label(\"Thumbnail:\")\r\n row.prop(AM, \"mat_thumb_type\", text = \"\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n 
row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n\r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if (AM.as_mat_scene and AM.scene_name and cam_is_valid and obj_is_valid or not AM.as_mat_scene) and (AM.render_type == 'render' or (asset_name not in thumb_list or AM.replace_rename == 'replace') and AM.render_type == 'opengl' or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n if AM.as_mat_scene:\r\n row.operator(\"object.add_scene_in_library\", text=\"OK\", icon='FILE_TICK')\r\n else:\r\n row.operator(\"object.add_material_in_library\", text=\"OK\", icon='FILE_TICK')\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n if AM.multi_materials and not get_valid_materials():\r\n box.label(\"All materials already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2:\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2:\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n else:\r\n AM.multi_materials = False\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if AM.as_mat_scene:\r\n box.prop(AM, \"scene_name\", text = \"\")\r\n else:\r\n box.prop(AM, \"rename_mat\", text=\"\")\r\n \r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def set_up_rlayer(self, rlname, rlname_other=None, include_layers=None,\n exclude_layers=None, mask_layers=None):\n scene = self.set_as_active()\n layer_numbers = constants.layer_numbers\n w_var.rlname = rlname\n\n if include_layers is None:\n include_layers = w_var.layer_numbers_all_used\n\n if exclude_layers is None:\n exclude_layers = []\n\n if mask_layers is None:\n mask_layers = []\n\n if w_var.cb_clear_rlayers:\n for layer in scene.render.layers[:-1]:\n scene.render.layers.remove(layer)\n\n scene.render.layers.active.name = rlname\n scene.render.layers.active.use = True\n\n new_rlayer = scene.render.layers.active\n\n # if not clearing render layers: creates new one\n else:\n new_rlayer = scene.render.layers.new(rlname)\n scene.render.layers.active = new_rlayer\n\n # there needs to be two render layers in the same scene for freestyle compositing\n if w_var.cb_composited:\n w_var.rlname_other = rlname_other\n other_rlayer = scene.render.layers.new(rlname_other)\n other_rlayer.layers[19] = True\n scene.render.layers[rlname_other].layers_zmask = (False,) * 20\n\n if w_var.cb_ao:\n scene.render.layers[rlname].use_pass_ambient_occlusion = True\n\n if w_var.cb_composited:\n 
scene.render.layers[rlname_other].use_pass_ambient_occlusion = True\n\n # because I can't deactivate a layer if it is the only active one\n new_rlayer.layers[19] = True\n \n scene.render.layers[rlname].layers_exclude = (False,) * 20\n scene.render.layers[rlname].layers_zmask = (False,) * 20\n\n for i in layer_numbers:\n if w_var.cb_composited:\n if i in w_var.layer_numbers_affected:\n scene.render.layers[rlname].layers[i] = True\n scene.render.layers[rlname_other].layers_zmask[i] = True\n\n else:\n scene.render.layers[rlname].layers[i] = False\n\n if i in w_var.layer_numbers_other:\n scene.render.layers[rlname_other].layers[i] = True\n\n else:\n scene.render.layers[rlname_other].layers[i] = False\n\n else:\n if i in include_layers:\n scene.render.layers[rlname].layers[i] = True\n\n else:\n scene.render.layers[rlname].layers[i] = False\n\n if i in mask_layers:\n scene.render.layers[rlname].layers_zmask[i] = True\n\n if i in exclude_layers:\n scene.render.layers[rlname].layers_exclude[i] = True", "def make_materials(*args, **kwargs):\n\n ret_args = []\n for parameters in args:\n if isinstance(parameters, (list, tuple)):\n ret_args.append(Material(*parameters))\n else:\n ret_args.append(Material(parameters))\n\n ret_kwargs = {}\n for name, parameters in kwargs.items():\n if isinstance(parameters, (list, tuple)):\n ret_kwargs[name] = Material(*parameters)\n else:\n ret_kwargs[name] = Material(parameters)\n\n if len(ret_kwargs) == 0:\n return ret_args\n elif len(ret_args) == 0:\n return ret_kwargs\n else:\n return ret_args, ret_kwargs", "def __init__(self, *args, **kwargs):\n super(MayaScene, self).__init__(*args, **kwargs)", "def __init__(self, vs, material):\n self.vs = vs\n self.material = material", "def add_clay_to_selected(self):\n scene = self.set_as_active()\n\n # if the user selected a material, use it\n if w_var.cb_mat_clay:\n clay_mat = bpy.data.materials[w_var.mat_clay_name]\n\n # else, create a new one with the color selected\n else:\n clay_color = w_var.color_clay\n\n # separating rgb and alpha\n clay_color_rgb = clay_color[0:3]\n clay_color_alpha = clay_color[-1]\n clay_mat = bpy.data.materials.new('clay')\n \n renderengine = scene.wirebomb.data_renderengine\n \n if renderengine == 'CYCLES':\n clay_mat.use_nodes = True\n tree = clay_mat.node_tree\n tree.nodes.clear()\n\n # creating the nodes\n node_transparent = tree.nodes.new('ShaderNodeBsdfTransparent')\n node_transparent.location = -300, 100\n\n node_diffuse = tree.nodes.new('ShaderNodeBsdfDiffuse')\n node_diffuse.location = -300, -100\n node_diffuse.inputs[0].default_value = clay_color_rgb + (1.0, )\n node_diffuse.color = clay_color_rgb\n node_diffuse.name = 'addon_clay_color' # referencing to this ID in the real-time change\n\n node_mixshader = tree.nodes.new('ShaderNodeMixShader')\n node_mixshader.location = 0, 50\n node_mixshader.inputs[0].default_value = clay_color_alpha\n node_mixshader.name = 'addon_clay_alpha' # referencing to this ID in the real-time change\n\n node_output = tree.nodes.new('ShaderNodeOutputMaterial')\n node_output.location = 300, 50\n\n # connecting the nodes\n tree.links.new(node_transparent.outputs[0], node_mixshader.inputs[1])\n tree.links.new(node_diffuse.outputs[0], node_mixshader.inputs[2])\n tree.links.new(node_mixshader.outputs[0], node_output.inputs[0])\n\n for node in tree.nodes:\n node.select = False\n\n # sets the viewport color\n clay_mat.diffuse_color = clay_color_rgb\n \n elif renderengine == 'BLENDER_RENDER':\n clay_mat.diffuse_color = clay_color_rgb\n clay_mat.use_transparency = 
True\n clay_mat.alpha = clay_color_alpha\n\n previous_area = bpy.context.area.type\n bpy.context.area.type = 'VIEW_3D'\n previous_layers = tuple(scene.layers)\n\n # can't enter edit mode on objects on inactive layers\n scene.layers = (True,)*20\n\n for obj in scene.objects:\n if obj.select:\n # only enters edit mode on active object\n scene.objects.active = obj\n obj.data.materials.append(clay_mat)\n clay_index = obj.data.materials.find(clay_mat.name)\n obj.active_material_index = clay_index\n\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.object.material_slot_assign()\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.object.mode_set(mode='OBJECT')\n\n bpy.context.area.type = previous_area\n scene.layers = previous_layers\n\n return clay_mat", "def new_initial_layer(self, nodes, inputs, alpha=0.1):\n weights = [[1 for _ in range(inputs)] for i in range(nodes)]\n alphas = [alpha for _ in range(nodes)]\n self._layers.insert(0, Layer(weights, alphas))", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.primary_source_key in [\n x for x, _ in p.source_atten_tpls\n ], 'Source attention must have the primary source key.'\n for source_key, atten_p in p.source_atten_tpls:\n if isinstance(atten_p, list):\n child_p_list = []\n for atten in atten_p:\n child_p 
= atten.Copy()\n if child_p.hidden_dim <= 0:\n child_p.hidden_dim = p.hidden_dim\n if child_p.input_dim <= 0:\n child_p.input_dim = p.input_dim\n child_p_list.append(child_p)\n self.CreateChildren('atten_%s' % source_key, child_p_list)\n else:\n child_p = atten_p.Copy()\n if child_p.hidden_dim <= 0:\n child_p.hidden_dim = p.hidden_dim\n if child_p.input_dim <= 0:\n child_p.input_dim = p.input_dim\n self.CreateChild('atten_%s' % source_key, child_p)\n\n # Initialize source context vector merging layer.\n merger_p = p.atten_merger_tpl.Copy()\n merger_p.name = 'atten_merger'\n merger_p.source_dim = p.input_dim\n merger_p.query_dim = p.input_dim\n self.CreateChild('atten_merger', merger_p)", "def AddLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_AddLayer(self, *args)", "def __prepare_shaders(self, rotation_matrix=None, light_matrix=None,\n depth=True):\n self.__sh.add_attribute(0, self.__mean_face, 'mean_position')\n self.__sh.bind_buffer()\n\n self.__sh.use_shaders()\n\n self.__sh.bind_uniform_matrix(light_matrix.dot(rotation_matrix),\n 'light_matrix')\n if not depth:\n self.__sh.bind_uniform_matrix(rotation_matrix, 'rotation_matrix')\n self.__sh.bind_uniform_vector(self.__face.light_cartesian,\n 'light_vector')\n coefficients_amount = len(self.__face.coefficients)\n indices = -ones(199, dtype='i')\n indices[:coefficients_amount] = array(range(coefficients_amount))\n self.__sh.bind_uniform_ints(indices, 'indices')\n\n coefficients = zeros(199, dtype='f')\n coefficients[:coefficients_amount] = self.__face.coefficients\n self.__sh.bind_uniform_floats(coefficients, 'coefficients')\n\n glActiveTexture(GL_TEXTURE0)\n self.__sh.bind_texture(0)\n if not depth:\n glActiveTexture(GL_TEXTURE1)\n self.__sh.bind_texture(1)" ]
[ "0.6226837", "0.6203353", "0.6016738", "0.58274734", "0.5601365", "0.55847776", "0.5581775", "0.5572715", "0.55110264", "0.54817516", "0.5468707", "0.54606545", "0.5430585", "0.5422103", "0.5419676", "0.5414244", "0.5406814", "0.54019004", "0.538", "0.5315735", "0.530001", "0.52929413", "0.529117", "0.52708364", "0.52553725", "0.52335054", "0.5231197", "0.5230847", "0.5224733", "0.5222133" ]
0.6437094
0
Add a layer with custom properties to the AR stack. Arguments
def add_custom_layer(self, material, thickness, units, dielectric, loss_tangent, stack_position=-1): layer = Layer() layer.units = units layer.thickness = thickness layer.dielectric = dielectric layer.losstangent = loss_tangent if (stack_position == -1): self.stack.append(layer) else: self.stack.insert(stack_position, layer) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, layer):\n self._top = layer(self._top)\n layer_name_ = layer.__class__.__name__\n layer_params_ = layer.params\n self._info.append((layer_name_, layer_params_))", "def __call__(cls, *args, **kwargs):\n layer = super(LayerAspect, cls).__call__(*args, **kwargs)\n\n if Job.Current:\n Job.Current.addLayer(layer)\n \n layer.afterInit()\n return layer", "def AddLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_AddLayer(self, *args)", "def append_layer(self, *args, **kwargs) :\n \n self.insert_layer(len(self._layers), *args, **kwargs)", "def add_layer(self, layer):\n idx = len(self.dict_topo)\n idx += 1\n self.dict_topo[idx] = layer", "def addLayer(self, layer):\n self.layers.append(layer)", "def add_layer(self, layer):\n self.__layers.append(layer)", "def add_layer(self, *args):\n\n nm = None\n\n #check to see if we're sending an already formed layer to add - used for data file\n if len(args) == 1 & isinstance(args[0], QgsVectorLayer):\n print('Importing {} as a vector'.format(args[0]))\n self.project.addMapLayer(args[0])\n nm = args[0].name()\n\n elif len(args) > 1:\n print('Importing {} as a vector'.format(args[0]))\n print(args)\n self.project.addMapLayer(QgsVectorLayer(*args))\n nm = args[1]\n\n if nm:\n self.get_layer(nm)\n\n else:\n print()\n print('***Bad map layer for {}***'.format(str(args)))\n print()", "def stack(self, layer: Layer):\n self.layers.append(layer)\n return self", "def add_layer(self, layer_name, layer_def):\n\n layer_idx, datatype = layer_def.split(\"/\")\n layer_idx = int(layer_idx)\n datatype = int(datatype)\n self.layers[layer_name] = LayerInfo(layer_idx, datatype, layer_name)", "def add(self, layer):\n layer.set_dtype(self.dtype)\n self.layers = np.append(self.layers, layer)", "def append(self, layer):\n self.layers.append(layer)", "def decorate(self, layer) :\n raise NotImplemented(\"This one should be implemented in child\")", "def add_layer(self, layer):\n\n self._layers.append(layer)", "def add_layer(self, material, thickness=5.0, units='mil', type='layer', \\\n stack_position=-1):\n\n type = type.lower()\n if type == 'layer':\n layer = Layer()\n layer.name = material.lower()\n layer.thickness = thickness\n layer.units = units\n try:\n# layer.dielectric = mats.Electrical.DIELECTRIC[layer.name]\n layer.dielectric = mats.Electrical.props[layer.name][0]\n except:\n raise KeyError('I don\\'t know that material!')\n try:\n# layer.losstangent = mats.Electrical.LOSS_TAN[layer.name]\n layer.losstangent = mats.Electrical.props[layer.name][1]\n except:\n layer.losstangent = 0\n print('\\nI don\\'t know this loss tangent. Setting loss to 0!')\n if (stack_position == -1):\n self.stack.append(layer)\n else:\n self.stack.insert(stack_position, layer)\n elif type == 'source':\n self.source = SourceLayer()\n self.source.name = material.lower()\n try:\n# self.source.dielectric = mats.Electrical.DIELECTRIC[self.source.name]\n self.source.dielectric = mats.Electrical.props[self.source.name][0]\n except:\n raise KeyError('I don\\'t know that material!')\n try:\n# self.source.losstangent = mats.Electrical.LOSS_TAN[self.source.name]\n self.source.losstangent = mats.Electrical.props[self.source.name][1]\n except:\n self.source.losstangent = 0\n print('\\nI don\\'t know this loss tangent. 
Setting loss to 0!')\n elif type == 'terminator':\n self.terminator = TerminatorLayer()\n self.terminator.name = material.lower()\n try:\n# self.terminator.dielectric = mats.Electrical.DIELECTRIC[self.terminator.name]\n self.terminator.dielectric = mats.Electrical.props[self.terminator.name][0]\n except:\n raise KeyError('I don\\'t know that material!')\n try:\n# self.terminator.losstangent = mats.Electrical.LOSS_TAN[self.terminator.name]\n self.terminator.losstangent = mats.Electrical.props[self.terminator.name][1]\n except:\n self.terminator.losstangent = 0\n print('\\nI don\\'t know this loss tangent. Setting loss to 0!')\n else:\n raise ValueError('Type must be one of LAYER, SOURCE, or TERMINATOR')\n return", "def add_layer(self, layer):\n\t\tif isinstance(layer, Layer):\n\t\t\tif layer != self:\n\t\t\t\tself.sublayers.append(layer)\n\t\t\t\tlayer.superlayer = self\n\t\telse:\n\t\t\traise TypeError('Invalid layer object')", "def add(self, layer):\n if len(self.layers) == 0:\n if not layer.n_inputs:\n raise Exception('Need to have n_inputs for layer.')\n else:\n layer.n_inputs = self.layers[-1].units\n self.layers.append(layer)", "def add_layer(self, layer):\n\t\tif self.root_layer is None:\n\t\t\ts = size()\n\t\t\tself.root_layer = Layer(Rect(0, 0, s[0], s[1]))\n\t\tself.root_layer.add_layer(layer)", "def create_layer(StackId=None, Type=None, Name=None, Shortname=None, Attributes=None, CloudWatchLogsConfiguration=None, CustomInstanceProfileArn=None, CustomJson=None, CustomSecurityGroupIds=None, Packages=None, VolumeConfigurations=None, EnableAutoHealing=None, AutoAssignElasticIps=None, AutoAssignPublicIps=None, CustomRecipes=None, InstallUpdatesOnBoot=None, UseEbsOptimizedInstances=None, LifecycleEventConfiguration=None):\n pass", "def AddLayer(self, name, alias = None, resultsLayer = False):\n if (resultsLayer and\n name in [l.GetName() for l in self.map.GetListOfLayers(l_name = name)]):\n self.frame.Render(self.mapWindow)\n return\n \n cmdlist = ['d.rast', 'map=%s' % name]\n self.map.AddLayer(type = 'raster', command = cmdlist, l_active = True,\n name = name, l_hidden = False, l_opacity = 1.0, l_render = True)\n #self.frame.Render(self.GetWindow().Render())\n self.frame.GetWindow().UpdateMap(render = True, renderVector = False)\n \n \n if alias is not None:\n alias = self._addSuffix(alias)\n self.layerName[alias] = name\n name = alias\n else:\n self.layerName[name] = name\n \n self.toolbar.choice.Insert(name, 0)\n self.toolbar.choice.SetSelection(0)", "def __init__(self):\n super(LinearAggregationLayer, self).__init__()", "def animLayer(*args, addRelatedKG: bool=True, addSelectedObjects: bool=True, affectedLayers:\n bool=True, animCurves: bool=True, attribute: Union[AnyStr, List[AnyStr], bool]=\"\",\n baseAnimCurves: bool=True, bestAnimLayer: bool=True, bestLayer: bool=True,\n blendNodes: bool=True, children: Union[AnyStr, bool]=\"\", collapse: bool=True,\n copy: AnyStr=\"\", copyAnimation: AnyStr=\"\", copyNoAnimation: AnyStr=\"\",\n excludeBoolean: bool=True, excludeDynamic: bool=True, excludeEnum: bool=True,\n excludeRotate: bool=True, excludeScale: bool=True, excludeTranslate: bool=True,\n excludeVisibility: bool=True, exists: bool=True, extractAnimation: AnyStr=\"\",\n findCurveForPlug: Union[AnyStr, bool]=\"\", forceUIRebuild: bool=True,\n forceUIRefresh: bool=True, layeredPlug: Union[AnyStr, bool]=\"\", lock: bool=False,\n maxLayers: bool=True, moveLayerAfter: AnyStr=\"\", moveLayerBefore: AnyStr=\"\",\n mute: bool=False, override: bool=False, parent: Union[AnyStr, 
bool]=\"\",\n passthrough: bool=True, preferred: bool=True, removeAllAttributes: bool=True,\n removeAttribute: Union[AnyStr, List[AnyStr]]=\"\", root: Union[AnyStr, bool]=\"\",\n selected: bool=True, solo: bool=False, weight: Union[float, bool]=1,\n writeBlendnodeDestinations: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def build_layers(self):\n raise NotImplementedError", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def SetLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_SetLayer(self, *args)", "def add_layer(self, layer: layers.Layer) -> layers.Layer:\n layer.events.select.connect(self._update_active_layer)\n layer.events.deselect.connect(self._update_active_layer)\n layer.events.status.connect(self._update_status)\n layer.events.help.connect(self._update_help)\n layer.events.interactive.connect(self._update_interactive)\n layer.events.cursor.connect(self._update_cursor)\n layer.events.cursor_size.connect(self._update_cursor_size)\n layer.events.data.connect(self._on_layers_change)\n layer.dims.events.ndisplay.connect(self._on_layers_change)\n layer.dims.events.order.connect(self._on_layers_change)\n layer.dims.events.range.connect(self._on_layers_change)\n self.layers.append(layer)\n self._update_layers(layers=[layer])\n\n if len(self.layers) == 1:\n self.reset_view()\n return layer", "def add_layer(self, layer_key_name, caching_layer):\n self.config.layers[layer_key_name] = _parseConfigLayer(self.build_layer_dict(caching_layer, layer_key_name),\n self.config, dirpath='.')", "def add_layer(self, layer):\n assert isinstance(layer, torch.nn.Module)\n setattr(self, 'layer'+str(self._layer_counter), layer)\n self._layer_counter += 1\n # layer indexing : layer 0 is closest to input", "def set_up_rlayer(self, rlname, rlname_other=None, include_layers=None,\n exclude_layers=None, mask_layers=None):\n scene = self.set_as_active()\n layer_numbers = constants.layer_numbers\n w_var.rlname = rlname\n\n if include_layers is None:\n include_layers = w_var.layer_numbers_all_used\n\n if exclude_layers is None:\n exclude_layers = []\n\n if mask_layers is None:\n mask_layers = []\n\n if w_var.cb_clear_rlayers:\n for layer in scene.render.layers[:-1]:\n scene.render.layers.remove(layer)\n\n scene.render.layers.active.name = rlname\n scene.render.layers.active.use = True\n\n new_rlayer = scene.render.layers.active\n\n # if not clearing render layers: creates new one\n else:\n new_rlayer = scene.render.layers.new(rlname)\n scene.render.layers.active = new_rlayer\n\n # there needs to be two render layers in the same scene for freestyle compositing\n if w_var.cb_composited:\n w_var.rlname_other = rlname_other\n other_rlayer = scene.render.layers.new(rlname_other)\n other_rlayer.layers[19] = True\n scene.render.layers[rlname_other].layers_zmask = (False,) * 20\n\n if w_var.cb_ao:\n scene.render.layers[rlname].use_pass_ambient_occlusion = True\n\n if w_var.cb_composited:\n scene.render.layers[rlname_other].use_pass_ambient_occlusion = True\n\n # because I can't deactivate a layer if it is the only active one\n new_rlayer.layers[19] = True\n \n scene.render.layers[rlname].layers_exclude = (False,) * 20\n scene.render.layers[rlname].layers_zmask = (False,) * 20\n\n for i in layer_numbers:\n if w_var.cb_composited:\n if i in w_var.layer_numbers_affected:\n scene.render.layers[rlname].layers[i] = True\n scene.render.layers[rlname_other].layers_zmask[i] = 
True\n\n else:\n scene.render.layers[rlname].layers[i] = False\n\n if i in w_var.layer_numbers_other:\n scene.render.layers[rlname_other].layers[i] = True\n\n else:\n scene.render.layers[rlname_other].layers[i] = False\n\n else:\n if i in include_layers:\n scene.render.layers[rlname].layers[i] = True\n\n else:\n scene.render.layers[rlname].layers[i] = False\n\n if i in mask_layers:\n scene.render.layers[rlname].layers_zmask[i] = True\n\n if i in exclude_layers:\n scene.render.layers[rlname].layers_exclude[i] = True", "def make_res_layer(self, **kwargs):\n return ResLayer(**kwargs)" ]
[ "0.6919454", "0.6874903", "0.6793331", "0.65545005", "0.64739907", "0.63449734", "0.63442105", "0.6126746", "0.59435934", "0.59133106", "0.5860487", "0.58573556", "0.582993", "0.58179677", "0.57985747", "0.5792031", "0.5770099", "0.57636625", "0.5761266", "0.57004243", "0.568947", "0.56741554", "0.5634889", "0.56135625", "0.5608142", "0.56027585", "0.5597719", "0.55719", "0.5557867", "0.55504686" ]
0.7097225
0
Display all the simulation parameters in one place.
def display_sim_parameters(self): pprint.pprint(vars(self)) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_parameters(self):\n with np.printoptions(precision=3, suppress=True):\n print('number of wind phase = {}'.format(self.ncomp))\n print('galactic parameter = {}'.format(self.scaling_field))\n print('reference height = {}'.format(self.z0))\n for p in ['cool_params','hot_params','params','ref_params','scaling_params']:\n params = getattr(self,p)\n print(p)\n for k,v in params.items():\n print(' {} = {}'.format(k,v))", "def display_parameters(self):\n ips = GAConfig[\"initial_population_size\"]\n ps = GAConfig[\"population_size\"]\n nomp = GAConfig[\"num_mating_pairs\"]\n mf = GAConfig[\"base_mutation_factor\"]\n ne = GAConfig[\"num_evolutions\"]\n noc = GAConfig[\"num_categories\"]\n nog = len(self.phones)\n\n display_string = \"\"\"\n Genetic Algorithm Parameters\n ----------------------------\n Initial Population Size %s\n Population Size %s\n Number of Mating Pairs %s\n Base Mutation Factor %s\n Number of Evolutions %s\n Number of Categories %s\n Number of Genes %s\n \"\"\" % (ips, ps, nomp, mf, ne, noc, nog)\n\n print(display_string)", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def print_params(self):\n s = self._list_params()+\"\\n\"\n if 'scale_params' in self.__dict__.keys():\n s += self.scale_params._list_params()+\"\\n\"\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n s += self.atmospheric_params._list_params()+\"\\n\"\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n s += self.atemperature_params._list_params()+\"\\n\"\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n s += self.oceanic_params._list_params()+\"\\n\"\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n s += self.ground_params._list_params()+\"\\n\"\n\n if 'gotemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n s += self.gotemperature_params._list_params() + \"\\n\"\n\n print(\"Qgs v0.2.8 parameters summary\")\n print(\"=============================\\n\")\n print(s)", "def display(self):\n print(\"{}, {}\".format(self.label, self.params))", "def parameters():\n return render_template(\n 'parameters.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n )", "def print_params(self):\n\n logger.info('SimulatedMaps has been initialised with the following attributes:')\n for key in self.params.keys():\n logger.info('{} = {}'.format(key, self.params[key]))", "def print_all_params(self, disp=True):\n descriptions = {'general': {}}\n for name, param in self.params.items():\n descriptions['general'][name] = param.get_description()\n\n for comp, comp_obj in self.components.items():\n descriptions[comp] = {}\n for name in comp_obj.get_params():\n descriptions[comp][name] = comp_obj.get_param_description(name)\n return self._print_params(descriptions, disp)", "def print_params(self):\n print(self._list_params())", "def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out", "def printConf(self):\n print \"\"\n for pname, pvalue in self.neededParams.items():\n print pname, pvalue\n for pname, pvalue in self.optionalParams.items():\n print pname, pvalue", "def display_window(self):\n frame = tk.Frame(master=self.param_window)\n 
frame.grid(padx=10, pady=20, columnspan=2)\n tk.Label(master=frame, text=\"Enter simulation parameters\").pack()\n\n self.status_text = tk.StringVar()\n self.status_text.set(\"Status message\")\n \n self.rows = 1\n for input_key in self.inputs.keys():\n input_dict = self.inputs[input_key]\n \n frame = tk.Frame(master=self.param_window)\n frame.grid(row=self.rows, column=0, padx=10, pady=1)\n input_dict['label'] = tk.Label(master=frame, text=input_dict['label'])\n input_dict['label'].pack()\n\n frame = tk.Frame(master=self.param_window)\n frame.grid(row=self.rows, column=1, padx=10, pady=1)\n input_dict['entry'] = tk.Entry(master=frame, width=10)\n input_dict['entry'].insert(0, input_dict['default'])\n input_dict['entry'].pack()\n \n self.rows += 1\n\n frame = tk.Frame(master=self.param_window)\n frame.grid(padx=10, pady=20, columnspan = 2)\n self.submit_btn = tk.Button(master=frame, text=\"Submit\", width=10)\n self.submit_btn.pack()\n self.submit_btn.bind(\"<Button-1>\", self.submit_values)\n\n self.param_window.mainloop()\n return self.parameters", "def printParameters(self):\n print(\"----------Model Parameters----------\")\n print(\"Initial Conv. Depth : \" + str(self.conv_depth))\n print(\"Number of Classes : \" + str(self.n_classes))\n print(\"Dropout : \" + str(self.dropout))\n print(\"Activation Function : Relu\")\n print(\"Input Shape : \" + str(self.input_shape))\n print(\"Batch Size : \" + str(self.batch_size))\n print(\"--------Optimizer Parameters--------\")\n print(\"Learning Rate : \" + str(self.optimizer.lr))\n print(\"Momentum : \" + str(self.optimizer.momentum))\n print(\"Initial Decay : \" + str(self.optimizer.initial_decay))", "def print_me(self):\n\n print(\"----- Model:\",self.name,\" -----\")\n print(\"Mass (in M_sun): %.5f\" % (self.glb[imass]/constants.solar_mass))\n print(\"Radius (in R_sun): %.5f\" % (self.glb[iradius]/constants.solar_radius))\n print(\"Reference frequency (in uHz): %.3f\" % self.glb[ifreq_ref])\n print(\"Temperature (in K): %.1f\" % self.glb[itemperature])\n print(\"Luminosity (in L_sun): %.3g\" % (self.glb[iluminosity]/constants.solar_luminosity))\n print(\"Age (in Myrs): %.2f\" % self.glb[iage])\n print(\"Z: %.4f\" % self.glb[iz0])\n print(\"X: %.4f\" % self.glb[ix0])\n for (name, latex_name) in config.user_params:\n print(\"{0:29} {1:.5e}\".format(name,self.glb[user_params_index[name]]))\n print(\"Modes (in muHz):\")\n size = self.modes.shape[0]\n for i in range(size):\n print(\" (n,l,freq,IK) = (%d, %d, %.15f, %.5e)\" % \\\n (self.modes['n'][i], self.modes['l'][i], \\\n self.modes['freq'][i]*self.glb[ifreq_ref],\\\n self.modes['inertia'][i]))", "def display(self):\n for value, prob in self.items():\n print(value, prob)", "def display_parameters(self):\n l = []\n for param in self.parameters.all():\n if len(param.value) > 16:\n l.append(u\"{}={}...\".format(param.name, param.value[:16]))\n else:\n l.append(u\"{}={}\".format(param.name, param.value))\n return \"; \".join(l)", "def print_info(self):\n\n print \"\\nALGORITHM INFO\"\n print \"modelnumber:\", self.modelnumber\n print \"restart:\", self.restart\n print \"particles:\", self.particles\n print \"beta:\", self.beta\n print \"dt:\", self.dt\n if self.mode != 1:\n if len(self.final_epsilon) == 0:\n print \"manual epsilon:\"\n for i in range(self.epsilon.shape[0]):\n print \"\\t\",\n for j in range(self.epsilon.shape[1]):\n print \"\", self.epsilon[i, j],\n print \"\"\n else:\n print \"auto epsilon:\"\n print \"\\t\", self.final_epsilon\n print \"\\talpha:\", self.alpha\n\n print 
\"kernel:\", self.kernel\n print \"model kernel:\", self.modelkernel\n print \"model prior:\", self.modelprior\n\n print \"DATA:\"\n print \"\\ttimes:\", self.times\n if self.mode == 0:\n print \"\\tvars:\"\n for i in range(len(self.data[0, :])):\n print \"\\t\",\n for j in range(self.ntimes):\n print \"\", self.data[j, i],\n print \"\"\n\n print \"MODELS:\", self.nmodels\n for i in range(self.nmodels):\n print \"\\t\", \"npar:\", self.nparameters[i]\n print \"\\t\", \"nspecies:\", self.nspecies[i]\n print \"\\t\", \"name:\", self.name[i]\n print \"\\t\", \"source:\", self.source[i]\n print \"\\t\", \"type:\", self.type[i]\n print \"\\t\", \"fit:\", self.fit[i]\n print \"\\t\", \"init:\", self.x0prior[i]\n print \"\\t\", \"prior:\", self.prior[i]\n print \"\\t\", \"logp:\", self.logp[i]\n print \"\\n\"", "def generate_parameter_list(self) -> None:\n\n # simulation parameters from model\n model_parameter_ids = np.array(self.amici_model.getParameterIds())\n write_string_array(self.f, \"/parameters/modelParameterNames\",\n model_parameter_ids)\n print(Fore.CYAN + \"Number of model parameters:\",\n len(model_parameter_ids))\n\n print(Fore.CYAN + \"Number of optimization parameters:\",\n len(self.parameter_df))\n write_string_array(self.f, \"/parameters/parameterNames\",\n self.parameter_df.index.values[\n (self.parameter_df.estimate == 1)\n & ~self.parameter_df.index.isin(\n self.amici_model.getFixedParameterIds())])\n\n self.generate_simulation_to_optimization_parameter_mapping()\n\n self.f.flush()", "def show_start_screen(self) -> tuple[SimulationParameters, bool, bool, bool]:\n pass", "def show_contents(self):\n print(self.filename, 'loaded')\n\n table = [['group', 'parameter']]\n for group in self.file:\n table.append([group, self.dict[group]])\n display(HTML(tabulate.tabulate(table, tablefmt='html')))\n\n print('Call directly as an attribute or call (parameter) or (group, parameter) to retrieve data')\n print('Use .show_info(group) to show parameter shapes')", "def printDesignVariables(self):\n print(\"-\" * 85)\n print(\"{:>30}{:>20}{:>20}\".format(\"CSM Design Parameter\", \"Name\", \"Value\"))\n print(\"-\" * 85)\n for dvName in self.DVs:\n DV = self.DVs[dvName]\n print(f\"{DV.csmDesPmtr:>30}{DV.name:>20}{DV.value:>20}\")", "def print_grid_params(params):\n tot = 1\n for key, p in params.items():\n tot *= len(p)\n print(f'{key}: {p}')\n\n print(f'\\nTotal models: {tot}\\n')\n print('=' * 40)", "def printParameters(self):\n with self._graph.as_default():\n for var in tf.global_variables():\n print(var.name)\n val = self._sess.run(var)\n print(val)", "def print_model_params(model):\n for param, value in zip(model.param_names, model.parameters):\n print(\"{:0.4f}\\t{}\".format(value, param))", "def print_str(self):\n print('*StanGpMatern with params={}'.format(self.params))", "def print_Twiss(self):\n\n print(\"Twiss parameters in use:\")\n print(\"betax : {}\".format(self._betax))\n print(\"betay : {}\".format(self._betay))\n print(\"alphax : {}\".format(self._alphax))\n print(\"alphay : {}\".format(self._alphay))\n print(\"gammax : {}\".format(self._gammax))\n print(\"gammay : {}\".format(self._gammay))", "def display_layer_parameters(self):\n pprint.pprint(vars(self))\n return", "def view(self,\n print_global_settings=True,\n print_general_settings=True,\n print_tmp_vals=False,\n print_results=True,\n **kws\n ):\n\n print(self.name)\n\n if print_global_settings:\n print(\"Global settings:\")\n pprint.pprint(self.global_settings)\n print()\n\n if print_general_settings:\n 
print(\"General settings:\")\n pprint.pprint(self.settings[self.name]['General'])\n print()\n\n for i, x in enumerate(self.routine_template):\n print(f\"Step {i}, {x[0].__name__} ({x[1]})\")\n print(\"Settings:\")\n pprint.pprint(x[2], indent=4)\n\n if print_tmp_vals:\n try:\n print(\"Temporary values:\")\n pprint.pprint(x[3], indent=4)\n except IndexError:\n pass\n print()\n\n if print_results:\n print_step_results(self)", "def print_configuration_info():\n print(\"Selected dataset:\", DATASET) \n print(\"Dataset base directory:\", BASE_INPUT_DIR) \n print(\"Daytime option:\", DAYTIME) \n print(\"Nones option:\", NONES) \n print(\"Selected action/activity representation:\", OP)\n print(\"Number of epochs: \", EPOCHS)\n print(\"Number of folds for cross-validation: \", FOLDS)\n print(\"Input directory for data files:\", INPUT_DIR) \n print(\"Embedding matrix file:\", EMBEDDING_WEIGHTS)\n print(\"Action sequences (X) file:\", X_FILE) \n print(\"Word embedding file for activities:\", ACTIVITY_EMBEDDINGS) \n print(\"Activity to int mappings:\", ACTIVITY_TO_INT)\n print(\"Int to activity mappings:\", INT_TO_ACTIVITY) \n print(\"Experiment ID:\", EXPERIMENT_ID)\n print(\"Treat imbalance data:\", TREAT_IMBALANCE)\n print(\"Save intermediate plots:\", SAVE)\n print(\"Batch size:\", BATCH_SIZE)\n print(\"Dropout:\", DROPOUT)\n print(\"Loss:\", LOSS)", "def updateParms(self):\n self.p1text.setText(\"{0:g}\".format(self.parmVal[0]))\n if len(self.parmVal) > 1:\n self.p2text.setText(\"{0:g}\".format(self.parmVal[1]))\n if len(self.parmVal) > 2:\n self.p3text.setText(\"{0:g}\".format(self.parmVal[2]))\n if len(self.parmVal) > 3:\n self.p4text.setText(\"{0:g}\".format(self.parmVal[3]))" ]
[ "0.74094933", "0.73057306", "0.71566695", "0.6884514", "0.6796596", "0.67935765", "0.6771066", "0.67183095", "0.6626539", "0.6595529", "0.65158534", "0.6436903", "0.6424904", "0.64146215", "0.6342033", "0.6288974", "0.62845755", "0.6271121", "0.6263979", "0.6254826", "0.6253278", "0.6226323", "0.6224966", "0.6168631", "0.6163746", "0.6154136", "0.61027354", "0.6091233", "0.60751474", "0.59999907" ]
0.7992493
0
Remove all elements from the current AR ``structure``.
def clear_structure(self):
    self.structure = []
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_elements(self):\n\n pass", "def clear(self):\n\n\t\tself.atomid = []\n\t\tself.resi = []\n\t\tself.resn = []\n\t\tself.atom = []\n\t\tself.element = []\n\t\tself.chain = []\n\t\tself.type = []\n\t\tself.inverted = False\n\t\tself.atomlist = []\n\t\tself.keeplist = []\n\t\tself.macros = []\n\n\t\tself.invresi = False\n\t\tself.invresn = False\n\t\tself.invatom = False\n\t\tself.invelement = False\n\t\tself.invchain = False\n\t\tself.invtype = False\n\t\tself.invatomid = False", "def remove_element(self, element=None):\n pass", "def removeFromArray(ls=('AnyPin', [], {PinSpecifires.CONSTRAINT: '1', PinSpecifires.ENABLED_OPTIONS: PinOptions.ArraySupported | PinOptions.AllowAny}),\n elem=('AnyPin', None, {PinSpecifires.CONSTRAINT: '1'}),\n removed=(REF, ('BoolPin', False))):\n if elem not in ls:\n removed(False)\n return\n ls.remove(elem)\n removed(True)\n return ls", "def remove_all_objs(self):\n objs = self.scene.get_objects()\n objs_attached = self.scene.get_attached_objects()\n # remove add objects\n for key in objs.keys():\n self.remove_obj(key)\n # remove attached objects\n for key in objs_attached.keys():\n self.unlink_obj(objs_attached[key].link_name, key)", "def remove_all(self):\n self.initial = None\n self.contour = None\n self.control_points = []", "def clear(self):\r\n ElementSet.clear(self)\r\n self.update()", "def clear(self):\n [roi.clear() for roi in self.rois]", "def remove_all_fields(self):\n self.fields = None", "def strip(self):\n types = [type(self.strip),\n type(self.values),\n type(self.__ne__),\n type(self.__class__)]\n\n for attr in dir(self):\n if not type(getattr(self, attr)) in types:\n if any(i in attr for i in self.keep) or attr[0:2] == '__':\n continue\n else:\n x = getattr(self, attr)\n del x\n for molecule in self.values():\n molecule.strip_molecule(self.keep)\n exit()", "def remove_dummy(self) -> None:\n\n for i, atom in enumerate(self):\n if isinstance(atom, DummyAtom):\n del self[i]\n return", "def clear(self):\n for ob in self.obs:\n ob.clear()\n return", "def _removeUnusedElements(self, element):\n self.log(\"element:%r\" % element)\n for pad in element.src_pads():\n if pad.is_linked():\n peer = pad.get_peer().get_parent()\n self._removeUnusedElements(peer)\n if not peer in self._validelements:\n self.log(\"removing %s\" % peer.get_name())\n pad.unlink(pad.get_peer())\n peer.set_state(gst.STATE_NULL)\n self.remove(peer)", "def clear(self):\n for tag in self.meta.findall(CN('meta:user-defined')):\n self.meta.remove(tag)", "def remove(self):\n self.inp.inputs.discard(self)\n self.out.outputs.discard(self)", "def clear(self):\n # we want to use self.inputs.clear() but it's not in python2\n del self.inputs[:]\n del self.outputs[:]", "def clearReplacedElements(self):\n return _libsbml.CompSBasePlugin_clearReplacedElements(self)", "def delete(self, structure, sentence) -> List[AcabNode]:\n raise NotImplementedError()", "def delete(self, structure, sentence) -> List[AcabNode]:\n raise NotImplementedError()", "def clear(self):\n for vertex in self.vertices():\n del self[vertex]", "def clearArray(ls=('AnyPin', [], {PinSpecifires.CONSTRAINT: '1', PinSpecifires.ENABLED_OPTIONS: PinOptions.ArraySupported | PinOptions.AllowAny})):\n return clearList(ls)", "def reset(self):\n self.elements = [0] * len(self)", "def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and residue.has_atom(\"HE2\"):\n 
residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")", "def clearAllElementMetaIdList(self):\n return _libsbml.Model_clearAllElementMetaIdList(self)", "def remove():", "def clear(self):\n [group.clear() for group in self.roi_groups]\n self.signal_status_message.emit('Cleared data')", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def remove_all(self):\n self._options.clear()\n self._programs.clear()", "def remove(self, board):\n for c in board.copy():\n while self in c:\n index = tuple(c.inputs.values()).index(self)\n key = tuple(c.inputs.keys())[index]\n c.inputs[key] = None\n # fixes possible memory leak\n self.inputs = {k: None for k, v in self.inputs.items()}", "def clear(self):\n\n\t\tfor chain in self.chain:\n\t\t\tchain.clear()\n\n\t\tself.chain = []\n\t\tself.remark = []" ]
[ "0.6407057", "0.6135725", "0.6079741", "0.6073262", "0.6065226", "0.5991256", "0.59750926", "0.5952651", "0.59332174", "0.58856636", "0.5879297", "0.5873883", "0.5859984", "0.58570814", "0.5840715", "0.5840436", "0.58200175", "0.5818231", "0.5818231", "0.5808292", "0.5805646", "0.57839054", "0.5761216", "0.5740637", "0.5735307", "0.5726789", "0.57244873", "0.5722872", "0.5693402", "0.5693296" ]
0.75357366
0
Remove the specified layer from the AR coating stack. Arguments
def remove_layer(self, layer_pos):
    self.stack.pop(layer_pos)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RemoveLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_RemoveLayer(self, *args)", "def remove_layer(self, layer=None):\n\t\tif layer is not None:\n\t\t\ttry:\n\t\t\t\tself.sublayers.remove(layer)\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\telif self.superlayer is not None:\n\t\t\tself.superlayer.remove_layer(self)", "def remove_layer(self, layer: CommandLayer):\n try:\n index = self.command_layers.index(layer)\n except ValueError:\n return\n\n if layer.active: # Transfer the active status to another layer\n if index < 0: # ... to the previous layer in the stack\n before = self.command_layers[index - 1]\n before.active = True\n elif len(self.command_layers) > 1: # ... to the next layer\n next = self.command_layers[index + 1]\n next.active = True\n\n self.command_layers.remove(layer)\n self._save()", "def remove_layer(self, layer_key_name):\n del(self.config.layers[layer_key_name])", "def delete_layer(LayerId=None):\n pass", "def RemoveLayer(self, name, idx):\n name = self.layerName[name]\n self.map.RemoveLayer(name = name)\n del self.layerName[name]\n self.toolbar.choice.Delete(idx)\n if not self.toolbar.choice.IsEmpty():\n self.toolbar.choice.SetSelection(0)\n\n self.frame.GetWindow().UpdateMap(render = True, renderVector = False) \n #self.frame.Render(self.mapWindow)", "def remove_layer(self, layer_pos):\n\n # If not within feasible bounds, return\n if layer_pos <= 1 or layer_pos > self.number_hidden_layers:\n return\n\n # We set the number of input and output dimensions for the layer to be\n # added and for the ones in the architecture that will be connected to it\n\n # We delete the layer in pos layer_pos\n self.dims = np.delete(self.dims, layer_pos)\n self.init_functions = np.delete(self.init_functions, layer_pos)\n self.act_functions = np.delete(self.act_functions, layer_pos)\n self.batch_norm = np.delete(self.batch_norm, layer_pos)\n self.dropout = np.delete(self.dropout, layer_pos)\n self.dropout_probs = np.delete(self.dropout_probs, layer_pos)\n\n # Finally the number of hidden layers is updated\n self.number_hidden_layers = self.number_hidden_layers - 1", "def _clearLayer(self, layer=0):\n for i in self._existingLayerItems(layer):\n self._plt.removeItem(i)", "def DeleteLayer(self, event):\n pass", "def delete_layer(self, index) :\n \n # Remove the actor, delete the list item, and update the other layers.\n self._renderer.RemoveActor(self._layers[index].actor)\n del self._layers[index]\n self._update_layers_positions()", "def UnSetOneLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_UnSetOneLayer(self, *args)", "def deleteLayer(self, id):\n\n # just in case we got None\n if id is None:\n return\n\n # see if what we are about to remove might be visible\n layer = self.layer_mapping[id]\n visible = layer.visible\n\n del layer\n self.layer_z_order.remove(id)\n\n # if layer was visible, refresh display\n if visible:\n self.Refresh()", "def add_drop_out_layer(self, input_layer):\n return tf.nn.dropout(input_layer, self.keep_prob)", "def detach(self, phy_layer):\n self._attached_phys.remove(phy_layer)", "def UnSetLayers(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_UnSetLayers(self, *args)", "def test_addon_layer_removed(self):\n layers = [l.getName() for l in registered_layers()]\n self.assertNotIn('IBriefyPloneLayer', layers)", "def op_delete(self, args):\n stack_level = 0\n if args != None:\n stack_level = int(args[0])\n self.require_stack(stack_level+1)\n if stack_level == None:\n self.stack.pop()\n else:\n self.stack.pop(-stack_level-1)", "def remove(self):\n 
return self.stack_list.pop()", "def remove(self) -> None:\n self.map.remove_brush(self)", "def delete_stack(Name=None):\n pass", "def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.dataset.pop(name)", "def delete_from_featurelayer(featurelayer):\n\n # TODO why is where clause required to delete all Features?\n result = featurelayer.delete_features(where=\"OBJECTID is not null\")\n deleted_count = len(result['deleteResults'])\n logging.info(f\"deleted {deleted_count} stations from FeatureLayer\")\n if len(featurelayer.query().features) != 0:\n raise RuntimeError(\"Expected the FeatureLayer to be empty\")", "def _removeWithSplay(self, entry, bst_remove):\r\n \r\n result = bst_remove(entry)\r\n \r\n # perform splaying on lowest accessed node\r\n \r\n target_node = self.action_node\r\n \r\n self._splayGivenActionNode(target_node)\r\n \r\n return result", "def __removeSoftMax(self,layer):\n newLayer = layer.__class__.from_config(layer.get_config())\n if hasattr(newLayer,\"activation\") and newLayer.activation == tf.keras.activations.softmax:\n newLayer.activation = tf.keras.activations.linear #No computa nada, deja pasar los valores --> f(x) = x\n return newLayer", "def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.train.pop(name)\n self.test.pop(name)", "def removeRow(self):\n row = self.getCurrentRow()\n self.jobRow.removeRow(row)\n return self.layers.pop(row)", "def removeParameter(self, *args):\n return _libsbml.KineticLaw_removeParameter(self, *args)", "def remove_cat(self, path: Path):\n if not self.active:\n return\n if path is None:\n return\n for i, coord in enumerate(path.path):\n self.cat[coord[1]][coord[0]].remove((path.identifier, i))", "def remove(self, src: int, dst: int) -> None:\n if src and dst is None:\n raise ValueError(\n \"tfgraph and dst must not be None \")\n self.run_tf([tf.scatter_nd_add(self.A_tf, [[src, dst]], [-1.0]),\n tf.scatter_nd_add(self.out_degrees_tf, [[src, 0]], [-1.0]),\n tf.scatter_nd_add(self.in_degrees_tf, [[0, dst]], [-1.0])])\n self.m -= 1\n self._notify(np.array([src, dst]), -1)", "def removeSkinLayer(context):\n skins = getToolByName(context, \"portal_skins\")\n # Remove directory views for directories missing on the filesystem\n our_skin = \"CMFEditions\"\n if our_skin in skins.keys():\n skins._delObject(our_skin)\n logger.info(\"Removed %s from skin layers.\", our_skin)\n\n for layer, paths in skins.selections.items():\n paths = paths.split(\",\")\n if our_skin not in paths:\n continue\n paths.remove(our_skin)\n skins.selections[layer] = \",\".join(paths)\n logger.info(\"Removed %s from skin selection %s.\", our_skin, layer)" ]
[ "0.77137464", "0.7416595", "0.7410551", "0.71367216", "0.68727535", "0.6798072", "0.679285", "0.6770055", "0.65042233", "0.6372433", "0.6315002", "0.62630314", "0.57843345", "0.5730568", "0.57278323", "0.5645684", "0.5617629", "0.55241436", "0.5509654", "0.5505746", "0.5484322", "0.54686034", "0.5459289", "0.5438105", "0.543676", "0.5426092", "0.54197073", "0.53988254", "0.5382865", "0.5374241" ]
0.78272414
0
Take the attributes of the ``Builder()`` object and execute the simulation at each frequency in ``Builder().freq_sweep``. Save the output to a columnized, tab-separated text file. Returns
def run_sim(self):
    t0 = time.time()
    print('Beginning AR coating simulation')
    self._d_converter()
    self._interconnect()
    f_list = []
    t_list = []
    r_list = []
    for f in self.freq_sweep:
        results = self.sim_single_freq(f)
        f_list.append(f)
        t_list.append(results['T'])
        r_list.append(results['R'])
    fs = np.asarray(f_list)
    ts = np.asarray(t_list)
    rs = np.asarray(r_list)
    results = np.array([fs, ts, rs])
    t = time.ctime(time.time())
    data_name = self._make_save_path(self.save_path, self.save_name)
    header = 'Frequency (Hz)\t\tTransmission amplitude\t\tReflection amplitude'
    # log_name = self._make_save_path(self.save_path, self.log_name)
    # log = self._make_log()
    with open(data_name, 'wb') as f:
        np.savetxt(f, np.c_[fs, ts, rs], delimiter='\t', header=header)
    # with open(log_name, 'wb') as f:
    #     for line in log:
    #         f.writelines(line)
    #         f.write('\n')
    print('Finished running AR coating simulation')
    t1 = time.time()
    t_elapsed = t1-t0
    print('Elapsed time: {t}s\n'.format(t=t_elapsed))
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\r\n #print 'WriteFITS.run'\r\n\r\n # construct the name of the file\r\n runtime = self.previous_results['runtime']\r\n fitsname = '%s.fits' % runtime\r\n\r\n # get list of instrument observations\r\n observe = self.previous_results['observe']\r\n obs_timeline = observe['observed_timeline']\r\n observed_times = obs_timeline.keys()\r\n observed_times.sort()\r\n\r\n # construct lists of the values to be stored in each Table column\r\n for t in observed_times:\r\n timelist = []\r\n smec_position = []\r\n smec_nominal_position = []\r\n flag = []\r\n data = []\r\n pointing1_x = []\r\n pointing1_y = []\r\n pointing2_x = []\r\n pointing2_y = []\r\n\r\n config = obs_timeline[t]\r\n\r\n timelist.append(config.time)\r\n smec_position.append(config.smec_position)\r\n smec_nominal_position.append(config.smec_nominal_position)\r\n flag.append(config.flag)\r\n data.append(config.data)\r\n pointing1_x.append(config.pointing1_x)\r\n pointing1_y.append(config.pointing1_y)\r\n pointing2_x.append(config.pointing2_x)\r\n pointing2_y.append(config.pointing2_y)\r\n\r\n # create a Header object and primary HDU - this just contains\r\n # some very basic, general information\r\n prihdr = pyfits.Header()\r\n prihdr['COMMENT'] = 'This FITS file was created by pyfiins at %s' % \\\r\n runtime\r\n prihdu = pyfits.PrimaryHDU(header=prihdr)\r\n\r\n # create list of Header Data Unit objects, include the primary HDU\r\n hdulist = pyfits.HDUList([prihdu])\r\n\r\n # create an HDU to contain the Table and append it to the list\r\n hdulist.append(pyfits.BinTableHDU.from_columns(\r\n pyfits.ColDefs([\r\n pyfits.Column(name='Time', format='D',\r\n array=np.array(timelist)),\r\n pyfits.Column(name='SMEC Position', format='E',\r\n array=np.array(smec_position)),\r\n pyfits.Column(name='SMEC Nominal Position', format='E',\r\n array=np.array(smec_nominal_position)),\r\n pyfits.Column(name='Flag', format='L',\r\n array=np.array(flag)),\r\n pyfits.Column(name='Data', format='E',\r\n array=np.array(data)),\r\n pyfits.Column(name='Pointing1 X', format='E',\r\n array=np.array(pointing1_x)),\r\n pyfits.Column(name='Pointing1 Y', format='E',\r\n array=np.array(pointing1_y)),\r\n pyfits.Column(name='Pointing2 X', format='E',\r\n array=np.array(pointing2_x)),\r\n pyfits.Column(name='Pointing2 Y', format='E',\r\n array=np.array(pointing2_y))])))\r\n\r\n # write the HDU list to a file\r\n hdulist.writeto(fitsname, clobber=True)\r\n self.result['fitsfile'] = fitsname\r\n\r\n return self.result", "def main():\n\n\t# eesAmplitudes = range(200,321,10)\n\teesAmplitudes = [\"%\"+\"%.2f_0_0\"%(i) for i in np.arange(0,1.01,.05)]\n\t# eesFrequencies = range(10,1001,20)\n\teesFrequencies = np.logspace(1,3,50)\n\t# nrnStructureFile = \"fsSFrFfMnArtMod.txt\"\n\t# nrnStructureFile = \"fsSFrFfMnArtModHuman.txt\"\n\tnrnStructureFile = \"fsMnArtModHuman.txt\"\n\t# name = \"FreqAmpModHuman_0367S\"\n\tname = \"FreqAmpModHuman_ArtmodHuman_10msBurst\"\n\n\tnSim = len(eesFrequencies)*len(eesAmplitudes)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\t# simTime = 250\n\tsimTime = 15\n\tspecies = \"human\"\n\n\tfor eesAmplitude in eesAmplitudes:\n\t\tfor eesFrequency in eesFrequencies:\n\t\t\tfilName = name+\"_amp_\"+str(eesAmplitude)+\"_freq_\"+str(eesFrequency)\n\t\t\tresultFile = gt.find(\"*\"+filName+\".p\",pathToResults)\n\t\t\tif not resultFile:\n\t\t\t\treturnCode = None\n\t\t\t\twhile not returnCode==0:\n\t\t\t\t\tprogram = 
['python','scripts/computeAfferentsEfferentsModulation.py',\n\t\t\t\t\t\tstr(eesFrequency),str(eesAmplitude),species,nrnStructureFile,name,\"--simTime\",str(simTime)]\n\t\t\t\t\tprint \" \".join(program)\n\t\t\t\t\tforwardSimulation = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\t\t\treturnCode = None\n\t\t\t\t\twhile returnCode is None:\n\t\t\t\t\t\tmessage = forwardSimulation.stdout.readline().rstrip(\"\\n\").split()\n\t\t\t\t\t\tif message != None:print \"\\t\\t\"+\" \".join(message)+\"\\t\\t\"\n\t\t\t\t\t\treturnCode = forwardSimulation.poll()\n\t\t\t\t\tif returnCode != 0: print \"\\t\\t\\t\\t Error n: \",forwardSimulation.poll(),\" resetting simulation...\"\n\t\t\tcount+=1\n\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\tplot_stats(eesAmplitudes,eesFrequencies,simTime,name)", "def call(self, *args):\n self.formula.to_file(self.output_file)", "def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)", "def run(self):\n\t\tself._data_updater_thread.start()\n\t\tf = open('output.csv', 'w') #TODO: handle output file correctly (cfg)\n\t\tf.write('(datetime;valve;thin_temp;thin_pres;out_temp)\\n')\n\t\tf.close()", "def run(self):\n self._display_sims(self._compute_sims())", "def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)", "def Dynamic_Analysis(InputMethod=Get_Input, TestMode=False, PrintMode=True, ReportMode=True):\n OutputFile = None\n CSVFile = None\n try:\n Simulation_Title=\"Padulles-Amphlett\"\n if PrintMode==True:\n print(\"###########\")\n print(Simulation_Title+\"-Model Simulation\")\n print(\"###########\")\n OutputParamsKeys = list(OutputParams.keys())\n 
OutputParamsKeys.sort()\n Output_Dict = dict(zip(OutputParamsKeys, [None] * len(OutputParamsKeys)))\n if not TestMode:\n Input_Dict = InputMethod(InputParams,params_default=Defaults)\n else:\n Input_Dict = InputMethod\n Input_Dict = filter_default(input_dict=Input_Dict, params_default=Defaults)\n Input_Dict = filter_lambda(Input_Dict)\n if PrintMode==True:\n print(\"Analyzing . . .\")\n Name = Input_Dict[\"Name\"]\n if ReportMode==True:\n OutputFile = Output_Init(Input_Dict,Simulation_Title,Name)\n CSVFile = CSV_Init(OutputParamsKeys,OutputParams,Simulation_Title,Name)\n HTMLFile = HTML_Init(Simulation_Title, Name)\n IEndMax = Input_Dict[\"JMax\"] * Input_Dict[\"A\"]\n IEnd = min(IEndMax, Input_Dict[\"i-stop\"])\n IEnd = Input_Dict[\"i-stop\"]\n IStep = Input_Dict[\"i-step\"]\n Precision = get_precision(IStep)\n i = Input_Dict[\"i-start\"]\n I_List = []\n Power_List = []\n Vstack_List = []\n Kr=Kr_Calc(Input_Dict[\"N0\"])\n qH2=qH2_Calc(Input_Dict[\"qMethanol\"],Input_Dict[\"CV\"],Input_Dict[\"t1\"],Input_Dict[\"t2\"])\n qO2=qO2_Calc(qH2,Input_Dict[\"rho\"])\n while i < IEnd:\n try:\n I_List.append(i)\n Output_Dict[\"PO2\"] = PO2_Calc(Input_Dict[\"KO2\"], Input_Dict[\"tO2\"], Kr, i, qO2)\n Output_Dict[\"PH2\"] = PH2_Calc(Input_Dict[\"KH2\"], Input_Dict[\"tH2\"], Kr, i, qH2)\n Output_Dict[\"Eta Activation\"] = Eta_Act_Calc(Input_Dict[\"T\"], Output_Dict[\"PO2\"], Output_Dict[\"PH2\"], i,\n Input_Dict[\"A\"])\n Output_Dict[\"Eta Ohmic\"] = Eta_Ohmic_Calc(i, Input_Dict[\"l\"], Input_Dict[\"A\"], Input_Dict[\"T\"],\n Input_Dict[\"lambda\"], R_elec=Input_Dict[\"R\"])\n Output_Dict[\"Eta Concentration\"] = Eta_Conc_Calc(i, Input_Dict[\"A\"], Input_Dict[\"B\"],\n Input_Dict[\"JMax\"])\n Output_Dict[\"Loss\"] = Loss_Calc(Output_Dict[\"Eta Activation\"], Output_Dict[\"Eta Ohmic\"],\n Output_Dict[\"Eta Concentration\"])\n Output_Dict[\"PH2O\"]=PH2O_Calc(Input_Dict[\"KH2O\"],Input_Dict[\"tH2O\"],Kr,i,qH2)\n Output_Dict[\"E\"]=Enernst_Calc(Input_Dict[\"E0\"],Input_Dict[\"N0\"],Input_Dict[\"T\"],Output_Dict[\"PH2\"],Output_Dict[\"PO2\"],Output_Dict[\"PH2O\"])\n Output_Dict[\"FC Voltage\"]=Vcell_Calc(Output_Dict[\"E\"], Output_Dict[\"Loss\"],Input_Dict[\"N0\"])\n Vstack_List.append(Output_Dict[\"FC Voltage\"])\n Output_Dict[\"FC Efficiency\"] = Efficiency_Calc(Output_Dict[\"FC Voltage\"],Input_Dict[\"N0\"])\n Output_Dict[\"FC Power\"] = Power_Calc(Output_Dict[\"FC Voltage\"], i)\n Power_List.append(Output_Dict[\"FC Power\"])\n if ReportMode==True:\n Output_Save(OutputParamsKeys, Output_Dict,OutputParams, i, OutputFile,PrintMode)\n CSV_Save(OutputParamsKeys, Output_Dict, i, CSVFile)\n i = rounder(i + IStep, Precision)\n except Exception as e:\n print(str(e))\n i = rounder(i + IStep, Precision)\n if ReportMode==True:\n Output_Save(OutputParamsKeys, Output_Dict, OutputParams, i, OutputFile,PrintMode)\n CSV_Save(OutputParamsKeys, Output_Dict, i, CSVFile)\n if ReportMode==True:\n HTML_Chart(x=str(I_List), y=str(Power_List), color='rgba(255,99,132,1)', x_label=\"I(A)\", y_label=\"P(W)\",\n chart_name=\"FC-Power\", size=\"600px\", file=HTMLFile)\n HTML_Chart(x=str(I_List), y=str(Vstack_List), color='rgba(99,100,255,1)', x_label=\"I(A)\", y_label=\"V(V)\",\n chart_name=\"FC-Voltage\", size=\"600px\", file=HTMLFile)\n HTML_Input_Table(Input_Dict=Input_Dict, Input_Params=InputParams, file=HTMLFile)\n HTML_End(HTMLFile)\n OutputFile.close()\n CSVFile.close()\n HTMLFile.close()\n if PrintMode==True:\n print(\"Done!\")\n if not TestMode:\n if PrintMode==True:\n print(\"Result In -->\" + 
os.path.join(os.getcwd(),Simulation_Title))\n else:\n return {\"P\": Power_List, \"I\": I_List, \"V\": Vstack_List}\n except Exception:\n print(\"[Error] Padulles-Amphlett Dynamic Simulation Failed!(Check Your Inputs)\")", "def _BuildSims(self):\r\n if self.mode == \"All\":\r\n #Iterate through all rows of the training dataframe.\r\n for index, _ in self.trainDF.iterrows():\r\n #Obtain the document embeddings for each method.\r\n wordVec = self._WordSimAveVec(self.trainDF,index)\r\n docVec = self._DocSim(self.trainDF,index)\r\n #Save the embeddings to a dictionary\r\n self.VDF[\"Word\"][index] = wordVec\r\n self.VDF[\"Doc\"][index] = docVec\r\n if self.GloveFail == False:\r\n gloveVec = self._GloveSim(self.trainDF,index)\r\n self.VDF[\"Glove\"][index] = gloveVec\r\n if self.mode == \"Word\":\r\n for index, _ in self.trainDF.iterrows():\r\n wordVec = self._WordSimAveVec(self.trainDF,index)\r\n self.VDF[\"Word\"][index] = wordVec", "def simulate(self):\n #loop to perform additional steps until the current temperature is no longer greater than the ending_temperature\n while self.current_T >= self.end_temp: \n self.step(self.current_T)\n \n #log various parameters that changed in the MCMCSampler object after a single step\n self.temperature.append(self.current_T)\n self.iteration.append(self.current_iteration)\n self.energy.append(self.current_energy)\n #return a pandas dataframe that will hold all of the information requested above\n log_table = pd.DataFrame(list(zip(self.iteration, self.energy, self.temperature)), columns =['iteration', 'energy', 'temperature']) \n return(log_table)", "def main():\n file_txt = open('results.txt','w+')\n positions = [1,10,100,1000]\n num_trials = 10000\n \n # Simulate the investment and plot histogram for different positions\n for position in positions:\n daily_ret = simulation(position, num_trials)\n plt.figure()\n plt.hist(daily_ret, 100, range=[-1,1])\n plt.title('The histogram of daily return for position ={}'.format(position))\n plt.xlabel('Daily return')\n plt.ylabel('The number of trials')\n plt.savefig('histogram_{}_pos.pdf'.format(str(position).zfill(4)))\n \n # Save the results of the simulation into a txt file \n file_txt.write('Position: {}\\n'.format(position))\n file_txt.write('Mean: {}; Std: {}\\n'.format(np.mean(daily_ret),np.std(daily_ret)))\n file_txt.write('\\n')\n file_txt.close()", "def table(self):\n\n param=self.x_param\n\n device=self.device\n\n base_params=device.get_params()\n\n data_tot=DataFrame()\n\n for i in range(len(param)):\n\n print_index=1\n\n for name in param.names:\n\n device._set_params(param(i))\n\n device.draw()\n\n df=device.export_all()\n\n if self.labels_bottom is not None:\n\n index=self.labels_bottom[i]\n\n else:\n\n index=str(i)\n\n print(\"Generating table, item {} of {}\\r\".format(print_index,len(param)),end=\"\")\n\n data_tot=data_tot.append(Series(df,name=index))\n\n device._set_params(base_params)\n\n return data_tot", "def tabular_data(self):\n path = CFG.GRAPHS_DIR\n chdir(path)\n\n if self.experiment_count == 1:\n f = open(self.tablefile, 'w')\n f.write(self.print_border_line())\n f.write(self.table_header())\n f.write(self.print_border_line())\n f.write(self.pretty_string(\"Functions\"))\n f.write(self.pretty_string(\"Batch Size\"))\n f.write(self.pretty_string(\"Training (%)\"))\n f.write(self.pretty_string(\"Testing (%)\", True))\n f.write('\\n')\n f.write(self.print_border_line())\n f.close()\n\n f = open(self.tablefile, 'a')\n f.write(self.pretty_string(self.function_name))\n 
f.write(self.pretty_string(str(self.batch_size)))\n f.write(self.pretty_string(self.tr_mean_str))\n f.write(self.pretty_string(self.test_mean_str, True))\n f.write('\\n')\n f.close()", "def tabulate(self) -> str:\n items = [\n ('Number of stations', self._num_stations),\n ('Loss probability', self.drop_prob),\n ]\n\n for node in range(self._num_stations):\n items.append((f'[[ STATION #{node} ]]', ''))\n\n ssize = self.system_size[node]\n qsize = self.queue_size[node]\n busy = self.busy[node]\n\n ssize_pmf = [ssize.pmf(x) for x in range(ssize.truncated_at + 1)]\n qsize_pmf = [qsize.pmf(x) for x in range(qsize.truncated_at + 1)]\n busy_pmf = [busy.pmf(x) for x in range(busy.truncated_at + 1)]\n\n items.extend([\n ('System size PMF', str_array(ssize_pmf)),\n ('System size average', ssize.mean),\n ('System size std.dev.', ssize.std),\n ('Queue size PMF', str_array(qsize_pmf)),\n ('Queue size average', qsize.mean),\n ('Queue size std.dev.', qsize.std),\n ('Busy PMF', str_array(busy_pmf)),\n ('Utilization', self.get_utilization(node)),\n ('Drop probability', self.drop_prob[node]),\n ('Delivery probability', self.delivery_prob[node]),\n ('Departures, average', self.departures[node].avg),\n ('Departures, std.dev.', self.departures[node].std),\n ('Response time, average', self.response_time[node].avg),\n ('Response time, std.dev.', self.response_time[node].std),\n ('Wait time, average', self.wait_time[node].avg),\n ('Wait time, std.dev.', self.wait_time[node].std),\n ('End-to-end delays, average', self.delivery_delays[node].avg),\n ('End-to-end delays, std.dev.', self.delivery_delays[node].std),\n ])\n return tabulate(items, headers=('Param', 'Value'))", "def run(self):\n\t\tself.print_header_information()\n\n\t\t#self.get_number_of_instances_from_user()\n\n\t\t#self.compile_dataframe(self.number_of_instances)\n\n\t\tprint \"\\n{}\".format(self.data)\n\n\t\t# Uncomment these lines for debugging\n\t\tself.compile_dataframe_default()\n\t\t# print \"\\n{}\".format(self.data)\n\n\t\tself.analysis_of_dataframe(self.data)", "def run(self):\n report_file = self.get_report_file_name()\n self.export_records_to_file(report_file)\n print(\"Report file ({}) generated.\".format(report_file))", "def verify_implementation():\n import csv\n import time\n\n times = dict()\n max_run_times = 200\n for n in range(1, max_run_times):\n sentence = ''\n for _ in range(n):\n sentence += random.choice('01')\n sentence += 'ε'\n start = time.time()\n parse(SmallGrammar, sentence)\n end = time.time()\n times[n] = end - start\n print(n)\n with open('cyk_run_times.csv', 'w') as fout:\n writer = csv.writer(fout)\n for n in range(1, max_run_times):\n writer.writerow([n, times[n]])", "def make_simulations(self):\n pass", "def print(self):\n df = self.gen_test()\n # print(df)\n df.to_csv('some_dated_file.csv', index=False)\n return df", "def freqchk_method():\n y = 0\n csvfile = open('KIE_Output.csv', 'wb')\n outputwriter = csv.writer(csvfile, dialect='excel')\n outputwriter.writerow([\"GS_file\"] + [\"TS_File\"] + [\"Temperature\"] + [\"Pressure\"] + [\"Scale_factor\"] + [\"Chem_symbol\"] + [\"GS_number\"] + [\"TS_number\"] + [\"Labeled_isotope\"] + [\"KIE\"] + [\"KIE_tunneling\"])\n heading = [\"GS_file\", \"TS_File\", \"Temperature\", \"Pressure\", \"Scale_factor\", \"Chem_symbol\", \"GS_number\", \"TS_number\", \"Labeled_isotope\", \"KIE\", \"KIE_tunneling\"]\n x=0\n for each_GS in GS_chkpt_file:\n for each_TS in TS_chkpt_file:\n for each_item in isotope_changes: #split the changes to the isotope\n for each_temp in temp:\n if 
len(each_GS) > x:\n x = len(each_GS)\n if len(each_TS) > x:\n x = len(each_TS)\n y += 1\n chem_sym = []\n gs_num = []\n ts_num = []\n isotope_mass = []\n output = []\n i = 0\n while i < len(each_item):\n chem_sym.append(each_item[i])\n gs_num.append(each_item[i+1])\n ts_num.append(each_item[i+2])\n isotope_mass.append(each_item[i+3])\n i += 4\n \n # run freqchk for TS without a marker\n run_freqchk_TS_no_marker(each_TS, hyperchem_files, each_temp, pressure, scale_factor, gradient_direction)\n #run freqchk for TS with a marker\n run_freqchk_TS_marker(each_TS, hyperchem_files, each_temp, pressure, scale_factor, gradient_direction, ts_num, isotope_mass, number_atoms_TS)\n #run freqchk for GS without a marker\n run_freqchk_GS_no_marker(each_GS, hyperchem_files, each_temp, pressure, scale_factor, gradient_direction)\n #run freqchk for GS with a marker\n run_freqchk_GS_marker(each_GS, hyperchem_files, each_temp, pressure, scale_factor, gradient_direction, gs_num, isotope_mass, number_atoms_GS)\n\n #get frequencies from .txt file\n frequency_TS_natural = map(float, get_frequencies(\"freq_TS_no_marker.txt\"))\n frequency_TS_isotope = map(float, get_frequencies(\"freq_TS_marker.txt\"))\n frequency_GS_natural = map(float, get_frequencies(\"freq_GS_no_marker.txt\"))\n frequency_GS_isotope = map(float, get_frequencies(\"freq_GS_marker.txt\"))\n\n #remove freqchk output files\n os.system(\"rm freq_TS_no_marker.txt freq_TS_marker.txt freq_GS_no_marker.txt freq_GS_marker.txt\")\n\n # create array with u values\n u_TS_natural = np.array(calc_u(frequency_TS_natural, each_temp, scale_factor))\n u_TS_isotope = np.array(calc_u(frequency_TS_isotope, each_temp, scale_factor))\n u_GS_natural = np.array(calc_u(frequency_GS_natural, each_temp, scale_factor))\n u_GS_isotope = np.array(calc_u(frequency_GS_isotope, each_temp, scale_factor))\n if u_TS_natural[0] < 0:\n u_neg_TS_natural= u_TS_natural[0] #negative u value\n u_TS_natural = u_TS_natural[1:] #allows calculation on all u values that are positive\n u_neg_TS_isotope = u_TS_isotope[0] #negative u value\n u_TS_isotope = u_TS_isotope[1:] #allows calculation on all u values that are positive\n else:\n u_neg_TS_natural =[]\n u_neg_TS_isotope = []\n\n # create array with exp(u/2) values\n exp_TS_natural = np.array(exp_u_half(u_TS_natural))\n exp_TS_isotope = np.array(exp_u_half(u_TS_isotope))\n exp_GS_natural = np.array(exp_u_half(u_GS_natural))\n exp_GS_isotope = np.array(exp_u_half(u_GS_isotope))\n\n # create array with 1-exp(-u) values\n one_minus_exp_TS_natural = np.array(calc_one_minus_exp(u_TS_natural))\n one_minus_exp_TS_isotope = np.array(calc_one_minus_exp(u_TS_isotope))\n one_minus_exp_GS_natural = np.array(calc_one_minus_exp(u_GS_natural))\n one_minus_exp_GS_isotope = np.array(calc_one_minus_exp(u_GS_isotope))\n\n # create array with prod values\n prod_TS_natural = np.array(calc_prod(u_TS_natural, exp_TS_natural, one_minus_exp_TS_natural))\n prod_TS_isotope = np.array(calc_prod(u_TS_isotope, exp_TS_isotope, one_minus_exp_TS_isotope))\n prod_GS_natural = np.array(calc_prod(u_GS_natural, exp_GS_natural, one_minus_exp_GS_natural))\n prod_GS_isotope = np.array(calc_prod(u_GS_isotope, exp_GS_isotope, one_minus_exp_GS_isotope))\n \n # calculate FTS\n if u_neg_TS_natural:\n FTS_TS_natural = calc_FTS_TS(prod_TS_natural, u_neg_TS_natural)\n FTS_TS_isotope = calc_FTS_TS(prod_TS_isotope, u_neg_TS_isotope)\n FTS_GS_natural = calc_FTS(prod_GS_natural)\n FTS_GS_isotope = calc_FTS(prod_GS_isotope)\n else:\n FTS_TS_natural = calc_FTS(prod_TS_natural)\n FTS_TS_isotope = 
calc_FTS(prod_TS_isotope)\n FTS_GS_natural = calc_FTS(prod_GS_natural)\n FTS_GS_isotope = calc_FTS(prod_GS_isotope)\n \n # calcualte qt for TS\n if u_neg_TS_natural:\n qt_TS_natural = calc_qt(u_neg_TS_natural)\n qt_TS_isotope = calc_qt(u_neg_TS_isotope)\n else:\n qt_TS_natural = calc_qt(u_TS_natural[0])\n qt_TS_isotope = calc_qt(u_TS_isotope[0])\n\n # build dictionary with elements and get the mass of the\n # elements being used\n elements = {'H':1, 'He':4, 'Li':7, 'B':11, 'C':12, 'N':14, 'O':16, 'Ne':20, 'Mg':24, 'Si':28, 'S':32, 'Cl':35, 'Ar':40, 'K':39, 'Ca':40, 'Ti':48, 'Cr':52, 'Fe':56, 'Ni':58, 'Cu':63, 'Zn':64, 'Ga':69, 'Ge':74, 'Se':80, 'Br':79, 'Kr':84, 'Sr':88, 'Zr':90, 'Mo':98, 'Ru':102, 'Pd':106, 'Ag':107, 'Cd':114, 'Sn':120, 'Sb':121, 'Te':126, 'Xe':132, 'Ba':138, 'Ce':140, 'Nd':142, 'Sm':152, 'Eu':153, 'Gd':158, 'Dy':164, 'Er':166, 'Yb':174, 'Hf':180, 'W':184, 'Os':192, 'Ir':193, 'Pt':195, 'Hg':202, 'Tl':205, 'Pb':208}\n temp_sym = ' '.join(chem_sym[0])\n temp_isotope_mass = isotope_mass[0]\n temp_isotope_mass = \"\".join(repr(temp_isotope_mass))\n temp_isotope_mass = int(temp_isotope_mass)\n\n # calculate KIE\n for a in elements.keys():\n if a == ''.join(temp_sym):\n if elements[a] > temp_isotope_mass:\n KIE1 = (FTS_TS_isotope / FTS_TS_natural)\n KIE2 = (FTS_GS_natural / FTS_GS_isotope)\n KIE3 = KIE2 * KIE1\n KIE = 1 / KIE3\n else:\n KIE1 = (FTS_TS_isotope / FTS_TS_natural)\n KIE2 = (FTS_GS_natural / FTS_GS_isotope)\n KIE = KIE2 * KIE1\n\n #calculate KIE with tunneling\n KIE_tunneling = KIE * qt_TS_natural / qt_TS_isotope\n\n #convert to strings\n gs_num = map(str, gs_num)\n ts_num = map(str, ts_num)\n isotope_mass = map(str, isotope_mass)\n KIE = str(KIE)\n KIE_tunneling = str(KIE_tunneling)\n\n each_temp = ''.join(each_temp)\n pressure = ''.join(pressure)\n scale_factor = ''.join(scale_factor)\n chem_sym = ', '.join(chem_sym)\n gs_num = ', '.join(gs_num)\n ts_num = ', '.join(ts_num)\n isotope_mass = ', '.join(isotope_mass)\n \n output.append(each_GS)\n output.append(each_TS)\n output.append(each_temp)\n output.append(pressure)\n output.append(scale_factor)\n output.append(chem_sym)\n output.append(gs_num)\n output.append(ts_num)\n output.append(isotope_mass)\n output.append(KIE)\n output.append(KIE_tunneling)\n master_output.append(output)\n\n outputwriter.writerow([each_GS] + [each_TS] + [each_temp] + [pressure] + [scale_factor] + [chem_sym] + [gs_num] + [ts_num] + [isotope_mass] + [KIE] + [KIE_tunneling])\n csvfile.close()\n\n print ' '.join(heading)\n for each_entry in master_output:\n print ' '.join(each_entry)\n \n print \"\\n\", \"All KIE's have been calculated!\", \"\\n\"", "def produce_all_term_data(self):\n # remove cold start records if requested\n test = self.test.copy()\n test = self.handle_cold_start(test)\n\n outputs = self.output()\n trainf, testf = outputs['train'], outputs['test']\n with trainf.open('w') as ftrain, testf.open('w') as ftest:\n self.write_libfm_data(ftrain, ftest, self.train, test)\n\n # Write the term-to-id guide\n test = test.sort(('termnum'))\n test['rownum'] = np.arange(len(test))\n guide = test.groupby('termnum').max()['rownum']\n with self.output()['guide'].open('w') as f:\n guide.to_csv(f, index_label='termnum', header=True)", "def simulate_trading(self):\n # Create the file output stream\n posix_now = datetime.datetime.timestamp(datetime.datetime.now())\n out_path = os.getcwd() + \"/OutputResults/backtest_{}\".format(posix_now)[:-7:] + \".csv\"\n\n out = open(out_path, \"w+\")\n\n spl = len(self.strat_params_list)\n for i, sp in 
enumerate(self.strat_params_list): # http://book.pythontips.com/en/latest/enumerate.html\n print(\"Strategy %s out of %s...\" % (i + 1, spl))\n self._generate_trading_instances(sp)\n self._run_backtest()\n stats = self._output_performance()\n pprint.pprint(stats)\n\n tot_ret = float(stats[0][1].replace(\"%\", \"\"))\n cagr = float(stats[1][1].replace(\"%\", \"\"))\n sharpe = float(stats[2][1])\n max_dd = float(stats[3][1].replace(\"%\", \"\"))\n dd_dur = int(stats[4][1])\n\n # This should be more general in future implementations...\n out.write(\n \"%s,%s,%s,%s,%s,%s,%s,%s\\n\" % (sp[\"ols_window\"], sp[\"zscore_high\"], sp[\"zscore_low\"],\n tot_ret, cagr, sharpe, max_dd, dd_dur)\n )\n\n out.close()", "def _compute_(self):\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n fbgc = \"data/sim/{dn}/{rad}/exp.bgc.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), \n rad=self.rad, bm=self.bmnum)\n fflare = \"data/sim/{dn}/{rad}/exp.flare.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"),\n rad=self.rad, bm=self.bmnum)\n cmd = \"export DIR_MODELS_REF_DAT=/home/shibaji/Collaboration_NCAR/code_rt_sd/pharlap/pharlap_4.1.3/dat;\\\n cd pharlap;\\\n matlab -nodisplay -nodesktop -nosplash -nojvm -r \\\"UT=[{ut}];rad='{rad}';dic='{dic}';fbgc='{fbgc}';bm={bm};\\\n fflare='{fflare}';rt_1D_sim;exit;\\\"\".format(ut=self.event.strftime(\"%Y %m %d %H %S\"), rad=self.rad,\n dic=dic, bm=self.bmnum, fbgc=fbgc, fflare=fflare)\n os.system(cmd)\n return", "def frequency(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['frequency']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for label in self.uuid_map:\n if label == 'LSTATE':\n distillate_label = label\n else:\n distillate_label = get_distillate_label([label])\n if 'ANG' not in distillate_label:\n continue\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_label = label\n dep_name = fields['deps'][0]\n dep_uuid = self.uuid_map[label]\n deps = [[dep_label, dep_name, dep_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"FREQ\"\n params = [[param_section_name, param_section_value],\n [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[label+\"_1-SEC\"] = emitted[-3][-36:]\n output_uuid_map[label+\"_C37\"] = emitted[-2][-36:]\n\n filename = \"{0}/FREQ_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def output():\n\n if args.top and not args.tfidf and not args.svd:\n most_frequent(vector).to_csv(path_or_buf=\"top{}_vectorfile.csv\".format(args.top))\n\n elif args.top and args.tfidf and not args.svd:\n tfidf_transform(most_frequent(vector)).to_csv(path_or_buf=\"tfidf_top{}.csv\".format(args.top))\n\n elif args.top and args.tfidf and args.svd:\n svd_transform(tfidf_transform(most_frequent(vector)), indexes).to_csv(path_or_buf=\"svd{}_tfidf_topn.csv\".format(args.svd))\n\n elif args.tfidf and not args.top and not args.svd:\n tfidf_transform(vector).to_csv(path_or_buf=\"tfidf.csv\")\n\n elif args.svd and not args.top and not args.tfidf:\n svd_transform(vector, 
indexes).to_csv(path_or_buf=\"svd{}_vector.csv\".format(args.svd))\n\n elif args.tfidf and args.svd and not args.top:\n svd_transform(tfidf_transform(vector), indexes).to_csv(path_or_buf=\"svd{}_tfidf.csv\".format(args.svd))\n\n else:\n vector.to_csv(path_or_buf=\"vectorfile.csv\")", "def simulate(self, outputfile, nsteps, dt):\n starttime = time.process_time() # For simulation length timing purposes\n # Initialisation of all the lists used throughout simulations.\n timelist, VMD_list, positions, velocities = [], [], [], [];\n KE, PE, TE = [], [], []\n\n # Calculate initial forces.\n forces = self.get_forces()\n for t in range(nsteps):\n positions.append(self.get_positions()) #Save position\n self.enforce_pbc() # Enforce periodic boundary conditions.\n velocities.append(self.get_velocities()) # Save velocities\n timelist.append(t*dt) # Save time stamp\n VMD_list.append(self.VMD_string(t)) # Save VMD data to temporary list\n\n # Calculate and save energies in lists\n energies = self.get_energies()\n PE.append(energies[0])\n KE.append(energies[1])\n TE.append(energies[2])\n\n # Updates positions\n self.update_pos(forces, dt)\n temp_forces = forces\n forces = self.get_forces()\n # Update velocities\n self.update_vel(0.5*(temp_forces + forces), dt)\n\n # Output VMD data to file\n vmdstring = ''.join(VMD_list)\n with open(outputfile, 'w') as out:\n out.write(vmdstring)\n print('Succesful VMD Data write to '+outputfile+'\\n')\n\n # Output energy data to file\n write_output(\"energyfile.txt\", timelist, PE, KE, TE)\n print('Successful Energies write to energyfile.txt \\n')\n\n # Print simulation total runtime in seconds\n runtime = time.process_time() - starttime\n print('Simulate method ran for %f seconds\\n'%runtime)\n\n\n return np.array(positions), np.array(timelist)", "def generate_stats(simulation_folder):\n\n Start = datetime.now()\n project_directory = os.path.dirname(os.getcwd())\n path_to_data = os.path.join(project_directory, \"Data\", simulation_folder)\n path_to_characteristics_data = os.path.join(path_to_data, \"Characteristics\")\n path_to_scenario = os.path.join(project_directory, \"Models\", simulation_folder,\n \"XGB\", \"Model\")\n path_to_stats = os.path.join(path_to_scenario, \"Stats\")\n if not os.path.exists(path_to_stats):\n os.makedirs(path_to_stats)\n path_to_model = os.path.join(path_to_scenario, \"model.sav\")\n \n X_train = np.load(os.path.join(path_to_characteristics_data, \"X_train.npy\"), allow_pickle=True)\n X_test = np.load(os.path.join(path_to_characteristics_data, \"X_test.npy\"), allow_pickle=True)\n y_train = np.load(os.path.join(path_to_characteristics_data, \"y_train.npy\"), allow_pickle=True)\n y_test = np.load(os.path.join(path_to_characteristics_data, \"y_test.npy\"), allow_pickle=True)\n # TODO: fix the save of the data to get variable names from there\n characteristics_data = pd.read_csv(os.path.join(path_to_characteristics_data, \"characteristics.csv\"))\n model = joblib.load(path_to_model)\n data_type = [\"Train\", \"Test\"]\n for dt in data_type:\n X = X_train if dt == \"Train\" else X_test\n y = y_train if dt == \"Train\" else y_test\n # Making the Confusion Matrix\n y_pred = model.predict(X)\n\n ## TODO: mae per class\n print(\"mae\")\n test_train_mae = mean_absolute_error(y, y_pred)\n df = pd.DataFrame({'mae': [test_train_mae]})\n df.to_csv(os.path.join(path_to_stats, \"MAE_\" + dt + \".csv\"))\n\n # feature importances\n importances = model.feature_importances_\n column_names = characteristics_data.drop([\"file\", \"motion\", \"diff_type\"], 
axis=1).columns.values\n df = imp_df(column_names, importances)\n df.to_csv(os.path.join(path_to_stats, \"Feature_importances.csv\"), index=False)\n \n\n # permutation importances\n X_train_df = pd.DataFrame(X_train, columns=column_names)\n y_train_df = pd.DataFrame(y_train)\n md = clone(model)\n md.fit(X_train_df,y_train_df)\n df = permutation_importances(md, X_train_df, y_train_df, mae)\n df.to_csv(os.path.join(path_to_stats, \"Permutation_fi.csv\"), index=True)\n\n # drop column feature importance\n X_train_df = pd.DataFrame(X_train, columns=column_names)\n df = drop_col_feat_imp(model, X_train_df, y_train)\n df.to_csv(os.path.join(path_to_stats, \"Drop_column_fi.csv\"), index=False)\n\n End = datetime.now()\n ExecutedTime = End - Start\n df = pd.DataFrame({'ExecutedTime': [ExecutedTime]})\n df.to_csv(os.path.join(path_to_stats, \"time_for_stats_generator.csv\"))\n print(ExecutedTime)", "def main():\n start = 1554994269 # unix timestamp, fixed for reproducability\n stop = start + 850 * 61 # number of acqs * time between acqs\n sampling_rate = 512. # Hz\n\n # Nyquist freq needs to be larger than frequency of J-peaks\n nyquist = sampling_rate / 2 + 1\n assert nyquist > 250\n\n # Test single mass for now\n mass = 2e-15\n result = run_sim(mass, start, stop, sampling_rate)\n\n sim_name = 'sim_mass_{:g}_rate_{:g}.npz'.format(mass, sampling_rate)\n np.savez(sim_name, times=result[0], amplitudes=result[1])\n print('saved: {}'.format(sim_name))", "def Simulator(self):\n print(\"======================== Start OF the Simulator()*\")\n source_list = self.input_list\n # takeoff_report = []\n # clock = 0\n\n # call method process_input() to read requests from file\n self.process_input()\n # CALLING method __sort_by_priority() to sort requests by in order\n self.__sort_by_priority(source_list)\n # CALLING method set_takeoff_time() to set take-off times\n source_list = self.set_takeoff_time(source_list)\n\n print(\"========================End of Simulation()*\")", "def outputTiming():\r\n print ('N\\tSum \\tSet\\t String\\t Loop')\r\n for trial in [2**_ for _ in range(1,11)]:\r\n numbers = f'[random.randint(0, 2 ** 24) for _ in range({trial})]'\r\n \r\n methods = ['sumValues', 'uniqueCheckSet', 'uniqueCheckString', 'uniqueCheckLoop' ]\r\n counts = {}\r\n for meth in methods:\r\n counts[meth] = timeit.timeit(stmt=f'{meth}(numbers)', number=1000,\r\n setup=f'import random\\nfrom __main__ import {meth}\\nrandom.seed({trial})\\nnumbers = {numbers}')\r\n\r\n results = '\\t'.join(f'{counts[meth]:f}' for meth in methods)\r\n print (f'{trial}\\t{results}')" ]
[ "0.5954314", "0.559512", "0.55548674", "0.5545312", "0.5534271", "0.55245703", "0.5496111", "0.5488325", "0.54748154", "0.5410784", "0.5380403", "0.53455675", "0.53385794", "0.52782524", "0.5235391", "0.5225953", "0.5218088", "0.52113324", "0.5201477", "0.516396", "0.5160859", "0.5147567", "0.5121919", "0.5114827", "0.50950974", "0.50750226", "0.5070954", "0.50680184", "0.50639087", "0.50547516" ]
0.5908045
1
List the materials with known properties. The listed material names are keys in the materials properties dictionary.
def show_materials(self):
    print('\nThe materials with known dielectric properties are:\n')
    pprint.pprint(mats.Electrical.props)
    # pprint.pprint(mats.Electrical.DIELECTRIC)
    print('\nThe materials with known loss tangents are:\n')
    pprint.pprint(mats.Electrical.props)
    # pprint.pprint(mats.Electrical.LOSS_TAN)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_materials_properties(dbpath): #<un-named>nook\n odb = openOdb(path=dbpath)\n data = []\n for _name,_mat in odb.materials.items():\n _elastic_mod = _mat.elastic.table[0][0]\n _poisson = _mat.elastic.table[0][1]\n if hasattr(_mat,\"plastic\"):\n _plastic = _mat.plastic.table\n else:\n _plastic = []\n data.append((_name,_elastic_mod,_poisson,_plastic))\n odb.close()\n return data", "def info_materials_polymer_get():\n materials = _material_by_group(974) # 974 == intermediate group\n return materials, 200", "def info_materials_get():\n materials = _material_by_group() # empty means all groups\n return materials, 200", "def materials(self):\n return MaterialManager(session=self._session)", "def load_materials(self):\n # Create material objects\n for meta_mat in self.gltf.materials:\n mat = Material(meta_mat.name)\n mat.color = meta_mat.baseColorFactor or [1.0, 1.0, 1.0, 1.0]\n mat.double_sided = meta_mat.doubleSided\n\n if meta_mat.baseColorTexture is not None:\n mat.mat_texture = self.textures[meta_mat.baseColorTexture[\"index\"]]\n\n self.materials.append(mat)\n self.scene.materials.append(mat)", "def read_all():\n # Query the database for all the materials\n materials = Material.query.order_by(Material.family_id, Material.material_name).all()\n\n # Serialize the list of materials from our data\n material_schema = MaterialSchema(many=True, exclude=[\"family.materials\"])\n data = material_schema.dump(materials).data\n return data", "def get_material_mapping(self):\n return {name: self.get_material(name) for name in self.parts.keys()}", "def info_materials_raw_get():\n materials = _material_by_group(427) # 427 == intermediate group\n return materials, 200", "def getMaterialPhysics():\r\n physicsProperties = {}\r\n for material in bpy.data.materials:\r\n properties = utils.extract_cryblend_properties(material.name)\r\n if properties:\r\n physicsProperties[properties[\"Name\"]] = properties[\"Physics\"]\r\n return physicsProperties", "def info_materials_booster_get():\n materials = _material_by_group(712) # 712 == intermediate group\n return materials, 200", "def materials(cls) -> MaterialSelector:\n selector: MaterialSelector = cls._materials\n return selector", "def info_materials_gas_get():\n materials = _material_by_group(711) # 711 == intermediate group\n return materials, 200", "def GetMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_GetMaterial(self, *args)", "def create_materials_from_data(textures):\n\n materials = []\n\n #Set colour to incremenet from 0 - 8\n colour_inc = 1.0 / len(textures)\n colour = 0\n\n for current_material in textures:\n mat = bpy.data.materials.new(current_material[0])\n mat.diffuse_color = (0, colour, 0,)\n mat.diffuse_shader = 'LAMBERT' \n mat.diffuse_intensity = 1.0 \n mat.specular_color = (1, 1, 1,)\n mat.specular_shader = 'COOKTORR'\n mat.specular_intensity = 0.5\n mat.alpha = 1\n mat.ambient = 1\n mat.use_shadeless = True\n\n mtex = mat.texture_slots.add()\n mtex.texture = current_material[1]\n mtex.texture_coords = 'UV'\n mtex.use_map_color_diffuse = True \n\n materials.append(mat)\n colour += colour_inc\n \n return materials", "def get_material_set(**kw):\n mat_ids = set()\n volumes = get_volume_list()\n for v in volumes:\n d = volume_metadata( v )\n if( kw.get('with_rho') is True ):\n # rho is undefined for the void material and dagmc may return anything.\n if d['material'] == 0:\n mat_ids.add( (d['material'], 0.0) )\n else:\n mat_ids.add( (d['material'], d['rho']) )\n else:\n mat_ids.add( d['material'] )\n return mat_ids", "def 
GetMaterialLabels(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_GetMaterialLabels(self, *args)", "def info_materials_intermediates_get():\n materials = _material_by_group(428) # 428 == intermediate group\n return materials, 200", "def test_materials_present(self):\n self.assertIsNotNone('Materials' in self.header.parameters.attrs)", "def has_material(obj, name):\n return name in obj.data.materials.keys()", "def info_materials_composites_get():\n materials = _material_by_group(429) # 429 == intermediate group\n return materials, 200", "def get_materials_from_blender_objects(blender_objects):\n materials = set()\n meshes = {ob.data for ob in blender_objects if ob.type == 'MESH'}\n for ob in meshes:\n if not ob.materials:\n continue\n materials.add(ob.materials[0])\n return sorted(materials, key=lambda m: m.name)", "def define_materials():\n global robot\n robot.add_material(ur.Material('Black', ur.Color(0.1, 0.1, 0.1, 1)))\n robot.add_material(ur.Material('LightGrey', ur.Color(0.9, 0.9, 0.9, 1)))\n robot.add_material(ur.Material('Grey', ur.Color(0.6, 0.6, 0.6, 1)))\n robot.add_material(ur.Material('DarkGrey', ur.Color(0.3, 0.3, 0.3, 1)))", "def test_create_material_multi_basic(self):\n expected_materials = [\n ['cotton', 'AAA', 'BBB', 'CCC'],\n ['cotton', 'AAA', 'BBB', 'CCC'],\n ['wool', 'AAA', 'BBB', 'CCC'],\n ]\n\n select_listings_to_edit(self.driver)\n d = self.driver\n bp = BulkPage(d)\n\n send_keys(bp.operation_input(), 'AAA,BBB ,CCC')\n click(bp.operation_apply())\n\n material_names = bp.material_names()\n assert material_names == expected_materials", "def get_materials(dbpath):\n odb = openOdb(path=dbpath)\n _materials = []\n for _name in odb.materials.items():\n _materials.append(_name)\n odb.close()\n return _materials", "def get_material_features(self):\n return self.material_features", "def info_materials_groups_get():\n session = info_map.Session()\n\n mat = aliased(info_map.Material)\n grp = aliased(info_map.Group)\n\n q = session.query(mat.group_id,grp.name).join(grp).distinct()\n groups = [Group(group=row.group_id,name=row.name) for row in q.all()]\n return groups, 200", "def read_material_data(self, material):\n material_yaml_file = glob.glob(os.path.join(material_dir, material + '.yaml'))\n\n inputs = utilities.yaml_reader(material_yaml_file, material_dir, material)\n self.name = inputs['Name']\n self.materialName = material\n self.elements = inputs['Elements']\n self.zaids = inputs['Elemental ZAIDs']\n self.weightFraction = inputs['Elemental Weight Fractions'] if 'Elemental Weight Fractions' in inputs else []\n self.enrichmentZaids = inputs['Elemental Adjustment ZAIDs'] if 'Elemental Adjustment ZAIDs' in inputs else []\n self.enrichmentIsotopes = inputs['Isotopic Adjustment ZAIDs'] if 'Isotopic Adjustment ZAIDs' in inputs else []\n self.enrichmentVector = inputs['Isotopic Weight Percents'] if 'Isotopic Weight Percents' in inputs else []\n self.isotopicAtomPercents = inputs['Isotopic Atom Percents'] if 'Isotopic Atom Percents' in inputs else []\n self.density = inputs['Density']\n self.linearCoeffExpansion = inputs['Linear Coefficient of Expansion']", "def layer_properties(freq_vec, material):\n # name of the material\n material_name = material[0]\n # thickness of the material (reshape with freq shape, in a tuple, to\n # allow the sum with the tuple of material properties)\n thickness = (np.array( [material[1]]*len(freq_vec) ), )\n # check if we have to pass extra arguments for non homogenous material\n if material_name == 'meta':\n param = material[2:]\n else:\n 
param = ()\n # read/compute material properties\n prop = mat.properties(material_name, freq_vec, *param)\n\n return thickness + prop", "def MaterialsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_MaterialsLabel(*args)", "def generate_materials_dict(self):\n c = 299792458.0\n w_mat = 2 * np.pi * c / self.l_mat - self.w0\n l2_mat = (self.l_mat * 1e6) ** 2\n\n n_air = 1 + 0.05792105 * l2_mat / (238.0185 * l2_mat - 1) + 0.00167917 * l2_mat / (57.362 * l2_mat - 1)\n air_ip = interp1d(w_mat, n_air, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['air'] = air_ip\n\n n_fs = np.sqrt(1 + 0.6961663 * l2_mat / (l2_mat - 0.0684043 ** 2) +\n 0.4079426 * l2_mat / (l2_mat - 0.1162414 ** 2) +\n 0.8974794 * l2_mat / (l2_mat - 9.896161 ** 2))\n fs_ip = interp1d(w_mat, n_fs, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['fs'] = fs_ip\n\n n_mgf2 = np.sqrt(1 + 0.48755108 * l2_mat / (l2_mat - 0.04338408 ** 2) +\n 0.39875031 * l2_mat / (l2_mat - 0.09461442 ** 2) +\n 2.3120353 * l2_mat / (l2_mat - 23.793604 ** 2))\n mgf2_ip = interp1d(w_mat, n_mgf2, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['mgf2'] = mgf2_ip\n\n n_sapphire_o = np.sqrt(1 + 1.4313493 * l2_mat / (l2_mat - 0.0726631 ** 2) +\n 0.65054713 * l2_mat / (l2_mat - 0.1193242 ** 2) +\n 5.3414021 * l2_mat / (l2_mat - 18.028251 ** 2))\n sapphire_o_ip = interp1d(w_mat, n_sapphire_o, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['sapphire_o'] = sapphire_o_ip\n\n n_sapphire_e = np.sqrt(1 + 1.5039759 * l2_mat / (l2_mat - 0.0740288 ** 2) +\n 0.55069141 * l2_mat / (l2_mat - 0.1216529 ** 2) +\n 6.5927379 * l2_mat / (l2_mat - 20.072248 ** 2))\n sapphire_e_ip = interp1d(w_mat, n_sapphire_e, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['sapphire_e'] = sapphire_e_ip\n\n n_bbo_o = np.sqrt(2.7405 + 0.0184 / (l2_mat - 0.0179) - 0.0155 * l2_mat)\n bbo_o_ip = interp1d(w_mat, n_bbo_o, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['bbo_o'] = bbo_o_ip\n\n n_bbo_e = np.sqrt(2.3730 + 0.0128 / (l2_mat - 0.0156) - 0.0044 * l2_mat)\n bbo_e_ip = interp1d(w_mat, n_bbo_e, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['bbo_e'] = bbo_e_ip\n\n materials_files = os.listdir(self.materials_path)\n logger.info(\"Found {0:d}\".format(materials_files.__len__()))\n for mat_file in materials_files:\n logger.debug(mat_file)\n self.read_material(''.join((self.materials_path, '/', mat_file)))" ]
[ "0.6943955", "0.6848694", "0.6815325", "0.66644573", "0.6500591", "0.6459267", "0.6354483", "0.62616825", "0.6259314", "0.6180884", "0.6179187", "0.61718166", "0.6148789", "0.6141167", "0.60783863", "0.58889025", "0.5850806", "0.58497685", "0.5842548", "0.58035386", "0.5800268", "0.5784925", "0.577055", "0.576991", "0.565827", "0.56290025", "0.5602089", "0.55942464", "0.5591067", "0.5579133" ]
0.7507949
0
Returns the root 'src' absolute path of this Chromium Git checkout.
def get_chromium_src_path() -> pathlib.Path:
    _CHROMIUM_SRC_ROOT = pathlib.Path(__file__).parents[3].resolve(strict=True)
    if _CHROMIUM_SRC_ROOT.name != 'src':
        raise AssertionError(
            f'_CHROMIUM_SRC_ROOT "{_CHROMIUM_SRC_ROOT}" should end in "src".')
    try:
        _assert_git_repository(_CHROMIUM_SRC_ROOT)
    except (ValueError, RuntimeError):
        raise AssertionError
    return _CHROMIUM_SRC_ROOT
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetSrc():\n return os.path.abspath(os.path.join(_THIS_DIR, os.pardir, os.pardir,\n os.pardir))", "def source_root_dir():\n return os.path.abspath(os.path.dirname(__file__))", "def menpowidgets_src_dir_path():\n # to avoid cluttering the menpowidgets.base namespace\n from pathlib import Path\n import os.path\n\n return Path(os.path.abspath(__file__)).parent", "def getGitPath() -> osp:\n current_dir = osp.dirname(osp.realpath(__file__))\n git_dir = osp.dirname(osp.dirname(current_dir))\n return git_dir", "def srcdir(path):\n if not workflow.included_stack:\n return None\n return workflow.current_basedir.join(path).get_path_or_uri()", "def path(self) -> str:\n return self.src + \"/\"", "def getRootPath()->str:\n if '--develop' in sys.argv:\n return eel._get_real_path('public') + '/'\n\n return eel._get_real_path('build') + '/'", "def get_git_root():\n path = os.getcwd()\n git_repo = git.Repo(path, search_parent_directories=True)\n git_root = git_repo.git.rev_parse(\"--show-toplevel\")\n return git_root", "def root_path(self) -> Path:\n return ARCHIVES_ROOT / self.source_name / self.key", "def path(self) -> str:\n self.__verify_repo_initialized()\n return str(self._repo_path.parent)", "def src(self) -> str:\n return self._branch + self._id", "def get_project_source_dir() -> Path:\n return Path(__file__).resolve().parents[1].resolve()", "def get_git_root(path):\n\tgit_repo = git.Repo(path, search_parent_directories=True)\n\tgit_root = git_repo.git.rev_parse(\"--show-toplevel\")\n\treturn git_root", "def source_dir(self):\n\t\tassert self.revision.is_dev_build\n\n\t\trev = self._get_dev_build_suffix()\n\t\treturn os.path.join(self._cfg.basedir, 'develop', self.name + rev)", "def rel_cwd():\n return os.path.relpath(os.getcwd(), git_toplevel())", "def bin_root(self):\n return os.path.join(self.build_dir, self.build, \"stage0\")", "def get_source_airflow_folder() -> str:\n return os.path.abspath(AIRFLOW_SOURCES_ROOT_PATH)", "def getmp_srcdir():\n return os.path.join(getmp_rootdir(), 'src')", "def get_src_path(obj, src_root='tefla', append_base=True):\n path = getsourcefile(obj)\n if not src_root in path:\n # this can happen with e.g.\n # inlinefunc-wrapped functions\n if hasattr(obj, \"__module__\"):\n path = \"%s.%s\" % (obj.__module__, obj.__name__)\n else:\n path = obj.__name__\n path = path.replace(\".\", \"/\")\n try:\n pre, post = path.rsplit(src_root + \"/\", 1)\n except:\n pre, post = '', ''\n\n lineno = get_line_no(obj)\n lineno = \"\" if lineno is None else \"#L{}\".format(lineno)\n\n path = src_root + \"/\" + post + lineno\n if append_base:\n path = os.path.join(\n 'https://github.com/openagi/tefla/blob/master', path)\n return path", "def cd_genny_root():\n script_path = os.path.abspath(__file__)\n script_dir = os.path.dirname(script_path)\n # cd into script directory first so we can get the project root with git.\n os.chdir(script_dir)\n root = get_project_root()\n os.chdir(root)", "def repo_root_path() -> str:\n\n global __REPO_ROOT\n if __REPO_ROOT:\n return __REPO_ROOT\n\n path = os.path.normpath(os.getcwd())\n while os.path.split(path)[1]:\n if is_repo_root(path):\n break\n\n path = os.path.split(path)[0]\n else:\n # fallback to the location of this file if the CWD is not in the\n # repo root:\n path = os.path.normpath(os.path.dirname(__file__))\n while os.path.split(path)[1]:\n if is_repo_root(path):\n break\n\n path = os.path.split(path)[0]\n else:\n print(\"Could not find repo root!\")\n sys.exit(1)\n\n __REPO_ROOT = path\n return __REPO_ROOT", "def getSrcDir(self):\n if 
self.srcdir == None:\n tmpl = self.getTmplDir()\n self.srcdir = os.path.join(tmpl, 'template', self.args.tmpl)\n return self.srcdir", "def repo_root() -> str:\n path = os.path.realpath(os.curdir)\n\n while True:\n if os.path.exists(os.path.join(path, \"setup.py\")):\n return path\n path = os.path.realpath(os.path.join(path, \"..\"))", "def src_subpath(self) -> str:\n val = self.get(\"src_subpath\", \"public\").strip()\n if len(val) > 0:\n return val\n return None", "def runner_path():\n git_base = os.popen('git rev-parse --show-toplevel').read().strip()\n return os.path.join(git_base, RUNNER_SCRIPT_BASENAME)", "def root_dir():\r\n return Path(__file__).parent.parent", "def gitroot(dir=\"\"):\n # Supress errors from Git\n git_cmd = \"git rev-parse --show-toplevel \" + dir + \" 2> \" + os.devnull\n if dir:\n original_cwd = os.getcwd()\n os.chdir(dir)\n try:\n sub_out = subprocess.check_output(git_cmd, shell=True)\n cmd_out = sub_out.decode().rstrip(). splitlines()[0]\n except:\n cmd_out = \"\"\n if dir:\n os.chdir(original_cwd)\n return cmd_out", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent", "def get_project_root() -> Path:\n return Path(__file__).parent.parent" ]
[ "0.7207564", "0.70896846", "0.70820946", "0.70458966", "0.7013986", "0.6813633", "0.6768941", "0.6744797", "0.6672002", "0.6643499", "0.6634959", "0.66120505", "0.6588977", "0.6572024", "0.6556969", "0.65439117", "0.65423363", "0.65149325", "0.65131664", "0.65060693", "0.6501921", "0.6498542", "0.6466095", "0.6433235", "0.6422085", "0.6417806", "0.6365784", "0.6349699", "0.6349699", "0.6349699" ]
0.7769902
0
Gets the datetime of the commit at HEAD for a Git repository in UTC. The datetime returned contains timezone information (in timezone.utc) so that it can easily be formatted or converted (e.g., to local time) based on the caller's needs.
def get_head_commit_datetime(
        git_repo: Optional[Union[str, pathlib.Path]] = None) -> dt.datetime:
    if not git_repo:
        git_repo = get_chromium_src_path()
    if not isinstance(git_repo, pathlib.Path):
        git_repo = pathlib.Path(git_repo)
    _assert_git_repository(git_repo)
    timestamp = subprocess_utils.run_command(
        ['git', 'show', '--no-patch', '--format=%ct'], cwd=git_repo)
    return dt.datetime.fromtimestamp(float(timestamp), tz=dt.timezone.utc)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_commit_date():\n return subprocess.check_output(['git', 'log', '-1', '--pretty=%ad',\n '--date=format:%d %b %H:%M', 'py/calendon']).decode().strip()", "def __last_commit_date(self):\n return utils.run('git', ['log', '--all', '-1', '--format=%cI'],\n self.__project.location).rstrip()", "def get_current_timestamp(path_to_repository):\n repo = Repo(path_to_repository)\n return repo.head.commit.committed_date", "def commit_time(self):\n return self._commit_time", "def svn_client_commit_info_t_date_get(svn_client_commit_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def GetCommitDate(git_url, commit):\n parsed_url = urlparse.urlparse(git_url)\n path = '%s/+log/%s?n=1&format=JSON' % (parsed_url.path.rstrip('/'), commit)\n j = FetchUrlJson(parsed_url.netloc, path, ignore_404=False)\n if not j:\n raise GOBError(\n 'Could not find revision information from %s' % git_url)\n try:\n commit_timestr = j['log'][0]['committer']['time']\n except (IndexError, KeyError, TypeError):\n msg = ('The json returned by https://%s%s has an unfamiliar structure:\\n'\n '%s\\n' % (parsed_url.netloc, path, j))\n raise GOBError(msg)\n try:\n # We're parsing a string of the form 'Tue Dec 02 17:48:06 2014'.\n return datetime.datetime.strptime(commit_timestr,\n constants.GOB_COMMIT_TIME_FORMAT)\n except ValueError:\n raise GOBError('Failed parsing commit time \"%s\"' % commit_timestr)", "def __first_commit_date(self):\n return utils.run('git',\n ['log', '--all', '--format=%cI', '--first-parent',\n '--reverse', '--max-parents=0'],\n self.__project.location).splitlines()[0].rstrip()", "def git_get_mtime_at_commit(path: Path, revision: str, cwd: Path) -> str:\n cmd = [\"log\", \"-1\", \"--format=%ct\", revision, \"--\", path.as_posix()]\n lines = _git_check_output_lines(cmd, cwd)\n return datetime.utcfromtimestamp(int(lines[0])).strftime(GIT_DATEFORMAT)", "def commit(self):\n return settings.GIT_COMMIT", "def get_current_commit_hash() -> FullCommitHash:\n return get_commit_hash(\"HEAD\")", "def get_commit_time(self, metric):\n session = requests.Session()\n session.verify = False\n\n logging.debug(\"metric.repo_project %s\" % (metric.repo_project))\n logging.debug(\"metric.git_api %s\" % (self._git_api))\n\n git_server = self._git_api\n\n if (\n \"github\" in git_server\n or \"bitbucket\" in git_server\n or \"gitlab\" in git_server\n or \"gitea\" in git_server\n ):\n logging.warn(\"Skipping non Azure DevOps server, found %s\" % (git_server))\n return None\n\n # Private or personal token\n # Fill in with your personal access token and org URL\n personal_access_token = self._token\n organization_url = self._git_api\n\n # Create a connection to the org\n credentials = BasicAuthentication(\"\", personal_access_token)\n connection = Connection(base_url=organization_url, creds=credentials)\n\n # Get a client (the \"git\" client provides access to commits)\n git_client = connection.clients.get_git_client()\n\n commit = git_client.get_commit(\n commit_id=metric.commit_hash,\n repository_id=metric.repo_project,\n project=metric.repo_project,\n )\n logging.debug(\"Commit %s\" % ((commit.committer.date).isoformat(\"T\", \"auto\")))\n if hasattr(commit, \"innerExepction\"):\n # This will occur when trying to make an API call to non-Github\n logging.warning(\n \"Unable to retrieve commit time for build: %s, hash: %s, url: %s. 
Got http code: %s\"\n % (\n metric.build_name,\n metric.commit_hash,\n metric.repo_url,\n str(commit.message),\n )\n )\n else:\n try:\n metric.commit_time = commit.committer.date.isoformat(\"T\", \"auto\")\n logging.info(\"metric.commit_time %s\" % (str(metric.commit_time)[:19]))\n logging.info(\"self._timedate_format %s\" % (self._timedate_format))\n metric.commit_timestamp = pelorus.convert_date_time_to_timestamp(\n (str(metric.commit_time)[:19]), self._timedate_format\n )\n except Exception:\n logging.error(\n \"Failed processing commit time for build %s\" % metric.build_name,\n exc_info=True,\n )\n logging.debug(commit)\n raise\n return metric", "def get_git_changeset(filename=None):\n dirname = os.path.dirname(filename or __file__)\n git_show = sh('git show --pretty=format:%ct --quiet HEAD',\n cwd=dirname)\n timestamp = git_show.partition('\\n')[0]\n try:\n timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))\n except ValueError:\n return None\n return timestamp.strftime('%Y%m%d%H%M%S')", "def get_commit_hash():\n git_dir = get_git_root()\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--short\", \"--verify\", \"HEAD\"]\n return subprocess.check_output(args).strip().decode()", "def cur_commit():\n result = run(\n [\"git\", \"rev-parse\", \"HEAD\"], stdout=PIPE, stderr=PIPE, encoding=\"utf-8\",\n )\n result.check_returncode()\n return result.stdout.strip()", "def get_commit():\n cmd = \"git rev-parse HEAD\"\n result = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE)\n return result.stdout.decode(\"utf-8\").strip()", "def get_git_timestamp(path):\n return int(_run_command(path, 'git log -1 --format=%ct'))", "def get_current_commit():\n import os\n import subprocess\n git_dir = \"{}/.git\".format(settings.BASE_DIR)\n if os.name == 'nt':\n git_dir = \"{}\\\\.git\".format(settings.BASE_DIR)\n return subprocess.check_output([\"git\", \"--git-dir={}\".format(git_dir), \"rev-parse\", \"--verify\", \"HEAD\", \"--short\"]).decode(\"utf-8\")", "def last_modified_commit(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%h',\n '--',\n *paths\n ], **kwargs).decode('utf-8')", "def get_commit_hash(repo_location, commit='origin/HEAD'):\n if not os.path.exists(pjoin(repo_location, '.git')):\n raise ValueError\n ret, out = spawn_get_output(\n ['git', 'rev-parse', commit], cwd=repo_location)\n if ret != 0:\n raise ValueError(\n f'failed retrieving {commit} commit hash '\n f'for git repo: {repo_location}')\n return out[0].strip()", "async def get_last_commit(self) -> None:\n _endpoint = f\"/repos/{self.full_name}/branches/{self.default_branch}\"\n response = await self.client.get(endpoint=_endpoint)\n return AIOGitHubAPIReposCommit(response.get(\"commit\", {}))", "def get_current_commit_sha():\n return check_output(\n \"git rev-parse HEAD\".split(\" \")\n ).decode('utf-8').strip()", "def _get_git_hash(self):\n try:\n with open(os.path.join(self._base_dir, '.git', 'HEAD'), 'r') as head_file:\n ref = head_file.read().strip()\n if ref[:5] == 'ref: ':\n with open(os.path.join(self._base_dir, '.git', ref[5:]), 'r') as commit_file:\n return commit_file.read().strip()\n else:\n return ref[5:]\n except Exception as err:\n self._logger.warning('Couldnt read the git commit hash: %s :: %s',\n err.__class__.__name__, err)\n return 'UNKNOWN'", "def get_commit_message():\n return shell_output('git log HEAD -1 --pretty=%B')", "def last_modified_date(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%cd',\n 
'--date=iso',\n '--',\n *paths\n ], **kwargs).decode('utf-8')", "def get_commit(self, cwd=None):\n cwd = cwd or self.path\n if isinstance(cwd, str):\n cwd = config.Path(cwd)\n if not cwd.exists():\n return None\n try:\n return subprocess.check_output([\n \"git\", \"rev-parse\", \"HEAD\"\n ], cwd=str(cwd)).decode(\"utf-8\").strip()\n except subprocess.CalledProcessError:\n return \"Failed\"", "def _getUTC(self, config = {} ):\n # Default implementation: get system local time\n return datetime.datetime.utcnow()", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def last_changed_utc(self) -> str:\n return pulumi.get(self, \"last_changed_utc\")", "def _get_git_commit_id():\n from git import Repo\n from os.path import split, dirname\n path = split(dirname(__file__))[0]\n commit_id = Repo(path).head.object.hexsha\n return commit_id[:8]", "def get_repository_last_update_timestamp(api_1_0_url):\n repository_last_update_timestamp = ''\n try:\n r = requests.get(api_1_0_url)\n json_string = r.content\n data = json.loads(json_string)\n try:\n repository_last_update_timestamp = data['utc_last_updated']\n except Exception as error:\n print(\"Caught error: \" + repr(error))\n except Exception as error:\n print(\"Failed to connect to bitbucket: \" + repr(error))\n exit(1)\n return repository_last_update_timestamp" ]
[ "0.73576576", "0.7228298", "0.6683409", "0.663864", "0.6581028", "0.6477623", "0.6476076", "0.64546764", "0.64242756", "0.6367367", "0.6354047", "0.6353915", "0.6298306", "0.6136942", "0.61079997", "0.6062234", "0.6038261", "0.6030498", "0.59632814", "0.59031916", "0.58791256", "0.5873956", "0.58670807", "0.58617187", "0.5844455", "0.5839948", "0.58171856", "0.5780696", "0.5777978", "0.57769835" ]
0.7551182
0
Takes a list or array of postcodes and returns it as a cleaned numpy array
def clean_postcodes(postcodes):
    postcode_df = pd.DataFrame({'Postcode':postcodes})
    postcode_df['Postcode'] = postcode_df['Postcode'].str.upper()

    # If length is not 7 get rid of spaces. This fixes e.g. "SW19 2AZ" -> "SW192AZ"
    postcode_df['Postcode'] = postcode_df['Postcode'].where(
        postcode_df['Postcode'].str.len() == 7,
        postcode_df['Postcode'].str.replace(" ", ""))

    # If length is 5 (e.g. "W67HZ") add two spaces in the middle (-> "W6  7HZ")
    postcode_df['Postcode'] = postcode_df['Postcode'].where(
        postcode_df['Postcode'].str.len() != 5,
        postcode_df['Postcode'].str[:2] + "  " + postcode_df['Postcode'].str[2:])

    # If length is 6 (e.g. "SW72AZ") add a space in the middle and end(-> "SW7 2AZ")
    postcode_df['Postcode'] = postcode_df['Postcode'].where(
        postcode_df['Postcode'].str.len() != 6,
        postcode_df['Postcode'].str[:3] + " " + postcode_df['Postcode'].str[3:])

    return postcode_df['Postcode'].to_numpy()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(lst, typecode ):\n a = array.array( typecode )\n for n,c in lst: \n a.extend( array.array( typecode, (c,) * n ) )\n return a", "def clean_data(array):\n ret = np.zeros(len(array))\n for i in range(len(array)):\n drop_id = len(str(i+1)) + 1\n array[i, 0] = array[i, 0][int(drop_id):]\n return array", "def clean(listoflist):\n # Firs column is Customer ID, not used for this classification, remove\n array2d = np.array(listoflist)\n array2d = np.delete(array2d, 0, 1)\n\n # convert array from string to float\n array2d = array2d.astype(np.float)\n\n # days_since_last_order is in hours, divide by 24 to get the correct value\n array2d[:, 1] = array2d[:, 1] / 24\n\n # average_discount_used is multiplied by 1000, divide by 1000 to get the correct value, keep 4 decimal places\n array2d[:, 40] = np.around(array2d[:, 40] / 10000, 4)\n\n return array2d", "def clean_values(values_to_clean: np.ndarray):\n char_rem = \"!@#$%^*()[]{};:.,/<>?|`~-=_+'\\\\\"\n for j in range(values_to_clean.shape[0]):\n for k in range(2, 4):\n for c in char_rem:\n values_to_clean[j, k] = re.sub(' +', ' ', values_to_clean[j, k].replace(c, \" \").strip())\n return values_to_clean", "def preprocess_data(self, amino_data):\n\t\tfor i in range(len(amino_data)):\n\t\t\tif amino_data[i] == 'H':\n\t\t\t\tamino_data[i] = 1\n\t\t\telif amino_data[i] == 'P':\n\t\t\t\tamino_data[i] = 2\n\n\t\treturn np.array(amino_data)", "def get_postal_codes(pts):\n codigos = np.zeros((len(pts),))\n for i, p in tqdm(enumerate(pts), desc=\"GETTING POSTAL CODES\"):\n p = Point(p[0], p[1])\n for j in range(cod_postales.shape[0]):\n if cod_postales.geometry.iloc[j].contains(p):\n codigos[i] = cod_postales.geocodigo.iloc[j]\n return codigos[codigos != 0]", "def convert_gcode_to_array(gcode_raw, dim=(HEIGHT, WIDTH)):\n\t\n\tarray = np.zeros(dim, dtype=int)\n\tprint dim\n\t(x,y) = (0,0)\n\tfor gcode_command in gcode_raw:\n\t\tletter_address = gcode_command[0:2]\n\t\tif letter_address == \"G1\":\n\t\t\t(x, y) = get_xy_position(gcode_command)\n\t\telif letter_address == \"M4\":\n\t\t\tcontinue\n\t\telif letter_address == \"M7\":\n\t\t\tfiring_pattern = parse_gcode_M7(gcode_command)\n\t\t\tstrip_array = np.array(list(firing_pattern), dtype=int)\n\t\t\t#print strip_array\n\n\t\t\ttry:\n\t\t\t\t#array[y, x:x+13] += strip_array\n\t\t\t\tarray[y:y+13, x] += strip_array\n\t\t\t\tprint \"strip_array\", strip_array\n\t\t\texcept ValueError: # ValueError: operands could not be broadcast together with shapes XXX\n\t\t\t\tprint \"strip_array\", strip_array\n\t\t\t\t#len_array = len(array[x:x+13, y])\n\t\t\t\tlen_array = len(array[y:y+13, x])\n\t\t\t\t#array[y, x:x+13] += strip_array[:len_array]\n\t\t\t\tarray[y:y+13, x] += strip_array[:len_array]\n\t\t\t\t#print array[x:x+13, y]\n\t\t\t\tprint array[y:y+13, x]\n\t\t\t\tprint strip_array[:len_array]\n\t\t\t\tprint \"array\", array\n\t\telse:\n\t\t\traise Exception(\"Encountered unexpected gcode letter address: [{}]\".format(letter_address))\n\t\tprint (x,y)\n\treturn array", "def remove_handles(text_array):\n\n handle_pattern = re.compile(r'([@])\\w+')\n\n return np.array([re.sub(handle_pattern, \"\", str(string)) for string in text_array])", "def construct_preunitary(arr, array_list):\n s = array_list[1].shape\n preunitary = np.array([])\n preunitary = np.append(preunitary, arr)\n preunitary = preunitary.reshape(1, s[1])\n for i in range(1, len(array_list)):\n preunitary = np.hstack((preunitary, array_list[i]))\n preunitary = preunitary.reshape(s[1], s[1]).T\n return preunitary", "def parse_preds(preds):\n 
_preds = (preds > 0.5).astype(int)\n _preds = _preds * 255\n print(_preds.shape)\n return _preds", "def strip_array(arr):\n\n return [word.strip(' ') for word in arr]", "def _parse(self, array):\n return [self._parse_note(x) for x in array]", "def clean(imagedata):\n\n if isinstance(imagedata, ndarray):\n imagedata = [imagedata]\n\n outdict = [array_to_im(im) for im in imagedata]\n\n return {'images': outdict}", "def sanitise_array(data):\n array = np.array(data)\n\n if array.ndim == 0:\n array = array[np.newaxis, np.newaxis]\n elif array.ndim == 1:\n array = array[:, np.newaxis]\n elif array.ndim != 2:\n raise ValueError(f'Only 1/2 dimensional data can be saved to text files, data.shape = {array.shape}')\n\n return array", "def encode(data, code_book):\n return np.array([int(chunk, 2).to_bytes(-(-len(chunk) // 8), byteorder='big') for chunk in\n map(lambda tup: ''.join(tup), (lambda iterable: zip_longest(*[iter(iterable)] * 8, fillvalue=''))(\n ''.join(map(lambda x: code_book[x], data))))])", "def normalize_array(var):\n if np.issubdtype(var.dtype, 'S1'):\n if var.dtype == str:\n # Python 2 on netCDF4 'string' variables needs this.\n # Python 3 returns false for np.issubdtype(var.dtype, 'S1')\n return var[:]\n\n def decoder(x):\n return str(x.decode('utf-8'))\n vfunc = np.vectorize(decoder)\n return vfunc(nc4.chartostring(var[:]))\n else:\n return var[:]", "def decode(codes, dictionary):\n c, d = dictionary.shape\n codes = codes.reshape(-1, c)\n \n return np.dot(codes, dictionary)", "def post_process_result(self, result: np.ndarray) -> np.ndarray:\n to_cut = len(\"_tag\")\n return np.asarray([[tag[:-to_cut] for tag in list_of_tags] for list_of_tags in result])", "def pose_list_to_array(poses):\n tmp_list = list()\n for p in poses:\n tmp_list.append([p.pose.position.x,\n p.pose.position.y,\n p.pose.position.z,\n\n p.pose.orientation.x,\n p.pose.orientation.y,\n p.pose.orientation.z,\n p.pose.orientation.w,\n\n p.header.stamp.to_sec()\n ])\n\n return np.array(tmp_list)", "def string_to_array(self):\n temp_map = copy.deepcopy(self.map.replace(\" \", \"\"))\n map_list = [[a for a in row] for row in temp_map.splitlines()]\n\n # Checks that all lines are of equal length\n for line in map_list:\n for index in range(len(map_list)):\n if len(map_list[index]) == len(line):\n continue\n else:\n raise SyntaxError(\"Island geography multi-line string \"\n \"must have lines of same length.\")\n map_arr = np.array(map_list)\n\n # Checks that there are only 'O's at the edges.\n edge = []\n edge += list(map_arr[0, :])\n edge += list(map_arr[-1, :])\n edge += list(map_arr[1:-1, 0])\n edge += list(map_arr[1:-1, -1])\n if set(edge) == {'O'}:\n pass\n else:\n raise SyntaxError(\"Island geography multi-line string \"\n \"must have 'O' around the edges. 
\")\n\n return map_arr", "def normalise_multiple_iso(data, isos, lookup_table):\n as_list = [normalise_single_iso(data_sub, ISO, lookup_table) for data_sub, ISO in zip(data, isos)]\n as_array = np.array(as_list)\n return as_array", "def de_project(np_arr):\n item = (np_arr +1)*255 / 2\n return item.astype(np.int32, copy=True)", "def numpy_to_native_list(np_list):\n ret = []\n for val in np_list:\n ret.append(safe_numpy_to_native(val))\n return ret", "def cleaning (data):", "def preprocess(data):\n raise NotImplementedError", "def geocode_postcode(self, postcode: [str],\n address: Optional[str] = None) -> Union[Tuple[float, float], List[Tuple[float, float]]]:\n address = [None for a in address] if address is None else list(address)\n logging.debug(\"Geocoding %s postcodes (%s addresses)\", len(postcode), len(address))\n results = []\n for pc, addr in zip(postcode, address):\n results.append(self.geocode_one(postcode=pc, address=addr))\n return results", "def read_postcodes(self):\n if \".xls\" in self.postcodes_file:\n wb = openpyxl.load_workbook(self.postcodes_file)\n sheet = wb.get_sheet_by_name('Sheet1')\n postcodes = []\n for row in sheet.values:\n if \"postcode\" in str(row[0]): continue\n postcodes.append(str(row[0]).strip())\n else:\n postcodes = [str(i.strip()) for i in open(self.postcodes_file).readlines() if i.strip()]\n\n return postcodes", "def transform(self, y):\n encode_y = []\n for l in y:\n encode_y.append(self.encode_dict[l])\n\n return np.array(encode_y)", "def extract(self, packets):\n data = ''.join(data[3:63] for data in packets)\n return np.fromstring(data, dtype='<u2').astype('u4')", "def kmer_seq_to_filters(kmers):\n\treturn np.concatenate([dna_string_to_array(s) for s in kmers])" ]
[ "0.5970978", "0.5884224", "0.5817092", "0.5791146", "0.57216644", "0.5662122", "0.56127787", "0.55187774", "0.5317119", "0.5283018", "0.5241644", "0.52055186", "0.5141189", "0.5121496", "0.5102922", "0.50876445", "0.5085679", "0.50591373", "0.5050036", "0.5042256", "0.5041779", "0.5022929", "0.5010269", "0.50058895", "0.49995425", "0.49827817", "0.4971443", "0.49663344", "0.49591914", "0.49536544" ]
0.60236096
0
Get an array of WGS84 (latitude, longitude) pairs from a list of postcodes.
def get_lat_long(self, postcodes):
    # Fix evil postcodes
    postcodes = clean_postcodes(postcodes)

    postcode_df = self.postcode_df
    postcode_df = postcode_df.fillna('np.nan')
    postcode_df = postcode_df.set_index('Postcode')
    index_data = postcode_df.loc[postcodes]
    lat = np.array(index_data['Latitude']).T
    lng = np.array(index_data['Longitude']).T
    return np.vstack((lat, lng)).transpose()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def geocode_postcode(self, postcode: [str],\n address: Optional[str] = None) -> Union[Tuple[float, float], List[Tuple[float, float]]]:\n address = [None for a in address] if address is None else list(address)\n logging.debug(\"Geocoding %s postcodes (%s addresses)\", len(postcode), len(address))\n results = []\n for pc, addr in zip(postcode, address):\n results.append(self.geocode_one(postcode=pc, address=addr))\n return results", "def loc_to_coord(codes):\n def adfilter(codes):\n return re.findall(\"\"\"[a-zA-Z]+, [A-Z]{2}\"\"\", \";\".join(codes))\n\n api_key = \"AIzaSyCxQCjOrHFAf7T-W3vtUYqWkgSFkvMjxN4\"\n\n g = geocoders.GoogleV3(api_key = api_key)\n coords = {\"lat\":[], \"long\":[]}\n for code in adfilter(codes):\n if code != \"\":\n try:\n place = g.geocode(code)\n if place != None:\n coords[\"lat\"].append(place.latitude)\n coords[\"long\"].append(place.longitude)\n except (exc.GeocoderTimedOut, exc.GeocoderQueryError):\n pass\n return coords", "def get_coordinates(postal_code):\n # TODO IMPROVE: ideally we want the exact coordinates of postal_code not the ones of the closest...\n # TODO IMPROVE: ...postal code !!\n # we pre loaded PC_COORD to speed up computations\n name = PC_COORD.ix[(PC_COORD['Postal Code']-postal_code).abs().argsort()[0]]\n return (name.Lat, name.Long)", "def get_coordinates(addresses, boroughs):\n latitude = []\n longitude = []\n for address, borough in zip(addresses, boroughs):\n try:\n g = geocoder.osm('{}, {}, New York'.format(address, borough)).json\n latitude.append(g['lat'])\n longitude.append(g['lng'])\n except:\n latitude.append(None)\n longitude.append(None)\n\n return np.array(latitude).T, np.array(longitude).T", "def createCityCoordinateList(gmaps, cityList):\n cities = []\n print \"Calculating gps coordinates.\"\"\"\n for i in range(len(cityList)):\n r = gmaps.geocode(cityList[i])\n c = r['Placemark'][0]['Point']['coordinates'][0:2]\n cities.append((cityList[i], c[0], c[1]))\n \n return cities", "def get_lat_lon(data):\n from time import sleep\n from geopy import geocoders\n from geopy.exc import GeocoderTimedOut\n\n gn = geocoders.GeoNames(username='foobar')\n\n cities = get_cities(data).keys()\n\n coords = {}\n for city in cities:\n while True:\n try:\n loc = gn.geocode(city + \", Brazil\")\n except GeocoderTimedOut:\n sleep(2)\n else:\n break\n\n coords[city] = (loc.latitude, loc.longitude)\n\n return coords", "def get_postal_codes(pts):\n codigos = np.zeros((len(pts),))\n for i, p in tqdm(enumerate(pts), desc=\"GETTING POSTAL CODES\"):\n p = Point(p[0], p[1])\n for j in range(cod_postales.shape[0]):\n if cod_postales.geometry.iloc[j].contains(p):\n codigos[i] = cod_postales.geocodigo.iloc[j]\n return codigos[codigos != 0]", "def retrieveManualGeocodes():\n\n\tshp_2013 = join(project_dir, '2013', 'shp')\n\tw_lid = join(shp_2013, 'west_lid_qcew13_zip_regeocoded.shp')\n\te_lid = join(shp_2013, 'east_lid_qcew13_zip_regeocoded.shp')\n\n\tbin_dict = {}\n\tfor lid in (w_lid, e_lid):\n\t\twith da.SearchCursor(lid, '*') as cursor:\n\t\t\tfor row in cursor:\n\t\t\t\td = OrderedDict(zip(cursor.fields, row))\n\t\t\t\t# if the geometry wasn't matched in the geocoding it has\n\t\t\t\t# a value of (None, None) in the 'Shape' field\n\t\t\t\tif d['Status'] != 'U':\n\t\t\t\t\tgeo_fields = (\n\t\t\t\t\t\t'Shape', 'Loc_name', 'Score', 'Match_type')\n\t\t\t\t\tgeo_dict = {k: d[k] for k in geo_fields}\n\t\t\t\t\tbin_dict[d['BIN']] = geo_dict\n\t\n\treturn bin_dict", "def feature_coords(features):\n coords_list = []\n for feature in features:\n coord_start = 
feature.location.nofuzzy_start\n coord_end = feature.location.nofuzzy_end\n coord_pair = (coord_start, coord_end)\n coords_list.append(coord_pair)\n ## consider adding some info to the log\n return coords_list", "def get_coord_from_address(code_postal, adresse=None):\n headers = {\"Content-Type\": \"application/json\"}\n if adresse != None:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(adresse) + \"&postcode=\" + str(code_postal)))\n else:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(code_postal)))\n print(url)\n r = requests.get(url, headers=headers, data=\"\")\n js = json.loads(r.text)\n if code_postal == 75001:\n x = js['features'][1]['geometry']['coordinates']\n else:\n \tx = js['features'][0]['geometry']['coordinates']\n longitude = x[0]\n latitude = x[1]\n pos = []\n pos.append(longitude)\n pos.append(latitude)\n print(pos)\n return pos", "def fix_location(r):\n \n # all is fine: just change zipcode datatype to str\n if not np.isnan(r['zip']) and not np.isnan(r['lat']):\n return [str(int(r['zip'])), r['lng'], r['lat']]\n \n # try to locate within zipcode polygons\n if not np.isnan(r['lat']):\n query = \"\"\"\n SELECT t.geoid as zip, {} as lng, {} as lat\n FROM us_zcta5 t JOIN usps_zcta5 z ON t.geoid = z.zip\n WHERE ST_Contains(t.shape, ST_GeomFromText('POINT({} {})', 2))\n \"\"\"\n res = pd.read_sql(query.format(r['lng'], r['lat'], r['lng'], r['lat']), con = con)\n if len(res) == 1:\n return res.values[0].tolist()\n\n # use zipcode center as location proxy: geocoding is prefered in this case, but might be quite expensive\n if not np.isnan(r['zip']):\n res = zipcodes[zipcodes['zip'] == str(int(r['zip']))]\n if len(res) == 1:\n return res.values[0].tolist()[:3]\n\n return [None, None, None]", "def coord_in_lat_long(list_lat_long):\n mid = len(list_lat_long) // 2\n print(\"Number of coordinates : {}\".format(mid))\n return [[list_lat_long[2 * counter], list_lat_long[2 * counter + 1]] for counter in range(mid)]", "def geocode(address, jurisdictions, required_precision_km=1., limit=5):\n try:\n key = 'pk.eyJ1IjoiZGV2c2VlZCIsImEiOiJnUi1mbkVvIn0.018aLhX0Mb0tdtaT2QNe2Q'\n geocoded = NewMapboxQuery(address, key=key, country='us', limit=limit)\n results = []\n if len(geocoded) > 0:\n for item in geocoded:\n multipoints = MultiPoint([GEOSGeometry(item.wkt)])\n for jurisdiction in jurisdictions.filter(geometry__intersects=multipoints):\n if not jurisdiction in results:\n results.append(jurisdiction)\n return results\n return []\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n return []", "def get_coordinates_list(self):\n return [tweet['coordinates'][::-1] for tweet in self.tweets_data]", "def get_polygon_coordinates(self) -> Tuple[List, List]:\n\n polygon_query = f\"https://nominatim.openstreetmap.org/\" \\\n f\"search?city={self.location.replace(' ', '+')}&polygon_geojson=1&format=json\"\n r = requests.get(polygon_query)\n js = ast.literal_eval(r.text)\n\n self.monitor.info(\"-> Downloaded area polygon data points.\")\n clean_polygon_coords = js[0]['geojson']['coordinates'][0]\n\n polygon_lats = [float(i[1]) for i in clean_polygon_coords]\n polygon_longs = [float(i[0]) for i in clean_polygon_coords]\n\n self.monitor.info(\"-> Created lat/long vectors.\")\n return polygon_lats, polygon_longs", "def fn2lonlat(filename):\n tokens = filename.split(\"/\")[-1].rsplit(\".\", 1)[0].split(\"x\")\n return [0 - float(tokens[0]), float(tokens[1])]", "def proj_coords(coords, proj_in, proj_out): \n return [proj_coord(coord, proj_in, proj_out) for coord 
in coords]", "def coordinates_to_locations(coordinates):\n np_coords = np.array(coordinates)\n longs, lats = transform(Proj(init=EPSG_IN), Proj(init=EPSG_OUT), np_coords[:, 0], np_coords[:, 1])\n length = len(lats)\n result = []\n for i in range(length):\n loc = Location(lats[i], longs[i])\n raw_location = extract_raw_simple_coordinates(coordinates[i])\n loc.set_raw_coordinates_simplified(raw_location[0],raw_location[1])\n result.append(loc)\n\n return result", "def get_friends_coordinates(friends_locations_list: list, geocode) -> list:\n friends_coordinates_list = []\n for user, location_str in friends_locations_list:\n match_location = geocode(location_str)\n if match_location is not None:\n friends_coordinates_list.append((user, (\n match_location.latitude, match_location.longitude\n )))\n return friends_coordinates_list", "def convert_coordinates(self, coordinates):\n return np.array(zip(*self.basemap(*zip(*coordinates))))", "def shot_geolocations(self) -> geopandas.array.GeometryArray:\n geolocations = np.array(\n [shapely.geometry.Point(lon, lat) for lon, lat in self.shot_lon_lat],\n dtype=shapely.geometry.Point,\n )\n\n return geopandas.array.GeometryArray(geolocations, crs=WGS84)", "def convert_all_coordinates(results: List[ResponseObject]) -> List[ResponseObject]:\n results = [convert_lat_long_dict(result) for result in results]\n results = [convert_lat_long_list(result) for result in results]\n return results", "def _get_polygon(areasrc):\n\n str = areasrc.geometry.wkt\n str = re.sub('POLYGON\\(\\(', '', str)\n str = re.sub('\\)\\)', '', str)\n aa = re.split('\\,', str)\n lons = []\n lats = []\n for str in aa:\n bb = re.split('\\s+', re.sub('^\\s+', '', str))\n lons.append(float(bb[0]))\n lats.append(float(bb[1]))\n return lons, lats", "def get_zipcode_stations(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n neighborhood_stations = text(\r\n \"\"\"\r\n SELECT\r\n \"name\" as name,\r\n \"addressStreet\" as address,\r\n \"bikesAvailable\" as available_bikes,\r\n v.geom as geom,\r\n ST_X(v.geom) as lon, ST_Y(v.geom)as lat\r\n FROM indego_rt1130 as v\r\n JOIN philly_zipcode as n\r\n ON ST_Intersects(v.geom, n.geom)\r\n WHERE n.code = :name\r\n \"\"\"\r\n )\r\n stations = gpd.read_postgis(neighborhood_stations, con=engine, params={\"name\": name})\r\n return stations", "def read_waypoints():\n\tfilename = \"waypoints.txt\"\n\tfile = open(filename, \"r\")\n\twp_list = []\n\n\tfor line in file:\n\t\t# Get the individual elements, splitting by whitespace\n\t\tdata_list = line.split()\n\t\tcoordinate = {'x': data_list[0], 'y': data_list[1], 'z': data_list[2]}\n\t\twaypoint = {'radius': data_list[3], 'point': coordinate}\n\n\t\twp_list.append (waypoint)\n\n\treturn wp_list", "def convert(gpsfile):\n coordinates = []\n lst = []\n for line in gpsfile:\n if line.startswith('$GPGGA'):\n # get time, fix signal and dilution of precision\n arr = line.split(',')\n data = [arr[2], arr[6], arr[8]]\n lst.append(data)\n\n elif line.startswith('lng'):\n # get longitude, latitude, altitude, speed, angle\n arr = line.split(',')\n lng = arr[0].split('=')\n lng = lng[1]\n lat = arr[1].split('=')\n lat = lat[1]\n alt = arr[2].split('=')\n alt = alt[1]\n speed = arr[3].split('=')\n speed = speed[1]\n ang = arr[5].split('=')\n ang = ang[1]\n lst.append([float(lng), float(lat), float(alt), float(speed), float(ang)])\n\n # check if a GPGGA line was found, otherwise don't add this point\n if len(lst) == 2:\n coordinates.append(lst)\n lst = []\n\n return coordinates", "def 
parse_kmz(filename: str) -> List[Tuple[float, float]]:\n kmz = ZipFile(filename, \"r\")\n kml = kmz.open(\"doc.kml\", \"r\").read()\n\n tree = etree.parse(BytesIO(kml))\n\n coordinates = tree.xpath(\n \"/a:kml/a:Document/a:Placemark/a:LineString/a:coordinates\",\n namespaces={\"a\": \"http://www.opengis.net/kml/2.2\"},\n )[0].text\n\n # geopy expects coordinate in the (long, lat) format\n coords = [\n (float(y[1]), float(y[0]))\n for y in [\n x.strip().split(\",\") for x in coordinates.split(\"\\n\") if len(x.strip())\n ]\n ]\n\n return coords", "def waypoint_coordinate_extractor(waypoint):\n return [waypoint.pose.pose.position.x, waypoint.pose.pose.position.y]", "def get_locations(loc_fn):\n with open(loc_fn, 'rb') as fin:\n coordinate_string = fin.readline()\n coordinates = coordinate_string.split(',')\n coordinates = [float(c.strip()) for c in coordinates]\n return coordinates", "def createCoordTuples(data):\n data['xy'] = None\n for i, row in data.iterrows():\n data['xy'][i] = [np.round(row['geometry'].x, decimals=5), np.round(row['geometry'].y, decimals=5)]\n return data" ]
[ "0.6265567", "0.62188435", "0.616504", "0.6100916", "0.58316505", "0.58076537", "0.56954175", "0.5694291", "0.56838447", "0.5665563", "0.5650249", "0.54783994", "0.5472408", "0.54524064", "0.5436842", "0.5424833", "0.5395187", "0.53837734", "0.5375772", "0.5349759", "0.5339228", "0.53194726", "0.53140223", "0.530647", "0.5304108", "0.53027755", "0.5295396", "0.5288968", "0.52764183", "0.5272558" ]
0.62757945
0
Get an array of flood risk probabilities from arrays of eastings and northings. Flood risk data is extracted from the Tool flood risk file. Locations not in a risk band circle return `Zero`; otherwise the name of the highest band the location sits in is returned.
def get_easting_northing_flood_probability(self, easting, northing):
    # Read in risk files as pandas dataframe
    risks = self.risk_df
    prob_bands = np.full(np.size(easting), "Zero", dtype='<U8')
    # For each point we get:
    for point, point_east in enumerate(easting):
        point_north = northing[point]
        # Pick the zones where easting_min < easting < easting_max
        zones = risks.loc[(risks.X_max >= point_east) & (risks.X_min <= point_east)]
        # Further reduce these to where northing_min < northing < northing_max
        zones_pot = zones.loc[(zones.Y_max >= point_north) & (zones.Y_min <= point_north)]
        # For each potential zone:
        for i in range(len(zones_pot.index)):
            # Don't bother with further zones if we already know the risk is High
            if prob_bands[point] == "High":
                break
            row = zones_pot.iloc[i]
            # Squared distance from point to zone (we use squares to avoid square-rooting)
            dist2 = (row.X-point_east)*(row.X-point_east) + (row.Y-point_north)*(row.Y-point_north)
            if dist2 <= row.radius_squared:
                risk = row.prob_4band
                current_band = prob_bands[point]
                if risk == "High":
                    prob_bands[point] = risk
                elif risk == "Medium" and current_band != "High":
                    prob_bands[point] = risk
                elif risk == "Low" and (current_band != "High" and current_band != "Medium"):
                    prob_bands[point] = risk
                elif risk == "Very Low" and current_band == "Zero":
                    prob_bands[point] = "Very Low"
    return prob_bands
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sorted_flood_probability(self, postcodes):\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n # Get latitude and longitude\n output = self.get_lat_long(postcodes) # Returns latitude,longitude pairs in an array\n lat_long = pd.DataFrame(\n {'Postcode':postcodes, 'latitude':output[:, 0], 'longitude':output[:, 1]})\n\n # Delete the wrong format of postcode\n lat_long = lat_long.dropna(how='any')\n latitude = np.array(lat_long.latitude)\n longitude = np.array(lat_long.longitude)\n\n # Returns Eastings and Northings in an array\n output_2 = geo.get_easting_northing_from_lat_long(latitude, longitude)\n\n # Returns array of flood risk probabilities\n output_3 = self.get_easting_northing_flood_probability(output_2[0], output_2[1])\n\n # New column in dataframe containing the probabilities\n lat_long['Probability Band'] = output_3\n\n # Removing invalid postcodes\n lat_long = lat_long.dropna(how='any')\n # Removing duplicates\n lat_long = lat_long.drop_duplicates(subset='Postcode')\n\n # Sort by Probability Bands\n # add variable ordered to sort later by Xun Xie\n lat_long['Probability Band'] = pd.Categorical(\n lat_long['Probability Band'],\n categories=[\"High\", \"Medium\", \"Low\", \"Very Low\", \"Zero\"], ordered=True)\n #add sort firstly by Probability Band and then sort secondly by Postcode\n lat_long = lat_long.sort_values(by=['Probability Band', 'Postcode'], ascending=[True, True])\n lat_long = lat_long.set_index('Postcode')\n\n return lat_long # Make Postcode the Index", "def get_sorted_annual_flood_risk(self, postcodes):\n\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n # Get lat, long of postcodes\n arr = self.get_lat_long(postcodes)\n lat = arr[:, 0] # Latitude\n lng = arr[:, 1] # Longitude\n\n # Convert lat, long -> easting, northing\n tem = geo.get_easting_northing_from_lat_long(lat, lng, radians=False)\n eos = tem[0] # Easting\n nos = tem[1] # Northing\n\n # Get our data frame of postcodes and risks\n prob_band = self.get_easting_northing_flood_probability(eos, nos)\n flood_risk = self.get_annual_flood_risk(postcodes, prob_band)\n risk_df = pd.DataFrame({'Postcode':postcodes, 'Flood Risk':flood_risk})\n\n # Clean up data frame\n risk_df = risk_df.drop_duplicates()\n risk_df = risk_df.set_index('Postcode')\n risk_df = risk_df.sort_values(by=['Flood Risk', 'Postcode'], ascending=[False, True])\n\n return risk_df", "def animal_fitness(self):\n herb_fits = []\n carn_fits = []\n for cell in self.land_cells.values():\n for herb in cell.herbivores:\n herb_fits.append(herb.fitness)\n for carn in cell.carnivores:\n carn_fits.append(carn.fitness)\n if not herb_fits:\n return [carn_fits]\n elif not carn_fits:\n return [herb_fits]\n else:\n return [herb_fits, carn_fits]", "def get_annual_flood_risk(self, postcodes, probability_bands):\n #get cost_value\n cost_value = self.get_flood_cost(postcodes)\n\n #create Dataframe for replacing corresonding value\n risk_df = pd.DataFrame({'Probability Band': probability_bands})\n total_df = risk_df.replace(\n {'High':0.1, 'Medium': 0.02, 'Low': 0.01, 'Very Low': 0.001, 'Zero': 0})\n pro_ser = np.array(total_df['Probability Band'])\n\n #compute result\n annual = pro_ser * cost_value * 0.05\n\n return annual", "def heralded_fock_basis(self, detector_pattern):\n undetected_photons = self.photon_number - sum(detector_pattern)\n undetected_modes = set(range(self.N)) - self.circuit.detected_modes\n\n #write down the detector outcome in terms of which modes the photons arrived \n detector_outcome = []\n for mode, 
occupancy in zip(self.circuit.detected_modes, detector_pattern):\n detector_outcome.extend([mode] * occupancy)\n\n if undetected_photons > 0:\n #look at all options for where undetected photons could be\n undetected_outcomes = combinations_with_replacement(undetected_modes, undetected_photons)\n\n #combine detected and undetected outcomes\n return (tuple(sorted(detector_outcome + list(u))) for u in undetected_outcomes)\n else:\n return (tuple(detector_outcome),)", "def check_gto_wellFracs():\n inList = ascii.read('lists/gto_saturation_check.csv')\n pdGrism = yaml.load(open('yaml/grism_example.yaml'))\n \n outWell = []\n for oneObs in inList:\n pdGrism['scene'][0]['spectrum']['normalization']['norm_flux'] = float(oneObs['Kmag'])\n pdGrism['scene'][0]['spectrum']['normalization']['bandpass'] = str(oneObs['Norm Bandpass'])\n pdGrism['scene'][0]['spectrum']['sed']['teff'] = float(oneObs['Teff'])\n pdGrism['scene'][0]['spectrum']['sed']['log_g'] = float(oneObs['Logg'])\n pdGrism['scene'][0]['spectrum']['sed']['metallicity'] = float(oneObs['Metallicity'])\n pdGrism['configuration']['instrument']['filter'] = str(oneObs['Filter']).lower()\n pdGrism['configuration']['detector']['ngroup'] = int(oneObs['Ngroups'])\n pdGrism['configuration']['detector']['subarray'] = str(oneObs['Subarray']).lower()\n pdGrism['configuration']['detector']['readmode'] = str(oneObs['Read Mode']).lower()\n pd2 = pdFromDict(pdGrism)\n maxWell = np.max(pd2.get_well_depth_image())\n outWell.append(maxWell)\n inList['Well Frac'] = outWell\n inList.write('output/gto_wells.csv',overwrite=True)", "def get_masks(data):\n return [patient[0] for i, patient in enumerate(data) if i in good_patients]", "def GetParameters(ParamsFile, QualityFile, Bands, NumberOfParameters, RelativeUncert, ScaleFactor, ProcessSnow = 0):\n\n FillValue = 32767\n NumberOfBands = Bands.shape[0]\n\n # Get dimensions\n rows, cols = GetDimSubDataset( ParamsFile )\n\n Parameters = np.zeros((rows, cols, NumberOfBands, NumberOfParameters), np.float32)\n Uncertainties = np.zeros((rows, cols, NumberOfBands), np.float32)\n\n # Get Snow\n # 1 Snow albedo retrieved\n # 0 Snow-free albedo retrieved\n # 255 Fill Value\n print \"Reading Snow QA:\", QualityFile\n SubDatasetName = 'HDF4_EOS:EOS_GRID:\"' + QualityFile + '\":MOD_Grid_BRDF:Snow_BRDF_Albedo'\n SubDataset = gdal.Open(SubDatasetName, GA_ReadOnly)\n SnowQA = SubDataset.GetRasterBand(1).ReadAsArray()\n if ProcessSnow == 0:\n SnowQA = np.where( SnowQA == 0, 1, 0)\n else:\n SnowQA = np.where( SnowQA == 1, 1, 0)\n\n # Load BRDF parameters\n print \"Reading BRDF parameters...\"\n for Band in range( Bands.shape[0] ):\n SubDatasetName = 'HDF4_EOS:EOS_GRID:\"' + ParamsFile + '\":MOD_Grid_BRDF:BRDF_Albedo_Parameters_Band' + str( Bands[Band] )\n print SubDatasetName \n SubDataset = gdal.Open(SubDatasetName, GA_ReadOnly)\n\n for Parameter in range(NumberOfParameters):\n print \"Getting BRDF parameter\", Parameter\n Parameters[:,:,Band,Parameter] = SubDataset.GetRasterBand( Parameter + 1 ).ReadAsArray()\n\n # Snow mask\n Parameters[:,:,Band,Parameter] = Parameters[:,:,Band,Parameter] * SnowQA\n\n # Filter out fill values\n Parameters[:,:,Band,Parameter] = np.where(Parameters[:,:,Band,Parameter] == FillValue, 0.,\n Parameters[:,:,Band,Parameter] * ScaleFactor )\n\n # Get QA\n print \"Reading QA:\", QualityFile\n for Band in range( Bands.shape[0] ):\n SubDatasetName = 'HDF4_EOS:EOS_GRID:\"' + QualityFile + '\":MOD_Grid_BRDF:BRDF_Albedo_Band_Quality_Band' + str( Bands[Band] )\n SubDataset = gdal.Open(SubDatasetName, 
GA_ReadOnly)\n QA = SubDataset.GetRasterBand(1).ReadAsArray()\n\n # https://ladsweb.nascom.nasa.gov/api/v1/filespec/collection=6&product=MCD43A2\n # BRDF_Albedo_Band_Quality_BandN ( N is 1 to 7 )> \n # 0 = best quality, full inversion (WoDs, RMSE majority good)\n # 1 = good quality, full inversion (also including the cases that no clear sky\n # observations over the day of interest or the Solar Zenith Angle is too \n # large even WoDs, RMSE majority good)\n # 2 = Magnitude inversion (numobs >=7)\n # 3 = Magnitude inversion (numobs >=2&<7)\n # 4 = Fill value\n\n QA_flags = np.array( [ 0,1,2,3 ] )\n\n for i, QA_flag in enumerate( QA_flags ) :\n indices = np.where( QA == QA_flag )\n Uncertainties[ indices[0], indices[1], Band ] = RelativeUncert[ i ]\n\n Uncertainties[:,:,Band] = Uncertainties[:,:,Band] * SnowQA \n\n SubDataset = None\n return Parameters, Uncertainties", "def find_fbs(self,q=99.9):\n fbs=np.zeros((self.dataset['pred_avg'].shape[0],1))\n for itrc in np.arange(0,self.dataset['pred_avg'].shape[0]):\n trc=self.dataset['pred_avg'][itrc]\n nonzero=np.where(trc!=0)[0]\n perc=np.nanpercentile(trc[list(nonzero)],q)\n potential_fbs=np.where(trc[:]>=perc)[0]\n if len(potential_fbs)!=0:\n fbs[itrc]=np.int(potential_fbs[0])\n else:\n print('FB was not found for trace id:\\t{}'.format(itrc))\n print('Completed')\n return fbs", "def load_factor_h_non_peak(self, data):\n load_factor_h = np.zeros((data['nr_of_fueltypes'], 1)) # Initialise array to store fuel\n\n # Iterate fueltypes to calculate load factors for each fueltype\n for fueltype, fueldata in enumerate(self.fuels_tot_enduses_h):\n\n '''all_hours = []\n for day_hours in self.fuels_tot_enduses_h[fueltype]:\n for h in day_hours:\n all_hours.append(h)\n maximum_h_of_day_in_year = max(all_hours)\n '''\n maximum_h_of_day_in_year = self.rs_fuels_peak_h[fueltype]\n\n average_demand_h = np.sum(fueldata) / (365 * 24) # Averae load = yearly demand / nr of days\n\n # If there is a maximum day hour\n if maximum_h_of_day_in_year != 0:\n load_factor_h[fueltype] = average_demand_h / maximum_h_of_day_in_year # Calculate load factor\n\n # Convert load factor to %\n load_factor_h *= 100\n\n return load_factor_h", "def get_flood_cost(self, postcodes):\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n values_df = self.values_df[['Postcode', 'Total Value']]\n values_df = values_df.loc[values_df.Postcode.isin(postcodes)]\n values_df = values_df.set_index('Postcode').reindex(postcodes)\n values_df = values_df.fillna(0)\n\n return np.array(values_df['Total Value'])", "def number_of_fibers(plate, **kwargs):\n import os\n import os.path\n from astropy.io import fits as pyfits\n from astropy.extern.six import integer_types\n import numpy as np\n from . 
import latest_mjd\n #\n # Get mjd values\n #\n if isinstance(plate, integer_types) or plate.shape == ():\n platevec = np.array([plate], dtype='i4')\n else:\n platevec = plate\n mjd = latest_mjd(plate, **kwargs)\n nfiber = np.zeros(mjd.size, dtype='i4')\n #\n # SDSS-I,II plates\n #\n nfiber[mjd < 55025] = 640\n #\n # Short circuit if we're done.\n #\n if (nfiber == 640).all():\n return nfiber\n #\n # Not all BOSS plates have 1000 fibers\n #\n if 'path' in kwargs:\n platelistpath = os.path.join(kwargs['path'], 'platelist.fits')\n else:\n platelistpath = os.path.join(os.getenv('BOSS_SPECTRO_REDUX'), 'platelist.fits')\n platelist = pyfits.open(platelistpath)\n platentotal = platelist[1].data.field('N_TOTAL')\n plateplate = platelist[1].data.field('PLATE')\n platemjd = platelist[1].data.field('MJD')\n platerun2d = platelist[1].data.field('RUN2D')\n platerun1d = platelist[1].data.field('RUN1D')\n platelist.close()\n if 'run2d' in kwargs:\n run2d = kwargs['run2d']\n else:\n run2d = os.getenv('RUN2D')\n if 'run1d' in kwargs:\n run1d = kwargs['run1d']\n else:\n run1d = os.getenv('RUN1D')\n for k in range(mjd.size):\n nfiber[k] = platentotal[(plateplate == platevec[k]) &\n (platemjd == mjd[k]) &\n (platerun2d == run2d) &\n (platerun1d == run1d)]\n return nfiber", "def determineOutlierThreshold(listOfLysozymeWellNames, pathrfu, pathContentsMap):\n lysozyme=[]\n results = []\n\n #the path to a directory of exported RFU result files\n files = os.listdir(pathrfu)\n pathrfu = pathrfu + \"/\"\n for data in files:\n #creates each plate\n plate = DSFPlate(pathrfu+data, pathContentsMap)\n for well in listOfLysozymeWellNames:\n lysozyme.append(plate.wells[well].fluorescence)\n for pair in combinations(lysozyme,2):\n results.append(sqrDiffWellFluoro(pair[0],pair[1]))\n total = 0\n for num in results:\n total+=num\n return total/len(results)", "def print_highest_rainfall(properly_organized_date_and_totals):\n\tprint('The highest rain fall value found was: {} on {}.'.format(properly_organized_date_and_totals[1], properly_organized_date_and_totals[0]))", "def east_asia_pacific_countries():\r\n east_asia_pacific_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in east_asia_pacific:\r\n east_asia_pacific_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in east_asia_pacific_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def _compute_bare_spectrum_constant(self):\n eigendata = []\n for subsys in self._hilbertspace:\n if subsys not in self.subsys_update_list:\n evals_count = subsys.truncated_dim\n eigendata.append(subsys.eigensys(evals_count=evals_count))\n else:\n eigendata.append(None)\n return eigendata", "def load_default_atf_data():\n df = load_dataframe(\"oqmd_1.2_voronoi_magpie_fingerprints\")\n return df[df['N_species'] == 2].sample(frac=0.2)", "def most_discriminating( features_df, labels_df, top=5):\n \n columns = features_df.shape[1]\n labels_df = labels_df[['file', 'candy_id']].set_index('file')\n qualities = np.zeros(columns)\n \n _left = 0\n _right = 1\n\n _c = 0\n _h = 1\n\n # globals\n cases = float(labels_df['candy_id'].count()) # total cases\n\n p_c_A = (labels_df['candy_id'] == 0).sum() / cases\n p_h_A = 1.0 - p_c_A\n\n\n for feature in range(columns):\n\n branch_cases = np.zeros(2) # total on each branch\n pi = np.zeros(2) # proportion on each 
branch\n\n split = np.array([\n #c, h\n [0, 0], #left\n [0, 0] #right\n ])\n\n for index, value in features_df[feature].iteritems():\n split[value][labels_df.loc[index][0]] += 1\n\n branch_cases[_left] = split[_left].sum()\n branch_cases[_right] = split[_right].sum()\n \n if branch_cases[_left] == 0.0 or branch_cases[_right] == 0.0:\n qualities[feature] = 0\n continue\n \n pi[_left] = branch_cases[_left] / cases\n pi[_right] = branch_cases[_right] / cases\n\n p_c_B = split[_left][_c] / branch_cases[_left]\n p_h_B = split[_left][_h] / branch_cases[_left]\n\n p_c_C = split[_right][_c] / branch_cases[_right]\n p_h_C = split[_right][_h] / branch_cases[_right]\n\n gini_tree = 1.0 - (math.pow(p_c_A, 2) + math.pow(p_h_A, 2))\n\n gini_left = 1.0 - (math.pow(p_c_B, 2) + math.pow(p_h_B, 2))\n gini_right = 1.0 - (math.pow(p_c_C, 2) + math.pow(p_h_C, 2))\n\n quality = gini_tree - pi[_left] * gini_left - pi[_right] * gini_right\n\n qualities[feature] = quality\n return list(reversed(qualities.argsort()))[:top]", "def calculate_soil_depth_probabilities(self):\n soil_depth_probabilities = []\n for y in range(self.controller.image_height_map.size):\n print(\"Calculating soil depth probabilities: Row: \" + str(y))\n row = []\n for x in range(self.controller.image_height_map.size):\n available_soil_depth = self.controller.image_edaphic_map.image[y][x]\n needed_soil_depth = self.vegetation.soil_depth_demand\n if available_soil_depth < needed_soil_depth:\n probability = available_soil_depth / needed_soil_depth\n else:\n probability = 1.0\n row.append(probability)\n soil_depth_probabilities.append(row)\n return soil_depth_probabilities", "def search_fidelity(self, p1, pw1, p2, n=10):\n uplim = max(p1+pw1+1, min([p1+15*pw1, p2])) # highest possible value for the threshold \n threshes = np.linspace(p1+pw1, uplim, n) # n points between peaks\n fid, err_fid = 0, 0 # store the previous value of the fidelity\n for thresh in threshes[1:]: # threshold should never be at the background peak p1\n f, fe = self.get_fidelity(thresh) # calculate fidelity for given threshold\n if f > fid:\n fid, err_fid = f, fe\n self.thresh = thresh # the threshold at which there is max fidelity\n if fid > 0.9999:\n break\n # set to max value, round to 4 d.p.\n self.fidelity, self.err_fidelity = np.around([fid, err_fid] , 4)", "def get_absolute_humidity(region: int) -> np.ndarray:\n\n return read_conditions.read_absolute_humidity(region)", "def ffgs_regions():\n return [\n ('Hispaniola', 'hispaniola'),\n ('Central America', 'centralamerica')\n ]", "def explore_FAAM_aerosol_data():\n # -- PCASP\n dsPCASP = get_FAAM_mineral_dust_calibration(instrument='PCASP',\n rtn_values=False)\n # -- CDP\n dsCDP = get_FAAM_mineral_dust_calibration(instrument='CDP',\n rtn_values=False)\n # only consider \"potential dust\" above a certain size?\n # Use 100 um for now", "def generateBounds(regionFilename, latitudeRange, longitudeRange): \n rastData = Dataset(regionFilename)\n\n #setting up values for raster data\n latsRast = np.array(rastData[\"lat\"][:])\n lonsRast = np.array(rastData[\"lon\"][:])\n regionOfInterest = np.array(rastData[\"Band1\"][:][:])\n\n\n regionArray = np.zeros((len(longitudeRange),len(latitudeRange)))\n\n\n for lat in latitudeRange:\n closestLatIndex = np.where( np.abs(latsRast-lat) == np.abs(latsRast-lat).min())[0][0]\n for lon in longitudeRange:\n closestLonIndex = np.where( np.abs(lonsRast-lon) == np.abs(lonsRast-lon).min())[0][0]\n\n #If lat long of MERRA data box is offshore or in region (values 1 in raster) set them equal to 1 for 
master Array, else they are left as zeros\n if (regionOfInterest[closestLatIndex][closestLonIndex] == 1):\n latIndex = np.where(latitudeRange == lat)[0][0]\n lonIndex = np.where(longitudeRange == lon)[0][0]\n regionArray[lonIndex][latIndex] = 1\n\n\n #for debugging\n ''' \n ax = sns.heatmap(regionArray)\n plt.show()\n '''\n return regionArray", "def roof_measurements():\n \n # roof measurements from excel\n path_inputs = '/Users/bekah/Desktop/class/be523/modeling_project/roof_function.xlsx'\n sheet_name_setup = 'roof'\n roof = pd.read_excel(path_inputs, sheetname = sheet_name_setup)\n \n # make arrays for east and west roof sections\n x_west = roof['x_west']\n z_west = roof['z_west']\n x_east = roof['x_east']\n z_east = roof['z_east']\n \n return x_west, z_west, x_east, z_east", "def determine_floor_assignments():\n floors = bottom_floor, _, _, _, top_floor = range(1, 6)\n orderings = itertools.permutations(floors, len(floors))\n\n return next(\n [Hopper, Kay, Liskov, Perlis, Ritchie]\n for (Hopper, Kay, Liskov, Perlis, Ritchie) in orderings\n if Hopper != top_floor\n if Kay != bottom_floor\n if Liskov not in (bottom_floor, top_floor)\n if higher_floor(Perlis, Kay)\n if not adjacent_floor(Ritchie, Liskov)\n if not adjacent_floor(Liskov, Kay)\n )", "def _compute_bare_spectrum_constant(self) -> List[Tuple[ndarray, ndarray]]:\n eigendata = []\n for subsys in self._hilbertspace:\n if subsys not in self.subsys_update_list:\n evals_count = subsys.truncated_dim\n eigendata.append(subsys.eigensys(evals_count=evals_count))\n else:\n eigendata.append(None) # type: ignore\n return eigendata", "def get_ergodic_region(self):\n return [self.f1(self.rho),self.f0(self.rho)]", "def find_bandgap(bandsdata, number_electrons=None, fermi_energy=None):\n\n def nint(num):\n \"\"\"\n Stable rounding function\n \"\"\"\n if num > 0:\n return int(num + 0.5)\n else:\n return int(num - 0.5)\n\n if fermi_energy and number_electrons:\n raise EitherNumberOfElectronsOrFermiEnergyError()\n\n assert bandsdata.units == \"eV\"\n stored_bands = bandsdata.get_bands()\n\n if len(stored_bands.shape) == 3:\n # I write the algorithm for the generic case of having both the\n # spin up and spin down array\n\n # put all spins on one band per kpoint\n bands = np.concatenate(list(stored_bands), axis=1)\n else:\n bands = stored_bands\n\n # analysis on occupations:\n if fermi_energy is None:\n num_kpoints = len(bands)\n\n if number_electrons is None:\n try:\n _, stored_occupations = bandsdata.get_bands(also_occupations=True)\n except KeyError as exc:\n raise FermiEnergyOrOccupationsNotPresentError() from exc\n\n # put the occupations in the same order of bands, also in case of multiple bands\n if len(stored_occupations.shape) == 3:\n # I write the algorithm for the generic case of having both the\n # spin up and spin down array\n\n # put all spins on one band per kpoint\n occupations = np.concatenate(list(stored_occupations), axis=1)\n else:\n occupations = stored_occupations\n\n # now sort the bands by energy\n # Note: I am sort of assuming that I have an electronic ground state\n\n # sort the bands by energy, and reorder the occupations accordingly\n # since after joining the two spins, I might have unsorted stuff\n bands, occupations = (\n np.array(y)\n for y in zip(\n *[\n zip(*j)\n for j in [\n sorted(\n zip(i[0].tolist(), i[1].tolist()),\n key=lambda x: x[0],\n )\n for i in zip(bands, occupations)\n ]\n ]\n )\n )\n number_electrons = int(\n round(sum([sum(i) for i in occupations]) / num_kpoints)\n )\n\n homo_indexes = [\n 
np.where(np.array([nint(_) for _ in x]) > 0)[0][-1]\n for x in occupations\n ]\n if (\n len(set(homo_indexes)) > 1\n ): # there must be intersections of valence and conduction bands\n return False, None, None, None\n else:\n homo = [_[0][_[1]] for _ in zip(bands, homo_indexes)]\n try:\n lumo = [_[0][_[1] + 1] for _ in zip(bands, homo_indexes)]\n except IndexError as exc:\n raise NeedMoreBandsError() from exc\n\n else:\n bands = np.sort(bands)\n number_electrons = int(number_electrons)\n\n # find the zero-temperature occupation per band (1 for spin-polarized\n # calculation, 2 otherwise)\n number_electrons_per_band = 4 - len(stored_bands.shape) # 1 or 2\n # gather the energies of the homo band, for every kpoint\n homo = [\n i[number_electrons / number_electrons_per_band - 1] for i in bands\n ] # take the nth level\n try:\n # gather the energies of the lumo band, for every kpoint\n lumo = [\n i[number_electrons / number_electrons_per_band] for i in bands\n ] # take the n+1th level\n except IndexError as exc:\n raise NeedMoreBandsError() from exc\n\n if number_electrons % 2 == 1 and len(stored_bands.shape) == 2:\n # if #electrons is odd and we have a non spin polarized calculation\n # it must be a metal and I don't need further checks\n return False, None, None, None\n\n # if the nth band crosses the (n+1)th, it is an insulator\n gap = min(lumo) - max(homo)\n if gap == 0.0:\n return False, 0.0, None, None\n elif gap < 0.0:\n return False, gap, None, None\n else:\n return True, gap, max(homo), min(lumo)\n\n # analysis on the fermi energy\n else:\n # reorganize the bands, rather than per kpoint, per energy level\n\n # I need the bands sorted by energy\n bands.sort()\n\n levels = bands.transpose()\n max_mins = [(max(i), min(i)) for i in levels]\n\n if fermi_energy > bands.max():\n raise FermiEnergyAndBandsEnergiesError(where=\"above\")\n if fermi_energy < bands.min():\n raise FermiEnergyAndBandsEnergiesError(where=\"below\")\n\n # one band is crossed by the fermi energy\n if any(i[1] < fermi_energy and fermi_energy < i[0] for i in max_mins):\n return False, 0.0, None, None\n\n # case of semimetals, fermi energy at the crossing of two bands\n # this will only work if the dirac point is computed!\n elif any(i[0] == fermi_energy for i in max_mins) and any(\n i[1] == fermi_energy for i in max_mins\n ):\n return False, 0.0, None, None\n # insulating case\n else:\n # Take the max of the band maxima below the fermi energy.\n homo = max([i[0] for i in max_mins if i[0] < fermi_energy])\n # Take the min of the band minima above the fermi energy.x\n lumo = min([i[1] for i in max_mins if i[1] > fermi_energy])\n\n gap = lumo - homo\n if gap <= 0.0:\n raise WrongCodeError()\n return True, gap, homo, lumo", "def _calculate_percentile_cutoff(run_numbers):\n mcp_values = []\n andor_values = []\n for run_number in run_numbers:\n current_data_path = ''.join([DATA_PATH, 'run', str(run_number), 'allevts.h5'])\n f = h5py.File(current_data_path, 'r')\n current_phot = _get_photon_energy(f, run_number)\n current_mcp = np.array(f['Acqiris2']['acq'])\n current_mcp = current_mcp[(current_phot > 781) & (current_phot < 782)]\n mcp_values.extend(current_mcp)\n current_andor = np.array(f['Andor']['signal'])\n current_andor = current_andor[(current_phot > 781) & (current_phot < 782)]\n andor_values.extend(current_andor)\n #plt.figure()\n #plt.scatter(mcp_values, andor_values)\n mcp_percentile_cutoff = min([percentileofscore(andor_values, 4000), 99.9])\n return mcp_percentile_cutoff" ]
[ "0.5840573", "0.5451073", "0.5239133", "0.51064956", "0.4967001", "0.49006256", "0.4847061", "0.48315755", "0.48241347", "0.47855008", "0.47808293", "0.47772777", "0.4758176", "0.4740778", "0.47345486", "0.47230184", "0.4712612", "0.47092313", "0.47091162", "0.4705005", "0.46939307", "0.4668451", "0.46641883", "0.46625537", "0.4654693", "0.46366385", "0.46132806", "0.46130425", "0.46110424", "0.4584022" ]
0.75808704
0
Get an array of flood risk probabilities from a sequence of postcodes. Probability is ordered High>Medium>Low>Very low>Zero. Flood risk data is extracted from the `Tool` flood risk file.
def get_sorted_flood_probability(self, postcodes): # Fix evil postcodes postcodes = clean_postcodes(postcodes) # Get latitude and longitude output = self.get_lat_long(postcodes) # Returns latitude,longitude pairs in an array lat_long = pd.DataFrame( {'Postcode':postcodes, 'latitude':output[:, 0], 'longitude':output[:, 1]}) # Delete the wrong format of postcode lat_long = lat_long.dropna(how='any') latitude = np.array(lat_long.latitude) longitude = np.array(lat_long.longitude) # Returns Eastings and Northings in an array output_2 = geo.get_easting_northing_from_lat_long(latitude, longitude) # Returns array of flood risk probabilities output_3 = self.get_easting_northing_flood_probability(output_2[0], output_2[1]) # New column in dataframe containing the probabilities lat_long['Probability Band'] = output_3 # Removing invalid postcodes lat_long = lat_long.dropna(how='any') # Removing duplicates lat_long = lat_long.drop_duplicates(subset='Postcode') # Sort by Probability Bands # add variable ordered to sort later by Xun Xie lat_long['Probability Band'] = pd.Categorical( lat_long['Probability Band'], categories=["High", "Medium", "Low", "Very Low", "Zero"], ordered=True) #add sort firstly by Probability Band and then sort secondly by Postcode lat_long = lat_long.sort_values(by=['Probability Band', 'Postcode'], ascending=[True, True]) lat_long = lat_long.set_index('Postcode') return lat_long # Make Postcode the Index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def psipred(infile, sequence):\n aa2sec = {\n 'H': [1, 0, 0],\n 'E': [0, 1, 0],\n 'C': [0, 0, 1]\n }\n result = []\n with open(infile, 'r') as fh:\n for line in fh:\n if line.startswith('Pred:'):\n spl = line.strip().split(' ')\n if len(spl) < 2:\n continue\n for aa in spl[1]:\n result.append(aa2sec[aa])\n\n return np.array([result])", "def get_sorted_annual_flood_risk(self, postcodes):\n\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n # Get lat, long of postcodes\n arr = self.get_lat_long(postcodes)\n lat = arr[:, 0] # Latitude\n lng = arr[:, 1] # Longitude\n\n # Convert lat, long -> easting, northing\n tem = geo.get_easting_northing_from_lat_long(lat, lng, radians=False)\n eos = tem[0] # Easting\n nos = tem[1] # Northing\n\n # Get our data frame of postcodes and risks\n prob_band = self.get_easting_northing_flood_probability(eos, nos)\n flood_risk = self.get_annual_flood_risk(postcodes, prob_band)\n risk_df = pd.DataFrame({'Postcode':postcodes, 'Flood Risk':flood_risk})\n\n # Clean up data frame\n risk_df = risk_df.drop_duplicates()\n risk_df = risk_df.set_index('Postcode')\n risk_df = risk_df.sort_values(by=['Flood Risk', 'Postcode'], ascending=[False, True])\n\n return risk_df", "def profbval_strict(infile, sequence):\n result = np.zeros((1, len(sequence), 1))\n with open(infile, \"r\") as fh:\n it = 0\n for line in fh:\n if not line.startswith(\"number\"):\n pred_str = line.strip().split()[5]\n if pred_str == \"F\":\n result[0, it, 0] = 1\n it += 1\n\n return result", "def get_flood_cost(self, postcodes):\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n values_df = self.values_df[['Postcode', 'Total Value']]\n values_df = values_df.loc[values_df.Postcode.isin(postcodes)]\n values_df = values_df.set_index('Postcode').reindex(postcodes)\n values_df = values_df.fillna(0)\n\n return np.array(values_df['Total Value'])", "def get_probs(self):\n\t\tprobArray = []\n\t\tfor combination in self.codepool:\n\t\t\tif self.feasible(combination):\n\t\t\t\tprobArray.append(self.get_probability(combination))\n\t\t\telse:\n\t\t\t\tprobArray.append(0)\n\t\tprobArray = np.array(probArray) / np.sum(probArray)\n\t\treturn probArray", "def get_annual_flood_risk(self, postcodes, probability_bands):\n #get cost_value\n cost_value = self.get_flood_cost(postcodes)\n\n #create Dataframe for replacing corresonding value\n risk_df = pd.DataFrame({'Probability Band': probability_bands})\n total_df = risk_df.replace(\n {'High':0.1, 'Medium': 0.02, 'Low': 0.01, 'Very Low': 0.001, 'Zero': 0})\n pro_ser = np.array(total_df['Probability Band'])\n\n #compute result\n annual = pro_ser * cost_value * 0.05\n\n return annual", "def get_note_probabilities(self, peaks):\n notes = []\n \n for p in peaks:\n harmonics = self.get_harmonics(p[0])\n probability = self.test_harmonics(peaks, harmonics)\n \n if probability > self.probability_threshold:\n notes.append(tuple([p[0], probability]))\n\n return notes", "def filtro_probs(prediccion,p_min):\n clases = []\n for probabilidad in prediccion:\n if probabilidad[1]>=p_min:\n clases.append(probabilidad)\n else:\n clases.append(\"-\")\n return clases", "def characters(probabilities):\n return [id2char(c) for c in np.argmax(probabilities, 1)]", "def characters(probabilities):\n return [id2char(c) for c in np.argmax(probabilities, 1)]", "def get_preds(img_path):\n # load image\n img = Image.open(img_path).convert(\"RGB\")\n # process it\n x = t(img)\n # get in in the right format\n x = Variable(x).unsqueeze(0)\n # predictions\n output = 
model(x)\n # decode\n output = decode(output.cpu().data.numpy()[0])\n\n # filter\n # return pred, proba\n return output", "def prof(infile, sequence):\n aa2sec = {\n 'H': [1, 0, 0],\n 'E': [0, 1, 0],\n 'L': [0, 0, 1]\n }\n result = []\n with open(infile, 'r') as fh:\n for line in fh:\n if not line.startswith('#') and not line.startswith('No'):\n aa = line.strip().split()[3]\n result.append(aa2sec[aa])\n\n return np.array([result])", "def characters(probabilities):\n # argmax for the most likely character\n return [id2char(c) for c in np.argmax(probabilities, 1)]", "def parse_preds(preds):\n _preds = (preds > 0.5).astype(int)\n _preds = _preds * 255\n print(_preds.shape)\n return _preds", "def read_pgm(pgmf):\n assert pgmf.readline() == 'P5\\n'\n (width, height) = [int(i) for i in pgmf.readline().split()]\n depth = int(pgmf.readline())\n assert depth <= 255\n raster = []\n for y in range(height):\n row = []\n for y in range(width):\n row.append(ord(pgmf.read(1)))\n raster.append(row)\n return raster", "def _process(self, data: np.ndarray) -> np.ndarray:\n probabilities = np.empty(data.size, dtype=object)\n\n for idx, counts_dict in enumerate(data):\n shots = sum(counts_dict.values())\n freq = counts_dict.get(self._outcome, 0)\n alpha_posterior = [freq + self._alpha_prior[0], shots - freq + self._alpha_prior[1]]\n alpha_sum = sum(alpha_posterior)\n\n p_mean = alpha_posterior[0] / alpha_sum\n p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)\n\n probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))\n\n return probabilities", "def calc_ppfd_clf(irr_values):\n ppfd_values = [calculate_ppfd(round(float(v))) for v in irr_values]\n total_count = len(ppfd_values)\n count = [0, 0, 0]\n for v in ppfd_values:\n if v <= 300:\n count[0] += 1\n elif v <= 600:\n count[1] += 1\n else:\n count[2] += 1\n\n # convert to %\n count = [round(c * 100 / total_count, 2) for c in count]\n\n return ppfd_values, {'low': count[0], 'medium': count[1], 'high': count[2]}", "def dealer_probs():\n # Pdf of any current hand (value, hard) and final value; p(v_f | v_c) where v_f = final value, v_c = current value\n probabilities = {}\n\n # End nodes: (value, True) for value >= 17 and (value, False) for value > 17\n # Dependencies (in order of increasing requirements):\n # Hard values, value >= 11, possiblity of bust, no possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value\n # Soft values, 17 >= value >= 11 (value, False) depends on (value', False) for 17 >= value' > value, (value', True) for 17 > value' > 11\n # Hard values, 11 > value >= 2 , no possibility of bust, possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value and (value', False) for 17 >= value' > 13\n\n\n # End nodes\n for value in xrange(17, 22):\n probabilities[(value, True)] = {value: 1.0}\n if value == 17: continue # on soft 17, dealer will still hit\n probabilities[(value, False)] = {value: 1.0}\n\n # Hard values, 17 > value >= 11, possibility of bust, no possibility of going soft with an ace\n for value in xrange(16, 10, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(1, min(10, 21-value)+1):\n next_prob = probabilities[(value + next_card, True)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Soft values, 17 >= value >= 11\n for value in xrange(17, 10, -1):\n probabilities[(value, False)] = {}\n 
current_prob = probabilities[(value, False)]\n for next_card in xrange(1, 11):\n next_value = value + next_card\n hard = False\n if next_value > 21:\n next_value -= 10\n hard = True\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Hard values, 11 > value >= 2, no possibility of bust, possibility of going soft with an ace\n for value in xrange(10, 1, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(2, 12):\n next_value = value + next_card\n hard = (next_card != 11)\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n return probabilities", "def get_gold_probdist():\n\n # Read in the dataset as a pandas dataframe.\n card_data_annot = gspd.read_in_categorised()\n\n # Based on the frequencies of each category in the data, create probability distribution and return.\n probdist_dict = gspd.freq_dist_to_prob_dist(card_data_annot)\n return probdist_dict", "def read_prnu_files():\n file_path = r'C:\\Users\\nmishra\\Workspace\\TEMPO\\PRNU_map\\PRNU_map_Median'\n prnu_mask_a = np.genfromtxt(file_path +'/' + 'Quad A_Final_PRNU.csv',\n delimiter=',')\n prnu_mask_b = np.genfromtxt(file_path +'/' + 'Quad B_Final_PRNU.csv',\n delimiter=',')\n prnu_mask_c = np.genfromtxt(file_path +'/' + 'Quad C_Final_PRNU.csv',\n delimiter=',')\n prnu_mask_d = np.genfromtxt(file_path +'/' + 'Quad D_Final_PRNU.csv',\n delimiter=',')\n prnu_mask = [prnu_mask_a, prnu_mask_b, prnu_mask_c, prnu_mask_d]\n return prnu_mask", "def calculate_soil_demand_probabilities(self):\n soil_damand_probabilities = []\n for y in range(self.controller.image_height_map.size):\n print(\"Calculating soil demand probabilities: Row: \" + str(y))\n row = []\n for x in range(self.controller.image_height_map.size):\n if self.vegetation.soil_demand.id == self.controller.soil_ids_map.image[y][x]:\n probability = 1.0\n else:\n probability = 0.0\n row.append(probability)\n soil_damand_probabilities.append(row)\n return soil_damand_probabilities", "def read_patch(test_pattern=TEST_PATTERN,\n ralims=SCENE_RA_RANGE, declims=SCENE_DEC_RANGE):\n import glob\n files = glob.glob(test_pattern)\n # This is a list of postage stamp objects, where the image data is restricted to the patch of interest\n stamps = [read_exposure(fn, ralims, declims, mask=True) \n for fn in files]\n\n return stamps", "def PIL_series(self, map_list, pos_g_val=100, neg_g_val=-100, size=4):\n pil_maps = []\n success_count = 0\n\n for i, sub_map in enumerate(map_list):\n pos_map, neg_map = self.dt.identify_pos_neg_region(sub_map, pos_gauss=pos_g_val, neg_gauss=neg_g_val)\n\n pos_edge = self.dt.edge_detection(pos_map)\n neg_edge = self.dt.edge_detection(neg_map)\n\n pos_dil_edge = self.dt.buff_edge(pos_edge, size=size)\n neg_dil_edge = self.dt.buff_edge(neg_edge, size=size)\n\n pil_maps.append(self.PIL_extraction(pos_dil_edge, neg_dil_edge, sub_map))\n\n success_count += 1\n\n print(\"Number of Detected Candidate RoPIs: \", success_count)\n\n return pil_maps", "def fs_probability(self, fs):\n\n\t\tprobs = []\n\t\tfor c in fs: probs.append(self.get_probability(c))\n\t\tprobs /= np.sum(probs)\n\t\treturn probs", "def priorProbabilities():\r\n\ttotal = 0.0\r\n\tpos = 0.0\r\n\tneg = 0.0\r\n\r\n\t# Count the amount of positives and negatives in the training data\r\n\tfor 
item in trainingData:\r\n\t\ttotal += 1\r\n\t\tif item[1] == '0':\r\n\t\t\tpos +=1\r\n\t\tif item[1] == '1':\r\n\t\t\tneg +=1\r\n\t\t\t\r\n\t\t\t\r\n\t# Return the positive and negative probabilities \r\n\tposProb = float(pos / total * 100)\r\n\tnegProb = float(neg / total * 100)\r\n\r\n\t\r\n\t\r\n\treturn posProb, negProb", "def get_non_adjacent_crops_probabilities(dp: DataProvider, image_type: ImageType, t):\n comparator = image_type_to_t_to_comparator[image_type][t]\n inputs = dp.get_fish_images() if image_type == ImageType.IMAGES else dp.get_docs_images()\n inputs = shred_and_resize_to(inputs, t, (comparator.width, comparator.height))\n non_adj_probabilities = []\n for stacked_shreds in inputs:\n left_idx, right_idx = 0, 1\n while left_idx + 1 == right_idx:\n left_idx, right_idx = tuple(np.random.choice(t ** 2, 2, replace=False))\n softmax = comparator.predict_is_left_probability([stacked_shreds[left_idx]], [stacked_shreds[right_idx]])\n non_adj_probabilities.append(softmax[0][1])\n\n return non_adj_probabilities", "def get_thresholdtable_from_fpr(scores,labels, fpr_list):\n threshold_list = []\n live_scores = []\n for score, label in zip(scores,labels):\n if label == 0:\n live_scores.append(float(score))\n live_scores.sort(reverse=True)\n live_nums = len(live_scores)\n for fpr in fpr_list:\n i_sample = int(fpr * live_nums)\n i_sample = max(1, i_sample)\n threshold_list.append(live_scores[i_sample - 1])\n return threshold_list", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. \n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)", "def calculate_soil_depth_probabilities(self):\n soil_depth_probabilities = []\n for y in range(self.controller.image_height_map.size):\n print(\"Calculating soil depth probabilities: Row: \" + str(y))\n row = []\n for x in range(self.controller.image_height_map.size):\n available_soil_depth = self.controller.image_edaphic_map.image[y][x]\n needed_soil_depth = self.vegetation.soil_depth_demand\n if available_soil_depth < needed_soil_depth:\n probability = available_soil_depth / needed_soil_depth\n else:\n probability = 1.0\n row.append(probability)\n soil_depth_probabilities.append(row)\n return soil_depth_probabilities", "def get_posterior_model_probabilities(self, mode='BIC'):\n # Note: assumes uniform prior!\n bf = np.exp(self.get_log_Bayes_factor(mode))\n if np.isinf(bf):\n return {'pmc': 0.0, 'pmd': 1.0}\n else:\n pmd = bf / (1+bf)\n pmc = 1 - pmd\n return {'pmc': pmc, 'pmd': pmd}" ]
[ "0.6149525", "0.57030034", "0.53762126", "0.5350772", "0.5320058", "0.5255733", "0.52238405", "0.52073324", "0.51705116", "0.51705116", "0.50618285", "0.50456816", "0.50339997", "0.50247955", "0.4985709", "0.4939108", "0.4907953", "0.49060336", "0.485977", "0.48142055", "0.48099306", "0.4784238", "0.47816914", "0.47793397", "0.47746876", "0.4771641", "0.47708264", "0.47421446", "0.47349313", "0.4728534" ]
0.64648455
0
Get an array of estimated cost of a flood event from a sequence of postcodes.
def get_flood_cost(self, postcodes): # Fix evil postcodes postcodes = clean_postcodes(postcodes) values_df = self.values_df[['Postcode', 'Total Value']] values_df = values_df.loc[values_df.Postcode.isin(postcodes)] values_df = values_df.set_index('Postcode').reindex(postcodes) values_df = values_df.fillna(0) return np.array(values_df['Total Value'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cost_func(plist):\n\t\tgamma, alpha = plist\n\t\tk = ac.Moffat2DKernel(gamma, alpha, x_size=nx, y_size=ny)\n\n\t\tarr_out_predict = ac.convolve(arr_in, k)\n\n\t\tarr_out_fit, arr_out_predict_fit = match_dimension(arr_out, arr_out_predict)\n\t\tdiff = (arr_out_fit - arr_out_predict_fit)*scale_factor\n\n\t\treturn np.sum(diff**2)/diff.size", "def get_sorted_flood_probability(self, postcodes):\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n # Get latitude and longitude\n output = self.get_lat_long(postcodes) # Returns latitude,longitude pairs in an array\n lat_long = pd.DataFrame(\n {'Postcode':postcodes, 'latitude':output[:, 0], 'longitude':output[:, 1]})\n\n # Delete the wrong format of postcode\n lat_long = lat_long.dropna(how='any')\n latitude = np.array(lat_long.latitude)\n longitude = np.array(lat_long.longitude)\n\n # Returns Eastings and Northings in an array\n output_2 = geo.get_easting_northing_from_lat_long(latitude, longitude)\n\n # Returns array of flood risk probabilities\n output_3 = self.get_easting_northing_flood_probability(output_2[0], output_2[1])\n\n # New column in dataframe containing the probabilities\n lat_long['Probability Band'] = output_3\n\n # Removing invalid postcodes\n lat_long = lat_long.dropna(how='any')\n # Removing duplicates\n lat_long = lat_long.drop_duplicates(subset='Postcode')\n\n # Sort by Probability Bands\n # add variable ordered to sort later by Xun Xie\n lat_long['Probability Band'] = pd.Categorical(\n lat_long['Probability Band'],\n categories=[\"High\", \"Medium\", \"Low\", \"Very Low\", \"Zero\"], ordered=True)\n #add sort firstly by Probability Band and then sort secondly by Postcode\n lat_long = lat_long.sort_values(by=['Probability Band', 'Postcode'], ascending=[True, True])\n lat_long = lat_long.set_index('Postcode')\n\n return lat_long # Make Postcode the Index", "def proba_fc(c_pred,f_pred,dataset):\n p = np.zeros(10)\n for i in range(10):\n if dataset =='cifar10':\n if i <4:\n p[i] = (c_pred[0])*(f_pred[i]/np.sum(f_pred[0:4]))\n else:\n p[i] = (c_pred[1])*(f_pred[i]/np.sum(f_pred[4:]))\n else:\n if i<5:\n p[i] = (c_pred[0])*(f_pred[i]/np.sum(f_pred[0:5]))\n else:\n p[i] = (c_pred[1])*(f_pred[i]/np.sum(f_pred[5:]))\n return(p)", "def get_cost(self) -> numpy.ndarray:\n def product(*args, repeat=1):\n # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy\n # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111\n pools = [list(pool) for pool in args] * repeat\n result = [[]]\n for pool in pools:\n result = [x + [y] for x in result for y in pool]\n for prod in result:\n yield list(prod)\n\n # Number of edges\n M = self.num_edges\n # Number of nodes\n N = self.num_nodes\n # Adjacency matrix\n A = networkx.adjacency_matrix(self.graph).todense()\n\n # Generate a list of all possible n‐tuples of elements from {1,-1} and\n # organize them as a (2^n x n) matrix. 
In other words create all\n # possible solutions to the problem.\n s = numpy.array(list(product([1, -1], repeat=N)))\n\n # Construct the the cost function for Max Cut: C=1/2*Sum(Z_i*Z_j)-M/2\n # Note: This is the minimization version\n return 1 / 2 * (numpy.diag([email protected](A)@s.T) - M)", "def calories_protein(og, fg):\n\n return 0.994 * fg * real_extract(og, fg)", "def costfxn(position, attr_keys, d_set):\n pattern = decodegp(attr_keys, position)\n temp_bin = np.array([])\n for gi in pattern.gradual_items:\n arg = np.argwhere(np.isin(d_set.valid_bins[:, 0], gi.gradual_item))\n if len(arg) > 0:\n i = arg[0][0]\n valid_bin = d_set.valid_bins[i]\n if temp_bin.size <= 0:\n temp_bin = valid_bin[1].copy()\n else:\n temp_bin = np.multiply(temp_bin, valid_bin[1])\n bin_sum = np.sum(temp_bin)\n if bin_sum > 0:\n cost = (1 / bin_sum)\n else:\n cost = 1\n return cost", "def fixed_cost(self):\n return np.einsum('i->', self.c[self.f])", "def individual_cost_function(gp, output_trajectory, output_times):\r\n # GET RIGHT PART OF ARRAY\r\n # REFORMAT\r\n # NOISE DATA\r\n # PREDICT NEW VALUES\r\n # GET COST.\r\n X_reshaped = output_times[:,None]\r\n # X_list = GPy_reformat_3D(output_times)\r\n # Y_list = GPy_reformat_3D(output_trajectory)\r\n\r\n # X_list = np.concatenate((X_reshaped,X_reshaped,X_reshaped), axis=1)\r\n X_list = X_reshaped\r\n array1 = output_trajectory.T[:, 0, None]\r\n array2 = output_trajectory.T[:, 1, None]\r\n array3 = output_trajectory.T[:, 2, None]\r\n Y_list = np.concatenate((array1,array2,array3),axis=1)\r\n Y_list = array1\r\n X_list = np.concatenate((X_reshaped,np.zeros_like(X_reshaped)),axis=1)\r\n\r\n\r\n Times_pred_1 = np.concatenate((X_reshaped, np.ones_like(X_reshaped)-1), axis=1)\r\n noise_dict1 = {'output_index': Times_pred_1[:, 1:].astype(int)}\r\n Xpred, Xvar = gp.predict(Times_pred_1,Y_metadata=noise_dict1)\r\n\r\n Times_pred_2 = np.concatenate((X_reshaped, np.ones_like(X_reshaped)), axis=1)\r\n noise_dict2 = {'output_index': Times_pred_2[:, 1:].astype(int)}\r\n Ypred, Yvar = gp.predict(Times_pred_2,Y_metadata=noise_dict2)\r\n\r\n Times_pred_3 = np.concatenate((X_reshaped, np.ones_like(X_reshaped)+1), axis=1)\r\n noise_dict3 = {'output_index': Times_pred_3[:, 1:].astype(int)}\r\n Zpred, Zvar = gp.predict(Times_pred_3,Y_metadata=noise_dict3)\r\n\r\n return gp.log_predictive_density(X_list,Y_list) # ,Y_metadata=noise_dict1) # ARRAY OF ROW INDICES, ARRAY OF COLUMN INDICES, COST\r", "def ipd_feats(state,action):\r\n f = np.zeros((5,2))\r\n a = int(action == \"C\") \r\n #dict for states to ints\r\n states = {(\"*\",\"*\"):0,(\"C\",\"D\"):1, (\"C\",\"C\"):2, (\"D\",\"C\"):3, (\"D\",\"D\"):4}\r\n f[states[state],a] = 1\r\n f = f.flatten(1)\r\n return(f)", "def cost(lossvalues):\n return np.sum(lossvalues ** 2) / (2 * lossvalues.shape[1])", "def get_postal_codes(pts):\n codigos = np.zeros((len(pts),))\n for i, p in tqdm(enumerate(pts), desc=\"GETTING POSTAL CODES\"):\n p = Point(p[0], p[1])\n for j in range(cod_postales.shape[0]):\n if cod_postales.geometry.iloc[j].contains(p):\n codigos[i] = cod_postales.geocodigo.iloc[j]\n return codigos[codigos != 0]", "def cost(self, boards, labels):\n return self._cost(boards, labels, volatile=True).data.numpy()", "def total_cost(self):\n return np.einsum('i->', self.c[self.s])", "def fn(i, j):\n if i == len(costs): return 0 # no more houses to paint \n return costs[i][j] + min(fn(i+1, jj) for jj in range(3) if j != jj)", "def gen_fitness_curves(pop,conc=None):\n\n if conc is None:\n conc = np.logspace(-3,5,num=1000)\n \n n_genotype 
= pop.n_genotype\n\n fc = {}\n for g in range(n_genotype):\n f = np.zeros(len(conc))\n i = 0\n for c in conc:\n f[i] = gen_fitness(pop,g,c) - pop.death_rate\n i+=1\n fc[g] = f\n\n return fc", "def _c2c_cost(sclst, eclst):\n def _c2c(point):\n _c_sum = 0\n for pt in eclst.points:\n _c_sum += point.frequency(pt)\n return _c_sum\n return int(sum(map(_c2c, sclst.points)))", "def DCG_p(results, topic, p):\n rel = lambda label: gold_topic_labels[topic][label]\n top_p = results[:p]\n dcg = 0\n for idx, label in enumerate(top_p):\n rank = idx + 1\n if idx == 0:\n dcg += rel(label)\n continue\n dcg += rel(label)/ math.log(rank,2)\n return dcg", "def compute(self, pointfeats, point_pos):\n descriptors = self._core_characterizer(pointfeats, point_pos)\n ## TODO: Transform dict to array and reverse\n #keys = [self.mapper[key] for key in counts.keys()]\n #descriptors[0, keys] = counts.values()\n return descriptors", "def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)", "def events(self):\n\n events = []\n # Update the totals\n self.update_totals()\n\n # Replication - total number of bacteria of metabolism * prob of replication\n events.append((self.total_f * self.rates[P_REPLICATE_FAST], lambda f: self.replicate(BACTERIA_FAST)))\n events.append((self.total_s * self.rates[P_REPLICATE_SLOW], lambda f: self.replicate(BACTERIA_SLOW)))\n events.append((self.total_intra * self.rates[P_REPLICATE_INTRACELLULAR],\n lambda f: self.replicate(BACTERIA_INTRACELLULAR)))\n\n # Metabolism change - sum of (number of bacteria of metabolism in patch * o2 tension) * prob of change\n # TODO - check if this is ok\n events.append((self.total_f_o2 * self.rates[P_CHANGE_FAST_SLOW], lambda f: self.change(BACTERIA_SLOW)))\n events.append((self.total_s_o2 * self.rates[P_CHANGE_SLOW_FAST], lambda f: self.change(BACTERIA_FAST)))\n\n # Migrate - sum of (number of bacteria of metabolism in patch * degree of patch) * prob of migration\n events.append((self.total_f_degree * self.rates[P_MIGRATE_FAST], lambda f: self.migrate(BACTERIA_FAST)))\n events.append((self.total_s_degree * self.rates[P_MIGRATE_SLOW], lambda f: self.migrate(BACTERIA_SLOW)))\n\n # Recruit mac - num of nodes * prob of recruit\n events.append((len(self.nodes()) * self.rates[P_RECRUIT], lambda f: self.recruit_mac()))\n\n # Death of mac - total number of macs * prob of death\n events.append((self.total_mac_regular * self.rates[P_DEATH_REGULAR],\n lambda f: self.death_mac(MACROPHAGE_REGULAR)))\n events.append((self.total_mac_infected * self.rates[P_DEATH_INFECTED],\n lambda f: self.death_mac(MACROPHAGE_INFECTED)))\n\n # Mac ingest - sum of (number of bacteria of metabolism in patch * num of macrophages in patch) * prob of ingest\n events.append((self.total_regular_fast * self.rates[P_REGULAR_INGEST_FAST],\n lambda f: self.ingest(BACTERIA_FAST, MACROPHAGE_REGULAR)))\n events.append((self.total_regular_slow * self.rates[P_REGULAR_INGEST_SLOW],\n lambda f: self.ingest(BACTERIA_SLOW, MACROPHAGE_REGULAR)))\n events.append((self.total_infected_fast * self.rates[P_INFECTED_INGEST_FAST],\n lambda f: self.ingest(BACTERIA_FAST, MACROPHAGE_INFECTED)))\n events.append((self.total_infected_slow * self.rates[P_INFECTED_INGEST_SLOW],\n lambda f: self.ingest(BACTERIA_SLOW, MACROPHAGE_INFECTED)))\n\n # Activation\n events.append((self.total_activation * self.rates[P_ACTIVATION], lambda f: self.activate()))\n\n return 
events", "def Ag_density():\n # initialise no infection default for the number of infections required\n agcurves = [np.zeros(cf.endtime + 1) for inf in cf.tinf]\n # for every infection, calculate its individual effect per timepoint\n for i in range(len(cf.tinf)):\n pag = cf.dose[i] # peak\n tai = 0 # tnow after infection\n while pag > 0.01:\n pag = cf.dose[i] * math.exp(-float(tai) / cf.tdecay)\n agcurves[i][cf.tinf[i] + tai] = pag\n tai += 1\n if cf.tinf[i] + tai >= cf.endtime:\n break\n # sum up all effects\n agcurve_uncapped = np.sum(agcurves, axis=0)\n # set all values above 100% to 100%\n agcurve = [np.min([val, 1]) for val in agcurve_uncapped]\n\n return agcurve", "def get_cost(self, Y, T):\n return - np.multiply(T, np.log(Y)).sum() / Y.shape[0]", "def getReward (events_counters):\n global P_0, P_1, C_0, C_1, C_2 \n return (P_0 - C_0) * events_counters[0] - (C_0 + C_1) * events_counters[1] - (\n C_2 * events_counters[2] - P_1 * events_counters[3])", "def rate(way):\n cost = 0\n for i in range(len(way)-1):\n cost += DISTANCES[way[i]][way[i+1]]\n return cost", "def ind_sources_comb(P_prior,Y_list,lambda_forward,lambda_grad, delta):\n N = P_prior.shape[0]\n A = P_prior.shape[1]\n \n P_post_sum = np.zeros_like(P_prior)\n \n for Y in Y_list:\n \n G = lambda_grad(P_prior,Y,delta)\n\n lambda_val = lambda_forward(P_prior,Y,delta)[-1,-1]\n # compute the log lambda from gradient\n\n log_P_post = np.log(P_prior+1e-100) + G - lambda_val # formula for symbolwise MAP\n P_post_sum += np.exp(log_P_post) \n \n X = decode_from_P(P_post_sum)\n \n return X", "def get_pedestal(tbc, n_events=2000):\n arr = tbc.acquire(n_events=10000) # Getting 10K events\n\n return {\n channel: (np.mean(arr.adc[arr.channel == channel]),\n np.std(arr.adc[arr.channel == channel]))\n for channel in range(1, 12)\n }", "def get_annual_flood_risk(self, postcodes, probability_bands):\n #get cost_value\n cost_value = self.get_flood_cost(postcodes)\n\n #create Dataframe for replacing corresonding value\n risk_df = pd.DataFrame({'Probability Band': probability_bands})\n total_df = risk_df.replace(\n {'High':0.1, 'Medium': 0.02, 'Low': 0.01, 'Very Low': 0.001, 'Zero': 0})\n pro_ser = np.array(total_df['Probability Band'])\n\n #compute result\n annual = pro_ser * cost_value * 0.05\n\n return annual", "def evolve(p: Population, c: Int, s: Int, r: Int) -> List(Float):\n payoffs = []\n for i in range(c):\n p2 = p.match_up(r)\n pp = p2.payoffs()\n p3 = p2.regenerate(s)\n payoffs = payoffs + [relative_average(pp, r)]\n p = p3\n\n return payoffs", "def cost(self) -> float:", "def _construct_reg_costs(self):\n param_reg_cost = sum([T.sum(p**2.0) for p in self.joint_params])\n return param_reg_cost" ]
[ "0.5173801", "0.51564807", "0.5112312", "0.51068145", "0.5104989", "0.5104811", "0.5067886", "0.5053143", "0.5042089", "0.50331557", "0.49932286", "0.49598402", "0.49526855", "0.49343145", "0.4929131", "0.49237642", "0.49029443", "0.48845562", "0.48811215", "0.4875639", "0.487541", "0.48746073", "0.48658973", "0.48618704", "0.48575917", "0.48571467", "0.48518038", "0.48509744", "0.48365343", "0.48247495" ]
0.7135164
0
Get an array of estimated annual flood risk in pounds sterling per year of a flood event from a sequence of postcodes and flood probabilities.
def get_annual_flood_risk(self, postcodes, probability_bands): #get cost_value cost_value = self.get_flood_cost(postcodes) #create Dataframe for replacing corresonding value risk_df = pd.DataFrame({'Probability Band': probability_bands}) total_df = risk_df.replace( {'High':0.1, 'Medium': 0.02, 'Low': 0.01, 'Very Low': 0.001, 'Zero': 0}) pro_ser = np.array(total_df['Probability Band']) #compute result annual = pro_ser * cost_value * 0.05 return annual
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sorted_annual_flood_risk(self, postcodes):\n\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n # Get lat, long of postcodes\n arr = self.get_lat_long(postcodes)\n lat = arr[:, 0] # Latitude\n lng = arr[:, 1] # Longitude\n\n # Convert lat, long -> easting, northing\n tem = geo.get_easting_northing_from_lat_long(lat, lng, radians=False)\n eos = tem[0] # Easting\n nos = tem[1] # Northing\n\n # Get our data frame of postcodes and risks\n prob_band = self.get_easting_northing_flood_probability(eos, nos)\n flood_risk = self.get_annual_flood_risk(postcodes, prob_band)\n risk_df = pd.DataFrame({'Postcode':postcodes, 'Flood Risk':flood_risk})\n\n # Clean up data frame\n risk_df = risk_df.drop_duplicates()\n risk_df = risk_df.set_index('Postcode')\n risk_df = risk_df.sort_values(by=['Flood Risk', 'Postcode'], ascending=[False, True])\n\n return risk_df", "def repIpd(length,gammas,epsilon1,epsilon2):\r\n avgRewards = []\r\n for gamma in gammas: \r\n avgRewards.append(np.mean(ipd(length,gamma,epsilon1,gamma,epsilon2)))\r\n return(avgRewards)", "def get_sorted_flood_probability(self, postcodes):\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n # Get latitude and longitude\n output = self.get_lat_long(postcodes) # Returns latitude,longitude pairs in an array\n lat_long = pd.DataFrame(\n {'Postcode':postcodes, 'latitude':output[:, 0], 'longitude':output[:, 1]})\n\n # Delete the wrong format of postcode\n lat_long = lat_long.dropna(how='any')\n latitude = np.array(lat_long.latitude)\n longitude = np.array(lat_long.longitude)\n\n # Returns Eastings and Northings in an array\n output_2 = geo.get_easting_northing_from_lat_long(latitude, longitude)\n\n # Returns array of flood risk probabilities\n output_3 = self.get_easting_northing_flood_probability(output_2[0], output_2[1])\n\n # New column in dataframe containing the probabilities\n lat_long['Probability Band'] = output_3\n\n # Removing invalid postcodes\n lat_long = lat_long.dropna(how='any')\n # Removing duplicates\n lat_long = lat_long.drop_duplicates(subset='Postcode')\n\n # Sort by Probability Bands\n # add variable ordered to sort later by Xun Xie\n lat_long['Probability Band'] = pd.Categorical(\n lat_long['Probability Band'],\n categories=[\"High\", \"Medium\", \"Low\", \"Very Low\", \"Zero\"], ordered=True)\n #add sort firstly by Probability Band and then sort secondly by Postcode\n lat_long = lat_long.sort_values(by=['Probability Band', 'Postcode'], ascending=[True, True])\n lat_long = lat_long.set_index('Postcode')\n\n return lat_long # Make Postcode the Index", "def annualized_return_risk(vals):\n P = 252\n v = np.array(vals)\n vt1 = v[1:]\n vt = v[:-1]\n rets = (vt1-vt)/vt\n \n ann_return = np.mean(rets)*P\n ann_risk = np.std(rets)*np.sqrt(P)\n \n return ann_return, ann_risk", "def get_year_uncertainty(bm, num):\n am_in_use = pd.DataFrame()\n am_waste = pd.DataFrame()\n am_total = pd.DataFrame()\n y_in_use = np.empty(0)\n y_total = np.empty(0)\n for j in xrange(num):\n tmp = get_AM_by_factor()\n am_in_use[j] = tmp['in-use']\n am_waste[j] = tmp['waste']\n am_total[j] = tmp['in-use'] + tmp['waste']\n bm_rand = bm.iloc[j, :]\n # For each random couple of timelines - of Biomass and Anthropogenic mass - find the time of intersection and append to array \n y_in_use = np.append(y_in_use, intersection_time(bm_rand, am_in_use[j]))\n y_total = np.append(y_total, intersection_time(bm_rand, am_total[j]))\n return am_in_use, am_waste, am_total, y_in_use, y_total", "def repIpdTft(length,gammas,epsilon):\r\n 
avgRewards = []\r\n for gamma in gammas: \r\n avgRewards.append(np.mean(ipdTft(length,gamma,epsilon)))\r\n return(avgRewards)", "def psipred(infile, sequence):\n aa2sec = {\n 'H': [1, 0, 0],\n 'E': [0, 1, 0],\n 'C': [0, 0, 1]\n }\n result = []\n with open(infile, 'r') as fh:\n for line in fh:\n if line.startswith('Pred:'):\n spl = line.strip().split(' ')\n if len(spl) < 2:\n continue\n for aa in spl[1]:\n result.append(aa2sec[aa])\n\n return np.array([result])", "def calc_probabilities(applications):\n sum_advantage = sum(app.get_advantage() for app in applications)\n return [app.get_advantage() / sum_advantage for app in applications]", "def get_empIM(periods, siteprop, faultprop):\n\n if np.isscalar(periods):\n periods = [periods]\n\n periods = np.asarray(periods)\n empIM = np.empty((periods.size, 2))\n\n for i, T in enumerate(periods):\n siteprop.period = T\n empIM[i, 0], empIM_sigm = Bradley_2010_Sa(siteprop, faultprop)\n empIM[i, 1] = empIM_sigm[0]\n\n return empIM", "def get_flood_cost(self, postcodes):\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n values_df = self.values_df[['Postcode', 'Total Value']]\n values_df = values_df.loc[values_df.Postcode.isin(postcodes)]\n values_df = values_df.set_index('Postcode').reindex(postcodes)\n values_df = values_df.fillna(0)\n\n return np.array(values_df['Total Value'])", "def prob():\n # Get decision score for our example that came with the request\n data = flask.request.json\n #data[]\n year = data[\"year\"]\n Month = data[\"month\"]\n Day = data[\"day\"]\n species = data[\"species\"]\n lat = data[\"lattitude\"]\n lng = data[\"longitude\"]\n\n # find the closest trap to the clicked location\n closest_trap = find_closest_trap(lat,lng,trap_stations)\n trap = trap_stations[(trap_stations.trap == closest_trap) & (trap_stations.species == species)]\n \n # add 0 in front of month\n month = \"0\"+ Month if len(Month) == 1 else Month\n day = \"0\"+ Day if len(Day) == 1 else Day\n date = year + \"-\" + month + \"-\" + day\n trap[\"date\"] = date\n\n # convert datetime to str to match differing formats\n weather.station = weather.station.astype(str)\n trap.station = trap.station.astype(int).astype(str)\n weather.date = weather.date.astype(str)\n\n # merge trap & weather dataframes\n merged = trap.merge(weather,on=[\"date\",\"station\"],how=\"inner\")\n\n # convert back datetime\n merged.date = pd.to_datetime(merged.date)\n\n # extract features from datetime and dewpoint\n merged['month'] = month\n merged['year'] = year\n merged['dry'] = merged['dewpoint'].subtract(merged.wetbulb)\n merged['week'] = merged['date'].map(lambda x: str(x.isocalendar()[1]))\n # add risk feature if month is between june and sept (peak virus season)\n merged['risk'] = 1 if (int(month) > 6) and (int(month) < 9) else 0\n\n # Reorder columns & Assign to X\n merged2 = merged.drop([\"date\", \"year\", \"trap\",\"species\"],1)\n cols = [\"month\",\"week\",\"longitude\",\"latitude\",\"tmax\",\"tmin\",\"tavg\",\"depart\",\"dewpoint\",\n \"wetbulb\",\"heat\",\"cool\",\"sunrise\",\"sunset\",\"preciptotal\",\"resultspeed\",\"avgspeed\",\n \"month\",\"dry\",\"risk\"]\n X = merged2[cols]\n \n # predict probability\n pred = loaded_model.predict_proba(X)\n prob = round(pred[0][1]*100,2)\n return flask.jsonify(prob)", "def performStats(dataArray):\n yearArray = [[0,0] for i in range(20)]\n for entry in dataArray:\n oSum = 0\n nSum = 0\n for k, v in entry.old.items():\n # print(k,v)\n oSum += v\n for k,v in entry.new.items():\n # print(k,v)\n nSum += v\n entry.oldSum = 
oSum\n entry.newSum = nSum\n idx = int(entry.year)%20 #0-19 index\n yearArray[idx][0] += entry.oldSum\n yearArray[idx][1] += entry.newSum\n return yearArray", "def precipitation():\n # Create our session (link) from Python to the DB\n \n session = Session(engine)\n\n \"\"\"Return a list of all precipiation values for the last 12 months\"\"\"\n # Example # results = session.query(Passenger.name).all()\n\n\n results = session.query(Measurement.date, func.avg(Measurement.prcp)).\\\n filter(Measurement.date > '2016-08-23').\\\n group_by(Measurement.date).\\\n order_by(Measurement.date).all()\n\n # Close session\n\n session.close()\n\n prcp = []\n \n for date, average in results:\n prcp_dict = {}\n prcp_dict['Date'] = date\n prcp_dict['Avg. Precipitation'] = average\n prcp.append(prcp_dict)\n \n # jsonify the results\n return jsonify(prcp)\n\n # Convert list of tuples into normal list\n # Example # all_names = list(np.ravel(results))", "def createdog(self,imagearr):\n re = [0,1,2,3]\n re[0] = self.diff(self.gs_blur(self.sigma,imagearr))\n for i in range(1,4):\n base = self.sampling(re[i-1][2])\n re[i] = self.diff(self.gs_blur(self.sigma, base))\n return re", "def get_beam_stats(dates):\n # get_beam_stats() assumes all beams of a date are good unless told otherwise!\n beams = np.ones([len(dates), 40])\n beams[dates == '190628', :32] = 0\n beams[dates == '190722', :] = 0\n beams[dates == '190821', 4] = 0\n beams[dates == '190905', 18:22] = 0\n beams[dates == '190905', 27:34] = 0\n beams[dates == '190916', 27:] = 0\n # Big change in beam quality after 1 October 2019 (see in difference images).\n beams[dates == '191002', 8:15] = 1 # best we have at the moment, but would flip to 0 if better comes!\n beams[dates == '191002', 4:6] = 1 # best we have at the moment, but would flip to 0 if better comes!\n beams[dates == '191008', 1:16] = 0\n beams[dates == '191008', 18:22] = 0\n beams[dates == '191008', 26] = 0\n beams[dates == '191023', :] = beams[dates == '191002', :]\n beams[dates == '191120', :] = 0\n\n # *** Until we have a full beam complement ***:\n beams = np.ones([len(dates), 40])\n beams[dates == '190628', :32] = 0\n beams[dates == '190821', 4] = 1 # best we have at the moment, but would flip to 0 if better comes!\n\n return beams", "def _process(self, data: np.ndarray) -> np.ndarray:\n probabilities = np.empty(data.size, dtype=object)\n\n for idx, counts_dict in enumerate(data):\n shots = sum(counts_dict.values())\n freq = counts_dict.get(self._outcome, 0)\n alpha_posterior = [freq + self._alpha_prior[0], shots - freq + self._alpha_prior[1]]\n alpha_sum = sum(alpha_posterior)\n\n p_mean = alpha_posterior[0] / alpha_sum\n p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)\n\n probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))\n\n return probabilities", "def precipitation():\n last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n rain = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date > last_year).\\\n order_by(Measurement.date).all()", "def get_emp_pSA(periods, siteprop, faultprop):\n\n if np.isscalar(periods):\n periods = [periods]\n\n periods = np.asarray(periods)\n pSA_emp = np.empty((periods.size, 2))\n\n for i, T in enumerate(periods):\n siteprop.period = T\n pSA_emp[i, 0], pSA_emp_sigm = Bradley_2010_Sa(siteprop, faultprop)\n pSA_emp[i, 1] = pSA_emp_sigm[0]\n\n return pSA_emp", "def get_average_repro(self):\n return 
np.mean([agent.get_fledge_probability() for agent in self.agents])", "def GetGraphicAverages(diagnostic_cases, diagnostic, weeks,year, n_years):\n t = 1.96\n\n current_year = Year.objects.get(year=year)\n weeks_current_year = weeks.filter(year=current_year)\n year_ob = Year.objects.filter(year__lt=year)\n weeks = weeks.filter(year__in=year_ob)\n\n popu = 0\n\n #cases per diagnostic\n diagnostic_cases_w = diagnostic_cases\n\n #arithmetic average of the weeks / n_years\n averages = [0] * 52\n\n standard_deviations = [0] * 52\n #number of years\n\n #cases per week of the diferent years\n cases_per_weeks = [0] * 52\n\n for i in range(len(averages)):\n\n f = [0]*(n_years)\n \n\n year = 0\n\n y_idx = 0\n for w in range(len(weeks)):\n #print(y)\n if weeks[w].week == i+1:\n \n if year != weeks[w].year: # Esto no pasa nunca\n year = weeks[w].year\n cases = 0\n \n \n for p in diagnostic_cases_w:\n\n if p.week == weeks[w]:\n \n cases += p.cases\n\n f[y_idx ] = cases\n y_idx +=1\n\n averages[i] = np.average(f) #borrar\n\n standard_deviations[i] = np.std(f)\n \n cases = 0\n for week in weeks_current_year:\n if week.week == i+1:\n dia = diagnostic_cases.filter(week=week)\n \n for d in dia:\n\n cases += d.cases\n\n cases_per_weeks[i] = cases \n\n\n #array of class dots for draw the chart of averages\n dots_graphic_averages = []\n #array of class dots for draw the chart of cumulative\n dots_graphic_cumulative = []\n\n\n average_cumulative = 0\n top_rank_cumulative = 0\n cases_acumulative = 0\n lower_rank_cumulative = 0\n\n for i in range(len(standard_deviations)):\n lower_rank = 0\n top_rank = 0\n\n if n_years != 0:\n lower_rank = averages[i] - (t * standard_deviations[i]/ math.sqrt(n_years))\n top_rank = averages[i] + (t * standard_deviations[i] / math.sqrt(n_years))\n if lower_rank < 0:\n lower_rank = 0\n\n # Acumulative dots\n cases_acumulative += cases_per_weeks[i]\n average_cumulative += averages[i]\n if lower_rank >= 0:\n lower_rank_cumulative += lower_rank\n top_rank_cumulative += top_rank\n\n dots_average = DotsGraphicAverage(averages[i],i+1, lower_rank, top_rank,cases_per_weeks[i])\n dots_cumulative = DotsGraphicAverage(average_cumulative,i+1, lower_rank_cumulative, top_rank_cumulative,cases_acumulative)\n dots_graphic_averages.append(dots_average)\n dots_graphic_cumulative.append(dots_cumulative)\n\n\n return dots_graphic_averages, dots_graphic_cumulative", "def profbval_strict(infile, sequence):\n result = np.zeros((1, len(sequence), 1))\n with open(infile, \"r\") as fh:\n it = 0\n for line in fh:\n if not line.startswith(\"number\"):\n pred_str = line.strip().split()[5]\n if pred_str == \"F\":\n result[0, it, 0] = 1\n it += 1\n\n return result", "def temp_advection(bufr):\r\n items = list(bufr.items())\r\n for item in items:\r\n if item[0] == 'PROF' or item[0] == b'PROF':\r\n bufr_prof = item[1]\r\n models = list(bufr_prof.keys())\r\n num_models = len(models)\r\n dates = list(bufr_prof[list(bufr_prof.keys())[0]].keys())\r\n num_dates = len(dates)\r\n num_times = len(bufr_prof[list(bufr_prof.keys())[0]][dates[0]].keys())\r\n num_features = num_models * num_times\r\n\r\n advection_array = np.zeros((num_dates, num_features))\r\n\r\n def advection_index(V1, V2):\r\n \"\"\"\r\n The advection index measures the strength of veering/backing of wind.\r\n :param V1: array wind vector at lower model level\r\n :param V2: array wind vector at higher model level\r\n :return: index of projection of (V2 - V1) onto V1\r\n \"\"\"\r\n proj = V2 - np.dot(V1, V2) * V1 / np.linalg.norm(V1)\r\n diff = V1 - V2\r\n 
sign = np.sign(np.arctan2(diff[1], diff[0]))\r\n return sign * np.linalg.norm(proj)\r\n\r\n # Here comes the giant ugly loop.\r\n sample = 0\r\n for date in dates:\r\n feature = 0\r\n for model in models:\r\n try:\r\n for eval_date in bufr_prof[model][date].keys():\r\n items = bufr_prof[model][date][eval_date].items()\r\n for item in items:\r\n if item[0] == 'UWND' or item[0] == b'UWND':\r\n u = item[1]\r\n if item[0] == 'VWND' or item[0] == b'VWND':\r\n v = item[1]\r\n try:\r\n V1 = np.array([u[0], v[0]])\r\n V2 = np.array([u[1], v[1]])\r\n except IndexError:\r\n print('Not enough wind levels available for advection calculation; omitting...')\r\n return\r\n advection_array[sample, feature] = advection_index(V1, V2)\r\n feature += 1\r\n except KeyError: #date doesn't exist\r\n pass\r\n sample += 1\r\n\r\n return advection_array", "def main():\n\n args = get_args()\n \n patient_years_dict = {}\n\n isolates_to_seq = []\n\n with open(args.input_file, \"r\") as infile1:\n for line in infile1:\n if not line.startswith(\"PID\"):\n line_elements = line.strip().split(\"\\t\")\n patient_anumber = line_elements[1].split(\"_\")[0]\n patient_year = line_elements[2].split(\"-\")[0]\n if patient_anumber not in patient_years_dict:\n patient_years_dict[patient_anumber] = {patient_year:[line]}\n else:\n if patient_year not in patient_years_dict[patient_anumber]:\n patient_years_dict[patient_anumber][patient_year] = [line]\n if line not in patient_years_dict[patient_anumber][patient_year]:\n patient_years_dict[patient_anumber][patient_year].append(line)\n \n for patient in patient_years_dict:\n for year in patient_years_dict[patient]:\n print(f\"Checking patient {patient} and year {year}...\")\n wgs_codes = []\n pfge_codes = []\n isolate_dates = []\n isolate_lines = []\n for isolate in patient_years_dict[patient][year]:\n isolate_elements = isolate.strip().split(\"\\t\")\n wgs_codes.append(int(isolate_elements[5]))\n pfge_codes.append(int(isolate_elements[4]))\n isolate_dates.append(isolate_elements[2])\n isolate_lines.append(isolate)\n \n if any(wgs_codes):\n print(f\"\\tWGS present, moving on to next year/patient.\")\n continue\n else:\n print(f\"\\tWGS missing, checking PFGE...\\n\\tPFGE presence/absence codes for {year} are: {pfge_codes}\")\n if any(pfge_codes):\n pfge_index = pfge_codes.index(1)\n isolate_to_seq = isolate_dates[pfge_index]\n isolate_line_to_seq = isolate_lines[pfge_index]\n print(f\"\\tPFGE present, selecting first isolate with PFGE to sequence...\\n\\tIsolate to sequence is ---> {isolate_to_seq} <--- out of possible isolates {isolate_dates}.\")\n isolates_to_seq.append(isolate_line_to_seq)\n else:\n print(f\"\\tPFGE missing...\\n\\tSelecting 1st available isolate for sequencing ---> {isolate_dates[0]} <--- out of available isolates {isolate_dates}.\")\n isolates_to_seq.append(isolate_lines[0])\n\n header = f\"PID\\tADATE\\tCULTDAT\\tvalues\\tPFGE_Isolates\\tSequenced_Isolates\\n\" \n to_write = \"\".join(isolates_to_seq)\n with open(args.output_file, \"w\") as outfile1:\n outfile1.write(header + to_write)", "def desforestation_loss_year(ras):\n \"\"\"input raster path -> return stats\"\"\"\n\n # get area grid\n area_grid = raster_area_lat(ras) # true WGS84 spheroid\n\n # getting numpy object\n ras_np_raw = gdal_tif_to_numpy(ras)\n # masking data not need as further masked below\n\n # 0 - no loss, 1 - change in 2000-2001, .. 12 change 2011-2013\n years = range(0, 14)\n year_dict = dict()\n\n for year in years:\n # get subset of the year, i.e. 
all other valuse are masked\n # ras_sub = numpy.ma.masked_not_equal(ras_np_raw, year)\n\n # the mask is useful\n ras_sub_mask = numpy.ma.masked_equal(ras_np_raw, year)\n\n # use count (no mask) NOT size (including mask)\n # count_pixel = ras_sub.count()\n count_pixel = ras_sub_mask.mask.sum()\n\n # True is treated as 1\n # need to include dtype = 'float64' otherwise the calcaulate\n # will produce incorrect result (overflow?)\n\n total_area = (ras_sub_mask.mask * area_grid).sum(dtype='float64')\n\n year_dict[year] = [count_pixel, total_area]\n\n return year_dict", "def get_probs(self):\n\t\tprobArray = []\n\t\tfor combination in self.codepool:\n\t\t\tif self.feasible(combination):\n\t\t\t\tprobArray.append(self.get_probability(combination))\n\t\t\telse:\n\t\t\t\tprobArray.append(0)\n\t\tprobArray = np.array(probArray) / np.sum(probArray)\n\t\treturn probArray", "def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)", "def run(start_year, end_year, depth_from, depth_to):\n years, times, rootgrps = retrieve(1950,2018)\n \n HC = calculate_HC(rootgrps,25,31, -43, 41)\n \n months, month_avgs = monthly_avgs(HC)\n pos = str(-43)+\"N \"+str(41)+\"E\"\n \n return years, times, HC, pos, months, month_avgs", "def plot_avg_prevalence_probs(infection_times_list, infection_prob, n_nodes, bins):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n bin_centers = (bins[:-1] + bins[1:]) / 2\n dateconv = np.vectorize(dt.datetime.fromtimestamp)\n date = dateconv(bin_centers)\n prevalences = []\n for list, prob in zip(infection_times_list, infection_prob):\n for l in list:\n counts, _, _ = binned_statistic(\n x=l,\n values=l,\n bins=bins,\n statistic='count')\n\n cum_counts = np.cumsum(counts)\n prevalence = cum_counts / n_nodes\n prevalences.append(prevalence)\n\n avg_prevalence = np.array(prevalences)\n avg_prevalence = avg_prevalence.mean(0)\n\n ax.plot(date, avg_prevalence, label=prob)\n prevalences = []\n\n ax.legend()\n plt.suptitle(r'Averaged prevalence of the disease with different infection probabilities')\n ax.set_xlabel(r'time')\n ax.set_ylabel(r'averaged prevalence $\\rho(t)$')\n\n fig.autofmt_xdate(bottom=0.2, rotation=20, ha='right')\n fig.savefig(\"./plots/t2_averaged_prevalence_probs.pdf\")", "def cal_thresh(pred_prob,labels):\n mu_stds = []\n for i in range(19):\n pos_mu, pos_std = fit(pred_prob[labels==i, i])\n mu_stds.append([pos_mu, pos_std])\n return mu_stds", "def calculate_insolation_probabilities(self):\n insolation_probabilities = []\n for y in range(self.controller.image_height_map.size):\n print(\"Calculating insolation probabilities: Row: \" + str(y))\n row = []\n for x in range(self.controller.image_height_map.size):\n available_calories = self.controller.image_insolation_map.image[y][x]\n needed_calories = self.vegetation.energy_demand\n probability = self.calculate_probability(needed_calories, available_calories)\n row.append(probability)\n insolation_probabilities.append(row)\n return insolation_probabilities" ]
[ "0.59848833", "0.56781584", "0.5404115", "0.5328161", "0.52764213", "0.5244134", "0.522769", "0.51827186", "0.513963", "0.5112866", "0.51083076", "0.50649613", "0.49471208", "0.49010098", "0.48951918", "0.48927584", "0.48918253", "0.48807698", "0.48748794", "0.48567662", "0.48488516", "0.48402902", "0.48388642", "0.48151743", "0.48003465", "0.4788689", "0.47841683", "0.47839117", "0.47537103", "0.47437385" ]
0.6496352
0
Get a sorted pandas DataFrame of flood risks.
def get_sorted_annual_flood_risk(self, postcodes): # Fix evil postcodes postcodes = clean_postcodes(postcodes) # Get lat, long of postcodes arr = self.get_lat_long(postcodes) lat = arr[:, 0] # Latitude lng = arr[:, 1] # Longitude # Convert lat, long -> easting, northing tem = geo.get_easting_northing_from_lat_long(lat, lng, radians=False) eos = tem[0] # Easting nos = tem[1] # Northing # Get our data frame of postcodes and risks prob_band = self.get_easting_northing_flood_probability(eos, nos) flood_risk = self.get_annual_flood_risk(postcodes, prob_band) risk_df = pd.DataFrame({'Postcode':postcodes, 'Flood Risk':flood_risk}) # Clean up data frame risk_df = risk_df.drop_duplicates() risk_df = risk_df.set_index('Postcode') risk_df = risk_df.sort_values(by=['Flood Risk', 'Postcode'], ascending=[False, True]) return risk_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort_gefs_frame(frame):\n if frame is None:\n return frame\n else:\n return pd.DataFrame(np.sort(frame), index=frame.index)", "def get_goldilocks_dataframe(self, filename):\n df = pd.DataFrame()\n for j in range(self.n_orders):\n df_i = pd.DataFrame()\n for i in range(1, 10):\n name = self.hdus[i].name\n df_i[name] = self.hdus[i].data[j, :]\n df_i[\"order\"] = j\n df = df.append(df_i, ignore_index=True)\n keep_mask = df[df.columns[0:6]] != 0.0\n df = df[keep_mask.all(axis=1)].reset_index(drop=True)\n\n return df", "def sort_neighbors_by_site_index_i(neighbor_count_df: pd.DataFrame) -> pd.DataFrame:\n return neighbor_count_df.sort_values(by=[\"i\", \"distance_bin\", \"j\"]).reset_index(\n drop=True\n )", "def depth_g(self) -> pd.DataFrame:\n return self._load_fetch(self.DEPTH_G)", "def get_sorted_flood_probability(self, postcodes):\n # Fix evil postcodes\n postcodes = clean_postcodes(postcodes)\n\n # Get latitude and longitude\n output = self.get_lat_long(postcodes) # Returns latitude,longitude pairs in an array\n lat_long = pd.DataFrame(\n {'Postcode':postcodes, 'latitude':output[:, 0], 'longitude':output[:, 1]})\n\n # Delete the wrong format of postcode\n lat_long = lat_long.dropna(how='any')\n latitude = np.array(lat_long.latitude)\n longitude = np.array(lat_long.longitude)\n\n # Returns Eastings and Northings in an array\n output_2 = geo.get_easting_northing_from_lat_long(latitude, longitude)\n\n # Returns array of flood risk probabilities\n output_3 = self.get_easting_northing_flood_probability(output_2[0], output_2[1])\n\n # New column in dataframe containing the probabilities\n lat_long['Probability Band'] = output_3\n\n # Removing invalid postcodes\n lat_long = lat_long.dropna(how='any')\n # Removing duplicates\n lat_long = lat_long.drop_duplicates(subset='Postcode')\n\n # Sort by Probability Bands\n # add variable ordered to sort later by Xun Xie\n lat_long['Probability Band'] = pd.Categorical(\n lat_long['Probability Band'],\n categories=[\"High\", \"Medium\", \"Low\", \"Very Low\", \"Zero\"], ordered=True)\n #add sort firstly by Probability Band and then sort secondly by Postcode\n lat_long = lat_long.sort_values(by=['Probability Band', 'Postcode'], ascending=[True, True])\n lat_long = lat_long.set_index('Postcode')\n\n return lat_long # Make Postcode the Index", "def top_groups():\n groups = Group.objects.filter(country='PT').order_by('-members')[:10]\n df = pd.DataFrame.from_records(groups.values())\n return df", "def WFD(self) -> pd.DataFrame:\n return self.fdist", "def stoch_fix():\n df = stoch_test_data()\n return df", "def kdf(self) -> pd.DataFrame:\n k = self.k\n df = pd.DataFrame({\"freq\": k})\n df = df.query(\"freq > 0\")\n return df", "def fdist(self) -> pd.DataFrame:\n df = pd.DataFrame.from_dict(self, orient=\"index\").reset_index()\n df.columns = [\"type\", \"freq\"]\n df = df.sort_values(\"freq\", ascending=False).reset_index(drop=True)\n df.index = df.index + 1\n df.index.name = \"rank\"\n return df", "def data_for_sorting():\n return RaggedArray([[1, 0], [2, 0], [0, 0]])", "def sort_restos(filtered_table):\n\tdf = pd.DataFrame(filtered_table).T\n\tdf = df.sort(columns=columns['time_to_resto'])\n\n\treturn df.index", "def get_patient_df(self):\n df = pd.DataFrame(self.heartbeats)\n df.drop(columns=['cardiac_cycle'], inplace=True)\n return df", "def get_tops_df(project, tops_field='tops', columns=['UWI', 'PICK', 'MD']):\n tops_set = []\n rows = []\n for well in project:\n for t in well.data[tops_field]:\n row = [well.uwi, t.components[0]['formation'], 
t.top.middle]\n tops_set.append(t.components[0]['formation'])\n rows.append(row)\n df = pd.DataFrame(rows, columns=columns)\n return df", "def depth_c(self) -> pd.DataFrame:\n return self._load_fetch(self.DEPTH_C)", "def build_dataframe() -> pd.DataFrame:\n df = pd.DataFrame(\n np.random.randint(0, 1000, size=(1000, 6)), columns=list(\"ABCDEF\")\n )\n\n return df", "def _sort_dataframe(self, dataframe):\r\n columns = list(dataframe.columns)\r\n columns.sort()\r\n dataframe = dataframe[columns]\r\n return dataframe", "def get_sorted_allocated_seats():\n return list(dfSeatsPerPartyBy2ndVotes.sort_values(by=[\"party\"])[\"allocated_seats\"])", "def find_top_seven_routes(self):\n df = self.filter_according_to_travel_day('Sunday')\n # Group the dataset according to the frequency of the travel route\n df =df.groupby([\"travel_from\", \"travel_to\"]).size().reset_index(name=\"Frequency\")\n #Sort the dataset according to the frequency in descending order\n df =df.sort_values(\"Frequency\", ascending=False)[:7]\n return df", "def data_missing_for_sorting():\n return RaggedArray([[1, 0], [], [0, 0]])", "def dataframe(self):\n frames = []\n for game in self.__iter__():\n df = game.dataframe\n if df is not None:\n frames.append(df)\n if frames == []:\n return None\n return pd.concat(frames)", "def analyse_and_sort(self, df):\n if (type(df) is pd.Series):\n df = df.to_frame(\"score\")\n elif (type(df) is pd.DataFrame):\n df.columns = [\"score\"]\n df = self._filter(df)\n df = self._argrelmax(df)\n df = self._drop_close_extrema(df) # by = [deb1[0]]\n return df.sort_values(by=[\"score\"])[::-1]", "def get_sorted_parties():\n return list(dfSeatsPerPartyBy2ndVotes.sort_values(by=[\"party\"])[\"party\"])", "def coerce( self ):\n df = self.copy()\n gcond = ['neighbor', 'pdb'] if 'source' not in df.columns else ['neighbor', 'pdb', 'source']\n for frame_id, frame in df.groupby('frame'):\n g = frame.groupby(gcond)\n neighbors = len(g)\n neighbor = list(g.ngroup() + 1)\n position = list(g.cumcount() + frame_id)\n df.loc[(df['frame'] == frame_id), 'neighbors'] = [neighbors] * frame.shape[0]\n df.loc[(df['frame'] == frame_id), 'neighbor'] = neighbor\n df.loc[(df['frame'] == frame_id), 'position'] = position\n return df", "def df(self):\n data = {\"sites\": self.sites, \"values\": self.values,\n \"stdeviations\": self.stdeviations}\n return pd.DataFrame(data, columns=[\"sites\", \"values\", \"stdeviations\"])", "def depth_t(self) -> pd.DataFrame:\n return self._load_fetch(self.DEPTH_T)", "def data_with_fips(self) -> pd.DataFrame:\n return self.data", "def df_sdb(self):\n df = pd.DataFrame(index=self.sdb_net.keys())\n df[\"out\"] = [-q for q in self.sdb_out.values()] # assign negative values\n df[\"in\"] = self.sdb_in.values()\n df[\"net\"] = self.sdb_net.values()\n return df", "def competitions(self) -> DataFrame[Any]:", "def get_df(self):\n return pd.DataFrame(self.points)" ]
[ "0.6370824", "0.58607304", "0.5721609", "0.56225353", "0.5615848", "0.55028003", "0.550037", "0.54332703", "0.54226243", "0.5420903", "0.53964025", "0.53897834", "0.53690106", "0.52974486", "0.5292217", "0.52744174", "0.5213015", "0.5210845", "0.5197446", "0.51936984", "0.51609534", "0.514815", "0.5128119", "0.50994927", "0.5090686", "0.50845885", "0.5076424", "0.5067188", "0.5065545", "0.50534165" ]
0.6240735
1
Insert a new value after a key.
def insert_after(self, key, value): self._insert_after(self.head, key, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self, key, value):\n\t\tself.__insert(key, value, key[1:])", "def _insert_after(cls, node, key, value):\n # End of list base case\n if node is None:\n return\n\n # Base case for key found\n if node.value == key:\n node.next_ = Node(value, node.next_)\n return\n\n # Recursive case\n cls._insert_after(node.next_, key, value)", "def __setitem__(self, key, value):\n self.insert(key, value)", "def add(self, key, value):\n self._data.add_last(self._Item(key, value))", "def __setitem__(self,k,v):\n self.insert(k,v)", "def insert(self, key):\r\n index = self.search(key)\r\n self.keys.insert(index, key)", "def insert(self, key, val):\n self.dict.setdefault(key, []).append(val)", "def _insert_item(self, key: _KT, value: _VT) -> None:\n dict.__setitem__(self, key, value)", "def insert(self, key, value):\n hash_key = hash(key) % self.length\n bucket = self.array[hash_key]\n for idx, key_val_pair in enumerate(bucket):\n k, v = key_val_pair\n if k == key:\n bucket[idx] = [key, value]\n return\n bucket.append([key, value])", "def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk", "def insert(self, key, value):\n tags = self.__all_tags()\n if value not in tags:\n tags.insert(key, value)\n self.__post_changes(tags)", "def _insert(self, key, value):\n entry = self._lookup(key)\n if entry.value is None:\n self.used += 1\n if entry.key is not dummy:\n self.filled += 1\n entry.key = key\n entry.hash = self.first_hash(key)\n entry.value = value", "def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r", "def insert_key(self, key : str, value : int) -> None:\n \n hash_key = self.hash_key(key)\n head = self.array[hash_key]\n \n while head.next:\n if head.next.key == key:\n head.next.value = value\n return\n head = head.next\n head.next = Node(key,value)\n self.keys.append(key)", "def insert(self, index, key, value):\r\n if key in self.keyOrder:\r\n n = self.keyOrder.index(key)\r\n del self.keyOrder[n]\r\n if n < index:\r\n index -= 1\r\n self.keyOrder.insert(index, key)\r\n super(OrderedDict, self).__setitem__(key, value)", "def add(self, key, value):\n # If the node is empty, simply insert the key-value pair.\n if not self.keys:\n self.keys.append(key)\n self.values.append([value])\n return None\n\n for i, item in enumerate(self.keys):\n # If new key matches existing key, add to list of values.\n if key == item:\n self.values[i].append(value)\n break\n\n # If new key is smaller than existing key, insert new key to the left of existing key.\n elif key < item:\n self.keys = self.keys[:i] + [key] + self.keys[i:]\n self.values = self.values[:i] + [[value]] + self.values[i:]\n break\n\n # If new key is larger than all existing keys, insert new key to the right of all\n # existing keys.\n elif i + 1 == len(self.keys):\n self.keys.append(key)\n self.values.append([value])", "def insert(self, key, value):\n # Resize array here if necessary.\n if key < 0: 
key = 0\n elif key > len(self): key = len(self)\n if key < len(self):\n for j in range(len(self), key, -1):\n self._items[j] = self._items[j - 1]\n self._items[key] = value\n self._size += 1\n self.incModCount()", "def add(self, key, value):\n\t\tself.__add_key_to_bt(key)[3] = self.__add_key_value_to_ll(key, value)", "def insert(self, key, value):\r\n self._data.append(self._Item(key, value))\r\n self._upheap(len(self._data) - 1) # upheap newly added position\r", "def insertKeyValueAfterDictKey(curDict, afterKey, newKey, newValue):\n # keyList = curDict.keys()\n keyList = list(curDict.keys())\n keyMaxNum = len(keyList)\n keyMaxIdx = keyMaxNum - 1\n # valuesList = curDict.values()\n valuesList = list(curDict.values())\n afterKeyIndex = keyList.index(afterKey) # 6\n if afterKeyIndex < keyMaxIdx:\n toInsertIndex = afterKeyIndex + 1\n keyList.insert(toInsertIndex, newKey)\n valuesList.insert(toInsertIndex, newValue)\n else:\n keyList.append(newKey)\n valuesList.append(newValue)\n updatedDict = {}\n for keyIdx, eachKey in enumerate(keyList):\n eachValue = valuesList[keyIdx]\n updatedDict[eachKey] = eachValue\n return updatedDict", "def _insert_after(paragraph, item_before, new_item, new_value):\n item_found = False\n for item in paragraph:\n if item_found:\n value = paragraph.pop(item)\n paragraph[item] = value\n if item == item_before:\n item_found = True\n paragraph[new_item] = new_value\n if not item_found:\n paragraph[new_item] = new_value", "def insert(self, key, value):\n # Find the leaf node where to do the insertion.\n if not self.is_leaf():\n insert_point = self.get_position(key)\n return self.refs[insert_point].insert(key, value)\n\n # Located a leaf node, so insert the (key, value) pair.\n insert_point = self.get_position(key)\n self.keys.insert(insert_point, key)\n self.values.insert(insert_point, value)\n\n if self.is_full():\n self.split()\n\n return self", "def insert_before(self, key, value):\n # Iterating to node that has value\n node = self.head\n last_node = None\n while node is not None and node.value != key:\n last_node = node\n node = node.next_\n\n # Check if the node has been found\n if node is None:\n return\n\n # Checking whether head matched\n if last_node is None:\n self.add_first(value)\n return\n\n # Inserting new node\n last_node.next_ = Node(value, node)", "def insert(self, index, key, value):\n if key in self:\n # FIXME: efficiency?\n del self[key]\n self._sequence.insert(index, key)\n dict.__setitem__(self, key, value)", "def insert(self, key, value=None):\n if isinstance(key, list):\n for k in key:\n self.insert(k)\n else:\n if key == self.key:\n # update key: value\n self.value = value\n elif key < self.key:\n if self.left == None:\n self.left = Tree(key, value)\n else:\n self.left.insert(key, value)\n else:\n if self.right == None:\n self.right = Tree(key, value)\n else:\n self.right.insert(key, value)", "def add(self, key, value):", "def append(self, key, value):\n # forces a `__getitem__`, which in turn calls `__missing__`\n # the first time we try to insert a value\n self[key].send((value,))", "def _insert(self, key):\n self.tree.insert(key)", "def inc(self, key):\n if key in self.key_dict:\n self.increase(key)\n return\n self.key_dict[key] = key_node = KeyNode(key, 1)\n value_node = self.value_dict.get(1)\n if value_node is None:\n self.value_dict[1] = value_node = ValueNode(1, None, self.head)\n if self.head:\n self.head.prev = value_node\n self.head = value_node\n if self.last is None:\n self.last = value_node\n self.insert_key_node(key_node)", "def 
append_to(self, key, value):\n self.get_data()[key].append(value.get_data())" ]
[ "0.824605", "0.76902354", "0.7456207", "0.74112356", "0.7222522", "0.7161951", "0.71594304", "0.71453357", "0.7112022", "0.70699406", "0.7042985", "0.7003373", "0.6992507", "0.6984114", "0.69314635", "0.6923191", "0.68841237", "0.68686163", "0.685315", "0.6815772", "0.68092054", "0.68023896", "0.6753997", "0.6728788", "0.67070043", "0.66909677", "0.66829747", "0.6676366", "0.6655326", "0.66255444" ]
0.8499372
0
Insert a new value after a key recursively.
def _insert_after(cls, node, key, value): # End of list base case if node is None: return # Base case for key found if node.value == key: node.next_ = Node(value, node.next_) return # Recursive case cls._insert_after(node.next_, key, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_after(self, key, value):\n self._insert_after(self.head, key, value)", "def insert(self, key, value):\n\t\tself.__insert(key, value, key[1:])", "def insert(self, key, value=None):\n if isinstance(key, list):\n for k in key:\n self.insert(k)\n else:\n if key == self.key:\n # update key: value\n self.value = value\n elif key < self.key:\n if self.left == None:\n self.left = Tree(key, value)\n else:\n self.left.insert(key, value)\n else:\n if self.right == None:\n self.right = Tree(key, value)\n else:\n self.right.insert(key, value)", "def insert(self, key, value):\n self.root.insert(key, value)\n\n # Update the new root if need be.\n node = self.root\n while node.parent != None:\n node = node.parent\n self.root = node", "def _insert(self, key):\n self.tree.insert(key)", "def insert(self, key, value):\n # Find the leaf node where to do the insertion.\n if not self.is_leaf():\n insert_point = self.get_position(key)\n return self.refs[insert_point].insert(key, value)\n\n # Located a leaf node, so insert the (key, value) pair.\n insert_point = self.get_position(key)\n self.keys.insert(insert_point, key)\n self.values.insert(insert_point, value)\n\n if self.is_full():\n self.split()\n\n return self", "def insert(self, key, value):\n\n if self.key == key:\n self.val = value\n elif key < self.key:\n if self.left is None:\n self.left = self.__class__(key, value)\n else:\n self.left = self.left.insert(key, value)\n else:\n if self.right is None:\n self.right = self.__class__(key, value)\n else:\n self.right = self.right.insert(key, value)\n\n return self", "def insert_before(self, key, value):\n # Iterating to node that has value\n node = self.head\n last_node = None\n while node is not None and node.value != key:\n last_node = node\n node = node.next_\n\n # Check if the node has been found\n if node is None:\n return\n\n # Checking whether head matched\n if last_node is None:\n self.add_first(value)\n return\n\n # Inserting new node\n last_node.next_ = Node(value, node)", "def add(self, key, value):\n # If the node is empty, simply insert the key-value pair.\n if not self.keys:\n self.keys.append(key)\n self.values.append([value])\n return None\n\n for i, item in enumerate(self.keys):\n # If new key matches existing key, add to list of values.\n if key == item:\n self.values[i].append(value)\n break\n\n # If new key is smaller than existing key, insert new key to the left of existing key.\n elif key < item:\n self.keys = self.keys[:i] + [key] + self.keys[i:]\n self.values = self.values[:i] + [[value]] + self.values[i:]\n break\n\n # If new key is larger than all existing keys, insert new key to the right of all\n # existing keys.\n elif i + 1 == len(self.keys):\n self.keys.append(key)\n self.values.append([value])", "def insert(self, key, value):\n\n val = 0\n val = self.search(self.key)\n\n if self.key == key:\n self.val = value\n elif key < self.key:\n if self.left is None:\n self.left = self.__class__(key, value)\n else:\n self.left.insert(key, value)\n else:\n if self.right is None:\n self.right = self.__class__(key, value)\n else:\n self.right = self.right.insert(key, value)\n\n return self", "def insert(node, key):\n # If the tree is empty, return a new node\n if node is None:\n return Node(key)\n\n # Otherwise recur down the tree\n if key < node.key:\n node.left = insert(node.left, key)\n else:\n node.right = insert(node.right, key)\n\n # return the (unchanged) node pointer\n return node", "def insert(root, key, value=None):\n if root is None:\n root = Node(key, value)\n else:\n 
if key >= root.key:\n if root.right is None:\n root.right = Node(key, value)\n else:\n # Use root.right as the root of the subtree\n insert(root.right, key, value)\n else:\n if root.left is None:\n root.left = Node(key, value)\n else:\n # Use root.left as the root of the subtree\n insert(root.left, key, value)", "def insert(self, key, val=None):\n self.root = self._insert(self.root, key, val) # Returns root of resulting tree after insertion - update it\n self.n += 1", "def inc(self, key):\n if key in self.key_dict:\n self.increase(key)\n return\n self.key_dict[key] = key_node = KeyNode(key, 1)\n value_node = self.value_dict.get(1)\n if value_node is None:\n self.value_dict[1] = value_node = ValueNode(1, None, self.head)\n if self.head:\n self.head.prev = value_node\n self.head = value_node\n if self.last is None:\n self.last = value_node\n self.insert_key_node(key_node)", "def insert(self, key):\r\n if self.root.num_keys() == self.max_num_keys:\r\n self.root = Node([], [self.root])\r\n self.root.split_child(0)\r\n\r\n node = self.root \r\n while not node.is_leaf():\r\n index = node.search(key)\r\n\r\n child = node.children[index]\r\n if child.num_keys() == self.max_num_keys:\r\n node.split_child(index)\r\n\r\n if node.keys[index] < key:\r\n index += 1\r\n\r\n node = node.children[index] \r\n\r\n node.insert(key)", "def insert_key(self, key : str, value : int) -> None:\n \n hash_key = self.hash_key(key)\n head = self.array[hash_key]\n \n while head.next:\n if head.next.key == key:\n head.next.value = value\n return\n head = head.next\n head.next = Node(key,value)\n self.keys.append(key)", "def insert(self, key, value=None):\n if key in self.nodes:\n return None\n else:\n new_node = Node(key, value)\n (self.nodes)[key] = new_node \n current = self.root\n last = current\n\n if current is None:\n self.root = self.nodes[key]\n self.root.height = 0\n return new_node\n\n while (current is not None):\n if new_node.key > current.key:\n last = current\n current = current.right\n if (current != None and current.left == None) or (current == self.root):\n current.height += 1\n else:\n last = current\n current = current.left\n if (current != None and current.left == None) or (current == self.root):\n current.height += 1\n\n if new_node.key > last.key:\n last.right = new_node\n new_node.parent = last\n else:\n last.left = new_node\n new_node.parent = last\n\n self.root.height = self.get_height_tree()\n return new_node", "def insert(self, key):\n if self.root is None:\n self.root = self.Node(key)\n else:\n self.root = self.root.insert(key)", "def insert(root: Node, key: int) -> Node:\n node = Node(key)\n l, r = split(root, key)\n return merge(merge(l, node), r)", "def _put(self, k, v, currNode):\n if k < currNode.key:\n if currNode.hasLeftChild():\n self._put(k, v, currNode.leftChild)\n else:\n currNode.leftChild = TreeNode(k, v, parent=currNode)\n\n elif k > currNode.key:\n if currNode.hasRightChild():\n self._put(k, v, currNode.rightChild)\n else:\n currNode.rightChild = TreeNode(k, v, parent=currNode)\n\n else:\n currNode.payload = v\n self.size -= 1", "def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk", "def insert(self, key):\n # Create new node\n n = TreeNode(key)\n if not 
self.node:\n self.node = n\n self.node.left = AvlTree()\n self.node.right = AvlTree()\n elif key < self.node.val:\n self.node.left.insert(key)\n elif key > self.node.val:\n self.node.right.insert(key)\n self.re_balance()", "def insert(self, item, key):\n if self.key == key:\n self.item = item\n elif self.key < key:\n if self.right:\n self.right.insert(item, key)\n else:\n self.right = BSTreeNode(item, key)\n else:\n if self.left:\n self.left.insert(item, key)\n else:\n self.left = BSTreeNode(item, key)\n # Replace by correct code\n pass", "def _put(self, key, value, current_node):\n pass", "def __setitem__(self,k,v):\n self.insert(k,v)", "def _insert(self, key, value):\n entry = self._lookup(key)\n if entry.value is None:\n self.used += 1\n if entry.key is not dummy:\n self.filled += 1\n entry.key = key\n entry.hash = self.first_hash(key)\n entry.value = value", "def insert(self,key):\n \n current_node = self.root \n length = len(key) \n for level in range(length): \n index = self._charToIndex(key[level]) \n # if current character is not present \n if not current_node.children[index]: \n current_node.children[index] = self.getNode(key[level]) \n current_node = current_node.children[index] \n \n # mark last node as leaf \n current_node.isEndOfWord = True", "def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r", "def __setitem__(self, key, value):\n self.insert(key, value)", "def insert(self,node,key):\n position=self.find(node,key)\n if position.key==key:\n print(\"node already present\")\n elif position.key>key:\n n=Node(key)\n position.setLeftChild(n)\n n.setParent(position)\n print(n.getParent())\n else:\n n=Node(key)\n position.setRightChild(n)\n n.setParent(position)" ]
[ "0.7798615", "0.7577879", "0.7455184", "0.7206514", "0.7101434", "0.70513165", "0.6990954", "0.69256455", "0.6925461", "0.6884688", "0.685683", "0.6851591", "0.68488187", "0.6843558", "0.68378764", "0.68335766", "0.6827937", "0.6778415", "0.6688068", "0.6680994", "0.6632043", "0.6568431", "0.6539546", "0.6536627", "0.65319747", "0.651712", "0.65149844", "0.6507315", "0.6505166", "0.64899415" ]
0.80542064
0
Delete the node with the given value.
def delete(self, value): # Iterating to node that has value node = self.head last_node = None while node is not None and node.value != value: last_node = node node = node.next_ # Check if the node has been found if node is None: return # Checking whether head matched if last_node is None: self.head = node.next_ return # Deleting node last_node.next_ = node.next_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_node(tx, node_value, node_type):\n cql = \"MATCH(n:\" + node_type + \"{name:$node_value}) DETACH DELETE(n);\"\n try:\n tx.run(cql, node_value=node_value)\n except Exception as e:\n print(str(e))", "def delete_node(name: str, value: str) -> None:\n global _graph\n\n if _graph is None:\n print('\\ndelete_node(): Error: graph has not been initialized or opened.\\n\\n')\n return\n\n lname = str(name)\n lvalue = str(value)\n\n if lname == '' or lvalue == '' or lname == 'nan' or lvalue == 'nan':\n return\n\n node = read_node(name=lname, value=lvalue)\n if node is None:\n return\n\n _graph.delete(node)\n return", "def del_node(self, val):\n try:\n del self[val]\n for key in self:\n if val in self[key]:\n self[key].remove(val)\n except KeyError:\n raise ValueError('Value not in graph')", "def del_node(self, val):\n try:\n del self[val]\n for key in self:\n if val in self[key]:\n del self[key][val]\n except KeyError:\n raise ValueError('Value not in graph')", "def delete(self, value):\r\n # Return false if tree was empty\r\n if self.empty():\r\n return False\r\n\r\n # Find the node containing the value\r\n node = self.descend_to_node(value)\r\n # If that node is 'False', value wasn't found. Give error and return False.\r\n if not node:\r\n print(\"Value\", value, \"not found.\")\r\n return False\r\n else:\r\n # If it wasn't False, call on helper function delete_node\r\n self.delete_node(node)", "def delete(self, value):\n if self.root is not None: # else do nothing\n if type(value) == self.typing: # else do nothing\n hasValue, self.root = self.root.contains(value)\n if hasValue: # always deletes root\n self.root = self.root.delete(value) \n self._size -= 1", "def delete(self, value):\n pass", "def delete_child(self, val):\n del self._children[val]\n return val", "def delete(self, val):\n if not self.contains(val):\n return None\n node = self.search(val)\n if node._rkid and node._lkid:\n self._del_node_two_children(node._parent, node)\n elif node._rkid or node._lkid:\n self._del_node_one_child(node._parent, node)\n else:\n self._del_node_no_children(node._parent, node)\n self._get_new_max(False)\n self._size = len(self._balance_helper_breadth_first(self._root))", "def delete(self, value):\n n = self._find(value) # find and splay relevant node\n n._splay()\n\n if n.value == value: # only if value actually found\n left, right = n._uproot()\n \n # there is a left child: splay around its maximum, connect to right\n if left is not None: \n while left.right is not None:\n left = left.right\n left._splay()\n left.right = right\n if right is not None: \n right.parent = left\n n = left\n\n # there is no left child: all we need is the right\n else:\n n = right\n\n return n # new root of the entire tree", "def delete(self, value):\n current = self.head\n prev = None\n\n while current:\n if current.value == value:\n if prev == None:\n self.head = current.next\n else:\n prev.next = current.next\n break\n prev = current\n current = current.next", "def remove_node(self, value):\n node = self.head\n\n while node:\n if self.head.value == value:\n self.head = self.head.next\n return\n if node.next.value == value:\n node.next = node.next.next\n return\n node = node.next", "def delete_node(self, node):\n return node.delete()", "def delete_value(self, value):\n del self.index[value]", "def delete(self, value):\n current = self.head\n if current.value == value:\n self.head = current.next\n else:\n while current:\n if current.value == value:\n break\n prev = current\n current = current.next\n if current == 
None:\n return\n prev.next = current.next\n current = None", "def remove_value(self, value):\n if self.head is None: \n raise ValueError('Deleting from empty list.')\n node = self.head \n if node.value == value: \n self.head = self.head.next_node \n return node \n while node.next_node is not None:\n current = node.next_node \n if current.value == value:\n node.next_node = current.next_node \n return current \n node = current\n raise ValueError('Deleting non-existing value.')", "def delete(self, val):\n\n\t\tself.root = self.deleteHelper(self.root, val)\n\t\tself.numNodes = 0\n\t\tif self.root:\n\t\t\tQ = [self.root]\n\t\t\twhile Q:\n\t\t\t\tnode = Q.pop(0)\n\t\t\t\tif node.left:\n\t\t\t\t\tQ.append(node.left)\n\t\t\t\tif node.right:\n\t\t\t\t\tQ.append(node.right)\n\t\t\t\tself.numNodes += 1", "def remove_value(self, value):\n if self.head is None: \n raise ValueError('Deleting from empty list.')\n node = self.head \n if node.value == value: \n self.head = self.head.next_node \n if self.head is None: \n self.tail = None\n else:\n self.head.prev_node = None \n return node \n while node.next_node is not None:\n node = node.next_node \n if node.value == value:\n node.prev_node.next_node = node.next_node \n if node.next_node is None: \n self.tail = node.prev_node \n else:\n node.next_node.prev_node = node.prev_node\n return node\n raise ValueError('Deleting non-existing value.')", "def remove_node(self, key) -> Node:", "def del_node(node, delnum):\n pass", "def delete(self, value):\n current = self.head\n previous = None\n while current.value != value and current.next:\n previous = current\n current = current.next\n if current.value == value:\n if previous:\n previous.next = current.next\n else:\n self.head = current.next\n pass", "def delete(self, key):\n node = self.search(key)\n if node:\n self.remove_node(node)", "def delete(self, value):\n current = self.head\n index = 1\n ''' delete first element '''\n if index == 1 and current.value == value:\n print (\"deleting first element\")\n current.next = current.next.next\n return\n \n ''' delete last element '''\n while not current.next.next and current.next.value == value:\n print (\"deleting last element\")\n current.next = None\n return\n \n ''' anywhere in between '''\n while current.next.next and current.next.value != value:\n current = current.next\n \n ''' delete the element '''\n print (\"deleting anywhere between element\")\n current.next = current.next.next\n return", "def removeNode(self, node):", "def remove_value(self, value):\n # check the head's key\n temp_node = self.head\n if temp_node.val==value:\n self.head = temp_node.next\n temp_node = None\n self.n -= 1\n return\n\n # search for the key value\n while temp_node.val != value: # check the next node's key\n prev_node = temp_node # store prev node to change prev.next\n temp_node = temp_node.next\n # if the key is not found\n if temp_node == None:\n print(\"Error; key value is not found\")\n return\n else:\n # reconfigure; unlink the current node\n prev_node.next = temp_node.next\n temp_node = None\n self.n -= 1", "def remove(self, value):\r\n if self.head is None:\r\n return\r\n\r\n if self.head.value == value:\r\n self.head = self.head.next\r\n return\r\n\r\n node = self.head\r\n while node.next:\r\n if node.next.value == value:\r\n node.next = node.next.next\r\n return\r\n node = node.next", "def delete_by_value(self, key):\n cur_node = self.head\n\n if cur_node and cur_node.data == key:\n self.head = cur_node.next\n cur_node = None\n prev = None\n while cur_node and 
cur_node.data != key:\n prev = cur_node\n cur_node = cur_node.next\n if cur_node is None:\n return\n prev.next = cur_node.next\n cur_node = None", "def remove_node(self, node_key: NodeKey) -> Node:", "def remove(self, value):\n # find node to be removed\n node = self.find(value)\n\n # value does not exist: abort\n if node == None:\n print('Removal failure: Node with value ', value, ' not found')\n return\n\n # value exists: find best substitute candidate\n # node to be removed is a leaf: remove it\n if node.left == None and node.right == None:\n parent = node.parent\n self.updateNodeParentChild(node, None)\n\n # node to be removed has left child: find left child most right node\n elif node.left != None:\n\n # find substitute\n substitute = node.left\n while substitute.right != None:\n substitute = substitute.right\n\n # update node value to substitute value\n node.value = substitute.value\n\n # update substitute's parent child, and this child's parent\n parent = substitute.parent\n if parent == node:\n node.left = substitute.left\n else:\n parent.right = substitute.left\n if substitute.left != None:\n substitute.left.parent = parent\n\n # node to be removed has only right child: find right child most left nd\n else:\n\n # find substitute\n substitute = node.right\n while substitute.left != None:\n substitute = substitute.left\n\n # update node value to substitute value\n node.value = substitute.value\n\n # update substitute's parent child, and this child's parent\n parent = substitute.parent\n if parent == node:\n node.right = substitute.right\n else:\n parent.left = substitute.right\n if substitute.right != None:\n substitute.right.parent = parent\n\n # value updated and node removed: rebalance tree\n self.rebalance(parent)", "def delete_value(self, val, txn=None):\n assert self.derived_keys\n return self.delete(self.key_func(val), txn)" ]
[ "0.7895208", "0.7865066", "0.7842883", "0.77804184", "0.7733886", "0.7649159", "0.75719863", "0.7489731", "0.7425765", "0.73570824", "0.7320378", "0.72870445", "0.72328395", "0.7221816", "0.7196131", "0.7181461", "0.711635", "0.70913994", "0.70902276", "0.7073389", "0.70698", "0.70482945", "0.7007304", "0.69897026", "0.6955695", "0.69555646", "0.69391567", "0.6870856", "0.686394", "0.6856512" ]
0.80483353
0
Try importing Pyspark, or display a warning message in Streamlit
def try_import_pyspark_in_streamlit(): try: import pyspark from pyspark.sql import SparkSession except: print("You need Pyspark installed to run NLU. Run <pip install pyspark==3.0.2>") try: import streamlit as st st.error( "You need Pyspark, Sklearn, Pyplot, Pandas, Numpy installed to run this app. Run <pip install pyspark==3.0.2 sklearn pyplot numpy pandas>") except: return False return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_spark(self):\n\t\traise NotImplementedError()", "def get_spark_i_know_what_i_am_doing():\n return _spark", "def test_pyspark(container):\n c = container.run(\n tty=True,\n command=['start.sh', 'python', '-c', 'import pyspark']\n )\n rv = c.wait(timeout=30)\n assert rv == 0 or rv[\"StatusCode\"] == 0, \"pyspark not in PYTHONPATH\"\n logs = c.logs(stdout=True).decode('utf-8')\n LOGGER.debug(logs)", "def provoke_and_handle_ImportError():\n try:\n from you import me\n except ImportError as impe:\n print(f\"Sorry! {impe}\")", "def warning(self, msg):\n oscid = self.app.global_osc_id()\n print(\"WARNING : /Llia/%s : %s\" % (oscid, msg))", "def main():\n\n warning_message = \"This script contains the code that builds the \" \\\n + \"main page of the FCA (File Converter App).\" \\\n + \"\\n\\nThis script should NOT be run DIRECTLY.\" \\\n + \"\\n\\nPlease, import it in another script.\"\n\n cGUIf.show_warning(\"Import warning\",warning_message)", "def no_xb_gui():\n logger.warning(\"Could not import the GUI.\")\n logger.warning(\"For instructions on how to install the GUI,\")\n logger.warning(\"check the docs janclemenslab.org/das/install.html.\")", "def warn():\n pass", "def displayNoDBWarning(self): \n tkMessageBox.showwarning(\"Cannot launch plugin\",\n 'No Database is currently open. '\n 'You should first open a project.') \n return", "def show_missing():\n if missing_modules:\n info(\"The following modules are currently not installed and would enable additional tasks:\")\n for pkg_name in missing_modules:\n info(' ' + pkg_name)", "def __init__(self, spark, logger):\n self.spark = spark\n self.logger = logger", "def _maybe_show_deprecation_warning(self):\n if self._deprecation_warning is not None:\n show_deprecation_warning(self._deprecation_warning)", "def iceberg_spark_for_demo(demo: NessieDemo, nessie_ref: str = \"main\", catalog_name: str = \"nessie\") -> Tuple:\n global __NESSIE_ICEBERG_SPARK_DEMO__\n iceberg_dispose()\n\n demo_spark = NessieDemoIcebergSpark(demo)\n __NESSIE_ICEBERG_SPARK_DEMO__ = demo_spark\n spark, sc, jvm = demo_spark.get_or_create_spark_context(nessie_ref=nessie_ref, catalog_name=catalog_name)\n # TODO need a way to properly shutdown the spark-context (the pyspark-shell process)\n return spark, sc, jvm, demo_spark", "def _warn_no_students(self):\n message = \"<tr><h2>No student records were found</h2></tr>\"\n self.add_element(message,True,0,True)", "def main():\n spark_it_up()", "def show_warning(title, message, print_message=False):\n\n pass", "def import_fail_info(mod_name,fns=None):\n\n if fns == None:\n warn(\"Loading of %s failed.\\n\" % (mod_name,))\n else:\n warn(\"Loading of %s from %s failed.\\n\" % (fns,mod_name))", "def spark_context(request):\n spark = SparkSession.builder.master(\"local[1]\").appName(\"pytest-test\").getOrCreate()\n request.addfinalizer(lambda: spark.stop())\n\n quiet_py4j()\n return spark", "def warning_function():\r\n app = QApplication(sys.argv)\r\n ex = WarningBox()\r\n sys.exit(app.exec_())", "def troubleshoot():\n libraries = (sys, pd, openpyxl, matplotlib, pip)\n for i in libraries:\n try:\n print(str(i), 'version:', i.__version__)\n except AttributeError:\n pass\n except ModuleNotFoundError:\n print('You do not have', str(i), 'installed.')\n print('You can do so via your interpreter or:')\n print('py -m pip install', '-' + str(i))\n print('in command prompt')", "def notice(self, warning):\n pass", "def main():\n\n\tst.title(\"Iris EDA App with streamlit\")\n\tst.subheader(\"Streamlit is Cool\")", "def info(self) -> 
str:\n return r\"slp.py not parsing yet ¯\\_(ツ)_/¯\"", "def test_import_kedro_viz_with_no_official_support_emits_warning(mocker):\n mocker.patch(\"kedro_viz.sys.version_info\", (3, 12))\n\n # We use the parent class to avoid issues with `exec_module`\n with pytest.warns(UserWarning) as record:\n kedro_viz.__loader__.exec_module(kedro_viz)\n\n assert len(record) == 1\n assert (\n \"\"\"Please be advised that Kedro Viz is not yet fully\n compatible with the Python version you are currently using.\"\"\"\n in record[0].message.args[0]\n )", "def get_or_create_spark_context(self: \"NessieDemoIcebergSpark\", nessie_ref: str = \"main\", catalog_name: str = \"nessie\") -> Tuple:\n conf = self.__spark_conf(nessie_ref, catalog_name)\n return self._get_or_create_spark_context(conf)", "def get_usage(self):\n return \"This is spylon-kernel. It implements a Scala interpreter.\"", "def loading_failures():\n\n import simtk.openmm as mm\n print(mm.Platform.getPluginLoadFailures())", "def warning(self, warning):\n pass", "def quiet_py4j():\n logger = logging.getLogger('py4j')\n logger.setLevel(logging.WARN)", "def warn_on_old_or_unsupported_python_version():\n\n if PY26:\n import scalyr_agent.scalyr_logging\n\n scalyr_agent.scalyr_logging.getLogger(__name__).warn(PYTHON26_EOL_WARNING)" ]
[ "0.5612264", "0.53342944", "0.5266926", "0.50426006", "0.50365645", "0.50164586", "0.5011089", "0.4967256", "0.49386474", "0.4916724", "0.49133012", "0.49097872", "0.4909218", "0.48768187", "0.48554832", "0.4795663", "0.4754267", "0.4747436", "0.4722205", "0.47067413", "0.46928075", "0.46650323", "0.4659675", "0.4652551", "0.46486217", "0.46470165", "0.4630299", "0.46275684", "0.46192774", "0.46187907" ]
0.78209
0
Authenticate the environment for JSL licensed models. Installs NLP Healthcare if it is not detected in the environment. Either provide the path to the spark_nlp_for_healthcare.json file as the first parameter, or manually enter SPARK_NLP_LICENSE_OR_JSON_PATH, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and JSL_SECRET. Set gpu=True if you want to enable GPU mode
def auth(SPARK_NLP_LICENSE_OR_JSON_PATH='/content/spark_nlp_for_healthcare.json', AWS_ACCESS_KEY_ID='', AWS_SECRET_ACCESS_KEY='', JSL_SECRET='', gpu=False): if os.path.exists(SPARK_NLP_LICENSE_OR_JSON_PATH): with open(SPARK_NLP_LICENSE_OR_JSON_PATH) as json_file: j = json.load(json_file) auth_utils.get_authenticated_spark(j['SPARK_NLP_LICENSE'], j['AWS_ACCESS_KEY_ID'], j['AWS_SECRET_ACCESS_KEY'], j['SECRET'], gpu) return nlu if AWS_ACCESS_KEY_ID != '': auth_utils.get_authenticated_spark(SPARK_NLP_LICENSE_OR_JSON_PATH, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, JSL_SECRET, gpu) else: return nlu return nlu
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(_):\n hps = LM.get_default_hparams().parse(FLAGS.hpconfig)\n hps._set(\"num_gpus\", FLAGS.num_gpus)\n print ('*****HYPER PARAMETERS*****')\n print (hps)\n print ('**************************')\n\n vocab = Vocabulary.from_file(os.path.join(FLAGS.datadir, \"vocabulary.txt\"))\n\n if FLAGS.mode == \"train\":\n #hps.batch_size = 256\n dataset = Dataset(vocab, os.path.join(FLAGS.datadir, \"train.txt\"))\n run_train(dataset, hps, os.path.join(FLAGS.logdir, \"train\"), ps_device=\"/gpu:0\")\n elif FLAGS.mode.startswith(\"eval\"):\n data_dir = os.path.join(FLAGS.datadir, \"eval.txt\")\n #predict_model = prediction.Model('/dir/ckpt',os.path.join(FLAGS.datadir, \"vocabulary.txt\"), hps)\n\n dataset = Dataset(vocab, data_dir, deterministic=True)\n prefix_words = \"<brk>\".split()\n predict_model = predict.Model(hps, FLAGS.logdir, FLAGS.datadir)\n print ('start input')\n out = predict_model.predictnextkwords(prefix_words, FLAGS.num_sen)\n for row in out:\n print(' '.join(row) + \"\\n\")\n print(\"len_out: \" + str(len(out)))\n #prediction.topkwords(prefix_words, dataset, hps, FLAGS.logdir, FLAGS.mode)\n #sentence_ppl(prefix_words,dataset, hps, FLAGS.logdir, FLAGS.mode)\n #print vocab\n #dataset = Dataset(vocab, os.path.join(FLAGS.datadir, \"eval.txt\"))\n #run_eval(dataset, hps, FLAGS.logdir, FLAGS.mode, FLAGS.eval_steps)", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def train(\n # fmt: off\n lang: (\"Model language\", \"positional\", None, str),\n output_path: (\"Output directory to store model in\", \"positional\", None, Path),\n train_path: (\"Location of JSON-formatted training data\", \"positional\", None, Path),\n dev_path: (\"Location of JSON-formatted development data\", \"positional\", None, Path),\n raw_text: (\"Path to jsonl file with unlabelled text documents.\", \"option\", \"rt\", Path) = None,\n base_model: (\"Name of model to update (optional)\", \"option\", \"b\", str) = None,\n pipeline: (\"Comma-separated names of pipeline components\", \"option\", \"p\", 
str) = \"tagger,parser,ner\",\n vectors: (\"Model to load vectors from\", \"option\", \"v\", str) = None,\n replace_components: (\"Replace components from base model\", \"flag\", \"R\", bool) = False,\n n_iter: (\"Number of iterations\", \"option\", \"n\", int) = 30,\n n_early_stopping: (\"Maximum number of training epochs without dev accuracy improvement\", \"option\", \"ne\", int) = None,\n n_examples: (\"Number of examples\", \"option\", \"ns\", int) = 0,\n use_gpu: (\"Use GPU\", \"option\", \"g\", int) = -1,\n version: (\"Model version\", \"option\", \"V\", str) = \"0.0.0\",\n meta_path: (\"Optional path to meta.json to use as base.\", \"option\", \"m\", Path) = None,\n init_tok2vec: (\"Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.\", \"option\", \"t2v\", Path) = None,\n parser_multitasks: (\"Side objectives for parser CNN, e.g. 'dep' or 'dep,tag'\", \"option\", \"pt\", str) = \"\",\n entity_multitasks: (\"Side objectives for NER CNN, e.g. 'dep' or 'dep,tag'\", \"option\", \"et\", str) = \"\",\n noise_level: (\"Amount of corruption for data augmentation\", \"option\", \"nl\", float) = 0.0,\n orth_variant_level: (\"Amount of orthography variation for data augmentation\", \"option\", \"ovl\", float) = 0.0,\n eval_beam_widths: (\"Beam widths to evaluate, e.g. 4,8\", \"option\", \"bw\", str) = \"\",\n gold_preproc: (\"Use gold preprocessing\", \"flag\", \"G\", bool) = False,\n learn_tokens: (\"Make parser learn gold-standard tokenization\", \"flag\", \"T\", bool) = False,\n textcat_multilabel: (\"Textcat classes aren't mutually exclusive (multilabel)\", \"flag\", \"TML\", bool) = False,\n textcat_arch: (\"Textcat model architecture\", \"option\", \"ta\", str) = \"bow\",\n textcat_positive_label: (\"Textcat positive label for binary classes with two labels\", \"option\", \"tpl\", str) = None,\n tag_map_path: (\"Location of JSON-formatted tag map\", \"option\", \"tm\", Path) = None,\n verbose: (\"Display more information for debug\", \"flag\", \"VV\", bool) = False,\n debug: (\"Run data diagnostics before training\", \"flag\", \"D\", bool) = False,\n # fmt: on\n):\n util.fix_random_seed()\n util.set_env_log(verbose)\n\n # Make sure all files and paths exists if they are needed\n train_path = util.ensure_path(train_path)\n dev_path = util.ensure_path(dev_path)\n meta_path = util.ensure_path(meta_path)\n output_path = util.ensure_path(output_path)\n if raw_text is not None:\n raw_text = list(srsly.read_jsonl(raw_text))\n if not train_path or not train_path.exists():\n msg.fail(\"Training data not found\", train_path, exits=1)\n if not dev_path or not dev_path.exists():\n msg.fail(\"Development data not found\", dev_path, exits=1)\n if meta_path is not None and not meta_path.exists():\n msg.fail(\"Can't find model meta.json\", meta_path, exits=1)\n meta = srsly.read_json(meta_path) if meta_path else {}\n if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]:\n msg.warn(\n \"Output directory is not empty\",\n \"This can lead to unintended side effects when saving the model. \"\n \"Please use an empty directory or a different path instead. 
If \"\n \"the specified output path doesn't exist, the directory will be \"\n \"created for you.\",\n )\n if not output_path.exists():\n output_path.mkdir()\n msg.good(f\"Created output directory: {output_path}\")\n\n tag_map = {}\n if tag_map_path is not None:\n tag_map = srsly.read_json(tag_map_path)\n # Take dropout and batch size as generators of values -- dropout\n # starts high and decays sharply, to force the optimizer to explore.\n # Batch size starts at 1 and grows, so that we make updates quickly\n # at the beginning of training.\n dropout_rates = util.decaying(\n util.env_opt(\"dropout_from\", 0.2),\n util.env_opt(\"dropout_to\", 0.2),\n util.env_opt(\"dropout_decay\", 0.0),\n )\n batch_sizes = util.compounding(\n util.env_opt(\"batch_from\", 100.0),\n util.env_opt(\"batch_to\", 1000.0),\n util.env_opt(\"batch_compound\", 1.001),\n )\n\n if not eval_beam_widths:\n eval_beam_widths = [1]\n else:\n eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(\",\")]\n if 1 not in eval_beam_widths:\n eval_beam_widths.append(1)\n eval_beam_widths.sort()\n has_beam_widths = eval_beam_widths != [1]\n\n default_dir = Path(__file__).parent.parent / \"ml\" / \"models\" / \"defaults\"\n\n # Set up the base model and pipeline. If a base model is specified, load\n # the model and make sure the pipeline matches the pipeline setting. If\n # training starts from a blank model, intitalize the language class.\n pipeline = [p.strip() for p in pipeline.split(\",\")]\n msg.text(f\"Training pipeline: {pipeline}\")\n disabled_pipes = None\n pipes_added = False\n if use_gpu >= 0:\n activated_gpu = None\n try:\n activated_gpu = set_gpu(use_gpu)\n except Exception as e:\n msg.warn(f\"Exception: {e}\")\n if activated_gpu is not None:\n msg.text(f\"Using GPU: {use_gpu}\")\n else:\n msg.warn(f\"Unable to activate GPU: {use_gpu}\")\n msg.text(\"Using CPU only\")\n use_gpu = -1\n if base_model:\n msg.text(f\"Starting with base model '{base_model}'\")\n nlp = util.load_model(base_model)\n if nlp.lang != lang:\n msg.fail(\n f\"Model language ('{nlp.lang}') doesn't match language \"\n f\"specified as `lang` argument ('{lang}') \",\n exits=1,\n )\n if vectors:\n msg.text(f\"Loading vectors from model '{vectors}'\")\n _load_vectors(nlp, vectors)\n\n nlp.select_pipes(disable=[p for p in nlp.pipe_names if p not in pipeline])\n for pipe in pipeline:\n # first, create the model.\n # Bit of a hack after the refactor to get the vectors into a default config\n # use train-from-config instead :-)\n if pipe == \"parser\":\n config_loc = default_dir / \"parser_defaults.cfg\"\n elif pipe == \"tagger\":\n config_loc = default_dir / \"tagger_defaults.cfg\"\n elif pipe == \"ner\":\n config_loc = default_dir / \"ner_defaults.cfg\"\n elif pipe == \"textcat\":\n config_loc = default_dir / \"textcat_defaults.cfg\"\n elif pipe == \"senter\":\n config_loc = default_dir / \"senter_defaults.cfg\"\n else:\n raise ValueError(f\"Component {pipe} currently not supported.\")\n pipe_cfg = util.load_config(config_loc, create_objects=False)\n if vectors:\n pretrained_config = {\n \"@architectures\": \"spacy.VocabVectors.v1\",\n \"name\": vectors,\n }\n pipe_cfg[\"model\"][\"tok2vec\"][\"pretrained_vectors\"] = pretrained_config\n\n if pipe == \"parser\":\n pipe_cfg[\"learn_tokens\"] = learn_tokens\n elif pipe == \"textcat\":\n pipe_cfg[\"exclusive_classes\"] = not textcat_multilabel\n pipe_cfg[\"architecture\"] = textcat_arch\n pipe_cfg[\"positive_label\"] = textcat_positive_label\n\n if pipe not in nlp.pipe_names:\n msg.text(f\"Adding 
component to base model '{pipe}'\")\n nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))\n pipes_added = True\n elif replace_components:\n msg.text(f\"Replacing component from base model '{pipe}'\")\n nlp.replace_pipe(pipe, nlp.create_pipe(pipe, config=pipe_cfg))\n pipes_added = True\n else:\n if pipe == \"textcat\":\n textcat_cfg = nlp.get_pipe(\"textcat\").cfg\n base_cfg = {\n \"exclusive_classes\": textcat_cfg[\"exclusive_classes\"],\n \"architecture\": textcat_cfg[\"architecture\"],\n \"positive_label\": textcat_cfg[\"positive_label\"],\n }\n if base_cfg != pipe_cfg:\n msg.fail(\n f\"The base textcat model configuration does\"\n f\"not match the provided training options. \"\n f\"Existing cfg: {base_cfg}, provided cfg: {pipe_cfg}\",\n exits=1,\n )\n msg.text(f\"Extending component from base model '{pipe}'\")\n disabled_pipes = nlp.select_pipes(\n disable=[p for p in nlp.pipe_names if p not in pipeline]\n )\n else:\n msg.text(f\"Starting with blank model '{lang}'\")\n lang_cls = util.get_lang_class(lang)\n nlp = lang_cls()\n\n if vectors:\n msg.text(f\"Loading vectors from model '{vectors}'\")\n _load_vectors(nlp, vectors)\n\n for pipe in pipeline:\n # first, create the model.\n # Bit of a hack after the refactor to get the vectors into a default config\n # use train-from-config instead :-)\n if pipe == \"parser\":\n config_loc = default_dir / \"parser_defaults.cfg\"\n elif pipe == \"tagger\":\n config_loc = default_dir / \"tagger_defaults.cfg\"\n elif pipe == \"morphologizer\":\n config_loc = default_dir / \"morphologizer_defaults.cfg\"\n elif pipe == \"ner\":\n config_loc = default_dir / \"ner_defaults.cfg\"\n elif pipe == \"textcat\":\n config_loc = default_dir / \"textcat_defaults.cfg\"\n elif pipe == \"senter\":\n config_loc = default_dir / \"senter_defaults.cfg\"\n else:\n raise ValueError(f\"Component {pipe} currently not supported.\")\n pipe_cfg = util.load_config(config_loc, create_objects=False)\n if vectors:\n pretrained_config = {\n \"@architectures\": \"spacy.VocabVectors.v1\",\n \"name\": vectors,\n }\n pipe_cfg[\"model\"][\"tok2vec\"][\"pretrained_vectors\"] = pretrained_config\n\n if pipe == \"parser\":\n pipe_cfg[\"learn_tokens\"] = learn_tokens\n elif pipe == \"textcat\":\n pipe_cfg[\"exclusive_classes\"] = not textcat_multilabel\n pipe_cfg[\"architecture\"] = textcat_arch\n pipe_cfg[\"positive_label\"] = textcat_positive_label\n\n pipe = nlp.create_pipe(pipe, config=pipe_cfg)\n nlp.add_pipe(pipe)\n\n # Update tag map with provided mapping\n nlp.vocab.morphology.tag_map.update(tag_map)\n\n # Multitask objectives\n multitask_options = [(\"parser\", parser_multitasks), (\"ner\", entity_multitasks)]\n for pipe_name, multitasks in multitask_options:\n if multitasks:\n if pipe_name not in pipeline:\n msg.fail(\n f\"Can't use multitask objective without '{pipe_name}' in \"\n f\"the pipeline\"\n )\n pipe = nlp.get_pipe(pipe_name)\n for objective in multitasks.split(\",\"):\n pipe.add_multitask_objective(objective)\n\n # Prepare training corpus\n msg.text(f\"Counting training words (limit={n_examples})\")\n corpus = GoldCorpus(train_path, dev_path, limit=n_examples)\n n_train_words = corpus.count_train()\n\n if base_model and not pipes_added:\n # Start with an existing model, use default optimizer\n optimizer = create_default_optimizer()\n else:\n # Start with a blank model, call begin_training\n cfg = {\"device\": use_gpu}\n optimizer = nlp.begin_training(lambda: corpus.train_examples, **cfg)\n nlp._optimizer = None\n\n # Load in pretrained weights (TODO: this may be broken 
in the config rewrite)\n if init_tok2vec is not None:\n components = _load_pretrained_tok2vec(nlp, init_tok2vec)\n msg.text(f\"Loaded pretrained tok2vec for: {components}\")\n\n # Verify textcat config\n if \"textcat\" in pipeline:\n textcat_labels = nlp.get_pipe(\"textcat\").cfg.get(\"labels\", [])\n if textcat_positive_label and textcat_positive_label not in textcat_labels:\n msg.fail(\n f\"The textcat_positive_label (tpl) '{textcat_positive_label}' \"\n f\"does not match any label in the training data.\",\n exits=1,\n )\n if textcat_positive_label and len(textcat_labels) != 2:\n msg.fail(\n \"A textcat_positive_label (tpl) '{textcat_positive_label}' was \"\n \"provided for training data that does not appear to be a \"\n \"binary classification problem with two labels.\",\n exits=1,\n )\n train_data = corpus.train_data(\n nlp,\n noise_level=noise_level,\n gold_preproc=gold_preproc,\n max_length=0,\n ignore_misaligned=True,\n )\n train_labels = set()\n if textcat_multilabel:\n multilabel_found = False\n for ex in train_data:\n train_labels.update(ex.gold.cats.keys())\n if list(ex.gold.cats.values()).count(1.0) != 1:\n multilabel_found = True\n if not multilabel_found and not base_model:\n msg.warn(\n \"The textcat training instances look like they have \"\n \"mutually-exclusive classes. Remove the flag \"\n \"'--textcat-multilabel' to train a classifier with \"\n \"mutually-exclusive classes.\"\n )\n if not textcat_multilabel:\n for ex in train_data:\n train_labels.update(ex.gold.cats.keys())\n if list(ex.gold.cats.values()).count(1.0) != 1 and not base_model:\n msg.warn(\n \"Some textcat training instances do not have exactly \"\n \"one positive label. Modifying training options to \"\n \"include the flag '--textcat-multilabel' for classes \"\n \"that are not mutually exclusive.\"\n )\n nlp.get_pipe(\"textcat\").cfg[\"exclusive_classes\"] = False\n textcat_multilabel = True\n break\n if base_model and set(textcat_labels) != train_labels:\n msg.fail(\n f\"Cannot extend textcat model using data with different \"\n f\"labels. Base model labels: {textcat_labels}, training data \"\n f\"labels: {list(train_labels)}\",\n exits=1,\n )\n if textcat_multilabel:\n msg.text(\n f\"Textcat evaluation score: ROC AUC score macro-averaged across \"\n f\"the labels '{', '.join(textcat_labels)}'\"\n )\n elif textcat_positive_label and len(textcat_labels) == 2:\n msg.text(\n f\"Textcat evaluation score: F1-score for the \"\n f\"label '{textcat_positive_label}'\"\n )\n elif len(textcat_labels) > 1:\n if len(textcat_labels) == 2:\n msg.warn(\n \"If the textcat component is a binary classifier with \"\n \"exclusive classes, provide '--textcat_positive_label' for \"\n \"an evaluation on the positive class.\"\n )\n msg.text(\n f\"Textcat evaluation score: F1-score macro-averaged across \"\n f\"the labels '{', '.join(textcat_labels)}'\"\n )\n else:\n msg.fail(\n \"Unsupported textcat configuration. 
Use `spacy debug-data` \"\n \"for more information.\"\n )\n\n # fmt: off\n row_head, output_stats = _configure_training_output(pipeline, use_gpu, has_beam_widths)\n row_widths = [len(w) for w in row_head]\n row_settings = {\"widths\": row_widths, \"aligns\": tuple([\"r\" for i in row_head]), \"spacing\": 2}\n # fmt: on\n print(\"\")\n msg.row(row_head, **row_settings)\n msg.row([\"-\" * width for width in row_settings[\"widths\"]], **row_settings)\n try:\n iter_since_best = 0\n best_score = 0.0\n for i in range(n_iter):\n train_data = corpus.train_dataset(\n nlp,\n noise_level=noise_level,\n orth_variant_level=orth_variant_level,\n gold_preproc=gold_preproc,\n max_length=0,\n ignore_misaligned=True,\n )\n if raw_text:\n random.shuffle(raw_text)\n raw_batches = util.minibatch(\n (nlp.make_doc(rt[\"text\"]) for rt in raw_text), size=8\n )\n words_seen = 0\n with tqdm.tqdm(total=n_train_words, leave=False) as pbar:\n losses = {}\n for batch in util.minibatch_by_words(train_data, size=batch_sizes):\n if not batch:\n continue\n try:\n nlp.update(\n batch,\n sgd=optimizer,\n drop=next(dropout_rates),\n losses=losses,\n )\n except ValueError as e:\n err = \"Error during training\"\n if init_tok2vec:\n err += \" Did you provide the same parameters during 'train' as during 'pretrain'?\"\n msg.fail(err, f\"Original error message: {e}\", exits=1)\n if raw_text:\n # If raw text is available, perform 'rehearsal' updates,\n # which use unlabelled data to reduce overfitting.\n raw_batch = list(next(raw_batches))\n nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)\n docs = [ex.doc for ex in batch]\n if not int(os.environ.get(\"LOG_FRIENDLY\", 0)):\n pbar.update(sum(len(doc) for doc in docs))\n words_seen += sum(len(doc) for doc in docs)\n with nlp.use_params(optimizer.averages):\n util.set_env_log(False)\n epoch_model_path = output_path / f\"model{i}\"\n nlp.to_disk(epoch_model_path)\n nlp_loaded = util.load_model_from_path(epoch_model_path)\n for beam_width in eval_beam_widths:\n for name, component in nlp_loaded.pipeline:\n if hasattr(component, \"cfg\"):\n component.cfg[\"beam_width\"] = beam_width\n dev_dataset = list(\n corpus.dev_dataset(\n nlp_loaded,\n gold_preproc=gold_preproc,\n ignore_misaligned=True,\n )\n )\n nwords = sum(len(ex.doc) for ex in dev_dataset)\n start_time = timer()\n scorer = nlp_loaded.evaluate(dev_dataset, verbose=verbose)\n end_time = timer()\n if use_gpu < 0:\n gpu_wps = None\n cpu_wps = nwords / (end_time - start_time)\n else:\n gpu_wps = nwords / (end_time - start_time)\n with use_ops(\"numpy\"):\n nlp_loaded = util.load_model_from_path(epoch_model_path)\n for name, component in nlp_loaded.pipeline:\n if hasattr(component, \"cfg\"):\n component.cfg[\"beam_width\"] = beam_width\n dev_dataset = list(\n corpus.dev_dataset(\n nlp_loaded,\n gold_preproc=gold_preproc,\n ignore_misaligned=True,\n )\n )\n start_time = timer()\n scorer = nlp_loaded.evaluate(dev_dataset, verbose=verbose)\n end_time = timer()\n cpu_wps = nwords / (end_time - start_time)\n acc_loc = output_path / f\"model{i}\" / \"accuracy.json\"\n srsly.write_json(acc_loc, scorer.scores)\n\n # Update model meta.json\n meta[\"lang\"] = nlp.lang\n meta[\"pipeline\"] = nlp.pipe_names\n meta[\"spacy_version\"] = f\">={about.__version__}\"\n if beam_width == 1:\n meta[\"speed\"] = {\n \"nwords\": nwords,\n \"cpu\": cpu_wps,\n \"gpu\": gpu_wps,\n }\n meta.setdefault(\"accuracy\", {})\n for component in nlp.pipe_names:\n for metric in _get_metrics(component):\n meta[\"accuracy\"][metric] = scorer.scores[metric]\n 
else:\n meta.setdefault(\"beam_accuracy\", {})\n meta.setdefault(\"beam_speed\", {})\n for component in nlp.pipe_names:\n for metric in _get_metrics(component):\n meta[\"beam_accuracy\"][metric] = scorer.scores[metric]\n meta[\"beam_speed\"][beam_width] = {\n \"nwords\": nwords,\n \"cpu\": cpu_wps,\n \"gpu\": gpu_wps,\n }\n meta[\"vectors\"] = {\n \"width\": nlp.vocab.vectors_length,\n \"vectors\": len(nlp.vocab.vectors),\n \"keys\": nlp.vocab.vectors.n_keys,\n \"name\": nlp.vocab.vectors.name,\n }\n meta.setdefault(\"name\", f\"model{i}\")\n meta.setdefault(\"version\", version)\n meta[\"labels\"] = nlp.meta[\"labels\"]\n meta_loc = output_path / f\"model{i}\" / \"meta.json\"\n srsly.write_json(meta_loc, meta)\n util.set_env_log(verbose)\n\n progress = _get_progress(\n i,\n losses,\n scorer.scores,\n output_stats,\n beam_width=beam_width if has_beam_widths else None,\n cpu_wps=cpu_wps,\n gpu_wps=gpu_wps,\n )\n if i == 0 and \"textcat\" in pipeline:\n textcats_per_cat = scorer.scores.get(\"textcats_per_cat\", {})\n for cat, cat_score in textcats_per_cat.items():\n if cat_score.get(\"roc_auc_score\", 0) < 0:\n msg.warn(\n f\"Textcat ROC AUC score is undefined due to \"\n f\"only one value in label '{cat}'.\"\n )\n msg.row(progress, **row_settings)\n # Early stopping\n if n_early_stopping is not None:\n current_score = _score_for_model(meta)\n if current_score < best_score:\n iter_since_best += 1\n else:\n iter_since_best = 0\n best_score = current_score\n if iter_since_best >= n_early_stopping:\n msg.text(\n f\"Early stopping, best iteration is: {i - iter_since_best}\"\n )\n msg.text(\n f\"Best score = {best_score}; Final iteration score = {current_score}\"\n )\n break\n except Exception as e:\n msg.warn(f\"Aborting and saving final best model. Encountered exception: {e}\")\n finally:\n best_pipes = nlp.pipe_names\n if disabled_pipes:\n disabled_pipes.restore()\n with nlp.use_params(optimizer.averages):\n final_model_path = output_path / \"model-final\"\n nlp.to_disk(final_model_path)\n meta_loc = output_path / \"model-final\" / \"meta.json\"\n final_meta = srsly.read_json(meta_loc)\n final_meta.setdefault(\"accuracy\", {})\n final_meta[\"accuracy\"].update(meta.get(\"accuracy\", {}))\n final_meta.setdefault(\"speed\", {})\n final_meta[\"speed\"].setdefault(\"cpu\", None)\n final_meta[\"speed\"].setdefault(\"gpu\", None)\n meta.setdefault(\"speed\", {})\n meta[\"speed\"].setdefault(\"cpu\", None)\n meta[\"speed\"].setdefault(\"gpu\", None)\n # combine cpu and gpu speeds with the base model speeds\n if final_meta[\"speed\"][\"cpu\"] and meta[\"speed\"][\"cpu\"]:\n speed = _get_total_speed(\n [final_meta[\"speed\"][\"cpu\"], meta[\"speed\"][\"cpu\"]]\n )\n final_meta[\"speed\"][\"cpu\"] = speed\n if final_meta[\"speed\"][\"gpu\"] and meta[\"speed\"][\"gpu\"]:\n speed = _get_total_speed(\n [final_meta[\"speed\"][\"gpu\"], meta[\"speed\"][\"gpu\"]]\n )\n final_meta[\"speed\"][\"gpu\"] = speed\n # if there were no speeds to update, overwrite with meta\n if (\n final_meta[\"speed\"][\"cpu\"] is None\n and final_meta[\"speed\"][\"gpu\"] is None\n ):\n final_meta[\"speed\"].update(meta[\"speed\"])\n # note: beam speeds are not combined with the base model\n if has_beam_widths:\n final_meta.setdefault(\"beam_accuracy\", {})\n final_meta[\"beam_accuracy\"].update(meta.get(\"beam_accuracy\", {}))\n final_meta.setdefault(\"beam_speed\", {})\n final_meta[\"beam_speed\"].update(meta.get(\"beam_speed\", {}))\n srsly.write_json(meta_loc, final_meta)\n msg.good(\"Saved model to output directory\", 
final_model_path)\n with msg.loading(\"Creating best model...\"):\n best_model_path = _collate_best_model(final_meta, output_path, best_pipes)\n msg.good(\"Created best model\", best_model_path)", "def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n train_pipeline.run(spark_session, args.standard_data_path)", "def config(language):\n global k # Set The window size\n global ignore_U_activated # Set True to ignore the training data with Unknown senseid\n global vector_0_1 # Set True to convert vectors to \"binary form\" e.g.: [0,3,0,4]->[0,1,0,1]\n global remove_punctuations_activated # Set True to ignore all the punctuation tokens\n global lowercase_activated # Set True to convert all the tokens to lowercase\n global stemming_activated # Set True to do stemming on all the tokens\n global remove_stop_words_activated # Set True to ignore stop words\n global expand_synset_activated # Set True to involve synsets, hypernyms, hypornyms in the model\n global extract_4c_feature_activated # Set True to involve feature introduced in 4c in the requirement\n global extract_chi_square_activated # Set True to involve chi-square feature in 4d in the requirement\n global extract_pmi_activated # Set True to involve PMI in 4d in the requirement\n\n if language.__eq__(\"English\"):\n k = 13\n ignore_U_activated = True\n vector_0_1 = False\n remove_punctuations_activated = True\n lowercase_activated = True\n stemming_activated = True\n remove_stop_words_activated = True\n expand_synset_activated = False\n extract_4c_feature_activated = True\n extract_chi_square_activated = False\n extract_pmi_activated = False\n\n elif language.__eq__(\"Spanish\"):\n k = 13\n ignore_U_activated = True\n vector_0_1 = False\n remove_punctuations_activated = True\n lowercase_activated = True\n stemming_activated = True\n remove_stop_words_activated = True\n expand_synset_activated = False # not applicable to Spanish, set to False\n extract_4c_feature_activated = True\n 
extract_chi_square_activated = False\n extract_pmi_activated = False\n\n elif language.__eq__(\"Catalan\"):\n k = 13\n ignore_U_activated = True\n vector_0_1 = True\n remove_punctuations_activated = True\n lowercase_activated = True\n stemming_activated = False # not applicable to Catalan, set to False\n remove_stop_words_activated = False # not applicable to Catalan, set to False\n expand_synset_activated = False # not applicable to Catalan, set to False\n extract_4c_feature_activated = True\n extract_chi_square_activated = False\n extract_pmi_activated = True", "def prepare_spacy_pipeline():\n global nlp \n nlp = spacy.load(\"en_core_web_lg\", disable = [\"ner\"])\n \n return", "def main():\n\n st.title(\"Spacy-Streamlit NLP App\")\n models = ['ja_core_news_md', 'ja_core_news_sm']\n \n# 3つのモデルはサイズが異なっている\n# lg —> large\n# md —> medium\n# sm —> small\n# core —> モデルの能力として次のタスクを行えることを示す: general-purpose model with vocabulary, syntax, entities and word vectors\n# news —> ニュース記事のデータで学習されたことを示す\n\n choiced_model = st.sidebar.selectbox(\"Model\",models)\n\n nlp = load_model(choiced_model)\n\n menu = [\"MAIN\", \"Home\",\"NER\"]\n choice = st.sidebar.selectbox(\"Menu\",menu)\n \n if choice == \"MAIN\":\n st.subheader(\"visualize\")\n raw_text = st.text_area(\"Your Text\",\"Enter Text Here\")\n if st.button(\"Visualize\"):\n docx = nlp(raw_text)\n spacy_streamlit.visualize(models, docx)\n \n if choice == \"Home\":\n st.subheader(\"Tokenization\")\n raw_text = st.text_area(\"Your Text\",\"Enter Text Here\")\n docx = nlp(raw_text)\n if st.button(\"Tokenize\"):\n spacy_streamlit.visualize_tokens(docx,attrs=['text','pos_','dep_','ent_type_'])\n\n elif choice == \"NER\":\n st.subheader(\"Named Entity Recognition\")\n raw_text = st.text_area(\"Your Text\",\"Enter Text Here\")\n docx = nlp(raw_text)\n spacy_streamlit.visualize_ner(docx,labels=nlp.get_pipe('ner').labels)", "def initialize(self, context):\n self.initialized = True\n properties = context.system_properties\n # Contains the url parameter passed to the load request\n model_dir = properties.get(\"model_dir\") \n gpu_id = properties.get(\"gpu_id\")\n\n # Load Gluonts Model\n self.mx_model = self.load_model(model_dir)", "def main():\n args = parse_args()\n interactive_auth = InteractiveLoginAuthentication(tenant_id=os.getenv(\"TENANT_ID\"))\n work_space = Workspace.from_config(auth=interactive_auth)\n\n # Set up the dataset for training\n datastore = work_space.get_default_datastore()\n dataset = Dataset.File.from_files(path=(datastore, args.target_folder))\n\n # Set up the experiment for training\n experiment = Experiment(workspace=work_space, name=args.file.replace(\".py\", \"\"))\n # azureml._restclient.snapshots_client.SNAPSHOT_MAX_SIZE_BYTES = 2000000000\n config = ScriptRunConfig(\n source_directory=\".\",\n script=args.file,\n compute_target=\"cpu-cluster\",\n arguments=[\n \"--target_folder\",\n dataset.as_named_input(\"input\").as_mount(),\n \"--experiment\",\n True,\n \"--log_folder\",\n \"./logs\",\n ],\n )\n\n # Set up the Tensoflow/Keras environment\n environment = work_space.environments[args.file.replace(\".py\", \"\")]\n config.run_config.environment = environment\n\n # Run the experiment for training\n run = experiment.submit(config)\n aml_url = run.get_portal_url()\n print(\n \"Submitted to an Azure Machine Learning compute cluster. 
Click on the link below\"\n )\n print(\"\")\n print(aml_url)\n\n tboard = Tensorboard([run])\n # If successful, start() returns a string with the URI of the instance.\n tboard.start(start_browser=True)\n run.wait_for_completion(show_output=True)\n # After your job completes, be sure to stop() the streaming otherwise it will continue to run.\n print(\"Press enter to stop\")\n input()\n tboard.stop()\n\n # Register\n metrics = run.get_metrics()\n run.register_model(\n model_name=args.target_folder,\n tags={\"model\": \"LSTM\"},\n model_path=\"outputs/keras_lstm.h5\",\n model_framework=\"keras\",\n model_framework_version=\"2.2.4\",\n properties={\n \"train_loss\": metrics[\"train_loss\"][-1],\n \"val_loss\": metrics[\"val_loss\"][-1],\n \"data\": \"USD/TWD from {0} to {1}\".format(metrics[\"start\"], metrics[\"end\"]),\n \"epoch\": metrics[\"epoch\"],\n },\n )\n\n run.register_model(\n model_name=\"scaler\",\n tags={\"data\": \"USD/TWD from 1983-10-04\", \"model\": \"MinMaxScaler\"},\n model_path=\"outputs/scaler.pickle\",\n model_framework=\"sklearn\",\n )", "def spark_setup(self):\n # Update the global variables for config details\n globals()[\"spark_token\"] = self.spark_bot_token\n globals()[\"bot_email\"] = self.spark_bot_email\n\n sys.stderr.write(\"Spark Bot Email: \" + self.spark_bot_email + \"\\n\")\n sys.stderr.write(\"Spark Token: REDACTED\\n\")\n\n # Setup the Spark Connection\n globals()[\"spark\"] = CiscoSparkAPI(access_token=self.spark_bot_token)\n globals()[\"webhook\"] = self.setup_webhook(self.spark_bot_name,\n self.spark_bot_url)\n sys.stderr.write(\"Configuring Webhook. \\n\")\n sys.stderr.write(\"Webhook ID: \" + globals()[\"webhook\"].id + \"\\n\")", "def train_entry_point():", "def __init__(__self__, *,\n aad_profile: Optional[pulumi.Input['AADProfileArgs']] = None,\n addon_profiles: Optional[pulumi.Input[Mapping[str, pulumi.Input['AddonProfilesArgs']]]] = None,\n agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['NamedAgentPoolProfileArgs']]]] = None,\n cloud_provider_profile: Optional[pulumi.Input['CloudProviderProfileArgs']] = None,\n control_plane: Optional[pulumi.Input['ControlPlaneProfileArgs']] = None,\n enable_rbac: Optional[pulumi.Input[bool]] = None,\n features: Optional[pulumi.Input['ProvisionedClustersCommonPropertiesFeaturesArgs']] = None,\n http_proxy_config: Optional[pulumi.Input['HttpProxyConfigArgs']] = None,\n kubernetes_version: Optional[pulumi.Input[str]] = None,\n linux_profile: Optional[pulumi.Input['LinuxProfilePropertiesArgs']] = None,\n network_profile: Optional[pulumi.Input['NetworkProfileArgs']] = None,\n node_resource_group: Optional[pulumi.Input[str]] = None,\n windows_profile: Optional[pulumi.Input['WindowsProfileArgs']] = None):\n if aad_profile is not None:\n pulumi.set(__self__, \"aad_profile\", aad_profile)\n if addon_profiles is not None:\n pulumi.set(__self__, \"addon_profiles\", addon_profiles)\n if agent_pool_profiles is not None:\n pulumi.set(__self__, \"agent_pool_profiles\", agent_pool_profiles)\n if cloud_provider_profile is not None:\n pulumi.set(__self__, \"cloud_provider_profile\", cloud_provider_profile)\n if control_plane is not None:\n pulumi.set(__self__, \"control_plane\", control_plane)\n if enable_rbac is not None:\n pulumi.set(__self__, \"enable_rbac\", enable_rbac)\n if features is not None:\n pulumi.set(__self__, \"features\", features)\n if http_proxy_config is not None:\n pulumi.set(__self__, \"http_proxy_config\", http_proxy_config)\n if kubernetes_version is not None:\n pulumi.set(__self__, 
\"kubernetes_version\", kubernetes_version)\n if linux_profile is not None:\n pulumi.set(__self__, \"linux_profile\", linux_profile)\n if network_profile is not None:\n pulumi.set(__self__, \"network_profile\", network_profile)\n if node_resource_group is not None:\n pulumi.set(__self__, \"node_resource_group\", node_resource_group)\n if windows_profile is not None:\n pulumi.set(__self__, \"windows_profile\", windows_profile)", "def train(**kwargs):\n print(\"train(**kwargs) - kwargs: %s\" % (kwargs)) if debug_model else ''\n run_results = { \"status\": \"ok\",\n \"sys_info\": [],\n \"training\": [],\n }\n\n\n # Check if necessary local directories exist:\n if not os.path.exists(cfg.Retina_LocalDataRecords):\n print(\"[INFO] %s is not found locally, creating...\" % \n cfg.Retina_LocalDataRecords)\n os.makedirs(cfg.Retina_LocalDataRecords)\n if not os.path.exists(cfg.Retina_LocalModelsServe):\n print(\"[INFO] %s is not found locally, creating...\" % \n cfg.Retina_LocalModelsServe)\n os.makedirs(cfg.Retina_LocalModelsServe) \n\n # use the schema\n schema = cfg.TrainArgsSchema()\n # deserialize key-word arguments\n train_args = schema.load(kwargs)\n\n # Take parameters defined via deepaas by a user\n train_epochs = train_args['train_epochs']\n batch_size = train_args['batch_size']\n num_gpus = train_args['num_gpus']\n epochs_between_evals = train_args['epochs_between_evals']\n upload_back = train_args['upload_back']\n if debug_model:\n print(\"train_args:\", train_args)\n print(type(train_args['train_epochs']), type(train_args['batch_size']))\n print(\"Number of GPUs:\", train_args['num_gpus'], num_gpus)\n\n # from deep-nextcloud into the container\n e1=time.time()\n # check if retinopathy_tr.tfrecord.XX or retinopathy_va.tfrecord.XX files exist locally,\n # if not -> download them from the RemoteStorage\n train_files = 0\n val_files = 0\n for f in os.listdir(cfg.Retina_LocalDataRecords):\n f_path = os.path.join(cfg.Retina_LocalDataRecords, f)\n if (os.path.isfile(f_path) and cfg.Retina_TrainingData in f):\n train_files += 1\n if (os.path.isfile(f_path) and cfg.Retina_ValidationData in f):\n val_files += 1\n\n if train_files < 100 or val_files < 20:\n # Retina_RemoteDataRecords and Retina_LocalDataRecords are defined in config.py #vk\n print(\"[INFO] Either training or validation files NOT found locally, download them from %s\" % \n (cfg.Retina_RemoteDataRecords))\n output, error = rclone_copy(cfg.Retina_RemoteDataRecords, cfg.Retina_LocalDataRecords)\n if error:\n message = \"[ERROR] training data not copied. rclone returned: \" + error\n raise Exception(message)\n\n download_time=time.time()-e1\n time.sleep(60)\n\n e2=time.time()\n ### mimic retinopathy_main.py main()\n # we first delete all the FLAGS\n FLAGS = flags.FLAGS\n #FLAGS.unparse_flags()\n for name in list(FLAGS):\n delattr(FLAGS, name)\n\n tf.logging.set_verbosity(tf.logging.INFO)\n #tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\n # define default FLAGS for retinopathy_main and _run_loop\n retimain.define_retinopathy_flags(batch_size=str(batch_size),\n train_epochs=str(train_epochs),\n num_gpus=str(num_gpus),\n epochs_between_evals=str(epochs_between_evals))\n\n # build list of FLAG names and parse them via FLAGS(list)(IMPORTANT!) 
#vk\n flag_names = []\n for name in FLAGS:\n flag_names.append(name)\n\n # According to the docs, actual parsing happens by either calling\n # FLAGS(list_of_arguments) or by app.run()\n FLAGS(flag_names)\n # call actual training with the set flags\n with logger.benchmark_context(flags.FLAGS):\n graph_zip_path = retimain.run_retinopathy(flags.FLAGS)\n\n\n try:\n graph_zip_path = graph_zip_path.decode()\n except (UnicodeDecodeError, AttributeError):\n pass\n graph_zip_path = graph_zip_path.rstrip()\n\n print(\"[INFO] Call of the training script returned: \", graph_zip_path)\n training_time=time.time()-e2\n time.sleep(60)\n\n e3=time.time()\n # Retina_LocalModelsServe and Retina_RemoteModelsUpload are defined in config.py #vk\n if(upload_back and os.path.exists(graph_zip_path)):\n graph_zip_dir, graph_zip_name = os.path.split(graph_zip_path)\n print(\"[INFO] Uploading {} to {} ...\".format(graph_zip_name, \n cfg.Retina_RemoteModelsUpload))\n output, error = rclone_copy(graph_zip_path,\n os.path.join(cfg.Retina_RemoteModelsUpload, \n graph_zip_name))\n if error:\n print(\"[ERROR] rclone returned: {}\".format(error))\n else:\n # if there is no error, remove zip file and the graph directory\n savedmodel_dir, _ = os.path.splitext(graph_zip_name) # split name, ext\n savedmodel_path = os.path.join(graph_zip_dir, savedmodel_dir)\n ## Try to remove tree, if it exists\n print(\"[INFO] Uploaded, deleting local {} and {}...\".format(graph_zip_path,\n savedmodel_path))\n os.remove(graph_zip_path) # remove zipped file\n if os.path.exists(savedmodel_path):\n shutil.rmtree(savedmodel_path) # remove corresponding directory\n else:\n print(\"[INFO] Saved model path, {}, doesn't exitst!\".format(\n savedmodel_path)) \n else:\n print(\"[INFO] Created zip file of the graph, %s, was NOT uploaded!\" % graph_zip_path)\n\n upload_time=time.time()-e3\n\n train_files_size = 0\n val_files_size = 0\n for f in os.listdir(cfg.Retina_LocalDataRecords):\n f_path = os.path.join(cfg.Retina_LocalDataRecords, f)\n if (os.path.isfile(f_path) and cfg.Retina_TrainingData in f):\n train_files_size += os.stat(f_path).st_size\n if (os.path.isfile(f_path) and cfg.Retina_ValidationData in f):\n val_files_size += os.stat(f_path).st_size\n\n message = {\n \"Message\": \"Training finished!\",\n \"Download time\": download_time, \n \"Training time\": training_time,\n \"Upload time\": upload_time,\n \"Training set size\": convert_bytes(train_files_size), \n \"Validation set size\": convert_bytes(val_files_size)\n }\n return message", "def main():\n spark = create_spark_session()\n logging.info('Spark Session created')\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://dend-emr-project\"\n #input_data = './data/'\n #output_data = '/Users/daniel/Desktop/output/'\n logging.info(f'Set input path to {input_data}')\n logging.info(f'Set output path to {output_data}')\n \n copy_raw_data(spark, input_data, output_data)\n\n s3_data = restore_data_from_s3(spark, output_data)\n \n sas_desc_string = load_sas_desc_file(input_data)\n \n process_fact_table(spark, s3_data, output_data, sas_desc_string)\n \n process_dim_tables(spark, s3_data, output_data, sas_desc_string)\n\n data_quality_check(spark, output_data)\n \n logging.info('ETL process successfully finished.')", "def main():\n # see ML_Master.md\n # Title\n st.title(\"Jian's work \")\n\n # sidbar title image\n image = Image.open('C:/Local/Work/ML_Name/Code/Lib/my_nlp-word-cloud.jpg')\n st.sidebar.image(image, caption='NLP Cloud',\n use_column_width=True)\n st.sidebar.subheader(\"Natural 
Language Processing On the Go..\")\n st.markdown(\"\"\"\n \t# Description\n \t+ This is a Natural Language Processing(NLP) Based App useful for basic NLP task\n \tTokenization,NER,Sentiment,Summarization : See \n \t\"\"\")\n\n # Name Nationality\n if st.sidebar.checkbox(\"Name Nationality Recongition\"):\n st.subheader(\"Enter your names, seperated by , \")\n message = st.text_area(\"Enter Name\", \"Type Here ..\")\n if st.button(\"Analyze\"):\n nlp_name = nn.name_nationality(message)\n st.json(nlp_name)\n\n # Name entity recognition\n if st.sidebar.checkbox(\"Name Entity Recongition\"):\n st.subheader(\"Enter your website \")\n message = st.text_area(\n \"Input: article web link;\\n Output : person's names in article\\n Highlight the person's name in the article\\n Example : https://finance.yahoo.com/news/why-us-china-relations-are-worsening-on-almost-every-front-213714768.html\")\n if st.button(\"Analyze\"):\n nlp_text = ner.Get_html_text(message)\n #nlp_text = nlp_text.replace(\"University \", \"**University** \")\n (nlp_text, person_name_all) = ner.get_person_name(nlp_text)\n st.write(person_name_all)\n st.markdown(nlp_text)\n \n # Amazon Best seller \n # Give a search lis\n if st.sidebar.checkbox(\"Amazon best seller list \"):\n st.subheader(\"Enter your product search item \")\n message = st.text_area(\"Enter Name\", \"Type Here ..\")\n if st.button(\"Analyze\"):\n best_seller.ML_amazon_best_seller(message)\n #st.json(nlp_name) \n \n # give a product ID : download Amazon Product Information \n # see ML_Master.md\n import bs_lib\n if st.sidebar.checkbox(\"Amazon Product\"):\n st.subheader(\"Enter your product ID \")\n message = st.text_area(\n \"ID: eg. B079K4TVPG\")\n if st.button(\"Analyze\"): \n amazon_product_dict = bs_lib.get_amazon_product_info(message)\n st.write(amazon_product_dict)\n \n \n # \n st.sidebar.subheader(\"About App\")\n st.sidebar.text(\"NLP with Streamlit\")\n st.sidebar.info(\"Cudos to the Streamlit Team\")\n\n st.sidebar.subheader(\"By\")\n st.sidebar.text(\"Jian Tang\")", "def initialize_multigpu_train(\n rdzv_endpoint: str,\n rank: int,\n local_rank: int,\n gpu_ids: List[int],\n world_size: int,\n ):\n\n host, port = rdzv_endpoint.split(\":\")\n os.environ[\"MASTER_ADDR\"] = host\n os.environ[\"MASTER_PORT\"] = port\n os.environ[\"LOCAL_WORLD_SIZE\"] = str(len(gpu_ids))\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n os.environ[\"LOCAL_RANK\"] = str(local_rank)\n os.environ[\"RANK\"] = str(rank)", "def __init__(__self__, *,\n accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['AcceleratorConfigArgs']]]] = None,\n advanced_machine_features: Optional[pulumi.Input['AdvancedMachineFeaturesArgs']] = None,\n boot_disk_kms_key: Optional[pulumi.Input[str]] = None,\n confidential_nodes: Optional[pulumi.Input['ConfidentialNodesArgs']] = None,\n disk_size_gb: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n ephemeral_storage_config: Optional[pulumi.Input['EphemeralStorageConfigArgs']] = None,\n ephemeral_storage_local_ssd_config: Optional[pulumi.Input['EphemeralStorageLocalSsdConfigArgs']] = None,\n fast_socket: Optional[pulumi.Input['FastSocketArgs']] = None,\n gcfs_config: Optional[pulumi.Input['GcfsConfigArgs']] = None,\n gvnic: Optional[pulumi.Input['VirtualNICArgs']] = None,\n image_type: Optional[pulumi.Input[str]] = None,\n kubelet_config: Optional[pulumi.Input['NodeKubeletConfigArgs']] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n linux_node_config: 
Optional[pulumi.Input['LinuxNodeConfigArgs']] = None,\n local_nvme_ssd_block_config: Optional[pulumi.Input['LocalNvmeSsdBlockConfigArgs']] = None,\n local_ssd_count: Optional[pulumi.Input[int]] = None,\n logging_config: Optional[pulumi.Input['NodePoolLoggingConfigArgs']] = None,\n machine_type: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n min_cpu_platform: Optional[pulumi.Input[str]] = None,\n node_group: Optional[pulumi.Input[str]] = None,\n oauth_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n preemptible: Optional[pulumi.Input[bool]] = None,\n reservation_affinity: Optional[pulumi.Input['ReservationAffinityArgs']] = None,\n resource_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n sandbox_config: Optional[pulumi.Input['SandboxConfigArgs']] = None,\n service_account: Optional[pulumi.Input[str]] = None,\n shielded_instance_config: Optional[pulumi.Input['ShieldedInstanceConfigArgs']] = None,\n spot: Optional[pulumi.Input[bool]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n taints: Optional[pulumi.Input[Sequence[pulumi.Input['NodeTaintArgs']]]] = None,\n windows_node_config: Optional[pulumi.Input['WindowsNodeConfigArgs']] = None,\n workload_metadata_config: Optional[pulumi.Input['WorkloadMetadataConfigArgs']] = None):\n if accelerators is not None:\n pulumi.set(__self__, \"accelerators\", accelerators)\n if advanced_machine_features is not None:\n pulumi.set(__self__, \"advanced_machine_features\", advanced_machine_features)\n if boot_disk_kms_key is not None:\n pulumi.set(__self__, \"boot_disk_kms_key\", boot_disk_kms_key)\n if confidential_nodes is not None:\n pulumi.set(__self__, \"confidential_nodes\", confidential_nodes)\n if disk_size_gb is not None:\n pulumi.set(__self__, \"disk_size_gb\", disk_size_gb)\n if disk_type is not None:\n pulumi.set(__self__, \"disk_type\", disk_type)\n if ephemeral_storage_config is not None:\n pulumi.set(__self__, \"ephemeral_storage_config\", ephemeral_storage_config)\n if ephemeral_storage_local_ssd_config is not None:\n pulumi.set(__self__, \"ephemeral_storage_local_ssd_config\", ephemeral_storage_local_ssd_config)\n if fast_socket is not None:\n pulumi.set(__self__, \"fast_socket\", fast_socket)\n if gcfs_config is not None:\n pulumi.set(__self__, \"gcfs_config\", gcfs_config)\n if gvnic is not None:\n pulumi.set(__self__, \"gvnic\", gvnic)\n if image_type is not None:\n pulumi.set(__self__, \"image_type\", image_type)\n if kubelet_config is not None:\n pulumi.set(__self__, \"kubelet_config\", kubelet_config)\n if labels is not None:\n pulumi.set(__self__, \"labels\", labels)\n if linux_node_config is not None:\n pulumi.set(__self__, \"linux_node_config\", linux_node_config)\n if local_nvme_ssd_block_config is not None:\n pulumi.set(__self__, \"local_nvme_ssd_block_config\", local_nvme_ssd_block_config)\n if local_ssd_count is not None:\n pulumi.set(__self__, \"local_ssd_count\", local_ssd_count)\n if logging_config is not None:\n pulumi.set(__self__, \"logging_config\", logging_config)\n if machine_type is not None:\n pulumi.set(__self__, \"machine_type\", machine_type)\n if metadata is not None:\n pulumi.set(__self__, \"metadata\", metadata)\n if min_cpu_platform is not None:\n pulumi.set(__self__, \"min_cpu_platform\", min_cpu_platform)\n if node_group is not None:\n pulumi.set(__self__, \"node_group\", node_group)\n if oauth_scopes is not None:\n pulumi.set(__self__, \"oauth_scopes\", oauth_scopes)\n 
if preemptible is not None:\n pulumi.set(__self__, \"preemptible\", preemptible)\n if reservation_affinity is not None:\n pulumi.set(__self__, \"reservation_affinity\", reservation_affinity)\n if resource_labels is not None:\n pulumi.set(__self__, \"resource_labels\", resource_labels)\n if sandbox_config is not None:\n pulumi.set(__self__, \"sandbox_config\", sandbox_config)\n if service_account is not None:\n pulumi.set(__self__, \"service_account\", service_account)\n if shielded_instance_config is not None:\n pulumi.set(__self__, \"shielded_instance_config\", shielded_instance_config)\n if spot is not None:\n pulumi.set(__self__, \"spot\", spot)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if taints is not None:\n pulumi.set(__self__, \"taints\", taints)\n if windows_node_config is not None:\n pulumi.set(__self__, \"windows_node_config\", windows_node_config)\n if workload_metadata_config is not None:\n pulumi.set(__self__, \"workload_metadata_config\", workload_metadata_config)", "def train(log_dir):\n\n # Create train and test environments\n print(\n colorize(\n f\"INFO: You are training in the {ENV_NAME} environment.\",\n \"cyan\",\n bold=True,\n )\n )\n env = get_env_from_name(ENV_NAME, ENV_SEED)\n test_env = get_env_from_name(ENV_NAME, ENV_SEED)\n\n # Set initial learning rates\n lr_a, lr_l, lr_c = (\n ALG_PARAMS[\"lr_a\"],\n ALG_PARAMS[\"lr_l\"],\n ALG_PARAMS[\"lr_c\"],\n )\n lr_a_now = ALG_PARAMS[\"lr_a\"] # learning rate for actor, lambda and alpha\n lr_l_now = ALG_PARAMS[\"lr_l\"] # learning rate for Lyapunov critic\n lr_c_now = ALG_PARAMS[\"lr_c\"] # learning rate for q critic\n\n # Get observation and action space dimension and limits from the environment\n s_dim = env.observation_space.shape[0]\n a_dim = env.action_space.shape[0]\n a_lowerbound = env.action_space.low\n a_upperbound = env.action_space.high\n\n # Create the Agent\n policy = LAC(a_dim, s_dim, act_limits={\"low\": a_lowerbound, \"high\": a_upperbound})\n\n # Load model if retraining is selected\n if TRAIN_PARAMS[\"continue_training\"]:\n\n # Create retrain model path\n retrain_model_folder = TRAIN_PARAMS[\"continue_model_folder\"]\n retrain_model_path = osp.abspath(\n osp.join(log_dir, \"../..\", TRAIN_PARAMS[\"continue_model_folder\"])\n )\n\n # Check if retrain model exists if not throw error\n if not osp.exists(retrain_model_path):\n print(\n colorize(\n (\n \"ERROR: Shutting down training since the model you specified \"\n f\"in the `continue_model_folder` `{retrain_model_folder}` \"\n f\"argument was not found for the `{ENV_NAME}` environment.\"\n ),\n \"red\",\n bold=True,\n )\n )\n sys.exit(0)\n\n # Load old model\n print(\n colorize(\n f\"INFO: Restoring model `{retrain_model_path}`.\", \"cyan\", bold=True\n )\n )\n result = policy.restore(\n osp.abspath(osp.join(retrain_model_path, \"policy\")),\n restore_lagrance_multipliers=(not ALG_PARAMS[\"reset_lagrance_multipliers\"]),\n )\n if not result:\n print(\n colorize(\n \"ERROR: Shuting down training as something went wrong while \"\n \"loading \"\n f\"model `{retrain_model_folder}`.\",\n \"red\",\n bold=True,\n )\n )\n sys.exit(0)\n\n # Create new storage folder\n log_dir_split = log_dir.split(\"/\")\n log_dir_split[-2] = (\n \"_\".join(TRAIN_PARAMS[\"continue_model_folder\"].split(\"/\")) + \"_finetune\"\n )\n log_dir = \"/\".join(log_dir_split)\n else:\n print(colorize(f\"INFO: Train new model `{log_dir}`\", \"cyan\", bold=True))\n\n # Print logging folder path\n print(colorize(f\"INFO: Logging results to `{log_dir}`.\", \"cyan\", 
bold=True))\n\n # Create replay memory buffer\n pool = Pool(\n s_dim=s_dim,\n a_dim=a_dim,\n store_last_n_paths=TRAIN_PARAMS[\"num_of_training_paths\"],\n memory_capacity=ALG_PARAMS[\"memory_capacity\"],\n min_memory_size=ALG_PARAMS[\"min_memory_size\"],\n )\n\n # Setup logger and log hyperparameters\n logger.configure(dir=log_dir, format_strs=[\"csv\"])\n logger.logkv(\"tau\", ALG_PARAMS[\"tau\"])\n logger.logkv(\"alpha3\", ALG_PARAMS[\"alpha3\"])\n logger.logkv(\"batch_size\", ALG_PARAMS[\"batch_size\"])\n logger.logkv(\"target_entropy\", policy.target_entropy)\n\n ####################################################\n # Training loop ####################################\n ####################################################\n\n # Setup training loop parameters\n t1 = time.time()\n global_step = 0\n global_episodes = 0\n last_training_paths = deque(maxlen=TRAIN_PARAMS[\"num_of_training_paths\"])\n training_started = False\n\n # Train the agent in the environment until max_episodes has been reached\n print(colorize(\"INFO: Training...\\n\", \"cyan\", bold=True))\n while 1: # Keep running episodes until global step has been reached\n\n # Create variable to store information about the current path\n if policy.use_lyapunov:\n current_path = {\n \"rewards\": [],\n \"lyapunov_error\": [],\n \"alpha\": [],\n \"lambda\": [],\n \"entropy\": [],\n \"a_loss\": [],\n \"alpha_loss\": [],\n \"lambda_loss\": [],\n }\n else:\n current_path = {\n \"rewards\": [],\n \"critic_error\": [],\n \"alpha\": [],\n \"entropy\": [],\n \"a_loss\": [],\n \"alpha_loss\": [],\n }\n\n # Reset environment\n s = env.reset()\n\n # Training Episode loop\n for jj in range(ENVS_PARAMS[ENV_NAME][\"max_ep_steps\"]):\n\n # Break out of loop if global steps have been reached\n if global_step >= TRAIN_PARAMS[\"max_global_steps\"]:\n\n # Print step count, save model and stop the program\n print(\n colorize(\n f\"\\nINFO: Training stopped after {global_step} steps.\",\n \"cyan\",\n bold=True,\n )\n )\n print(\n colorize(\n \"INFO: Running time: {}\".format(time.time() - t1),\n \"cyan\",\n bold=True,\n )\n )\n print(colorize(\"INFO: Saving Model\", \"cyan\", bold=True))\n policy.save_result(log_dir)\n return\n\n # Save intermediate checkpoints if requested\n if TRAIN_PARAMS[\"save_checkpoints\"]:\n if (\n global_step % TRAIN_PARAMS[\"checkpoint_save_freq\"] == 0\n and global_step != 0\n ):\n\n # Create intermediate result checkpoint folder\n checkpoint_save_path = osp.abspath(\n osp.join(log_dir, \"checkpoints\", \"step_\" + str(jj))\n )\n os.makedirs(checkpoint_save_path, exist_ok=True)\n\n # Save intermediate checkpoint\n policy.save_result(checkpoint_save_path)\n\n # Render environment if requested\n if ENVS_PARAMS[ENV_NAME][\"eval_render\"]:\n env.render()\n\n # Retrieve (scaled) action based on the current policy\n # NOTE (rickstaa): The scaling operation is already performed inside the\n # policy based on the `act_limits` you supplied.\n a = policy.choose_action(s)\n\n # Perform action in env\n s_, r, done, _ = env.step(a)\n\n # Increment global step count\n if training_started:\n global_step += 1\n\n # Stop episode if max_steps has been reached\n if jj == ENVS_PARAMS[ENV_NAME][\"max_ep_steps\"] - 1:\n done = True\n terminal = 1.0 if done else 0.0\n\n # Store experience in replay buffer\n pool.store(s, a, r, terminal, s_)\n\n # Optimize network weights and lagrance multipliers\n if (\n pool.memory_pointer > ALG_PARAMS[\"min_memory_size\"]\n and global_step % ALG_PARAMS[\"steps_per_cycle\"] == 0\n ):\n training_started = 
True\n\n # Perform STG a set number of times (train per cycle)\n for _ in range(ALG_PARAMS[\"train_per_cycle\"]):\n batch = pool.sample(ALG_PARAMS[\"batch_size\"])\n if policy.use_lyapunov:\n (\n labda,\n alpha,\n l_loss,\n entropy,\n a_loss,\n alpha_loss,\n labda_loss,\n ) = policy.learn(lr_a_now, lr_l_now, lr_a, lr_c_now, batch)\n else:\n alpha, loss_q, entropy, a_loss, alpha_loss = policy.learn(\n lr_a_now, lr_l_now, lr_a, lr_c_now, batch\n )\n\n # Store current path results\n if training_started:\n if policy.use_lyapunov:\n current_path[\"rewards\"].append(r)\n current_path[\"lyapunov_error\"].append(l_loss)\n current_path[\"alpha\"].append(alpha)\n current_path[\"lambda\"].append(labda)\n current_path[\"entropy\"].append(entropy)\n current_path[\"a_loss\"].append(a_loss)\n current_path[\"alpha_loss\"].append(alpha_loss)\n current_path[\"lambda_loss\"].append(labda_loss)\n else:\n current_path[\"rewards\"].append(r)\n current_path[\"critic_error\"].append(loss_q.numpy())\n current_path[\"alpha\"].append(alpha.numpy())\n current_path[\"entropy\"].append(entropy.numpy())\n current_path[\"a_loss\"].append(\n a_loss.numpy()\n ) # Improve: Check if this is the fastest way\n current_path[\"alpha_loss\"].append(alpha_loss)\n\n # Evalute the current policy performance and log the results\n if (\n training_started\n and global_step % TRAIN_PARAMS[\"evaluation_frequency\"] == 0\n and global_step > 0\n ):\n logger.logkv(\"total_timesteps\", global_step)\n training_diagnostics = evaluate_training_rollouts(last_training_paths)\n if training_diagnostics is not None:\n if TRAIN_PARAMS[\"num_of_evaluation_paths\"] > 0:\n eval_diagnostics = training_evaluation(test_env, policy)\n [\n logger.logkv(key, eval_diagnostics[key])\n for key in eval_diagnostics.keys()\n ]\n training_diagnostics.pop(\"return\")\n [\n logger.logkv(key, training_diagnostics[key])\n for key in training_diagnostics.keys()\n ]\n logger.logkv(\"lr_a\", lr_a_now)\n if policy.use_lyapunov:\n logger.logkv(\"lr_l\", lr_l_now)\n else:\n logger.logkv(\"lr_c\", lr_c_now)\n string_to_print = [\"time_step:\", str(global_step), \"|\"]\n if TRAIN_PARAMS[\"num_of_evaluation_paths\"] > 0:\n [\n string_to_print.extend(\n [key, \":\", str(eval_diagnostics[key]), \"|\"]\n )\n for key in eval_diagnostics.keys()\n ]\n [\n string_to_print.extend(\n [key, \":\", str(round(training_diagnostics[key], 2)), \"|\"]\n )\n for key in training_diagnostics.keys()\n ]\n prefix = (\n colorize(\"LAC|\", \"green\")\n if ALG_PARAMS[\"use_lyapunov\"]\n else colorize(\"SAC|\", \"yellow\")\n )\n print(\n colorize(prefix, \"yellow\", bold=True) + \"\".join(string_to_print)\n )\n logger.dumpkvs()\n\n # Update state\n s = s_\n\n # Check if episode is done (continue to next episode)\n if done:\n\n # Store paths\n if training_started:\n last_training_paths.appendleft(current_path)\n\n # Decay learning rates\n frac = 1.0 - (global_step - 1.0) / TRAIN_PARAMS[\"max_global_steps\"]\n lr_a_now = lr_a * frac # learning rate for actor, lambda, alpha\n lr_l_now = lr_l * frac # learning rate for Lyapunov critic\n lr_c_now = lr_c * frac # learning rate for q critic\n break # Continue to next episode\n\n # Increase episode counter\n global_episodes += 1", "def start(context):\n context.run(\"python hellotensorflow/hello.py\")", "def main():\n nlp = spacy.load(\"en_core_web_sm\")\n\n notebook_dir = os.getcwd()\n situ_df = pd.read_csv('data/interim/calltaker_situation.csv', \n keep_default_na = False, \n converters = {'sop': eval})\n doc_term_bow, corpus, dictionary = 
get_dct_dtmatrix(nlp, situ_df['sop'])\n tfidf_situ = TfidfModel(doc_term_bow)\n tfidf_mtx = bow2tfidf(doc_term_bow, tfidf_situ)\n km_190 = KMeans(n_clusters = 190, random_state = 2020).fit(tfidf_mtx)\n\n situ_topics_kmeans_tfidf = situ_df.copy()\n situ_topics_kmeans_tfidf['cluster'] = km_190.labels_\n situ_topics_kmeans_tfidf = situ_topics_kmeans_tfidf.sort_values(by = ['cluster', 'type', 'juri'], ignore_index = True)\n situ_topics_kmeans_tfidf['situ_lst'] = situ_topics_kmeans_tfidf['situation'].apply(lambda x: [x])\n situ_topics_kmeans_tfidf.to_csv('data/interim/situ_topics_kmeans_tfidf.csv', index = False)", "def main():\n\n print(\"Initiating Spark session...\")\n print('-' * 50)\n spark = create_spark_session()\n \n # Use these settings if you want to test on the full\n # dataset, but it takes a LONG time.\n song_input_data = config['AWS']['SONG_DATA']\n log_input_data = config['AWS']['LOG_DATA']\n \n # Uncomment the two lines if you want to test on\n # minimal data\n #song_input_data = config['AWS']['SINGLE_SONG_DATA']\n #log_input_data = config['AWS']['SINGLE_LOG_DATA']\n \n output_data = config['AWS']['OUTPUT_DATA']\n \n print('-' * 50)\n print(\"Processing song data...\")\n print('-' * 50)\n print('')\n process_song_data(spark, song_input_data, output_data)\n \n print('-' * 50) \n print(\"Processing log data...\")\n print('-' * 50)\n print('')\n process_log_data(spark, song_input_data, log_input_data, output_data)", "def __init__(__self__, *,\n is_enabled: pulumi.Input[bool],\n is_v1_enabled: pulumi.Input[bool],\n is_v3_enabled: pulumi.Input[bool],\n site_name: pulumi.Input[str],\n app_id: Optional[pulumi.Input[str]] = None,\n e_tag: Optional[pulumi.Input[str]] = None,\n is_block_user_upload_enabled: Optional[pulumi.Input[bool]] = None,\n is_detailed_logging_enabled: Optional[pulumi.Input[bool]] = None,\n is_endpoint_parameters_enabled: Optional[pulumi.Input[bool]] = None,\n is_no_storage_enabled: Optional[pulumi.Input[bool]] = None,\n is_secure_site_enabled: Optional[pulumi.Input[bool]] = None,\n is_web_chat_speech_enabled: Optional[pulumi.Input[bool]] = None,\n is_webchat_preview_enabled: Optional[pulumi.Input[bool]] = None,\n tenant_id: Optional[pulumi.Input[str]] = None,\n trusted_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"is_enabled\", is_enabled)\n pulumi.set(__self__, \"is_v1_enabled\", is_v1_enabled)\n pulumi.set(__self__, \"is_v3_enabled\", is_v3_enabled)\n pulumi.set(__self__, \"site_name\", site_name)\n if app_id is not None:\n pulumi.set(__self__, \"app_id\", app_id)\n if e_tag is not None:\n pulumi.set(__self__, \"e_tag\", e_tag)\n if is_block_user_upload_enabled is not None:\n pulumi.set(__self__, \"is_block_user_upload_enabled\", is_block_user_upload_enabled)\n if is_detailed_logging_enabled is not None:\n pulumi.set(__self__, \"is_detailed_logging_enabled\", is_detailed_logging_enabled)\n if is_endpoint_parameters_enabled is not None:\n pulumi.set(__self__, \"is_endpoint_parameters_enabled\", is_endpoint_parameters_enabled)\n if is_no_storage_enabled is not None:\n pulumi.set(__self__, \"is_no_storage_enabled\", is_no_storage_enabled)\n if is_secure_site_enabled is not None:\n pulumi.set(__self__, \"is_secure_site_enabled\", is_secure_site_enabled)\n if is_web_chat_speech_enabled is None:\n is_web_chat_speech_enabled = False\n if is_web_chat_speech_enabled is not None:\n pulumi.set(__self__, \"is_web_chat_speech_enabled\", is_web_chat_speech_enabled)\n if is_webchat_preview_enabled is None:\n 
is_webchat_preview_enabled = False\n if is_webchat_preview_enabled is not None:\n pulumi.set(__self__, \"is_webchat_preview_enabled\", is_webchat_preview_enabled)\n if tenant_id is not None:\n pulumi.set(__self__, \"tenant_id\", tenant_id)\n if trusted_origins is not None:\n pulumi.set(__self__, \"trusted_origins\", trusted_origins)", "def main(input_filepath, output_model_filepath):\n logger = logging.getLogger(__name__)\n logger.info('training hotel cluster embeddings models')\n\n input_file = os.path.join(input_filepath, 'sentences.pkl')\n output_model_file = os.path.join(output_model_filepath, 'hotelcluster2vec.bin')\n\n train(input_file, output_model_file)", "def singularity_start(self, image):\n env_vars = self.action.get('env', {})\n\n for s in self.action.get('secrets', []):\n env_vars.update({s: os.environ[s]})\n\n for e, v in self.env.items():\n env_vars.update({e: v})\n\n env_vars.update({'HOME': os.environ['HOME']})\n\n # sets the env variables\n for k, v in env_vars.items():\n Client.setenv(k, v)\n\n e = Client.run(image=self.generate_image_name(image),\n args=' '.join(self.action.get('args', '')),\n return_result=True)\n return e['return_code']", "def dist_env():\n trainer_id = int(os.getenv(\"PADDLE_TRAINER_ID\", \"0\"))\n num_trainers = 1\n training_role = os.getenv(\"PADDLE_TRAINING_ROLE\", \"TRAINER\")\n assert (training_role == \"PSERVER\" or training_role == \"TRAINER\")\n\n # - PADDLE_TRAINER_ENDPOINTS means nccl2 mode.\n # - PADDLE_PSERVER_ENDPOINTS means pserver mode.\n # - PADDLE_CURRENT_ENDPOINT means current process endpoint.\n trainer_endpoints = os.getenv(\"PADDLE_TRAINER_ENDPOINTS\")\n pserver_endpoints = os.getenv(\"PADDLE_PSERVER_ENDPOINTS\")\n current_endpoint = os.getenv(\"PADDLE_CURRENT_ENDPOINT\")\n if trainer_endpoints:\n trainer_endpoints = trainer_endpoints.split(\",\")\n num_trainers = len(trainer_endpoints)\n elif pserver_endpoints:\n num_trainers = int(os.getenv(\"PADDLE_TRAINERS_NUM\"))\n\n return {\n \"trainer_id\": trainer_id,\n \"num_trainers\": num_trainers,\n \"current_endpoint\": current_endpoint,\n \"training_role\": training_role,\n \"pserver_endpoints\": pserver_endpoints,\n \"trainer_endpoints\": trainer_endpoints\n }", "def setup_tf_environment() -> None:\n _setup_cpu_environment()\n _setup_gpu_environment()", "def __init__(__self__, *,\n application_insights_id: pulumi.Input[str],\n identity: pulumi.Input['WorkspaceIdentityArgs'],\n key_vault_id: pulumi.Input[str],\n resource_group_name: pulumi.Input[str],\n storage_account_id: pulumi.Input[str],\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input['WorkspaceEncryptionArgs']] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None):\n pulumi.set(__self__, \"application_insights_id\", application_insights_id)\n pulumi.set(__self__, \"identity\", identity)\n pulumi.set(__self__, 
\"key_vault_id\", key_vault_id)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"storage_account_id\", storage_account_id)\n if container_registry_id is not None:\n pulumi.set(__self__, \"container_registry_id\", container_registry_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if encryption is not None:\n pulumi.set(__self__, \"encryption\", encryption)\n if friendly_name is not None:\n pulumi.set(__self__, \"friendly_name\", friendly_name)\n if high_business_impact is not None:\n pulumi.set(__self__, \"high_business_impact\", high_business_impact)\n if image_build_compute_name is not None:\n pulumi.set(__self__, \"image_build_compute_name\", image_build_compute_name)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if primary_user_assigned_identity is not None:\n pulumi.set(__self__, \"primary_user_assigned_identity\", primary_user_assigned_identity)\n if public_access_behind_virtual_network_enabled is not None:\n warnings.warn(\"\"\"`public_access_behind_virtual_network_enabled` will be removed in favour of the property `public_network_access_enabled` in version 4.0 of the AzureRM Provider.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"public_access_behind_virtual_network_enabled is deprecated: `public_access_behind_virtual_network_enabled` will be removed in favour of the property `public_network_access_enabled` in version 4.0 of the AzureRM Provider.\"\"\")\n if public_access_behind_virtual_network_enabled is not None:\n pulumi.set(__self__, \"public_access_behind_virtual_network_enabled\", public_access_behind_virtual_network_enabled)\n if public_network_access_enabled is not None:\n pulumi.set(__self__, \"public_network_access_enabled\", public_network_access_enabled)\n if sku_name is not None:\n pulumi.set(__self__, \"sku_name\", sku_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if v1_legacy_mode_enabled is not None:\n pulumi.set(__self__, \"v1_legacy_mode_enabled\", v1_legacy_mode_enabled)", "def __init__(self,\n env_name='blobble-world-v0'\n ):\n self._env_name = env_name\n\n # Take a timestamp. 
This will be used for any output files created in the output folder\n self._timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n\n # Create training and evaluation environments\n self._train_py_env = suite_gym.load(self._env_name)\n self._eval_py_env = suite_gym.load(self._env_name)\n\n # Convert the training and test environments to Tensors\n self._train_env = tf_py_environment.TFPyEnvironment(self._train_py_env)\n self._eval_env = tf_py_environment.TFPyEnvironment(self._eval_py_env)\n print('=====================================================')\n print('Environments created for : ', self._env_name)\n print('Training Environment')\n print(' Observation Spec:')\n print(' ', self._train_env.time_step_spec().observation)\n print(' Reward Spec:')\n print(' ', self._train_env.time_step_spec().reward)\n print(' Action Spec:')\n print(' ', self._train_env.action_spec())\n print('Evaluation Environment')\n print(' Observation Spec:')\n print(' ', self._eval_env.time_step_spec().observation)\n print(' Reward Spec:')\n print(' ', self._eval_env.time_step_spec().reward)\n print(' Action Spec:')\n print(' ', self._eval_env.action_spec())\n print('=====================================================')\n\n self._config = BlobbleConfig('blobble_config.ini')\n self._config.print_config()\n\n # Get the demonstration parameters and output folder. We don't need these just yet but it's\n # good to do now in case there is an error in the config file (exception will be thrown)\n self._output_folder = (self._config.get_output_params()['output_folder'])\n\n self._num_demo_episodes = int(self._config.get_output_params()['num_demonstration_episodes'])\n demo_video = (self._config.get_output_params()['demonstration_video'])\n if demo_video == 'True':\n self._demo_video = True\n else:\n self._demo_video = False\n\n # Get and check the advanced learning parameters\n self._learning_rate = float(self._config.get_learning_adv_params()['learning_rate'])\n self._fc_layer_params = tuple(self._config.get_learning_adv_params()['fc_layer_params'].split(','))\n\n print('Create and train a neural network agent')\n self._neural_network_agent = create_neural_network_agent(self._train_env,\n self._learning_rate,\n self._fc_layer_params)\n\n learning_params = self._config.get_learning_params()\n train_neural_network(self._neural_network_agent,\n self._train_env,\n self._eval_env,\n num_train_iterations=learning_params['training_iterations'],\n log_interval=learning_params['training_log_interval'],\n eval_interval=learning_params['eval_interval'],\n num_eval_episodes=learning_params['num_eval_episodes'],\n replay_buffer_max_length=learning_params['replay_buffer_max_length'],\n collect_steps_per_iteration=learning_params['collect_steps_per_iteration'],\n output_folder=self._output_folder,\n timestamp=self._timestamp)", "def main():\n run = Run.get_context()\n try:\n work_space = run.experiment.workspace\n except AttributeError:\n interactive_auth = InteractiveLoginAuthentication(\n tenant_id=os.getenv(\"TENANT_ID\")\n )\n work_space = Workspace.from_config(auth=interactive_auth)\n environment = work_space.environments[\"train_lstm\"]\n model = Model(work_space, \"currency\")\n service_name = \"currency-service\"\n inference_config = InferenceConfig(\n entry_script=\"predict_currency.py\", environment=environment\n )\n aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)\n scaler = Model(work_space, name=\"scaler\", version=1)\n service = Model.deploy(\n workspace=work_space,\n 
name=service_name,\n models=[model, scaler],\n inference_config=inference_config,\n deployment_config=aci_config,\n overwrite=True,\n )\n service.wait_for_deployment(show_output=True)\n print(service.get_logs())\n print(service.scoring_uri)", "def initialize(self, context):\n\n properties = context.system_properties\n self.map_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.device = torch.device(\n self.map_location + \":\" + str(properties.get(\"gpu_id\"))\n if torch.cuda.is_available()\n else self.map_location\n )\n self.manifest = context.manifest\n\n model_dir = properties.get(\"model_dir\")\n self.batch_size = properties.get(\"batch_size\")\n serialized_file = self.manifest[\"model\"][\"serializedFile\"]\n model_pt_path = os.path.join(model_dir, serialized_file)\n\n if not os.path.isfile(model_pt_path):\n raise RuntimeError(\"Missing the model.pt file\")\n\n logger.debug(\"Loading torchscript model\")\n self.model = self._load_torchscript_model(model_pt_path)\n\n self.model.to(self.device)\n self.model.eval()\n\n logger.debug(\"Model file %s loaded successfully\", model_pt_path)\n\n self.initialized = True" ]
[ "0.57388246", "0.54338783", "0.5398804", "0.5333285", "0.53162426", "0.5237506", "0.523501", "0.52327436", "0.51980984", "0.51619136", "0.5145377", "0.5143205", "0.5136607", "0.51189315", "0.5115787", "0.50833327", "0.50563383", "0.5043505", "0.50324553", "0.5023637", "0.5003302", "0.49907184", "0.49568686", "0.49459016", "0.49336395", "0.49159202", "0.49050325", "0.48855245", "0.48793685", "0.48778754" ]
0.6689239
0
Wrap function with ST cache method if streamlit is importable
def wrap_with_st_cache_if_avaiable(f): try: import streamlit as st logger.info("Using streamlit cache for load") return st.cache(f, allow_output_mutation=True, show_spinner=False) except: logger.exception("Could not import streamlit and apply caching") print("You need streamlit to run use this method") return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_functools_wraps(self):\n\n import streamlit as st\n\n @st.cache\n def f():\n return True\n\n self.assertEqual(True, hasattr(f, \"__wrapped__\"))", "def cached(func):\n return _lru_cache(None)(func)", "def cache(func):\n storage = {}\n\n def wrapper(*args, **kwargs):\n key = str(*args, **kwargs)\n if storage.get(key):\n return storage[key]\n else:\n result = func(*args, **kwargs)\n storage[key] = result\n return result\n\n return wrapper", "def cached(function):\n\t@wraps(function)\n\tdef check_cache_first(cls, *args):\n\t\tif not args in cls._cache:\n\t\t\tcode = function(cls, *args)\n\t\t\tif code:\n\t\t\t\tcls._cache[args] = code\n\t\t\t\treturn code\n\t\telse:\n\t\t\treturn cls._cache[args]\n\t\treturn None\n\treturn check_cache_first", "def cached():\n def decorator(fn): # define a decorator for a function \"fn\"\n cache_name = fn.func_name\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments\n if os.path.exists(cache_name):\n with gzip.GzipFile(cache_name, 'rb') as cachehandle:\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with gzip.GzipFile(cache_name, 'wb') as cachehandle:\n pickle.dump(res, cachehandle, pickle.HIGHEST_PROTOCOL)\n return res\n return wrapped\n return decorator # return this \"customized\" decorator that uses \"cachefile\"", "def cache_function(self, func):\n\n @wraps(func)\n def wrapper(*args):\n if self.__log:\n self.__logger.info(f\"Called {func.__name__} with {args}\")\n fileName = self.__build_file_name(func, args)\n\n if os.path.isfile(fileName):\n # Result is already stored in cache\n # Retrieve return value from cache\n return self.__read_cache(fileName)\n else:\n # Result is not stored in cache\n # Run function\n if len(args) > 0:\n returnVal = func(args)\n else:\n returnVal = func()\n\n # Store value in cache\n self.__write_cache(fileName, returnVal)\n\n # Give return value\n return returnVal\n\n return wrapper", "def _memorize(func):\n\n def _wrapper(self, *args, **kwargs):\n \"\"\"Wrapper to cache the function's output.\n \"\"\"\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val\n return _wrapper", "def wrapper():\n\n result = caching_services.try_get('permanent', func, None)\n if result is not None:\n return result\n\n result = func()\n caching_services.try_set('permanent', result, func, None)\n return result", "def cache(func):\n\n def func_wrapper(self, hook=None, result_name=None):\n \"\"\"Wrapper to cache the result of a function.\"\"\"\n if self._cache is not None:\n c = self._cache.copy()\n c['cache'] = True\n return c\n else:\n ret = func(self, hook=hook, result_name=result_name)\n if not isinstance(ret, dict):\n raise TypeError( # pragma: no cover\n \"A dictionary was expected not '{0}'.\\nIssue with class '{1}'\"\n \"\".format(\n type(ret), type(self)))\n self._cache = ret\n ret = ret.copy()\n ret['cache'] = False\n return ret\n return func_wrapper", "def cached_func(*args):\n try: # fails if cache is not instantiated\n return self.data['run'][func.__name__]\n except KeyError:\n value = func(*args)\n self.data['run'][func.__name__] = value\n return value", 
"def run(self, func, *args):\n @wraps(func)\n def cached_func(*args):\n \"\"\"Run wise cache function\"\"\"\n try: # fails if cache is not instantiated\n return self.data['run'][func.__name__]\n except KeyError:\n value = func(*args)\n self.data['run'][func.__name__] = value\n return value\n return cached_func", "def one_use(func):\n attribute = \"_cache_\" + func.__name__\n\n @property\n @functools.wraps(func)\n def decorated(self):\n if not hasattr(self, attribute):\n setattr(self, attribute, func(self))\n return getattr(self, attribute)\n return decorated", "def test_cache():\n\n def func(arg1, arg2):\n return arg1 * arg2\n\n first = cache(func)(100, 200)\n second = cache(func)(100, 200)\n assert first is second", "def dynCache(*args, **kwargs)->None:\n pass", "def unbound_cache(func):\n\n cache = {}\n\n @functools.wraps(func)\n def caching_wrapper(*args):\n try:\n return cache[args]\n except KeyError:\n result = func(*args)\n cache[args] = result\n return result\n\n return caching_wrapper", "def cache(fn):\n\tcache.c = dict()\n\tdef _fn(*args, **kwargs):\n\t\tkey = fn.__name__ + str(args) + str(kwargs)\n\t\ttry:\n\t\t\tret = cache.c[key]\n\t\texcept KeyError, e:\n\t\t\tret = fn(*args, **kwargs)\n\t\t\tcache.c[key] = ret\n\t\treturn ret\n\treturn _fn", "def cached(func):\n cache_dct = {}\n\n @wraps(func)\n def _lru_cache_decorator(*args):\n key = args\n if key in cache_dct:\n return cache_dct[key]\n else:\n cache_dct[key] = func(*args)\n return cache_dct[key]\n return _lru_cache_decorator", "def cachefor(name):\n def decorator(func):\n assert name not in cachefuncs\n cachefuncs[name] = func\n return func\n return decorator", "def decorator(func):\n\n def wrapper():\n \"\"\"\n decorates the given function and makes it a lazy one.\n\n :returns: function result.\n \"\"\"\n\n result = caching_services.try_get('permanent', func, None)\n if result is not None:\n return result\n\n result = func()\n caching_services.try_set('permanent', result, func, None)\n return result\n\n return update_wrapper(wrapper, func)", "def cached(key):\n def wrapper(function):\n def wrapped(d,g,i):\n if key not in d:\n d[key] = function(d,g,i)\n return d[key]\n return wrapped\n return wrapper", "def cache(func):\n results = {}\n\n @functools.wraps(func)\n def __cache(*args): # changed function\n nonlocal results # if this function call with parameters that already used\n if args in results.keys(): # then answer gets from dictionary\n # print(\"{} - got from cache\".format(args))\n rez = results[args]\n else:\n rez = func(*args)\n results[args] = rez\n return rez\n\n return __cache", "def cache(cache_path):\n def cache_decorator(generator):\n def wrapper():\n return cached(cache_path, generator)\n return wrapper\n return cache_decorator", "def cache_result(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n assert len(args) == 0 and len(kwargs) == 0, \"Wrapped call must be empty\"\n if not hasattr(f, \"cached_result\"):\n f.cached_result = f()\n return f.cached_result\n return wrapper", "def cache_result(func):\n\n @wraps(func)\n def with_cache(*args, **kwargs):\n \"\"\"\n Cached function\n \"\"\"\n key = '{}{}{}'.format(\n hash(func), hash(args), hash(frozenset(kwargs.items())))\n\n cached_result = cache.get(key)\n if cached_result is not None:\n return cached_result if cached_result != 'None' else None\n result = func(*args, **kwargs)\n cache.set(key, result if result is not None else 'None')\n\n return result\n\n return with_cache", "def _proxy_cache(from_func, to_func):\n to_func.cache_info = 
from_func.cache_info\n to_func.cache_clear = from_func.cache_clear", "def local_remote_wrapper(func, cache_ext='bin', overwrite_cache=False):\n @wraps(func)\n def wrapper(*args, **kwargs):\n global DEBUG\n if 'debug' in kwargs:\n DEBUG = kwargs['debug']\n assert ('url' in kwargs) or len(args), \"url must be specified\"\n url = (kwargs['url'] if 'url' in kwargs else args[0])\n hashseq = hashlib.md5(url.encode()).hexdigest()\n cache_url = cache_dir + '.'.join([func.__name__, hashseq, cache_ext])\n if 'local_cache' not in kwargs:\n if DEBUG > 3:\n print(f'searching for cache file for {url}')\n print(f' hash is {hashseq} ')\n print(f' with file location (assumed to be) {cache_url}')\n if os.path.isfile(cache_url):\n try:\n with open(cache_url, 'rb') as binfile:\n # kwargs['local_cache'] = pickle.loads(binfile.read())\n kwargs['local_cache'] = binfile.read()\n if DEBUG:\n print(f' FOUND local cache: {cache_url} for url: {url}')\n except EOFError:\n # in the case of a bad or empty file, don't get hung up\n pass\n if 'local_cache' not in kwargs:\n wait = sleep_between()\n if DEBUG > 1:\n print('...sleeping for {wait:.3f} seconds')\n # sleep a bit to prevent scraping from getting banned for these urls\n sleep(wait)\n # call the function to retrieve and beautify html data\n beautified, raw = func(*args, **kwargs)\n if 'local_cache' not in kwargs and not(os.path.isdir(cache_dir)):\n os.mkdir(cache_dir)\n if raw is not None and (\n overwrite_cache or not(os.path.isfile(cache_url))):\n with open(cache_url, 'wb') as bfile:\n # bfile.write(pickle.dumps(data))\n bfile.write(raw)\n if DEBUG:\n print(f' CREATED local cache: {cache_url} for url: {url}')\n return beautified, raw\n return wrapper", "def _wrapper(self, *args, **kwargs):\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val", "def cache(filename):\n\n def decorator(func):\n def wrapper(*args, **kw):\n self = args[0]\n path = os.path.join(self.cache_dir, filename)\n time0 = time.time()\n if os.path.exists(path):\n result = joblib.load(path)\n cost = time.time() - time0\n logger.info('[cache] loading {} costs {:.2f}s'.format(path, cost))\n return result\n result = func(*args, **kw)\n cost = time.time() - time0\n logger.info('[cache] obtaining {} costs {:.2f}s'.format(path, cost))\n joblib.dump(result, path)\n return result\n\n return wrapper\n\n return decorator", "def cached(cachefile):\n def decorator(fn): # define a decorator for a function \"fn\"\n def wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments \n # if cache exists -> load it and return its content\n if os.path.exists(cachefile):\n with open(cachefile, 'rb') as cachehandle:\n print(\"using cached result from '%s'\" % cachefile)\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with open(cachefile, 'wb') as cachehandle:\n print(\"saving result to cache '%s'\" % cachefile)\n pickle.dump(res, cachehandle)\n\n return res\n\n return wrapped\n\n return decorator # return this \"customized\" decorator that uses \"cachefile\"", "def cached(cachefile):\n def decorator(fn): # define a decorator for a function \"fn\"\n def 
wrapped(*args, **kwargs): # define a wrapper that will finally call \"fn\" with all arguments \n # if cache exists -> load it and return its content\n if os.path.exists(cachefile):\n with open(cachefile, 'rb') as cachehandle:\n print(\"using cached result from '%s'\" % cachefile)\n return pickle.load(cachehandle)\n\n # execute the function with all arguments passed\n res = fn(*args, **kwargs)\n\n # write to cache file\n with open(cachefile, 'wb') as cachehandle:\n print(\"saving result to cache '%s'\" % cachefile)\n pickle.dump(res, cachehandle)\n\n return res\n\n return wrapped\n\n return decorator # return this \"customized\" decorator that uses \"cachefile\"" ]
[ "0.72335064", "0.67037886", "0.66820943", "0.6557552", "0.6514492", "0.6514082", "0.6456728", "0.637417", "0.63546365", "0.63348776", "0.6316537", "0.6292026", "0.62352824", "0.62345815", "0.6233367", "0.62333107", "0.62000644", "0.6180296", "0.6171165", "0.6138923", "0.61381596", "0.61342865", "0.6102084", "0.60984755", "0.6097335", "0.60712963", "0.60618323", "0.60439396", "0.6031184", "0.6031184" ]
0.8505075
0
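A minimal usage sketch (not one of the dataset rows) for the streamlit-cache wrapper shown in the document field of the record above: the helper is applied to an expensive loader at import time, so the loader is memoized when the module runs inside a Streamlit app and falls back to the plain function otherwise. The loader name, the file path handling, and the ImportError-specific except clause are assumptions made for illustration, not part of the record.

import logging
import pickle

logger = logging.getLogger(__name__)

def wrap_with_st_cache_if_available(f):
    # Same idea as the record's helper: wrap with st.cache when streamlit imports.
    try:
        import streamlit as st
        logger.info("Using streamlit cache for load")
        return st.cache(f, allow_output_mutation=True, show_spinner=False)
    except ImportError:
        logger.exception("Could not import streamlit and apply caching")
        return f

def load_embeddings(path):
    # Stand-in for an expensive load step worth caching across Streamlit reruns.
    with open(path, "rb") as fh:
        return pickle.load(fh)

# Plain function in scripts and tests; st.cache-memoized inside a Streamlit app.
load_embeddings = wrap_with_st_cache_if_available(load_embeddings)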
Normalize a dict of chunks.
def normalize_chunks( chunks: Mapping[str, Union[int, Tuple[int, ...]]], dim_sizes: Mapping[str, int], ) -> Dict[str, int]: if not chunks.keys() <= dim_sizes.keys(): raise ValueError( 'all dimensions used in chunks must also have an indicated size: ' f'chunks={chunks} vs dim_sizes={dim_sizes}') result = {} for dim, size in dim_sizes.items(): if dim not in chunks: result[dim] = size elif isinstance(chunks[dim], tuple): unique_chunks = set(chunks[dim]) if len(unique_chunks) != 1: raise ValueError( f'chunks for dimension {dim} are not constant: {unique_chunks}', ) result[dim], = unique_chunks elif chunks[dim] == -1: result[dim] = size else: result[dim] = chunks[dim] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_all_data_in_dict(data: Data_dict_type, normalizers: Tuple[object, ...]) -> Data_dict_type:\n for key, item in data.items():\n values, sample_rate = item\n # save old shape and reshape data to supported format for normalizer\n old_shape = values.shape\n values = values.reshape((-1, values.shape[-1]))\n # normalize data\n for normalizer in normalizers:\n values = normalizer.transform(values)\n # Reshape data back to old shape\n values = values.reshape(old_shape)\n data[key] = (values, sample_rate)\n return data", "def _normalize(self, value_dict):\n median = np.median([value_dict[i] for i in list(value_dict.keys())])\n n = len(value_dict.keys())\n if median < 1.0 / float(n):\n divisor = 1.0 / float(n)\n else:\n divisor = median\n return_dict = {}\n for i in list(value_dict.keys()):\n return_dict[i] = float(value_dict[i]) / float(divisor)\n return return_dict", "def split_dict_equally(input_dict, chunks=2):\n return_list = [dict() for idx in range(chunks)]\n idx = 0\n for k,v in input_dict.iteritems():\n return_list[idx][k] = v\n if idx < chunks-1:\n idx += 1\n else:\n idx = 0\n return return_list", "def dict_normalization(dict_, nested=False):\n dict_norm = dict()\n if not nested:\n if dict_.values():\n d_max = max(dict_.values())\n d_min = min(dict_.values())\n if d_max - d_min == 0:\n dict_norm = {key: 1 for key in dict_}\n else:\n dict_norm = {key: (dict_[key] - d_min) / (d_max - d_min) for key in dict_}\n else:\n for key_1 in dict_:\n if dict_[key_1]:\n dict_norm[key_1] = dict()\n else: continue\n d_max = max(dict_[key_1].values())\n d_min = min(dict_[key_1].values())\n for key_2 in dict_[key_1]:\n if d_max - d_min == 0:\n dict_norm[key_1][key_2] = 1 / len(dict_[key_1])\n else:\n dict_norm[key_1][key_2] = (dict_[key_1][key_2] - d_min) / (d_max - d_min)\n return dict_norm", "def normalize(self):\n total = float(self.totalCount())\n if total == 0: return\n for key in self.keys():\n self[key] = self[key] / total", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def normalization(distribution):\r\n total_sum = 0\r\n for number in distribution.values():\r\n total_sum += number\r\n \r\n for bin in distribution.keys():\r\n distribution[bin] = float(distribution[bin]) / total_sum\r\n\r\n return distribution", "def _normalize(self, dictionnary):\r\n copy_dict = OrderedDict()\r\n for k,v in dictionnary.items():\r\n if isinstance(v, OrderedDict):\r\n copy_dict[k.replace('#','').replace('@','')] = self._normalize(v)\r\n else:\r\n copy_dict[k.replace('#','').replace('@','')] = v\r\n return copy_dict", "def normalise_dict(my_dict: Dict[Any, float]) -> Dict[Any, float]:\n values_sum = sum(my_dict.values())\n return {k: v / values_sum for k, v in my_dict.items()}", "def normalize(self):\n total = float(sum(self.values()))\n for key in self:\n self[key] /= total", "def normalize(self):\n total = float(sum(self.values()))\n for key in self:\n self[key] /= total", "def clean_arrays_dict(arrays_dict):\n for k in arrays_dict.keys():\n volumes_list = arrays_dict[k]\n arrays_dict[k] = 
[convert_Volume_to_slices(v) for v in volumes_list]", "def normalize(dictionary, num):\n for key in dictionary.keys():\n dictionary[key] = float(dictionary[key])/num\n return dictionary", "def normalize(counts):\n numvals = sum(counts.itervalues())\n if numvals <= 0:\n return counts\n res = dict()\n for (k,cnt) in counts.iteritems():\n res[k] = float(cnt)/float(numvals)\n return res", "def _normalize_data_to_send(info):\n # Remove the parts of the data that are unbounded in size.\n info = copy.deepcopy(info)\n for key in ['model_config', 'epoch_history']:\n if key in info:\n del info[key]\n return info", "def divide_dict(src_dict, num):\n return {key:(value/num) for key, value in src_dict.items()}", "def normalise(strategy: dict) -> dict:\n\n total = sum(strategy.values())\n if total > 0:\n return {x: strategy[x] / total for x in strategy}\n else:\n return {x: 1 / len(strategy) for x in strategy}", "def normalize(ds, config):\n logger.info(\"Applying normalization with config:\\n %s\", _dict_to_logstring(config))\n\n key = config[\"key\"]\n def _normalize(x):\n return dict(x, **{key: features.cmvn(x[key], **config.get(\"kwargs\", {}))})\n\n return (ds.batch(config.get(\"batch_size\", 1))\n .map(_normalize, num_parallel_calls=TF_AUTOTUNE)\n .unbatch())", "def normalize_dict(from_dict: Dict[str, Any], key_mapping: Dict[str, str]) -> Dict[str, Any]:\n to_dict = {}\n\n for new_key, old_key in key_mapping.items():\n if old_key in from_dict:\n to_dict[new_key] = from_dict[old_key]\n\n return to_dict", "def normalize(dist):\n\t\n\tif isinstance(dist, dict):\n\t\t# Make sure our keys/values line up in their lists\n\t\tkeys = dist.keys()\n\t\tvals = [dist[k] for k in keys]\n\t\tnormalize(vals)\n\t\tfor k,v in zip(keys,vals):\n\t\t\tdist[k] = v\n\t\treturn\n\tfdist = [float(d) for d in dist]\n\ts = sum(fdist)\n\tif s == 0:\n\t\treturn\n\tfdist = [d/s for d in fdist]\n\tfor i,d in enumerate(fdist):\n\t\tdist[i] = d", "def normalise_dict(input_dict, factor):\n\n output_dict = {}\n for key, value in input_dict.items():\n if factor == 'max':\n output_dict[key] = [float(i)/max(value) for i in value]\n else:\n output_dict[key] = [float(i)/factor for i in value]\n\n return output_dict", "def normalize_dict(\n items: Optional[RequestContent], normalize_data: bool = True\n) -> Optional[RequestContent]:\n\n def sort_dict(d):\n return dict(sorted(d.items(), key=itemgetter(0)))\n\n if not items:\n return None\n if isinstance(items, Mapping):\n return sort_dict(items)\n if normalize_data and isinstance(items, (bytes, str)):\n # Attempt to load body as JSON; not doing this by default as it could impact performance\n try:\n dict_items = json.loads(decode(items))\n dict_items = json.dumps(sort_dict(dict_items))\n return dict_items.encode('utf-8') if isinstance(items, bytes) else dict_items\n except Exception:\n pass\n\n return items", "def normalize_metadata(self, metadata):\n for key in metadata:\n metadata[key] = sp.utils.bytes_of_string(metadata[key])\n\n return metadata", "def _json_normalize(x: dict) -> dict:\n return json.loads(json.dumps(x))", "def normalize(self):\n for key in self.corpus.keys():\n sum_count = 0\n words = []\n counts = []\n for k, v in self.corpus[key].items():\n sum_count += v\n words.append(k)\n counts.append(v)\n prob = [float(count)/sum_count for count in counts]\n\n self.corpus[key] = [words, prob]", "def normalize_counter(c):\n total = sum(c.values())\n return {w:float(c[w])/total for w in c}", "def truncate_dict(dictionary: Dict, n: int) -> Dict:\n return {k: v for (k, v) in 
list(dictionary.items())[:n]}", "def reprocess_dict (dict1):\n out = {};\n for kk,value in dict1.iteritems():\n # parse keys\n (lo0,dur,decdec,freqmhz,nch),weight = kk[0].split(\"_\"),kk[1]\n if weight != \"natural\":\n weight += \":\" + kk[3];\n dec = -int(decdec.split(\"-\")[1]);\n freq = int(freqmhz[:-3])\n # parse layout\n lo = lo0;\n if lo[-2] in \"abcd\":\n lores = \"0.\"+lo[-1];\n lofreq = dict(a=650,b=800,c=1000,d=1400)[lo[-2]];\n lo = lo[:-2];\n else:\n lores = 0;\n lofreq = 0;\n lo = lo[4:];\n l00 = lo0[4:]\n wbins.add(weight);\n # make new entry\n out[lo0,lores,lofreq,freq,dec,weight] = [value,kk];\n return out;", "def transform_dict(dc: dict):\n tmp_dict = dict()\n for k, v in dc.items():\n k1, k2 = k.split(\"|\")\n v1 = {'e': v, 'c': k2}\n v2 = {'e': v, 'c': k1}\n insert_to_dict(tmp_dict, k1, v1)\n insert_to_dict(tmp_dict, k2, v2)\n return tmp_dict", "def _update_normalization_params(intermediate_normalization_dict, new_values):\n\n if MEAN_VALUE_KEY not in intermediate_normalization_dict:\n intermediate_normalization_dict = {\n NUM_VALUES_KEY: 0,\n MEAN_VALUE_KEY: 0.,\n MEAN_OF_SQUARES_KEY: 0.\n }\n\n # Update mean value.\n these_means = numpy.array([\n intermediate_normalization_dict[MEAN_VALUE_KEY], numpy.mean(new_values)\n ])\n these_weights = numpy.array([\n intermediate_normalization_dict[NUM_VALUES_KEY], new_values.size\n ])\n intermediate_normalization_dict[MEAN_VALUE_KEY] = numpy.average(\n these_means, weights=these_weights\n )\n\n # Update mean of squares.\n these_means = numpy.array([\n intermediate_normalization_dict[MEAN_OF_SQUARES_KEY],\n numpy.mean(new_values ** 2)\n ])\n intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] = numpy.average(\n these_means, weights=these_weights\n )\n\n # Update number of values.\n intermediate_normalization_dict[NUM_VALUES_KEY] += new_values.size\n\n return intermediate_normalization_dict" ]
[ "0.6302474", "0.60837907", "0.5948247", "0.5786794", "0.57858247", "0.5774023", "0.5770888", "0.57661134", "0.5734257", "0.5642278", "0.55719125", "0.5567272", "0.5513552", "0.5469426", "0.5463131", "0.54538107", "0.5401542", "0.5381413", "0.5303782", "0.52984816", "0.52970815", "0.5232544", "0.5209353", "0.5199854", "0.5199515", "0.5168282", "0.514915", "0.51439536", "0.5134425", "0.5115147" ]
0.67038727
0
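A small worked example (not a dataset row) of what the normalize_chunks function in the record above returns; it assumes that function is in scope under the same name, and the dimension names and sizes below are made up for illustration. A dimension missing from chunks gets its full size, -1 expands to the full size, and a constant chunk tuple collapses to its single value.

# Hypothetical dimension sizes and per-dimension chunk specs.
dim_sizes = {"time": 365, "lat": 180, "lon": 360}
chunks = {"time": (73, 73, 73, 73, 73), "lat": -1}  # "lon" left unspecified

result = normalize_chunks(chunks, dim_sizes)
assert result == {"time": 73, "lat": 180, "lon": 360}

# Non-constant tuples (e.g. {"time": (100, 265)}) and chunk keys that are not
# present in dim_sizes raise ValueError under the record's rules.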
Make a rechunking plan.
def rechunking_plan( dim_sizes: Mapping[str, int], source_chunks: Mapping[str, int], target_chunks: Mapping[str, int], itemsize: int, max_mem: int, ) -> List[Dict[str, int]]: plan_shapes = algorithm.rechunking_plan( shape=tuple(dim_sizes.values()), source_chunks=tuple(source_chunks[dim] for dim in dim_sizes), target_chunks=tuple(target_chunks[dim] for dim in dim_sizes), itemsize=itemsize, max_mem=max_mem, ) return [dict(zip(dim_sizes.keys(), shapes)) for shapes in plan_shapes]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _rechunking(self, compressor, parallel=False, replace=False):\n target_path = tempfile.TemporaryDirectory()\n source_sf = strax.DataDirectory(self.path)\n st= self.st\n st.set_context_config(dict(allow_rechunk=False,\n n_chunks=10))\n st.storage = [source_sf]\n run_id = '0'\n st.make(run_id, self.target)\n assert st.is_stored(run_id, self.target)\n assert strax.utils.dir_size_mb(self.path) > 0\n original_n_files = len(glob.glob(os.path.join(self.path, '*', '*')))\n assert original_n_files > 3 # At least two files + metadata\n _, backend_key = source_sf.find(st.key_for(run_id, self.target))\n strax.rechunker(source_directory=backend_key,\n dest_directory=target_path.name if not replace else None,\n replace=True,\n compressor=compressor,\n target_size_mb=strax.default_chunk_size_mb * 2,\n parallel=parallel,\n max_workers=4,\n _timeout=5,\n )\n assert st.is_stored(run_id, self.target)\n # Should be empty, we just replaced the source\n assert strax.utils.dir_size_mb(target_path.name) == 0\n new_n_files = len(glob.glob(os.path.join(self.path, '*', '*',)))\n assert original_n_files > new_n_files\n st.set_context_config(dict(forbid_creation_of='*'))\n st.get_array(run_id, self.target)\n target_path.cleanup()", "def run_module_floor_plan(args):\n step = args.step\n if step == \"generate_images\":\n FloorplanGenerator = dataset_builder.ImageFloorplanGenerator(\n input_directory=args.input_directory, output_directory=args.output_directory\n )\n FloorplanGenerator.run(n_jobs=args.n_jobs, starting_block=args.starting_block)\n elif step == \"generate_dataset\":\n DatasetGenerator = dataset_builder.DatasetGenerator(\n input_directory=args.input_directory, output_directory=args.output_directory\n )\n DatasetGenerator.generate_dataset()", "def build_plan(self):\n assert False, \"Not implemented.\"", "def plan(self):\n raise NotImplementedError('You must implement the plan() method '\n 'yourself!')", "def extend_plan(self, length=None):\n\t\tif length is None:\n\t\t\tlength = self.H\n\n\t\tnew_ghost = []\n\t\tfor i in range(length):\n\t\t\tif i < len(self.planned_actions):\n\t\t\t\t# Update new ghost plan\n\t\t\t\tnew_ghost.append(self.planned_actions[i])\n\t\t\telif i < len(self.ghost_plan):\n\t\t\t\t# Use old ghost plan for new plan\n\t\t\t\tself.planned_actions.append(self.ghost_plan[i])\n\t\t\t\tnew_ghost.append(self.ghost_plan[i])\n\t\t\telif len(self.planned_actions) > 0:\n\t\t\t\t# No ghost plan available, just repeat action\n\t\t\t\tself.planned_actions.append(self.planned_actions[-1])\n\t\t\telse:\n\t\t\t\t# No past plan available, just use zero\n\t\t\t\tself.planned_actions.append(np.zeros(self.M))\n\n\t\t# Update the ghost plan if it has more information\n\t\tif len(new_ghost) >= len(self.ghost_plan):\n\t\t\tself.ghost_plan = new_ghost\n\n\t\t# Truncate the plan if it is too long\n\t\tself.planned_actions = self.planned_actions[:length]", "def _plan_mr_steps(cls, context, tmp, path_prefix):\n node_list = []\n template_params = {\n 'prev': '<bad marker>',\n 'next': None,\n 'date': context['execution_date'].date().isoformat(),\n 'tmp': tmp\n }\n for step_num, step in enumerate(cls.steps):\n template_params['next'] = os.path.join(tmp, step.__name__, 'auto-' + str(step_num))\n template_params['step'] = step.__name__\n template_params['chain'] = cls.__name__\n src_list = []\n for v in step.context_src(context):\n src = cls._apply_template(v, template_params)\n if '<bad marker>' in src:\n raise Exception('Bad src for step %s step_num %s.' 
% (step.__name__, step_num))\n if not src.startswith('/'):\n src = os.path.join(path_prefix, src)\n src_list.append(src)\n dst = cls._apply_template(step.context_dst(context), template_params)\n if not dst.startswith('/'):\n dst = os.path.join(path_prefix, dst)\n files = []\n for v in step.context_files(context):\n file_src = cls._apply_template(v, template_params)\n if '<bad marker>' in file_src:\n raise Exception('Bad file src for step %s step_num %s.' % (step.__name__, step_num))\n if not file_src.startswith('/'):\n file_src = os.path.join(path_prefix, file_src)\n files.append(file_src)\n node_list.append(ChainNode(src_list, 'mr', cls.__name__ + step.__name__, dst=dst, step=step, files=files, original_step_number=step_num))\n template_params['prev'] = dst\n return node_list", "def build(self, block_size):", "def plan_pearl(self, pearl):\n pid = pearl[\"id\"]\n layers = pearl[\"layers\"]\n start = 0\n\n # 1. Creates plan \n plan, workload, last_id = self.search(pid, start, layers)\n\n # 2. Adds work for plan\n for i in range(len(workload)):\n self.workload[i] += workload[i]\n work = sum(workload)\n \n # 3. Compute return path\n path = self.return_path(last_id, start)\n\n # 4. if it's not already at the gatekeeper, add Pass operations\n if path: \n prev = path[0]\n for curr in path[1:]:\n self.workload[prev] += 1\n plan.append([1, {\"Pass\":{\"pearl_id\":pid,\"to_worker\":curr}}])\n work += 1\n prev = curr\n return plan, work", "def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 
5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]", "def remesh_blocks():\n \n # Get the active object\n obj = bpy.context.active_object\n \n nameCopy = \"temp_copy\"\n\n # Switch in object mode \n bpy.ops.object.mode_set(mode='OBJECT')\n\n # Remove all modifiers from the object\n obj.modifiers.clear()\n\n # Delete the existing copy \n for o in bpy.data.objects:\n if o.type == 'MESH' and o.name == nameCopy:\n # Delete the existing copy\n object_to_delete = bpy.data.objects[nameCopy]\n bpy.data.objects.remove(object_to_delete, do_unlink=True) \n \n \n # Make a copy of the object\n new_obj = obj.copy()\n new_obj.data = obj.data.copy()\n new_obj.animation_data_clear()\n bpy.context.collection.objects.link(new_obj)\n\n # Rename the copy\n new_obj.name = nameCopy\n\n # Hide the copy\n new_obj.hide_viewport = True\n\n # Remesh the faces of the object with blocks\n bpy.ops.object.modifier_add(type='REMESH')\n bpy.context.object.modifiers[\"Remesh\"].mode = 'BLOCKS'\n bpy.context.object.modifiers[\"Remesh\"].octree_depth = bpy.context.scene.level_blocks\n bpy.context.object.modifiers[\"Remesh\"].scale = 0.99\n bpy.context.object.modifiers[\"Remesh\"].use_remove_disconnected = False\n bpy.context.object.modifiers[\"Remesh\"].threshold = 1\n bpy.context.object.modifiers[\"Remesh\"].use_smooth_shade = False\n\n # Make intersection between the remesh object and the original\n bpy.ops.object.modifier_add(type='BOOLEAN')\n bpy.context.object.modifiers[\"Boolean\"].operation = 'INTERSECT'\n 
bpy.context.object.modifiers[\"Boolean\"].operand_type = 'OBJECT'\n bpy.context.object.modifiers[\"Boolean\"].object = bpy.data.objects[nameCopy]\n bpy.context.object.modifiers[\"Boolean\"].solver = 'FAST'\n bpy.context.object.modifiers[\"Boolean\"].double_threshold = 0", "def run_module_complete_floorplan(args):\n step = args.step\n if step == \"generate_images\":\n FloorplanGenerator = dataset_builder.ImageSingleStepGenerator(\n input_directory=args.input_directory, output_directory=args.output_directory\n )\n FloorplanGenerator.run(n_jobs=args.n_jobs, starting_block=args.starting_block)\n elif step == \"generate_dataset\":\n DatasetGenerator = dataset_builder.DatasetGenerator(\n input_directory=args.input_directory, output_directory=args.output_directory\n )\n DatasetGenerator.generate_dataset()", "def _plan_workorders(self, replan=False):\n self.ensure_one()\n\n if not self.workorder_ids:\n return\n # Schedule all work orders (new ones and those already created)\n qty_to_produce = max(self.product_qty - self.qty_produced, 0)\n qty_to_produce = self.product_uom_id._compute_quantity(qty_to_produce, self.product_id.uom_id)\n start_date = max(self.date_planned_start, datetime.datetime.now())\n if replan:\n workorder_ids = self.workorder_ids.filtered(lambda wo: wo.state in ['ready', 'pending'])\n # We plan the manufacturing order according to its `date_planned_start`, but if\n # `date_planned_start` is in the past, we plan it as soon as possible.\n workorder_ids.leave_id.unlink()\n else:\n workorder_ids = self.workorder_ids.filtered(lambda wo: not wo.date_planned_start)\n for workorder in workorder_ids:\n workcenters = workorder.workcenter_id | workorder.workcenter_id.alternative_workcenter_ids\n\n best_finished_date = datetime.datetime.max\n vals = {}\n for workcenter in workcenters:\n # compute theoretical duration\n if workorder.workcenter_id == workcenter:\n duration_expected = workorder.duration_expected\n else:\n duration_expected = workorder._get_duration_expected(alternative_workcenter=workcenter)\n\n from_date, to_date = workcenter._get_first_available_slot(start_date, duration_expected)\n # If the workcenter is unavailable, try planning on the next one\n if not from_date:\n continue\n # Check if this workcenter is better than the previous ones\n if to_date and to_date < best_finished_date:\n best_start_date = from_date\n best_finished_date = to_date\n best_workcenter = workcenter\n vals = {\n 'workcenter_id': workcenter.id,\n 'duration_expected': duration_expected,\n }\n\n # If none of the workcenter are available, raise\n if best_finished_date == datetime.datetime.max:\n raise UserError(_('Impossible to plan the workorder. 
Please check the workcenter availabilities.'))\n\n # Instantiate start_date for the next workorder planning\n if workorder.next_work_order_id:\n start_date = best_finished_date\n\n # Create leave on chosen workcenter calendar\n leave = self.env['resource.calendar.leaves'].create({\n 'name': workorder.display_name,\n 'calendar_id': best_workcenter.resource_calendar_id.id,\n 'date_from': best_start_date,\n 'date_to': best_finished_date,\n 'resource_id': best_workcenter.resource_id.id,\n 'time_type': 'other'\n })\n vals['leave_id'] = leave.id\n workorder.write(vals)\n self.with_context(force_date=True).write({\n 'date_planned_start': self.workorder_ids[0].date_planned_start,\n 'date_planned_finished': self.workorder_ids[-1].date_planned_finished\n })", "def create_starting_plans(self):\n ctxs = [np.zeros((self.height, self.width)),\n np.zeros((self.height, self.width)) + 0.2,\n np.zeros((self.height, self.width)) + 0.35,\n np.zeros((self.height, self.width)) + 0.5,\n np.zeros((self.height, self.width)) + 0.65,\n np.zeros((self.height, self.width)) + 0.80]\n ambient_lux = [0.0, 500.0, 1000.0, 3000.0, 5000.0, 7000.0, 9000.0]\n\n plans = []\n\n # Create sparse grid plans (and empty and full grid plans)\n for i in range(0, 6):\n bulbs_on = np.zeros((self.width, self.height))\n if i > 0:\n bulbs_on[::i, ::i] = 1\n cost = self.scenario.compute_cost(bulbs_on)\n ind = self.toolbox.individual(bulbs_on.flatten())\n plan = Plan(ind, cost)\n\n for ctx in ctxs:\n for lux in ambient_lux:\n fit = self.fitness_ea(ind, tops=ctx)\n plan.add_applied_context(ctx, self.scenario.bulbs_height, fit, lux)\n plans.append(plan)\n\n # Create dense grid plans\n for i in range(2, 5):\n bulbs_on = np.zeros((self.width, self.height))\n bulbs_on[::i, :] = 1\n bulbs_on[:, ::i] = 1\n cost = self.scenario.compute_cost(bulbs_on)\n ind = self.toolbox.individual(bulbs_on.flatten())\n plan = Plan(ind, cost)\n\n for ctx in ctxs:\n for lux in ambient_lux:\n fit = self.fitness_ea(ind, tops=ctx)\n plan.add_applied_context(ctx, self.scenario.bulbs_height, fit, lux)\n plans.append(plan)\n\n return plans", "def gen_rebatch(self, *args, **kwargs):\n _action = self._action_list[0]\n self._rest_batch = None\n while True:\n if self._rest_batch is None:\n cur_len = 0\n batches = []\n else:\n cur_len = len(self._rest_batch)\n batches = [self._rest_batch]\n self._rest_batch = None\n while cur_len < _action['batch_size']:\n try:\n new_batch = _action['pipeline'].next_batch(*args, **kwargs)\n except StopIteration:\n break\n else:\n batches.append(new_batch)\n cur_len += len(new_batch)\n if len(batches) == 0:\n break\n else:\n if _action['merge_fn'] is None:\n batch, self._rest_batch = batches[0].merge(batches, batch_size=_action['batch_size'])\n else:\n batch, self._rest_batch = _action['merge_fn'](batches, batch_size=_action['batch_size'])\n yield batch", "def grow(self):\n while self.splittable_nodes:\n self.split_next()", "def execute_plan(conf, plan, grouped_against=None, grouped_by=None):\n\n log = logging.getLogger(__name__)\n try:\n if isinstance(conf, list) or isinstance(conf, tuple):\n sample_conf = conf[0]\n log.info(\"Executing SimulationGroup plan\")\n if not sample_conf['General']['save_as']:\n obj = SimulationGroup([Simulation(simulator=Simulator(c)) for c\n in conf],\n grouped_against=grouped_against,\n grouped_by=grouped_by)\n else:\n obj = SimulationGroup([Simulation(conf=c) for c in conf],\n grouped_against=grouped_against,\n grouped_by=grouped_by)\n ID = str(obj)\n else:\n log.info(\"Executing Simulation plan\")\n if not 
conf['General']['save_as']:\n obj = Simulation(simulator=Simulator(conf))\n else:\n obj = Simulation(conf=conf)\n # Concatenate the field components in each layer into a single 3D\n # array, but add to blacklist so the concatenated arrays don't get\n # written to disk\n # FIXME: This is grossly memory-inefficient\n for f in ('Ex', 'Ey', 'Ez'):\n ks = ['{}_{}'.format(lname, f) for lname in obj.layers.keys()]\n obj.data[f] = np.concatenate([obj.data[k] for k in ks])\n obj.data.add_to_blacklist(f)\n ID = obj.id[0:10]\n\n for task_name in ('crunch', 'plot'):\n if task_name not in plan:\n continue\n task = plan[task_name]\n log.info(\"Beginning %s for obj %s\", task_name, ID)\n for func, data in task.items():\n if not data['compute']:\n continue\n else:\n argsets = data['args']\n if argsets and isinstance(argsets[0], list):\n for argset in argsets:\n if argset:\n _call_func(func, obj, argset)\n else:\n _call_func(func, obj, [])\n else:\n if argsets:\n _call_func(func, obj, argsets)\n else:\n _call_func(func, obj, [])\n log.info(\"Completed %s for obj %s\", task_name, ID)\n log.info(\"Plan execution for obj %s complete\", ID)\n if isinstance(obj, Simulation):\n log.info(\"Saving and clearing data for Simulation %s\", ID)\n if obj.conf['General']['sample_dict']:\n obj.write_data(blacklist=('normE', 'normEsquared', 'Ex', 'Ey',\n 'Ez'))\n else:\n obj.write_data()\n obj.clear_data()\n else:\n log.info(\"Clearing data for SimulationGroup %s\", ID)\n for sim in obj.sims:\n sim.clear_data()\n except Exception as e:\n if isinstance(conf, list) or isinstance(conf, tuple):\n log.error('Group raised exception')\n else:\n log.error('Conf %s raised exception', conf['General']['sim_dir'])\n raise", "def makeRoastTabs(ID, numbers):\n newFromTemplate(ID, TODAY)\n continueFromLastPR(ID, 1)\n newFromTemplate(ID, TOMORROW)\n populateBatches(ID, 2, numbers)", "def __init__(self, transform, num_subbursts=1, parts_subset=None, \n pct_subset=None, balance=True, random_start_points=False, seed1=4, seed2=33):\n self.dp = DataPipeline(\"\",\"\")\n self.metadata = self.dp.get_metadata()\n if parts_subset:\n self.metadata = self.metadata[self.metadata.part.isin(parts_subset)]\n if pct_subset:\n self.metadata = self.metadata.sample(frac=pct_subset, random_state=seed1)\n if balance:\n reals = self.metadata[self.metadata.label=='REAL']\n self.num_reals = len(reals)\n fakes = self.metadata[self.metadata.label=='FAKE']\n fakes = fakes.sample(n=self.num_reals, random_state=seed2)\n self.metadata = pd.concat([reals,fakes])\n\n self.num_subbursts = num_subbursts\n self.transform = transform\n self.padding = PADDING_CROP\n self.random_start_points = random_start_points", "def run_module_structure_plan(args):\n step = args.step\n if step == \"generate_images\":\n StructureGenerator = dataset_builder.ImageStructureGenerator(\n input_directory=args.input_directory, output_directory=args.output_directory\n )\n StructureGenerator.run(n_jobs=args.n_jobs, starting_block=args.starting_block)\n elif step == \"generate_dataset\":\n DatasetGenerator = dataset_builder.DatasetGenerator(\n input_directory=args.input_directory, output_directory=args.output_directory\n )\n DatasetGenerator.generate_dataset()", "def baseline_rechunk(indir_path, outdir_path, O, I, R, file_format, addition, distributed, debug_mode=False, clean_out_dir=False, dont_write=False):\n\n print(f\"Setting arguments...\")\n global DEBUG_LOCAL\n global DONT_WRITE\n global tracker\n global outdirs_dict, outdir_index\n outdirs_dict = dict()\n outdir_index = 0\n tracker = 
Tracker()\n DEBUG_LOCAL = True if debug_mode else False\n DONT_WRITE = True if dont_write else False\n\n print(\"Addition mode:\", addition)\n print(\"DONT_WRITE: \", DONT_WRITE)\n\n O, I, R = tuple(O), tuple(I), tuple(R)\n\n file_manager = get_file_manager(file_format)\n\n infiles_partition = get_blocks_shape(R, I)\n infiles_volumes = get_named_volumes(infiles_partition, I)\n outfiles_partition = get_blocks_shape(R, O)\n outfiles_volumes = get_named_volumes(outfiles_partition, O)\n outfiles_volumes = outfiles_volumes.values()\n\n if distributed:\n repartition_dict = None\n \n json_filename = '/disk0/gtimothee/repartition_dict.json'\n if not os.path.isfile(json_filename):\n # print(\"cannot find association dict json file\")\n sys.exit(1)\n else:\n pass # print(f\"json file found\")\n\n try: \n with open(json_filename) as f:\n repartition_dict = json.load(f)\n except Exception as e: \n print(e)\n # print(\"error (1)\")\n sys.exit(1)\n\n if repartition_dict == None:\n # print(\"error (2)\")\n sys.exit(1)\n else:\n pass # print(f\"Found reparition dict: {repartition_dict}\")\n\n input_files = repartition_dict.values()\n else:\n input_files = file_manager.get_input_files(indir_path)\n\n t_read = 0\n t_write = 0\n\n vols_written = list()\n nb_infile_openings = 0\n nb_infile_seeks = 0\n nb_outfile_openings = 0\n nb_outfile_seeks = 0\n buffer_index = 1\n for input_file in input_files:\n print(f\"Treating buffer: {buffer_index}...\")\n buffer_index += 1\n nb_infile_openings += 1\n\n involume = get_volume(input_file, infiles_volumes, infiles_partition)\n t1 = time.time()\n if not DONT_WRITE:\n data = file_manager.read_data_from_fp(input_file, slices=None)\n else:\n data = None\n t1 = time.time() - t1\n t_read += t1\n \n for outvolume in outfiles_volumes:\n if hypercubes_overlap(involume, outvolume):\n shape, t2, nb_outfile_seeks_tmp = write_to_outfile(involume, outvolume, data, outfiles_partition, outdir_path, O, file_manager, addition, tracker)\n t_write += t2\n vols_written.append(shape)\n # nb_outfile_openings += 1 already included in nb_outfile_seeks\n nb_outfile_seeks += nb_outfile_seeks_tmp\n \n file_manager.close_infiles()\n\n if DONT_WRITE:\n assert tracker.is_complete(((0,0,0), R))\n\n # print(\"\\nShapes written:\")\n # for row in vols_written: \n # print(row)\n\n if clean_out_dir:\n print(\"Cleaning output directory\")\n file_manager.clean_directory(outdir_path)\n\n get_opened_files()\n\n return t_read, t_write, [nb_outfile_openings, nb_outfile_seeks, nb_infile_openings, nb_infile_seeks]", "def build_reset_problem(self):\n\n print(\"Resetting blocks...\")\n print(\"Moved Blocks:\", self.moved_blocks)\n \n # Define block order by sorting by height\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2], reverse=True)\n \n # Build the initial data structures\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n\n # Add all blocks to be moved to the data structure\n for ix in block_ixs:\n blk, pose 
= self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n\n # Return the planning data structure\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems", "def createTrialBatchesWithDynamics((taskRuleSet,nTasks,shuffle,batchNum)):\n if batchNum%100==0:\n print 'Running batch', batchNum\n\n nMiniblocks = nTasks\n nTasks = nTasks # Corresponds to the number of miniblocks\n n_trials_per_block = 3\n shuffle=True\n \n stimuliSet = task.createSensoryInputs()\n\n networkIO_DataFrame = {}\n networkIO_DataFrame['LogicRule'] = []\n networkIO_DataFrame['SensoryRule'] = []\n networkIO_DataFrame['MotorRule'] = []\n networkIO_DataFrame['Color1'] = []\n networkIO_DataFrame['Color2'] = []\n networkIO_DataFrame['Orientation1'] = []\n networkIO_DataFrame['Orientation2'] = []\n networkIO_DataFrame['Pitch1'] = []\n networkIO_DataFrame['Pitch2'] = []\n networkIO_DataFrame['Constant1'] = []\n networkIO_DataFrame['Constant2'] = []\n networkIO_DataFrame['MotorResponse'] = []\n\n # Create 1d array to randomly sample indices from\n stimIndices = np.arange(len(stimuliSet))\n taskIndices = np.arange(len(taskRuleSet))\n \n randomTaskIndices = np.random.choice(taskIndices,nTasks,replace=False)\n taskRuleSet2 = taskRuleSet.iloc[randomTaskIndices].copy(deep=True)\n taskRuleSet2 = taskRuleSet2.reset_index(drop=True)\n taskRuleSet = taskRuleSet2.copy(deep=True)\n\n# networkInputCode = []\n# networkOutputCode = []\n# for taskrule in taskRuleSet.index:\n# \n# randomStimuliIndices = np.random.choice(stimIndices,n_trials_per_block,replace=False)\n# stimuliSet2 = stimuliSet.iloc[randomStimuliIndices].copy(deep=True)\n# stimuliSet2 = stimuliSet2.reset_index(drop=True)\n# \n# for trial in stimuliSet2.index:\n#\n# networkInputCode.append(np.hstack((taskRuleSet.Code[taskrule], stimuliSet2.Code[trial])))\n# tmpresp, tmpcode = task.solveInputs(taskRuleSet.iloc[taskrule], stimuliSet2.iloc[trial])\n# networkOutputCode.append(tmpcode)\n\n \n\n ####\n # Construct trial dynamics\n rule_ind = np.arange(12) # rules are the first 12 indices of input vector\n stim_ind = np.arange(12,28) # stimuli are the last 16 indices of input vector\n input_size = len(rule_ind) + len(stim_ind)\n n_tp_total = 36 # total length of miniblock -- cpro task details\n input_matrix = np.zeros((input_size,n_tp_total,nMiniblocks))\n output_matrix = np.zeros((4,n_tp_total,nMiniblocks))\n trial = 0\n for block in range(nMiniblocks):\n # Define trial dynamics\n n_tp_encoding = 5\n n_tp_encodingdelay = np.random.randint(2,9) # encoding delay is jittered from 2 - 8 trs\n n_tp_trial = 3\n n_tp_probedelay = 2\n n_tp_trial_end = n_tp_total - n_tp_encoding - n_tp_encodingdelay - n_tp_trial*3 - n_tp_probedelay*2 # full miniblock is 36 trs\n\n\n rand_stim_ind = np.random.choice(stimIndices,n_trials_per_block,replace=False)\n stimuliSet2 = stimuliSet.iloc[rand_stim_ind].copy(deep=True)\n stimuliSet2 = stimuliSet2.reset_index(drop=True)\n \n # Create trial array\n networkInputCode2 = []\n networkOutputCode2 = []\n tp = 0\n # Encoding\n for i in range(n_tp_encoding):\n input_matrix[rule_ind,tp,block] = taskRuleSet.Code[block] \n tp += 1\n\n # Encoding delay\n tp += n_tp_encodingdelay\n\n # Trials\n for trial in 
range(n_trials_per_block):\n # First solve trial\n tmpresp, out_code = task.solveInputs(taskRuleSet.iloc[block], stimuliSet2.iloc[trial])\n\n # probe\n for i in range(n_tp_trial):\n input_matrix[stim_ind,tp,block] = stimuliSet2.Code[trial]\n # Commented out - response period will be limited to ITI following stimulus\n #output_matrix[:,tp,block] = out_code\n tp += 1\n\n # probe delay\n for i in range(n_tp_probedelay):\n output_matrix[:,tp,block] = out_code\n tp += 1\n \n if shuffle:\n ind = np.arange(input_matrix.shape[2],dtype=int)\n np.random.shuffle(ind)\n input_matrix = input_matrix[:,:,ind]\n output_matrix = output_matrix[:,:,ind]\n \n return input_matrix, output_matrix", "def advance_plan(self):\n\t\taction = self.planned_actions[0]\n\t\tself.planned_actions = self.planned_actions[1:]\n\t\tself.ghost_plan = self.ghost_plan[1:]\n\t\tself.extend_plan(self.H)\n\t\treturn action", "def unchunk():\n\n @filters\n def _dagpype_internal_fn_act(target):\n try:\n while True:\n a = (yield)\n if len(a) == 0:\n continue\n if a.ndim == 1:\n for i in range(a.shape[0]):\n target.send(a[i]) \n else:\n for i in range(a.shape[0]):\n target.send(tuple(a[i]))\n except GeneratorExit:\n if len(l) > 0:\n target.send(numpy.array(l, dtype = dtype_)) \n \n return _dagpype_internal_fn_act", "def run(\n planner,\n network,\n seed,\n timer,\n planner_affinity,\n rank_limit,\n performance_factor,\n mem_limit,\n slicer,\n stopwatch=None,\n):\n best_plan = None\n log = []\n\n try:\n # Continue the search for a new contraction tree until we have spent more than half of the estimated total\n # time on the search (i.e., we have spent more than the expected contraction time on the search).\n for tree, factored_network in planner.generate_contraction_trees(\n network, timer, seed=seed, affinity=planner_affinity\n ):\n util.log(\n \"Found tree of max-rank \" + str(tree.maxrank), util.Verbosity.progress\n )\n\n if best_plan is None or best_plan.tree.maxrank > tree.maxrank:\n best_plan = sliced_execution_plan.SlicedExecutionPlan(\n tree, factored_network\n )\n if stopwatch is not None:\n log.append((stopwatch.elapsed_time(), best_plan))\n\n if slicer is not None:\n slicer.slice_until(best_plan, memory=mem_limit, rank=rank_limit)\n if performance_factor is not None:\n estimated_contraction_time = (\n best_plan.total_FLOPs * performance_factor\n )\n timer.recap_timeout(estimated_contraction_time)\n elif stopwatch is not None:\n log.append(\n (\n stopwatch.elapsed_time(),\n sliced_execution_plan.SlicedExecutionPlan(\n tree, factored_network\n ),\n )\n )\n except TimeoutError:\n if best_plan is None:\n util.output_pair(\"Error\", \"Timeout during planning\", util.Verbosity.always)\n except MemoryError:\n util.output_pair(\n \"Error\", \"Out of Memory during planning\", util.Verbosity.always\n )\n except:\n util.log(traceback.format_exc(), util.Verbosity.always)\n util.output_pair(\"Error\", \"Exception during execution\", util.Verbosity.always)\n\n # Use the best tree that we have found so far\n return best_plan, log", "def greedy_shrink(self):\n while self.single_greedy_shrink_iteration():\n self.run_shrink_pass(\"lower_common_block_offset\")", "def summarize_plan(plan: Generator):\n read_cache: list[str] = []\n daq_keys = ['events', 'record', 'use_l3t', 'duration']\n daq_cfg = {k: None for k in daq_keys}\n for msg in plan:\n cmd = msg.command\n if cmd == 'open_run':\n print('{:=^80}'.format(' Open Run '))\n elif cmd == 'close_run':\n print('{:=^80}'.format(' Close Run '))\n elif cmd == 'configure':\n if msg.obj.name == 
'daq':\n daq_cfg = {k: msg.kwargs[k] for k in daq_keys}\n print(\n f'Configure DAQ -> ('\n f'events={daq_cfg[\"events\"]}, '\n f'record={daq_cfg[\"record\"]}, '\n f'use_l3t={daq_cfg[\"use_l3t\"]}, '\n f'duration={daq_cfg[\"duration\"]})'\n )\n elif cmd == 'set':\n print('{motor.name} -> {args[0]}'.format(motor=msg.obj,\n args=msg.args))\n elif cmd == 'create':\n read_cache = []\n elif cmd == 'read':\n read_cache.append(msg.obj.name)\n if msg.obj.name == 'daq':\n print(f' Run DAQ for {daq_cfg[\"events\"]} events, '\n f'(record={daq_cfg[\"record\"]})')\n elif cmd == 'save':\n print(f' Read {read_cache}')", "def rpartition(self, x):\n pass", "def _make_stack(self, block, planes, blocks, stride=1, dilate=False):\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n\n # use dilation instead of striding if true\n if dilate:\n self.dilation *= stride\n stride = 1\n\n # apply conv-1x1 to input identity if stride > 1 or output channels != input channels for dim. matching\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion)\n )\n\n layers = []\n # first layer\n # input = batch_size x self.inplanes x H x H\n # output = batch_size x planes * block.expansion x H/stride x H/stride\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n # subsequent layers\n for _ in range(1, blocks):\n # input = output = batch_size x planes * block.expansion x H' x H'\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)", "def algorithm(self, groupInstance = None, jobInstance = None,\n *args, **kwargs):\n # extract some global scheduling parameters\n self.jobNamePrefix = kwargs.get('jobNamePrefix', \"Repack\")\n self.maxSizeSingleLumi = kwargs['maxSizeSingleLumi']\n self.maxSizeMultiLumi = kwargs['maxSizeMultiLumi']\n self.maxInputEvents = kwargs['maxInputEvents']\n self.maxInputFiles = kwargs['maxInputFiles']\n self.maxLatency = kwargs['maxLatency']\n\n self.currentTime = time.time()\n\n self.createdGroup = False\n\n timePerEvent, sizePerEvent, memoryRequirement = \\\n self.getPerformanceParameters(kwargs.get('performance', {}))\n \n myThread = threading.currentThread()\n daoFactory = DAOFactory(package = \"T0.WMBS\",\n logger = logging,\n dbinterface = myThread.dbi)\n\n # keep for later\n self.insertSplitLumisDAO = daoFactory(classname = \"JobSplitting.InsertSplitLumis\")\n\n # data discovery\n getAvailableFilesDAO = daoFactory(classname = \"Subscriptions.GetAvailableRepackFiles\")\n availableFiles = getAvailableFilesDAO.execute(self.subscription[\"id\"])\n\n # nothing to do, stop immediately\n if len(availableFiles) == 0:\n return\n\n # data discovery for already used lumis\n getUsedLumisDAO = daoFactory(classname = \"Subscriptions.GetUsedLumis\")\n usedLumis = getUsedLumisDAO.execute(self.subscription[\"id\"], False)\n\n # empty lumis (as declared by StorageManager) are treated the\n # same way as used lumis, ie. 
we process around them\n getEmptyLumisDAO = daoFactory(classname = \"Subscriptions.GetLumiHolesForRepack\")\n usedLumis |= getEmptyLumisDAO.execute(self.subscription[\"id\"])\n\n # sort available files by lumi\n availableFileLumiDict = {}\n for result in availableFiles:\n lumi = result['lumi']\n if not lumi in availableFileLumiDict:\n availableFileLumiDict[lumi] = []\n availableFileLumiDict[lumi].append(result)\n\n # loop through lumis in order\n haveLumiHole = False\n filesByLumi = {}\n maxUsedLumi = max(usedLumis) if usedLumis else 0\n for lumi in range(1, 1+max(maxUsedLumi,max(availableFileLumiDict.keys()))):\n\n # lumi contains data => remember it for potential processing\n if lumi in availableFileLumiDict:\n\n filesByLumi[lumi] = availableFileLumiDict[lumi]\n\n # lumi is used and we have data => trigger processing\n elif lumi in usedLumis:\n\n if len(filesByLumi) > 0:\n\n if haveLumiHole:\n # if lumi hole check for maxLatency first\n if self.getDataAge(filesByLumi) > self.maxLatency:\n self.defineJobs(filesByLumi, True, memoryRequirement)\n filesByLumi = {}\n # if maxLatency not met ignore data for now\n else:\n filesByLumi = {}\n else:\n self.defineJobs(filesByLumi, True, memoryRequirement)\n filesByLumi = {}\n\n # if we had a lumi hole it is now not relevant anymore\n # the next data will have a used lumi in front of it\n haveLumiHole = False\n\n # lumi has no data and isn't used, ie. we have a lumi hole\n # also has an impact on how to handle later data\n else:\n\n if len(filesByLumi) > 0:\n\n # forceClose if maxLatency trigger is met\n if self.getDataAge(filesByLumi) > self.maxLatency:\n self.defineJobs(filesByLumi, True, memoryRequirement)\n filesByLumi = {}\n # follow the normal thresholds, but only if\n # there is no lumi hole in front of the data\n elif not haveLumiHole:\n self.defineJobs(filesByLumi, False, memoryRequirement)\n filesByLumi = {}\n # otherwise ignore the data for now\n else:\n filesByLumi = {}\n\n haveLumiHole = True\n\n # now handle whatever data is still left (at the high end of the lumi range)\n if haveLumiHole:\n if self.getDataAge(filesByLumi) > self.maxLatency:\n self.defineJobs(filesByLumi, True, memoryRequirement)\n else:\n fileset = self.subscription.getFileset()\n fileset.load()\n self.defineJobs(filesByLumi, not fileset.open, memoryRequirement)\n\n return" ]
[ "0.54398316", "0.53904545", "0.5279085", "0.5181603", "0.5129563", "0.51162356", "0.50968367", "0.50834996", "0.50642467", "0.5043662", "0.5040708", "0.5030235", "0.50188833", "0.4995437", "0.49625322", "0.4938163", "0.49274", "0.4926029", "0.49074957", "0.48996857", "0.48986053", "0.48930645", "0.48427975", "0.48376948", "0.4831864", "0.4821487", "0.47934586", "0.47906455", "0.47737423", "0.47712222" ]
0.6029432
0
Round down a ChunkKey to offsets corresponding to new chunks.
def _round_chunk_key( chunk_key: core.ChunkKey, target_chunks: Mapping[str, int], ) -> core.ChunkKey: new_offsets = {} for dim, offset in chunk_key.items(): chunk_size = target_chunks.get(dim) if chunk_size is None: new_offsets[dim] = offset elif chunk_size == -1: new_offsets[dim] = 0 else: new_offsets[dim] = chunk_size * (offset // chunk_size) return core.ChunkKey(new_offsets)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_chunk(self):\n for key, value in self.piece_coordinates.items():\n # Why is the key a numpy.int type ???\n self.chunk[value] = key", "def _normalizeKeySlice(self, key):\n if key.start is None:\n kstart = (0, 0)\n else:\n kstart = key.start\n\n if key.stop is None:\n kstop = (self.width, self.height)\n else:\n kstop = key.stop\n\n if key.step is None:\n kstep = (1, 1)\n elif isinstance(key.step, int):\n # if only one int is specified, use it for both steps\n kstep = (key.step, key.step)\n else:\n kstep = key.step\n\n # x1 & y1 should be top-left, x2 & y2 should be bottom-right\n # So swap these values if need be.\n x1, y1 = kstart\n x2, y2 = kstop\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n\n try:\n x1, y1 = self._convertNegativeTupleKeyToPositiveTupleKey((x1, y1))\n\n # Because x2 and y2 can go 1 past the end of the max index, the\n # _convertNegativeTupleKeyToPositiveTupleKey() may raise an exception.\n # So we need to pass dummy values so the exception isn't raised.\n if x2 != self.width and x2 != -(self.width - 1) and \\\n y2 != self.height and y2 != -(self.height - 1):\n x2, y2 = self._convertNegativeTupleKeyToPositiveTupleKey((x2, y2))\n elif x2 != self.width and x2 != -(self.width - 1):\n x2, _dummy = self._convertNegativeTupleKeyToPositiveTupleKey((x2, 0))\n elif y2 != self.height and y2 != -(self.height - 1):\n _dummy, y2 = self._convertNegativeTupleKeyToPositiveTupleKey((0, y2))\n else:\n pass # In this case, we don't need to adust x2 and y2 at all. So do nothing.\n except KeyError:\n raise PyTextCanvasException('key must be a tuple of two ints')\n\n return (x1, y1, x2, y2, kstep[0], kstep[1])", "def _calculate_chunk_offsets(self):\n offset = 0\n offsets = []\n for chunk in self.data.iterchunks():\n offsets.append(offset)\n offset += len(chunk)\n return np.array(offsets)", "def build_map(chunk_start, result, total_chunks, start_id, end_id):\n size = len(chunk_start)\n for i in prange(size):\n beg = chunk_start[i]\n end = chunk_start[i + 1] if i < size - 1 else total_chunks\n if start_id < end and beg < end_id: # [beg, end) intersect [start_id, end_id)\n result[max(beg - start_id, 0) : (end - start_id), 0] = beg\n result[max(beg - start_id, 0) : (end - start_id), 1] = end", "def split_chunks(\n key: core.ChunkKey,\n dataset: xarray.Dataset,\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n # This function splits consolidated arrays into blocks of new sizes, e.g.,\n # ⌈x_00 x_01 ...⌉ ⌈⌈x_00⌉ ⌈x_01⌉ ...⌉\n # X = |x_10 x_11 ...| = ||x_10| |x_11| ...|\n # |x_20 x_21 ...| |⌊x_20⌋ ⌊x_21⌋ ...|\n # ⌊ ... ... ...⌋ ⌊ ... ... 
...⌋\n # and emits them as (ChunkKey, xarray.Dataset) pairs.\n all_bounds = []\n for dim, chunk_size in target_chunks.items():\n start = key.get(dim, 0)\n stop = start + dataset.sizes[dim]\n all_bounds.append(_split_chunk_bounds(start, stop, chunk_size))\n\n for bounds in itertools.product(*all_bounds):\n offsets = dict(key)\n slices = {}\n for dim, (start, stop) in zip(target_chunks, bounds):\n base = key.get(dim, 0)\n offsets[dim] = start\n slices[dim] = slice(start - base, stop - base)\n\n new_key = core.ChunkKey(offsets)\n new_chunk = dataset.isel(slices)\n yield new_key, new_chunk", "def build_relative_position(query_size, key_size):\n q_ids = tf.range(query_size, dtype=tf.int32)\n k_ids = tf.range(key_size, dtype=tf.int32)\n rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1])\n rel_pos_ids = rel_pos_ids[:query_size, :]\n rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)\n return tf.cast(rel_pos_ids, tf.int64)", "def get_position(self, key):\n return bisect.bisect_left(self.keys, key)", "def rehash(self, key) -> int:\n return (key + 1) % self.size", "def touchKBucket(self, key):", "def chunk_slices_to_unit_index(self, slices):\n # remove dimension for channel\n slices = slices[-len(self.chunk_shape):]\n return tuple((slice.start - b.start) // s for b, s, slice in zip(self.bounds, self.strides, slices))", "def dec(self, key: str) -> None:\n if key not in self.mapping:\n return\n cur_block = self.mapping[key]\n del self.mapping[key]\n cur_block.keys.remove(key)\n\n if cur_block.val != 1:\n if cur_block.val - 1 != cur_block.prev.val:\n new_block = Block(cur_block.val - 1)\n cur_block.prev.insert_after(new_block)\n else:\n new_block = cur_block.prev\n new_block.keys.add(key)\n self.mapping[key] = new_block\n\n if not cur_block.keys:\n cur_block.remove()", "def _update_prepend_key(self):\n self.prepend_key -= 1", "def _bucket_index(self, key):\n # return hash(key) % len(self.buckets)\n hash_value = 0 # hash is set to 0\n for char in key: # iterates through as much as the number of characters in key\n hash_value += ord(char) # return the unicode value to make the number different everytime\n return hash_value % len(self.buckets) # returns a number that will never be greater than the length of the bucket", "def __setitem__(self, key, value):\n # type: (Union[int, np.ndarray], Any) -> None\n # Convert all possible input key types to an array of integers\n if is_bool_dtype(key):\n key = np.argwhere(key).flatten()\n elif isinstance(key, slice):\n key = np.array(range(len(self))[key])\n elif is_integer(key):\n key = np.array([key])\n else:\n key = np.asanyarray(key)\n\n if pd.api.types.is_scalar(value):\n value = np.broadcast_to(value, len(key))\n else:\n value = np.asarray(value)\n\n if len(key) != len(value):\n raise ValueError(\"Length mismatch between index and value.\")\n\n affected_chunks_index = self._get_chunk_indexer(key)\n affected_chunks_unique = np.unique(affected_chunks_index)\n\n all_chunks = list(self.data.iterchunks())\n\n for ix, offset in zip(\n affected_chunks_unique, self.offsets[affected_chunks_unique]\n ):\n chunk = all_chunks[ix]\n\n # Translate the array-wide indices to indices of the chunk\n key_chunk_indices = np.argwhere(affected_chunks_index == ix).flatten()\n array_chunk_indices = key[key_chunk_indices] - offset\n\n arr = chunk.to_pandas().values\n # In the case where we zero-copy Arrow to Pandas conversion, the\n # the resulting arrays are read-only.\n if not arr.flags.writeable:\n arr = arr.copy()\n arr[array_chunk_indices] = 
value[key_chunk_indices]\n\n mask = None\n # ARROW-2806: Inconsistent handling of np.nan requires adding a mask\n if (\n pa.types.is_integer(self.dtype.arrow_dtype)\n or pa.types.is_date(self.dtype.arrow_dtype)\n or pa.types.is_floating(self.dtype.arrow_dtype)\n or pa.types.is_boolean(self.dtype.arrow_dtype)\n ):\n nan_values = pd.isna(value[key_chunk_indices])\n if any(nan_values):\n nan_index = key_chunk_indices & nan_values\n mask = np.ones_like(arr, dtype=bool)\n mask[nan_index] = False\n pa_arr = pa.array(arr, self.dtype.arrow_dtype, mask=mask)\n all_chunks[ix] = pa_arr\n\n self.data = pa.chunked_array(all_chunks)", "def test_adjust_offsets_short(self):\n tool = pybedtools.BedTool(\"chr15 91512755 91512836 ENSG00000198901_1_147 0 -\", from_string=True)\n offsets = {\"ENSG00000198901_1_147\" : 10}\n results = adjust_offsets(tool, offsets)", "def _map_lines(self, delta: Delta) -> Dict[Tuple, Tuple]:\n\n # this is harder than I thought; I'll start with a super naive\n # approach and improve it later (or never)\n\n if delta.old_length == 0:\n return {(): tuple(range(delta.new_length))}\n if delta.new_length == 0:\n return {tuple(range(delta.old_length)): ()}\n\n result: Dict[Tuple[int, ...], Tuple[int, ...]] = {}\n\n for i in range(min(delta.old_length, delta.new_length) - 1):\n result[(i,)] = (i,)\n\n if delta.old_length >= delta.new_length:\n result[tuple(range(delta.new_length - 1, delta.old_length))] = (\n delta.new_length - 1,\n )\n else:\n result[(delta.old_length - 1,)] = tuple(\n range(delta.old_length - 1, delta.new_length)\n )\n\n return result", "def decrease_key(self, old_item, new_item):", "def _bucket_index(self, key):\n # Calculate the given key's hash code and transform into bucket index\n return hash(key) % len(self.buckets)", "def remove(self, key):\n i = key //1000\n j = key%1000\n self.container[i][j] = -1", "def _get_node_pos(self, key):\n if not self._hashring:\n return\n\n k = md5_bytes(key)\n key = (k[3] << 24) | (k[2] << 16) | (k[1] << 8) | k[0]\n\n nodes = self._sorted_keys\n pos = bisect(nodes, key)\n\n if pos == len(nodes):\n return 0\n return pos", "def _align_chunk_info(chunk_info):\n max_dumps = max(info['shape'][0] for info in chunk_info.values())\n for key, info in chunk_info.items():\n shape = info['shape']\n n_dumps = shape[0]\n if n_dumps < max_dumps:\n info['shape'] = (max_dumps,) + shape[1:]\n # We could just add a single new chunk, but that could cause an\n # inconveniently large chunk if there is a big difference between\n # n_dumps and max_dumps.\n time_chunks = info['chunks'][0] + (max_dumps - n_dumps) * (1,)\n info['chunks'] = (time_chunks,) + info['chunks'][1:]\n logger.debug('Adding %d phantom dumps to array %s', max_dumps - n_dumps, key)\n return chunk_info", "def header_offset_check(key):\n return {\n 'ver': UINT16,\n 'm_len': UINT16,\n 'seq_no': UINT32,\n 'time': UINT32,\n 'set_id': UINT16,\n 'set_len': UINT16,\n }.get(key, 0)", "def shrink_offset_pairs(self):\n\n def int_from_block(i):\n u, v = self.blocks[i].bounds\n block_bytes = self.shrink_target.buffer[u:v]\n return int_from_bytes(block_bytes)\n\n def block_len(i):\n return self.blocks[i].length\n\n # Try reoffseting every pair\n def reoffset_pair(pair, o):\n n = len(self.blocks)\n # Number of blocks may have changed, need to validate\n valid_pair = [\n p\n for p in pair\n if p < n and int_from_block(p) > 0 and self.is_payload_block(p)\n ]\n\n if len(valid_pair) < 2:\n return\n\n m = min([int_from_block(p) for p in valid_pair])\n\n new_blocks = [\n self.shrink_target.buffer[u:v]\n for 
u, v in self.shrink_target.all_block_bounds()\n ]\n for i in valid_pair:\n new_blocks[i] = int_to_bytes(int_from_block(i) + o - m, block_len(i))\n buffer = hbytes().join(new_blocks)\n return self.incorporate_new_buffer(buffer)\n\n def is_non_zero_payload(block):\n return not block.all_zero and self.is_payload_block(block.index)\n\n for block_i, block_j in self.each_pair_of_blocks(\n is_non_zero_payload, is_non_zero_payload\n ):\n i = block_i.index\n j = block_j.index\n\n value_i = int_from_block(i)\n value_j = int_from_block(j)\n\n offset = min(value_i, value_j)\n Integer.shrink(\n offset, lambda o: reoffset_pair((i, j), o), random=self.random\n )", "def lower_common_block_offset(self):\n if len(self.__changed_blocks) <= 1:\n return\n\n current = self.shrink_target\n\n blocked = [current.buffer[u:v] for u, v in current.all_block_bounds()]\n\n changed = [\n i\n for i in sorted(self.__changed_blocks)\n if not self.shrink_target.blocks[i].trivial\n ]\n\n if not changed:\n return\n\n ints = [int_from_bytes(blocked[i]) for i in changed]\n offset = min(ints)\n assert offset > 0\n\n for i in hrange(len(ints)):\n ints[i] -= offset\n\n def reoffset(o):\n new_blocks = list(blocked)\n for i, v in zip(changed, ints):\n new_blocks[i] = int_to_bytes(v + o, len(blocked[i]))\n return self.incorporate_new_buffer(hbytes().join(new_blocks))\n\n new_offset = Integer.shrink(offset, reoffset, random=self.random)\n if new_offset == offset:\n self.clear_change_tracking()", "def split_kbucket(self):\n new_kbucket = super(CachingKBucket, self).split_kbucket()\n\n cache_self, cache_new = util.partition(\n self._replacement_cache,\n self.contact_in_range\n )\n\n # Replacement caches are deques, so we can't directly assign\n # the values returned by partition.\n new_kbucket._replacement_cache.extend(cache_new)\n self._replacement_cache.clear()\n self._replacement_cache.extend(cache_self)\n\n self.fill_from_cache()\n new_kbucket.fill_from_cache()\n\n return new_kbucket", "def __getitem__(self, key):\n \n if type(key) is int or type(key) is long:\n if key >= len(self):\n raise IndexError, \"Index ({:d}) is bigger than my length ({:d})\".format(key, self.total_events)\n if key < 0:\n if (-key) > len(self):\n raise IndexError, \"Index ({:d}) is too small for my length ({:d})\".format(key, self.total_events)\n key = len(self) + key\n \n f = None\n for f in self:\n key -= 1\n if key < 0: break\n return f\n\n elif type(key) is slice:\n (start, stop, stride) = key.indices(self.total_events)\n valid = range(start, stop, stride)\n retval = []\n counter = 0\n for f in self:\n if counter in valid:\n retval.append(f)\n counter += 1\n return retval", "def linear_interpolation_indexes(self, key):\n if isinstance(key, slice):\n output = [{'key':(self.to_index(key),), 'weight':1.0}]\n return [{'key':(self.to_index(key),), 'weight':1.0}]\n elif isinstance(key, (int, float)):\n lowIndex = int(self.toIndex(key))\n highIndex = lowIndex + 1\n try:\n lowUnit = self.to_unit(lowIndex)\n highUnit = self.to_unit(highIndex)\n lmbd = (key - lowUnit) / (highUnit - lowUnit)\n return [{'key':(lowIndex,), 'weight': 1.0-lmbd},\n {'key':(highIndex,), 'weight': lmbd}]\n except:\n return [{'key':(lowIndex,), 'weight': 1.0}]\n else:\n raise ValueError(\"key must be a slice or a numeric type.\")", "def _split_chunk(self, collection_name: str, key: int):\n def split_command():\n self._mongo_client.admin.command('split', collection_name, middle={SHARD_KEY: key})\n self._try_until_done(split_command)\n self._chunks[collection_name][key] = 
MAIN_MONGO_SHARD_NAME\n logging.info(f\"MongoAgent: Split chunk of {collection_name} at {key}\")", "def keyIndex(self, key):\n key ^= bsr(key, 33)\n key *= 0xff51afd7ed558ccdL\n key ^= bsr(key, 33)\n key *= 0xc4ceb9fe1a85ec53L\n key ^= bsr(key, 33)\n return key", "def __getitem__(self, key):\n if not isinstance(key, int):\n raise TypeError\n if key < 0 or key >= len(self.data):\n raise IndexError\n batch = self.data[key]\n batch_size = len(batch)\n batch = list(zip(*batch))\n assert len(batch) == 6\n\n # orig_idx = lens\n token_ids = np.array(seq_padding(batch[0], self.max_len))\n s_start, s_end = np.array(batch[1]), np.array(batch[2])\n o_labels = np.array(batch[3])\n distance_to_s = np.array(seq_padding(batch[4], self.max_len))\n mask = np.array(seq_padding(batch[5], self.max_len))\n\n # print(token_ids, s_start, s_end, o_labels)\n\n return (token_ids, distance_to_s, s_start, s_end, o_labels, mask)" ]
[ "0.6085762", "0.59721315", "0.5535615", "0.54769576", "0.5392096", "0.53591967", "0.52559686", "0.5224978", "0.51803327", "0.5149737", "0.49530393", "0.49112108", "0.49017742", "0.4898832", "0.48969337", "0.4896401", "0.48882088", "0.48876253", "0.4837309", "0.48347116", "0.48325577", "0.48201516", "0.48168233", "0.47555828", "0.47544938", "0.4752155", "0.47473162", "0.47433814", "0.47316816", "0.47209328" ]
0.7670033
0
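The `_round_chunk_key` record above boils down to per-dimension integer arithmetic. Below is a minimal, self-contained sketch of that rounding using plain dicts in place of `core.ChunkKey`; the helper name and the sample offsets are assumptions made for the illustration, not part of the dataset.

def round_chunk_offsets(offsets, target_chunks):
    # Round every dimension offset down to the start of its target chunk.
    rounded = {}
    for dim, offset in offsets.items():
        chunk_size = target_chunks.get(dim)
        if chunk_size is None:       # dimension not being rechunked: keep offset
            rounded[dim] = offset
        elif chunk_size == -1:       # -1 means a single chunk spanning the dimension
            rounded[dim] = 0
        else:                        # snap down to the nearest multiple
            rounded[dim] = chunk_size * (offset // chunk_size)
    return rounded

# e.g. with target chunks of 10 along "time": 37 rounds down to 30, "x" is untouched.
assert round_chunk_offsets({"time": 37, "x": 5}, {"time": 10}) == {"time": 30, "x": 5}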
Combine chunks into a single (ChunkKey, Dataset) pair.
def consolidate_chunks( inputs: Iterable[Tuple[core.ChunkKey, xarray.Dataset]], combine_kwargs: Optional[Mapping[str, Any]] = None, ) -> Tuple[core.ChunkKey, xarray.Dataset]: inputs = list(inputs) keys = [key for key, _ in inputs] if len(set(keys)) < len(keys): raise ValueError(f'chunk keys are not unique: {keys}') # Reconstruct shared offsets along each dimension by inspecting chunk keys. unique_offsets = collections.defaultdict(set) for key in keys: for dim, offset in key.items(): unique_offsets[dim].add(offset) offsets = {k: sorted(v) for k, v in unique_offsets.items()} combined_key = core.ChunkKey({k: v[0] for k, v in offsets.items()}) # Consolidate inputs in a single xarray.Dataset. # `inputs` is a flat list like `[(k_00, ds_00), (k_01, ds_01), ...]` where # `k_ij` is a ChunkKey giving the (multi-dimensional) index of `ds_ij` in a # virtual larger Dataset. # Now we want to actually concatenate along all those dimensions, e.g., the # equivalent of building a large matrix out of sub-matrices: # ⌈[x_00 x_01] ...⌉ ⌈x_00 x_01 ...⌉ # X = |[x_10 x_11] ...| = |x_10 x_11 ...| # |[x_20 x_21] ...| |x_20 x_21 ...| # ⌊ ... ...⌋ ⌊ ... ... ...⌋ # In NumPy, this would be done with `np.block()`. offset_index = core.compute_offset_index(offsets) shape = [len(v) for v in offsets.values()] if np.prod(shape) != len(inputs): raise ValueError('some expected chunk keys are missing') nested_array = np.empty(dtype=object, shape=shape) for key, chunk in inputs: nested_key = tuple(offset_index[dim][key[dim]] for dim in offsets) assert nested_array[nested_key] is None nested_array[nested_key] = chunk kwargs = dict( data_vars='minimal', coords='minimal', join='exact', combine_attrs='override', ) if combine_kwargs is not None: kwargs.update(combine_kwargs) combined_dataset = xarray.combine_nested( nested_array.tolist(), concat_dim=list(offsets), **kwargs ) return combined_key, combined_dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_chunks_result(self, data_keys: List[str], fetch_only: bool = False) -> List:", "def split_chunks(\n key: core.ChunkKey,\n dataset: xarray.Dataset,\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n # This function splits consolidated arrays into blocks of new sizes, e.g.,\n # ⌈x_00 x_01 ...⌉ ⌈⌈x_00⌉ ⌈x_01⌉ ...⌉\n # X = |x_10 x_11 ...| = ||x_10| |x_11| ...|\n # |x_20 x_21 ...| |⌊x_20⌋ ⌊x_21⌋ ...|\n # ⌊ ... ... ...⌋ ⌊ ... ... ...⌋\n # and emits them as (ChunkKey, xarray.Dataset) pairs.\n all_bounds = []\n for dim, chunk_size in target_chunks.items():\n start = key.get(dim, 0)\n stop = start + dataset.sizes[dim]\n all_bounds.append(_split_chunk_bounds(start, stop, chunk_size))\n\n for bounds in itertools.product(*all_bounds):\n offsets = dict(key)\n slices = {}\n for dim, (start, stop) in zip(target_chunks, bounds):\n base = key.get(dim, 0)\n offsets[dim] = start\n slices[dim] = slice(start - base, stop - base)\n\n new_key = core.ChunkKey(offsets)\n new_chunk = dataset.isel(slices)\n yield new_key, new_chunk", "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def combine_batches(chosen_dict):\n\n batches = set(sorted(chosen_dict.keys())) - {'meta_data'}\n batches = sorted(list(batches))\n root_dict = dict()\n root_dict['data'] = chosen_dict[batches[0]]['data']\n root_dict['labels'] = chosen_dict[batches[0]]['labels']\n root_dict['filenames'] = chosen_dict[batches[0]]['filenames']\n root_dict['meta_data'] = chosen_dict['meta_data']\n root_dict['meta_data'].append(batches[0])\n\n for curr_batch in batches[1:]:\n temp_dict = chosen_dict[curr_batch]\n root_dict['data'] = np.concatenate((root_dict['data'],\n temp_dict['data']),\n axis=0)\n root_dict['labels'] = root_dict['labels'] + temp_dict['labels']\n root_dict['filenames'] = root_dict['filenames'] + temp_dict['filenames']\n root_dict['meta_data'].append(curr_batch)\n\n tot_rows = root_dict['data'].shape[0]\n new_order = range(tot_rows)\n for _ in range(5):\n shuffle(new_order)\n\n ub_dict = dict()\n ub_data = np.zeros((tot_rows, 3072), dtype=root_dict['data'].dtype)\n ub_labels = [0] * tot_rows\n ub_filenames = [\"\"] * tot_rows\n\n for ctr, idx in enumerate(new_order):\n ub_data[ctr, :] = root_dict['data'][idx, :]\n ub_labels[ctr] = root_dict['labels'][idx]\n ub_filenames[ctr] = root_dict['filenames'][idx]\n\n ub_dict['data'] = ub_data\n ub_dict['labels'] = ub_labels\n ub_dict['filenames'] = ub_filenames\n ub_dict['meta_data'] = root_dict['meta_data']\n\n return ub_dict", "def in_memory_rechunk(\n inputs: List[Tuple[core.ChunkKey, xarray.Dataset]],\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n key, dataset = consolidate_chunks(inputs)\n yield from split_chunks(key, dataset, target_chunks)", "def _get_chunk_data(self, inputs: Iterable, chunk_size: int):\n inputs_iter = iter(inputs)\n while True:\n try:\n chunk_data = []\n for _ in range(chunk_size):\n processed_data = next(inputs_iter)\n chunk_data.append(processed_data)\n yield chunk_data\n except StopIteration:\n if chunk_data:\n yield chunk_data\n break", "def compose_array_from_dataloader(dataloader, key=\"original\"):\n\n sample = dataloader.dataset[0][key]\n\n if key == \"label\":\n dtype = np.int\n output_shape = [len(dataloader.dataset)]\n else:\n dtype = np.float32\n output_shape = [len(dataloader.dataset)] + list(sample.shape)\n\n output_array = 
np.zeros(output_shape, dtype=dtype)\n output_array.setflags(write=True)\n global_batch_size = dataloader.batch_size\n\n with tqdm(total=len(dataloader)) as pbar:\n for idx, batch in enumerate(dataloader):\n array_to_add = batch[key].numpy()\n batch_size = array_to_add.shape[0]\n output_array[\n global_batch_size * idx : global_batch_size * idx + batch_size\n ] = array_to_add\n pbar.update(1)\n\n return output_array", "def _chunks_merge(chunks):\n chunks_ = []\n while chunks:\n chunk, chunks = chunks\n chunks_.append(chunk)\n return chunks_[0][:0].join(reversed(chunks_)) if chunks_ else b\"\"", "def individual_collate(batch):\n\n data = batch\n\n collected_data = defaultdict(list)\n\n for i in range(len(list(data))):\n for k in data[i].keys():\n collected_data[k].append(data[i][k])\n\n for k in collected_data.keys():\n collected_data[k] = torch.stack(collected_data[k])\n\n return collected_data", "def merge(*args, chunk_size=None):\n if chunk_size == None:\n chunk_size = args[0].chunk_size\n def gen():\n while True:\n # When stacking, arrays must be of similar shapes,\n # therefore we first stack all 1d arrays, and then stack them\n # with the 2d arrays.\n lst1dim = []\n lst2dim = []\n for stm in args:\n els = stm.read(chunk_size)\n if els.shape[0] == 0:\n return\n if els.ndim == 1:\n lst1dim.append(els)\n else:\n lst2dim.append(els)\n lst2dim.append(np.column_stack(lst1dim))\n yield np.hstack(lst2dim)\n return Stream(gen(), chunk_size=chunk_size)", "def seed_other_dataset(name: str, chunk_size: int, start=None, end=None):\n objects = []\n for chunk in pd.read_csv(name, chunksize=chunk_size, header=1):\n chunk_as_mat = chunk.to_numpy()\n chunk_start = datetime.datetime.strptime(str(chunk_as_mat[0][0]), \"%Y%m%d\")\n chunk_end = datetime.datetime.strptime(str(chunk_as_mat[-1][0]), \"%Y%m%d\")\n if start is not None and start > chunk_end:\n continue\n if end is not None and end < chunk_start:\n break\n # print(chunk.to_numpy())\n objects += insert_into_sql(chunk.to_numpy())\n return objects", "def get_chunks_meta(\n self, data_keys: List[str], fields: List[str] = None, error=\"raise\"\n ) -> List[Dict]:", "def _build_chunk_registry(self, backend_key, dtype):\n\n query = backend_key_to_query(backend_key)\n chunks_registry = self.db[self.col_name].find(\n {**query, 'provides_meta': False},\n {\"chunk_i\": 1, \"data\": 1})\n\n # We are going to convert this to a dictionary as that is\n # easier to lookup\n for doc in chunks_registry:\n chunk_key = doc.get('chunk_i', None)\n if chunk_key is None:\n # Should not happen because of the projection in find\n # but let's double-check:\n raise ValueError(\n f'Projection failed, got doc with no \"chunk_i\":\\n{doc}')\n # Update our registry with this chunks info. Use chunk_i as\n # chunk_key. Make it a *string* to avoid potential key-error\n # issues or json-encoding headaches.\n chunk_len = len(doc.get('data', []))\n result = np.zeros(chunk_len, dtype=dtype)\n for key in np.dtype(dtype).names:\n result[key] = [dd[key] for dd in doc['data']]\n self.chunks_registry[backend_key + str(chunk_key)] = result\n del doc\n\n # Some bookkeeping to make sure we don't buffer too much in this\n # backend. 
We still need to return at least one hence the 'and'.\n # See: https://github.com/AxFoundation/strax/issues/346\n if backend_key not in self._buffered_backend_keys:\n self._buffered_backend_keys.append(backend_key)\n while (\n (len(self._buffered_backend_keys) > 1 and\n sum(ch.nbytes for ch in self.chunks_registry.values()) / 1e6 > self._buff_mb)\n or len(self._buffered_backend_keys) > self._buff_nruns\n ):\n self._clean_first_key_from_registry()", "def load_chunks(self):\n for key, array in self.chunks.items():\n loaded_array = np.asarray(array)\n self.chunks[key] = loaded_array", "def cut_all_data_and_labels_on_chunks(data: Data_dict_type, labels: Labels_dict_type,\n window_size: float, window_step: float) -> Tuple[\n Data_dict_type, Labels_dict_type]:\n for key, item in data.items():\n # extract data and sample rate of videofile\n data_array, sample_rate = item\n # calculate size of window in units (indexes)\n window_size_in_units = int(np.round(window_size * sample_rate))\n window_step_in_units = int(np.round(window_step * sample_rate))\n try:\n # try to cut data on chunks with defined window\n data_array = cut_data_on_chunks(data_array, window_size_in_units, window_step_in_units)\n data_array = np.concatenate([x[np.newaxis, ...] for x in data_array], axis=0)\n except AttributeError:\n # if size of window or step of window are more than length of data, takes full data as one window.\n data_array = data_array[np.newaxis, ...]\n data[key] = (data_array, sample_rate)\n # labels cutting, everything the same as with data cutting\n labels_dataframe = labels[key]\n try:\n labels_dataframe = cut_data_on_chunks(labels_dataframe.values, window_size_in_units, window_step_in_units)\n labels_dataframe = np.concatenate([x[np.newaxis, ...] for x in labels_dataframe], axis=0)\n except AttributeError:\n # labels now will be saved in np.ndarray format\n labels_dataframe = labels_dataframe.values[np.newaxis, ...]\n labels[key] = labels_dataframe\n return data, labels", "def transform(self, chunks):\n data = np.array([chunk.flatten() for chunk in chunks])\n\n return data", "def build_chunks(results, metadata):\n\n for result in results:\n chunk = connector_pb2.DataChunk()\n for field in metadata.fieldInfo:\n set_value(result, field.name, chunk)\n yield chunk", "def chunk_content(self):\n entries = DataObject.objects.filter(uuid=self.uuid)\n for entry in entries:\n if entry.compressed:\n data = BytesIO(entry.blob)\n z = zipfile.ZipFile(data, \"r\")\n content = z.read(\"data\")\n data.close()\n z.close()\n yield content\n else:\n yield entry.blob", "def combine_data(data_files_dict):\n key_list = list(data_files_dict.keys())\n no_col = len(data_files_dict[key_list[0]])\n combined = []\n for n in range(0, no_col):\n d = np.empty(shape=[0, 1])\n for k in data_files_dict:\n d = np.append(d, data_files_dict[k][n])\n combined.append(d)\n return combined", "def _get_batch_data(batch, ctx):\n data, label = batch\n return (mx.gluon.utils.split_and_load(data, ctx),\n mx.gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def chunk_generator(input_file, chunksize = 100000, dataset_name = \"\") :\n\n with h5py.File(input_file, 'r', libver = 'latest') as f :\n dataset = f[dataset_name]\n for x in range(0, dataset.size, chunksize) :\n yield dataset[x:x+chunksize]", "def _concatenate_batch(\n self, data_batch: Dict[str, List[str]]\n ) -> Tuple[List[Dict[str, any]], List[Dict[str, any]]]:\n concatenated_batch = []\n evidences_batch = []\n\n emotion_batch = data_batch[\"emotion\"]\n target_utterance_batch = 
data_batch[\"target_utterance\"]\n evidence_utterance_batch = data_batch[\"evidence_utterance\"]\n conversation_history_batch = data_batch[\"conversation_history\"]\n\n for i, (\n emotion,\n target_utterance,\n evidence_utterance,\n conversation_history,\n ) in enumerate(\n zip(\n emotion_batch,\n target_utterance_batch,\n evidence_utterance_batch,\n conversation_history_batch,\n )\n ):\n concatenated_qns = (\n \"The target utterance is \"\n + target_utterance\n + \"The evidence utterance is \"\n + evidence_utterance\n + \"What is the causal span from context that is relevant to the target utterance's emotion \"\n + emotion\n + \" ?\"\n )\n inputs = {\n \"id\": i,\n \"question\": concatenated_qns,\n \"answers\": [{\"text\": \" \", \"answer_start\": 0}],\n \"is_impossible\": False,\n }\n instance_dict = {\"context\": conversation_history, \"qas\": [inputs]}\n concatenated_batch.append(instance_dict)\n\n evidence = {\"id\": i, \"evidence\": evidence_utterance}\n evidences_batch.append(evidence)\n\n return concatenated_batch, evidences_batch", "def init_datasets(self, data_dict, label_dict):\n splits = data_dict.keys()\n dataset_dict = {\n key: ArrayDataset(data_dict[key], torch.LongTensor(label_dict[key]))\n for key in splits\n }\n return dataset_dict", "def _group_chunks_by_entities(self, chunks, entities):\n for entity in entities:\n chunks_to_concat = chunks.get_overlaps(\n entity['beginOffset'], len(entity['content']))\n if not chunks_to_concat: continue\n new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])\n new_chunk = Chunk(new_chunk_word)\n chunks.swap(chunks_to_concat, new_chunk)\n return chunks", "def _group_chunks_by_entities(self, chunks, entities):\n for entity in entities:\n chunks_to_concat = chunks.get_overlaps(\n entity['beginOffset'], len(entity['content']))\n if not chunks_to_concat:\n continue\n new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])\n new_chunk = Chunk(new_chunk_word)\n chunks.swap(chunks_to_concat, new_chunk)\n return chunks", "def provide_data(self):\n return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self._data]", "def get_data(\n group: str, min_length: int, window_size: int\n) -> Tuple[np.ndarray, np.ndarray]:\n\n group_asserts(group, both=True)\n\n added_filename = os.path.join(\n data.ALIGNMENT_DIR,\n \"chunks\",\n \"data\",\n f\"added-examples-{window_size}-{min_length}.csv\",\n )\n\n removed_filename = os.path.join(\n data.ALIGNMENT_DIR,\n \"chunks\",\n \"data\",\n f\"removed-examples-{window_size}-{min_length}.csv\",\n )\n\n with open(removed_filename, \"r\") as file:\n csvreader = csv.reader(file, dialect=\"excel\")\n added_rows = list(csvreader)\n\n with open(added_filename, \"r\") as file:\n csvreader = csv.reader(file, dialect=\"excel\")\n removed_rows = list(csvreader)\n\n if group == \"removed\":\n rows = removed_rows\n elif group == \"added\":\n rows = added_rows\n else:\n rows = added_rows + removed_rows\n\n X = np.zeros((len(rows), len(rows[0]) - 1))\n Y = np.zeros((len(rows),))\n\n for i, row in enumerate(rows):\n *features, target = parse_row(row)\n X[i] = features\n Y[i] = target\n\n return X, Y", "def batch_chunks(exp_chunks):\n import numpy as np\n batch_idx = np.array([chunk[0]['batch_id'] for chunk in exp_chunks])\n unique_batch_idx = np.unique(batch_idx)\n ids_per_array = [np.where(batch_idx == array_bidx)[0] for array_bidx in unique_batch_idx]\n exp_arrays = [[exp_chunks[idx] for idx in chunk_ids] for chunk_ids in ids_per_array]\n return exp_arrays", "def 
concatenate_data():", "def nb_read_data(data_chunk):\n\t#ensure that the data_chunk has the right length\n\n\tassert np.mod(data_chunk.shape[0],3)==0\n\n\tout=np.empty(data_chunk.shape[0]//3*2,dtype=np.uint16)\n\timage1 = np.empty((2048,2048),dtype=np.uint16)\n\timage2 = np.empty((2048,2048),dtype=np.uint16)\n\n\tfor i in nb.prange(data_chunk.shape[0]//3):\n\t\tfst_uint8=np.uint16(data_chunk[i*3])\n\t\tmid_uint8=np.uint16(data_chunk[i*3+1])\n\t\tlst_uint8=np.uint16(data_chunk[i*3+2])\n\n\t\tout[i*2] = (fst_uint8 << 4) + (mid_uint8 >> 4)\n\t\tout[i*2+1] = ((mid_uint8 % 16) << 8) + lst_uint8\n\n\treturn out" ]
[ "0.6005835", "0.5682695", "0.5602341", "0.5585601", "0.5574721", "0.5481608", "0.52914053", "0.5252008", "0.51839036", "0.5157331", "0.51571405", "0.51497865", "0.51459897", "0.511034", "0.50953734", "0.5063773", "0.50514185", "0.5030089", "0.5011495", "0.50045127", "0.50042385", "0.50019664", "0.4969346", "0.4939917", "0.4938862", "0.49307942", "0.49271405", "0.49238795", "0.49068213", "0.4901985" ]
0.68850577
0
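The `consolidate_chunks` record above ultimately delegates the concatenation to `xarray.combine_nested`. A minimal sketch of that underlying step, assuming plain xarray and two toy chunks offset along a single dimension (the variable and dimension names are invented for the example):

import numpy as np
import xarray

chunk_a = xarray.Dataset({"foo": ("x", np.array([1, 2]))})   # ChunkKey offset {"x": 0}
chunk_b = xarray.Dataset({"foo": ("x", np.array([3, 4]))})   # ChunkKey offset {"x": 2}

combined = xarray.combine_nested(
    [chunk_a, chunk_b],
    concat_dim="x",
    data_vars="minimal",
    coords="minimal",
    join="exact",
    combine_attrs="override",
)
assert combined.foo.values.tolist() == [1, 2, 3, 4]
# The combined key is built from the smallest offset seen per dimension, here {"x": 0}.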
Split a single (ChunkKey, xarray.Dataset) pair into many chunks.
def split_chunks( key: core.ChunkKey, dataset: xarray.Dataset, target_chunks: Mapping[str, int], ) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]: # This function splits consolidated arrays into blocks of new sizes, e.g., # ⌈x_00 x_01 ...⌉ ⌈⌈x_00⌉ ⌈x_01⌉ ...⌉ # X = |x_10 x_11 ...| = ||x_10| |x_11| ...| # |x_20 x_21 ...| |⌊x_20⌋ ⌊x_21⌋ ...| # ⌊ ... ... ...⌋ ⌊ ... ... ...⌋ # and emits them as (ChunkKey, xarray.Dataset) pairs. all_bounds = [] for dim, chunk_size in target_chunks.items(): start = key.get(dim, 0) stop = start + dataset.sizes[dim] all_bounds.append(_split_chunk_bounds(start, stop, chunk_size)) for bounds in itertools.product(*all_bounds): offsets = dict(key) slices = {} for dim, (start, stop) in zip(target_chunks, bounds): base = key.get(dim, 0) offsets[dim] = start slices[dim] = slice(start - base, stop - base) new_key = core.ChunkKey(offsets) new_chunk = dataset.isel(slices) yield new_key, new_chunk
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_dataset(x, batch_size):\r\n\tsize_modulo = len(x) % batch_size # hack to ensure data is batches successfully\r\n\tif size_modulo != 0:\r\n\t\tx = x[:-size_modulo]\r\n\tpartitioned = np.split(x, batch_size)\r\n\treturn partitioned", "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def in_memory_rechunk(\n inputs: List[Tuple[core.ChunkKey, xarray.Dataset]],\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n key, dataset = consolidate_chunks(inputs)\n yield from split_chunks(key, dataset, target_chunks)", "def get_chunks_result(self, data_keys: List[str], fetch_only: bool = False) -> List:", "def _chunk_data(X, slices):\n\n # from object array to list\n slices = [sl for sl in slices if len(sl)]\n selected_times = np.hstack([np.ravel(sl) for sl in slices])\n start = np.min(selected_times)\n stop = np.max(selected_times) + 1\n slices_chunk = [sl - start for sl in slices]\n X_chunk = X[:, :, start:stop]\n return X_chunk, slices_chunk", "def split_chunk(chunk, *a, **kw):\n return split_chunk(chunk, *a, **kw)", "def cut_all_data_and_labels_on_chunks(data: Data_dict_type, labels: Labels_dict_type,\n window_size: float, window_step: float) -> Tuple[\n Data_dict_type, Labels_dict_type]:\n for key, item in data.items():\n # extract data and sample rate of videofile\n data_array, sample_rate = item\n # calculate size of window in units (indexes)\n window_size_in_units = int(np.round(window_size * sample_rate))\n window_step_in_units = int(np.round(window_step * sample_rate))\n try:\n # try to cut data on chunks with defined window\n data_array = cut_data_on_chunks(data_array, window_size_in_units, window_step_in_units)\n data_array = np.concatenate([x[np.newaxis, ...] for x in data_array], axis=0)\n except AttributeError:\n # if size of window or step of window are more than length of data, takes full data as one window.\n data_array = data_array[np.newaxis, ...]\n data[key] = (data_array, sample_rate)\n # labels cutting, everything the same as with data cutting\n labels_dataframe = labels[key]\n try:\n labels_dataframe = cut_data_on_chunks(labels_dataframe.values, window_size_in_units, window_step_in_units)\n labels_dataframe = np.concatenate([x[np.newaxis, ...] 
for x in labels_dataframe], axis=0)\n except AttributeError:\n # labels now will be saved in np.ndarray format\n labels_dataframe = labels_dataframe.values[np.newaxis, ...]\n labels[key] = labels_dataframe\n return data, labels", "def batch_chunks(exp_chunks):\n import numpy as np\n batch_idx = np.array([chunk[0]['batch_id'] for chunk in exp_chunks])\n unique_batch_idx = np.unique(batch_idx)\n ids_per_array = [np.where(batch_idx == array_bidx)[0] for array_bidx in unique_batch_idx]\n exp_arrays = [[exp_chunks[idx] for idx in chunk_ids] for chunk_ids in ids_per_array]\n return exp_arrays", "def consolidate_chunks(\n inputs: Iterable[Tuple[core.ChunkKey, xarray.Dataset]],\n combine_kwargs: Optional[Mapping[str, Any]] = None,\n) -> Tuple[core.ChunkKey, xarray.Dataset]:\n inputs = list(inputs)\n keys = [key for key, _ in inputs]\n if len(set(keys)) < len(keys):\n raise ValueError(f'chunk keys are not unique: {keys}')\n\n # Reconstruct shared offsets along each dimension by inspecting chunk keys.\n unique_offsets = collections.defaultdict(set)\n for key in keys:\n for dim, offset in key.items():\n unique_offsets[dim].add(offset)\n offsets = {k: sorted(v) for k, v in unique_offsets.items()}\n combined_key = core.ChunkKey({k: v[0] for k, v in offsets.items()})\n\n # Consolidate inputs in a single xarray.Dataset.\n # `inputs` is a flat list like `[(k_00, ds_00), (k_01, ds_01), ...]` where\n # `k_ij` is a ChunkKey giving the (multi-dimensional) index of `ds_ij` in a\n # virtual larger Dataset.\n # Now we want to actually concatenate along all those dimensions, e.g., the\n # equivalent of building a large matrix out of sub-matrices:\n # ⌈[x_00 x_01] ...⌉ ⌈x_00 x_01 ...⌉\n # X = |[x_10 x_11] ...| = |x_10 x_11 ...|\n # |[x_20 x_21] ...| |x_20 x_21 ...|\n # ⌊ ... ...⌋ ⌊ ... ... 
...⌋\n # In NumPy, this would be done with `np.block()`.\n offset_index = core.compute_offset_index(offsets)\n shape = [len(v) for v in offsets.values()]\n if np.prod(shape) != len(inputs):\n raise ValueError('some expected chunk keys are missing')\n nested_array = np.empty(dtype=object, shape=shape)\n for key, chunk in inputs:\n nested_key = tuple(offset_index[dim][key[dim]] for dim in offsets)\n assert nested_array[nested_key] is None\n nested_array[nested_key] = chunk\n\n kwargs = dict(\n data_vars='minimal',\n coords='minimal',\n join='exact',\n combine_attrs='override',\n )\n if combine_kwargs is not None:\n kwargs.update(combine_kwargs)\n\n combined_dataset = xarray.combine_nested(\n nested_array.tolist(),\n concat_dim=list(offsets),\n **kwargs\n )\n return combined_key, combined_dataset", "def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]", "def split_into_chunks(x, n):\n csize = int(np.ceil(len(x) / n))\n out = list()\n \n i = 0\n while i * csize < len(x):\n out.append(x[(i * csize):(i * csize + csize)])\n i += 1\n\n return out", "def _split_chunk(self, collection_name: str, key: int):\n def split_command():\n self._mongo_client.admin.command('split', collection_name, middle={SHARD_KEY: key})\n self._try_until_done(split_command)\n self._chunks[collection_name][key] = MAIN_MONGO_SHARD_NAME\n logging.info(f\"MongoAgent: Split chunk of {collection_name} at {key}\")", "def split(x, axis, split_size):\n assert axis < x.ndim, 'Dimension out of range!'\n\n if isinstance(split_size, int):\n _split_size = [x.shape[axis] // split_size] * split_size\n\n elif isinstance(split_size, (list, tuple)):\n _split_size = split_size\n else:\n raise TypeError\n\n if x.ndim == 0:\n\n return [x for _ in range(len(_split_size))]\n\n return T.split(x, splits_size=_split_size, n_splits=len(_split_size), axis=axis)", "def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]", "def split_data(x, y, ratio, index=None):\n m = x.shape[0]\n splitter = np.cumsum(ratio)\n train_start = 0\n val_start = batch_size * ((splitter[0] * m) // batch_size)\n test_start = batch_size * ((splitter[1] * m) // batch_size)\n test_end = batch_size * ((splitter[2] * m) // batch_size)\n\n val_start = int(val_start)\n test_start = int(test_start)\n test_end = int(test_end)\n\n if index is not None:\n split = ( x[train_start:val_start, :], y[train_start:val_start, :],\n index[train_start:val_start],\n x[val_start:test_start, :], y[val_start:test_start, :],\n index[val_start:test_start],\n x[test_start:test_end, :], y[test_start:test_end, :],\n index[test_start:test_end]\n )\n\n\n\n else:\n split = ( x[train_start:val_start, :], y[train_start:val_start, :],\n x[val_start:test_start, :], y[val_start:test_start, :],\n x[test_start:test_end, :], y[test_start:test_end, :]\n )\n\n return split", "def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]", "def get_chunks(self, data, scale=1):\r\n x_chunks, y_chunks = [(0, self.rows)], [(0, self.cols)]\r\n if data.shape[0] > self.rows:\r\n x_chunks = self.perform_chunking(data.shape[0], self.rows)\r\n else:\r\n x_chunks = [(0, data.shape[0])]\r\n if data.shape[1] > self.cols:\r\n y_chunks = self.perform_chunking(data.shape[1], self.cols)\r\n else:\r\n y_chunks = [(0, data.shape[1])]\r\n 
return x_chunks, y_chunks", "def chunks(data, n):\n newn = int(len(data) / n) # chunk size \n \n for i in range(0, n-1):\n test_chunk = data[i*newn:i*newn+newn]\n train_chunk = [el for el in data if el not in test_chunk]\n yield train_chunk, test_chunk\n \n test_chunk = data[n*newn-newn:]\n train_chunk = [el for el in data if el not in test_chunk]\n \n yield train_chunk, test_chunk", "def split_dataset(dataset):\n sequence = dataset.split()\n return sequence", "def split_dataset(dataset):\n sequence = dataset.split()\n return sequence", "def batch_split(self) -> np.array:\n pass", "def test_split_data(self):\n Xlists = tuple([[np.zeros((200,9)) for b in range(14)] for c in range(9)])\n ybinarylists = [np.zeros((14,12)) for c in range(9)]\n indices = slice(7, 9)\n x_test, y_test = tutorial_pamap2.split_data(Xlists, ybinarylists, \\\n indices)\n test = y_test[0].shape == (12,) and x_test[0].shape == (200, 9)\n assert test", "def get_chunks(sequence, window_size, step=1):\n k = len(sequence)\n for i in range(0, k - window_size + 1, step):\n end = i + window_size\n chunk = sequence[i:i + window_size]\n assert len(chunk) == window_size\n yield chunk, end", "def slices_to_chunks(self, slices):\n return map(self.unit_index_to_chunk, self.slices_to_unit_indices(slices))", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def _get_chunk_data(self, inputs: Iterable, chunk_size: int):\n inputs_iter = 
iter(inputs)\n while True:\n try:\n chunk_data = []\n for _ in range(chunk_size):\n processed_data = next(inputs_iter)\n chunk_data.append(processed_data)\n yield chunk_data\n except StopIteration:\n if chunk_data:\n yield chunk_data\n break", "def get_k_fold_data(ds, k=10):\n splits = ds.split(k)\n for i in range(k):\n yield (concatenate(splits[j] for j in range(k) if j != i), splits[i])", "def split_samples(data):\n\n training_samples = data[0:9497]\n test_samples = data[9497:11300]\n\n return training_samples, test_samples", "def chunks(data, rows=10000):\n\n for i in range(0, len(data), rows):\n yield data[i:i+rows]", "def get_chunks(sequence, window_size, step=1):\n # get the sequence length\n k = len(sequence)\n # get the index for each end and chunk\n for i in range(0, k - window_size + 1, step):\n # generate the end of the window\n end = i + window_size\n # get the slice of the sequence\n chunk = sequence[i:i + window_size]\n # assure the the chunk is the expected size\n assert len(chunk) == window_size\n yield chunk, end" ]
[ "0.6560789", "0.6540121", "0.64344317", "0.63841146", "0.6350557", "0.6311908", "0.6191371", "0.61547345", "0.6076409", "0.6063916", "0.6042421", "0.6026232", "0.59983486", "0.599385", "0.5961747", "0.59533745", "0.58974886", "0.5865656", "0.5859766", "0.5859766", "0.5857885", "0.58498794", "0.58345866", "0.58322585", "0.5824989", "0.58140683", "0.581191", "0.57871425", "0.575957", "0.5755395" ]
0.75269866
0
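The `split_chunks` record above relies on `_split_chunk_bounds` to carve each dimension into target-aligned pieces before taking the cartesian product across dimensions. A standalone approximation of that bound computation, with an invented helper name and example numbers:

def split_bounds(start, stop, chunk_size):
    # Yield (start, stop) pairs whose interior breaks fall on multiples of chunk_size.
    first = chunk_size * (start // chunk_size)
    for left in range(first, stop, chunk_size):
        yield max(left, start), min(left + chunk_size, stop)

# A consolidated chunk covering offsets [5, 17) split with target chunks of 10:
assert list(split_bounds(5, 17, 10)) == [(5, 10), (10, 17)]
# split_chunks then emits one (ChunkKey, dataset.isel(...)) pair per resulting cell.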
Rechunk in-memory pairs of (ChunkKey, xarray.Dataset).
def in_memory_rechunk( inputs: List[Tuple[core.ChunkKey, xarray.Dataset]], target_chunks: Mapping[str, int], ) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]: key, dataset = consolidate_chunks(inputs) yield from split_chunks(key, dataset, target_chunks)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_chunks(\n key: core.ChunkKey,\n dataset: xarray.Dataset,\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n # This function splits consolidated arrays into blocks of new sizes, e.g.,\n # ⌈x_00 x_01 ...⌉ ⌈⌈x_00⌉ ⌈x_01⌉ ...⌉\n # X = |x_10 x_11 ...| = ||x_10| |x_11| ...|\n # |x_20 x_21 ...| |⌊x_20⌋ ⌊x_21⌋ ...|\n # ⌊ ... ... ...⌋ ⌊ ... ... ...⌋\n # and emits them as (ChunkKey, xarray.Dataset) pairs.\n all_bounds = []\n for dim, chunk_size in target_chunks.items():\n start = key.get(dim, 0)\n stop = start + dataset.sizes[dim]\n all_bounds.append(_split_chunk_bounds(start, stop, chunk_size))\n\n for bounds in itertools.product(*all_bounds):\n offsets = dict(key)\n slices = {}\n for dim, (start, stop) in zip(target_chunks, bounds):\n base = key.get(dim, 0)\n offsets[dim] = start\n slices[dim] = slice(start - base, stop - base)\n\n new_key = core.ChunkKey(offsets)\n new_chunk = dataset.isel(slices)\n yield new_key, new_chunk", "def rechunk(array, chunk_size=None, chunk_overlap=None):\n\n # deal with chunk sizes\n ds = array.datashape.copy()\n if chunk_size is None:\n chunk_size = ds.chunk_size\n if isinstance(chunk_size, int):\n chunk_size = [chunk_size] * ds.ndim\n ds.chunk_size = chunk_size\n\n if chunk_overlap is None:\n chunk_overlap = ds.chunk_overlap\n if isinstance(chunk_overlap, int):\n chunk_overlap = [chunk_overlap] * ds.ndim\n ds.chunk_overlap = chunk_overlap\n\n if ds != array.datashape:\n array = array.redimension(ds.schema)\n return array", "def reslice_dataset(input_, output, axis=1, key_path=None, rescaling=False,\n nbit=16, minmax=None, skip=None, rotate=0.0, chunk=16,\n mode=\"constant\", crop=(0, 0, 0, 0, 0, 0),\n ncore=None, show_progress=True):\n if output is None:\n raise ValueError(\"Wrong output type !!!\")\n if axis != 1 and axis != 2:\n raise ValueError(\"Only two options for axis: 1 or 2\")\n else:\n axis = int(axis)\n __check_output(output)\n in_type = __get_input_type(input_)\n if in_type != \"tif\" and in_type != \"hdf\":\n raise ValueError(\"Wrong input type !!!\")\n results = __save_intermediate_data(input_, output, axis, crop, key_path,\n rotate, chunk, mode, ncore,\n show_progress)\n file_tmp, key_tmp, folder_tmp = results\n with h5py.File(file_tmp, 'r') as hdf_object:\n data = hdf_object[key_tmp]\n (depth1, height1, width1) = data.shape\n chunk = np.clip(chunk, 1, height1 - 1)\n last_chunk = height1 - chunk * (height1 // chunk)\n data_type = data.dtype\n res_type = str(data_type)\n if rescaling is True:\n if nbit == 16:\n res_type = \"uint16\"\n elif nbit == 8:\n res_type = \"uint8\"\n else:\n raise ValueError(\"Only two options for nbit: 8 or 16 !!!\")\n if str(data_type) != res_type:\n if data_type == np.uint8:\n minmax = (0, 255)\n elif data_type == np.uint16:\n minmax = (0, 65535)\n else:\n if skip is None:\n skip = min(20, int(0.02 * depth1))\n skip = int(np.clip(skip, 1, depth1 - 1))\n if minmax is None:\n f_alias = get_statistical_information_dataset\n minmax = f_alias(input_, percentile=(0, 100),\n skip=skip, crop=crop,\n key_path=key_path)[0:2]\n else:\n rescaling = False\n out_type = __get_output_type(output)\n t0 = timeit.default_timer()\n if out_type == \"hdf\":\n key_path = \"entry/data\" if key_path is None else key_path\n data_slice = losa.open_hdf_stream(output,\n (height1, depth1, width1),\n data_type=res_type,\n key_path=key_path,\n overwrite=True)\n for i in np.arange(0, height1 - last_chunk, chunk):\n if show_progress:\n t1 = timeit.default_timer()\n f_size = __get_dataset_size(output)\n 
msg = \"Save resliced data to file: {0:0.2f}MB.\" \\\n \" Time: {1:0.2f}s\".format(f_size, t1 - t0)\n len_msg = len(msg)\n sys.stdout.write(msg)\n sys.stdout.flush()\n mat_chunk = data[:, i: i + chunk, :]\n if rescaling:\n mat_tmp = []\n for j in np.arange(chunk):\n mat = rescale(mat_chunk[:, j, :], nbit, minmax)\n mat_tmp.append(mat)\n mat_tmp = np.asarray(mat_tmp)\n else:\n mat_tmp = np.moveaxis(mat_chunk, 1, 0)\n data_slice[i:i + chunk] = mat_tmp\n if show_progress:\n sys.stdout.write(\"\\r\" + \" \" * len_msg + \"\\r\")\n if last_chunk != 0:\n mat_chunk = data[:, height1 - last_chunk: height1, :]\n if rescaling:\n mat_tmp = []\n for j in np.arange(last_chunk):\n mat = rescale(mat_chunk[:, j, :], nbit, minmax)\n mat_tmp.append(mat)\n mat_tmp = np.asarray(mat_tmp)\n else:\n mat_tmp = np.moveaxis(mat_chunk, 1, 0)\n data_slice[height1 - last_chunk: height1] = mat_tmp\n else:\n list_file, len_msg = None, None\n for i in np.arange(0, height1 - last_chunk, chunk):\n if show_progress:\n t1 = timeit.default_timer()\n list_file = glob.glob(output + \"/*tif*\")\n if list_file:\n f_size = __get_dataset_size(output)\n msg = \"Save resliced data to file: {0:0.2f}MB.\" \\\n \" Time: {1:0.2f}s\".format(f_size, t1 - t0)\n len_msg = len(msg)\n sys.stdout.write(msg)\n sys.stdout.flush()\n mat_chunk = data[:, i: i + chunk, :]\n out_files = [output + \"/img_\" + (\"0000\" + str(\n i + j))[-5:] + \".tif\" for j in range(chunk)]\n if rescaling:\n mat_chunk = rescale(mat_chunk, nbit, minmax)\n losa.save_image_multiple(out_files, mat_chunk.astype(res_type),\n axis=1, ncore=ncore, prefer=\"threads\")\n if show_progress:\n if list_file:\n sys.stdout.write(\"\\r\" + \" \" * len_msg + \"\\r\")\n if last_chunk != 0:\n idx = height1 - last_chunk\n mat_chunk = data[:, idx: height1, :]\n out_files = [output + \"/img_\" + (\"0000\" + str(\n idx + j))[-5:] + \".tif\" for j in range(last_chunk)]\n if rescaling:\n mat_chunk = rescale(mat_chunk, nbit, minmax)\n losa.save_image_multiple(out_files, mat_chunk.astype(res_type),\n axis=1, ncore=ncore, prefer=\"threads\")\n if os.path.isdir(folder_tmp):\n shutil.rmtree(folder_tmp)\n if out_type == \"hdf\":\n shutil.rmtree(os.path.splitext(output)[0])\n if show_progress:\n t1 = timeit.default_timer()\n f_size = __get_dataset_size(output)\n print(\"Finish reslicing data! File size: {0:0.2f}MB. 
Time: {1:0.2f}s\"\n \"\".format(f_size, t1 - t0))", "def consolidate_chunks(\n inputs: Iterable[Tuple[core.ChunkKey, xarray.Dataset]],\n combine_kwargs: Optional[Mapping[str, Any]] = None,\n) -> Tuple[core.ChunkKey, xarray.Dataset]:\n inputs = list(inputs)\n keys = [key for key, _ in inputs]\n if len(set(keys)) < len(keys):\n raise ValueError(f'chunk keys are not unique: {keys}')\n\n # Reconstruct shared offsets along each dimension by inspecting chunk keys.\n unique_offsets = collections.defaultdict(set)\n for key in keys:\n for dim, offset in key.items():\n unique_offsets[dim].add(offset)\n offsets = {k: sorted(v) for k, v in unique_offsets.items()}\n combined_key = core.ChunkKey({k: v[0] for k, v in offsets.items()})\n\n # Consolidate inputs in a single xarray.Dataset.\n # `inputs` is a flat list like `[(k_00, ds_00), (k_01, ds_01), ...]` where\n # `k_ij` is a ChunkKey giving the (multi-dimensional) index of `ds_ij` in a\n # virtual larger Dataset.\n # Now we want to actually concatenate along all those dimensions, e.g., the\n # equivalent of building a large matrix out of sub-matrices:\n # ⌈[x_00 x_01] ...⌉ ⌈x_00 x_01 ...⌉\n # X = |[x_10 x_11] ...| = |x_10 x_11 ...|\n # |[x_20 x_21] ...| |x_20 x_21 ...|\n # ⌊ ... ...⌋ ⌊ ... ... ...⌋\n # In NumPy, this would be done with `np.block()`.\n offset_index = core.compute_offset_index(offsets)\n shape = [len(v) for v in offsets.values()]\n if np.prod(shape) != len(inputs):\n raise ValueError('some expected chunk keys are missing')\n nested_array = np.empty(dtype=object, shape=shape)\n for key, chunk in inputs:\n nested_key = tuple(offset_index[dim][key[dim]] for dim in offsets)\n assert nested_array[nested_key] is None\n nested_array[nested_key] = chunk\n\n kwargs = dict(\n data_vars='minimal',\n coords='minimal',\n join='exact',\n combine_attrs='override',\n )\n if combine_kwargs is not None:\n kwargs.update(combine_kwargs)\n\n combined_dataset = xarray.combine_nested(\n nested_array.tolist(),\n concat_dim=list(offsets),\n **kwargs\n )\n return combined_key, combined_dataset", "def load_chunks(self):\n for key, array in self.chunks.items():\n loaded_array = np.asarray(array)\n self.chunks[key] = loaded_array", "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def _chunk_data(X, slices):\n\n # from object array to list\n slices = [sl for sl in slices if len(sl)]\n selected_times = np.hstack([np.ravel(sl) for sl in slices])\n start = np.min(selected_times)\n stop = np.max(selected_times) + 1\n slices_chunk = [sl - start for sl in slices]\n X_chunk = X[:, :, start:stop]\n return X_chunk, slices_chunk", "def batch_chunks(exp_chunks):\n import numpy as np\n batch_idx = np.array([chunk[0]['batch_id'] for chunk in exp_chunks])\n unique_batch_idx = np.unique(batch_idx)\n ids_per_array = [np.where(batch_idx == array_bidx)[0] for array_bidx in unique_batch_idx]\n exp_arrays = [[exp_chunks[idx] for idx in chunk_ids] for chunk_ids in ids_per_array]\n return exp_arrays", "def chunks(data, rows=10000):\n\n for i in range(0, len(data), rows):\n yield data[i:i+rows]", "def batch_dataset(x, batch_size):\r\n\tsize_modulo = len(x) % batch_size # hack to ensure data is batches successfully\r\n\tif size_modulo != 0:\r\n\t\tx = x[:-size_modulo]\r\n\tpartitioned = np.split(x, batch_size)\r\n\treturn partitioned", "def chunk_generator(input_file, chunksize = 100000, dataset_name = \"\") :\n\n with h5py.File(input_file, 'r', libver = 'latest') as 
f :\n dataset = f[dataset_name]\n for x in range(0, dataset.size, chunksize) :\n yield dataset[x:x+chunksize]", "def _build_chunk_registry(self, backend_key, dtype):\n\n query = backend_key_to_query(backend_key)\n chunks_registry = self.db[self.col_name].find(\n {**query, 'provides_meta': False},\n {\"chunk_i\": 1, \"data\": 1})\n\n # We are going to convert this to a dictionary as that is\n # easier to lookup\n for doc in chunks_registry:\n chunk_key = doc.get('chunk_i', None)\n if chunk_key is None:\n # Should not happen because of the projection in find\n # but let's double-check:\n raise ValueError(\n f'Projection failed, got doc with no \"chunk_i\":\\n{doc}')\n # Update our registry with this chunks info. Use chunk_i as\n # chunk_key. Make it a *string* to avoid potential key-error\n # issues or json-encoding headaches.\n chunk_len = len(doc.get('data', []))\n result = np.zeros(chunk_len, dtype=dtype)\n for key in np.dtype(dtype).names:\n result[key] = [dd[key] for dd in doc['data']]\n self.chunks_registry[backend_key + str(chunk_key)] = result\n del doc\n\n # Some bookkeeping to make sure we don't buffer too much in this\n # backend. We still need to return at least one hence the 'and'.\n # See: https://github.com/AxFoundation/strax/issues/346\n if backend_key not in self._buffered_backend_keys:\n self._buffered_backend_keys.append(backend_key)\n while (\n (len(self._buffered_backend_keys) > 1 and\n sum(ch.nbytes for ch in self.chunks_registry.values()) / 1e6 > self._buff_mb)\n or len(self._buffered_backend_keys) > self._buff_nruns\n ):\n self._clean_first_key_from_registry()", "def test_chunk_memory(self):\n layer = tl.Serial(tl.Dense(1024*1024), tl.Dense(128))\n chunked = tl.Chunk(layer, 256)\n x = np.random.uniform(size=(16*1024, 16))\n chunked.init(shapes.signature(x))\n y = chunked(x)\n z = tl.Accelerate(chunked)(x)\n self.assertEqual(y.shape, (16*1024, 128))\n self.assertEqual(z.shape, (16*1024, 128))", "def get_chunks_result(self, data_keys: List[str], fetch_only: bool = False) -> List:", "def _put(self, name, chunk, row_offset):\n grp = self.grp[name]\n lo = row_offset\n if isinstance(chunk, pd.Series):\n chunk = chunk.to_frame()\n n_rows = len(chunk)\n else:\n n_rows = len(chunk[next(iter(chunk.keys()))])\n hi = lo + n_rows\n\n for name in chunk.keys():\n\n x = np.asarray(chunk[name])\n\n data, dtype, fillvalue = self._normalize_column(x, x.dtype)\n\n if name in grp.keys():\n dset = grp[name]\n if hi > len(dset):\n dset.resize((hi,))\n dset[lo:hi] = data\n else:\n try:\n enum_dict = h5py.check_dtype(enum=dtype)\n except AttributeError:\n enum_dict = None\n dset = grp.create_dataset(\n name,\n shape=(hi,),\n dtype=dtype,\n data=data,\n fillvalue=fillvalue,\n **self.storage_options\n )\n if enum_dict is not None:\n # store enum dictionary as attribute\n dset.attrs[\"categories\"] = sorted(\n enum_dict, key=enum_dict.__getitem__\n )", "def match_chunks(*arrays):\n target = arrays[0].datashape\n result = []\n for a in arrays:\n ds = a.datashape\n for i, j in zip(reversed(list(range(a.ndim))),\n reversed(list(range(target.ndim)))):\n ds = change_axis_schema(ds, i, chunk=target.chunk_size[j],\n overlap=target.chunk_overlap[j])\n if a.datashape.schema != ds.schema:\n a = a.redimension(ds.schema)\n result.append(a)\n\n return tuple(result)", "def _get_chunk_data(self, inputs: Iterable, chunk_size: int):\n inputs_iter = iter(inputs)\n while True:\n try:\n chunk_data = []\n for _ in range(chunk_size):\n processed_data = next(inputs_iter)\n chunk_data.append(processed_data)\n yield 
chunk_data\n except StopIteration:\n if chunk_data:\n yield chunk_data\n break", "def transform(self, chunks):\n data = np.array([chunk.flatten() for chunk in chunks])\n\n return data", "def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]", "def data_partition(num_workers, data_set, separate=True):\n\n size = data_set.data.shape[0]\n ind = list(range(size))\n\n if separate:\n shuffle(ind)\n # worker_size is the number of samples per worker. The last worker however receives the additional samples\n worker_size = size // num_workers\n data = dict.fromkeys(list(range(num_workers)))\n\n for w in range(num_workers):\n if w is not num_workers - 1:\n data[w] = ind[w * worker_size: (w+1) * worker_size]\n # data[w][\"X\"] = X_train[ind[w * worker_size: (w + 1) * worker_size], :]\n # data[w][\"Y\"] = Y_train[ind[w * worker_size: (w + 1) * worker_size], :]\n else:\n data[w] = ind[w * worker_size:]\n # data[w][\"X\"] = X_train[ind[w * worker_size:], :]\n # data[w][\"Y\"] = Y_train[ind[w * worker_size:], :]\n\n else:\n data = dict.fromkeys(list(range(num_workers)))\n for w in range(num_workers):\n shuffle(ind)\n data[w] = ind\n # data[w][\"X\"] = X_train[ind, :]\n # data[w][\"Y\"] = Y_train[ind, :]\n\n return data", "def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r", "def pmap_dataset(ds, n_devices):\n n_data = len(ds[0])\n if n_data % n_devices:\n new_len = n_devices * (n_data // n_devices)\n warning_str = (\"Dataset of length {} can not be split onto {} devices.\"\n \"Truncating to {} data points.\".format(\n n_data, n_devices, new_len))\n warnings.warn(warning_str, UserWarning)\n ds = (arr[:new_len] for arr in ds)\n return jax.pmap(lambda x: x)(batch_split_axis(ds, n_devices))", "def _download_input_chunk(self):\n #FIXME choose the mip level based on the chunk key\n volume = Precomputed(self._storage)\n self._data = volume[self._xmin:self._xmax,\n self._ymin:self._ymax,\n self._zmin:self._zmax]", "def redo(self):\n if self._snapshot_index <= len(self._snapshots) - 2:\n snapshot = self._snapshots[self._snapshot_index + 1]\n for chunk_location in snapshot:\n dimension, cx, cz = chunk_location\n chunk = self._unserialise_chunk(dimension, cx, cz, 1)\n self._chunk_cache[chunk_location] = chunk\n self._snapshot_index += 1", "def split_axis(self, x_axis):\n\n self._check_key_type = False # Speed optimization\n\n x_ndim = self.dim_index(x_axis)\n keys = self._data.keys()\n x_vals, dim_values = self._split_keys_by_axis(keys, x_axis)\n\n split_data = map_type()\n\n for k in dim_values: # The shortened keys\n split_data[k] = map_type()\n for x in x_vals: # For a given x_axis value...\n # Generate a candidate expanded key\n expanded_key = k[:x_ndim] + (x,) + k[x_ndim:]\n if expanded_key in keys: # If the expanded key actually exists...\n split_data[k][x] = self[expanded_key]\n\n self._check_key_type = True # Re-enable checks\n return split_data", "def update_chunk(self):\n for key, value in self.piece_coordinates.items():\n # Why is the key a numpy.int type ???\n self.chunk[value] = key", "def rechunk_zarr(source, dest, nb_workers):\n nb_chunks = utils.chunk_dims(dest.shape, dest.chunks)\n\n args = []\n for chunk_idx in product(*tuple(range(n) for n in nb_chunks)):\n args.append((source, dest, chunk_idx))\n\n with multiprocessing.Pool(nb_workers) as pool:\n pool.starmap(fetch_and_write_chunk, args)", "def split_nrows(indata, nrows):\n\n outdata = {}\n\n if nrows == 1:\n outdata[1] = indata\t\n elif nrows == 
2:\n\taxlen = int(math.floor(len(indata)/2))\n\toutdata[1] = indata[0:axlen]\n\toutdata[2] = indata[axlen:axlen*2]\n elif nrows == 3:\n\taxlen = int(math.floor(len(indata)/3))\n\toutdata[1] = indata[0:axlen]\n\toutdata[2] = indata[axlen:axlen*2]\n\toutdata[3] = indata[axlen*2:axlen*3]\n\n return outdata", "def cut_all_data_and_labels_on_chunks(data: Data_dict_type, labels: Labels_dict_type,\n window_size: float, window_step: float) -> Tuple[\n Data_dict_type, Labels_dict_type]:\n for key, item in data.items():\n # extract data and sample rate of videofile\n data_array, sample_rate = item\n # calculate size of window in units (indexes)\n window_size_in_units = int(np.round(window_size * sample_rate))\n window_step_in_units = int(np.round(window_step * sample_rate))\n try:\n # try to cut data on chunks with defined window\n data_array = cut_data_on_chunks(data_array, window_size_in_units, window_step_in_units)\n data_array = np.concatenate([x[np.newaxis, ...] for x in data_array], axis=0)\n except AttributeError:\n # if size of window or step of window are more than length of data, takes full data as one window.\n data_array = data_array[np.newaxis, ...]\n data[key] = (data_array, sample_rate)\n # labels cutting, everything the same as with data cutting\n labels_dataframe = labels[key]\n try:\n labels_dataframe = cut_data_on_chunks(labels_dataframe.values, window_size_in_units, window_step_in_units)\n labels_dataframe = np.concatenate([x[np.newaxis, ...] for x in labels_dataframe], axis=0)\n except AttributeError:\n # labels now will be saved in np.ndarray format\n labels_dataframe = labels_dataframe.values[np.newaxis, ...]\n labels[key] = labels_dataframe\n return data, labels", "def extract_chunks(the_files, the_bands=None):\n ds_config = {}\n gdal_ptrs = []\n datatypes = []\n for the_file in the_files:\n g = gdal.Open(the_file)\n gdal_ptrs.append(gdal.Open(the_file))\n datatypes.append(GDAL2NUMPY[g.GetRasterBand(1).DataType])\n\n block_size = g.GetRasterBand(1).GetBlockSize()\n nx = g.RasterXSize\n ny = g.RasterYSize\n if the_bands is None:\n the_bands = np.arange(g.RasterCount) + 1\n proj = g.GetProjectionRef()\n geoT = g.GetGeoTransform()\n ds_config['nx'] = nx\n ds_config['ny'] = ny\n ds_config['nb'] = g.RasterCount\n ds_config['geoT'] = geoT\n ds_config['proj'] = proj\n block_size = [block_size[0]*2, block_size[1]*2]\n print(\"Blocksize is (%d,%d)\" % (block_size[0], block_size[1]))\n # block_size = [ 256, 256 ]\n # store these numbers in variables that may change later\n nx_valid = block_size[0]\n ny_valid = block_size[1]\n # find total x and y blocks to be read\n nx_blocks = (int)((nx + block_size[0] - 1) / block_size[0])\n ny_blocks = (int)((ny + block_size[1] - 1) / block_size[1])\n buf_size = block_size[0] * block_size[1]\n ################################################################\n # start looping through blocks of data\n ################################################################\n # loop through X-lines\n for X in range(nx_blocks):\n # change the block size of the final piece\n if X == nx_blocks - 1:\n nx_valid = nx - X * block_size[0]\n buf_size = nx_valid * ny_valid\n\n # find X offset\n this_X = X * block_size[0]\n\n # reset buffer size for start of Y loop\n ny_valid = block_size[1]\n buf_size = nx_valid * ny_valid\n\n # loop through Y lines\n for Y in range(ny_blocks):\n # change the block size of the final piece\n if Y == ny_blocks - 1:\n ny_valid = ny - Y * block_size[1]\n buf_size = nx_valid * ny_valid\n\n # find Y offset\n this_Y = Y * block_size[1]\n 
data_in = []\n for ig, ptr in enumerate(gdal_ptrs):\n buf = ptr.ReadRaster(this_X, this_Y, nx_valid, ny_valid,\n buf_xsize=nx_valid, buf_ysize=ny_valid,\n band_list=the_bands)\n a = np.frombuffer(buf, dtype=datatypes[ig])\n data_in.append(a.reshape((\n len(the_bands), ny_valid, nx_valid)).squeeze())\n\n yield (ds_config, this_X, this_Y, nx_valid, ny_valid,\n data_in)" ]
[ "0.6820932", "0.6255683", "0.6226878", "0.61808604", "0.61686605", "0.6154692", "0.59964454", "0.5780077", "0.5754279", "0.565912", "0.565868", "0.5657204", "0.5638074", "0.5595732", "0.5479498", "0.5458643", "0.5443265", "0.54278415", "0.5369429", "0.5363588", "0.53392226", "0.5339011", "0.53381205", "0.5336235", "0.53156066", "0.53065056", "0.52881646", "0.5259945", "0.5206608", "0.52060056" ]
0.7626952
0
Tells info about current time
def time(self):
    time = datetime.datetime.now().strftime("%I:%M:%S")
    self.speak("the current time is")
    self.speak(time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def currentTime():\n return strftime(\"%H:%M:%S\", time.localtime())", "def current_time(cls) -> float:", "def current_time():\n now = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S.%f\")\n return now", "def getCurrentTime():\n\tnow = datetime.datetime.now()\n\thr = now.hour\n\tgreeting = \"\"\n\tampm = \"\"\n\tif (hr < 12): #morning\n\t\thr = hr\n\t\tgreeting = \"morning\"\n\t\tampm = \"am\"\n\telif (hr >= 12 and hr < 1): #afternoon\n\t\thr = hr\n\t\tgreeting = \"afternoon\"\n\t\tampm = \"noon\"\n\telif (hr > 12 and hr < 19): #evening\n\t\thr = hr - 12\n\t\tgreeting = \"evening\"\n\t\tampm = \"pm\"\n\telse: #night\n\t\thr = hr - 12\n\t\tgreeting = \"night\"\n\t\tampm = \"pm\"\n\treturn str(hr) + ':' + str(now.minute),ampm, ' in the ', greeting", "def current_time():\n now = datetime.datetime.now()\n time = now.strftime(\"%Y-%m-%d %H:%M:%S:%f\")\n return time", "def now():\n print(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))", "def __get_current_time(self) -> datetime:\n #return datetime.strptime(\"11:30\", '%H:%M')\n return datetime.now()", "def _get_current_time() -> str:\n return datetime.now().strftime(\"%FT%H:%M:%S\")", "def current_time():\n return time.time()", "def get_current_time():\n return datetime.now()", "def time_now():\n cur_time = str(datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\"))\n return cur_time", "def get_current_time(self):\n return self.time", "def get_current_time():\n return datetime.datetime.now()", "def time_now():\n return time.time()", "def get_now_time():\r\n return '[' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f') + ']: '", "def now():\r\n return time.time()", "def time( self, mess, args):\n return str(datetime.datetime.now())", "def time( self, mess, args):\n return str(datetime.datetime.now())", "def time_now():\n return datetime.datetime.now().time()", "async def time(self, ctx):\r\n time = market_time()\r\n await ctx.send(f'It is currently {time.time().strftime(\"%H:%M:%S\")} EDT for the market.')", "def timestamp_now():\n return datetime.now().strftime(\"%A, %B %d, %Y, %I:%M %p\")", "async def _time(self, ctx):\n try:\n await self.bot.say('@{0}:'.format(ctx.message.author.name) + '\\nDate is: **' + time.strftime(\"%A, %B %d, %Y\") + '**' + '\\nTime is: **' + time.strftime(\"%I:%M:%S %p\") + '**')\n except Exception as e:\n await self.bot.say(code.format(type(e).__name__ + ': ' + str(e)))", "def get_time():\r\n return datetime.datetime.now().strftime(\"%H\")+\":\"+datetime.datetime.now().strftime(\"%M\")+\":\"+datetime.datetime.now().strftime(\"%S\")", "def get_current_time():\n return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())", "def time_now():\n ts = datetime.datetime.now().timetuple()\n return '{wday} {day} {month} {year} {hour}:{minute:0>2d}:{second:0>2d} UTC'.format(\n year=ts.tm_year, month=calendar.month_name[ts.tm_mon],\n day=ts.tm_mday, wday=calendar.day_name[ts.tm_wday],\n hour=ts.tm_hour, minute=ts.tm_min, second=ts.tm_sec)", "def set_current_time(self):\n now = datetime.datetime.now()\n answer = self.UsbHost.send_command(self.state.ser, \"SetCurrentTime\", str(self.state.device_id),\n now.year, now.month, now.day,\n now.hour, now.minute, now.second, int(now.microsecond / 1000))\n if answer in wrong_answers:\n error_message(\"Не удалось задать время\")\n self.statusbar.showMessage(answer_translate[answer])\n else:\n self.state.syncro = True\n self.create_message()", "def now(self):\r\n return time.ctime(time.time())", "def current_time(self):\n return self._current_time", "def tnow():\n return 
datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')", "def now(self, request):\n identity = self.bot.get_plugin('identity').get_identity_by_request(request)\n\n now = times.now()\n tz = self._get_user_timezone(identity)\n local = times.to_local(now, tz)\n\n request.respond('Server time: {}\\nLocal time:{}'.format(now, local))" ]
[ "0.7973656", "0.772625", "0.7605761", "0.75881535", "0.75294447", "0.7498588", "0.7495459", "0.7466368", "0.74531955", "0.7447623", "0.74185985", "0.73490584", "0.72843736", "0.72731084", "0.7260997", "0.72487897", "0.7235523", "0.7235523", "0.72326684", "0.7181535", "0.7128221", "0.712293", "0.71002966", "0.70934355", "0.70338744", "0.7022602", "0.70052826", "0.69945556", "0.69935834", "0.6991561" ]
0.78153354
1
Recursively update a dict. Subdicts won't be overwritten but also updated.
def deepupdate(original, update):
    for key, value in original.iteritems():
        if not key in update:
            update[key] = value
        elif isinstance(value, dict):
            deepupdate(value, update[key])
    return update
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recursive_update(to_update, update):\n if update:\n for key, value in update.items():\n if isinstance(value, dict):\n value = recursive_update(to_update.get(key, {}), value)\n to_update[key] = value\n return to_update", "def recursive_update(\n base_dict: typing.Dict[typing.Any, typing.Any],\n new_dict: typing.Mapping[typing.Any, typing.Any],\n ) -> None:\n for key, value in new_dict.items():\n if isinstance(value, collections.Mapping) and (\n base_dict.get(key) is not None\n ):\n TrainingConfig.recursive_update(base_dict[key], value)\n else:\n base_dict[key] = value", "def recursive_update_cfg(d, u):\n for k, v in u.iteritems():\n if isinstance(v, collections.Mapping):\n r = update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def recursive_update(d1, d2):\n\n for key, value in d2.items():\n if key in d1 and isinstance(d1[key], dict) and isinstance(value, dict):\n recursive_update(d1[key], value)\n else:\n d1[key] = value", "def recursive_mapping_update(d, u):\n if u is not None:\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = recursive_mapping_update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def _update(value: Dict[str, Any], update: Dict[str, Any]):\n for key, val in update.items():\n\n if key not in value:\n value[key] = val\n elif isinstance(val, dict):\n value[key] = _update(value[key], val)\n else:\n value[key] = val\n return value", "def _update_dicts(master_dict, update_dict):\n for key, value in update_dict.items():\n for subkey, subvalue in value.items():\n master_dict[key][subkey] = subvalue", "def recursive_update(d1, d2):\n\n for k in d1.keys():\n if k not in d2:\n continue\n\n if isinstance(d1[k], dict) and isinstance(d2[k], dict):\n d1[k] = recursive_update(d1[k], d2[k])\n else:\n d1[k] = d2[k]\n\n for k in d2.keys():\n if k not in d1:\n d1[k] = d2[k]\n\n return d1", "def recursively_update_with(a, b):\n\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n recursively_update_with(a[key], b[key])\n else:\n a[key] = b[key]\n else:\n a[key] = b[key]", "def deep_update(d, u):\n for k, v in six.iteritems(u):\n dv = d.get(k, {})\n if not isinstance(dv, collections.abc.Mapping):\n d[k] = v\n elif isinstance(v, collections.abc.Mapping):\n d[k] = deep_update(dv, v)\n else:\n d[k] = v\n return d", "def update_nested_dict(old_dict, new_dict, extend_list_values=False):\n for k, v in new_dict.items():\n if k in old_dict.keys():\n if isinstance(v, dict) and isinstance(old_dict[k], dict):\n old_dict[k] = update_nested_dict(\n old_dict[k], v, extend_list_values=extend_list_values\n )\n elif (\n extend_list_values\n and isinstance(old_dict[k], list)\n and isinstance(v, list)\n ):\n old_dict[k].extend(v)\n elif v:\n old_dict[k] = v\n else:\n old_dict[k] = v\n return old_dict", "def _update_dict(full_key, val, d):\n for vk, vv in val.items():\n # The key of value is not in d.\n # if vk not in d:\n # # Exit.\n # raise ValueError(\"{}.{} does not exist in options\".format(full_key, vk))\n # else: # The key of val is in d.\n if isinstance(vv, list): # The value of the key is list.\n d[vk] = np.array(vv) # Store it as a numpy array.\n elif isinstance(vv, dict): # The value of the key is dictionary.\n _update_dict(full_key + \".\" + vk, vv, d[vk]) # Call the function again.\n else: # At the leaf of the dictionary.\n d[vk] = vv", "def merge_dict_recursive(target, src):\r\n for k in src.keys():\r\n if ((k in target and isinstance(target[k], dict) and\r\n isinstance(src[k], 
collections.Mapping))):\r\n merge_dict_recursive(target[k], src[k])\r\n else:\r\n target[k] = src[k]", "def update_dict(target, *updates):\r\n for update in updates:\r\n for key, val in list(update.items()):\r\n if isinstance(val, collections.Mapping):\r\n target[key] = update_dict(target.get(key, {}), val)\r\n else:\r\n target[key] = update[key]\r\n return target", "def deep_update(source, overrides):\n for key, value in overrides.iteritems():\n if isinstance(value, collections.Mapping) and value:\n returned = deep_update(source.get(key, {}), value)\n source[key] = returned\n else:\n source[key] = overrides[key]\n return source", "def deep_update(source: dict, overrides: Mapping):\n\n for key, value in overrides.items():\n if isinstance(value, Mapping) and value:\n returned = deep_update(source.get(key, {}), value)\n source[key] = returned\n else:\n source[key] = overrides[key]\n\n return source", "def update_double_dict(outer, inner):\n for k, v in outer.items():\n outer[k].update(inner[k])", "def update(d, u):\n\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def dict_deep_update(d, u, handlers=None):\n if handlers is None:\n handlers = {}\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = dict_deep_update(d.get(k, {}), v, handlers)\n d[k] = r\n elif k in d:\n h = handlers.get(type(v), None)\n if h is not None:\n d[k] = h(d[k], u[k])\n else:\n d[k] = u[k]\n else:\n d[k] = u[k]\n return d", "def test_deep_update(self):\n mdict = copy.deepcopy(self.dict1)\n res = dictupdate.update_dict_key_value(\n mdict, \"C:F\", {\"foo\": \"bar\", \"qux\": \"quux\"}\n )\n self.assertEqual(\n {\n \"A\": \"B\",\n \"C\": {\"D\": \"E\", \"F\": {\"G\": \"H\", \"I\": \"J\", \"foo\": \"bar\", \"qux\": \"quux\"}},\n },\n res,\n )\n\n # Test updating a non-existing subkey\n res = dictupdate.update_dict_key_value({}, \"foo:bar:baz\", {\"qux\": \"quux\"})\n self.assertEqual({\"foo\": {\"bar\": {\"baz\": {\"qux\": \"quux\"}}}}, res)\n # Test updating a non-existing subkey, with a different delimiter\n res = dictupdate.update_dict_key_value(\n {}, \"foo bar baz\", {\"qux\": \"quux\"}, delimiter=\" \"\n )\n self.assertEqual({\"foo\": {\"bar\": {\"baz\": {\"qux\": \"quux\"}}}}, res)", "def overwrite_dict(dict_base, dict_new, base_path=None):\n assert isinstance(dict_new, dict)\n for k in dict_new:\n # Add the current key to the path\n k_path = str(k) if base_path is None else f'{base_path}.{str(k)}'\n # Make sure that the key in the new dictionary matches one from the base dictionary\n assert k in dict_base, f'Could not find path {k_path} in the base dictionary'\n # Check that the types match between the base dictionary entry and the new one\n if dict_base[k] is not None:\n assert isinstance(type(dict_base[k]), type(dict_new[k])), \\\n 'The types at {} in the base dictionary do not match (expected {}, got {})'.format(\n k_path, str(type(dict_base[k])), str(type(dict_new[k])))\n # Recursively replace dictionary entries\n if isinstance(dict_base[k], dict):\n overwrite_dict(dict_base[k], dict_new[k], k_path)\n else:\n # Simply copy over leaf entries\n dict_base[k] = dict_new[k]", "def deepupdate(target, src, overwrite=True):\n for k, v in src.items():\n if type(v) == list:\n if k not in target:\n target[k] = copy.deepcopy(v)\n elif overwrite is True:\n 
target[k].extend(v)\n elif type(v) == dict:\n if k not in target:\n target[k] = copy.deepcopy(v)\n else:\n deepupdate(target[k], v, overwrite=overwrite)\n elif type(v) == set:\n if k not in target:\n target[k] = v.copy()\n elif overwrite is True:\n if type(target[k]) == list:\n target[k].extend(v)\n elif type(target[k]) == set:\n target[k].update(v)\n else:\n raise TypeError(\"Cannot update {} with {}\".format(\n type(target[k]),\n type(v))\n )\n else:\n if k not in target or overwrite is True:\n target[k] = copy.copy(v)", "def update(d, u):\n for k, v in u.items():\n if isinstance(v, Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def dict_merge(dest, upd, recursive_update=True, merge_lists=False):\n if (not isinstance(dest, collections.Mapping)) \\\n or (not isinstance(upd, collections.Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, collections.Mapping) \\\n and isinstance(val, collections.Mapping):\n ret = dict_merge(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) \\\n and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n else:\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest", "def update_dict(new,old):", "def test_recursive_update():\n\n test = Status.update_dict({'generation': TEST_1_ATTRS_1},\n {'generation': TEST_1_ATTRS_2})\n\n assert test['generation']['run_id'] == TEST_1_ATTRS_1['run_id']\n assert test['generation']['job_status'] == TEST_1_ATTRS_2['job_status']", "def recursiveSearchReplace(x, s, r):\n for k, v in x.items():\n if type(v) is dict:\n recursiveSearchReplace(v, s, r)\n else:\n if v == s:\n x[k] = r", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def update_dct(dic1, dic2):\n for key, val in dic2.items():\n if isinstance(val, dict):\n dic1.setdefault(key, {})\n update_dct(dic1[key], val)\n else:\n dic1[key] = val" ]
[ "0.7904014", "0.78887457", "0.76975274", "0.761823", "0.75225943", "0.7286748", "0.7235716", "0.7204504", "0.7094437", "0.7051808", "0.691271", "0.68377817", "0.68304867", "0.68073094", "0.6774031", "0.6769757", "0.67376924", "0.6737295", "0.6721304", "0.671807", "0.6696274", "0.6651971", "0.66461885", "0.65983677", "0.6586451", "0.65378064", "0.6443147", "0.64041686", "0.6383807", "0.63417184" ]
0.79647446
0
Converts a dataframe containing shap values in ohe format back to original genomic positions
def ohe_inverse(df_shap_values):
    # Auxiliary list to recreate original shap_values dataframe
    list_shap_original = []

    # Regular expression to pick attributes names.
    # Since in our case attributes names are the genomic positions (i.e. an integer number), we use the regex below
    import re
    pattern = "^\d+"

    # Auxiliary dictionary to create one pd.DataFrame for each sample, summing the shap values for each attribute.
    # Later, these dataframes will be appended together, resulting in the final df.
    dic={}

    # for each sample.
    for i, sample in df_shap_values.iterrows():
        # initialize an empty dictionary, that will contain "attribute : summed shap values" for
        # all attributes in this sample.
        dic = {}
        # The code below sums the importances for each category in each attribute in this sample.
        for pos in sample.index:
            attr = re.match(pattern, pos).group()
            if attr not in dic.keys():
                dic[attr] = sample[pos]
            else:
                dic[attr] += sample[pos]
        # Create a df containing only the current sample
        df_sample = pd.DataFrame(dic, index=[i])
        # Append it to a list that will become the full dataframe later
        list_shap_original.append(df_sample)

    # Create a DataFrame containing the shap values for the "original" attributes.
    shap_original = pd.concat(list_shap_original, axis=0)

    return shap_original
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def harmonize_eia_epa_orispl(df):\n # TODO: implement this.\n return df", "def power_point_old(osm_path): \n df = retrieve(osm_path,'points',['other_tags']) \n\n for row in df.itertuples():\n if df.loc[row.Index, \"other_tags\"] == None:\n df = df.drop(row.Index)\n elif not 'power' in df.loc[row.Index, \"other_tags\"]:\n df = df.drop(row.Index)\n \n df = df.reset_index(drop=True).rename(columns={'other_tags': 'asset'}) \n \n for row in range(len(df.index)):\n if '\"power\"=>\"tower\"' in df[\"asset\"][row]:\n df[\"asset\"][row] = 'power_tower' \n elif '\"power\"=>\"pole\"' in df[\"asset\"][row]:\n df[\"asset\"][row] = 'power_pole'\n else:\n df = df.drop(index=row)\n \n return df.reset_index(drop=True)", "def reformat_xyz(tile_gdf):\n tile_gdf['xyz'] = tile_gdf.id.apply(lambda x: x.lstrip('(,)').rstrip('(,)').split(','))\n tile_gdf['xyz'] = [[int(q) for q in p] for p in tile_gdf['xyz']]\n return tile_gdf", "def df_with_hexid_to_gdf(df, hexcolname='_id'):\n df_geometry=hexlist_to_geodataframe(df[hexcolname].to_list())\n #Creando el geodataframe\n gdf=gpd.GeoDataFrame(df, geometry=df_geometry['geometry'])\n gdf.crs = 'EPSG:4326'\n return gdf", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def df_with_hexid_to_centroids_gdf(df, hexcolname='hexid'):\n seriesofcoordinates=df[hexcolname].apply(h3.h3_to_geo)\n geometria=seriesofcoordinates.apply(lambda row: Point(row[1],row[0])) ## Patty reversed indices\n gdf=gpd.GeoDataFrame(df, geometry=geometria)\n return gdf", "def at_df(self, df):\n result = self.at(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def convert_to_geopandas(df):\n df['geometry'] = [Point(xy) for xy in zip(df.latitude, df.longitude)]\n crs = {'init': 'epsg:4326'}\n df = gpd.GeoDataFrame(df, crs=crs, geometry=df['geometry'])\n\n return df", "def prepare_for_influxdb(df):\n df = df.drop(columns=\"landkreis\", errors=\"ignore\") # prevent name collision in get_ags()\n df = get_ags(df)\n df[\"time\"] = df.apply(lambda x: 1000000000*int(datetime.timestamp((pd.to_datetime(x[\"timestamp\"])))), 1)\n df[\"measurement\"] = \"hystreet\"\n df[\"origin\"] = \"https://hystreet.com\"\n df = df.rename(columns={\n 'station_id': '_id',\n 'pedestrians_count': 'pedestrian_count',\n 'state': 'bundesland'\n })\n df['ags'] = pd.to_numeric(df['ags'])\n # import pdb; pdb.set_trace()\n return df", "def get_sites_pos(df, kompas, pwm, seqcol=\"Sequence\"):\n if df.empty:\n return df\n seqlist = df[seqcol].unique().tolist()\n poslist = []\n misscount = 0\n for seq in seqlist:\n x = kompas.predict_sequence(seq)\n if len(x) != 2:\n continue\n # WE LET \"SITE STR\" BE THE FIRST SITE IN THE BEGINNING\n poslist.append({seqcol:seq, \"site_str_pos\":x[0]['core_start'] + 2, 'site_str_start':x[0]['core_start'], 'site_wk_pos':x[1]['core_start'] + 2, 'site_wk_start':x[1]['core_start']})\n posdf = pd.DataFrame(poslist)\n posdf['site_str_score'], posdf['site_str_ori'], posdf['site_str_core'] = tg.pwm_score(posdf, pwm, \"site_str_start\", 4, 3, seqcol=seqcol)\n posdf['site_wk_score'], posdf['site_wk_ori'], posdf['site_wk_core'] = tg.pwm_score(posdf, pwm, \"site_wk_start\", 4, 3, seqcol=seqcol)\n posdf = posdf[(posdf[\"site_str_score\"] != -999) & (posdf[\"site_wk_score\"] != -999)]\n\n orimap = {0:\"-\",1:\"+\"}\n posdf[\"orientation\"] = posdf.apply(lambda x: \"%s/%s\" % (orimap[int(x[\"site_str_ori\"])], orimap[int(x[\"site_wk_ori\"])]),axis=1)\n posdf[\"distance\"] = posdf[\"site_wk_pos\"] - posdf[\"site_str_pos\"]\n\n # now 
we flip the left and right, we flip all but orientation\n flip_target = []\n for i,r in posdf.iterrows():\n if r[\"site_str_score\"] < r[\"site_wk_score\"]:\n flip_target.append(i)\n posdf.loc[flip_target,['site_str_score','site_wk_score']] = posdf.loc[flip_target,['site_wk_score','site_str_score']].values\n posdf.loc[flip_target,['site_str_pos','site_wk_pos']] = posdf.loc[flip_target,['site_wk_pos','site_str_pos']].values\n posdf.loc[flip_target,['site_str_ori','site_wk_ori']] = posdf.loc[flip_target,['site_wk_ori','site_str_ori']].values\n posdf.loc[flip_target,['site_str_core','site_wk_core']] = posdf.loc[flip_target,['site_wk_core','site_str_core']].values\n\n posdf = posdf[[seqcol,\"site_str_pos\",\"site_str_score\",\"site_wk_pos\",\"site_wk_score\" ,\"distance\",\"site_str_ori\",\"site_str_core\", \"site_wk_ori\",\"site_wk_core\",\"orientation\"]]\n posdf = df.merge(posdf,on=seqcol)\n return posdf", "def site_frame_to_global(df):\n\n # Swap x and y axes, and invert z\n df = swap_columns(df,'x','y')\n df = swap_columns(df,'xe','ye')\n df.loc[:,\"z\"] *= -1\n return df", "def shap_to_df(model, X, precalc_shap=None, **kwargs):\n if precalc_shap is not None:\n shap_values = precalc_shap\n else:\n shap_values = shap_calc(model, X, **kwargs)\n if isinstance(X, pd.DataFrame):\n return pd.DataFrame(shap_values, columns=X.columns, index=X.index)\n\n elif isinstance(X, np.ndarray) and len(X.shape) == 2:\n return pd.DataFrame(\n shap_values, columns=[f\"col_{ix}\" for ix in range(X.shape[1])]\n )\n\n else:\n raise NotImplementedError(\"X must be a dataframe or a 2d array\")", "def _tunnel_shearzone_data(self):\n file_loc = self.data_path / \"03_GeologicalMapping\" / \"01_TunnelIntersections\"\n columns = [\"x\", \"y\", \"z\", \"true_dip_direction\", \"dip\", \"tunnel\", \"shearzone\"]\n\n path = file_loc / \"Tunnel_intersections.txt\"\n df = pd.read_csv(path, sep=None, names=columns, engine=\"python\")\n df[\"shearzone\"] = df[\"shearzone\"].apply(rename_sz)\n df = df.rename(\n columns={\n \"true_dip_direction\": \"azimuth_struc\",\n \"tunnel\": \"borehole\",\n }\n )\n return df", "def iat_df(self, df):\n result = self.iat(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result", "def octa_cox_data_to_ss(data):\n t = pandas.Series((\n data['TIME_StartTime'] -\n data['TIME_StartTime'].values[0]) / 1.0e6, name='t, sec')\n xh = pandas.DataFrame(\n data[[\n 'LPOS_X', 'LPOS_Y', 'LPOS_Z',\n 'LPOS_VX', 'LPOS_VY', 'LPOS_VZ',\n 'ATT_Roll', 'ATT_Pitch', 'ATT_Yaw',\n 'ATT_RollRate', 'ATT_PitchRate', 'ATT_YawRate']].values,\n columns=[\n 'X', 'Y', 'Z', 'V_X', 'V_Y', 'V_Z',\n 'Phi', 'Theta', 'Psi',\n 'P', 'Q', 'R'], index=t)\n y = pandas.DataFrame(\n data[[\n 'GPS_Lat', 'GPS_Lon', 'GPS_Alt',\n 'SENS_BaroAlt',\n 'IMU1_AccX', 'IMU1_AccY', 'IMU1_AccZ',\n 'IMU1_GyroX', 'IMU1_GyroY', 'IMU1_GyroZ',\n 'IMU1_MagX', 'IMU1_MagY', 'IMU1_MagZ']].values,\n columns=[\n 'GPS_Lat', 'GPS_Lon', 'GPS_Alt',\n 'Baro_Alt',\n 'Acc_X', 'Acc_Y', 'Acc_Z',\n 'Gyro_X', 'Gyro_Y', 'Gyro_Z',\n 'Mag_X', 'Mag_Y', 'Mag_Z'], index=t)\n u_raw = pandas.DataFrame(\n ((data[[\n 'OUT0_Out0', 'OUT0_Out1', 'OUT0_Out2',\n 'OUT0_Out3', 'OUT0_Out4', 'OUT0_Out5', 'OUT0_Out6',\n 'OUT0_Out7']] - 1000.0) / 1000.0).values,\n columns=['1', '2', '3', '4', '5', '6', '7', '8'], index=t)\n c_mix_octo = np.array([\n [1, 1, 1, 1, 1, 1, 1, 1], # thrust\n [-1, 1, 1, -1, -1, 1, 1, -1], # roll\n [-1, -1, 1, 1, -1, -1, 1, 1], # pitch\n [1, -1, 1, -1, 1, -1, 1, -1], # yaw\n ]) / 8.0\n u = 
pandas.DataFrame(\n c_mix_octo.dot(u_raw.T).T,\n columns=['thrust', 'roll', 'pitch', 'yaw'],\n index=t)\n return t, xh, u, y, u_raw", "def _extract_genos(df):\n\n def _extract_genotype(geno_field):\n \"\"\"Extract the genotype from a format field.\"\"\"\n # Assume the genotype is the first format field and raise if it's not\n geno = geno_field.split(':')[0]\n if not GENO_REGEX.search(geno):\n raise ValueError('\"{}\" does not look like a genotype'.format(geno))\n return geno\n\n # Genotype columns range from the 10th to the last one\n df.iloc[:, 9:] = df.iloc[:, 9:].applymap(_extract_genotype)", "def convert_to_index(df, table):\n df[USER_IDX] = df[COMP_MID].apply(table.user_to_index)\n df[PROD_IDX] = df[PROD_NO].apply(table.item_to_index)\n index = df[(df[USER_IDX] < 0) | (df[PROD_IDX] < 0)].index\n df.drop(index=index, columns=[COMP_MID, PROD_NO], inplace=True)", "def transform(self, mols_ohe):\n\n latent = self.mol_to_latent_model.predict(mols_ohe)\n return latent.reshape((latent.shape[0], 1, latent.shape[1]))", "def _get_all_oshapes(self):\n iseq_name = self.node_list[0]\n iseq = self.builder.nodes[iseq_name]\n iseq_mainshape = iseq.oshapes['main']\n \n return {'main' : iseq_mainshape,\n 'loc' : iseq_mainshape,\n 'invscaled' : iseq_mainshape + [iseq_mainshape[-1]],\n 'invscaleoffd' : iseq_mainshape + [iseq_mainshape[-1]]}", "def transform(self, df: DataFrame) -> DataFrame:\n return df", "def extractCoord(df):\n dfcol = df['Coord']\n for i in range(len(dfcol)):\n dfcol[i] = dfcol[i][6:-1]\n return df", "def ohe_inverse_LR(normalized_alphas):\n\n normalized_alphas = np.abs(normalized_alphas)\n\n # Regular expression to pick attributes names.\n # Since in our case attributes names are the genomic positions (i.e. an integer number), we use the regex below\n import re\n pattern = \"^\\d+\"\n\n # Auxiliary dictionary to create one pd.DataFrame for each sample, summing the shap values for each attribute.\n # Later, these dataframes will be appended together, resulting in the final df.\n dic={}\n\n for index, alpha in normalized_alphas.iteritems():\n # print(index)\n attr = re.match(pattern, index).group()\n if attr not in dic.keys():\n dic[attr] = (0.5 * alpha)\n else:\n dic[attr] += (0.5 * alpha)\n\n shap_original = pd.Series(dic)\n\n return shap_original", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def simplify_directed_as_dataframe(df: gpd.GeoDataFrame) -> gpd.GeoDataFrame:\n df.reset_index(inplace=True)\n\n g = gt.Graph(directed=True)\n osm_id = g.new_edge_property('string')\n highway = g.new_edge_property('string')\n level = g.new_edge_property('int')\n lanes = g.new_edge_property('int')\n width = g.new_edge_property('float')\n bicycle = g.new_edge_property('bool')\n bicycle_safety = g.new_edge_property('int')\n foot = g.new_edge_property('bool')\n foot_safety = g.new_edge_property('int')\n max_speed = g.new_edge_property('int')\n motorcar = g.new_edge_property('bool')\n linestring = g.new_edge_property('python::object')\n\n edgelist = df[\n ['u', 'v', 'osm_id', 'highway', 'level', 'lanes', 'width', 'bicycle', 'bicycle_safety', 'foot', 'foot_safety',\n 'max_speed', 'motorcar', 'geometry']].values\n\n nodes_id = 
g.add_edge_list(edgelist, hashed=True,\n eprops=[osm_id, highway, level, lanes, width, bicycle, bicycle_safety, foot, foot_safety,\n max_speed, motorcar, linestring])\n\n # we are gonna replace the original repeated nodes with a linestring\n e_path = g.new_ep('vector<int64_t>')\n for e in g.edges():\n e_path[e] = []\n\n vs = g.get_vertices()\n in_out_deg_2 = (g.get_in_degrees(vs) == 2) & (g.get_out_degrees(vs) == 2)\n\n logging.debug('selecting degree 4 candidates')\n candidates = set()\n for i, v in enumerate(vs):\n if in_out_deg_2[i]:\n ns = list(set(g.get_all_neighbors(v)))\n if len(ns) == 2:\n u, w = ns[0], ns[1]\n uv, vw, wv, vu = g.edge(u, v), g.edge(v, w), g.edge(w, v), g.edge(v, u)\n if highway[uv] == highway[vw] and highway[wv] == highway[vu]:\n candidates.add(v)\n logging.debug('found {} degree 4 candidates to simplify'.format(len(candidates)))\n\n seen = set()\n unregister_candidates = set()\n\n for i, candidate in enumerate(candidates):\n if i == 100000:\n logging.debug('100000 degree 4 candidates')\n if candidate in seen:\n continue\n\n seen.add(candidate)\n\n u, w = g.get_out_neighbors(candidate)\n is_u_fringe, is_w_fringe = u not in candidates, w not in candidates\n\n cu, cw = g.edge(candidate, u), g.edge(candidate, w)\n\n us = []\n ws = []\n\n while not is_u_fringe:\n seen.add(u)\n us.append(u)\n neighbors = set(g.get_out_neighbors(u))\n neighbors -= seen\n if len(neighbors) > 0:\n u = neighbors.pop()\n is_u_fringe = u not in candidates\n elif u == w:\n us.pop(-1)\n u = us.pop(-1)\n unregister_candidates.add(u)\n unregister_candidates.add(w)\n is_u_fringe = True\n is_w_fringe = True\n g.remove_edge(g.edge(s=u, t=w))\n g.remove_edge(g.edge(s=w, t=u))\n else:\n logging.debug('degree 2: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n while not is_w_fringe:\n seen.add(w)\n ws.append(w)\n neighbors = set(g.get_out_neighbors(w))\n neighbors -= seen\n if len(neighbors) > 0:\n w = neighbors.pop()\n is_w_fringe = w not in candidates\n else:\n logging.debug('degree 2: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n if is_u_fringe and is_w_fringe:\n e = g.add_edge(source=u, target=w)\n path = [u] + list(reversed(us)) + [candidate] + ws + [w]\n e_path[e] = [int(nodes_id[node]) for node in path]\n linestrings = [linestring[g.edge(a, b)] for a, b in pairwise(path)]\n linestring[e] = join_linestrings(linestrings)\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e], foot_safety[e], \\\n max_speed[e], motorcar[e] = osm_id[cw], highway[cw], level[cw], lanes[cw], width[cw], bicycle[cw], \\\n bicycle_safety[cw], \\\n foot[cw], foot_safety[cw], max_speed[cw], motorcar[cw]\n\n e = g.add_edge(source=w, target=u)\n path = [w] + list(reversed(ws)) + [candidate] + us + [u]\n e_path[e] = [int(nodes_id[node]) for node in path]\n linestrings = [linestring[g.edge(a, b)] for a, b in pairwise(path)]\n linestring[e] = join_linestrings(linestrings)\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e], foot_safety[e], \\\n max_speed[e], motorcar[e] = osm_id[cu], highway[cu], level[cu], lanes[cu], width[cu], bicycle[cu], \\\n bicycle_safety[cu], \\\n foot[cu], foot_safety[cu], max_speed[cu], motorcar[cu]\n\n else:\n logging.debug(\n 'unexpected behavior, source={0}, target={1}, candidate={2}, us={3}, ws={4}'.format(u, w, candidate, us,\n ws))\n\n unseen = candidates - seen\n if len(unseen) > 0:\n logging.debug(\n 'Network scan 
after degree 4 simplification uncomplete: candidates {0} have not been examined'.format(\n unseen))\n\n candidates -= unregister_candidates\n g.remove_vertex(list(candidates))\n\n vs = g.get_vertices()\n in_out_deg_1 = (g.get_in_degrees(vs) == 1) & (g.get_out_degrees(vs) == 1)\n\n logging.debug('selecting degree 2 candidates')\n candidates = set()\n for i, v in enumerate(vs):\n if in_out_deg_1[i]:\n u = g.get_in_neighbors(v)[0]\n w = g.get_out_neighbors(v)[0]\n\n if u != w:\n uv, vw = g.edge(u, v), g.edge(v, w)\n if highway[uv] == highway[vw]:\n candidates.add(v)\n logging.debug('found {} degree 2 candidates to simplify'.format(len(candidates)))\n\n seen = set()\n unregister_candidates = set()\n\n for candidate in candidates:\n if candidate in seen:\n continue\n\n seen.add(candidate)\n\n u = g.get_in_neighbors(candidate)[0]\n w = g.get_out_neighbors(candidate)[0]\n\n uc = g.edge(u, candidate)\n\n is_u_fringe, is_w_fringe = u not in candidates, w not in candidates\n\n us = []\n ws = []\n\n while not is_u_fringe:\n seen.add(u)\n us.append(u)\n neighbors = set(g.get_in_neighbors(u))\n neighbors -= seen\n if len(neighbors) > 0:\n u = neighbors.pop()\n is_u_fringe = u not in candidates\n elif u == w:\n us.pop(-1)\n u = us.pop(-1)\n unregister_candidates.add(u)\n unregister_candidates.add(w)\n is_u_fringe = True\n is_w_fringe = True\n g.remove_edge(g.edge(s=w, t=u))\n else:\n logging.debug('degree 1: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n while not is_w_fringe:\n seen.add(w)\n ws.append(w)\n neighbors = set(g.get_out_neighbors(w))\n neighbors -= seen\n if len(neighbors) > 0:\n w = neighbors.pop()\n is_w_fringe = w not in candidates\n else:\n logging.debug('degree 1: we got here somehow {} {} {} {}', candidate, u, v,\n g.get_all_neighbors(candidate))\n break\n\n if is_u_fringe and is_w_fringe:\n e = g.add_edge(source=u, target=w)\n path = [u] + list(reversed(us)) + [candidate] + ws + [w]\n e_path[e] = [int(nodes_id[node]) for node in path]\n linestrings = [linestring[g.edge(a, b)] for a, b in pairwise(path)]\n linestring[e] = join_linestrings(linestrings)\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e], foot_safety[e], \\\n max_speed[e], motorcar[e] = osm_id[uc], highway[uc], level[uc], lanes[uc], width[uc], bicycle[uc], \\\n bicycle_safety[uc], \\\n foot[uc], foot_safety[uc], max_speed[uc], motorcar[uc]\n else:\n logging.error('unexpected behavior, source={0}, target={1}, candidate={2}, us={3}, ws={4}', u, w, us, ws)\n\n unseen = candidates - seen\n if len(unseen) > 0:\n logging.debug(\n 'Network scan after degree 2 simplification not finished: candidates {0} have not been examined'.format(\n unseen))\n\n candidates -= unregister_candidates\n g.remove_vertex(list(candidates))\n\n logging.debug(' linestring path')\n edges_tuples = []\n for e in g.edges():\n source, target, path = nodes_id[e.source()], nodes_id[e.target()], e_path[e]\n if len(path) == 0:\n path = [source, target]\n else:\n path = [int(i) for i in path]\n\n e_tuples = (g.edge_index[e], source, target, path,\n osm_id[e], highway[e], level[e], lanes[e], width[e], bicycle[e], bicycle_safety[e], foot[e],\n foot_safety[e], max_speed[e], motorcar[e], linestring[e])\n edges_tuples.append(e_tuples)\n\n df_edges_simplified = pd.DataFrame.from_records(edges_tuples, index='edge_id',\n columns=['edge_id', 'u', 'v', 'path', 'osm_id', 'highway',\n 'level', 'lanes', 'width', 'bicycle', 'bicycle_safety',\n 'foot', 'foot_safety', 'max_speed', 
'motorcar',\n 'geometry'])\n\n df_edges_simplified.osm_id = df_edges_simplified.osm_id.str.split('-').str[0]\n df_edges_simplified = gpd.GeoDataFrame(df_edges_simplified, geometry='geometry')\n df_edges_simplified.crs = df.crs\n return df_edges_simplified", "def dst(df):\n pass", "def vectorize(df):\n\tt = calc_affine(df)\n\ta = df.values\n\t# zeros an nan are left open space, means mask = True!\n\tmaske = (df != 0).fillna(True)\n\tgdf = gpd.GeoDataFrame()\n\tgeoms = []\n\tvalue = []\n\tfor s,v in rasterio.features.shapes(a,transform=t,mask=maske.values):\n\t\tgeoms.append(shape(s))\n\t\tvalue.append(v)\n\tgdf['geometry'] = geoms\n\tgdf = gdf.set_geometry('geometry')\n\tgdf['val']=value\n\treturn gdf", "def preprocess_feature(df):", "def clean(df):", "def inverse_transform(self, df):\n return df", "def add_orient_oxy_bin(df):\n df[\"orient_bin\"] = None\n df.loc[df[\"Orientation\"] > 40, \"orient_bin\"] = -1 # Right\n df.loc[(df[\"Orientation\"] > -60) &\n (df[\"Orientation\"] <= 40), \"orient_bin\"] = 0 # Back\n df.loc[(df[\"Orientation\"] > -\n 361) & (df[\"Orientation\"] <= -\n 60), \"orient_bin\"] = 1 # Left\n\n df[\"low_oxygen\"] = 0 # Not low\n df.loc[df[\"SpO2(%)\"] <= 88, \"low_oxygen\"] = 1 # Yes low oxygen\n return df" ]
[ "0.61097676", "0.5519744", "0.54567635", "0.5409611", "0.54091376", "0.5289816", "0.5179169", "0.51285565", "0.50935245", "0.5074704", "0.50697887", "0.5064374", "0.50546235", "0.50294065", "0.502251", "0.5016597", "0.49828702", "0.4946064", "0.49396178", "0.49290437", "0.49255058", "0.4924691", "0.4919789", "0.49177617", "0.48956275", "0.48927367", "0.48895094", "0.48869345", "0.48627082", "0.48501718" ]
0.68825835
0
creates a new shape with hyp's attributes
def draw(hyp):
    print 'g.createShape(',hyp.getAttList(),')'
    print type(hyp.getAttList())
    g.createShape(hyp.getAttList())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(hyp):\r\n print 'g.createShape(',hyp.getAttList(),')'\r\n print type(hyp.getAttList())\r\n g.createShape(hyp.getAttList())", "def shape(self) -> Shape:", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)", "def shape(self):", "def shape(self):", "def __init__(self, shape):\n\n self.shape = shape", "def NewShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_NewShape(self, *args)", "def __init__(self, hypos, alpha=1.0):\n Pmf.__init__(self)\n for hypo in hypos:\n self.Set(hypo, hypo**(-alpha))\n self.Normalize()", "def create_shape(settings, inst, repr=None):\r\n return wrap_shape_creation(\r\n settings,\r\n ifcopenshell_wrapper.create_shape(settings, inst.wrapped_data, repr.wrapped_data if repr is not None else None),\r\n )", "def shape(self):\n path = super(Arrow, self).shape()\n path.addPolygon(self.arrowHead)\n return path", "def __clone_layout_placeholder(self, layout_ph):\n id = self.__next_shape_id\n ph_type = layout_ph.type\n orient = layout_ph.orient\n shapename = self.__next_ph_name(ph_type, id, orient)\n\n sp = self.__new_placeholder_sp(layout_ph, id, ph_type, orient,\n shapename)\n self.__spTree.append(sp)\n shape = Shape(sp)\n self.__shapes.append(shape)\n return shape", "def amplify_2d_shape(shape, x_amplify, y_amplify):", "def hydroline(self) -> Polyline:\n return self._geometry", "def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")", "def create(self, teeth=10, length=0.3):\n # teeth are alternative face so spans times 2\n spans = teeth * 2\n\n self.transform, self.constructor = cmds.polyPipe(subdivisionsAxis=spans)\n\n # side face index: start---end-----every 2\n # `range` create a sequence like this\n side_faces = range(spans * 2, spans * 3, 2)\n\n # how cmd works is selection load/unload at runtime\n cmds.select(clear=True)\n\n for face in side_faces:\n cmds.select(\"%s.f[%s]\" % (self.transform, face), add=True)\n\n self.extrude = cmds.polyExtrudeFacet(localTranslateZ=length)[0]\n\n if self.debug:\n print(self.extrude)", "def create_line_hyp_space(n_features):\n hyp_space = []\n for i in range(1, n_features + 1):\n for j in range(n_features - i + 1):\n hyp = [0 for _ in range(n_features)]\n hyp[j:j + i] = [1 for _ in range(i)]\n hyp_space.append(hyp)\n hyp_space = np.array(hyp_space)\n return hyp_space", "def create_noche(y0, y1):\n # Defining the location and colors of each vertex of the shape \n vertices = [\n # positions colors\n -1.0, y0, 0.0, 0.15, 0.16, 0.5,\n 1.0, y0, 0.0, 0.15, 0.16, 0.5,\n 1.0, y1, 0.0, 0.15, 0.16, 0.1,\n -1.0, y1, 0.0, 0.15, 0.16, 0.1]\n\n # Defining connections among vertices\n # We have a triangle every 3 indices specified\n indices = [0, 1, 2,\n 2, 3, 0]\n\n return Shape(vertices, indices)", "def add_shape(self, spec):\n color_, shape_ = spec\n if shape_ is None:\n shape_ = self.random_shape()\n if color_ is None:\n color_ = self.random_color()\n x = shape.rand_pos()\n y = shape.rand_pos()\n return shape.SHAPE_IMPLS[shape_](x=x, y=y, color_=color_)", "def __init__(self, hyps, prop):\n typecheck.checkinstance('Thm', hyps, [Term], prop, Term)\n self.hyps = tuple(term_ord.sorted_terms(hyps))\n self.prop = prop", "def create_figure_new(self):\n kw = {}\n self.p = figure(plot_height=400, plot_width=400, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)\n 
self.p.circle(x=[0],y=[0])", "def shape(self) -> Shape:\n raise NotImplementedError()", "def __init__(self, x, y, th):\n self.x = x\n self.y = y\n self.th = th", "def from_shapely(cls, shape):\n return super(Point, cls).from_shapely(shape)", "def get_random_h(shape_f0, shape_ap):\n\tf0 = get_f0(shape_f0)\n\tap = get_ap(shape_ap)\n\n\t#for i in range(len(f0)):\n\t#\tplt.plot(f0[i,0,:])\n\t#\tplt.show()\n\n\t#f0 = f0*(1-ap[:,:,-1])\n\tif len(shape_f0) == 3:\n\t\tf0[:,0,:] = f0[:,0,:]*(1-ap[:,:,0])\n\telif len(shape_f0) == 1:\n\t\tf0[:] = f0[:]*(1-ap[:,0])\n\n\tf0[f0<50] = 0\n\n\n\t# for i in range(len(f0)):\n\t# \tplt.subplot(211)\n\t# \tplt.plot(f0[i,0,:])\n\t# \tplt.subplot(212)\n\t# \tplt.imshow(np.rot90(ap[i,:,:]), aspect=\"auto\")\n\t# \tplt.show()\n\n\n\treturn f0, ap", "def generate(pts):\n cmds.polyCreateFacet(name=\"shirt\", p=points)\n cmds.polyTriangulate()\n cmds.polySubdivideFacet(dv=SUBDIVISIONS)\n cmds.polyTriangulate()", "def fix(hobj):\n\n h.execute('create axon[2]', hobj)\n\n for sec in hobj.axon:\n sec.L = 30\n sec.diam = 1\n hobj.axonal.append(sec=sec)\n hobj.all.append(sec=sec) # need to remove this comment\n\n hobj.axon[0].connect(hobj.soma[0], 0.5, 0)\n hobj.axon[1].connect(hobj.axon[0], 1, 0)\n\n h.define_shape()", "def getShape(transform):\n\n pass", "def transform(self, H): # or update()\n vh = H @ self.vertices.T\n vh = vh.T \n va = vh[:,:2]\n return Polygon(va)", "def generatePolygons():", "def set_shape(self):\n\t\theigh = 150.\n\t\tself.own_shape_matrix = np.array([[50., 0., 0., 0.],\n\t\t\t\t\t\t\t\t\t\t\t[ 0., 50., 0., 0.],\n\t\t\t\t\t\t\t\t\t\t\t[ 0., 0., heigh, heigh/2],\n\t\t\t\t\t\t\t\t\t\t\t[ 0., 0., 0., 1.]])\n\t\t\n\t\t\n\t\tpass" ]
[ "0.6620612", "0.62583685", "0.5957094", "0.5908689", "0.5908689", "0.5786528", "0.5775833", "0.5735391", "0.5731915", "0.5704661", "0.56915826", "0.568746", "0.56355876", "0.5630756", "0.5630004", "0.55272645", "0.550738", "0.5436085", "0.54052126", "0.54014254", "0.53949374", "0.5378061", "0.5362264", "0.5353042", "0.53269", "0.53207344", "0.5302966", "0.5291973", "0.5291679", "0.52916735" ]
0.6593295
1
hides the existing shape associated with id
def hide(id): if type(id) is int: # shapeID g.hide(g.database[id]) else: # id refers to hypothetical shape shapeID=pickShape(local_vars[id]) g.hide(g.database[shapeID])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hide(id):\r\n if type(id) is int: # shapeID\r\n g.hide(g.database[id])\r\n else: # id refers to hypothetical shape\r\n shapeID=pickShape(local_vars[id])\r\n g.hide(g.database[shapeID])", "def hide_shape(self, shape_id):\n\n if shape_id:\n self.itemconfigure(shape_id, state=\"hidden\")", "def show_shape(self, shape_id):\n\n if shape_id:\n self.itemconfigure(shape_id, state=\"normal\")", "def hideLayer(self, id):\n\n #log.debug('hideLayer: hiding layer %s' % str(id))\n\n self.layer_mapping[id].visible = False\n self.Refresh()", "def hidePlot(self, index):\n self.pathItem_list[index].hide()", "def hide(self, item_id):\n pass", "def HideObject(object_id):\n return HideObjects(object_id)==1", "def hide(self):\n self.geometry(\"%dx%d%+d%+d\" % (0, 0, 0, 0))", "def hide(self):\n\n if not 'd-none' in str(self.class_):\n self.old_class = self.class_\n self.class_ = 'd-none'\n\n self.viz = False\n\n return self", "def hide(self):\n self.visible = False", "def hide(self):\n raise NotImplementedError", "def toggleShapeVis(self, transform, value):\n\n if cmds.objExists(transform):\n shape = cmds.listRelatives(transform, shapes=True)\n if shape is not None:\n cmds.setAttr(shape[0] + \".v\", lock=False)\n cmds.setAttr(shape[0] + \".v\", value)\n cmds.setAttr(shape[0] + \".v\", lock=True)", "def hide(self):\n self.set_visible(False)", "def toggle_surface_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if poly in self.hole_list:\n poly.setBrush(QBrush(QColor(255, 255, 255)))\n else:\n poly.setBrush(QBrush(QColor(0, 0, 0, 50)))\n\n # Disable the selection of edges and hide the marker if there is one\n for edge in self.edge_list:\n edge.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(False)\n\n # Hide markers on points\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(False)", "def hide_param(self,name):\n if name in self.representations:\n self.representations[name]['frame'].pack_forget()\n # CEBNOTE: forgetting label and widget rather than frame would\n # just hide while still occupying space (i.e. 
the empty frame\n # stays in place, and so widgets could later be inserted at\n # exact same place)\n #self.representations[name]['label'].pack_forget()\n #self.representations[name]['widget'].pack_forget()\n # unhide_param would need modifying too", "def toggle_border_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n poly.setBrush(QColor(0, 0, 0, 0))\n\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(True)\n # Enable selection of the edges of the polygon, if the edge has a marker display it\n for edge in self.edge_list:\n edge.childItems()[0].setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(True)", "def hideBtnImg(*args, **kwargs):\n\targs[0].get_image().hide()", "def hide(self):\n self.row_box.grid_remove()\n self.field_name_box.grid_remove()\n self.field_name_label.grid_remove()\n self.value_box.grid_remove()\n self.active_value_widget.grid_remove()", "def unHide(self):\n self.visible = True", "def remove_drawing_poly(self):\n\n self.drawing_poly = QPolygonF()\n self.drawing_points_coords = []\n\n for p in self.drawing_points:\n p.setVisible(False)\n\n for line in self.connecting_line_list:\n line.setVisible(False)\n if self.connecting_line:\n self.connecting_line.setVisible(False)\n self.connecting_line = None\n self.first_draw = True\n if self.set_tooltip:\n self.set_tooltip(\"\")", "def IsObjectHidden(object_id):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n return rhobj.IsHidden", "def hide(self):\n self.course.quick_action(self.id, 'hide')", "def hideIsoSurfaces(self):\n #research\n profprint()\n contourNode = slicer.util.getNode(self.contourNode)\n widget = slicer.modules.NeedleFinderWidget\n if contourNode != None:\n contourNode.SetDisplayVisibility(abs(widget.hideContourButton.isChecked()-1))\n contourNode.GetModelDisplayNode().SetSliceIntersectionVisibility(abs(widget.hideContourButton.isChecked()-1))", "def delete_shape(self, shape_id):\n\n self.variables.shape_ids.remove(shape_id)\n del self.variables.vector_objects[str(shape_id)]\n self.delete(shape_id)\n if shape_id == self.variables.current_shape_id:\n self.variables.current_shape_id = None", "def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()", "def destroy(self):\r\n self.visible = False", "def remove_object(self, n_id):\r\n\r\n # remove shapes\r\n for patch in self.shapes[n_id]:\r\n patch.remove()\r\n del self.shapes[n_id]\r\n\r\n # remove text\r\n if self.show_labels:\r\n for text in self.labels[n_id]:\r\n text.remove()\r\n del self.labels[n_id]", "def ensure_hidden(self):\n self.set_visible(False)", "def hideIsoSurfaces(self):\r\n # research\r\n profprint()\r\n contourNode = slicer.util.getNode(self.contourNode)\r\n widget = slicer.modules.NeedleFinderWidget\r\n if contourNode != None:\r\n contourNode.SetDisplayVisibility(abs(widget.hideContourButton.isChecked() - 1))\r\n contourNode.GetModelDisplayNode().SetSliceIntersectionVisibility(abs(widget.hideContourButton.isChecked() - 1))", "def unhide(self):\n self.course.quick_action(self.id, 'show')" ]
[ "0.8581231", "0.8390346", "0.71212643", "0.6763957", "0.66287816", "0.6290862", "0.6058897", "0.59502864", "0.5834213", "0.5813145", "0.56963134", "0.5608406", "0.56070375", "0.557525", "0.5532165", "0.5472659", "0.5464938", "0.54630136", "0.5410584", "0.53952074", "0.5380336", "0.53540635", "0.53539234", "0.53499085", "0.5321201", "0.53204113", "0.5299697", "0.52921754", "0.52785563", "0.5273376" ]
0.8552118
1
fills unspecified attributes of var with attributes of most recently mentioned shape that matches attributes in var
def one2(var): varAttList = local_vars[var] options = g.database.findMatches(local_vars[var]) shapeAttList = g.database[g.referenceOrder.pickMostRecent(options)].getAttList() local_vars[var] = g.AttributeList(shapeAttList.items()+ varAttList.items())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def one2(var):\r\n varAttList = local_vars[var]\r\n options = g.database.findMatches(local_vars[var])\r\n shapeAttList = g.database[g.referenceOrder.pickMostRecent(options)].getAttList()\r\n local_vars[var] = g.AttributeList(shapeAttList.items()+ varAttList.items())", "def copy_attributes(var1, var2):\n for each in var1.ncattrs():\n if each != \"_FillValue\":\n setattr(var2, each, getattr(var1, each))", "def test_attribute_types(self):\n\n # Read a data file\n layername = 'test_buildings.shp'\n filename = '%s/%s' % (TESTDATA, layername)\n V = read_layer(filename)\n\n # Make a smaller dataset\n V_ref = V.get_topN('FLOOR_AREA', 5)\n\n geometry = V_ref.get_geometry()\n #data = V_ref.get_data()\n projection = V_ref.get_projection()\n\n # Create new attributes with a range of types\n keys = ['None', 'String', 'Boolean', 'Integer', 'Real',\n 'Array 1D', 'Array 2D']\n values = [None, 'Test', True, 3, 3.14,\n numpy.array([2.56]), numpy.array([[6.21]])]\n\n data = []\n for i in range(len(geometry)):\n D = {}\n for j, key in enumerate(keys):\n if key == 'Boolean':\n # Add a little variation\n if i % 2 == 0:\n D[key] = not values[j]\n else:\n D[key] = values[j]\n else:\n D[key] = values[j]\n data.append(D)\n\n # Create new object from test data\n V_new = Vector(data=data, projection=projection, geometry=geometry)\n\n # Write this new object, read it again and check\n tmp_filename = unique_filename(suffix='.shp')\n V_new.write_to_file(tmp_filename)\n\n V_tmp = read_layer(tmp_filename)\n\n #print V_new.get_data()[1]\n #print V_tmp.get_data()[1]\n\n assert V_tmp.projection == V_new.projection\n assert numpy.allclose(V_tmp.geometry, V_new.geometry)\n assert V_tmp.data == V_new.data\n assert V_tmp.get_data() == V_new.get_data()\n assert V_tmp == V_new\n assert not V_tmp != V_new", "def one1(var):\r\n varAttList = local_vars[var]\r\n itAttList = g.getIt()\r\n local_vars[var] = g.AttributeList(itAttList.items() + varAttList.items())", "def one1(var):\n varAttList = local_vars[var]\n itAttList = g.getIt()\n local_vars[var] = g.AttributeList(itAttList.items() + varAttList.items())", "async def infer_shape_env_setitem(track, env, key, x):\n return NOSHAPE", "def copy_attrs(varin,varout):\n for attr_name in varin.ncattrs():\n varout.setncattr(attr_name,varin.getncattr(attr_name))", "async def infer_shape_identity(track, x):\n return await x['shape']", "def fill_attributes(ml_file, other_file):\n with xr.load_dataset(other_file) as other:\n with xr.open_dataset(ml_file) as ml:\n for variable in other.variables:\n if variable in ml.variables:\n other[variable].attrs = ml[variable].attrs\n other.to_netcdf(other_file)", "def get_attributes(self, shape):\n attributes = {}\n identifier_names = [i.name for i in self.identifiers]\n\n for name, member in shape.members.items():\n snake_cased = xform_name(name)\n if snake_cased in identifier_names:\n # Skip identifiers, these are set through other means\n continue\n snake_cased = self._get_name(\n 'attribute', snake_cased, snake_case=False\n )\n attributes[snake_cased] = (name, member)\n\n return attributes", "def _get_var_attrs(var):\n\n generic_dict = {'instrument': '', 'valid_range': (-1e+35,1e+35),\n 'missing_value': -9999, 'height': '',\n 'standard_name': '', 'group_name': '',\n 'serial_number': ''}\n\n generic_dict.update(attrs_dict[var])\n return generic_dict", "def id3(x, y, attributes, max_depth, weight, depth=0):\r\n tree = {}\r\n new_attr = []\r\n arr, count = np.unique(y, return_counts=True)\r\n # checking edge cases - reached maximum depth, or no 
attributes\r\n if len(attributes) == 0 or depth == max_depth or len(x) == 0:\r\n return np.bincount(y).argmax()\r\n # if all the values of y are one return one\r\n elif len(np.unique(y)) == 1:\r\n return arr[0]\r\n else:\r\n # calculating mutual information values\r\n info_values = {} \r\n # over number of columns\r\n for i in range(data.dim):\r\n oldX = partition(x[:,i])\r\n oldKeys = oldX.keys()\r\n # check in attributes recieved from bagging\r\n for attr in attributes:\r\n binX = []\r\n key , value = attr\r\n # check for key and value\r\n if i == key and value in oldKeys:\r\n # get the index\r\n index = oldX[value]\r\n for n in range(len(x)):\r\n if n in index:\r\n # if match binary classification 1 / 0 and appending to binX list\r\n binX.append(1)\r\n else:\r\n binX.append(0)\r\n # adding to a dictionary \r\n info_values[(i, value)] = mutual_information(binX, y, weight)\r\n \r\n # getting the maximum feature value\r\n best_feature_index = max(info_values, key=info_values.get) \r\n best_feature, best_val = best_feature_index\r\n # creating the best partition\r\n x_best_part = partition(x[:,best_feature])\r\n #selecting other than the best feature value from the dictionary\r\n new_attr = list(filter(lambda x: x!= (best_feature, best_val), attributes))\r\n # increasing depth\r\n depth += 1\r\n\r\n # Calling id3 recursively, checking over 0,1 making a prediction as True / False \r\n for n in range(0,2):\r\n if n == 0:\r\n # recursively calling id3 over the best values of the x partition\r\n bestX = x[x_best_part[best_val]]\r\n bestY = y[x_best_part[best_val]]\r\n tree[best_feature, best_val, 'True'] = id3(bestX, bestY, new_attr, max_depth,weight, depth)\r\n else:\r\n \t # recursively calling id3 selecting other than best features\r\n othr_idx = []\r\n for i in x_best_part:\r\n if i != best_val:\r\n othr_idx.extend(x_best_part[i])\r\n\r\n otherX = x[othr_idx]\r\n otherY = y[othr_idx]\r\n tree[best_feature, best_val, 'False'] = id3(otherX, otherY, new_attr, max_depth,weight, depth)\r\n return tree", "def lnhattr(shape):\n\n arnold_nodes = ('rcurve', 'cwdth', 'srate', 'ai_curve_shaderr', 'ai_curve_shaderg', 'ai_curve_shaderb')\n for ar in arnold_nodes:\n cmds.setAttr(shape + \".\" + ar, l=True, k=False, cb=False)", "def __init__(self):\n self.X = None\n self.Y = None\n self.features = None\n self.max = self.min = None\n self._look_up = None\n self.attr_weight = None", "async def infer_shape_J(track, x):\n return track.jtag(await x.get_shallow('shape'))", "def fixdims(var):\n\t\n\tfor ii in var.dims:\n\t\tkk=ii[:3].lower()\n\t\t\n\t\tif kk == 'lat':\n\t\t\tvar=var.rename({ii:'lat'})\n\t\t\t\n\t\tif kk == 'lon':\n\t\t\tvar=var.rename({ii:'lon'})\n\t\t\t\n\t\tif kk == 'lev' or kk == 'ple' or kk == 'pre':\n\t\t\tvar=var.rename({ii:'level'})\n\n\t\tif kk == 'tim':\n\t\t\tvar=var.rename({ii:'time'})\n\n\treturn var", "async def infer_shape_getattr(track, data, item):\n async def on_dcattr(data, data_t, item_v):\n data_sh = await data['shape']\n return data_sh.shape[item_v]\n\n return await static_getter(\n track, data, item,\n fetch=getattr,\n on_dcattr=on_dcattr\n )", "def _convert_to_original_shape(\n attribute_features: tf.Tensor,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n attribute: Text,\n ) -> tf.Tensor:\n # in order to convert the attribute features with shape\n # (combined batch-size and dialogue length x 1 x units)\n # to a shape of (batch-size x dialogue length x units)\n # we use tf.scatter_nd. 
Therefore, we need the target shape and the indices\n # mapping the values of attribute features to the position in the resulting\n # tensor.\n\n # attribute_mask has shape batch x dialogue_len x 1\n attribute_mask = tf_batch_data[attribute][MASK][0]\n\n if attribute in SENTENCE_FEATURES_TO_ENCODE + STATE_LEVEL_FEATURES:\n dialogue_lengths = tf.cast(\n tf_batch_data[DIALOGUE][LENGTH][0], dtype=tf.int32\n )\n dialogue_indices = tf_batch_data[DIALOGUE][INDICES][0]\n else:\n # for labels, dialogue length is a fake dim and equal to 1\n dialogue_lengths = tf.ones((tf.shape(attribute_mask)[0],), dtype=tf.int32)\n dialogue_indices = tf.zeros((tf.shape(attribute_mask)[0],), dtype=tf.int32)\n\n batch_dim = tf.shape(attribute_mask)[0]\n dialogue_dim = tf.shape(attribute_mask)[1]\n units = attribute_features.shape[-1]\n\n # attribute_mask has shape (batch x dialogue_len x 1), remove last dimension\n attribute_mask = tf.cast(tf.squeeze(attribute_mask, axis=-1), dtype=tf.int32)\n # sum of attribute mask contains number of dialogue turns with \"real\" features\n non_fake_dialogue_lengths = tf.reduce_sum(attribute_mask, axis=-1)\n # create the batch indices\n batch_indices = tf.repeat(tf.range(batch_dim), non_fake_dialogue_lengths)\n\n # attribute_mask has shape (batch x dialogue_len x 1), while\n # dialogue_indices has shape (combined_dialogue_len,)\n # in order to find positions of real input we need to flatten\n # attribute mask to (combined_dialogue_len,)\n dialogue_indices_mask = tf.boolean_mask(\n attribute_mask, tf.sequence_mask(dialogue_lengths, dtype=tf.int32)\n )\n # pick only those indices that contain \"real\" input\n dialogue_indices = tf.boolean_mask(dialogue_indices, dialogue_indices_mask)\n\n indices = tf.stack([batch_indices, dialogue_indices], axis=1)\n\n shape = tf.convert_to_tensor([batch_dim, dialogue_dim, units])\n attribute_features = tf.squeeze(attribute_features, axis=1)\n\n return tf.scatter_nd(indices, attribute_features, shape)", "async def infer_shape_tuple_setitem(track, seq, idx, value):\n seq_sh = await seq['shape']\n idx_v = await idx['value']\n value_sh = await value['shape']\n new_sh = list(seq_sh.shape)\n new_sh[idx_v] = value_sh\n return TupleShape(new_sh)", "def merge_attrs(self):\n for aid in self.attrs:\n new_val = self.attrs[aid]\n if aid in self.attributes:\n if ('value' in self.attributes[aid] and\n self.attributes[aid]['value'] != new_val):\n pass\n # print \"Updating attribute %s[%s] %s -> %s\" % (\n # self.name, aid, self.attributes[aid]['value'], new_val)\n else:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, new_val)\n self.remember_custom_attribute(self.name, aid, new_val)\n self.attributes[aid] = {}\n self.attributes[aid]['nv'] = new_val", "def set_shape(self, shape):\n self._shape = self._shape.merge_with(shape)", "async def infer_shape_return(track, v):\n return await v['shape']", "def my_featurize(apartment):\n return x, y", "def _infer_shape(schema):\n for feature in schema.feature:\n # Currently we infer shape only for features with valency 1.\n if (feature.presence.min_fraction == 1 and\n feature.value_count.min == feature.value_count.max == 1):\n feature.shape.dim.add().size = 1", "def FreeVariable( name, shape, dtype):\n shape = np.atleast_1d(shape)\n var = TensorType(str(dtype), shape == 1)(name)\n var.dshape = tuple(shape)\n var.dsize = int(np.prod(shape))\n return var", "def shape_element(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,\r\n problem_chars=PROBLEMCHARS, 
default_tag_type='regular'):\r\n\r\n node_atts = {}\r\n way_atts = {}\r\n way_nodes = []\r\n tags = [] # Handle secondary tags the same way for both node and way elements\r\n\r\n if element.tag == 'node': #fill dictionary with k/v pairs from NODE_FIELDS\r\n for i in node_attr_fields:\r\n node_atts[i] = element.attrib[i]\r\n\r\n if element.tag == 'way':\r\n for i in way_attr_fields:\r\n way_atts[i] = element.attrib[i]\r\n\r\n for tag in element.iter(\"tag\"): #loop through tags looking for problem values\r\n dic = {}\r\n attributes = tag.attrib\r\n if tag.attrib['k'] == \"addr:street\":\r\n tag.attrib['v'] = update_name_street(tag.attrib['v'], streetmapping)\r\n elif tag.attrib['k'] == \"addr:city\":\r\n tag.attrib['v'] = update_name_city(tag.attrib['v'], citymapping)\r\n elif tag.attrib['k'] == \"addr:postcode\":\r\n tag.attrib['v'] = update_zips(tag.attrib['v'])\r\n elif tag.attrib['k'] == \"cuisine\":\r\n tag.attrib['v'] = update_name_cuisine(tag.attrib['v'], cuisinemapping)\r\n \r\n if problem_chars.search(tag.attrib['k']):\r\n continue\r\n\r\n if element.tag == 'node': #add node id for attributes\r\n dic['id'] = node_atts['id']\r\n else:\r\n dic['id'] = way_atts['id'] #add way id for attributes\r\n\r\n dic['value'] = attributes['v'] #value of key for each type\r\n\r\n colon_k=LOWER_COLON.search(tag.attrib['k'])\r\n \r\n if colon_k:\r\n #print colon_k.group(0)\r\n #print tag.attrib['k']\r\n dic['key'],dic['type'] = right_key(tag.attrib['k']) #call function to split at colon\r\n else:\r\n dic['key'] = attributes['k'] #assign regular that there was no colon problem\r\n dic['type'] = 'regular'\r\n\r\n tags.append(dic)\r\n\r\n if element.tag == 'way':\r\n position = 0\r\n for nd in element.iter(\"nd\"): #loop through nd child tags numbering them\r\n way_node_dic = {}\r\n way_node_dic['id'] = way_atts['id']\r\n way_node_dic['node_id'] = nd.attrib['ref']\r\n way_node_dic['position'] = position\r\n position = position + 1\r\n way_nodes.append(way_node_dic)\r\n \r\n \r\n \r\n if element.tag == 'node': #process the above for node tags for final formatting\r\n return {'node': node_atts, 'node_tags': tags}\r\n\r\n elif element.tag == 'way': #process the above for way tags for final formatting\r\n return {'way': way_atts, 'way_nodes': way_nodes, 'way_tags': tags}", "def redimension(array, dimensions, attributes, dim_boundaries=None):\n if array.dim_names == dimensions and array.att_names == attributes:\n return array\n dim_boundaries = dim_boundaries or {}\n\n orig_atts = set(array.att_names)\n orig_dims = set(array.dim_names)\n\n to_promote = [d for d in dimensions if d in orig_atts] # att->dim\n to_demote = [a for a in attributes if a in orig_dims] # dim->att\n array = cast_to_integer(array, to_promote)\n\n # need a dummy attribute, otherwise result has no attributes\n if not attributes:\n dummy = _new_attribute_label('__dummy', array)\n array = array.apply(dummy, 0)\n attributes = [dummy]\n\n # build the attribute schema\n new_att = {}\n for r in array.sdbtype.full_rep:\n if r[0] in attributes: # copy schema\n new_att[r[0]] = _att_schema_item(r)\n for d in to_demote: # change attribute to dimension\n new_att[d] = '%s:int64' % d\n\n new_att = ','.join(new_att[a] for a in attributes)\n\n # build the dimension schema\n ds = array.datashape\n new_dim = {}\n for n, l, h, ch, co in zip(ds.dim_names, ds.dim_low, ds.dim_high,\n ds.chunk_size, ds.chunk_overlap):\n h = h if h is not None else '*'\n if n in dimensions:\n new_dim[n] = '{0}={1}:{2},{3},{4}'.format(n, l, h, ch, co)\n\n if to_promote:\n # 
don't do limits here, too expensive!\n # XXX this does wrong thing if attribute has negative values\n # for k, v in limits(array, to_promote).items():\n for k in to_promote:\n v = dim_boundaries.get(k, (0, '*'))\n new_dim[k] = _dim_schema_item(k, v)\n\n new_dim = ','.join(new_dim[d] for d in dimensions)\n\n schema = '<{0}> [{1}]'.format(new_att, new_dim)\n return array.redimension(schema)", "def copy_attrs(data_orig, data_new):\n\n if isinstance(data_orig, Dataset):\n\n # Variables\n for v in data_orig.data_vars:\n field = data_orig[v]\n for attr, val in field.attrs.items():\n data_new[v].attrs[attr] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n # Metadata\n for attr, val in data_orig.attrs.items():\n data_new.attrs[attr] = val\n\n elif isinstance(data_orig, DataArray):\n\n # Variable Metadata\n for att, val in data_orig.attrs.items():\n data_new.attrs[att] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n else:\n raise ValueError(\"Couldn't handle type %r\" % type(data_orig))\n\n return data_new", "def test_copy_attributes(self):\n\n v = Vector({ 'x': 3 }, { 'y': True })\n n = v.copy()\n\n self.assertEqual(v.attributes, n.attributes)\n\n v.attributes['y'] = False\n self.assertFalse(v.attributes['y'])\n self.assertTrue(n.attributes['y'])\n v.attributes['y'] = True", "def shape(self):" ]
[ "0.6883614", "0.6200598", "0.56799114", "0.54352635", "0.5365718", "0.530961", "0.52707833", "0.51479536", "0.51225156", "0.50734746", "0.50443786", "0.5040507", "0.50123763", "0.50016546", "0.49899435", "0.4980406", "0.49721068", "0.496868", "0.49663883", "0.49450296", "0.49385688", "0.4922082", "0.49112782", "0.4903051", "0.4902512", "0.49012157", "0.48856458", "0.48666164", "0.48511514", "0.484102" ]
0.68042654
1
applies a predicate to object represented by id
def applyPredicate(id,cmd): if type(id) is int: # shapeID attList = g.database[id].getAttList() g.updateAttList(attList, cmd) g.updateShape(id,attList) elif type(id) is HypotheticalShape: attList = id.getAttList() try: shapeID=pickShape(attList) g.updateShape(shapeID,attList) except IndexError: return elif type(id) is g.AttributeList: attList=id try: shapeID=pickShape(attList) g.updateShape(shapeID,attList) except IndexError: return elif type(id) is str: # local var attList = local_vars[id] g.updateAttList(attList, cmd) elif isinstance(id, Set): for shapeID in id.getShapeIDs(): applyPredicate(shapeID, cmd) else: print "Cannot apply predicate to unknown object 'id'"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyPredicate(id,cmd):\r\n\r\n if type(id) is int: # shapeID\r\n attList = g.database[id].getAttList()\r\n g.updateAttList(attList, cmd)\r\n g.updateShape(id,attList)\r\n\r\n elif type(id) is HypotheticalShape:\r\n attList = id.getAttList()\r\n try:\r\n shapeID=pickShape(attList)\r\n g.updateShape(shapeID,attList)\r\n except IndexError:\r\n return\r\n\r\n elif type(id) is g.AttributeList:\r\n attList=id\r\n try:\r\n shapeID=pickShape(attList)\r\n g.updateShape(shapeID,attList)\r\n except IndexError:\r\n return\r\n\r\n elif type(id) is str:\r\n # local var\r\n attList = local_vars[id]\r\n g.updateAttList(attList, cmd)\r\n\r\n elif isinstance(id, Set):\r\n for shapeID in id.getShapeIDs():\r\n applyPredicate(shapeID, cmd)\r\n else:\r\n print \"Cannot apply predicate to unknown object 'id'\"", "def __call__ (self, item, * args, ** kw) :\n return self.predicate (item, * args, ** kw)", "def _addPredicate(self, pid, chunks):\n parent = chunks[pid]\n sub = None\n obj = None\n aux = list()\n auxlabel = \"\"\n # 1st round find absolute subject & object\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # Process by categories.\n if child.func in SubDict:\n sub = child\n elif child.func in ObjDict:\n obj = child\n\n # 2nd round find potential subject & object with aux.\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # Process by categories.\n if child.func in SubDict or child.func in ObjDict:\n continue\n elif child.func in ObjPostDict:\n if not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n elif child.func in SubPassiveObjDict:\n if parent.passive == 1:\n if not obj and child.type in EntityTypeDict:\n obj = child\n elif not sub and child.type in EntityTypeDict:\n sub = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n if not sub and child.type in EntityTypeDict:\n sub = child\n elif not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n elif child.func in ObjPassiveSubDict:\n if parent.passive == 1:\n if not sub and child.type in EntityTypeDict:\n sub = child\n elif not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n if not obj and child.type in EntityTypeDict:\n obj = child\n elif not sub and child.type in EntityTypeDict:\n sub = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n\n if parent.passive == 0:\n # Add parent and subject.\n # if sub and obj:\n # parent.main = \"<{0}>[{2}]{1}\".format(sub.main, parent.main, obj.main)\n # elif sub:\n # parent.main = \"<{0}>[NONE]{1}\".format(sub.main, parent.main)\n # elif obj:\n # parent.main = \"<NONE>[{1}]{0}\".format(parent.main, obj.main)\n if sub:\n parent.main = \"<{0}>{1}\".format(sub.main, parent.main)\n self._addNode(parent, sub=sub.main)\n if not self.G.has_node(sub.main):\n self._addNode(sub)\n self._addEdge(sub.main, parent.main, label=\"主体\\n\", etype=\"sub\")\n else:\n self._addNode(parent)\n # Add object.\n if obj:\n if not self.G.has_node(obj.main):\n self._addNode(obj)\n self._addEdge(parent.main, obj.main, label=\"客体\\n\" + auxlabel, etype=\"obj\")\n else:\n # Add obj as sub\n # if sub and obj:\n # parent.main = \"<{0}>[{2}]{1}\".format(sub.main, 
parent.main, obj.main)\n # elif obj:\n # parent.main = \"<{0}>[NONE]{1}\".format(obj.main, parent.main)\n # elif sub:\n # parent.main = \"<NONE>[{1}]{0}\".format(parent.main, sub.main)\n if obj:\n parent.main = \"<{0}>{1}\".format(obj.main, parent.main)\n self._addNode(parent, sub=obj.main)\n if not self.G.has_node(obj.main):\n self._addNode(obj)\n self._addEdge(obj.main, parent.main, label=\"主体\\n\", etype=\"sub\")\n else:\n self._addNode(parent)\n # Add sub as obj\n if sub:\n if not self.G.has_node(sub.main):\n self._addNode(sub)\n self._addEdge(parent.main, sub.main, label=\"客体\\n\", etype=\"obj\")\n # # Add obj as aux.\n # if obj:\n # aux.append(obj.id)\n # auxlabel += \"[{0}]\\n\".format(obj.surface)\n self._processAux(aux, parent.main, chunks)", "def check_id(self, id):", "def predicate (self, X, * args, ** kw) :\n self.lhs = self.lhs.predicate (X, * args, ** kw)\n return self", "def applypredicates(predicate,\n objects_dic,\n predicates_rules,\n gstate):\n\n pname = predicate[\"name\"]\n predicate_rule = predicates_rules[pname]\n objects_list_ref = predicate_rule[\"objects\"]\n # objects in the real pddl file\n objects = copy.deepcopy(predicate[\"objectNames\"])\n if \"custom_obj\" in predicate_rule:\n # addtional custom object not in the real pddl file\n custom_obj = predicate_rule[\"custom_obj\"]\n # complete object list\n object_list = objects + custom_obj\n objects_list_ref = objects_list_ref + custom_obj\n else:\n object_list = objects\n\n obj_ref_dic = dict(zip(objects_list_ref, object_list))\n for rulename in predicate_rule[\"rules\"]:\n if \"value\" in predicate_rule[rulename]:\n rule = predicate_rule[rulename]\n left, propertyname = get_objname_property(rule[\"left\"], obj_ref_dic)\n value = predicate_rule[rulename][\"value\"]\n if \"function\" in value:\n fproperty = value[\"function\"]\n fname = fproperty[\"fname\"]\n obj_indexs = fproperty[\"obj_indexs\"]\n if \"settings\" in fproperty:\n settings = fproperty[\"settings\"]\n else:\n settings = {}\n state = gstate[fname]\n obj_list = []\n for obj_index in obj_indexs:\n objname = obj_ref_dic[obj_index]\n obj_list.append({objname: objects_dic[objname]})\n result = Custom_functions.customf_controller(fname, obj_list, settings, state, False)\n update_object(objects_dic[left], propertyname, gstate, fname, result)\n elif \"equal\" in value:\n right_value = value[\"equal\"]\n if type(right_value) is not dict:\n objects_dic[left][propertyname[0]] = right_value\n else:\n if \"r\" in right_value: # for color\n objects_dic[left][propertyname[0]] = right_value\n else:\n right_object, right_property = get_objname_property(right_value, obj_ref_dic)\n objects_dic[left][propertyname[0]] = objects_dic[right_object][right_property]\n\n elif \"add\" in value:\n rightvalue = 0\n for additem in value[\"add\"]:\n if type(additem) is dict:\n\n right_object, right_property = get_objname_property(additem, obj_ref_dic)\n addvalue = objects_dic[right_object][right_property]\n rightvalue += addvalue\n else:\n rightvalue += additem\n objects_dic[left][propertyname[0]] = rightvalue\n else:\n # if the rule is action rule\n action = predicate_rule[rulename][\"action\"]\n if \"function\" in action:\n fproperty = action[\"function\"]\n fname = fproperty[\"fname\"]\n obj_indexs = fproperty[\"obj_indexs\"]\n if \"settings\" in fproperty:\n settings = fproperty[\"settings\"]\n else:\n settings = {}\n state = gstate[fname]\n obj_list = []\n for obj_index in obj_indexs:\n objname = obj_ref_dic[obj_index]\n obj_list.append({objname: objects_dic[objname]})\n\n 
key, value = Custom_functions.customf_controller(fname, obj_list, settings, state, False)\n objects_dic[key] = value", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "def k(self, id):\n return id in self._m", "def get_object(id):", "def find_object_by_id(stix_objects, obj_id):\n ret_obj = None\n for obj in stix_objects:\n if obj[\"id\"] == obj_id:\n ret_obj = obj\n break\n return ret_obj", "def predicate (self, X, * args, ** kw) :\n XR = X.REF (X.ETW, _polymorphic_x = self._polymorphic_x)\n self.predicates = list \\\n (p.predicate (XR, * args, ** kw) for p in self.predicates)\n return self", "def __getitem__(self,id):\n \n # make sure id is an integer\n try:\n if not isinstance(id,IntType):\n id=atoi(id)\n except ValueError:\n raise KeyError, id\n \n # make sure it's in our list of children\n if not self.ids.has_key(id):\n raise KeyError, id\n \n # return the posting\n return self.data[id].__of__(self)", "def query_rule_by_id(runtime, idd):\r\n return runtime.policy_parser.query_policy_by_id(idd).rule", "def find_by_id(self, subject_id: str) -> any:\n pass", "def by_id(self, id):\n\n query = self.session.query(self.model_class)\n query = query.filter(self.model_class.id == id)\n return query.first()", "def contains(self, object_id):\n return libplasma.contains(self.conn, object_id)", "def predicate(f):\n wrapper = Predicate(f)\n update_wrapper(wrapper, f)\n return wrapper", "def at(cls, _id):\n return cls.where(cls.primarykey == _id)", "def retract(self, id):\n def _retract(pipe):\n if pipe.hexists(self.feed_items, id):\n pipe.multi()\n pipe.hdel(self.feed_items, id)\n pipe.hdel(self.feed_cancelled, id)\n pipe.zrem(self.feed_published, id)\n pipe.srem(self.feed_stalled, id)\n pipe.zrem(self.feed_claimed, id)\n pipe.lrem(self.feed_ids, 1, id)\n \n self.redis.transaction(_retract, self.feed_items)", "def id_contains(self, id_contains):\n\n self._id_contains = id_contains", "def id_contains(self, id_contains):\n\n self._id_contains = id_contains", "def id_contains(self, id_contains):\n\n self._id_contains = id_contains", "def id_contains(self, id_contains):\n\n self._id_contains = id_contains", "def id_contains(self, id_contains):\n\n self._id_contains = id_contains", "def has_id(subject, id):\n if request.method == 'GET':\n args = get_args(request.args)\n verb = \"list\"\n args[\"id\"] = id\n if request.method == 'PUT':\n args = get_args(request.json)\n verb = \"update\"\n subject = singular[subject];\n args[\"id\"] = id\n if request.method == 'DELETE':\n verb = \"delete\"\n subject = singular[subject];\n return apicall(verb, subject, args);", "def find(cls, animal_id):\n raise NotImplementedError", "def change_predicate(self, new_predicate):\n raise NotImplementedError", "def find_by_id(object_id, items):\n for item in items:\n if object_id == item[\"id\"]:\n return item\n\n raise Exception(f\"Item with {object_id} not found\")", "def _filter(self, probs: Tensor, ids: Tensor) -> Tuple[Tensor, List[int]]:\n raise NotImplementedError" ]
[ "0.73754346", "0.5838974", "0.577372", "0.5400787", "0.5399971", "0.534218", "0.53274", "0.53145176", "0.5280384", "0.5277906", "0.5170549", "0.5151107", "0.51495343", "0.5148643", "0.51419514", "0.51378804", "0.51228505", "0.5114596", "0.5080061", "0.5057794", "0.5043167", "0.5043167", "0.5043167", "0.5043167", "0.5043167", "0.504203", "0.5021058", "0.49815702", "0.4925973", "0.48915702" ]
0.7315682
1
creates a new shape with hyp's attributes
def draw(hyp): print 'g.createShape(',hyp.getAttList(),')' print type(hyp.getAttList()) g.createShape(hyp.getAttList())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(hyp):\n print 'g.createShape(',hyp.getAttList(),')'\n print type(hyp.getAttList())\n g.createShape(hyp.getAttList())", "def shape(self) -> Shape:", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)", "def shape(self):", "def shape(self):", "def __init__(self, shape):\n\n self.shape = shape", "def NewShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_NewShape(self, *args)", "def __init__(self, hypos, alpha=1.0):\n Pmf.__init__(self)\n for hypo in hypos:\n self.Set(hypo, hypo**(-alpha))\n self.Normalize()", "def create_shape(settings, inst, repr=None):\r\n return wrap_shape_creation(\r\n settings,\r\n ifcopenshell_wrapper.create_shape(settings, inst.wrapped_data, repr.wrapped_data if repr is not None else None),\r\n )", "def shape(self):\n path = super(Arrow, self).shape()\n path.addPolygon(self.arrowHead)\n return path", "def __clone_layout_placeholder(self, layout_ph):\n id = self.__next_shape_id\n ph_type = layout_ph.type\n orient = layout_ph.orient\n shapename = self.__next_ph_name(ph_type, id, orient)\n\n sp = self.__new_placeholder_sp(layout_ph, id, ph_type, orient,\n shapename)\n self.__spTree.append(sp)\n shape = Shape(sp)\n self.__shapes.append(shape)\n return shape", "def amplify_2d_shape(shape, x_amplify, y_amplify):", "def hydroline(self) -> Polyline:\n return self._geometry", "def create(self, teeth=10, length=0.3):\n # teeth are alternative face so spans times 2\n spans = teeth * 2\n\n self.transform, self.constructor = cmds.polyPipe(subdivisionsAxis=spans)\n\n # side face index: start---end-----every 2\n # `range` create a sequence like this\n side_faces = range(spans * 2, spans * 3, 2)\n\n # how cmd works is selection load/unload at runtime\n cmds.select(clear=True)\n\n for face in side_faces:\n cmds.select(\"%s.f[%s]\" % (self.transform, face), add=True)\n\n self.extrude = cmds.polyExtrudeFacet(localTranslateZ=length)[0]\n\n if self.debug:\n print(self.extrude)", "def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")", "def create_line_hyp_space(n_features):\n hyp_space = []\n for i in range(1, n_features + 1):\n for j in range(n_features - i + 1):\n hyp = [0 for _ in range(n_features)]\n hyp[j:j + i] = [1 for _ in range(i)]\n hyp_space.append(hyp)\n hyp_space = np.array(hyp_space)\n return hyp_space", "def create_noche(y0, y1):\n # Defining the location and colors of each vertex of the shape \n vertices = [\n # positions colors\n -1.0, y0, 0.0, 0.15, 0.16, 0.5,\n 1.0, y0, 0.0, 0.15, 0.16, 0.5,\n 1.0, y1, 0.0, 0.15, 0.16, 0.1,\n -1.0, y1, 0.0, 0.15, 0.16, 0.1]\n\n # Defining connections among vertices\n # We have a triangle every 3 indices specified\n indices = [0, 1, 2,\n 2, 3, 0]\n\n return Shape(vertices, indices)", "def add_shape(self, spec):\n color_, shape_ = spec\n if shape_ is None:\n shape_ = self.random_shape()\n if color_ is None:\n color_ = self.random_color()\n x = shape.rand_pos()\n y = shape.rand_pos()\n return shape.SHAPE_IMPLS[shape_](x=x, y=y, color_=color_)", "def __init__(self, hyps, prop):\n typecheck.checkinstance('Thm', hyps, [Term], prop, Term)\n self.hyps = tuple(term_ord.sorted_terms(hyps))\n self.prop = prop", "def create_figure_new(self):\n kw = {}\n self.p = figure(plot_height=400, plot_width=400, tools='pan,wheel_zoom,box_zoom,reset,hover', **kw)\n 
self.p.circle(x=[0],y=[0])", "def shape(self) -> Shape:\n raise NotImplementedError()", "def __init__(self, x, y, th):\n self.x = x\n self.y = y\n self.th = th", "def from_shapely(cls, shape):\n return super(Point, cls).from_shapely(shape)", "def get_random_h(shape_f0, shape_ap):\n\tf0 = get_f0(shape_f0)\n\tap = get_ap(shape_ap)\n\n\t#for i in range(len(f0)):\n\t#\tplt.plot(f0[i,0,:])\n\t#\tplt.show()\n\n\t#f0 = f0*(1-ap[:,:,-1])\n\tif len(shape_f0) == 3:\n\t\tf0[:,0,:] = f0[:,0,:]*(1-ap[:,:,0])\n\telif len(shape_f0) == 1:\n\t\tf0[:] = f0[:]*(1-ap[:,0])\n\n\tf0[f0<50] = 0\n\n\n\t# for i in range(len(f0)):\n\t# \tplt.subplot(211)\n\t# \tplt.plot(f0[i,0,:])\n\t# \tplt.subplot(212)\n\t# \tplt.imshow(np.rot90(ap[i,:,:]), aspect=\"auto\")\n\t# \tplt.show()\n\n\n\treturn f0, ap", "def generate(pts):\n cmds.polyCreateFacet(name=\"shirt\", p=points)\n cmds.polyTriangulate()\n cmds.polySubdivideFacet(dv=SUBDIVISIONS)\n cmds.polyTriangulate()", "def fix(hobj):\n\n h.execute('create axon[2]', hobj)\n\n for sec in hobj.axon:\n sec.L = 30\n sec.diam = 1\n hobj.axonal.append(sec=sec)\n hobj.all.append(sec=sec) # need to remove this comment\n\n hobj.axon[0].connect(hobj.soma[0], 0.5, 0)\n hobj.axon[1].connect(hobj.axon[0], 1, 0)\n\n h.define_shape()", "def getShape(transform):\n\n pass", "def generatePolygons():", "def set_shape(self):\n\t\theigh = 150.\n\t\tself.own_shape_matrix = np.array([[50., 0., 0., 0.],\n\t\t\t\t\t\t\t\t\t\t\t[ 0., 50., 0., 0.],\n\t\t\t\t\t\t\t\t\t\t\t[ 0., 0., heigh, heigh/2],\n\t\t\t\t\t\t\t\t\t\t\t[ 0., 0., 0., 1.]])\n\t\t\n\t\t\n\t\tpass", "def transform(self, H): # or update()\n vh = H @ self.vertices.T\n vh = vh.T \n va = vh[:,:2]\n return Polygon(va)" ]
[ "0.6593559", "0.6257868", "0.5956994", "0.5908373", "0.5908373", "0.5786683", "0.5775889", "0.5735456", "0.57318187", "0.5704653", "0.56927305", "0.5687583", "0.563451", "0.563041", "0.5629834", "0.55266005", "0.5505664", "0.5436107", "0.54052895", "0.5402675", "0.5394044", "0.53769255", "0.536441", "0.5353379", "0.53281033", "0.53202355", "0.53028655", "0.5291553", "0.52911454", "0.5290767" ]
0.66208655
0
hides the existing shape associated with id
def hide(id): if type(id) is int: # shapeID g.hide(g.database[id]) else: # id refers to hypothetical shape shapeID=pickShape(local_vars[id]) g.hide(g.database[shapeID])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hide(id):\n if type(id) is int: # shapeID\n g.hide(g.database[id])\n else: # id refers to hypothetical shape\n shapeID=pickShape(local_vars[id])\n g.hide(g.database[shapeID])", "def hide_shape(self, shape_id):\n\n if shape_id:\n self.itemconfigure(shape_id, state=\"hidden\")", "def show_shape(self, shape_id):\n\n if shape_id:\n self.itemconfigure(shape_id, state=\"normal\")", "def hideLayer(self, id):\n\n #log.debug('hideLayer: hiding layer %s' % str(id))\n\n self.layer_mapping[id].visible = False\n self.Refresh()", "def hidePlot(self, index):\n self.pathItem_list[index].hide()", "def hide(self, item_id):\n pass", "def HideObject(object_id):\n return HideObjects(object_id)==1", "def hide(self):\n self.geometry(\"%dx%d%+d%+d\" % (0, 0, 0, 0))", "def hide(self):\n\n if not 'd-none' in str(self.class_):\n self.old_class = self.class_\n self.class_ = 'd-none'\n\n self.viz = False\n\n return self", "def hide(self):\n self.visible = False", "def hide(self):\n raise NotImplementedError", "def toggleShapeVis(self, transform, value):\n\n if cmds.objExists(transform):\n shape = cmds.listRelatives(transform, shapes=True)\n if shape is not None:\n cmds.setAttr(shape[0] + \".v\", lock=False)\n cmds.setAttr(shape[0] + \".v\", value)\n cmds.setAttr(shape[0] + \".v\", lock=True)", "def hide(self):\n self.set_visible(False)", "def toggle_surface_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if poly in self.hole_list:\n poly.setBrush(QBrush(QColor(255, 255, 255)))\n else:\n poly.setBrush(QBrush(QColor(0, 0, 0, 50)))\n\n # Disable the selection of edges and hide the marker if there is one\n for edge in self.edge_list:\n edge.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(False)\n\n # Hide markers on points\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(False)", "def hide_param(self,name):\n if name in self.representations:\n self.representations[name]['frame'].pack_forget()\n # CEBNOTE: forgetting label and widget rather than frame would\n # just hide while still occupying space (i.e. 
the empty frame\n # stays in place, and so widgets could later be inserted at\n # exact same place)\n #self.representations[name]['label'].pack_forget()\n #self.representations[name]['widget'].pack_forget()\n # unhide_param would need modifying too", "def toggle_border_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n poly.setBrush(QColor(0, 0, 0, 0))\n\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(True)\n # Enable selection of the edges of the polygon, if the edge has a marker display it\n for edge in self.edge_list:\n edge.childItems()[0].setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(True)", "def hideBtnImg(*args, **kwargs):\n\targs[0].get_image().hide()", "def hide(self):\n self.row_box.grid_remove()\n self.field_name_box.grid_remove()\n self.field_name_label.grid_remove()\n self.value_box.grid_remove()\n self.active_value_widget.grid_remove()", "def unHide(self):\n self.visible = True", "def remove_drawing_poly(self):\n\n self.drawing_poly = QPolygonF()\n self.drawing_points_coords = []\n\n for p in self.drawing_points:\n p.setVisible(False)\n\n for line in self.connecting_line_list:\n line.setVisible(False)\n if self.connecting_line:\n self.connecting_line.setVisible(False)\n self.connecting_line = None\n self.first_draw = True\n if self.set_tooltip:\n self.set_tooltip(\"\")", "def IsObjectHidden(object_id):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n return rhobj.IsHidden", "def hide(self):\n self.course.quick_action(self.id, 'hide')", "def hideIsoSurfaces(self):\n #research\n profprint()\n contourNode = slicer.util.getNode(self.contourNode)\n widget = slicer.modules.NeedleFinderWidget\n if contourNode != None:\n contourNode.SetDisplayVisibility(abs(widget.hideContourButton.isChecked()-1))\n contourNode.GetModelDisplayNode().SetSliceIntersectionVisibility(abs(widget.hideContourButton.isChecked()-1))", "def delete_shape(self, shape_id):\n\n self.variables.shape_ids.remove(shape_id)\n del self.variables.vector_objects[str(shape_id)]\n self.delete(shape_id)\n if shape_id == self.variables.current_shape_id:\n self.variables.current_shape_id = None", "def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()", "def destroy(self):\r\n self.visible = False", "def remove_object(self, n_id):\r\n\r\n # remove shapes\r\n for patch in self.shapes[n_id]:\r\n patch.remove()\r\n del self.shapes[n_id]\r\n\r\n # remove text\r\n if self.show_labels:\r\n for text in self.labels[n_id]:\r\n text.remove()\r\n del self.labels[n_id]", "def ensure_hidden(self):\n self.set_visible(False)", "def hideIsoSurfaces(self):\r\n # research\r\n profprint()\r\n contourNode = slicer.util.getNode(self.contourNode)\r\n widget = slicer.modules.NeedleFinderWidget\r\n if contourNode != None:\r\n contourNode.SetDisplayVisibility(abs(widget.hideContourButton.isChecked() - 1))\r\n contourNode.GetModelDisplayNode().SetSliceIntersectionVisibility(abs(widget.hideContourButton.isChecked() - 1))", "def unhide(self):\n self.course.quick_action(self.id, 'show')" ]
[ "0.8552118", "0.8390346", "0.71212643", "0.6763957", "0.66287816", "0.6290862", "0.6058897", "0.59502864", "0.5834213", "0.5813145", "0.56963134", "0.5608406", "0.56070375", "0.557525", "0.5532165", "0.5472659", "0.5464938", "0.54630136", "0.5410584", "0.53952074", "0.5380336", "0.53540635", "0.53539234", "0.53499085", "0.5321201", "0.53204113", "0.5299697", "0.52921754", "0.52785563", "0.5273376" ]
0.8581231
0
fills unspecified attributes of var with attributes of most recently mentioned shape that matches attributes in var
def one2(var): varAttList = local_vars[var] options = g.database.findMatches(local_vars[var]) shapeAttList = g.database[g.referenceOrder.pickMostRecent(options)].getAttList() local_vars[var] = g.AttributeList(shapeAttList.items()+ varAttList.items())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def one2(var):\n varAttList = local_vars[var]\n options = g.database.findMatches(local_vars[var])\n shapeAttList = g.database[g.referenceOrder.pickMostRecent(options)].getAttList()\n local_vars[var] = g.AttributeList(shapeAttList.items()+ varAttList.items())", "def copy_attributes(var1, var2):\n for each in var1.ncattrs():\n if each != \"_FillValue\":\n setattr(var2, each, getattr(var1, each))", "def test_attribute_types(self):\n\n # Read a data file\n layername = 'test_buildings.shp'\n filename = '%s/%s' % (TESTDATA, layername)\n V = read_layer(filename)\n\n # Make a smaller dataset\n V_ref = V.get_topN('FLOOR_AREA', 5)\n\n geometry = V_ref.get_geometry()\n #data = V_ref.get_data()\n projection = V_ref.get_projection()\n\n # Create new attributes with a range of types\n keys = ['None', 'String', 'Boolean', 'Integer', 'Real',\n 'Array 1D', 'Array 2D']\n values = [None, 'Test', True, 3, 3.14,\n numpy.array([2.56]), numpy.array([[6.21]])]\n\n data = []\n for i in range(len(geometry)):\n D = {}\n for j, key in enumerate(keys):\n if key == 'Boolean':\n # Add a little variation\n if i % 2 == 0:\n D[key] = not values[j]\n else:\n D[key] = values[j]\n else:\n D[key] = values[j]\n data.append(D)\n\n # Create new object from test data\n V_new = Vector(data=data, projection=projection, geometry=geometry)\n\n # Write this new object, read it again and check\n tmp_filename = unique_filename(suffix='.shp')\n V_new.write_to_file(tmp_filename)\n\n V_tmp = read_layer(tmp_filename)\n\n #print V_new.get_data()[1]\n #print V_tmp.get_data()[1]\n\n assert V_tmp.projection == V_new.projection\n assert numpy.allclose(V_tmp.geometry, V_new.geometry)\n assert V_tmp.data == V_new.data\n assert V_tmp.get_data() == V_new.get_data()\n assert V_tmp == V_new\n assert not V_tmp != V_new", "def one1(var):\r\n varAttList = local_vars[var]\r\n itAttList = g.getIt()\r\n local_vars[var] = g.AttributeList(itAttList.items() + varAttList.items())", "def one1(var):\n varAttList = local_vars[var]\n itAttList = g.getIt()\n local_vars[var] = g.AttributeList(itAttList.items() + varAttList.items())", "async def infer_shape_env_setitem(track, env, key, x):\n return NOSHAPE", "def copy_attrs(varin,varout):\n for attr_name in varin.ncattrs():\n varout.setncattr(attr_name,varin.getncattr(attr_name))", "async def infer_shape_identity(track, x):\n return await x['shape']", "def fill_attributes(ml_file, other_file):\n with xr.load_dataset(other_file) as other:\n with xr.open_dataset(ml_file) as ml:\n for variable in other.variables:\n if variable in ml.variables:\n other[variable].attrs = ml[variable].attrs\n other.to_netcdf(other_file)", "def get_attributes(self, shape):\n attributes = {}\n identifier_names = [i.name for i in self.identifiers]\n\n for name, member in shape.members.items():\n snake_cased = xform_name(name)\n if snake_cased in identifier_names:\n # Skip identifiers, these are set through other means\n continue\n snake_cased = self._get_name(\n 'attribute', snake_cased, snake_case=False\n )\n attributes[snake_cased] = (name, member)\n\n return attributes", "def _get_var_attrs(var):\n\n generic_dict = {'instrument': '', 'valid_range': (-1e+35,1e+35),\n 'missing_value': -9999, 'height': '',\n 'standard_name': '', 'group_name': '',\n 'serial_number': ''}\n\n generic_dict.update(attrs_dict[var])\n return generic_dict", "def id3(x, y, attributes, max_depth, weight, depth=0):\r\n tree = {}\r\n new_attr = []\r\n arr, count = np.unique(y, return_counts=True)\r\n # checking edge cases - reached maximum depth, or no attributes\r\n if 
len(attributes) == 0 or depth == max_depth or len(x) == 0:\r\n return np.bincount(y).argmax()\r\n # if all the values of y are one return one\r\n elif len(np.unique(y)) == 1:\r\n return arr[0]\r\n else:\r\n # calculating mutual information values\r\n info_values = {} \r\n # over number of columns\r\n for i in range(data.dim):\r\n oldX = partition(x[:,i])\r\n oldKeys = oldX.keys()\r\n # check in attributes recieved from bagging\r\n for attr in attributes:\r\n binX = []\r\n key , value = attr\r\n # check for key and value\r\n if i == key and value in oldKeys:\r\n # get the index\r\n index = oldX[value]\r\n for n in range(len(x)):\r\n if n in index:\r\n # if match binary classification 1 / 0 and appending to binX list\r\n binX.append(1)\r\n else:\r\n binX.append(0)\r\n # adding to a dictionary \r\n info_values[(i, value)] = mutual_information(binX, y, weight)\r\n \r\n # getting the maximum feature value\r\n best_feature_index = max(info_values, key=info_values.get) \r\n best_feature, best_val = best_feature_index\r\n # creating the best partition\r\n x_best_part = partition(x[:,best_feature])\r\n #selecting other than the best feature value from the dictionary\r\n new_attr = list(filter(lambda x: x!= (best_feature, best_val), attributes))\r\n # increasing depth\r\n depth += 1\r\n\r\n # Calling id3 recursively, checking over 0,1 making a prediction as True / False \r\n for n in range(0,2):\r\n if n == 0:\r\n # recursively calling id3 over the best values of the x partition\r\n bestX = x[x_best_part[best_val]]\r\n bestY = y[x_best_part[best_val]]\r\n tree[best_feature, best_val, 'True'] = id3(bestX, bestY, new_attr, max_depth,weight, depth)\r\n else:\r\n \t # recursively calling id3 selecting other than best features\r\n othr_idx = []\r\n for i in x_best_part:\r\n if i != best_val:\r\n othr_idx.extend(x_best_part[i])\r\n\r\n otherX = x[othr_idx]\r\n otherY = y[othr_idx]\r\n tree[best_feature, best_val, 'False'] = id3(otherX, otherY, new_attr, max_depth,weight, depth)\r\n return tree", "def lnhattr(shape):\n\n arnold_nodes = ('rcurve', 'cwdth', 'srate', 'ai_curve_shaderr', 'ai_curve_shaderg', 'ai_curve_shaderb')\n for ar in arnold_nodes:\n cmds.setAttr(shape + \".\" + ar, l=True, k=False, cb=False)", "def __init__(self):\n self.X = None\n self.Y = None\n self.features = None\n self.max = self.min = None\n self._look_up = None\n self.attr_weight = None", "async def infer_shape_J(track, x):\n return track.jtag(await x.get_shallow('shape'))", "def fixdims(var):\n\t\n\tfor ii in var.dims:\n\t\tkk=ii[:3].lower()\n\t\t\n\t\tif kk == 'lat':\n\t\t\tvar=var.rename({ii:'lat'})\n\t\t\t\n\t\tif kk == 'lon':\n\t\t\tvar=var.rename({ii:'lon'})\n\t\t\t\n\t\tif kk == 'lev' or kk == 'ple' or kk == 'pre':\n\t\t\tvar=var.rename({ii:'level'})\n\n\t\tif kk == 'tim':\n\t\t\tvar=var.rename({ii:'time'})\n\n\treturn var", "async def infer_shape_getattr(track, data, item):\n async def on_dcattr(data, data_t, item_v):\n data_sh = await data['shape']\n return data_sh.shape[item_v]\n\n return await static_getter(\n track, data, item,\n fetch=getattr,\n on_dcattr=on_dcattr\n )", "def _convert_to_original_shape(\n attribute_features: tf.Tensor,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n attribute: Text,\n ) -> tf.Tensor:\n # in order to convert the attribute features with shape\n # (combined batch-size and dialogue length x 1 x units)\n # to a shape of (batch-size x dialogue length x units)\n # we use tf.scatter_nd. 
Therefore, we need the target shape and the indices\n # mapping the values of attribute features to the position in the resulting\n # tensor.\n\n # attribute_mask has shape batch x dialogue_len x 1\n attribute_mask = tf_batch_data[attribute][MASK][0]\n\n if attribute in SENTENCE_FEATURES_TO_ENCODE + STATE_LEVEL_FEATURES:\n dialogue_lengths = tf.cast(\n tf_batch_data[DIALOGUE][LENGTH][0], dtype=tf.int32\n )\n dialogue_indices = tf_batch_data[DIALOGUE][INDICES][0]\n else:\n # for labels, dialogue length is a fake dim and equal to 1\n dialogue_lengths = tf.ones((tf.shape(attribute_mask)[0],), dtype=tf.int32)\n dialogue_indices = tf.zeros((tf.shape(attribute_mask)[0],), dtype=tf.int32)\n\n batch_dim = tf.shape(attribute_mask)[0]\n dialogue_dim = tf.shape(attribute_mask)[1]\n units = attribute_features.shape[-1]\n\n # attribute_mask has shape (batch x dialogue_len x 1), remove last dimension\n attribute_mask = tf.cast(tf.squeeze(attribute_mask, axis=-1), dtype=tf.int32)\n # sum of attribute mask contains number of dialogue turns with \"real\" features\n non_fake_dialogue_lengths = tf.reduce_sum(attribute_mask, axis=-1)\n # create the batch indices\n batch_indices = tf.repeat(tf.range(batch_dim), non_fake_dialogue_lengths)\n\n # attribute_mask has shape (batch x dialogue_len x 1), while\n # dialogue_indices has shape (combined_dialogue_len,)\n # in order to find positions of real input we need to flatten\n # attribute mask to (combined_dialogue_len,)\n dialogue_indices_mask = tf.boolean_mask(\n attribute_mask, tf.sequence_mask(dialogue_lengths, dtype=tf.int32)\n )\n # pick only those indices that contain \"real\" input\n dialogue_indices = tf.boolean_mask(dialogue_indices, dialogue_indices_mask)\n\n indices = tf.stack([batch_indices, dialogue_indices], axis=1)\n\n shape = tf.convert_to_tensor([batch_dim, dialogue_dim, units])\n attribute_features = tf.squeeze(attribute_features, axis=1)\n\n return tf.scatter_nd(indices, attribute_features, shape)", "async def infer_shape_tuple_setitem(track, seq, idx, value):\n seq_sh = await seq['shape']\n idx_v = await idx['value']\n value_sh = await value['shape']\n new_sh = list(seq_sh.shape)\n new_sh[idx_v] = value_sh\n return TupleShape(new_sh)", "def merge_attrs(self):\n for aid in self.attrs:\n new_val = self.attrs[aid]\n if aid in self.attributes:\n if ('value' in self.attributes[aid] and\n self.attributes[aid]['value'] != new_val):\n pass\n # print \"Updating attribute %s[%s] %s -> %s\" % (\n # self.name, aid, self.attributes[aid]['value'], new_val)\n else:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, new_val)\n self.remember_custom_attribute(self.name, aid, new_val)\n self.attributes[aid] = {}\n self.attributes[aid]['nv'] = new_val", "def set_shape(self, shape):\n self._shape = self._shape.merge_with(shape)", "async def infer_shape_return(track, v):\n return await v['shape']", "def my_featurize(apartment):\n return x, y", "def FreeVariable( name, shape, dtype):\n shape = np.atleast_1d(shape)\n var = TensorType(str(dtype), shape == 1)(name)\n var.dshape = tuple(shape)\n var.dsize = int(np.prod(shape))\n return var", "def _infer_shape(schema):\n for feature in schema.feature:\n # Currently we infer shape only for features with valency 1.\n if (feature.presence.min_fraction == 1 and\n feature.value_count.min == feature.value_count.max == 1):\n feature.shape.dim.add().size = 1", "def shape_element(element, node_attr_fields=NODE_FIELDS, way_attr_fields=WAY_FIELDS,\r\n problem_chars=PROBLEMCHARS, 
default_tag_type='regular'):\r\n\r\n node_atts = {}\r\n way_atts = {}\r\n way_nodes = []\r\n tags = [] # Handle secondary tags the same way for both node and way elements\r\n\r\n if element.tag == 'node': #fill dictionary with k/v pairs from NODE_FIELDS\r\n for i in node_attr_fields:\r\n node_atts[i] = element.attrib[i]\r\n\r\n if element.tag == 'way':\r\n for i in way_attr_fields:\r\n way_atts[i] = element.attrib[i]\r\n\r\n for tag in element.iter(\"tag\"): #loop through tags looking for problem values\r\n dic = {}\r\n attributes = tag.attrib\r\n if tag.attrib['k'] == \"addr:street\":\r\n tag.attrib['v'] = update_name_street(tag.attrib['v'], streetmapping)\r\n elif tag.attrib['k'] == \"addr:city\":\r\n tag.attrib['v'] = update_name_city(tag.attrib['v'], citymapping)\r\n elif tag.attrib['k'] == \"addr:postcode\":\r\n tag.attrib['v'] = update_zips(tag.attrib['v'])\r\n elif tag.attrib['k'] == \"cuisine\":\r\n tag.attrib['v'] = update_name_cuisine(tag.attrib['v'], cuisinemapping)\r\n \r\n if problem_chars.search(tag.attrib['k']):\r\n continue\r\n\r\n if element.tag == 'node': #add node id for attributes\r\n dic['id'] = node_atts['id']\r\n else:\r\n dic['id'] = way_atts['id'] #add way id for attributes\r\n\r\n dic['value'] = attributes['v'] #value of key for each type\r\n\r\n colon_k=LOWER_COLON.search(tag.attrib['k'])\r\n \r\n if colon_k:\r\n #print colon_k.group(0)\r\n #print tag.attrib['k']\r\n dic['key'],dic['type'] = right_key(tag.attrib['k']) #call function to split at colon\r\n else:\r\n dic['key'] = attributes['k'] #assign regular that there was no colon problem\r\n dic['type'] = 'regular'\r\n\r\n tags.append(dic)\r\n\r\n if element.tag == 'way':\r\n position = 0\r\n for nd in element.iter(\"nd\"): #loop through nd child tags numbering them\r\n way_node_dic = {}\r\n way_node_dic['id'] = way_atts['id']\r\n way_node_dic['node_id'] = nd.attrib['ref']\r\n way_node_dic['position'] = position\r\n position = position + 1\r\n way_nodes.append(way_node_dic)\r\n \r\n \r\n \r\n if element.tag == 'node': #process the above for node tags for final formatting\r\n return {'node': node_atts, 'node_tags': tags}\r\n\r\n elif element.tag == 'way': #process the above for way tags for final formatting\r\n return {'way': way_atts, 'way_nodes': way_nodes, 'way_tags': tags}", "def redimension(array, dimensions, attributes, dim_boundaries=None):\n if array.dim_names == dimensions and array.att_names == attributes:\n return array\n dim_boundaries = dim_boundaries or {}\n\n orig_atts = set(array.att_names)\n orig_dims = set(array.dim_names)\n\n to_promote = [d for d in dimensions if d in orig_atts] # att->dim\n to_demote = [a for a in attributes if a in orig_dims] # dim->att\n array = cast_to_integer(array, to_promote)\n\n # need a dummy attribute, otherwise result has no attributes\n if not attributes:\n dummy = _new_attribute_label('__dummy', array)\n array = array.apply(dummy, 0)\n attributes = [dummy]\n\n # build the attribute schema\n new_att = {}\n for r in array.sdbtype.full_rep:\n if r[0] in attributes: # copy schema\n new_att[r[0]] = _att_schema_item(r)\n for d in to_demote: # change attribute to dimension\n new_att[d] = '%s:int64' % d\n\n new_att = ','.join(new_att[a] for a in attributes)\n\n # build the dimension schema\n ds = array.datashape\n new_dim = {}\n for n, l, h, ch, co in zip(ds.dim_names, ds.dim_low, ds.dim_high,\n ds.chunk_size, ds.chunk_overlap):\n h = h if h is not None else '*'\n if n in dimensions:\n new_dim[n] = '{0}={1}:{2},{3},{4}'.format(n, l, h, ch, co)\n\n if to_promote:\n # 
don't do limits here, too expensive!\n # XXX this does wrong thing if attribute has negative values\n # for k, v in limits(array, to_promote).items():\n for k in to_promote:\n v = dim_boundaries.get(k, (0, '*'))\n new_dim[k] = _dim_schema_item(k, v)\n\n new_dim = ','.join(new_dim[d] for d in dimensions)\n\n schema = '<{0}> [{1}]'.format(new_att, new_dim)\n return array.redimension(schema)", "def copy_attrs(data_orig, data_new):\n\n if isinstance(data_orig, Dataset):\n\n # Variables\n for v in data_orig.data_vars:\n field = data_orig[v]\n for attr, val in field.attrs.items():\n data_new[v].attrs[attr] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n # Metadata\n for attr, val in data_orig.attrs.items():\n data_new.attrs[attr] = val\n\n elif isinstance(data_orig, DataArray):\n\n # Variable Metadata\n for att, val in data_orig.attrs.items():\n data_new.attrs[att] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n else:\n raise ValueError(\"Couldn't handle type %r\" % type(data_orig))\n\n return data_new", "def test_copy_attributes(self):\n\n v = Vector({ 'x': 3 }, { 'y': True })\n n = v.copy()\n\n self.assertEqual(v.attributes, n.attributes)\n\n v.attributes['y'] = False\n self.assertFalse(v.attributes['y'])\n self.assertTrue(n.attributes['y'])\n v.attributes['y'] = True", "def shape(self):" ]
[ "0.68038476", "0.61991006", "0.5679668", "0.5434393", "0.5364872", "0.53093505", "0.5270253", "0.5148796", "0.51211107", "0.5072938", "0.504459", "0.50410783", "0.50117296", "0.5000911", "0.4990542", "0.4980776", "0.4973292", "0.49685407", "0.4966164", "0.4943376", "0.49383464", "0.49237496", "0.49128845", "0.49046665", "0.49036315", "0.4900678", "0.4885268", "0.4865586", "0.4849821", "0.48426655" ]
0.68832254
0
applies a predicate to object represented by id
def applyPredicate(id,cmd): if type(id) is int: # shapeID attList = g.database[id].getAttList() g.updateAttList(attList, cmd) g.updateShape(id,attList) elif type(id) is HypotheticalShape: attList = id.getAttList() try: shapeID=pickShape(attList) g.updateShape(shapeID,attList) except IndexError: return elif type(id) is g.AttributeList: attList=id try: shapeID=pickShape(attList) g.updateShape(shapeID,attList) except IndexError: return elif type(id) is str: # local var attList = local_vars[id] g.updateAttList(attList, cmd) elif isinstance(id, Set): for shapeID in id.getShapeIDs(): applyPredicate(shapeID, cmd) else: print "Cannot apply predicate to unknown object 'id'"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyPredicate(id,cmd):\n\n if type(id) is int: # shapeID\n attList = g.database[id].getAttList()\n g.updateAttList(attList, cmd)\n g.updateShape(id,attList)\n\n elif type(id) is HypotheticalShape:\n attList = id.getAttList()\n try:\n shapeID=pickShape(attList)\n g.updateShape(shapeID,attList)\n except IndexError:\n return\n\n elif type(id) is g.AttributeList:\n attList=id\n try:\n shapeID=pickShape(attList)\n g.updateShape(shapeID,attList)\n except IndexError:\n return\n\n elif type(id) is str:\n # local var\n attList = local_vars[id]\n g.updateAttList(attList, cmd)\n\n elif isinstance(id, Set):\n for shapeID in id.getShapeIDs():\n applyPredicate(shapeID, cmd)\n else:\n print \"Cannot apply predicate to unknown object 'id'\"", "def __call__ (self, item, * args, ** kw) :\n return self.predicate (item, * args, ** kw)", "def _addPredicate(self, pid, chunks):\n parent = chunks[pid]\n sub = None\n obj = None\n aux = list()\n auxlabel = \"\"\n # 1st round find absolute subject & object\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # Process by categories.\n if child.func in SubDict:\n sub = child\n elif child.func in ObjDict:\n obj = child\n\n # 2nd round find potential subject & object with aux.\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # Process by categories.\n if child.func in SubDict or child.func in ObjDict:\n continue\n elif child.func in ObjPostDict:\n if not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n elif child.func in SubPassiveObjDict:\n if parent.passive == 1:\n if not obj and child.type in EntityTypeDict:\n obj = child\n elif not sub and child.type in EntityTypeDict:\n sub = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n if not sub and child.type in EntityTypeDict:\n sub = child\n elif not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n elif child.func in ObjPassiveSubDict:\n if parent.passive == 1:\n if not sub and child.type in EntityTypeDict:\n sub = child\n elif not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n if not obj and child.type in EntityTypeDict:\n obj = child\n elif not sub and child.type in EntityTypeDict:\n sub = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n\n if parent.passive == 0:\n # Add parent and subject.\n # if sub and obj:\n # parent.main = \"<{0}>[{2}]{1}\".format(sub.main, parent.main, obj.main)\n # elif sub:\n # parent.main = \"<{0}>[NONE]{1}\".format(sub.main, parent.main)\n # elif obj:\n # parent.main = \"<NONE>[{1}]{0}\".format(parent.main, obj.main)\n if sub:\n parent.main = \"<{0}>{1}\".format(sub.main, parent.main)\n self._addNode(parent, sub=sub.main)\n if not self.G.has_node(sub.main):\n self._addNode(sub)\n self._addEdge(sub.main, parent.main, label=\"主体\\n\", etype=\"sub\")\n else:\n self._addNode(parent)\n # Add object.\n if obj:\n if not self.G.has_node(obj.main):\n self._addNode(obj)\n self._addEdge(parent.main, obj.main, label=\"客体\\n\" + auxlabel, etype=\"obj\")\n else:\n # Add obj as sub\n # if sub and obj:\n # parent.main = \"<{0}>[{2}]{1}\".format(sub.main, parent.main, obj.main)\n # elif obj:\n # parent.main = 
\"<{0}>[NONE]{1}\".format(obj.main, parent.main)\n # elif sub:\n # parent.main = \"<NONE>[{1}]{0}\".format(parent.main, sub.main)\n if obj:\n parent.main = \"<{0}>{1}\".format(obj.main, parent.main)\n self._addNode(parent, sub=obj.main)\n if not self.G.has_node(obj.main):\n self._addNode(obj)\n self._addEdge(obj.main, parent.main, label=\"主体\\n\", etype=\"sub\")\n else:\n self._addNode(parent)\n # Add sub as obj\n if sub:\n if not self.G.has_node(sub.main):\n self._addNode(sub)\n self._addEdge(parent.main, sub.main, label=\"客体\\n\", etype=\"obj\")\n # # Add obj as aux.\n # if obj:\n # aux.append(obj.id)\n # auxlabel += \"[{0}]\\n\".format(obj.surface)\n self._processAux(aux, parent.main, chunks)", "def predicate (self, X, * args, ** kw) :\n self.lhs = self.lhs.predicate (X, * args, ** kw)\n return self", "def check_id(self, id):", "def applypredicates(predicate,\n objects_dic,\n predicates_rules,\n gstate):\n\n pname = predicate[\"name\"]\n predicate_rule = predicates_rules[pname]\n objects_list_ref = predicate_rule[\"objects\"]\n # objects in the real pddl file\n objects = copy.deepcopy(predicate[\"objectNames\"])\n if \"custom_obj\" in predicate_rule:\n # addtional custom object not in the real pddl file\n custom_obj = predicate_rule[\"custom_obj\"]\n # complete object list\n object_list = objects + custom_obj\n objects_list_ref = objects_list_ref + custom_obj\n else:\n object_list = objects\n\n obj_ref_dic = dict(zip(objects_list_ref, object_list))\n for rulename in predicate_rule[\"rules\"]:\n if \"value\" in predicate_rule[rulename]:\n rule = predicate_rule[rulename]\n left, propertyname = get_objname_property(rule[\"left\"], obj_ref_dic)\n value = predicate_rule[rulename][\"value\"]\n if \"function\" in value:\n fproperty = value[\"function\"]\n fname = fproperty[\"fname\"]\n obj_indexs = fproperty[\"obj_indexs\"]\n if \"settings\" in fproperty:\n settings = fproperty[\"settings\"]\n else:\n settings = {}\n state = gstate[fname]\n obj_list = []\n for obj_index in obj_indexs:\n objname = obj_ref_dic[obj_index]\n obj_list.append({objname: objects_dic[objname]})\n result = Custom_functions.customf_controller(fname, obj_list, settings, state, False)\n update_object(objects_dic[left], propertyname, gstate, fname, result)\n elif \"equal\" in value:\n right_value = value[\"equal\"]\n if type(right_value) is not dict:\n objects_dic[left][propertyname[0]] = right_value\n else:\n if \"r\" in right_value: # for color\n objects_dic[left][propertyname[0]] = right_value\n else:\n right_object, right_property = get_objname_property(right_value, obj_ref_dic)\n objects_dic[left][propertyname[0]] = objects_dic[right_object][right_property]\n\n elif \"add\" in value:\n rightvalue = 0\n for additem in value[\"add\"]:\n if type(additem) is dict:\n\n right_object, right_property = get_objname_property(additem, obj_ref_dic)\n addvalue = objects_dic[right_object][right_property]\n rightvalue += addvalue\n else:\n rightvalue += additem\n objects_dic[left][propertyname[0]] = rightvalue\n else:\n # if the rule is action rule\n action = predicate_rule[rulename][\"action\"]\n if \"function\" in action:\n fproperty = action[\"function\"]\n fname = fproperty[\"fname\"]\n obj_indexs = fproperty[\"obj_indexs\"]\n if \"settings\" in fproperty:\n settings = fproperty[\"settings\"]\n else:\n settings = {}\n state = gstate[fname]\n obj_list = []\n for obj_index in obj_indexs:\n objname = obj_ref_dic[obj_index]\n obj_list.append({objname: objects_dic[objname]})\n\n key, value = Custom_functions.customf_controller(fname, 
obj_list, settings, state, False)\n objects_dic[key] = value", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "def k(self, id):\n return id in self._m", "def get_object(id):", "def find_object_by_id(stix_objects, obj_id):\n ret_obj = None\n for obj in stix_objects:\n if obj[\"id\"] == obj_id:\n ret_obj = obj\n break\n return ret_obj", "def predicate (self, X, * args, ** kw) :\n XR = X.REF (X.ETW, _polymorphic_x = self._polymorphic_x)\n self.predicates = list \\\n (p.predicate (XR, * args, ** kw) for p in self.predicates)\n return self", "def __getitem__(self,id):\n \n # make sure id is an integer\n try:\n if not isinstance(id,IntType):\n id=atoi(id)\n except ValueError:\n raise KeyError, id\n \n # make sure it's in our list of children\n if not self.ids.has_key(id):\n raise KeyError, id\n \n # return the posting\n return self.data[id].__of__(self)", "def query_rule_by_id(runtime, idd):\r\n return runtime.policy_parser.query_policy_by_id(idd).rule", "def find_by_id(self, subject_id: str) -> any:\n pass", "def by_id(self, id):\n\n query = self.session.query(self.model_class)\n query = query.filter(self.model_class.id == id)\n return query.first()", "def contains(self, object_id):\n return libplasma.contains(self.conn, object_id)", "def predicate(f):\n wrapper = Predicate(f)\n update_wrapper(wrapper, f)\n return wrapper", "def at(cls, _id):\n return cls.where(cls.primarykey == _id)", "def retract(self, id):\n def _retract(pipe):\n if pipe.hexists(self.feed_items, id):\n pipe.multi()\n pipe.hdel(self.feed_items, id)\n pipe.hdel(self.feed_cancelled, id)\n pipe.zrem(self.feed_published, id)\n pipe.srem(self.feed_stalled, id)\n pipe.zrem(self.feed_claimed, id)\n pipe.lrem(self.feed_ids, 1, id)\n \n self.redis.transaction(_retract, self.feed_items)", "def id_contains(self, id_contains):\n\n self._id_contains = id_contains", "def id_contains(self, id_contains):\n\n self._id_contains = id_contains", "def id_contains(self, id_contains):\n\n self._id_contains = id_contains", "def id_contains(self, id_contains):\n\n self._id_contains = id_contains", "def id_contains(self, id_contains):\n\n self._id_contains = id_contains", "def has_id(subject, id):\n if request.method == 'GET':\n args = get_args(request.args)\n verb = \"list\"\n args[\"id\"] = id\n if request.method == 'PUT':\n args = get_args(request.json)\n verb = \"update\"\n subject = singular[subject];\n args[\"id\"] = id\n if request.method == 'DELETE':\n verb = \"delete\"\n subject = singular[subject];\n return apicall(verb, subject, args);", "def find(cls, animal_id):\n raise NotImplementedError", "def change_predicate(self, new_predicate):\n raise NotImplementedError", "def find_by_id(object_id, items):\n for item in items:\n if object_id == item[\"id\"]:\n return item\n\n raise Exception(f\"Item with {object_id} not found\")", "def _filter(self, probs: Tensor, ids: Tensor) -> Tuple[Tensor, List[int]]:\n raise NotImplementedError" ]
[ "0.7314072", "0.58414406", "0.5774458", "0.5401331", "0.53996676", "0.5343018", "0.5326704", "0.5313459", "0.5280696", "0.527821", "0.5169472", "0.51523036", "0.5150036", "0.5147061", "0.5142404", "0.5136667", "0.51229036", "0.5114203", "0.50794065", "0.50568193", "0.50444686", "0.50444686", "0.50444686", "0.50444686", "0.50444686", "0.5041198", "0.50214225", "0.49828446", "0.4926566", "0.48915362" ]
0.7374118
0
Compute gravity gradient torques.
def _compute_gravity_torque(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n body2satRot = PyRotation(body2sat.q0,\n body2sat.q1,\n body2sat.q2,\n body2sat.q3)\n sat2bodyRot = body2satRot.revert()\n body2sat = body2satRot.getMatrix()\n sat2body = sat2bodyRot.getMatrix()\n\n satM = self.spacecraft_state.getMass()\n mCub = self.inCub['dm'] * satM\n # add booms\n if \"dm_boom\" in self.inCub:\n mCub = np.concatenate((mCub, self.inCub['dm_boom']), axis=0) # boom store with mass\n CoM = self.inCub['CoM_np']\n\n dmPos_s = CoM + self.satPos_s\n\n gNewton = (-self.muGM / np.linalg.norm(dmPos_s,\n axis=1,\n keepdims=True)**3) * dmPos_s\n\n # rotate vectors:\n dmPos_b = np.einsum('ij,kj->ki', sat2body, dmPos_s)\n\n gDist = np.empty(dmPos_b.shape)\n for i in xrange(0, dmPos_b.shape[0]):\n gDist[i, :] = np.asarray(\n self.GravityModel.gradient(curr_date,\n Vector3D(float(dmPos_b[i, 0]),\n float(dmPos_b[i, 1]),\n float(dmPos_b[i, 2])),\n self.muGM))\n\n gDist_s = np.einsum('ij,kj->ki', body2sat, gDist)\n\n gT = np.sum(np.cross(CoM, mCub*(gNewton + gDist_s)), axis=0)\n\n self._gTorque = Vector3D(float(gT[0]), float(gT[1]), float(gT[2]))\n\n else:\n self._gTorque = Vector3D.ZERO", "def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n sat2body = body2sat.revert()\n\n satM = self.state_observer.spacecraftState.getMass()\n mCub = self.inCub['mass_frac'] * satM\n\n self._gTorque = Vector3D.ZERO\n\n for CoM in self.inCub['CoM']:\n\n S_dmPos = self.satPos_s.add(CoM)\n\n r2 = S_dmPos.getNormSq()\n gNewton = Vector3D(-self.muGM / (sqrt(r2) * r2), S_dmPos)\n\n B_dmPos = sat2body.applyTo(S_dmPos)\n\n gDist = Vector3D(self.GravityModel.gradient(curr_date,\n B_dmPos,\n self.muGM))\n\n g_Dist_s = body2sat.applyTo(gDist)\n\n dmForce = Vector3D(mCub, gNewton.add(g_Dist_s))\n self._gTorque = self._gTorque.add(self.V3_cross(CoM, dmForce))\n\n else:\n self._gTorque = Vector3D.ZERO", "def gravity_gradients(self, p):\n Le = self._Le(p)\n wf = self._wf(p)\n EeLe = np.einsum(\"i...,i\", self.Ee, Le)\n FfWf = np.einsum(\"i...,i\", self.Ff, wf)\n return G * self.d * (EeLe - FfWf)", "def gradient_cf(self, potential, get_energy=True):\n xn, xe, lpn, lpe, alpha, o1, o2 = self(None)\n fn_, fe_ = potential((xn, xe))\n fn_ = (fn_ + self.tw * lpn) * self.wn\n fe_ = (fe_ - lpe) * self.we\n fn = fn_ * alpha\n fe = fe_ * alpha\n dmu = tf.math.divide_no_nan(tf.reduce_sum(fn * self.xn, axis=-1, keepdims=True), self.sigma)\n dsg = tf.math.divide_no_nan(tf.reduce_sum(fn * self.x22, axis=-1, keepdims=True), self.sigma)\n dmu1 = tf.math.divide_no_nan(tf.reduce_sum(fe * self.xi, -1, keepdims=True), o1)\n dmu2 = tf.reduce_sum(fe * self.xj, -1, keepdims=True) / o2\n dsg1 = tf.math.divide_no_nan(tf.reduce_sum(fe * self.xi22, -1, keepdims=True), o1)\n dsg2 = tf.reduce_sum(fe * self.xj22, -1, keepdims=True) / o2\n\n dmu += (tf.concat([dmu1, self.top0], 2) + tf.concat(\n [self.btm0, tf.reshape(tf.reduce_sum(tf.reshape(dmu2[:, :, :729, ...], self.s1), [3, 5]), self.s3),\n tf.reshape(tf.reduce_sum(tf.reshape(dmu2[:, :, 729:810, ...], self.s2), [3, 5]), self.s3),\n tf.reduce_sum(dmu2[:, :, 810:, ...], 2, True)], 2))\n\n dsg += (tf.concat([dsg1, self.top0], 2) + tf.concat(\n [self.btm0, 
tf.reshape(tf.reduce_sum(tf.reshape(dsg2[:, :, :729, ...], self.s1), [3, 5]), self.s3),\n tf.reshape(tf.reduce_sum(tf.reshape(dsg2[:, :, 729:810, ...], self.s2), [3, 5]), self.s3),\n tf.reduce_sum(dsg2[:, :, 810:, ...], 2, True)], 2))\n\n dalpha = (tf.reduce_sum(fn_, [2, 4], keepdims=True) + tf.reduce_sum(fe_, [2, 4], keepdims=True))\n dw = alpha * (dalpha - tf.reduce_sum(dalpha * alpha, 3, keepdims=True))\n energy = tf.zeros(fn.shape[:2], tf.float64) if not get_energy else \\\n -(tf.reduce_sum(fn, [2, 3, 4]) + tf.reduce_sum(fe, [2, 3, 4]))\n return (-dmu * sqrt2, -dsg, -dw), energy", "def weight_update_conjugate_gradient(self, network):\n # compute beta: Fletcher-Reeves\n num = 0.0\n for l, layer in enumerate(network.layers):\n num += np.sum(self.dc_db[l] ** 2)\n num += np.sum(self.dc_dq[l] ** 2)\n num += np.sum(self.dc_drx_inp[l] ** 2)\n num += np.sum(self.dc_dry_inp[l] ** 2)\n num += np.sum(self.dc_drx_pos_out[l] ** 2)\n num += np.sum(self.dc_dry_pos_out[l] ** 2)\n num += np.sum(self.dc_drx_neg_out[l] ** 2)\n num += np.sum(self.dc_dry_neg_out[l] ** 2)\n\n # Initialize velocities to zero for momentum\n if self.vel_b is None or self.vel_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n # Take steepest descent step\n for l, layer in enumerate(network.layers):\n layer.b -= self.alpha * self.dc_db[l]\n layer.q -= self.alpha * self.dc_dq[l]\n layer.rx_inp -= self.alpha * self.dc_drx_inp[l]\n layer.ry_inp -= self.alpha * self.dc_dry_inp[l]\n layer.rx_pos_out -= self.alpha * self.dc_drx_pos_out[l]\n layer.ry_pos_out -= self.alpha * self.dc_dry_pos_out[l]\n layer.rx_neg_out -= self.alpha * self.dc_drx_neg_out[l]\n layer.ry_neg_out -= self.alpha * self.dc_dry_neg_out[l]\n\n else:\n # compute beta\n beta = num / self.denominator\n\n # compute s_n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] = -self.alpha * self.dc_db[l] + beta * self.ms_b[l]\n self.ms_q[l] = -self.alpha * self.dc_dq[l] + beta * self.ms_q[l]\n self.ms_rx_inp[l] = -self.alpha * self.dc_drx_inp[l] + beta * self.ms_rx_inp[l]\n self.ms_ry_inp[l] = -self.alpha * self.dc_dry_inp[l] + beta * self.ms_ry_inp[l]\n self.ms_rx_pos_out[l] = -self.alpha * self.dc_drx_pos_out[l] + beta * self.ms_rx_pos_out[l]\n self.ms_ry_pos_out[l] = -self.alpha * self.dc_dry_pos_out[l] + beta * self.ms_ry_pos_out[l]\n self.ms_rx_neg_out[l] = -self.alpha * self.dc_drx_neg_out[l] + beta * self.ms_rx_neg_out[l]\n self.ms_ry_neg_out[l] = -self.alpha * self.dc_dry_neg_out[l] + beta * self.ms_ry_neg_out[l]\n\n # Take step\n for l, layer in enumerate(network.layers):\n layer.b += self.alpha * self.ms_b[l]\n layer.q += self.alpha * self.ms_q[l]\n layer.rx_inp += self.alpha * self.ms_rx_inp[l]\n layer.ry_inp += self.alpha * self.ms_ry_inp[l]\n layer.rx_pos_out += self.alpha * self.ms_rx_pos_out[l]\n layer.ry_pos_out += self.alpha * self.ms_ry_pos_out[l]\n layer.rx_neg_out += self.alpha * self.ms_rx_neg_out[l]\n layer.ry_neg_out += self.alpha * 
self.ms_ry_neg_out[l]\n\n # store num for next iteration to be used as denominator\n self.denominator = num", "def calculate_grad_grad_B_tensor(self, two_ways=False):\n\n # Shortcuts\n s = self\n \n X1c = s.X1c\n Y1s = s.Y1s\n Y1c = s.Y1c\n\n X20 = s.X20\n X2s = s.X2s\n X2c = s.X2c\n\n Y20 = s.Y20\n Y2s = s.Y2s\n Y2c = s.Y2c\n\n Z20 = s.Z20\n Z2s = s.Z2s\n Z2c = s.Z2c\n\n iota_N0 = s.iotaN\n iota = s.iota\n lp = np.abs(s.G0) / s.B0\n\n curvature = s.curvature\n torsion = s.torsion\n\n sign_G = s.sG\n sign_psi = s.spsi\n B0 = s.B0\n G0 = s.G0\n I2 = s.I2\n G2 = s.G2\n p2 = s.p2\n\n B20 = s.B20\n B2s = s.B2s\n B2c = s.B2c\n\n d_X1c_d_varphi = s.d_X1c_d_varphi\n d_Y1s_d_varphi = s.d_Y1s_d_varphi\n d_Y1c_d_varphi = s.d_Y1c_d_varphi\n\n d_X20_d_varphi = s.d_X20_d_varphi\n d_X2s_d_varphi = s.d_X2s_d_varphi\n d_X2c_d_varphi = s.d_X2c_d_varphi\n\n d_Y20_d_varphi = s.d_Y20_d_varphi\n d_Y2s_d_varphi = s.d_Y2s_d_varphi\n d_Y2c_d_varphi = s.d_Y2c_d_varphi\n\n d_Z20_d_varphi = s.d_Z20_d_varphi\n d_Z2s_d_varphi = s.d_Z2s_d_varphi\n d_Z2c_d_varphi = s.d_Z2c_d_varphi\n\n d2_X1c_d_varphi2 = s.d2_X1c_d_varphi2\n d2_Y1s_d_varphi2 = s.d2_Y1s_d_varphi2\n d2_Y1c_d_varphi2 = s.d2_Y1c_d_varphi2\n d_curvature_d_varphi = s.d_curvature_d_varphi\n d_torsion_d_varphi = s.d_torsion_d_varphi\n\n grad_grad_B = np.zeros((s.nphi, 3, 3, 3))\n grad_grad_B_alt = np.zeros((s.nphi, 3, 3, 3))\n\n # The elements that follow are computed in the Mathematica notebook \"20200407-01 Grad grad B tensor near axis\"\n # and then formatted for fortran by the python script process_grad_grad_B_tensor_code\n\n # The order is (normal, binormal, tangent). So element 123 means nbt.\n\n # Element 111\n grad_grad_B[:,0,0,0] =(B0*B0*B0*B0*lp*lp*(8*iota_N0*X2c*Y1c*\\\n Y1s + 4*iota_N0*X2s*\\\n (-Y1c*Y1c + Y1s*Y1s) + \\\n 2*iota_N0*X1c*Y1s*Y20 + \\\n 2*iota_N0*X1c*Y1s*Y2c - \\\n 2*iota_N0*X1c*Y1c*Y2s + \\\n 5*iota_N0*X1c*X1c*Y1c*Y1s*\\\n curvature - \\\n 2*Y1c*Y20*d_X1c_d_varphi + \\\n 2*Y1c*Y2c*d_X1c_d_varphi + \\\n 2*Y1s*Y2s*d_X1c_d_varphi + \\\n 5*X1c*Y1s*Y1s*curvature*\\\n d_X1c_d_varphi + \\\n 2*Y1c*Y1c*d_X20_d_varphi + \\\n 2*Y1s*Y1s*d_X20_d_varphi - \\\n 2*Y1c*Y1c*d_X2c_d_varphi + \\\n 2*Y1s*Y1s*d_X2c_d_varphi - \\\n 4*Y1c*Y1s*d_X2s_d_varphi))/\\\n (G0*G0*G0)\n\n # Element 112\n grad_grad_B[:,0,0,1] =(B0*B0*B0*B0*lp*lp*(Y1c*Y1c*\\\n (-6*iota_N0*Y2s + \\\n 5*iota_N0*X1c*Y1s*\\\n curvature + \\\n 2*(lp*X20*torsion - \\\n lp*X2c*torsion + \\\n d_Y20_d_varphi - \\\n d_Y2c_d_varphi)) + \\\n Y1s*(5*iota_N0*X1c*Y1s*Y1s*\\\n curvature + \\\n 2*(lp*X1c*Y2s*torsion + \\\n Y2s*d_Y1c_d_varphi - \\\n (Y20 + Y2c)*\\\n d_Y1s_d_varphi) + \\\n Y1s*(6*iota_N0*Y2s + \\\n 2*lp*X20*torsion + \\\n 2*lp*X2c*torsion + \\\n 5*lp*X1c*X1c*curvature*\\\n torsion + \\\n 5*X1c*curvature*\\\n d_Y1c_d_varphi + \\\n 2*d_Y20_d_varphi + \\\n 2*d_Y2c_d_varphi)) + \\\n Y1c*(2*(lp*X1c*\\\n (-Y20 + Y2c)*torsion - \\\n Y20*d_Y1c_d_varphi + \\\n Y2c*d_Y1c_d_varphi + \\\n Y2s*d_Y1s_d_varphi) + \\\n Y1s*(12*iota_N0*Y2c - \\\n 4*lp*X2s*torsion - \\\n 5*X1c*curvature*\\\n d_Y1s_d_varphi - \\\n 4*d_Y2s_d_varphi))))/(G0*G0*G0)\n\n # Element 113\n grad_grad_B[:,0,0,2] =-((B0*B0*B0*lp*lp*(2*Y1c*Y1c*\\\n (2*B2c*G0*lp + B0*G2*lp + B0*I2*lp*iota - \\\n 2*G0*lp*B20 + 2*B0*G0*iota_N0*Z2s + \\\n B0*G0*lp*X20*curvature - \\\n B0*G0*lp*X2c*curvature - \\\n B0*G0*d_Z20_d_varphi + \\\n B0*G0*d_Z2c_d_varphi) + \\\n Y1s*(-2*B0*G0*lp*X1c*Y2s*\\\n curvature + \\\n Y1s*(-4*B2c*G0*lp + 2*B0*G2*lp + \\\n 2*B0*I2*lp*iota - 4*G0*lp*B20 - \\\n 4*B0*G0*iota_N0*Z2s + \\\n 2*B0*G0*lp*X20*curvature + \\\n 
2*B0*G0*lp*X2c*curvature + \\\n B0*G0*lp*X1c*X1c*curvature*curvature - \\\n 2*B0*G0*d_Z20_d_varphi - \\\n 2*B0*G0*d_Z2c_d_varphi)) + \\\n 2*G0*Y1c*(B0*lp*X1c*\\\n (Y20 - Y2c)*curvature + \\\n 2*Y1s*(2*B2s*lp - 2*B0*iota_N0*Z2c - \\\n B0*lp*X2s*curvature + \\\n B0*d_Z2s_d_varphi))))/(G0*G0*G0*G0))\n\n # Element 121\n grad_grad_B[:,0,1,0] =-((B0*B0*B0*B0*lp*lp*(3*iota_N0*X1c*X1c*X1c*Y1s*\\\n curvature + \\\n 3*lp*X1c*X1c*Y1s*Y1s*curvature*\\\n torsion + \\\n 2*(X2s*Y1s*\\\n (-2*lp*Y1c*torsion + \\\n d_X1c_d_varphi) + \\\n X20*(lp*Y1c*Y1c*torsion + \\\n lp*Y1s*Y1s*torsion - \\\n Y1c*d_X1c_d_varphi) + \\\n X2c*(-(lp*Y1c*Y1c*\\\n torsion) + \\\n lp*Y1s*Y1s*torsion + \\\n Y1c*d_X1c_d_varphi)) - \\\n 2*X1c*(3*iota_N0*X2s*Y1c - \\\n iota_N0*X20*Y1s - \\\n 3*iota_N0*X2c*Y1s + \\\n lp*Y1c*Y20*torsion - \\\n lp*Y1c*Y2c*torsion - \\\n lp*Y1s*Y2s*torsion - \\\n Y1c*d_X20_d_varphi + \\\n Y1c*d_X2c_d_varphi + \\\n Y1s*d_X2s_d_varphi)))/\\\n (G0*G0*G0))\n\n # Element 122\n grad_grad_B[:,0,1,1] =(B0*B0*B0*B0*lp*lp*(-4*iota_N0*X1c*Y1s*\\\n Y2c + 4*iota_N0*X1c*Y1c*\\\n Y2s - 3*iota_N0*X1c*X1c*Y1c*\\\n Y1s*curvature + \\\n 2*X20*Y1c*d_Y1c_d_varphi + \\\n 2*X20*Y1s*d_Y1s_d_varphi + \\\n 3*X1c*X1c*Y1s*curvature*\\\n d_Y1s_d_varphi + \\\n 2*X2s*(iota_N0*Y1c*Y1c - \\\n Y1s*(iota_N0*Y1s + \\\n d_Y1c_d_varphi) - \\\n Y1c*d_Y1s_d_varphi) - \\\n 2*X2c*(Y1c*\\\n (2*iota_N0*Y1s + d_Y1c_d_varphi) \\\n - Y1s*d_Y1s_d_varphi) - \\\n 2*X1c*Y1c*d_Y20_d_varphi + \\\n 2*X1c*Y1c*d_Y2c_d_varphi + \\\n 2*X1c*Y1s*d_Y2s_d_varphi))/\\\n (G0*G0*G0)\n # (2*iota_N0*Y1s + d_Y1c_d_varphi) \\\\\n\n # Element 123\n grad_grad_B[:,0,1,2] =(2*B0*B0*B0*lp*lp*X1c*\\\n (Y1c*(2*B2c*G0*lp + B0*G2*lp + B0*I2*lp*iota - \\\n 2*G0*lp*B20 + 2*B0*G0*iota_N0*Z2s + \\\n 2*B0*G0*lp*X20*curvature - \\\n 2*B0*G0*lp*X2c*curvature - \\\n B0*G0*d_Z20_d_varphi + \\\n B0*G0*d_Z2c_d_varphi) + \\\n G0*Y1s*(2*B2s*lp - 2*B0*iota_N0*Z2c - \\\n 2*B0*lp*X2s*curvature + \\\n B0*d_Z2s_d_varphi)))/(G0*G0*G0*G0)\n\n # Element 131\n grad_grad_B[:,0,2,0] =(B0*B0*B0*B0*lp*(-4*lp*lp*X2s*Y1c*Y1s*\\\n curvature + \\\n 2*lp*lp*X2c*(-Y1c*Y1c + Y1s*Y1s)*\\\n curvature + \\\n 2*lp*lp*X20*(Y1c*Y1c + Y1s*Y1s)*\\\n curvature - \\\n 2*lp*lp*X1c*Y1c*Y20*\\\n curvature + \\\n 2*lp*lp*X1c*Y1c*Y2c*\\\n curvature + \\\n 2*lp*lp*X1c*Y1s*Y2s*\\\n curvature + \\\n 3*lp*lp*X1c*X1c*Y1s*Y1s*\\\n curvature*curvature + \\\n lp*iota_N0*X1c*X1c*X1c*Y1s*\\\n torsion - lp*iota_N0*X1c*\\\n Y1c*Y1c*Y1s*torsion - \\\n lp*iota_N0*X1c*Y1s*Y1s*Y1s*\\\n torsion - Y1s*Y1s*\\\n d_X1c_d_varphi*d_X1c_d_varphi + \\\n iota_N0*X1c*X1c*Y1s*\\\n d_Y1c_d_varphi - \\\n lp*X1c*Y1s*Y1s*torsion*\\\n d_Y1c_d_varphi - \\\n iota_N0*X1c*X1c*Y1c*\\\n d_Y1s_d_varphi + \\\n lp*X1c*Y1c*Y1s*\\\n torsion*d_Y1s_d_varphi + \\\n X1c*Y1s*Y1s*d2_X1c_d_varphi2))/\\\n (G0*G0*G0)\n\n # Element 132\n grad_grad_B[:,0,2,1] =(B0*B0*B0*B0*lp*(-(Y1s*d_X1c_d_varphi*\\\n (iota_N0*Y1c*Y1c + \\\n Y1s*(iota_N0*Y1s + \\\n d_Y1c_d_varphi) - \\\n Y1c*d_Y1s_d_varphi)) + \\\n lp*X1c*X1c*Y1s*\\\n (2*iota_N0*Y1c*torsion - \\\n torsion*d_Y1s_d_varphi + \\\n Y1s*d_torsion_d_varphi) + \\\n X1c*(Y1c*d_Y1s_d_varphi*\\\n (-(iota_N0*Y1c) + d_Y1s_d_varphi) \\\n + Y1s*Y1s*(lp*torsion*\\\n d_X1c_d_varphi + \\\n iota_N0*d_Y1s_d_varphi + \\\n d2_Y1c_d_varphi2) - \\\n Y1s*(d_Y1c_d_varphi*\\\n d_Y1s_d_varphi + \\\n Y1c*(-2*iota_N0*d_Y1c_d_varphi + \\\n d2_Y1s_d_varphi2)))))/(G0*G0*G0)\n # (-(iota_N0*Y1c) + d_Y1s_d_varphi) \\\\\n\n # Element 133\n grad_grad_B[:,0,2,2] =(B0*B0*B0*B0*lp*lp*X1c*Y1s*\\\n (-(Y1s*curvature*\\\n d_X1c_d_varphi) + \\\n X1c*(-(iota_N0*Y1c*\\\n 
curvature) + \\\n Y1s*d_curvature_d_varphi)))/\\\n (G0*G0*G0)\n\n # Element 211\n grad_grad_B[:,1,0,0] =(-2*B0*B0*B0*B0*lp*lp*X1c*\\\n (-2*iota_N0*X2s*Y1c + \\\n 2*iota_N0*X2c*Y1s - \\\n iota_N0*X1c*Y2s + \\\n iota_N0*X1c*X1c*Y1s*curvature + \\\n lp*X1c*Y1s*Y1s*curvature*\\\n torsion - Y20*\\\n d_X1c_d_varphi + \\\n Y2c*d_X1c_d_varphi + \\\n Y1c*d_X20_d_varphi - \\\n Y1c*d_X2c_d_varphi - \\\n Y1s*d_X2s_d_varphi))/(G0*G0*G0)\n\n # Element 212\n grad_grad_B[:,1,0,1] =(2*B0*B0*B0*B0*lp*lp*X1c*\\\n (lp*X1c*Y20*torsion - \\\n lp*X1c*Y2c*torsion + \\\n Y20*d_Y1c_d_varphi - \\\n Y2c*d_Y1c_d_varphi - \\\n Y2s*d_Y1s_d_varphi + \\\n Y1c*(3*iota_N0*Y2s - \\\n lp*X20*torsion + \\\n lp*X2c*torsion - \\\n d_Y20_d_varphi + d_Y2c_d_varphi) \\\n + Y1s*(iota_N0*Y20 - \\\n 3*iota_N0*Y2c - \\\n iota_N0*X1c*Y1c*curvature + \\\n lp*X2s*torsion + \\\n X1c*curvature*\\\n d_Y1s_d_varphi + d_Y2s_d_varphi))\\\n )/(G0*G0*G0)\n # d_Y20_d_varphi + d_Y2c_d_varphi) \\\\\n # d_Y1s_d_varphi + d_Y2s_d_varphi))\\\\\n\n # Element 213\n grad_grad_B[:,1,0,2] =(2*B0*B0*B0*lp*lp*X1c*\\\n (Y1c*(2*B2c*G0*lp + B0*G2*lp + B0*I2*lp*iota - \\\n 2*G0*lp*B20 + 2*B0*G0*iota_N0*Z2s + \\\n B0*G0*lp*X20*curvature - \\\n B0*G0*lp*X2c*curvature - \\\n B0*G0*d_Z20_d_varphi + \\\n B0*G0*d_Z2c_d_varphi) + \\\n G0*(B0*lp*X1c*(Y20 - Y2c)*\\\n curvature + \\\n Y1s*(2*B2s*lp - 2*B0*iota_N0*Z2c - \\\n B0*lp*X2s*curvature + \\\n B0*d_Z2s_d_varphi))))/(G0*G0*G0*G0)\n\n # Element 221\n grad_grad_B[:,1,1,0] =(-2*B0*B0*B0*B0*lp*lp*X1c*\\\n (lp*X2c*Y1c*torsion + \\\n lp*X2s*Y1s*torsion - \\\n X2c*d_X1c_d_varphi + \\\n X20*(-(lp*Y1c*torsion) + \\\n d_X1c_d_varphi) + \\\n X1c*(3*iota_N0*X2s + \\\n lp*Y20*torsion - \\\n lp*Y2c*torsion - \\\n d_X20_d_varphi + d_X2c_d_varphi)))/\\\n (G0*G0*G0)\n\n # Element 222\n grad_grad_B[:,1,1,1] =(-2*B0*B0*B0*B0*lp*lp*X1c*\\\n (-(iota_N0*X2c*Y1s) + \\\n 2*iota_N0*X1c*Y2s - \\\n X2c*d_Y1c_d_varphi + \\\n X20*(iota_N0*Y1s + \\\n d_Y1c_d_varphi) + \\\n X2s*(iota_N0*Y1c - \\\n d_Y1s_d_varphi) - \\\n X1c*d_Y20_d_varphi + \\\n X1c*d_Y2c_d_varphi))/(G0*G0*G0)\n\n # Element 223\n grad_grad_B[:,1,1,2] =(-2*B0*B0*B0*lp*lp*X1c*X1c*\\\n (2*B2c*G0*lp + B0*G2*lp + B0*I2*lp*iota - 2*G0*lp*B20 + \\\n 2*B0*G0*iota_N0*Z2s + \\\n 2*B0*G0*lp*X20*curvature - \\\n 2*B0*G0*lp*X2c*curvature - \\\n B0*G0*d_Z20_d_varphi + \\\n B0*G0*d_Z2c_d_varphi))/(G0*G0*G0*G0)\n\n # Element 231\n grad_grad_B[:,1,2,0] =(B0*B0*B0*B0*lp*X1c*(-2*lp*lp*X20*Y1c*\\\n curvature + \\\n 2*lp*lp*X2c*Y1c*curvature + \\\n 2*lp*lp*X2s*Y1s*curvature + \\\n 2*lp*lp*X1c*Y20*curvature - \\\n 2*lp*lp*X1c*Y2c*curvature + \\\n 2*lp*iota_N0*X1c*Y1c*Y1s*\\\n torsion - iota_N0*X1c*Y1s*\\\n d_X1c_d_varphi + \\\n lp*Y1s*Y1s*torsion*\\\n d_X1c_d_varphi + \\\n iota_N0*X1c*X1c*d_Y1s_d_varphi - \\\n lp*X1c*Y1s*torsion*\\\n d_Y1s_d_varphi - \\\n lp*X1c*Y1s*Y1s*\\\n d_torsion_d_varphi))/(G0*G0*G0)\n\n # Element 232\n grad_grad_B[:,1,2,1] =(B0*B0*B0*B0*lp*X1c*(-(lp*iota_N0*X1c*X1c*\\\n Y1s*torsion) + \\\n lp*Y1s*torsion*\\\n (iota_N0*Y1c*Y1c + \\\n Y1s*(iota_N0*Y1s + \\\n d_Y1c_d_varphi) - \\\n Y1c*d_Y1s_d_varphi) + \\\n X1c*((iota_N0*Y1c - \\\n d_Y1s_d_varphi)*d_Y1s_d_varphi \\\n + Y1s*(-(iota_N0*d_Y1c_d_varphi) + \\\n d2_Y1s_d_varphi2))))/(G0*G0*G0)\n # d_Y1s_d_varphi)*d_Y1s_d_varphi \\\\\n\n # Element 233\n grad_grad_B[:,1,2,2] =(B0*B0*B0*B0*lp*lp*X1c*X1c*Y1s*curvature*\\\n (iota_N0*X1c + 2*lp*Y1s*torsion))/\\\n (G0*G0*G0)\n\n # Element 311\n grad_grad_B[:,2,0,0] =(B0*B0*B0*B0*lp*X1c*Y1s*\\\n (lp*iota_N0*X1c*X1c*torsion - \\\n lp*iota_N0*Y1c*Y1c*torsion - \\\n lp*iota_N0*Y1s*Y1s*torsion - 
\\\n lp*Y1s*torsion*\\\n d_Y1c_d_varphi + \\\n X1c*(2*lp*lp*Y1s*curvature*curvature + \\\n iota_N0*d_Y1c_d_varphi) + \\\n d_X1c_d_varphi*d_Y1s_d_varphi + \\\n Y1c*(iota_N0*d_X1c_d_varphi + \\\n lp*torsion*d_Y1s_d_varphi) + \\\n Y1s*d2_X1c_d_varphi2))/(G0*G0*G0)\n\n # Element 312\n grad_grad_B[:,2,0,1] =(B0*B0*B0*B0*lp*X1c*Y1s*\\\n (lp*X1c*(2*iota_N0*Y1c*\\\n torsion + \\\n Y1s*d_torsion_d_varphi) + \\\n Y1s*(2*lp*torsion*\\\n d_X1c_d_varphi + \\\n 2*iota_N0*d_Y1s_d_varphi + \\\n d2_Y1c_d_varphi2) + \\\n Y1c*(2*iota_N0*d_Y1c_d_varphi - \\\n d2_Y1s_d_varphi2)))/(G0*G0*G0)\n\n # Element 313\n grad_grad_B[:,2,0,2] =(B0*B0*B0*B0*lp*lp*X1c*X1c*Y1s*\\\n (-(iota_N0*Y1c*curvature) + \\\n curvature*d_Y1s_d_varphi + \\\n Y1s*d_curvature_d_varphi))/\\\n (G0*G0*G0)\n\n # Element 321\n grad_grad_B[:,2,1,0] =-((B0*B0*B0*B0*lp*X1c*X1c*Y1s*\\\n (-2*lp*iota_N0*Y1c*torsion + \\\n 2*iota_N0*d_X1c_d_varphi + \\\n 2*lp*torsion*d_Y1s_d_varphi + \\\n lp*Y1s*d_torsion_d_varphi))/\\\n (G0*G0*G0))\n\n # Element 322\n grad_grad_B[:,2,1,1] =-((B0*B0*B0*B0*lp*X1c*Y1s*\\\n (lp*iota_N0*X1c*X1c*torsion - \\\n lp*iota_N0*Y1c*Y1c*torsion - \\\n lp*iota_N0*Y1s*Y1s*torsion - \\\n lp*Y1s*torsion*\\\n d_Y1c_d_varphi - \\\n d_X1c_d_varphi*d_Y1s_d_varphi + \\\n Y1c*(iota_N0*d_X1c_d_varphi + \\\n lp*torsion*d_Y1s_d_varphi) + \\\n X1c*(iota_N0*d_Y1c_d_varphi - \\\n d2_Y1s_d_varphi2)))/(G0*G0*G0))\n\n # Element 323\n grad_grad_B[:,2,1,2] =(B0*B0*B0*B0*lp*lp*X1c*X1c*Y1s*curvature*\\\n (iota_N0*X1c + 2*lp*Y1s*torsion))/\\\n (G0*G0*G0)\n\n # Element 331\n grad_grad_B[:,2,2,0] =(B0*B0*B0*B0*lp*lp*X1c*X1c*Y1s*\\\n (-(iota_N0*Y1c*curvature) + \\\n curvature*d_Y1s_d_varphi + \\\n Y1s*d_curvature_d_varphi))/\\\n (G0*G0*G0)\n\n # Element 332\n grad_grad_B[:,2,2,1] =-((B0*B0*B0*B0*lp*lp*X1c*Y1s*curvature*\\\n (iota_N0*Y1c*Y1c + \\\n Y1s*(iota_N0*Y1s + \\\n d_Y1c_d_varphi) - \\\n Y1c*d_Y1s_d_varphi))/(G0*G0*G0))\n\n # Element 333\n grad_grad_B[:,2,2,2] =(-2*B0*B0*B0*B0*lp*lp*lp*X1c*X1c*Y1s*Y1s*\\\n curvature*curvature)/(G0*G0*G0)\n\n\n self.grad_grad_B = grad_grad_B\n\n # Compute the (inverse) scale length\n squared = grad_grad_B * grad_grad_B\n norm_squared = np.sum(squared, axis=(1,2,3))\n self.grad_grad_B_inverse_scale_length_vs_varphi = np.sqrt(np.sqrt(norm_squared) / (4*B0))\n self.grad_grad_B_inverse_scale_length = np.max(self.grad_grad_B_inverse_scale_length_vs_varphi)\n\n if not two_ways:\n return\n\n # Build the whole tensor again using Rogerio's approach,\n # \"20200424-01 Rogerio's GradGradB calculation.nb\"\n # and verify the two calculations match.\n\n # Element 111\n grad_grad_B_alt[:,0,0,0] =(-2*B0*(-4*sign_G*sign_psi*iota_N0*X2c*Y1c*\\\n Y1s + iota_N0*X1c*X1c*\\\n Y1c*(Y1c*\\\n (-Y20 + Y2c) + \\\n Y1s*(Y2s - \\\n 2*sign_G*sign_psi*curvature)) + \\\n X20*Y1c*Y1c*Y1s*\\\n d_X1c_d_varphi - \\\n X2c*Y1c*Y1c*Y1s*\\\n d_X1c_d_varphi + \\\n X20*Y1s*Y1s*Y1s*\\\n d_X1c_d_varphi + \\\n X2c*Y1s*Y1s*Y1s*\\\n d_X1c_d_varphi + \\\n sign_G*sign_psi*Y1c*Y20*\\\n d_X1c_d_varphi - \\\n sign_G*sign_psi*Y1c*Y2c*\\\n d_X1c_d_varphi - \\\n sign_G*sign_psi*Y1s*Y2s*\\\n d_X1c_d_varphi - \\\n 2*X2s*(sign_G*sign_psi*iota_N0*Y1s*Y1s + \\\n Y1c*Y1c*\\\n (-(sign_G*sign_psi*iota_N0) + \\\n iota_N0*X1c*Y1s) + \\\n Y1c*Y1s*Y1s*\\\n d_X1c_d_varphi) + \\\n X1c*(iota_N0*X2c*Y1c*\\\n (-Y1c*Y1c + Y1s*Y1s) + \\\n iota_N0*X20*Y1c*\\\n (Y1c*Y1c + Y1s*Y1s) - \\\n sign_G*sign_psi*iota_N0*Y1s*Y20 - \\\n sign_G*sign_psi*iota_N0*Y1s*Y2c + \\\n sign_G*sign_psi*iota_N0*Y1c*Y2s - \\\n Y1c*Y1s*Y20*\\\n d_X1c_d_varphi + \\\n Y1c*Y1s*Y2c*\\\n d_X1c_d_varphi + \\\n 
Y1s*Y1s*Y2s*\\\n d_X1c_d_varphi - \\\n 2*sign_G*sign_psi*Y1s*Y1s*curvature*\\\n d_X1c_d_varphi) - \\\n sign_G*sign_psi*Y1c*Y1c*d_X20_d_varphi - \\\n sign_G*sign_psi*Y1s*Y1s*d_X20_d_varphi + \\\n sign_G*sign_psi*Y1c*Y1c*d_X2c_d_varphi - \\\n sign_G*sign_psi*Y1s*Y1s*d_X2c_d_varphi + \\\n 2*sign_G*sign_psi*Y1c*Y1s*\\\n d_X2s_d_varphi))/(lp*sign_psi)\n\n # Element 112\n grad_grad_B_alt[:,0,0,1] =(2*B0*(2*iota_N0*X2s*Y1c*Y1c*Y1c*\\\n Y1s + 2*iota_N0*X2s*\\\n Y1c*Y1s*Y1s*Y1s + \\\n iota_N0*X1c*Y1c*Y1c*Y1c*\\\n Y20 + iota_N0*X1c*Y1c*\\\n Y1s*Y1s*Y20 - \\\n iota_N0*X1c*Y1c*Y1c*Y1c*\\\n Y2c + 6*sign_G*sign_psi*iota_N0*Y1c*\\\n Y1s*Y2c - \\\n iota_N0*X1c*Y1c*Y1s*Y1s*\\\n Y2c - 3*sign_G*sign_psi*iota_N0*Y1c*Y1c*\\\n Y2s - iota_N0*X1c*\\\n Y1c*Y1c*Y1s*Y2s + \\\n 3*sign_G*sign_psi*iota_N0*Y1s*Y1s*Y2s - \\\n iota_N0*X1c*Y1s*Y1s*Y1s*\\\n Y2s + 2*sign_G*sign_psi*iota_N0*X1c*\\\n Y1c*Y1c*Y1s*curvature + \\\n 2*sign_G*sign_psi*iota_N0*X1c*Y1s*Y1s*Y1s*\\\n curvature - \\\n 2*lp*sign_G*sign_psi*X2s*Y1c*\\\n Y1s*torsion + \\\n 2*lp*X1c*X2s*Y1c*\\\n Y1s*Y1s*torsion - \\\n lp*sign_G*sign_psi*X1c*Y1c*\\\n Y20*torsion + \\\n lp*X1c*X1c*Y1c*Y1s*\\\n Y20*torsion + \\\n lp*sign_G*sign_psi*X1c*Y1c*\\\n Y2c*torsion - \\\n lp*X1c*X1c*Y1c*Y1s*\\\n Y2c*torsion + \\\n lp*sign_G*sign_psi*X1c*Y1s*\\\n Y2s*torsion - \\\n lp*X1c*X1c*Y1s*Y1s*Y2s*\\\n torsion + \\\n 2*lp*sign_G*sign_psi*X1c*X1c*Y1s*Y1s*\\\n curvature*torsion + \\\n 2*X2s*Y1c*Y1s*Y1s*\\\n d_Y1c_d_varphi - \\\n sign_G*sign_psi*Y1c*Y20*\\\n d_Y1c_d_varphi + \\\n X1c*Y1c*Y1s*\\\n Y20*d_Y1c_d_varphi + \\\n sign_G*sign_psi*Y1c*Y2c*\\\n d_Y1c_d_varphi - \\\n X1c*Y1c*Y1s*\\\n Y2c*d_Y1c_d_varphi + \\\n sign_G*sign_psi*Y1s*Y2s*\\\n d_Y1c_d_varphi - \\\n X1c*Y1s*Y1s*Y2s*\\\n d_Y1c_d_varphi + \\\n 2*sign_G*sign_psi*X1c*Y1s*Y1s*\\\n curvature*d_Y1c_d_varphi - \\\n 2*X2s*Y1c*Y1c*Y1s*\\\n d_Y1s_d_varphi - \\\n X1c*Y1c*Y1c*Y20*\\\n d_Y1s_d_varphi - \\\n sign_G*sign_psi*Y1s*Y20*\\\n d_Y1s_d_varphi + \\\n X1c*Y1c*Y1c*Y2c*\\\n d_Y1s_d_varphi - \\\n sign_G*sign_psi*Y1s*Y2c*\\\n d_Y1s_d_varphi + \\\n sign_G*sign_psi*Y1c*Y2s*\\\n d_Y1s_d_varphi + \\\n X1c*Y1c*Y1s*\\\n Y2s*d_Y1s_d_varphi - \\\n 2*sign_G*sign_psi*X1c*Y1c*Y1s*\\\n curvature*d_Y1s_d_varphi + \\\n X2c*(Y1c*Y1c - Y1s*Y1s)*\\\n (iota_N0*Y1c*Y1c + iota_N0*Y1s*Y1s - \\\n lp*sign_G*sign_psi*torsion + \\\n Y1s*(lp*X1c*torsion + \\\n d_Y1c_d_varphi) - \\\n Y1c*d_Y1s_d_varphi) - \\\n X20*(Y1c*Y1c + Y1s*Y1s)*\\\n (iota_N0*Y1c*Y1c + iota_N0*Y1s*Y1s - \\\n lp*sign_G*sign_psi*torsion + \\\n Y1s*(lp*X1c*torsion + \\\n d_Y1c_d_varphi) - \\\n Y1c*d_Y1s_d_varphi) + \\\n sign_G*sign_psi*Y1c*Y1c*d_Y20_d_varphi + \\\n sign_G*sign_psi*Y1s*Y1s*d_Y20_d_varphi - \\\n sign_G*sign_psi*Y1c*Y1c*d_Y2c_d_varphi + \\\n sign_G*sign_psi*Y1s*Y1s*d_Y2c_d_varphi - \\\n 2*sign_G*sign_psi*Y1c*Y1s*\\\n d_Y2s_d_varphi))/(lp*sign_psi)\n\n # Element 113\n grad_grad_B_alt[:,0,0,2] =(-2*(Y1c*Y1c*(G2*sign_psi + I2*sign_psi*iota - \\\n 2*lp*sign_G*sign_psi*B20 + \\\n 2*lp*sign_G*sign_psi*B2c + \\\n 2*B0*sign_G*sign_psi*iota_N0*Z2s + \\\n B0*lp*sign_G*sign_psi*X20*curvature - \\\n B0*lp*sign_G*sign_psi*X2c*curvature + \\\n B0*lp*X1c*X20*Y1s*\\\n curvature - \\\n B0*lp*X1c*X2c*Y1s*\\\n curvature - \\\n B0*sign_G*sign_psi*d_Z20_d_varphi + \\\n B0*sign_G*sign_psi*d_Z2c_d_varphi) + \\\n Y1s*(B0*lp*X1c*\\\n (X20 + X2c)*Y1s*Y1s*\\\n curvature - \\\n B0*lp*sign_G*sign_psi*X1c*Y2s*\\\n curvature + \\\n Y1s*(G2*sign_psi + I2*sign_psi*iota - \\\n 2*lp*sign_G*sign_psi*B20 - \\\n 2*lp*sign_G*sign_psi*B2c - \\\n 2*B0*sign_G*sign_psi*iota_N0*Z2s + \\\n 
B0*lp*sign_G*sign_psi*X20*curvature + \\\n B0*lp*sign_G*sign_psi*X2c*curvature + \\\n B0*lp*X1c*X1c*Y2s*\\\n curvature + \\\n B0*lp*sign_G*sign_psi*X1c*X1c*\\\n curvature*curvature - \\\n B0*sign_G*sign_psi*d_Z20_d_varphi - \\\n B0*sign_G*sign_psi*d_Z2c_d_varphi)) + \\\n Y1c*(4*lp*sign_G*sign_psi*B2s*\\\n Y1s - \\\n B0*(2*lp*X1c*X2s*\\\n Y1s*Y1s*curvature + \\\n lp*sign_G*sign_psi*X1c*\\\n (-Y20 + Y2c)*\\\n curvature + \\\n Y1s*\\\n (4*sign_G*sign_psi*iota_N0*Z2c + \\\n 2*lp*sign_G*sign_psi*X2s*curvature + \\\n lp*X1c*X1c*Y20*\\\n curvature - \\\n lp*X1c*X1c*Y2c*\\\n curvature - \\\n 2*sign_G*sign_psi*d_Z2s_d_varphi)))))/\\\n (lp*sign_psi)\n\n # Element 121\n grad_grad_B_alt[:,0,1,0] =(-2*B0*(iota_N0*X1c*X1c*X1c*\\\n (Y1c*(Y20 - Y2c) + \\\n Y1s*(-Y2s + \\\n sign_G*sign_psi*curvature)) - \\\n X1c*X1c*(iota_N0*X2c*\\\n (-Y1c*Y1c + Y1s*Y1s) + \\\n iota_N0*X20*\\\n (Y1c*Y1c + Y1s*Y1s) + \\\n Y1s*(-2*iota_N0*X2s*\\\n Y1c + \\\n lp*(Y1c*\\\n (-Y20 + Y2c) + \\\n Y1s*\\\n (Y2s - sign_G*sign_psi*curvature))*\\\n torsion)) + \\\n sign_G*sign_psi*(X2s*Y1s*\\\n (-2*lp*Y1c*torsion + \\\n d_X1c_d_varphi) + \\\n X20*(lp*Y1c*Y1c*\\\n torsion + \\\n lp*Y1s*Y1s*torsion - \\\n Y1c*d_X1c_d_varphi) + \\\n X2c*(-(lp*Y1c*Y1c*\\\n torsion) + \\\n lp*Y1s*Y1s*torsion + \\\n Y1c*d_X1c_d_varphi)) + \\\n X1c*(3*sign_G*sign_psi*iota_N0*X2c*\\\n Y1s + \\\n lp*X2c*Y1c*Y1c*Y1s*\\\n torsion - \\\n lp*X2c*Y1s*Y1s*Y1s*\\\n torsion - \\\n lp*sign_G*sign_psi*Y1c*Y20*\\\n torsion + \\\n lp*sign_G*sign_psi*Y1c*Y2c*\\\n torsion + \\\n lp*sign_G*sign_psi*Y1s*Y2s*\\\n torsion - \\\n X20*Y1s*\\\n (-(sign_G*sign_psi*iota_N0) + \\\n lp*Y1c*Y1c*torsion + \\\n lp*Y1s*Y1s*torsion) + \\\n X2s*Y1c*\\\n (-3*sign_G*sign_psi*iota_N0 + \\\n 2*lp*Y1s*Y1s*torsion) + \\\n sign_G*sign_psi*Y1c*d_X20_d_varphi - \\\n sign_G*sign_psi*Y1c*d_X2c_d_varphi - \\\n sign_G*sign_psi*Y1s*d_X2s_d_varphi)))/\\\n (lp*sign_psi)\n\n # Element 122\n grad_grad_B_alt[:,0,1,1] =(2*B0*(-(X1c*X1c*\\\n (Y1c*(Y20 - Y2c) + \\\n Y1s*(-Y2s + \\\n sign_G*sign_psi*curvature))*\\\n (iota_N0*Y1c - d_Y1s_d_varphi)) \\\n + X2s*(iota_N0*Y1c*Y1c*\\\n (sign_G*sign_psi - 2*X1c*Y1s) - \\\n sign_G*sign_psi*Y1s*\\\n (iota_N0*Y1s + \\\n d_Y1c_d_varphi) + \\\n Y1c*(-(sign_G*sign_psi) + \\\n 2*X1c*Y1s)*\\\n d_Y1s_d_varphi) + \\\n sign_G*sign_psi*(X20*\\\n (Y1c*d_Y1c_d_varphi + \\\n Y1s*d_Y1s_d_varphi) + \\\n X2c*(-(Y1c*\\\n (2*iota_N0*Y1s + \\\n d_Y1c_d_varphi)) + \\\n Y1s*d_Y1s_d_varphi)) + \\\n X1c*(-(X2c*\\\n (Y1c*Y1c - Y1s*Y1s)*\\\n (iota_N0*Y1c - \\\n d_Y1s_d_varphi)) + \\\n X20*(Y1c*Y1c + Y1s*Y1s)*\\\n (iota_N0*Y1c - d_Y1s_d_varphi) \\\n + sign_G*sign_psi*(Y1c*\\\n (2*iota_N0*Y2s - \\\n d_Y20_d_varphi + \\\n d_Y2c_d_varphi) + \\\n Y1s*\\\n (-2*iota_N0*Y2c + \\\n d_Y2s_d_varphi)))))/(lp*sign_psi)\n\n # Element 123\n grad_grad_B_alt[:,0,1,2] =(2*X1c*(Y1c*\\\n (G2 + I2*iota - 2*lp*sign_G*B20 + \\\n 2*lp*sign_G*B2c + \\\n 2*B0*sign_G*iota_N0*Z2s + \\\n 2*B0*lp*sign_G*X20*curvature - \\\n 2*B0*lp*sign_G*X2c*curvature - \\\n B0*sign_G*d_Z20_d_varphi + \\\n B0*sign_G*d_Z2c_d_varphi) + \\\n sign_G*Y1s*(2*lp*B2s + \\\n B0*(-2*iota_N0*Z2c - \\\n 2*lp*X2s*curvature + \\\n d_Z2s_d_varphi))))/(lp)\n\n # Element 131\n grad_grad_B_alt[:,0,2,0] =(B0*(-(lp*sign_G*sign_psi*iota_N0*Y1c*Y1c*\\\n torsion) + \\\n lp*iota_N0*X1c*X1c*X1c*Y1s*\\\n torsion + \\\n X1c*X1c*(lp*lp*Y1s*Y1s*\\\n torsion*torsion + \\\n iota_N0*Y1s*d_Y1c_d_varphi - \\\n iota_N0*Y1c*d_Y1s_d_varphi) + \\\n sign_G*sign_psi*Y1c*\\\n (iota_N0*d_X1c_d_varphi + \\\n 2*lp*torsion*d_Y1s_d_varphi) + \\\n X1c*Y1s*\\\n 
(2*lp*lp*sign_G*sign_psi*curvature*curvature - \\\n lp*lp*sign_G*sign_psi*torsion*torsion - \\\n iota_N0*Y1c*d_X1c_d_varphi + \\\n lp*torsion*\\\n (Y1s*d_Y1c_d_varphi - \\\n Y1c*d_Y1s_d_varphi)) - \\\n Y1s*(Y1s*\\\n (lp*sign_G*sign_psi*iota_N0*torsion + \\\n d_X1c_d_varphi*d_X1c_d_varphi) + \\\n sign_G*sign_psi*(2*lp*torsion*\\\n d_Y1c_d_varphi - \\\n d2_X1c_d_varphi2))))/(lp*lp*sign_G)\n\n # Element 132\n grad_grad_B_alt[:,0,2,1] =(B0*(-(iota_N0*Y1c*Y1c*Y1s*\\\n d_X1c_d_varphi) + \\\n lp*X1c*X1c*Y1s*torsion*\\\n (iota_N0*Y1c - d_Y1s_d_varphi) + \\\n X1c*(-(iota_N0*Y1c*Y1c*\\\n d_Y1s_d_varphi) + \\\n Y1c*(lp*sign_G*sign_psi*iota_N0*\\\n torsion + \\\n iota_N0*Y1s*\\\n d_Y1c_d_varphi + \\\n d_Y1s_d_varphi*d_Y1s_d_varphi) - \\\n Y1s*(lp*Y1s*torsion*\\\n d_X1c_d_varphi + \\\n d_Y1c_d_varphi*\\\n d_Y1s_d_varphi - \\\n lp*sign_G*sign_psi*d_torsion_d_varphi)) + \\\n Y1s*(-(iota_N0*Y1s*Y1s*\\\n d_X1c_d_varphi) - \\\n Y1s*d_X1c_d_varphi*\\\n d_Y1c_d_varphi + \\\n sign_G*sign_psi*(2*lp*torsion*\\\n d_X1c_d_varphi + \\\n iota_N0*d_Y1s_d_varphi + \\\n d2_Y1c_d_varphi2)) + \\\n Y1c*(sign_G*sign_psi*iota_N0*\\\n d_Y1c_d_varphi + \\\n Y1s*d_X1c_d_varphi*\\\n d_Y1s_d_varphi - \\\n sign_G*sign_psi*d2_Y1s_d_varphi2)))/\\\n (lp*lp*sign_G)\n\n # Element 133\n grad_grad_B_alt[:,0,2,2] =-((B0*(Y1s*curvature*\\\n d_X1c_d_varphi + \\\n X1c*(iota_N0*Y1c*\\\n curvature - \\\n Y1s*d_curvature_d_varphi)))/\\\n (lp*sign_psi))\n\n # Element 211\n grad_grad_B_alt[:,1,0,0] =(-2*B0*X1c*(2*sign_G*sign_psi*iota_N0*X2c*\\\n Y1s + iota_N0*X1c*X1c*\\\n (Y1c*(Y20 - Y2c) + \\\n sign_G*sign_psi*Y1s*curvature) - \\\n X20*Y1c*Y1s*\\\n d_X1c_d_varphi + \\\n X2c*Y1c*Y1s*\\\n d_X1c_d_varphi - \\\n sign_G*sign_psi*Y20*d_X1c_d_varphi + \\\n sign_G*sign_psi*Y2c*d_X1c_d_varphi + \\\n X2s*(Y1c*\\\n (-2*sign_G*sign_psi*iota_N0 + \\\n iota_N0*X1c*Y1s) + \\\n Y1s*Y1s*d_X1c_d_varphi) + \\\n X1c*(-(iota_N0*X20*\\\n Y1c*Y1c) + \\\n iota_N0*X2c*Y1c*Y1c - \\\n sign_G*sign_psi*iota_N0*Y2s + \\\n lp*sign_G*sign_psi*Y1s*Y1s*curvature*\\\n torsion + \\\n Y1s*Y20*\\\n d_X1c_d_varphi - \\\n Y1s*Y2c*\\\n d_X1c_d_varphi) + \\\n sign_G*sign_psi*Y1c*d_X20_d_varphi - \\\n sign_G*sign_psi*Y1c*d_X2c_d_varphi - \\\n sign_G*sign_psi*Y1s*d_X2s_d_varphi))/\\\n (lp*sign_psi)\n\n # Element 212\n grad_grad_B_alt[:,1,0,1] =(-2*B0*X1c*(iota_N0*X2s*\\\n Y1c*Y1c*Y1s + \\\n iota_N0*X2s*Y1s*Y1s*Y1s + \\\n iota_N0*X1c*Y1c*Y1c*\\\n Y20 - sign_G*sign_psi*iota_N0*Y1s*\\\n Y20 + iota_N0*X1c*\\\n Y1s*Y1s*Y20 - \\\n iota_N0*X1c*Y1c*Y1c*\\\n Y2c + 3*sign_G*sign_psi*iota_N0*Y1s*\\\n Y2c - iota_N0*X1c*\\\n Y1s*Y1s*Y2c - \\\n 3*sign_G*sign_psi*iota_N0*Y1c*Y2s + \\\n sign_G*sign_psi*iota_N0*X1c*Y1c*\\\n Y1s*curvature - \\\n lp*sign_G*sign_psi*X2s*Y1s*\\\n torsion + \\\n lp*X1c*X2s*Y1s*Y1s*\\\n torsion - \\\n lp*sign_G*sign_psi*X1c*Y20*\\\n torsion + \\\n lp*X1c*X1c*Y1s*Y20*\\\n torsion + \\\n lp*sign_G*sign_psi*X1c*Y2c*\\\n torsion - \\\n lp*X1c*X1c*Y1s*Y2c*\\\n torsion + \\\n X2s*Y1s*Y1s*\\\n d_Y1c_d_varphi - \\\n sign_G*sign_psi*Y20*d_Y1c_d_varphi + \\\n X1c*Y1s*Y20*\\\n d_Y1c_d_varphi + \\\n sign_G*sign_psi*Y2c*d_Y1c_d_varphi - \\\n X1c*Y1s*Y2c*\\\n d_Y1c_d_varphi - \\\n X2s*Y1c*Y1s*\\\n d_Y1s_d_varphi - \\\n X1c*Y1c*Y20*\\\n d_Y1s_d_varphi + \\\n X1c*Y1c*Y2c*\\\n d_Y1s_d_varphi + \\\n sign_G*sign_psi*Y2s*d_Y1s_d_varphi - \\\n sign_G*sign_psi*X1c*Y1s*\\\n curvature*d_Y1s_d_varphi - \\\n X20*Y1c*\\\n (iota_N0*Y1c*Y1c + iota_N0*Y1s*Y1s - \\\n lp*sign_G*sign_psi*torsion + \\\n Y1s*(lp*X1c*torsion + \\\n d_Y1c_d_varphi) - \\\n Y1c*d_Y1s_d_varphi) + \\\n X2c*Y1c*\\\n (iota_N0*Y1c*Y1c + 
iota_N0*Y1s*Y1s - \\\n lp*sign_G*sign_psi*torsion + \\\n Y1s*(lp*X1c*torsion + \\\n d_Y1c_d_varphi) - \\\n Y1c*d_Y1s_d_varphi) + \\\n sign_G*sign_psi*Y1c*d_Y20_d_varphi - \\\n sign_G*sign_psi*Y1c*d_Y2c_d_varphi - \\\n sign_G*sign_psi*Y1s*d_Y2s_d_varphi))/\\\n (lp*sign_psi)\n\n # Element 213\n grad_grad_B_alt[:,1,0,2] =(2*X1c*(2*lp*sign_G*sign_psi*B2s*\\\n Y1s + Y1c*\\\n (G2*sign_psi + I2*sign_psi*iota - \\\n 2*lp*sign_G*sign_psi*B20 + \\\n 2*lp*sign_G*sign_psi*B2c + \\\n 2*B0*sign_G*sign_psi*iota_N0*Z2s + \\\n B0*lp*sign_G*sign_psi*X20*curvature - \\\n B0*lp*sign_G*sign_psi*X2c*curvature + \\\n B0*lp*X1c*X20*Y1s*\\\n curvature - \\\n B0*lp*X1c*X2c*Y1s*\\\n curvature - \\\n B0*sign_G*sign_psi*d_Z20_d_varphi + \\\n B0*sign_G*sign_psi*d_Z2c_d_varphi) - \\\n B0*(lp*X1c*X2s*Y1s*Y1s*\\\n curvature + \\\n lp*sign_G*sign_psi*X1c*\\\n (-Y20 + Y2c)*\\\n curvature + \\\n Y1s*(2*sign_G*sign_psi*iota_N0*Z2c + \\\n lp*sign_G*sign_psi*X2s*curvature + \\\n lp*X1c*X1c*Y20*\\\n curvature - \\\n lp*X1c*X1c*Y2c*\\\n curvature - \\\n sign_G*sign_psi*d_Z2s_d_varphi))))/\\\n (lp*sign_psi)\n\n # Element 221\n grad_grad_B_alt[:,1,1,0] =(2*B0*X1c*(iota_N0*X1c*X1c*X1c*\\\n (Y20 - Y2c) + \\\n X1c*X1c*(-(iota_N0*X20*\\\n Y1c) + \\\n iota_N0*X2c*Y1c + \\\n Y1s*(iota_N0*X2s + \\\n lp*(Y20 - Y2c)*\\\n torsion)) - \\\n sign_G*sign_psi*(lp*X2s*Y1s*\\\n torsion + \\\n X2c*(lp*Y1c*\\\n torsion - d_X1c_d_varphi) + \\\n X20*(-(lp*Y1c*\\\n torsion) + d_X1c_d_varphi)) \\\n + X1c*(-(lp*X20*Y1c*\\\n Y1s*torsion) + \\\n lp*X2c*Y1c*Y1s*\\\n torsion - \\\n lp*sign_G*sign_psi*Y20*torsion + \\\n lp*sign_G*sign_psi*Y2c*torsion + \\\n X2s*(-3*sign_G*sign_psi*iota_N0 + \\\n lp*Y1s*Y1s*torsion) + \\\n sign_G*sign_psi*d_X20_d_varphi - \\\n sign_G*sign_psi*d_X2c_d_varphi)))/\\\n (lp*sign_psi)\n\n # Element 222\n grad_grad_B_alt[:,1,1,1] =(-2*B0*X1c*(sign_G*sign_psi*\\\n (X20 - X2c)*\\\n (iota_N0*Y1s + d_Y1c_d_varphi) + \\\n X2s*(sign_G*sign_psi - \\\n X1c*Y1s)*\\\n (iota_N0*Y1c - d_Y1s_d_varphi) - \\\n X1c*X1c*(Y20 - Y2c)*\\\n (iota_N0*Y1c - d_Y1s_d_varphi) + \\\n X1c*(X20*Y1c*\\\n (iota_N0*Y1c - d_Y1s_d_varphi) \\\n + X2c*Y1c*\\\n (-(iota_N0*Y1c) + \\\n d_Y1s_d_varphi) + \\\n sign_G*sign_psi*(2*iota_N0*Y2s - \\\n d_Y20_d_varphi + \\\n d_Y2c_d_varphi))))/(lp*sign_psi)\n\n # Element 223\n grad_grad_B_alt[:,1,1,2] =(-2*X1c*X1c*(G2 + I2*iota - 2*lp*sign_G*B20 + \\\n 2*lp*sign_G*B2c + 2*B0*sign_G*iota_N0*Z2s + \\\n 2*B0*lp*sign_G*X20*curvature - \\\n 2*B0*lp*sign_G*X2c*curvature - \\\n B0*sign_G*d_Z20_d_varphi + \\\n B0*sign_G*d_Z2c_d_varphi))/(lp)\n\n # Element 231\n grad_grad_B_alt[:,1,2,0] =(B0*X1c*(lp*iota_N0*Y1c*\\\n (sign_G*sign_psi + X1c*Y1s)*\\\n torsion + \\\n (-(sign_G*sign_psi*iota_N0) + \\\n lp*Y1s*Y1s*torsion)*\\\n d_X1c_d_varphi + \\\n iota_N0*X1c*X1c*d_Y1s_d_varphi - \\\n 2*lp*sign_G*sign_psi*torsion*\\\n d_Y1s_d_varphi + \\\n lp*X1c*Y1s*torsion*\\\n d_Y1s_d_varphi - \\\n lp*sign_G*sign_psi*Y1s*d_torsion_d_varphi))/\\\n (lp*lp*sign_G)\n\n # Element 232\n grad_grad_B_alt[:,1,2,1] =(B0*X1c*(lp*iota_N0*Y1c*Y1c*\\\n Y1s*torsion + \\\n lp*iota_N0*Y1s*Y1s*Y1s*torsion - \\\n lp*lp*sign_G*sign_psi*Y1s*torsion*torsion - \\\n sign_G*sign_psi*iota_N0*d_Y1c_d_varphi + \\\n lp*Y1s*Y1s*torsion*\\\n d_Y1c_d_varphi - \\\n lp*Y1c*Y1s*torsion*\\\n d_Y1s_d_varphi + \\\n X1c*(-(lp*sign_G*sign_psi*iota_N0*\\\n torsion) + \\\n lp*lp*Y1s*Y1s*torsion*torsion + \\\n (iota_N0*Y1c - d_Y1s_d_varphi)*\\\n d_Y1s_d_varphi) + \\\n sign_G*sign_psi*d2_Y1s_d_varphi2))/(lp*lp*sign_G)\n\n # Element 233\n grad_grad_B_alt[:,1,2,2] =(B0*X1c*curvature*\\\n (iota_N0*X1c + 
\\\n 2*lp*Y1s*torsion))/(lp*sign_psi)\n\n # Element 311\n grad_grad_B_alt[:,2,0,0] =(B0*(2*lp*lp*sign_G*sign_psi*curvature*curvature + \\\n lp*iota_N0*X1c*X1c*torsion - \\\n lp*iota_N0*Y1c*Y1c*torsion - \\\n lp*iota_N0*Y1s*Y1s*torsion + \\\n iota_N0*Y1c*d_X1c_d_varphi + \\\n iota_N0*X1c*d_Y1c_d_varphi - \\\n lp*Y1s*torsion*\\\n d_Y1c_d_varphi + \\\n lp*Y1c*torsion*\\\n d_Y1s_d_varphi + \\\n d_X1c_d_varphi*d_Y1s_d_varphi + \\\n Y1s*d2_X1c_d_varphi2))/\\\n (lp*lp*sign_psi)\n\n # Element 312\n grad_grad_B_alt[:,2,0,1] =(B0*(lp*X1c*(2*iota_N0*Y1c*\\\n torsion + \\\n Y1s*d_torsion_d_varphi) + \\\n Y1s*(2*lp*torsion*\\\n d_X1c_d_varphi + \\\n 2*iota_N0*d_Y1s_d_varphi + \\\n d2_Y1c_d_varphi2) + \\\n Y1c*(2*iota_N0*d_Y1c_d_varphi - \\\n d2_Y1s_d_varphi2)))/(lp*lp*sign_psi)\n\n # Element 313\n grad_grad_B_alt[:,2,0,2] =(B0*(-(iota_N0*X1c*Y1c*\\\n curvature) - \\\n Y1s*curvature*\\\n d_X1c_d_varphi + \\\n sign_G*sign_psi*d_curvature_d_varphi))/(lp*sign_psi)\n\n # Element 321\n grad_grad_B_alt[:,2,1,0] =(B0*X1c*(2*lp*iota_N0*Y1c*\\\n torsion - \\\n 2*iota_N0*d_X1c_d_varphi - \\\n lp*(2*torsion*d_Y1s_d_varphi + \\\n Y1s*d_torsion_d_varphi)))/\\\n (lp*lp*sign_psi)\n\n # Element 322\n grad_grad_B_alt[:,2,1,1] =-((B0*(iota_N0*Y1c*d_X1c_d_varphi + \\\n iota_N0*X1c*d_Y1c_d_varphi - \\\n d_X1c_d_varphi*\\\n d_Y1s_d_varphi + \\\n lp*torsion*\\\n (iota_N0*X1c*X1c - \\\n iota_N0*Y1c*Y1c - \\\n Y1s*(iota_N0*Y1s + \\\n d_Y1c_d_varphi) + \\\n Y1c*d_Y1s_d_varphi) - \\\n X1c*d2_Y1s_d_varphi2))/\\\n (lp*lp*sign_psi))\n\n # Element 323\n grad_grad_B_alt[:,2,1,2] =(B0*curvature*(iota_N0*X1c*X1c + \\\n lp*sign_G*sign_psi*torsion + \\\n lp*X1c*Y1s*torsion))/\\\n (lp*sign_psi)\n\n # Element 331\n grad_grad_B_alt[:,2,2,0] =(B0*(-(iota_N0*X1c*Y1c*\\\n curvature) - \\\n Y1s*curvature*\\\n d_X1c_d_varphi + \\\n sign_G*sign_psi*d_curvature_d_varphi))/(lp*sign_psi)\n\n # Element 332\n grad_grad_B_alt[:,2,2,1] =-((B0*curvature*(iota_N0*Y1c*Y1c + \\\n iota_N0*Y1s*Y1s - \\\n lp*sign_G*sign_psi*torsion + \\\n Y1s*(lp*X1c*torsion + \\\n d_Y1c_d_varphi) - \\\n Y1c*d_Y1s_d_varphi))/\\\n (lp*sign_psi))\n\n # Element 333\n grad_grad_B_alt[:,2,2,2] =(-2*B0*curvature*curvature)/sign_G\n\n self.grad_grad_B_alt = grad_grad_B_alt", "def compute_gravity(self):\r\n # compute the gravity from the Gauss form.\r\n # if it fails, marks divergence\r\n try:\r\n self.gravsolver.solve()\r\n except:\r\n print(\"GRAVITY DIVERGED\")\r\n\r\n # write to log\r\n self.logfile.write(\"%s: STOPPED DUE TO DIVERGENCE IN GRAVITY \\n\" %\r\n (self.convert_time(time.time() -\r\n self.start_time)))\r\n self.diverged = True # set diverged to True, break the run\r\n return\r\n\r\n # split and update the gravity function with the answers\r\n # note the gravscale\r\n gravg, gravs = self.gravgs.split()\r\n\r\n # assign the result to the gravity function\r\n self.gravity.assign(project(gravg/self.gravscale, self.V))", "def compute_forces_mesh(self):\n f = self.ptclgrid.grid[:self.size,:self.size]*self.grad_phi_mesh()\n return f", "def getGradients(T,x,y):\n\n # These are place holders, you will overwrite them in your code.\n dummy = torch.zeros(3,4)\n Tx = dummy\n Ty = dummy\n\n # TODO: your code here to compute Tx, Ty\n \n return Tx, Ty", "def magnetic_pressure_gradient_force(self, method='spectral'):\n import numpy as np\n gradB, B = self.magnetic_gradient_tensor(method=method, return_B=True)\n F = np.zeros_like(B)\n for i in range(3):\n for j in range(3):\n F[i] -= B[j] * gradB[i,j]\n return F", "def evolve(self):\n\n rho = self.cc_data.get_var(\"density\")\n u = 
self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n gradp_x = self.cc_data.get_var(\"gradp_x\")\n gradp_y = self.cc_data.get_var(\"gradp_y\")\n\n # note: the base state quantities do not have valid ghost cells\n beta0 = self.base[\"beta0\"]\n beta0_edges = self.base[\"beta0-edges\"]\n\n rho0 = self.base[\"rho0\"]\n\n phi = self.cc_data.get_var(\"phi\")\n\n myg = self.cc_data.grid\n\n # ---------------------------------------------------------------------\n # create the limited slopes of rho, u and v (in both directions)\n # ---------------------------------------------------------------------\n limiter = self.rp.get_param(\"lm-atmosphere.limiter\")\n\n ldelta_rx = reconstruction.limit(rho, myg, 1, limiter)\n ldelta_ux = reconstruction.limit(u, myg, 1, limiter)\n ldelta_vx = reconstruction.limit(v, myg, 1, limiter)\n\n ldelta_ry = reconstruction.limit(rho, myg, 2, limiter)\n ldelta_uy = reconstruction.limit(u, myg, 2, limiter)\n ldelta_vy = reconstruction.limit(v, myg, 2, limiter)\n\n # ---------------------------------------------------------------------\n # get the advective velocities\n # ---------------------------------------------------------------------\n\n \"\"\"\n the advective velocities are the normal velocity through each cell\n interface, and are defined on the cell edges, in a MAC type\n staggered form\n\n n+1/2\n v\n i,j+1/2\n +------+------+\n | |\n n+1/2 | | n+1/2\n u + U + u\n i-1/2,j | i,j | i+1/2,j\n | |\n +------+------+\n n+1/2\n v\n i,j-1/2\n\n \"\"\"\n\n # this returns u on x-interfaces and v on y-interfaces. These\n # constitute the MAC grid\n if self.verbose > 0:\n print(\" making MAC velocities\")\n\n # create the coefficient to the grad (pi/beta) term\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n # create the source term\n source = self.aux_data.get_var(\"source_y\")\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n rhoprime = self.make_prime(rho, rho0)\n source.v()[:, :] = rhoprime.v()*g/rho.v()\n self.aux_data.fill_BC(\"source_y\")\n\n _um, _vm = lm_interface.mac_vels(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source)\n\n u_MAC = ai.ArrayIndexer(d=_um, grid=myg)\n v_MAC = ai.ArrayIndexer(d=_vm, grid=myg)\n\n # ---------------------------------------------------------------------\n # do a MAC projection to make the advective velocities divergence\n # free\n # ---------------------------------------------------------------------\n\n # we will solve D (beta_0^2/rho) G phi = D (beta_0 U^MAC), where\n # phi is cell centered, and U^MAC is the MAC-type staggered\n # grid of the advective velocities.\n\n if self.verbose > 0:\n print(\" MAC projection\")\n\n # create the coefficient array: beta0**2/rho\n # MZ!!!! 
probably don't need the buf here\n coeff.v(buf=1)[:, :] = 1.0/rho.v(buf=1)\n coeff.v(buf=1)[:, :] = coeff.v(buf=1)*beta0.v2d(buf=1)**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi-MAC\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi-MAC\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi-MAC\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi-MAC\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # MAC velocities are edge-centered. div{beta_0 U} is cell-centered.\n div_beta_U.v()[:, :] = \\\n beta0.v2d()*(u_MAC.ip(1) - u_MAC.v())/myg.dx + \\\n (beta0_edges.v2dp(1)*v_MAC.jp(1) -\n beta0_edges.v2d()*v_MAC.v())/myg.dy\n\n # solve the Poisson problem\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-12)\n\n # update the normal velocities with the pressure gradient -- these\n # constitute our advective velocities. Note that what we actually\n # solved for here is phi/beta_0\n phi_MAC = self.cc_data.get_var(\"phi-MAC\")\n phi_MAC[:, :] = mg.get_solution(grid=myg)\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n coeff_x = myg.scratch_array()\n b = (3, 1, 0, 0) # this seems more than we need\n coeff_x.v(buf=b)[:, :] = 0.5*(coeff.ip(-1, buf=b) + coeff.v(buf=b))\n\n coeff_y = myg.scratch_array()\n b = (0, 0, 3, 1)\n coeff_y.v(buf=b)[:, :] = 0.5*(coeff.jp(-1, buf=b) + coeff.v(buf=b))\n\n # we need the MAC velocities on all edges of the computational domain\n # here we do U = U - (beta_0/rho) grad (phi/beta_0)\n b = (0, 1, 0, 0)\n u_MAC.v(buf=b)[:, :] -= \\\n coeff_x.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.ip(-1, buf=b))/myg.dx\n\n b = (0, 0, 0, 1)\n v_MAC.v(buf=b)[:, :] -= \\\n coeff_y.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.jp(-1, buf=b))/myg.dy\n\n # ---------------------------------------------------------------------\n # predict rho to the edges and do its conservative update\n # ---------------------------------------------------------------------\n _rx, _ry = lm_interface.rho_states(myg.ng, myg.dx, myg.dy, self.dt,\n rho, u_MAC, v_MAC,\n ldelta_rx, ldelta_ry)\n\n rho_xint = ai.ArrayIndexer(d=_rx, grid=myg)\n rho_yint = ai.ArrayIndexer(d=_ry, grid=myg)\n\n rho_old = rho.copy()\n\n rho.v()[:, :] -= self.dt*(\n # (rho u)_x\n (rho_xint.ip(1)*u_MAC.ip(1) - rho_xint.v()*u_MAC.v())/myg.dx +\n # (rho v)_y\n (rho_yint.jp(1)*v_MAC.jp(1) - rho_yint.v()*v_MAC.v())/myg.dy)\n\n self.cc_data.fill_BC(\"density\")\n\n # update eint as a diagnostic\n eint = self.cc_data.get_var(\"eint\")\n gamma = self.rp.get_param(\"eos.gamma\")\n eint.v()[:, :] = self.base[\"p0\"].v2d()/(gamma - 1.0)/rho.v()\n\n # ---------------------------------------------------------------------\n # recompute the interface states, using the advective velocity\n # from above\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" making u, v edge states\")\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 2.0/(rho.v() + rho_old.v())\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n _ux, _vx, _uy, _vy = \\\n lm_interface.states(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source,\n u_MAC, v_MAC)\n\n u_xint = ai.ArrayIndexer(d=_ux, grid=myg)\n v_xint = 
ai.ArrayIndexer(d=_vx, grid=myg)\n u_yint = ai.ArrayIndexer(d=_uy, grid=myg)\n v_yint = ai.ArrayIndexer(d=_vy, grid=myg)\n\n # ---------------------------------------------------------------------\n # update U to get the provisional velocity field\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" doing provisional update of u, v\")\n\n # compute (U.grad)U\n\n # we want u_MAC U_x + v_MAC U_y\n advect_x = myg.scratch_array()\n advect_y = myg.scratch_array()\n\n advect_x.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(u_xint.ip(1) - u_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(u_yint.jp(1) - u_yint.v())/myg.dy\n\n advect_y.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(v_xint.ip(1) - v_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(v_yint.jp(1) - v_yint.v())/myg.dy\n\n proj_type = self.rp.get_param(\"lm-atmosphere.proj_type\")\n\n if proj_type == 1:\n u.v()[:, :] -= (self.dt*advect_x.v() + self.dt*gradp_x.v())\n v.v()[:, :] -= (self.dt*advect_y.v() + self.dt*gradp_y.v())\n\n elif proj_type == 2:\n u.v()[:, :] -= self.dt*advect_x.v()\n v.v()[:, :] -= self.dt*advect_y.v()\n\n # add the gravitational source\n rho_half = 0.5*(rho + rho_old)\n rhoprime = self.make_prime(rho_half, rho0)\n source[:, :] = rhoprime*g/rho_half\n self.aux_data.fill_BC(\"source_y\")\n\n v[:, :] += self.dt*source\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n if self.verbose > 0:\n print(\"min/max rho = {}, {}\".format(self.cc_data.min(\"density\"), self.cc_data.max(\"density\")))\n print(\"min/max u = {}, {}\".format(self.cc_data.min(\"x-velocity\"), self.cc_data.max(\"x-velocity\")))\n print(\"min/max v = {}, {}\".format(self.cc_data.min(\"y-velocity\"), self.cc_data.max(\"y-velocity\")))\n\n # ---------------------------------------------------------------------\n # project the final velocity\n # ---------------------------------------------------------------------\n\n # now we solve L phi = D (U* /dt)\n if self.verbose > 0:\n print(\" final projection\")\n\n # create the coefficient array: beta0**2/rho\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n mg.init_RHS(div_beta_U/self.dt)\n\n # use the old phi as our initial guess\n phiGuess = mg.soln_grid.scratch_array()\n phiGuess.v(buf=1)[:, :] = phi.v(buf=1)\n mg.init_solution(phiGuess)\n\n # solve\n mg.solve(rtol=1.e-12)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of p and update the velocities\n # this differs depending on what we projected.\n gradphi_x, gradphi_y = mg.get_solution_gradient(grid=myg)\n\n # U = U - (beta_0/rho) grad (phi/beta_0)\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= self.dt*coeff.v()*gradphi_x.v()\n v.v()[:, :] -= self.dt*coeff.v()*gradphi_y.v()\n\n # store gradp 
for the next step\n\n if proj_type == 1:\n gradp_x.v()[:, :] += gradphi_x.v()\n gradp_y.v()[:, :] += gradphi_y.v()\n\n elif proj_type == 2:\n gradp_x.v()[:, :] = gradphi_x.v()\n gradp_y.v()[:, :] = gradphi_y.v()\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n self.cc_data.fill_BC(\"gradp_x\")\n self.cc_data.fill_BC(\"gradp_y\")\n\n # increment the time\n if not self.in_preevolve:\n self.cc_data.t += self.dt\n self.n += 1", "def Gradient(Walker,particle):\n\n h=0.001\n dPsi = zeros(shape=shape(Walker.Re[particle]))\n for i in range(Walker.sys_dim):\n Y=Walker.Re[particle][i]\n Walker.Re[particle][i]-=h\n wfs1=wfs(Walker)\n Walker.Re[particle][i]+=2.0*h\n wfs2=wfs(Walker)\n dPsi[i] = (wfs2-wfs1)/2/h\n Walker.Re[particle][i]=Y\n\n return dPsi", "def calculate_grad_B_tensor(self):\n s = self # Shorthand\n tensor = Struct()\n \n factor = s.spsi * s.B0 / s.d_l_d_varphi\n tensor.tn = s.sG * s.B0 * s.curvature\n tensor.nt = tensor.tn\n tensor.bb = factor * (s.X1c * s.d_Y1s_d_varphi - s.iotaN * s.X1c * s.Y1c)\n tensor.nn = factor * (s.d_X1c_d_varphi * s.Y1s + s.iotaN * s.X1c * s.Y1c)\n tensor.bn = factor * (-s.sG * s.spsi * s.d_l_d_varphi * s.torsion \\\n - s.iotaN * s.X1c * s.X1c)\n tensor.nb = factor * (s.d_Y1c_d_varphi * s.Y1s - s.d_Y1s_d_varphi * s.Y1c \\\n + s.sG * s.spsi * s.d_l_d_varphi * s.torsion \\\n + s.iotaN * (s.Y1s * s.Y1s + s.Y1c * s.Y1c))\n\n self.grad_B_tensor = tensor\n self.grad_B_colon_grad_B = tensor.tn * tensor.tn + tensor.nt * tensor.nt \\\n + tensor.bb * tensor.bb + tensor.nn * tensor.nn \\\n + tensor.nb * tensor.nb + tensor.bn * tensor.bn\n\n self.L_grad_B = s.B0 * np.sqrt(2 / self.grad_B_colon_grad_B)\n self.inv_L_grad_B = 1.0 / self.L_grad_B\n self.min_L_grad_B = fourier_minimum(self.L_grad_B)", "def gTorque(self):\n pass", "def gravity(acceleration, *bodies):\n\n gravity_force = []\n for body in bodies:\n if not isinstance(body, BodyBase):\n raise TypeError(f'{type(body)} is not a body type')\n gravity_force.append(Force(body.masscenter, body.mass * acceleration))\n return gravity_force", "def reach_gradient(self):\n step_size = 0.05\n min_step_size = 0.001\n moved_closer = True\n while_loop_counter = 0\n max_steps = 100\n old_total_cost = 10\n epsilon = 0.005\n\n # While moved closer and not reached minimum step size\n while moved_closer and step_size > min_step_size:\n while_loop_counter += 1\n # Set a maximum number of steps per change to see progress - used for testing\n if while_loop_counter > max_steps:\n break\n new_total_cost = 0\n text = \"\"\n i = 0\n\n # Go through each joint within the arm\n for joint_key, joint_value in self.joint_angles.items():\n # Text to show for each joint change\n text += str(self.joint_names[i]) + \" \"\n i += 1\n\n # Old endpoint values\n old_value = joint_value\n\n # Update joints in ROS with current self.joint_angle values\n self.update_angles()\n self.get_current_ee_pose() # Old endpoint\n\n # Determine cost from current end effector to target\n old_cost = self.cost(self.arm_endpoint)\n\n # Gradient of old values\n gradient = self.gradient(joint_key)\n if gradient > 0: # Determine direction of gradient\n direction = 1\n else:\n direction = -1\n\n # Determine new angle value based on gradient\n self.joint_angles[joint_key] = (old_value - direction * step_size)\n\n if self.joint_angles[joint_key] < self.joint_min[joint_key]:\n self.joint_angles[joint_key] = self.joint_min[joint_key]\n elif self.joint_angles[joint_key] > self.joint_max[joint_key]:\n self.joint_angles[joint_key] = 
self.joint_max[joint_key]\n\n # Update joint angle values within ROS and get new endpoint value\n self.update_angles()\n self.get_current_ee_pose()\n\n # Determine cost from current end effector to target\n new_cost = self.cost(self.arm_endpoint)\n\n # Determine the cost of\n if new_cost > old_cost:\n self.joint_angles[joint_key] = old_value\n new_total_cost += old_cost\n text += \": No change \\n\"\n else:\n text += \": Improved by \" + str(direction * step_size) + \"\\n\"\n new_total_cost += new_cost\n\n # Display change of each joint through text\n print(\"Robot part changes: \\n\", text)\n self.cost_values += [new_total_cost]\n\n # Check if improved from previous position\n if old_total_cost < new_total_cost:\n step_size -= .01\n moved_closer = False\n else:\n moved_closer = True\n\n print(\"abs(old_total_cost - new_total_cost): \", abs(old_total_cost - new_total_cost))\n print(\"new_total_cost: \", new_total_cost)\n # If changes are less than epsilon, we stop\n if abs(old_total_cost - new_total_cost) < epsilon:\n break\n old_total_cost = new_total_cost\n\n # Save new joint angle values\n save_file = \"/OptimizedAngles.csv\"\n print(\"Saving new joint angles at \", save_file)\n self.save_new_joint_angles(save_file)", "def get_grads(self) -> torch.Tensor:\n grads = []\n for pp in list(self.net.parameters()):\n grads.append(pp.grad.view(-1))\n return torch.cat(grads)", "def compute_gradients(self):\n raise NotImplementedError()", "def _transform_gradients(self, g):\r\n x = self._get_params()\r\n [np.put(x, i, x * t.gradfactor(x[i])) for i, t in zip(self.constrained_indices, self.constraints)]\r\n [np.put(g, i, v) for i, v in [(t[0], np.sum(g[t])) for t in self.tied_indices]]\r\n if len(self.tied_indices) or len(self.fixed_indices):\r\n to_remove = np.hstack((self.fixed_indices + [t[1:] for t in self.tied_indices]))\r\n return np.delete(g, to_remove)\r\n else:\r\n return g", "def compute_gradients(self, logits, target):\n\n target_length = target.shape[0]\n num_time_steps = logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n\n # expand labels by inserting a blank between each pair\n normalized_logits = softmax(logits)\n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n alpha = self.compute_forward_variables(normalized_logits, target) \n beta = self.compute_backward_variables(normalized_logits, target)\n\n # rescale\n alpha = alpha / np.sum(alpha, axis=0)\n beta = beta / np.sum(beta, axis=0)\n alphabeta = alpha * beta\n print \"alpha\"\n print alpha\n\n # compute zt\n z = Counter()\n for t in xrange(num_time_steps):\n for s, k in enumerate(l):\n z[t] += alphabeta[s, t] / normalized_logits[t, k]\n \n # normalized_logits is time steps t by labels k\n # alpha is 2 * target_length - 1 by time steps\n lab_zk = np.zeros_like(normalized_logits)\n for s, k in enumerate(l):\n for t in xrange(num_time_steps):\n lab_zk[t, k] += alphabeta[s, t]\n\n grad = normalized_logits\n for k in xrange(target.shape[0]):\n for t in xrange(num_time_steps):\n ytk = normalized_logits[t, k]\n constant = 1.0 / (ytk * z[t])\n grad[t, k] = ytk - constant * lab_zk[t, k]\n \n return grad", "def compute_torques(self, rotation, omega, dt):\n # shift time from integration start to time of attitude integration step\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)\n self.satPos_s = np.array([self.satPos_s.x,\n self.satPos_s.y,\n 
self.satPos_s.z], dtype='float64')\n\n self._compute_gravity_torque(curr_date)\n self._compute_magnetic_torque(curr_date)\n self._compute_solar_torque(curr_date)\n self._compute_aero_torque(curr_date, omega)\n\n return self._gTorque.add(\n self._mTorque.add(\n self._sTorque.add(\n self._aTorque)))", "def sgdgc(cost, params, lr=1.0, max_magnitude=5.0, infDecay=0.1):\n grads = T.grad(cost=cost, wrt=params)\n updates = []\n\n norm = norm_gs(params, grads)\n sqrtnorm = T.sqrt(norm)\n #not_finite = T.or_(T.isnan(sqrtnorm), T.isinf(sqrtnorm))\n adj_norm_gs = T.switch(T.ge(sqrtnorm, max_magnitude), max_magnitude / sqrtnorm, 1.)\n\n for p, g in zip(params, grads):\n #g = T.switch(not_finite, infDecay * p, g * adj_norm_gs)\n updates.append((p, p - lr * g * adj_norm_gs)) \n \n return updates, norm", "def force_gravity(body1, body2, rel_positions):\n r_mag = np.linalg.norm(rel_positions)\n F = (CONST_G * body1.mass * body2.mass / r_mag ** 3) * rel_positions\n \n return F", "def _transform_gradients(self, g):\n\n x = self._get_params()\n g[self.constrained_positive_indices] = g[self.constrained_positive_indices]*x[self.constrained_positive_indices]\n g[self.constrained_negative_indices] = g[self.constrained_negative_indices]*x[self.constrained_negative_indices]\n [np.put(g,i,g[i]*(x[i]-l)*(h-x[i])/(h-l)) for i,l,h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]\n [np.put(g,i,v) for i,v in [(t[0],np.sum(g[t])) for t in self.tied_indices]]\n if len(self.tied_indices) or len(self.constrained_fixed_indices):\n to_remove = np.hstack((self.constrained_fixed_indices+[t[1:] for t in self.tied_indices]))\n return np.delete(g,to_remove)\n else:\n return g", "def make_gradient(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(0)\n boundary_r, boundary_z = bcs\n\n # calculate preliminary quantities\n dim_r, dim_z = bcs.grid.shape\n scale_r, scale_z = 1 / (2 * bcs.grid.discretization)\n\n value_outer = boundary_r.high.make_virtual_point_evaluator()\n region_z = boundary_z.make_region_evaluator()\n\n # use processing for large enough arrays\n parallel = dim_r * dim_z >= config[\"numba.parallel_threshold\"]\n\n @jit_allocate_out(parallel=parallel, out_shape=(3, dim_r, dim_z))\n def gradient(arr, out=None):\n \"\"\"apply gradient operator to array `arr`\"\"\"\n for j in nb.prange(0, dim_z): # iterate axial points\n # inner radial boundary condition\n i = 0\n arr_z_l, _, arr_z_h = region_z(arr, (i, j))\n out[0, i, j] = (arr[1, i] - arr[0, i]) * scale_r\n out[1, i, j] = (arr_z_h - arr_z_l) * scale_z\n out[2, i, j] = 0 # no phi dependence by definition\n\n for i in range(1, dim_r - 1): # iterate radial points\n arr_z_l, _, arr_z_h = region_z(arr, (i, j))\n out[0, i, j] = (arr[i + 1, j] - arr[i - 1, j]) * scale_r\n out[1, i, j] = (arr_z_h - arr_z_l) * scale_z\n out[2, i, j] = 0 # no phi dependence by definition\n\n # outer radial boundary condition\n i = dim_r - 1\n arr_z_l, _, arr_z_h = region_z(arr, (i, j))\n arr_r_h = value_outer(arr, (i, j))\n out[0, i, j] = (arr_r_h - arr[i - 1, j]) * scale_r\n out[1, i, j] = (arr_z_h - arr_z_l) * scale_z\n out[2, i, j] = 0 # no phi dependence by definition\n\n return out\n\n return gradient # type: ignore", "def add_grad_updates(self):\n \n gradients = T.grad(self.cost, self.theta)\n \n for target_param, grad in zip(self.theta, gradients):\n \n if target_param.name ==\"W\" and self.num_hidden ==0\\\n and self.zero_diag:\n \n grad = grad - T.diag(T.diag(grad)) # no x 
i - xi connections\n # for all i = 1, ..., D\n ##############################################################\n if target_param.name ==\"b\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n elif target_param.name ==\"bhid\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n else:\n \n if self.use_momentum:\n \n # alternative definition (mostly seen):\n #g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n #T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n #self.updates[target_param] = target_param + g_tilda\n \n g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n (1-self.momentum)*grad\n \n self.updates[target_param] = target_param +\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*g_tilda\n \n # store g_tilda for next iteration:\n self.updates[self.grad_vec[target_param.name]] = g_tilda\n \n else:\n \n self.updates[target_param] = target_param -\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n \n if (\"PCD\" in self.algorithm) and self.num_hidden > 0:\n \n self.updates[self.persistent_gibbs] = self.hid_samples", "def magnetic_tension(self, method='spectral'):\n import numpy as np\n gradB, B = self.magnetic_gradient_tensor(method=method, return_B=True)\n F = np.zeros_like(B)\n for i in range(3):\n for j in range(3):\n F[j] += B[i] * gradB[i,j]\n return F", "def compute_grads(self, X, y):\n grads = np.zeros_like(self.theta)\n sigsoftk = self.softmax_mat(X)\n m = X.shape[1]\n for k in range(self.nclasses):\n # To compute the gradient we sum over every training\n # example, i.e., sum(axis=1)\n grad = ((sigsoftk - y)[k,:] * X).sum(axis=1) / m\n grads[:,k] = grad\n return grads", "def g_tensor(self,gpara,gperp):\n gx = gperp\n gy = gperp\n gz = gpara\n\n self.gx = gx\n self.gy = gy\n self.gz = gz\n self.g_grid = np.array([[gx*gx, gx*gy, gx*gz],[gy*gx, gy*gy, gy*gz],[gz*gx, gz*gy, gz*gz]])\n # rotate the crystal coordinates so that I'm now in the coordinate system \n # given by the zeeman tensor's principal axes", "def compute_gradient(self):\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)" ]
[ "0.7358119", "0.64381224", "0.62688065", "0.5906272", "0.5894993", "0.5838472", "0.575906", "0.5756718", "0.5749612", "0.56591", "0.5626756", "0.56163937", "0.5612647", "0.5589206", "0.550804", "0.55040807", "0.5498407", "0.5491252", "0.5456741", "0.5433564", "0.54157126", "0.5407857", "0.54057515", "0.53920394", "0.5388796", "0.5384886", "0.53814495", "0.5371396", "0.5370833", "0.5348573" ]
0.673073
1
Property holding magnetic torque vector.
def mTorque(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_torque(self):\n return self.node.sdo[0x6077].phys # rate torque(mN.m) /1000", "def aTorque(self):\n pass", "def gTorque(self):\n pass", "def _compute_gravity_torque(self):\n pass", "def setMotorTorque(self, torque):\r\n if torque < 0.0:\r\n torque = 0.0\r\n elif torque > 1.0:\r\n torque = 1.0\r\n torque *= self.maxTorque\r\n if self.reverse:\r\n torque *= -1\r\n dTorque = 2\r\n if self.torque < torque:\r\n self.torque += dTorque\r\n elif self.torque > torque:\r\n self.torque -= dTorque\r\n \r\n for tire in self.tires:\r\n if tire.torque:\r\n tire.shape.setMotorTorque( self.torque )", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def _compute_solar_torque(self):\n pass", "def _compute_aero_torque(self):\n pass", "def get_motor_load_torque(self):\n # Start with the brake normal\n # change to 17deg (tan 17?)\n # change to torque using the pitch of the thread on the ball screw\n # (^ make sure to take friction into account)\n # That should give us the torque acting on the motor. If this torque is greater than the motor max torque, it will slip\n # Take into account that the max holding torque is different from the max torque. How do we know if the motor is holding or moving? \n # How do we control the stepper motor? Where are the routines for that? 
\n pass", "def torque(system, /, use_demag=True):\n if use_demag:\n total_field = (mm.consts.mu0 *\n (oc.compute(system.energy.demag.effective_field, system)\n + system.energy.zeeman.H))\n else:\n total_field = mm.consts.mu0 * np.array(system.energy.zeeman.H)\n norm_field = df.Field(system.m.mesh, dim=1,\n value=(system.m.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n moment = system.m * volume\n torque = (moment & total_field)\n return (df.integral(torque * df.dV / volume**2, direction='xyz'))", "def robot_arm_vel(self):\n return self.sim.data.qvel[self.arm_index]", "def make_torque(self):\n def torque_func(m):\n heff = self.field(m)\n total_torque = torque.landau_lifshitz(m, heff, self.damping)\n if self.stt != 0:\n total_torque += torque.slonczewski(m, self.Jc, self.stt)\n return total_torque\n self.torque = torque_func", "def sTorque(self):\n pass", "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j*kr\n\n front_term = (\n self.current * self.length / (4 * np.pi * r**2) * (ikr + 1) *\n np.exp(-ikr)\n )\n return -front_term * self.cross_orientation(dxyz) / r", "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j*kr\n\n front_term = self.moment / (4. * np.pi * r**3) * np.exp(-ikr)\n symmetric_term = (\n spatial.repeat_scalar(self.dot_orientation(dxyz)) * dxyz *\n (-kr**2 + 3*ikr + 3) / r**2\n )\n oriented_term = (\n (kr**2 - ikr - 1) *\n np.kron(self.orientation, np.ones((dxyz.shape[0], 1)))\n )\n\n return front_term * (symmetric_term + oriented_term)", "def get_velocity(self):\n return self.momentum/self.mass", "def magnetometer(self):\n self._mag[X] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_X_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_X_L_M), 16)\n self._mag[Y] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Y_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Y_L_M), 16)\n self._mag[Z] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Z_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Z_L_M), 16)\n\n return vector(self._mag)", "def comp_torque(self, output):\n\n N0 = output.elec.N0\n omega = 2 * pi * N0 / 60\n\n P = output.elec.Pem_av_ref\n losses = output.elec.Pj_losses # TODO update since there may also be other losses\n\n Tem_av_ref = (P - losses) / omega\n\n output.elec.Tem_av_ref = Tem_av_ref", "def joints_torque(self):\r\n return self._arm.joints_torque", "def getVelocity(self):\n return self.v", "def vel(self,M):\n v = np.sqrt((M*Body.G)/self.r)\n \n return v", "def getVelocity(self):\n\n return self.vel", "def motor_velocities(self):\n return np.asarray(self._robot_state.velocity)", "def velocity(self):\n return self._velocity", "def velocity(self):\n return self._velocity", "def magnetic_tension(self, method='spectral'):\n import numpy as np\n gradB, B = self.magnetic_gradient_tensor(method=method, return_B=True)\n F = np.zeros_like(B)\n for i in range(3):\n for j in range(3):\n F[j] += B[i] * gradB[i,j]\n return F", "def velocity(self):\n return self.base_platform.velocity", "def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n body2satRot = PyRotation(body2sat.q0,\n 
body2sat.q1,\n body2sat.q2,\n body2sat.q3)\n sat2bodyRot = body2satRot.revert()\n body2sat = body2satRot.getMatrix()\n sat2body = sat2bodyRot.getMatrix()\n\n satM = self.spacecraft_state.getMass()\n mCub = self.inCub['dm'] * satM\n # add booms\n if \"dm_boom\" in self.inCub:\n mCub = np.concatenate((mCub, self.inCub['dm_boom']), axis=0) # boom store with mass\n CoM = self.inCub['CoM_np']\n\n dmPos_s = CoM + self.satPos_s\n\n gNewton = (-self.muGM / np.linalg.norm(dmPos_s,\n axis=1,\n keepdims=True)**3) * dmPos_s\n\n # rotate vectors:\n dmPos_b = np.einsum('ij,kj->ki', sat2body, dmPos_s)\n\n gDist = np.empty(dmPos_b.shape)\n for i in xrange(0, dmPos_b.shape[0]):\n gDist[i, :] = np.asarray(\n self.GravityModel.gradient(curr_date,\n Vector3D(float(dmPos_b[i, 0]),\n float(dmPos_b[i, 1]),\n float(dmPos_b[i, 2])),\n self.muGM))\n\n gDist_s = np.einsum('ij,kj->ki', body2sat, gDist)\n\n gT = np.sum(np.cross(CoM, mCub*(gNewton + gDist_s)), axis=0)\n\n self._gTorque = Vector3D(float(gT[0]), float(gT[1]), float(gT[2]))\n\n else:\n self._gTorque = Vector3D.ZERO", "def base_velocity(self):\n raise NotImplementedError('Not yet implemented!')" ]
[ "0.69905686", "0.6883635", "0.6844292", "0.67961264", "0.6745202", "0.66349584", "0.66349584", "0.6534814", "0.6531397", "0.6504692", "0.648461", "0.64651704", "0.64503294", "0.63484573", "0.63299674", "0.6295242", "0.62508523", "0.6243299", "0.6232335", "0.62192", "0.6147876", "0.614523", "0.60955256", "0.6033389", "0.6008471", "0.6008471", "0.5897151", "0.58660126", "0.58391404", "0.5827336" ]
0.73894155
0
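The record that ends just above (a short docstring-style query, a stub document, a list of hard-negative code snippets, a parallel list of scores, an overall document score and a rank flag) shows the triplet layout repeated throughout this listing; the metadata objective names the query, document and negatives fields explicitly. As a minimal sketch only — the JSON Lines serialization, the file name, and the field names negative_scores, document_score and document_rank are assumptions for illustration, not something stated in the records themselves — one record could be loaded and its negatives ranked by score like this:

# Minimal sketch: load one record of this retrieval dataset and pair each
# hard negative with its score. The file name "triplets.jsonl" and the field
# names "negative_scores", "document_score", "document_rank" are assumptions;
# only "query", "document" and "negatives" appear in the metadata above.
import json

def load_records(path):
    # Yield one dict per JSON line (assumes JSON Lines serialization).
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

def top_negatives(record, k=5):
    # Return the k hardest negatives, i.e. those with the highest scores.
    scores = [float(s) for s in record.get("negative_scores", [])]
    pairs = sorted(zip(scores, record["negatives"]), key=lambda p: p[0], reverse=True)
    return pairs[:k]

if __name__ == "__main__":
    for record in load_records("triplets.jsonl"):  # hypothetical file name
        print("query:", record["query"][:80])
        print("document score:", record.get("document_score"),
              "rank:", record.get("document_rank"))
        for score, neg in top_negatives(record, k=3):
            print(f"  {score:.4f}  {neg[:60]!r}")
        break  # inspect only the first record

The score list and the negatives list line up element-for-element in the records shown here, so sorting the zipped pairs surfaces the hardest negatives first.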
Initializes the dipole model. This method uses the simplified dipole model implemented in DipoleModel.py, which needs to initialize the induced magnetic density in the hysteresis rods. It also adds the hysteresis rods and bar magnets specified in the settings file to the satellite using the DipoleModel class.
def _initialize_dipole_model(self, model): for key, hyst in model['Hysteresis'].items(): direction = np.array([float(x) for x in hyst['dir'].split(" ")]) self.dipoleM.addHysteresis(direction, hyst['vol'], hyst['Hc'], hyst['Bs'], hyst['Br']) # initialize values for Hysteresis (need B-field @ initial position) spacecraft_state = self.state_observer.spacecraftState self.inertial2Sat = spacecraft_state.getAttitude().getRotation() self.satPos_i = spacecraft_state.getPVCoordinates().getPosition() gP = self.earth.transform(self.satPos_i, self.in_frame, self.in_date) topoframe = TopocentricFrame(self.earth, gP, 'ENU') topo2inertial = topoframe.getTransformTo(self.in_frame, self.in_date) lat = gP.getLatitude() lon = gP.getLongitude() alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km] # get B-field in geodetic system (X:East, Y:North, Z:Nadir) B_geo = FileDataHandler.mag_field_model.calculateField( degrees(lat), degrees(lon), alt).getFieldVector() # convert geodetic frame to inertial and from [nT] to [T] B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo)) B_b = self.inertial2Sat.applyTo(B_i) B_field = np.array([B_b.x, B_b.y, B_b.z]) self.dipoleM.initializeHysteresisModel(B_field) # add bar magnets to satellite for key, bar in model['BarMagnet'].items(): direction = np.array([float(x) for x in bar['dir'].split(" ")]) self.dipoleM.addBarMagnet(direction, bar['m'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n\n super().__init__(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=UnimodalVirtualSensorModel(\n virtual_sensor_model=[\n DoorVirtualSensorModel(modalities={\"image\"}),\n DoorVirtualSensorModel(modalities={\"pos\", \"sensors\"}),\n ],\n state_dim=3,\n ),\n )", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", "def __init__(\n self,\n dem_path: str,\n model_name: Optional[str] = None,\n ela: int = 2850,\n m: float = 0.006,\n plot: bool = True,\n ) -> None:\n\n # Load DEM ------------------------------------------------------------\n dem = open(dem_path)\n ele = dem.read(1).astype(np.float32)\n\n # Instance variables --------------------------------------------------\n self.model_name = Path(dem_path).stem if model_name is None else model_name\n self.dem_path = dem_path\n\n # Mass balance parameters\n self.m = m # Mass balance gradient\n self.ela_start = ela # Equilibrium line altitude\n self._setup_params() # Variable parameters (i, ela, steady_state)\n\n # 2D arrays\n self.ele_orig = np.copy(ele) # Original topography\n self._setup_ndarrays() # Variable arrays (ele, h, u ,hs)\n\n # Coordinate reference system and dem resolution\n self._dem_meta = dem.meta\n self.res = dem.res[0]\n\n # Geographical extent of the dem\n x0, y0, x1, y1 = dem.bounds\n self.extent = (x0, x1, y1, y0)\n\n # Setup statistics\n self._setup_stats()\n\n # Setup plot\n self.plot = plot", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def setup_d2d(self):\n\n self.config[\"d2d\"] = dict()\n\n self.config[\"d2d\"][LC.WHITE] = dict()\n self.config[\"d2d\"][LC.GROWTH] = dict()\n\n self.config[\"d2d\"][LC.WHITE][\"analog-gain\"] = 1.0\n self.config[\"d2d\"][LC.WHITE][\"digital-gain\"] = 1.0\n self.config[\"d2d\"][LC.GROWTH][\"analog-gain\"] = 1.0\n self.config[\"d2d\"][LC.GROWTH][\"digital-gain\"] = 1.0\n\n self.config[\"d2d\"][\"timestamp\"] = time.time()\n\n self.save_config_to_file()", "def initialize_variables(self):\n super(D2Model, self).initialize_variables()\n\n s = \"::: initializing 2D variables :::\"\n print_text(s, cls=self)\n\n # Depth below sea level :\n class Depth(Expression):\n def eval(self, values, x):\n values[0] = abs(min(0, x[2]))\n self.D = Depth(element=self.Q.ufl_element())\n \n # Enthalpy model\n self.theta_surface = Function(self.Q, name='theta_surface')\n self.theta_float = Function(self.Q, name='theta_float')\n self.theta_app = Function(self.Q, name='theta_app')\n self.theta = Function(self.Q, name='theta')\n self.theta0 = Function(self.Q, name='theta0')\n self.W0 = Function(self.Q, name='W0')\n self.thetahat = Function(self.Q, name='thetahat')\n self.uhat = Function(self.Q, name='uhat')\n self.vhat = Function(self.Q, name='vhat')\n self.what = Function(self.Q, name='what')\n self.mhat = Function(self.Q, name='mhat')\n self.rho_b = Function(self.Q, name='rho_b')\n\n # Age model \n self.age = Function(self.Q, name='age')\n self.a0 = Function(self.Q, name='a0')\n\n # Surface climate model\n self.precip = Function(self.Q, name='precip')\n\n # Stokes-balance model :\n self.u_s = Function(self.Q, name='u_s')\n self.u_t = Function(self.Q, name='u_t')\n self.F_id = Function(self.Q, name='F_id')\n self.F_jd = Function(self.Q, name='F_jd')\n self.F_ib = Function(self.Q, name='F_ib')\n self.F_jb = Function(self.Q, name='F_jb')\n self.F_ip = 
Function(self.Q, name='F_ip')\n self.F_jp = Function(self.Q, name='F_jp')\n self.F_ii = Function(self.Q, name='F_ii')\n self.F_ij = Function(self.Q, name='F_ij')\n self.F_iz = Function(self.Q, name='F_iz')\n self.F_ji = Function(self.Q, name='F_ji')\n self.F_jj = Function(self.Q, name='F_jj')\n self.F_jz = Function(self.Q, name='F_jz')\n self.tau_iz = Function(self.Q, name='tau_iz')\n self.tau_jz = Function(self.Q, name='tau_jz')", "def __init__(self):\n self._read_calibration_data()\n self.set_oversamplings_and_mode(\n HumidityOversampling.x08,\n TemperatureOversampling.x08,\n PressureOversampling.x16,\n SensorMode.Normal)\n self.set_config(\n InactiveDuration.ms1000,\n FilterCoefficient.fc04)", "def __init__(self, model, settings):\n super().__init__(model, settings)\n self.model_part = self.model.CreateModelPart(self.settings[\"model_part_name\"].GetString())\n self.model_part.ProcessInfo.SetValue(KM.DOMAIN_SIZE, self.settings[\"domain_size\"].GetInt())\n self.model_part.ProcessInfo.SetValue(KM.GRAVITY_Z, self.settings[\"gravity\"].GetDouble())\n self.EstimateDeltaTimeUtility = SW.EstimateTimeStepUtility(self.GetComputingModelPart(), self.settings[\"time_stepping\"])", "def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)", "def _setup(self):\n\n from AlGDock.topology import Topology\n self.top = Topology(self.args)\n self.top_RL = Topology(self.args, includeReceptor=True)\n\n # Initialize rmsd calculation function\n from AlGDock.RMSD import hRMSD\n self.get_rmsds = hRMSD(self.args.FNs['prmtop']['L'], \\\n self.top.inv_prmtop_atom_order_L)\n\n # Obtain reference pose\n if self.data['CD'].pose > -1:\n if ('starting_poses' in self.data['CD'].confs.keys()) and \\\n (self.data['CD'].confs['starting_poses'] is not None):\n starting_pose = np.copy(self.data['CD'].confs['starting_poses'][0])\n else:\n (confs, Es) = self._get_confs_to_rescore(site=False, \\\n minimize=False, sort=False)\n if self.args.params['CD']['pose'] < len(confs):\n starting_pose = np.copy(confs[self.args.params['CD']['pose']])\n self.data['CD'].confs['starting_poses'] = [np.copy(starting_pose)]\n else:\n self._clear('CD')\n self._store_infinite_f_RL()\n raise Exception('Pose index greater than number of poses')\n else:\n starting_pose = None\n\n from AlGDock.system import System\n self.system = System(self.args,\n self.log,\n self.top,\n self.top_RL,\n starting_pose=starting_pose)\n\n # Measure the binding site\n if (self.args.params['CD']['site'] == 'Measure'):\n self.args.params['CD']['site'] = 'Sphere'\n if self.args.params['CD']['site_measured'] is not None:\n (self.args.params['CD']['site_max_R'],self.args.params['CD']['site_center']) = \\\n self.args.params['CD']['site_measured']\n else:\n print '\\n*** Measuring the binding site ***'\n self.system.setParams(\n self.system.paramsFromAlpha(1.0, 'CD', site=False))\n (confs, Es) = self._get_confs_to_rescore(site=False, minimize=True)\n if len(confs) > 0:\n # Use the center of mass for configurations\n # within 20 RT of the lowest energy\n cutoffE = Es['total'][-1] + 20 * (R * self.T)\n coms = []\n for (conf, E) in reversed(zip(confs, Es['total'])):\n if E <= cutoffE:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, conf))\n coms.append(np.array(self.top.universe.centerOfMass()))\n else:\n break\n print ' %d configurations fit in the binding site' % len(coms)\n coms = np.array(coms)\n 
center = (np.min(coms, 0) + np.max(coms, 0)) / 2\n max_R = max(\n np.ceil(np.max(np.sqrt(np.sum(\n (coms - center)**2, 1))) * 10.) / 10., 0.6)\n self.args.params['CD']['site_max_R'] = max_R\n self.args.params['CD']['site_center'] = center\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, confs[-1]))\n if ((self.args.params['CD']['site_max_R'] is None) or \\\n (self.args.params['CD']['site_center'] is None)):\n raise Exception('No binding site parameters!')\n else:\n self.args.params['CD']['site_measured'] = \\\n (self.args.params['CD']['site_max_R'], \\\n self.args.params['CD']['site_center'])\n\n # Read the reference ligand and receptor coordinates\n import AlGDock.IO\n IO_crd = AlGDock.IO.crd()\n if self.args.FNs['inpcrd']['R'] is not None:\n if os.path.isfile(self.args.FNs['inpcrd']['L']):\n lig_crd = IO_crd.read(self.args.FNs['inpcrd']['L'], multiplier=0.1)\n self.data['CD'].confs['receptor'] = IO_crd.read(\\\n self.args.FNs['inpcrd']['R'], multiplier=0.1)\n elif self.args.FNs['inpcrd']['RL'] is not None:\n complex_crd = IO_crd.read(self.args.FNs['inpcrd']['RL'], multiplier=0.1)\n lig_crd = complex_crd[self.top_RL.L_first_atom:self.top_RL.L_first_atom + \\\n self.top.universe.numberOfAtoms(),:]\n self.data['CD'].confs['receptor'] = np.vstack(\\\n (complex_crd[:self.top_RL.L_first_atom,:],\\\n complex_crd[self.top_RL.L_first_atom + self.top.universe.numberOfAtoms():,:]))\n elif self.args.FNs['inpcrd']['L'] is not None:\n self.data['CD'].confs['receptor'] = None\n if os.path.isfile(self.args.FNs['inpcrd']['L']):\n lig_crd = IO_crd.read(self.args.FNs['inpcrd']['L'], multiplier=0.1)\n else:\n lig_crd = None\n\n if lig_crd is not None:\n self.data['CD'].confs['ligand'] = lig_crd[self.top.\n inv_prmtop_atom_order_L, :]\n self.top.universe.setConfiguration(\\\n Configuration(self.top.universe,self.data['CD'].confs['ligand']))\n if self.top_RL.universe is not None:\n self.top_RL.universe.setConfiguration(\\\n Configuration(self.top_RL.universe, \\\n np.vstack((self.data['CD'].confs['receptor'],self.data['CD'].confs['ligand']))))\n\n if self.args.params['CD']['rmsd'] is not False:\n if self.args.params['CD']['rmsd'] is True:\n if lig_crd is not None:\n rmsd_crd = lig_crd[self.top.inv_prmtop_atom_order_L, :]\n else:\n raise Exception('Reference structure for rmsd calculations unknown')\n else:\n rmsd_crd = IO_crd.read(self.args.params['CD']['rmsd'], \\\n natoms=self.top.universe.numberOfAtoms(), multiplier=0.1)\n rmsd_crd = rmsd_crd[self.top.inv_prmtop_atom_order_L, :]\n self.data['CD'].confs['rmsd'] = rmsd_crd\n\n self.get_rmsds.set_ref_configuration(self.data['CD'].confs['rmsd'])\n\n # If configurations are being rescored, start with a docked structure\n (confs, Es) = self._get_confs_to_rescore(site=False, minimize=False)\n if len(confs) > 0:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, confs[-1]))\n\n from AlGDock.simulation_iterator import SimulationIterator\n self.iterator = SimulationIterator(self.args, self.top, self.system)\n\n # Load progress\n from AlGDock.postprocessing import Postprocessing\n Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run(readOnly=True)\n\n self.calc_f_L(readOnly=True)\n self.calc_f_RL(readOnly=True)\n\n if self.args.random_seed > 0:\n np.random.seed(self.args.random_seed)", "def __init__(self, cpara, dz_soil):\n\n # --- grid ---\n self.z = np.linspace(0, cpara['grid']['zmax'], cpara['grid']['Nlayers']) # grid [m] above ground\n self.dz = self.z[1] - self.z[0] # 
gridsize [m]\n self.ones = np.ones(len(self.z)) # dummy\n\n # --- switches ---\n self.Switch_Eflow = cpara['ctr']['Eflow'] # True assumes constant U/ustar at upper boundary\n self.Switch_WMA = cpara['ctr']['WMA'] # True solves scalar profiles\n self.Switch_Ebal = cpara['ctr']['Ebal'] # True solves leaf energy balance\n\n logger.info('Eflow: %s, WMA: %s, Ebal: %s',\n self.Switch_Eflow,\n self.Switch_WMA,\n self.Switch_Ebal)\n\n # --- PlantTypes ---\n ptypes = []\n ptnames = list(cpara['planttypes'].keys())\n ptnames.sort()\n for pt in ptnames:\n ptypes.append(PlantType(self.z, cpara['planttypes'][pt], dz_soil, ctr=cpara['ctr'], loc=cpara['loc']))\n self.planttypes = ptypes\n \n # --- stand characteristics: sum over planttypes---\n\n # total leaf area index [m2 m-2]\n self.LAI = sum([pt.LAI for pt in self.planttypes])\n # total leaf area density [m2 m-3]\n self.lad = sum([pt.lad for pt in self.planttypes])\n\n # layerwise mean leaf characteristic dimension [m] for interception model\n self.leaf_length = sum([pt.leafp['lt'] * pt.lad for pt in self.planttypes]) / (self.lad + EPS)\n\n # root area density [m2 m-3]\n rad = np.zeros(np.shape(dz_soil))\n imax = 1\n for pt in self.planttypes:\n rad[:len(pt.Roots.rad)] += pt.Roots.rad\n imax = max(imax, len(pt.Roots.rad))\n \n self.ix_roots = np.array(range(imax)) # soil model layers corresponding to root zone\n self.rad = rad[self.ix_roots]\n \n # total root area index [m2 m-2]\n self.RAI = sum([pt.Roots.RAI for pt in self.planttypes])\n # distribution of roots across soil model layers [-]\n self.root_distr = self.rad * dz_soil[self.ix_roots] / (self.RAI + EPS)\n\n # canopy height [m]\n if len(np.where(self.lad > 0)[0]) > 0:\n f = np.where(self.lad > 0)[0][-1]\n self.hc = self.z[f].copy()\n else:\n self.hc = 0.0\n\n # --- create radiation, micromet, interception, and forestfloor model instances\n self.radiation = Radiation(cpara['radiation'], self.Switch_Ebal)\n\n self.micromet = Micromet(self.z, self.lad, self.hc, cpara['micromet'])\n\n self.interception = Interception(cpara['interception'], self.lad * self.dz)\n\n self.forestfloor = ForestFloor(cpara['forestfloor'])", "def __init__(self):\n logger.debug('Initializing %s model.' 
% self.__class__.__name__)\n self.dependent_attributes = ['_alpha',\n '_log_like',\n '_gradient','_K',\n '_log_det']\n self._previous_parameters = None # previous parameters from last call\n self.grad_method = None # could be {'finite_difference','adjoint'}\n self.noise_var_constraint = '+ve' # Gaussian noise variance constraint\n return", "def __init__(self, root, io):\n parts.hand.Hand.__init__(self, root=root, io=io)\n\n dxl_motors = OrderedDict({\n name: dict(conf)\n for name, conf in self.dxl_motors.items()\n })\n\n self.attach_dxl_motors(dxl_motors)\n\n \"\"\"\n self._load_sensor = self.io.find_module('force_gripper')\n self._load_sensor.offset = 4\n self._load_sensor.scale = 10000\n \"\"\"", "def __init__(self, para, ini_cond):\n\n # grid\n self.z = np.linspace(0, para['grid']['zmax'], para['grid']['Nlayers']) # grid [m] above ground\n self.dz = self.z[1] - self.z[0] # gridsize [m]\n self.ones = np.ones(len(self.z)) # dummy\n self.zref = para['zref'] # height of forcing data [m]\n \n # moss properties\n self.hc = para['hc'] # canopy height (m)\n self.lad = para['lad'] # shoot-area density (m2m-3)\n self.LAI = sum(self.lad*self.dz)\n \n self.canopy_nodes = np.where(self.lad > 0)[0]\n \n # hydraulic\n self.porosity = para['hydraulic']['porosity']\n self.pF = para['hydraulic']['pF']\n self.Ksat = para['hydraulic']['Ksat']\n self.freezing_curve = para['hydraulic']['freezing_curve']\n \n # radiation\n self.albedo = para['radiation'] # 'PAR', 'NIR'\n self.emissivity = para['radiation']['emissivity']\n self.clump = para['radiation']['clumping']\n self.leaf_angle = para['radiation']['leaf_angle']\n \n #self.radiation = para['radiation']\n \n # compute non-dimensional flow velocity Un = U/ust and momentum diffusivity\n Utop = ini_cond['Utop'] # U/ust at zref\n Ubot = 0.0 # no-slip\n self.Sc = para['Schmidt_nr']\n _, self.Un, self.Kmn, _ = closure_model_U_moss(self.z, self.lad, self.hc, Utop, Ubot) \n \n self.U = None\n self.Ks = None\n self.length_scale = para['length_scale']\n \n self.Switch_WMA = False\n \n # initial states\n self.T = ini_cond['T']\n self.Wtot = ini_cond['Wtot']\n self.Wliq, self.Wice, _ = frozen_water(self.T, self.Wot, fp=self.freezing_curve, To=0.0)\n self.h = water_retention(self.pF, theta=self.Wliq)", "def __init__(self, \n modeled_dem_name, \n modern_dem_name, \n outlet_id, \n category_file=None, \n category_values=None, \n weight_file=None,\n weight_values=None):\n\n # save dem names\n self.modern_dem_name = modern_dem_name\n self.modeled_dem_name = modeled_dem_name\n \n # Read and remember the modern DEM\n (self.grid, self.z) = self.read_topography(modern_dem_name)\n self.grid.set_watershed_boundary_condition_outlet_id(outlet_id,\n self.z, \n nodata_value=-9999)\n # Read and remember the modeled DEM \n (self.mgrid, self.mz) = self.read_topography(modeled_dem_name)\n self.mgrid.set_watershed_boundary_condition_outlet_id(outlet_id,\n self.mz, \n nodata_value=-9999)\n if self.mz.size != self.z.size:\n raise ValueError(('Size of provided DEMS is different.'))\n \n if category_file and category_values:\n raise ValueError(('Provide either an array-like structure of catetory ',\n 'values or a filename, not both.'))\n if weight_file and weight_values:\n raise ValueError(('Provide either an array-like structure of weight ',\n 'values or a filename, not both.'))\n if category_file:\n if os.path.exists(category_file):\n catagory_values = np.loadtxt(category_file)\n if catagory_values.size != self.z.size:\n raise ValueError(('Size of catagory array is different than the ',\n 
'provided DEM.'))\n if weight_file:\n if os.path.exists(weight_file):\n weight_values = np.loadtxt(weight_file)\n if weight_values.size != self.z.size:\n raise ValueError(('Size of weight array is different than the ',\n 'provided DEM.'))\n try:\n np.asarray(weight_values).size == self.z.size \n except TypeError:\n weight_values = np.ones_like(self.z)\n \n self.category_values = category_values\n self.weight_values = weight_values\n self.cat_vals = np.sort(np.unique(self.category_values[self.grid.core_nodes]))\n self.metric = {}", "def initialize(self):\n self.lib.Initialize()\n\n self.triggers = {'Internal': 0, 'External': 1, 'External Start': 6,\n 'External Exposure': 7, 'External FVB EM': 9,\n 'Software Trigger': 10,\n 'External Charge Shifting': 12}\n self.savetypes = {'Signed16bits': 1, 'Signed32bits': 2, 'Float': 3}\n\n # Initial values\n\n self.readout_packing_state = False\n self.readout_packing = self.readout_packing_state\n\n self.readout_mode_mode = 'Image'\n self.readout_mode = self.readout_mode_mode\n\n self.photon_counting_mode_state = False\n self.photon_counting_mode = self.photon_counting_mode_state\n\n self.frame_transfer_mode_state = False\n self.frame_transfer_mode = self.frame_transfer_mode_state\n\n self.fan_mode_index = 'onfull'\n self.fan_mode = self.fan_mode_index\n\n self.EM_gain_mode_index = 'RealGain'\n self.EM_gain_mode = self.EM_gain_mode_index\n\n self.cooled_on_shutdown_value = False\n self.cooled_on_shutdown = self.cooled_on_shutdown_value\n\n self.baseline_offset_value = 100\n self.baseline_offset = self.baseline_offset_value\n\n self.adv_trigger_mode_state = True\n self.adv_trigger_mode = self.adv_trigger_mode_state\n\n self.acq_mode = 'Single Scan'\n self.acquisition_mode = self.acq_mode\n\n self.amp_typ = 0\n\n self.horiz_shift_speed_index = 0\n self.horiz_shift_speed = self.horiz_shift_speed_index\n\n self.vert_shift_speed_index = 0\n self.vert_shift_speed = self.vert_shift_speed_index\n\n self.preamp_index = 0\n self.preamp = self.preamp_index\n\n self.temperature_sp = 0 * degC\n self.temperature_setpoint = self.temperature_sp\n\n self.auxout = np.zeros(4, dtype=bool)\n for i in np.arange(1, 5):\n self.out_aux_port[i] = False\n\n self.trigger_mode_index = 'Internal'\n self.trigger_mode = self.trigger_mode_index", "def set_all(self):\n\n self.ecm = EnergyConsumptionModel(\n vehicle_type=\"car\",\n vehicle_size=list(self.array.coords[\"size\"].values),\n powertrains=list(self.array.coords[\"powertrain\"].values),\n cycle=self.cycle,\n gradient=self.gradient,\n country=self.country,\n )\n\n diff = 1.0\n\n while diff > 0.0001:\n old_driving_mass = self[\"driving mass\"].sum().values\n self.set_vehicle_mass()\n self.set_power_parameters()\n self.set_component_masses()\n self.set_auxiliaries()\n self.set_power_battery_properties()\n self.set_battery_properties()\n self.set_energy_stored_properties()\n self.set_recuperation()\n\n if \"FCEV\" in self.array.powertrain.values:\n self.set_fuel_cell_power()\n self.set_fuel_cell_mass()\n\n # if user-provided values are passed,\n # they override the default values\n if \"capacity\" in self.energy_storage:\n self.override_battery_capacity()\n\n diff = (self[\"driving mass\"].sum().values - old_driving_mass) / self[\n \"driving mass\"\n ].sum()\n\n self.set_ttw_efficiency()\n self.calculate_ttw_energy()\n self.set_ttw_efficiency()\n\n self.set_range()\n\n if self.target_range:\n self.override_range()\n\n self.set_share_recuperated_energy()\n self.set_battery_fuel_cell_replacements()\n self.adjust_cost()\n\n 
self.set_electric_utility_factor()\n self.set_electricity_consumption()\n self.set_costs()\n self.set_hot_emissions()\n self.set_particulates_emission()\n self.set_noise_emissions()\n self.create_PHEV()\n if self.drop_hybrids:\n self.drop_hybrid()\n\n self.remove_energy_consumption_from_unavailable_vehicles()", "def setup(self):\n\n module = [m for m in Rt.modules if m.name == self.module_name][0]\n\n # Take CPACS file from the optimisation\n cpacs_path = module.cpacs_in\n tixi = open_tixi(cpacs_path)\n self.Model = load_surrogate(tixi)\n tixi.save(cpacs_path)\n\n df = self.Model.df\n df.set_index(\"Name\", inplace=True)\n for name in df.index:\n if df.loc[name, \"type\"] == \"obj\":\n self.add_output(name)\n elif df.loc[name, \"type\"] == \"des\":\n self.add_input(name)\n\n self.xd = df.loc[[name for name in df.index if df.loc[name, \"type\"] == \"des\"]]\n self.yd = df.loc[[name for name in df.index if df.loc[name, \"type\"] == \"obj\"]]", "def setup(self, path_to_conf_file):\n\n self.track = Track.SENSORS\n self.num_frames = 0\n\n with open(path_to_conf_file, 'r') as f:\n config = yaml.safe_load(f)\n\n for key, value in config.items():\n setattr(self, key, value)\n\n self.device = torch.device('cuda')\n\n self.image_model = CameraModel(config).to(self.device)\n self.image_model.load_state_dict(torch.load(self.main_model_dir))\n self.image_model.eval()\n\n self.vizs = []\n\n self.waypointer = None\n\n if self.log_wandb:\n wandb.init(project='carla_evaluate')\n \n self.steers = torch.tensor(np.linspace(-self.max_steers,self.max_steers,self.num_steers)).float().to(self.device)\n self.throts = torch.tensor(np.linspace(0,self.max_throts,self.num_throts)).float().to(self.device)\n\n self.prev_steer = 0\n self.lane_change_counter = 0\n self.stop_counter = 0", "def __init__(self, config_file=False,\n simulation='2D_square',\n D=1.56, J=5.88, ku=0.41, mu_s=3, B=(0, 0, 0), Demag=None,\n mesh_nx=50, mesh_ny=50, mesh_a=0.2715\n ):\n\n self.simulation = simulation\n\n if config_file:\n tmp_config = {}\n configs = execfile(config_file, tmp_config)\n\n self.D = configs[\"D\"] * const.meV\n self.J = configs[\"J\"] * const.meV\n self.ku = configs[\"ku\"] * const.meV\n self.mu_s = configs[\"mu_s\"] * const.mu_B\n self.m_field = configs[\"m_field\"]\n if configs[\"B\"] is not None:\n self.B = configs[\"B\"]\n\n else:\n self.D = D * const.meV\n self.J = J * const.meV\n self.ku = ku * const.meV\n self.mu_s = mu_s * const.mu_B\n self.B = B\n self.Demag = Demag\n\n self.mesh_nx = mesh_nx\n self.mesh_ny = mesh_ny\n self.mesh_a = mesh_a\n\n # Dictionary to translate a vector component into the corresponding\n # indexes in Fidimag arrays, i.e. 
x --> 0, y --> 1, z --> 2\n self.v_dict = {'x': 0, 'y': 1, 'z': 2}\n\n # Measure for dm / dt\n self.DEGREE_PER_NANOSECOND = 2 * np.pi / (360 * 1e-9)", "def __init__(self):\n super(Config, self).__init__()\n self.run_control[\"output_dir\"] = os.getcwd()+\"/output/arctan_baseline/single_turn_injection/tracking\"\n ring_tof = 1149.185123\n field_files = glob.glob(\"output/arctan_baseline/bump_quest_v9/find_bump_r_*_theta_90/find_bump_parameters_001.out\")\n substitutions = config.get_baseline_substitution(0.22, ring_tof)\n substitutions = self.ramp_fields(\n substitutions,\n field_files,\n [i for i, f in enumerate(field_files)],\n ring_tof,\n will_step = False\n )\n self.substitution_list = [substitutions] # used for plotting unperturbed CO\n\n self.run_control[\"find_closed_orbits_4d\"] = False\n self.run_control[\"find_da\"] = False\n self.run_control[\"find_bump_parameters\"] = False\n self.run_control[\"track_bump\"] = False\n self.run_control[\"track_beam\"] = True\n self.find_closed_orbits[\"subs_overrides\"][\"__n_turns__\"] = 0.11\n self.find_closed_orbits[\"subs_overrides\"][\"__do_bump__\"] = False\n self.find_closed_orbits[\"final_subs_overrides\"].update(substitutions)\n self.find_closed_orbits[\"max_iterations\"] = 0\n self.find_closed_orbits[\"do_minuit\"] = True\n self.find_closed_orbits[\"minuit_iterations\"] = 10 \n self.find_closed_orbits[\"us_cell\"] = 0\n self.find_closed_orbits[\"ds_cell\"] = 1\n\n self.find_da = {\n \"run_dir\":\"tmp/find_da/\",\n \"probe_files\":\"RINGPROBE01.h5\",\n \"subs_overrides\":{\"__n_turns__\":101.1, \"__do_magnet_field_maps__\":\"False\", \"__step_size__\":0.01},\n \"get_output_file\":\"get_da\",\n \"scan_output_file\":\"scan_da\",\n \"row_list\":None,\n \"scan_x_list\":[],\n \"scan_y_list\":[],\n \"x_seed\":1.,\n \"y_seed\":1.,\n \"min_delta\":0.9,\n \"max_delta\":1000.,\n \"required_n_hits\":100,\n \"dt_tolerance\":0.5, # fraction of closed orbit dt\n \"max_iterations\":15,\n \"decoupled\":True,\n }\n\n max_amp = 0.01\n self.track_beam_dummy = {\n \"run_dir\":\"tmp/track_beam/\",\n \"save_dir\":\"track_beam\",\n \"print_events\":[0, 1, -1],\n \"settings\":[{\n \"name\":\"forwards\",\n \"direction\":\"forwards\",\n \"probe_files\":\"RINGPROBE01.h5\", \n \"beam\":{\n \"type\":\"beam_gen\",\n \"closed_orbit_file\":\"closed_orbits_cache\",\n \"eigen_emittances\":[[0, 0]]*3,#+[[max_amp, max_amp]],\n \"n_per_dimension\":2,\n \"variables\":[\"x\",\"x'\",\"y\",\"y'\"],\n \"amplitude_dist\":\"uniform\", #\"grid\", # \n \"phase_dist\":\"uniform\", #\"grid\", # \n \"max_amplitude_4d\":max_amp, # amplitude_dist != grid\n \"energy\":3.0,\n },\n \"subs_overrides\":{\n \"__n_turns__\":45.1,\n \"__hdf5__\":\"True\",\n \"__do_magnet_field_maps__\":\"False\",\n \"__do_bump__\":\"True\",\n \"__step_size__\":0.01\n },\n },],\n }\n\n T0 = ring_tof\n self.track_beam = {\n \"run_dir\":\"tmp/track_beam/\",\n \"save_dir\":\"track_beam_rf_on_2\",\n \"print_events\":[i for i in range(1)],\n \"variables\":[\"x\", \"x'\", \"y\", \"y'\", \"t\", \"energy\"],\n \"settings\":[{\n \"name\":\"grid\",\n \"direction\":\"forwards\",\n \"probe_files\":\"RINGPROBE01.h5\", \n \"beam\":{\n \"type\":\"grid\",\n \"energy\":3.0,\n \"start\":[4357.646683446333, 0.0, -116.7090485272821, 0.0, 0.0, 941.272],\n \"stop\":[4357.646683446333, 0.0, -116.7090485272821, 0.0, T0, 941.272],\n \"nsteps\":[1, 1, 1, 1, 4+1, 1],\n },\n \"subs_overrides\":{\n \"__n_turns__\":100.1,\n \"__hdf5__\":True,\n \"__do_magnet_field_maps__\":False,\n \"__do_bump__\":True,\n \"__do_rf__\":True,\n 
\"__do_foil__\":False,\n \"__step_size__\":0.01\n },\n },],\n }", "def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')", "def setup_class(self):\n args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}\n\n # generate structure factors and retrieve associated cell information\n sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)\n sf_data = cctbx_tools.reformat_sf(sf)\n sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])\n \n # add random phase shifts\n hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()\n hklIp2[:,-1], self.shifts2 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n\n # retain subset of Millers\n for data in [hklIp1,hklIp2,hklIp3]:\n keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))\n data = data[keep_idx]\n \n self.data1, self.data2, self.data3 = hklIp1, hklIp2, hklIp3\n fshifts_list = np.random.uniform(size=(4,3))\n self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))", "def initialise_fluids(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n c.fluid.val[air] = 0.8\n if not c.fluid.val_set[flue_gas]:\n c.fluid.val[flue_gas] = 0.2\n c.target.propagate_fluid_to_target(c, c.target)", "def initialize_system(self):\n self.mfd.set_mesh(self.mesh)\n [[div_data, div_row, div_col], \n [div_t_data, div_t_row, div_t_col]] = self.mfd.build_div()\n [self.m_x_coo_data, \n m_x_coo_row, \n m_x_coo_col] = self.mfd.build_m(save_update_info=True)\n\n self.m_x_coo_length = len(self.m_x_coo_data)\n \n # The data for the bottom right should be zeros. \n [c_data, c_row, c_col] = self.mfd.build_bottom_right()\n \n [coupling_data, coupling_row, coupling_col] = self.mfd.build_coupling_terms()\n\n self.div = sparse.coo_matrix((np.array(div_data), \n (np.add(np.array(div_row), \n -self.mesh.get_number_of_faces()), \n np.array(div_col))))\n self.div = self.div.tocsr()\n\n lhs_data = self.m_x_coo_data\n lhs_row = m_x_coo_row\n lhs_col = m_x_coo_col\n \n lhs_data += div_data\n lhs_row += div_row\n lhs_col += div_col\n\n lhs_data += div_t_data\n lhs_row += div_t_row\n lhs_col += div_t_col \n \n self.c_start = len(lhs_data)\n \n lhs_data += c_data\n lhs_row += c_row\n lhs_col += c_col \n\n self.c_end = len(c_data)\n\n lhs_data += coupling_data\n lhs_row += coupling_row\n lhs_col += coupling_col\n\n # Convert m_x_coo_data to numpy array. \n self.m_x_coo_data = np.array(self.m_x_coo_data)\n\n self.lhs_coo = sparse.coo_matrix((np.array(lhs_data), \n (np.array(lhs_row), \n np.array(lhs_col))))\n\n # RHS construction is for Neumann and Dirichlet \n # boundaries specified by the mesh. 
\n self.rhs_mfd = self.mfd.build_rhs()", "def __init__(self):\n INSTALL_DIR = dirname(__file__)\n CONFIG_DIR = '/etc/Model2WADL/'\n logging.basicConfig(level=logging.ERROR)\n logging.config.fileConfig([join(CONFIG_DIR, 'logging.conf'), expanduser('~/.logging.conf'), 'logging.conf'])\n self.__log = logging.getLogger('thesis')\n\n self.__log.debug(\"Reading general configuration from Model2WADL.cfg\")\n self.__m2wConfig = ConfigParser.SafeConfigParser()\n self.__m2wConfig.read(\n [join(CONFIG_DIR, 'Physical2Virtual.cfg'), expanduser('~/.Physical2Virtual.cfg'), 'Physical2Virtual.cfg'])\n\n self.__baseURI = self.__m2wConfig.get(\"Config\", \"baseURI\")\n self.__basePackage = self.__m2wConfig.get(\"Config\", \"basePackage\")\n self.__schemaFile = self.__m2wConfig.get(\"Config\", \"schemaFile\")\n self.__model = None\n self.__input = None\n self.__output = None", "def __init__(self):\n super().__init__()\n\n # Gadget state\n \n self.isDoorOpen = False\n self.verified = True\n\n # Ev3dev initialization\n self.leds = Leds()\n self.sound = Sound()\n self.drive = MoveTank(OUTPUT_B, OUTPUT_C)\n \n self.ir_sensor = InfraredSensor()\n self.ir_sensor.mode = self.ir_sensor.MODE_IR_REMOTE\n self.color_sensor = ColorSensor()\n self.color_sensor.mode = 'COL-COLOR' # WHITE\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()", "def __init__(self):\n super().__init__()\n self.indices_dir = ''\n self.split_file = ''\n\n self.model = '' # string identifying the model\n self.experiment = '' # string to describe experiment\n self.maps = [data.ID_MAP_T1H2O, data.ID_MAP_FF, data.ID_MAP_B1] # the used maps\n self.patch_size = [1, 32, 32]\n\n # training configuration\n self.loss = 'mse' # string identifying the loss function (huber, mse or mae)\n self.learning_rate = 0.01 # the learning rate\n self.dropout_p = 0.2\n self.norm = 'bn' # none, bn\n\n # we use the mean absolute error as best model score\n self.best_model_score_is_positive = True\n self.best_model_score_name = 'mae'", "def __init__(self, path, model_index=1):\n self.shape_total_df = _load_neutron_total_shape(path)\n self.shape_dict_mcnp = None\n self.shape_df_mcnp = None\n self.shape_df_mcnp_norm = None\n self.shape_df_interp = None\n self.shape_tof_df_interp = None\n self.shape_tof_df_dir = None\n\n self.result_shape_fit = None\n self.param_df_dir = None\n self.param_df = None\n self.linear_df = None\n self.linear_df_dir = None\n self.model_param_names = None\n self.e_min = None\n self.e_max = None\n self.t_us_mcnp = None\n self.t_us_conv_proton = np.linspace(t_min_us, t_max_us, t_nbr).round(3)\n self.result_neutron_folder = None\n self._energy_list = None\n self._energy_list_dropped = None\n\n self.model_map = {1: 'ikeda_carpenter',\n 2: 'cole_windsor',\n 3: 'pseudo_voigt',\n 4: 'ikeda_carpenter_jparc',\n 5: 'cole_windsor_jparc',\n }\n self.model_index = None\n self.model_used = None\n self.model = None\n self.__set_model(model_index)\n\n if self.result_neutron_folder is None:\n self.result_neutron_folder = self._check_and_make_subdir('result', 'neutron_pulse', self.model_used)\n\n self.proton_pulse = ProtonPulse(path=proton_path)", "def __init__(self):\n\n super().__init__(\n filter_models=[\n DoorKalmanFilter(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=DoorVirtualSensorModel(modalities={\"image\"}),\n ),\n DoorKalmanFilter(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=DoorVirtualSensorModel(\n modalities={\"pos\", \"sensors\"}\n ),\n ),\n ],\n state_dim=3,\n )" ]
[ "0.60846996", "0.59922695", "0.59226173", "0.57868224", "0.5778244", "0.5775409", "0.5774516", "0.5749749", "0.5731533", "0.570221", "0.5688919", "0.5684113", "0.5678075", "0.56579274", "0.5642495", "0.5609805", "0.5589588", "0.55426383", "0.5534838", "0.5527638", "0.54799986", "0.54585123", "0.5445937", "0.5445635", "0.54405063", "0.5426208", "0.5425944", "0.5425458", "0.54218477", "0.5414025" ]
0.7724515
1
Update satellite state obtained from orbit propagation. This method should be called before each attitude integration step! It updates internal variables needed for disturbance torque computation.
def update_satellite_state(self, integration_date): self.in_date = integration_date self.spacecraft_state = self.state_observer.spacecraftState self.satPos_i = self.spacecraft_state.getPVCoordinates().getPosition() self.satVel_i = self.spacecraft_state.getPVCoordinates().getVelocity()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_satellite_state(self, current_date):\n self.in_date = current_date\n self.spacecraft_state = self.state_observer.spacecraftState\n\n self.satPos_i = self.spacecraft_state.getPVCoordinates().getPosition()\n self.satVel_i = self.spacecraft_state.getPVCoordinates().getVelocity()", "def update_state(self):\n self.last_position = self.current_position\n self.last_distance = self.current_distance\n self.last_collision_time_stamp = self.current_collision_time_stamp\n self.current_kinematics = self.airsim_client.simGetGroundTruthKinematics(vehicle_name=self.drone_name)\n self.current_position = self.current_kinematics.position + self.base_offset\n self.current_collision_time_stamp = self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name).time_stamp\n # print(\"DEBUG: simGetCollisionInfo:\", self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name))\n # self.pending_death = self.airsim_client.simIsRacerDisqualified(vehicle_name=self.drone_name)\n self.objective_status = self.current_objective.next_gate_status(self.last_position, self.current_position)\n if self.objective_status == GateStatus.CROSSED or self.objective_status == GateStatus.PASSED:\n if self.switch_to_next_objective(): # if track is finished (changes self.last_distance)\n self.track_complete = True\n self.current_distance = self.current_position.distance_to(self.current_objective.gate_pose.position)", "def update(self, dt):\n # get normal random iscipyut\n u = self.prng.normal(size=(1, self._x.shape[1]))\n # calculate state time derivative with state space equation\n dx_dt = self._A.dot(self._x) + self._B * u\n # apply update with Euler integration\n self._x += dx_dt * dt", "def update(self):\n\n SolidSolver.update(self)\n\n self.__nextStep()", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def synchronize_state(self):\n theta = self.unicycle_state[:,2]\n v = 
self.unicycle_state[:,3]\n self.position[:, :2] = self.unicycle_state[:,:2]\n self.orientation[:,2] = theta\n vx = v * np.cos(theta)\n vy = v * np.sin(theta)\n\n self.velocity[:, 0] = vx\n self.velocity[:, 1] = vy\n\n self.angular_velocity[:, 2] = self.unicycle_state[:, 4]", "def _update_(self):\n self._update_distance_()\n self._check_literature_name_()", "def temp_update(self):\n a_w = self.k / self.dx\n a_e = self.k / self.dx\n a_n = self.k / self.dy\n a_s = self.k / self.dy\n a_p = a_w + a_e + a_n + a_s + self.rho * self.cp * self.dx / self.dt\n for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):\n self.T[i,\n j] = (a_w * self.T[i - 1, j] + a_e * self.T[i + 1, j] +\n a_s * self.T[i, j - 1] + a_n * self.T[i, j + 1]) / a_p", "def update(self):\n self.brain.update()", "def update_state(self, a, obs, t):\n \n self.update_weights(a, obs, t) # only update weights, not particles \n self.update_running_average_weights(t) \n return None", "def _update_loc(self) -> None:\n self.state[:, :, Boids.Attr.LOC] += self.state[:, :, Boids.Attr.VEL]\n # wrap-around the simulated environment\n self.state[:, :, Boids.Attr.LOC] %= np.expand_dims(self.env_bounds, axis=1)", "def updateState(self):\n\t\t# ask for current pose data\n\t\tcomm.write(b'id1 mav.pose_sensor get_local_data \\n')\n\t\t# update x value\n\t\tcomm.read_until(b'\"x\": ') # b'' as Telnet needs a bytes object instead of string since Python3\n\t\tread = comm.read_until(b',') # returns read values + finishing ','\n\t\tread = read[:-1] # cut that ','\n\t\tcurrent_state.x = float(read)\n\t\tself.state_x_label.set_text(\"%0.2f\" % current_state.x)\n\t\t# update y value\n\t\tcomm.read_until(b'\"y\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y = float(read)\n\t\tself.state_y_label.set_text(\"%0.2f\" % current_state.y)\n\t\t# update z value\n\t\tcomm.read_until(b'\"z\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.z = float(read)\n\t\tself.state_z_label.set_text(\"%0.2f\" % current_state.z)\n\t\t# update yaw value\n\t\tcomm.read_until(b'\"yaw\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.psi = float(read)\n\t\tself.state_psi_label.set_text(\"%0.2f\" % current_state.psi)\n\t\t# update pitch value\n\t\tcomm.read_until(b'\"pitch\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.theta = float(read)\n\t\tself.state_theta_label.set_text(\"%0.2f\" % current_state.theta)\n\t\t# update roll value\n\t\tcomm.read_until(b'\"roll\": ')\n\t\tread = comm.read_until(b'}')\n\t\tread = read[:-1]\n\t\tcurrent_state.phi = float(read)\n\t\tself.state_phi_label.set_text(\"%0.2f\" % current_state.phi)\n\n\t\t# ask for current velocity data\n\t\tcomm.write(b'id1 mav.velocity_sensor get_local_data \\n')\n\t\t# update p value\n\t\tcomm.read_until(b'\"angular_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.p = float(read)\n\t\tself.state_p_label.set_text(\"%0.2f\" % current_state.p)\n\t\t# update q value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.q = float(read)\n\t\tself.state_q_label.set_text(\"%0.2f\" % current_state.q)\n\t\t# update r value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.r = float(read)\n\t\tself.state_r_label.set_text(\"%0.2f\" % current_state.r)\n\n\t\t# update x_dot value\n\t\tcomm.read_until(b'\"world_linear_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.x_dot = 
float(read)\n\t\tself.state_x_dot_label.set_text(\"%0.2f\" % current_state.x_dot)\n\t\t# update y_dot value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y_dot = float(read)\n\t\tself.state_y_dot_label.set_text(\"%0.2f\" % current_state.y_dot)\n\t\t# update z_dot value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.z_dot = float(read)\n\t\tself.state_z_dot_label.set_text(\"%0.2f\" % current_state.z_dot)\n\n\t\t# update first waypoint for trajectory in GUI\n\t\twaypoints_gui[0] = [current_state.x, current_state.y, current_state.z, current_state.psi]\n\n\t\treturn GLib.SOURCE_CONTINUE", "def update_vehicle_state(self):\n sim_timestep = 1. / self.simulation_rate\n # Decompose v into x and y component.\n if self.v != self.commands['speed']:\n self.v = self.commands['speed']\n vx = numpy.cos(self.yaw) * self.v\n vy = numpy.sin(self.yaw) * self.v\n # Update vehicles position\n self.x += vx * sim_timestep\n self.y += vy * sim_timestep\n self.yaw += ((self.v / self.axles_distance) *\n numpy.tan(self.commands['steering_angle']) *\n sim_timestep)\n # Make sure self.yaw is never negative.\n # self.yaw 0..2pi\n if self.yaw > 2*numpy.pi:\n self.yaw = 0.\n elif self.yaw < 0.:\n self.yaw += 2*numpy.pi", "def update_satellite_state(self, current_date):\n pass", "def update_state(self, a, delta):\n # input check\n if delta >= MAX_STEER:\n delta = MAX_STEER\n elif delta <= -MAX_STEER:\n delta = -MAX_STEER\n\n self.x = self.x + self.v * np.cos(self.yaw) * DT\n self.y = self.y + self.v * np.sin(self.yaw) * DT\n self.yaw = self.yaw + self.v / self.WB * np.tan(delta) * DT\n self.v = self.v + a * DT\n\n if self.v > MAX_SPEED:\n self.v = MAX_SPEED\n elif self.v < MIN_SPEED:\n self.v = MIN_SPEED\n\n return self", "def update(self):\n if (not self._run) or (not self.IA.is_loaded()):\n return\n self.IA.BG_MAP.update(speed=self.speed)\n self.IA.O_ATUAL.update()\n self._desintegrator.update()", "def update(self):\n self._ba_attrs = self._api.get_current_data_point(self._ba_uuid)\n self._state = self._ba_attrs[\"temperature\"]", "def update(self,dt):\n t1 = time()\n\n if SPLIT:\n self.check_refine()\n if AMALGAMATE:\n self.check_amalg(self.nl_default)\n\n t = time()\n self.rebuild_lists()\n self.timing['nlist rebuild time'] = time() - t\n\n # Is this derivative step required?\n t = time()\n self.derivatives()\n self.timing['deriv time'] = time() - t\n \n t = time()\n self.step(self.gather_state,self.derivatives, \\\n self.gather_derivatives,self.scatter_state,dt)\n self.timing['integrate time'] = time() - t\n \n self.box.apply(self)\n\n if self.thermostat:\n self.apply_thermostat(self.thermostat_temp)\n \n self.timing['update time'] = time() - t1\n self.steps += 1", "def update(self):\n self.arest.update()", "def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_target.parameters(), self.q_network_local.parameters()):\n p_target.data.copy_(tau * p_local.data + (1.0-tau) * p_target.data)", "def _update_positions(self):\n self._velocities += self._accelerations * self.time_step\n self._positions += self._velocities * self.time_step", "def update(self, tfinal):\n t = 0; kk = 0\n nstep = int(np.round(tfinal/self.dt))+1 # number of time steps\n self.omega = np.zeros((nstep,self.npts))\n self.theta = np.zeros((nstep,self.npts))\n\n while t <(tfinal+1e-10):\n self.return_map()\n self.omega[kk] = self.y[0]\n self.theta[kk] = self.y[1]\n\n self.y = RK4(self.y, self.dt, t, self.deri)\n kk += 1; t += self.dt\n\n return self", "def 
_update(self):\n self._execute_lane_changes()\n self._execute_forward_movement()", "def update(self):\n # GPS data\n self.model.GPS_latitude.set(self._kernel.data.lat)\n self.model.GPS_longitude.set(self._kernel.data.lon)\n \n self.model.GPS_heading.set(self._kernel.data.gps_heading)\n self.model.GPS_speed.set(self._kernel.data.speed)\n self.model.GPS_altitude.set(self._kernel.data.altitude)\n \n self.model.GPS_fix.set(self._kernel.data.fix)\n self.model.GPS_satellite_count.set(self._kernel.data.num_sat)\n \n # compass data\n self.model.compass_heading.set(self._kernel.data.compass_heading)\n \n # time data\n self.model.time.set(self._kernel.data.timestamp.isoformat())\n self.model.date.set(self._kernel.data.datestamp.isoformat())\n \n # other data\n self.model.temperature.set(self._kernel.data.temperature)", "def update(self):\n if self.api is None:\n return\n self.api.update()\n\n if self.var_type == 'Time':\n self.var_state = self.api.result['timeRelease']\n return\n\n result = self.api.result[self.var_period.lower()]\n if self.var_type == 'Sky':\n sky = result['sky']\n self.var_state = sky['name']\n self.var_icon = get_sky_icon(sky['code'])\n else:\n temp = result['temperature']\n if self.var_detail == 'Max':\n self.var_state = round(float(temp['tmax']), 1)\n else:\n self.var_state = round(float(temp['tmin']), 1)", "def _update(self):\n # update current position based on speed\n distance = self.speed * self.update_period\n result = great_circle(distance=distance,\n azimuth=self._ahrs.heading,\n latitude=self._current_location.lat,\n longitude=self._current_location.lng)\n self._current_location = Point(result['latitude'], result['longitude'])\n self._gps.lat = self._current_location.lat\n self._gps.lng = self._current_location.lng\n\n if self.target_waypoint and not self.arrived:\n # update compass heading if we have a target waypoint\n self._ahrs.heading = heading_to_point(self._current_location,\n self.target_waypoint)\n # check if we have hit our target\n if self.distance_to_target <= self.TARGET_DISTANCE:\n try:\n # if there are waypoints qued up keep going\n self.move_to_waypoint(self.waypoints.popleft())\n except IndexError:\n # otherwise we have arrived\n self.arrived = True\n self.speed = 0\n logger.info('Arrived at Waypoint({}, {})'.format(self.target_waypoint.lat,\n self.target_waypoint.lng))\n\n else:\n # update heading and speed based on motor speeds\n self.speed = (self._left_motor.speed + self._right_motor.speed) // 2\n self._ahrs.heading += ((self._left_motor.speed - self._right_motor.speed) / 10)\n self._ahrs.heading = abs(self._ahrs.heading % 360)", "def update_location(self, loc, dt): #pylint: disable=invalid-name\n self.observer.date = dt\n self.sat.compute(self.observer)\n loc.az = float(self.sat.az)\n loc.al = float(self.sat.alt)", "def update_target_net(self):\n if self.n_steps % self.target_update_interval == 0:\n self.target_q.load_state_dict(self.working_q.state_dict())", "def update(self):\n self._state = None\n self._attributes = {}\n\n vehicles = self._api.get_vehicles(self._latitude, self._longitude)\n scooter = {}\n\n if vehicles:\n for vehicle in vehicles:\n location_vehicle = (vehicle[\"location\"][0], vehicle[\"location\"][1])\n location_hass = (self._latitude, self._longitude)\n vehicle[\"distance\"] = distance(location_vehicle, location_hass).m\n\n scooter = sorted(vehicles, key=lambda item: item[\"distance\"])[0]\n\n if scooter:\n self._state = round(scooter[\"distance\"])\n self._attributes[ATTR_LATITUDE] = round(scooter[\"location\"][0], 5)\n 
self._attributes[ATTR_LONGITUDE] = round(scooter[\"location\"][1], 5)\n self._attributes[ATTR_BATTERY_LEVEL] = round(scooter[\"battery\"])\n self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION", "def _update_step(self, *, observations: types.ObservationsTorch) -> None:" ]
[ "0.6536202", "0.6313927", "0.61441183", "0.6109408", "0.59719265", "0.5909231", "0.59028345", "0.5894888", "0.58819085", "0.58797926", "0.5865231", "0.58123153", "0.58112884", "0.5784101", "0.5752916", "0.57316554", "0.57180375", "0.5714949", "0.5713429", "0.570852", "0.56988806", "0.569871", "0.5696352", "0.5680584", "0.5667327", "0.56262386", "0.56115913", "0.56006455", "0.5589646", "0.55569726" ]
0.66748387
0
Compute disturbance torques acting on the satellite. This method computes the disturbance torques that are set active in the satellite's settings file.
def compute_torques(self, rotation, omega, dt): # shift time from integration start to time of attitude integration step curr_date = self.in_date.shiftedBy(dt) self.inertial2Sat = rotation self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i) self.satPos_s = np.array([self.satPos_s.x, self.satPos_s.y, self.satPos_s.z], dtype='float64') self._compute_gravity_torque(curr_date) self._compute_magnetic_torque(curr_date) self._compute_solar_torque(curr_date) self._compute_aero_torque(curr_date, omega) return self._gTorque.add( self._mTorque.add( self._sTorque.add( self._aTorque)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_torques(self, rotation, omega, dt):\n # shift time @ which attitude integration currently is\n try:\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)\n omega = Vector3D(float(omega[0]), float(omega[1]), float(omega[2]))\n\n self._compute_gravity_torque(curr_date)\n self._compute_magnetic_torque(curr_date)\n self._compute_solar_torque(curr_date)\n self._compute_aero_torque(curr_date, omega)\n\n # external torque has to be set separately because it is received\n # through a ros subscriber\n return self._gTorque.add(\n self._mTorque.add(\n self._sTorque.add(\n self._aTorque)))\n except Exception:\n print traceback.print_exc()\n raise", "def calculate_impedance_torques(self, position_error, orientation_error):\n desired_force = (np.multiply(np.array(position_error), np.array(self.impedance_kp[0:3]))\n - np.multiply(np.array(self.current_lin_velocity), self.impedance_kv[0:3]))\n\n desired_torque = (np.multiply(np.array(orientation_error), np.array(self.impedance_kp[3:6]))\n - np.multiply(np.array(self.current_ang_velocity), self.impedance_kv[3:6]))\n\n uncoupling = True\n if (uncoupling):\n decoupled_force = np.dot(self.lambda_x_matrix, desired_force)\n decoupled_torque = np.dot(self.lambda_r_matrix, desired_torque)\n decoupled_wrench = np.concatenate([decoupled_force, decoupled_torque])\n else:\n desired_wrench = np.concatenate([desired_force, desired_torque])\n decoupled_wrench = np.dot(self.lambda_matrix, desired_wrench)\n\n torques = np.dot(self.J_full.T, decoupled_wrench)\n\n if self.initial_joint is not None:\n # TODO where does 10 come from?\n joint_kp = 10\n joint_kv = np.sqrt(joint_kp) * 2\n pose_torques = np.dot(self.mass_matrix, (joint_kp * (\n self.initial_joint - self.current_joint_position) - joint_kv * self.current_joint_velocity))\n nullspace_torques = np.dot(self.nullspace_matrix.transpose(), pose_torques)\n torques += nullspace_torques\n self.torques = torques\n\n return torques", "def _compute_pd_torques(\n self,\n desired_motor_angles: np.ndarray,\n kp: np.ndarray,\n desired_motor_velocities,\n kd: np.ndarray,\n ) -> Tuple[np.ndarray, np.ndarray]:\n motor_angles, motor_velocities = self.get_motor_states()\n motor_torques = -kp * (motor_angles - desired_motor_angles) - kd * (\n motor_velocities - desired_motor_velocities)\n\n return motor_torques", "def _compute_solar_torque(self):\n pass", "def torque(system, /, use_demag=True):\n if use_demag:\n total_field = (mm.consts.mu0 *\n (oc.compute(system.energy.demag.effective_field, system)\n + system.energy.zeeman.H))\n else:\n total_field = mm.consts.mu0 * np.array(system.energy.zeeman.H)\n norm_field = df.Field(system.m.mesh, dim=1,\n value=(system.m.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n moment = system.m * volume\n torque = (moment & total_field)\n return (df.integral(torque * df.dV / volume**2, direction='xyz'))", "def compute_torques(self, caller):\n if caller == 'pose':\n self.pose_call = True\n if caller == 'vel':\n self.vel_call = True\n #If both vels and poses has called compute torques\n if self.pose_call and self.vel_call:\n #Reset checkers\n self.pose_call = False\n self.vel_call = False\n #Vels and poses\n # print \"Heard:\"\n # print \" \".join(str(n) for n in self.joints_vels)\n # print \" \".join(str(n) for n in self.joints_poses)\n #Compute B g and C matrices\n array_vels = np.asarray(self.joints_vels)[np.newaxis].T\n array_poses = 
np.asarray(self.joints_poses)[np.newaxis].T\n # print(\"array_vels\")\n # print(array_vels[2:4])\n # print(\"array_poses\")\n # print(array_poses[2:4])\n err_vels = array_vels[1:4] - self.target_vel\n err_poses = array_poses[1:4] - self.target_pose\n print(\"velocity error:\")\n print(err_vels)\n print(\"position error:\")\n print(err_poses)\n B = np.matrix([[0.0040055721446399998476906034738931*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.0013481452371199999142570291610355*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.011671172651879999466092491395841*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0039281369187899997198368480111341*sin(self.joints_poses[2]) + 0.042812399753418998939427354098797,\\\n 0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0058355863259399997330462456979205*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0019640684593949998599184240055671*sin(self.joints_poses[2]) + 0.01625959562072499985284632093574,\\\n 0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171],\\\n [0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0058355863259399997330462456979205*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0019640684593949998599184240055671*sin(self.joints_poses[2]) + 0.01625959562072499985284632093574,\\\n 0.0040085638208*cos(self.joints_poses[3]) + 0.01618298062072499985284632093574,\\\n 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171],\n [0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171,\\\n 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171,\\\n 0.0026403112045896820614231443819367]])\n\n C = np.matrix([[- 0.176*self.joints_vels[3]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])) - 1.0*self.joints_vels[2]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])),\\\n - 0.176*self.joints_vels[3]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])) - 1.0*self.joints_vels[1]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])) - 
1.0*self.joints_vels[2]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])),\\\n -0.176*(self.joints_vels[1] + self.joints_vels[2] + self.joints_vels[3])*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3]))],\\\n [self.joints_vels[1]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])) - 0.0020042819104*self.joints_vels[3]*sin(self.joints_poses[3]),\\\n -0.0020042819104*self.joints_vels[3]*sin(self.joints_poses[3]),\\\n -0.0020042819104*sin(self.joints_poses[3])*(self.joints_vels[1] + self.joints_vels[2] + self.joints_vels[3])],\\\n [0.0020042819104*self.joints_vels[2]*sin(self.joints_poses[3]) + 0.176*self.joints_vels[1]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])),\\\n 0.0020042819104*sin(self.joints_poses[3])*(self.joints_vels[1] + self.joints_vels[2]),0]])\n\n g = np.array([[0.69474494555999997358275432901564*cos(self.joints_poses[1]) + 0.21649055273999998623105089912144*sin(self.joints_poses[1]) + 0.40336448984999999688544018994207*cos(self.joints_poses[1])*cos(self.joints_poses[2]) - 0.40336448984999999688544018994207*sin(self.joints_poses[1])*sin(self.joints_poses[2]) + 0.1384355808*cos(self.joints_poses[1])*cos(self.joints_poses[2])*cos(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[1])*sin(self.joints_poses[2])*sin(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[2])*sin(self.joints_poses[1])*sin(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[3])*sin(self.joints_poses[1])*sin(self.joints_poses[2])],\\\n [0.1384355808*cos(self.joints_poses[1] + self.joints_poses[2] + self.joints_poses[3]) + 0.40336448984999999688544018994207*cos(self.joints_poses[1] + self.joints_poses[2])],\\\n [ 0.1384355808*cos(self.joints_poses[1] + self.joints_poses[2] + self.joints_poses[3])]])\n #Compute control torque\n control_from_errors = self.target_acc -np.dot(self.KD, err_vels) - np.dot(self.KP, err_poses)\n print(\"Derivative contribution: \")\n print(np.dot(self.KD, err_vels))\n print(\"proportional contribution: \")\n print(np.dot(self.KP, err_poses))\n control_torque = np.dot(C, self.target_vel) + g + np.dot(B, control_from_errors)\n print(\"Torques: \")\n print(control_torque)\n #Create ROS message\n self.torques.layout.dim = [self.torques_layout]\n # self.torques.layout.dim.size = 6\n # self.torques.layout.dim.stride = 1\n self.torques.layout.data_offset = 0\n self.torques.data = [0.0, control_torque[0], control_torque[1], control_torque[2], 0.0, 0.0]\n self.torque_pub.publish(self.torques)", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n inertial2Sat = self.spacecraft_state.getAttitude().getRotation()\n\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = 
inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n self._sTorque = Vector3D.ZERO\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'],\n self.meshDA['Coefs'])\n\n for CoM, normal, area, coefs in iterator:\n position = self.satPos_s.add(CoM)\n\n # compute flux in inertial frame\n sunSatVector = \\\n position.subtract(sunPos)\n r2 = sunSatVector.getNormSq()\n\n rawP = ratio * self.K_REF / r2\n flux = Vector3D(rawP / sqrt(r2), sunSatVector)\n\n # compute Radiation Pressure Force:\n if flux.getNormSq() > Precision.SAFE_MIN:\n # illumination (we are not in umbra)\n # rotate flux to spacecraft frame:\n dot = self.V3_dot(normal, flux)\n\n if dot > 0:\n # the solar array is illuminated backward,\n # fix signs to compute contribution correctly\n dot = -dot\n normal = normal.negate()\n absorbCoeff = coefs[0]\n specularReflCoeff = coefs[1]\n diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff)\n try:\n assert(diffuseReflCoeff >= 0)\n except AssertionError:\n raise AssertionError(\n \"Negative diffuse reflection coefficient not possible!\")\n psr = flux.getNorm()\n # Vallado's equation uses different parameters which are\n # related to our parameters as:\n # cos (phi) = - dot / (psr*area)\n # n = N (n...unit vector)\n # s = -fluxSat / psr (s...unit vector)\n cN = 2 * area * dot * (diffuseReflCoeff / 3 -\n specularReflCoeff * dot / psr)\n cS = (area * dot / psr) * (specularReflCoeff - 1)\n Force = Vector3D(float(cN), normal, float(cS), flux)\n # Force already in spacecraft frame. No need to convert\n self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force))\n\n else:\n self._sTorque = Vector3D.ZERO", "def turbulence(*args, attenuation: Union[float, bool]=0.0, frequency: Union[float, bool]=0.0,\n magnitude: Union[float, bool]=0.0, maxDistance: Union[float, bool]=0.0, name:\n Union[AnyStr, bool]=\"\", noiseLevel: Union[int, bool]=0, noiseRatio: Union[float,\n bool]=0.0, perVertex: bool=True, phase: Union[float, bool]=0.0, phaseX:\n Union[float, bool]=0.0, phaseY: Union[float, bool]=0.0, phaseZ: Union[float,\n bool]=0.0, position: Union[List[float, float, float], List[List[float, float,\n float]], bool]=None, torusSectionRadius: Union[float, bool]=0.0,\n volumeExclusion: bool=True, volumeOffset: Union[List[float, float, float],\n bool]=None, volumeShape: Union[AnyStr, bool]=\"\", volumeSweep: Union[float,\n bool]=0.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def calculate_all_distances(self):\n self.close_distance = self.calculate_distance(self.close_distance_factor)\n self.medium_distance = self.calculate_distance(self.medium_distance_factor)\n self.far_distance = self.calculate_distance(self.far_distance_factor)", "def get_motor_torques(\n self,\n motor_commands: np.ndarray,\n motor_control_mode=None) -> Tuple[np.ndarray, np.ndarray]:\n if not motor_control_mode:\n motor_control_mode = self._motor_control_mode\n\n motor_torques = None\n\n if motor_control_mode is robot_config.MotorControlMode.TORQUE:\n motor_torques = motor_commands\n\n if motor_control_mode is robot_config.MotorControlMode.POSITION:\n motor_torques = self._compute_pd_torques(\n desired_motor_angles=motor_commands,\n kp=self._kp,\n desired_motor_velocities=self._zero_array,\n kd=self._kd)\n \n if motor_torques is None:\n raise ValueError(\n \"{} is not a supported motor control mode\".format(motor_control_mode))\n\n # Apply the output filter to model actuator dynamics\n # BUG: Causes big instability in 
the sim\n # motor_torques = self._torque_filter(motor_torques)\n\n # Hard-code torque limits until the torque limit bug is fixed\n motor_torques = np.clip(motor_torques, -1.7, 1.7)\n\n # Apply motor damping and friction\n motor_torques -= (np.sign(self._previous_true_motor_velocity) *\n self._motor_torque_dependent_friction *\n motor_torques)\n motor_torques -= self._previous_true_motor_velocity * self._motor_damping\n\n # Rescale and clip the motor torques as needed.\n motor_torques = self._strength_ratios * motor_torques\n if (self._torque_lower_limits is not None or\n self._torque_upper_limits is not None):\n motor_torques = np.clip(motor_torques, self._torque_lower_limits,\n self._torque_upper_limits)\n\n return motor_torques, motor_torques", "def torque(cls):\n jobids = [calc.jobid() for calc in vasp.Vasp.calculators]\n\n qstat = ['[[shell:qstat {}][{}]]'.format(jobid, jobid)\n for jobid in jobids]\n qdel = ['[[shell:qdel {}][qdel]]'.format(jobid)\n for jobid in jobids]\n\n dirs = [calc.directory\n for calc in vasp.Vasp.calculators]\n\n s = '[[shell:xterm -e \"cd {}; ls && /bin/bash\"][{}]]'\n xterm = [s.format(d, os.path.relpath(d))\n for d in dirs]\n\n s = '[[elisp:(find-file \"{}\")][dired]]'\n dired = [s.format(d)\n for d in dirs]\n\n return '\\n'.join(['| {0} {1} | {2} | {3} |'.format(xt, dd, qs, qd)\n for xt, qs, qd, dd in zip(xterm, qstat, qdel, dired)])", "def motor_torques(self):\n raise NotImplementedError('Not yet implemented!')", "def setup_tacs_problems(self, comm):\n # Overwrite default check values\n if self.dtype == complex:\n self.rtol = 1e-8\n self.atol = 1e-3\n self.dh = 1e-50\n else:\n self.rtol = 1e-1\n self.atol = 1e-4\n self.dh = 1e-5\n\n # Instantiate FEA Assembler\n fea_assembler = pytacs.pyTACS(bdf_file, comm)\n\n # Set up constitutive objects and elements\n fea_assembler.initialize()\n\n # set transient problem options\n transientOptions = {\"timeIntegrator\": \"DIRK\", \"integrationOrder\": DIRK_order}\n\n # get some problem info\n n_vpn = fea_assembler.getVarsPerNode()\n\n # Create coarse load-specified transient problem\n coarse_prob = fea_assembler.createTransientProblem(\n name=\"load_coarse\",\n tInit=0.0,\n tFinal=1.0,\n numSteps=8,\n options=transientOptions,\n )\n # Create fine load-specified transient problem\n fine_prob = fea_assembler.createTransientProblem(\n name=\"load_fine\",\n tInit=0.0,\n tFinal=1.0,\n numSteps=32,\n options=transientOptions,\n )\n load_probs = [coarse_prob, fine_prob]\n\n for prob in load_probs:\n forces = np.zeros(n_vpn)\n ns = prob.getNumTimeSteps()\n for k in range(ns + 1):\n t_array = prob.getTimeStages(k)\n for s, t in enumerate(t_array):\n f = f_mag * t**5\n forces[2] = f # applied to z-direction\n prob.addLoadToNodes(\n timeStep=k,\n timeStage=s,\n nodeIDs=21,\n F=forces,\n nastranOrdering=True,\n )\n\n for problem in load_probs:\n problem.addFunction(\"mass\", functions.StructuralMass)\n problem.addFunction(\n \"ks_disp\",\n functions.KSDisplacement,\n direction=[0.0, 0.0, 100.0],\n ftype=\"discrete\",\n )\n\n return load_probs, fea_assembler", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = self.inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n sunPos = np.array([sunPos.x, sunPos.y, sunPos.z], dtype='float64')\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = self.meshDA['Area_np']\n coefs = self.meshDA['Coefs_np']\n\n 
sunSatVector = self.satPos_s + CoM - sunPos\n r = np.linalg.norm(sunSatVector, axis=1)\n rawP = ratio * self.K_REF / (r**2)\n flux = (rawP / r)[:, None] * sunSatVector\n # eliminate arrays where zero flux\n fluxNorm = np.linalg.norm(flux, axis=1)\n Condflux = fluxNorm**2 > Precision.SAFE_MIN\n flux = flux[Condflux]\n normal = normal[Condflux]\n\n # dot product for multidimensional arrays:\n dot = np.einsum('ij,ij->i', flux, normal)\n dot[dot > 0] = dot[dot > 0] * (-1.0)\n if dot.size > 0:\n normal[dot > 0] = normal[dot > 0] * (-1.0)\n\n cN = 2 * area * dot * (coefs[:, 2] / 3 - coefs[:, 1] * dot / fluxNorm)\n cS = (area * dot / fluxNorm) * (coefs[:, 1] - 1)\n force = cN[:, None] * normal + cS[:, None] * flux\n\n sT = np.sum(np.cross(CoM, force), axis=0)\n\n self._sTorque = Vector3D(float(sT[0]), float(sT[1]), float(sT[2]))\n\n else:\n self._sTorque = Vector3D.ZERO", "def turbines(self):\n return self.turbine_map.turbines", "def tora(self) -> typing.Union[None, typing.List[int]]:\n return self.distances('TORA')", "def computeForces(self, neighbors=[]): #computing forces to drive the agents and avoid collisions \n if not self.atGoal:\n if self.entry_state % 2 == 0 and len(self.entrancex) > 0 and self.id != 4 : #checks if assigned curve is entry and switches to state 1 to follow entry bezier curve\n time2=0.5 # time used to calculate driving force \n self.local_goal = [self.entrancex[0], self.entrancey[0]] #assigning waypoint as goal\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2))) #calculating direction vector\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez) #driving force\n self.entrancex = np.delete(self.entrancex,0) #eliminating the used waypoints from the list \n self.entrancey = np.delete(self.entrancey,0) #eliminating the used waypoints from the list \n \n elif self.force_state == 1 and (abs(self.pos[0] - self.goal[0]) >400 or abs(self.pos[1] - self.goal[1]) >400): #checks if force-based navigation is assigned, switches to state 2\n self.F = (self.gvel-self.vel)/self.ksi #driving force\n for neighbor in neighbors:\n if neighbor.id != self.id: #and not neighbor.atGoal: \n distSq = (neighbor.pos-self.pos).dot(neighbor.pos-self.pos)\n #print(distSq, self.dhorSq)\n if distSq < self.dhorSq: # neighbor is inside the sensing radius\n tau = self.ttc(neighbor)\n #print(tau, self.timehor)\n if tau < self.timehor: # will the two agents collide in less than timehor?\n dir = self.pos + self.vel*tau - neighbor.pos - neighbor.vel*tau \n length = sqrt(dir.dot(dir))\n if length > 0:\n dir = dir/length # the direction of the force\n mag = (self.timehor - tau)/(tau + 1e-6) # the magnitude of the force\n self.F += mag*dir # add the force\n \n else: #state 3 - following the exit bezier curve\n time2=0.5 # time used to calculate driving force\n self.local_goal = [self.exitx[0], self.exity[0]]\n if abs(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos)))) >10: #to reach first point of exit curve from agents previous state position\n self.F = ((self.local_goal - self.pos)/(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos) )))*self.prefspeed)/self.ksi\n else:\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2)))\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez)\n 
#print(self.pos, self.local_goal)\n if len(self.exitx) > 1 :\n self.exitx = np.delete(self.exitx,0)\n self.exity = np.delete(self.exity,0)", "def compute_duty_factor():\n [time,\n ankle_l_trajectory,\n ankle_r_trajectory,\n foot_l_contact,\n foot_r_contact,\n muscle_lh_activations,\n muscle_rh_activations,\n muscle_lh_forces,\n muscle_rh_forces,\n joint_lh_positions,\n joint_rh_positions] = load_data()\n \n print(np.sum(foot_l_contact)/len(foot_l_contact))\n print(np.sum(foot_r_contact)/len(foot_r_contact))\n\n return np.sum(foot_l_contact)/len(foot_l_contact)*0.5 + np.sum(foot_r_contact)/len(foot_r_contact)*0.5", "def correction(self):\r\n \r\n # empirical coefficients:\r\n k3, k2, k1, k0 = 0.0892, 0.0544, 0.2511, -0.0017\r\n \r\n # thrust as a function of the azimuth angle and the loads:\r\n thrust = self.qn*np.sin(Turbine.t) + self.qt*np.cos(Turbine.t)\r\n \r\n # interpolator function for the thrust:\r\n function = interp1d(Turbine.t, thrust, kind='cubic')\r\n \r\n # vectorize the function so that it takes an array of angles:\r\n __function__ = np.vectorize(function)\r\n \r\n # thrust coefficient integrating according to phi:\r\n self.cth = simps(__function__(Turbine.p), Turbine.p)\r\n \r\n # induction factor:\r\n self.a = k3*self.cth**3 + k2*self.cth**2 + k1*self.cth + k0\r\n \r\n # correction factor:\r\n if self.a <= 0.15:\r\n self.ka = 1.0/(1.0 - self.a)\r\n else:\r\n self.ka = (1./(1 - self.a))*(0.65 + 0.35*exp(-4.5*(self.a - 0.15)))", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. 
Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def ransac(cloud_s, cloud_t, \n depth_s, depth_t,\n A_prev, b_prev,\n n_iter, n_inlier_cutoff, d_cutoff):\n import random\n n_s = len(cloud_s)\n n_t = len(cloud_t)\n n_inliers = [0] * n_iter\n# Initialization\n A_init = A_prev\n b_init = b_prev\n pred_t = A_init.dot(cloud_s.T).T + b_init\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n max_inliers = sum(inliers)\n print(\"Have \" + str(n_s) + \" features that could be inliers\")\n print(\"Starting with \" + str(max_inliers) + \" inliers\")\n for iter in range(n_iter):\n assert n_s == n_t, \"clouds not of equal size in ransac()\"\n # TODO: replace this random choice with 3 corresponding feature descriptors\n points_inds = random.sample(range(n_s), 3)\n x_vals = np.array([cloud_s[i] for i in points_inds])\n y_vals = np.array([cloud_t[i] for i in points_inds])\n\n # Using Horn 1987, Closed-form solution of absolute orientation\n # using unit quaternions.\n A_init_tmp, b_init_tmp = horn_adjust(x_vals, y_vals)\n\n # TODO: find inliers to the transformation T\n pred_t = A_init_tmp.dot(cloud_s.T).T + b_init_tmp\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n n_inliers = sum(inliers)\n\n # TODO: do we want to refit on the inliers?\n if n_inliers > max_inliers:\n A_init = A_init_tmp\n b_init = b_init_tmp\n max_inliers = n_inliers\n print(\"Adjusting A and b again!\")\n print(A_init)\n print(b_init)\n\n # TODO: are we using n_inlier_cutoff in this way? Check the paper!\n if max_inliers < n_inlier_cutoff:\n raise Exception('insufficient inliers! Want ' + str(n_inlier_cutoff) +\n ' but got ' + str(max_inliers))\n #max_index = n_inliers.index(max(n_inliers)) \n # Compute the best transformation T_star\n# TODO: actually optimize over the depth field!! 
using spatial.KDTree and spatial.KDTree.query\n# Need to shift depth1XYZ by our initial transformation first\n depth1XYZ = A_init.dot(depth_s.T).T + b_init\n depth2XYZ = depth_t\n tree = spatial.KDTree(depth2XYZ)\n tree_q = tree.query(depth1XYZ)\n# Keep only matches within the cutoff.\n# depth_pair_inds has indeces for depth1XYZ and depth2XYZ\n cutoff = 0.01\n depth_pair_inds = [(i,tree_q[1][i]) for i in range(len(tree_q[0]))\n if tree_q[0][i] < cutoff]\n #depth_cloud_s = np.array([depth1XYZ[k[0]] for k in depth_pair_inds])\n depth_cloud_s = np.array([depth_s[k[0]] for k in depth_pair_inds])\n depth_cloud_t = np.array([depth2XYZ[k[1]] for k in depth_pair_inds])\n\n# A_d = list(range(n_s))\n# A, b = find_argmin_T(cloud_s, cloud_t, A_d,\n# A_init, b_init)\n A_d = list(range(depth_cloud_s.shape[0]))\n A, b = find_argmin_T(depth_cloud_s, depth_cloud_t, A_d,\n A_init, b_init)\n print(\"A_init value:\")\n print(A_init)\n print(\"b_init value:\")\n print(b_init)\n \n print(\"Returning A, b\")\n print(\"A value:\")\n print(A)\n print(\"b value:\")\n print(b)\n print(\"inliers:\")\n print(max_inliers)\n return(A, b)", "def joints_torque(self):\r\n return self._arm.joints_torque", "def doubt_check(self):\n DoubtIndex = 0.0\n DoubtIndex += (np.random.rand() * 0.2 + 0.8) * \\\n (((self.Call - 700) / 300) ** 3)\n\n if self.DiscardCount == 2:\n DoubtIndex += 0.1\n if self.Call >= 990:\n DoubtIndex += 0.6\n\n if self.DiscardCount == 3:\n DoubtIndex += 0.3\n if self.Call >= 900:\n DoubtIndex += 0.3\n\n if DoubtIndex > 0.85:\n self.Doubt = True\n\n return self.Doubt", "def __get_bond_spot_rates__(self):\r\n for T in self.get_maturities():\r\n instrument=self.instruments[T]\r\n (par,coup,price,freq)=instrument\r\n\r\n if coup!=0:\r\n self.zero_rates[T]=self.__calculate_bond_spot_rate__(T,instrument)", "def set_t_FAST(self):\n\t\n\tself.N = 2**7\n\tdt = self.Orbit.Tobs/self.N\n\tself.t = np.linspace(0, self.N-1, self.N)*self.Orbit.Tobs/self.N\n\t\n\treturn", "def constrain_buckling(self, method=1, ms=0.1):\n self.create_dvars()\n eltype = self.elements[0].type\n\n # reading constants\n dtable_E = self.dtables['STRE'][0]\n dtable_nu = self.dtables['STRnu'][0]\n\n if method == 1 and self.profile.lower() == 'z_t':\n # buckling equation\n deqatn = DEQATN(\n 'bf(t, b, h, E, nu, FA) = b-t/2.;'\n 'bw = h-t;'\n 'x = bf/bw;'\n 'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '\n '+ 249.62*x**2 -41.924*x + 6.4545;'\n 'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'\n 'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRZt']\n # reading constants\n dtable_b = self.dtables['STRZb'][0]\n dtable_h = self.dtables['STRZh'][0]\n # building DRESP1 that reads:\n # - axial stress\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n atta = OUTC['STRESS']['CBAR']['Axial']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,\n atta=atta, attb='', atti=eid)\n self.add_dresp(dresp_FA)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id]\n dresp2.dtable = [dtable_b, dtable_h, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_FA.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'z_t_b':\n # buckling equation\n deqatn = DEQATN(\n 'bf(t, b, h, E, nu, FA) = b-t/2.;'\n 'bw 
= h-t;'\n 'x = bf/bw;'\n 'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '\n '+ 249.62*x**2 -41.924*x + 6.4545;'\n 'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'\n 'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRZt']\n dvar_b = self.dvars['STRZb']\n # reading constants\n dtable_h = self.dtables['STRZh'][0]\n # building DRESP1 that reads:\n # - axial stress\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n atta = OUTC['STRESS']['CBAR']['Axial']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,\n atta=atta, attb='', atti=eid)\n self.add_dresp(dresp_FA)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id, dvar_b.id]\n dresp2.dtable = [dtable_h, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_FA.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'z_t_b_h':\n # buckling equation\n deqatn = DEQATN(\n 'bf(t, b, h, E, nu, FA) = b-t/2.;'\n 'bw = h-t;'\n 'x = bf/bw;'\n 'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '\n '+ 249.62*x**2 -41.924*x + 6.4545;'\n 'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'\n 'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRZt']\n dvar_b = self.dvars['STRZb']\n dvar_h = self.dvars['STRZh']\n # building DRESP1 that reads:\n # - axial stress\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n atta = OUTC['STRESS']['CBAR']['Axial']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,\n atta=atta, attb='', atti=eid)\n self.add_dresp(dresp_FA)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id, dvar_b.id, dvar_h.id]\n dresp2.dtable = [dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_FA.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'b_t':\n # buckling equation\n # - considers combined compression + shear\n # - disconsiders bending effects\n # - assumes 3 edges simply supported and one free unloaded edge\n deqatn = DEQATN('kc(t, h, L, E, nu, PC, PS) = 0.456 + (h/L)**2;'\n 'FCcr = kc*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FC = PC/(t*h);'\n 'Rc = FC/FCcr;'\n 'x = L/h;'\n 'ks = 0.0648*x**6 - 1.2338*x**5 + 9.4869*x**4 -'\n '37.697*x**3 + 81.88*x**2 - 93.218*x + 50.411;'\n 'ks = MAX(ks, 5.42);'\n 'FScr = ks*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FS = PS/(t*h);'\n 'Rs = FS/FScr;'\n 'MS = 2./(Rc + SQRT(Rc**2 + 4*Rs**2)) - 1.')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRBt']\n # reading constants\n dtable_h = self.dtables['STRBh'][0]\n dtable_L = self.dtables['STRBL'][0]\n # building DRESP1s that read:\n # - axial force\n # - shear along Plane 1 (y axis)\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n code_PC = OUTC['FORCE']['CBAR']['Axial force']\n code_PS = OUTC['FORCE']['CBAR']['Shear plane 1']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_PC = DRESP1('STRPC', 'FORCE', 'ELEM', region=None,\n 
atta=code_PC, attb='', atti=eid)\n dresp_PS = DRESP1('STRPS', 'FORCE', 'ELEM', region=None,\n atta=code_PS, attb='', atti=eid)\n self.add_dresp(dresp_PC)\n self.add_dresp(dresp_PS)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id]\n dresp2.dtable = [dtable_h, dtable_L, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_PC.id, dresp_PS.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'b_t_h':\n # buckling equation\n # - considers combined compression + shear\n # - disconsiders bending effects\n # - assumes 3 edges simply supported and one free unloaded edge\n deqatn = DEQATN('kc(t, h, L, E, nu, PC, PS) = 0.456 + (h/L)**2;'\n 'FCcr = kc*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FC = PC/(t*h);'\n 'Rc = FC/FCcr;'\n 'x = L/h;'\n 'ks = 0.0648*x**6 - 1.2338*x**5 + 9.4869*x**4 -'\n '37.697*x**3 + 81.88*x**2 - 93.218*x + 50.411;'\n 'ks = MAX(ks, 5.42);'\n 'FScr = ks*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FS = PS/(t*h);'\n 'Rs = FS/FScr;'\n 'MS = 2./(Rc + SQRT(Rc**2 + 4*Rs**2)) - 1.')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRBt']\n dvar_h = self.dvars['STRBh']\n # reading constants\n dtable_L = self.dtables['STRBL'][0]\n # building DRESP1s that read:\n # - axial force\n # - shear along Plane 1 (y axis)\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n code_PC = OUTC['FORCE']['CBAR']['Axial force']\n code_PS = OUTC['FORCE']['CBAR']['Shear plane 1']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_PC = DRESP1('STRPC', 'FORCE', 'ELEM', region=None,\n atta=code_PC, attb='', atti=eid)\n dresp_PS = DRESP1('STRPS', 'FORCE', 'ELEM', region=None,\n atta=code_PS, attb='', atti=eid)\n self.add_dresp(dresp_PC)\n self.add_dresp(dresp_PS)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id, dvar_h.id]\n dresp2.dtable = [dtable_L, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_PC.id, dresp_PS.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n else:\n raise NotImplementedError('Stringer %s profile not supported!' 
%\n self.profile)", "def ODEs(y, t, B, MdiscI, RdiscI, epsilon, delta, n=1.0, alpha=0.1, cs7=1.0,\n k=0.9):\n # Initial conditions\n Mdisc, omega = y\n \n # Constants\n Rdisc = RdiscI * 1.0e5 # Disc radius - cm\n tvisc = Rdisc / (alpha * cs7 * 1.0e7) # Viscous timescale - s\n mu = 1.0e15 * B * (R ** 3.0) # Magnetic Dipole Moment\n M0 = delta * MdiscI * Msol # Global Fallback Mass Budget - g\n tfb = epsilon * tvisc # Fallback timescale - s\n \n # Radii - Alfven, Corotation, Light Cylinder\n Rm = ((mu ** (4.0 / 7.0)) * (GM ** (-1.0 / 7.0)) * ((Mdisc / tvisc) **\n (-2.0 / 7.0)))\n Rc = (GM / (omega ** 2.0)) ** (2.0 / 3.0)\n Rlc = c / omega\n # Cap the Alfven radius\n if Rm >= k * Rlc:\n Rm = k * Rlc\n \n w = (Rm / Rc) ** (3.0 / 2.0) # Fastness parameter\n \n bigT = 0.5 * I * (omega ** 2.0) # Rotational energy\n modW = (0.6 * M * (c ** 2.0) * ((GM / (R * (c ** 2.0))) / (1.0 - 0.5 * (GM\n / (R * (c ** 2.0)))))) # Binding energy\n rot_param = bigT / modW # Rotation parameter\n \n # Dipole torque\n Ndip = (-1.0 * (mu ** 2.0) * (omega ** 3.0)) / (6.0 * (c ** 3.0))\n \n # Mass flow rates and efficiencies\n eta2 = 0.5 * (1.0 + np.tanh(n * (w - 1.0)))\n eta1 = 1.0 - eta2\n Mdotprop = eta2 * (Mdisc / tvisc) # Propelled\n Mdotacc = eta1 * (Mdisc / tvisc) # Accreted\n Mdotfb = (M0 / tfb) * (((t + tfb) / tfb) ** (-5.0 / 3.0)) # Fallback rate\n Mdotdisc = Mdotfb - Mdotprop - Mdotacc # Mass flow through the disc\n \n if rot_param > 0.27:\n Nacc = 0.0 # Prevents magnetar break-up\n else:\n # Accretion torque\n if Rm >= R:\n Nacc = ((GM * Rm) ** 0.5) * (Mdotacc - Mdotprop)\n else:\n Nacc = ((GM * R) ** 0.5) * (Mdotacc - Mdotprop)\n \n omegadot = (Nacc + Ndip) / I # Angular frequency time derivative\n \n return np.array([Mdotdisc, omegadot])", "def optimize_hydrogens(self):\n _LOGGER.debug(\"Optimization progress:\")\n optlist = self.optlist\n connectivity = {}\n # Initialize the detection progress\n if len(optlist) == 0:\n return\n _LOGGER.debug(\" Detecting potential hydrogen bonds\")\n progress = 0.0\n increment = 1.0 / len(optlist)\n for obj in optlist:\n connectivity[obj] = []\n for atom in obj.atomlist:\n closeatoms = self.debumper.cells.get_near_cells(atom)\n for closeatom in closeatoms:\n # Conditions for continuing\n if atom.residue == closeatom.residue:\n continue\n if not (closeatom.hacceptor or closeatom.hdonor):\n continue\n if atom.hdonor and not atom.hacceptor:\n if not closeatom.hacceptor:\n continue\n if atom.hacceptor:\n if not atom.hdonor and not closeatom.hdonor:\n continue\n dist = util.distance(atom.coords, closeatom.coords)\n if dist < 4.3:\n residue = atom.residue\n hbond = PotentialBond(atom, closeatom, dist)\n # Store the potential bond\n obj.hbonds.append(hbond)\n # Keep track of connectivity\n if closeatom in self.atomlist:\n closeobj = self.resmap[closeatom.residue]\n if closeobj not in connectivity[obj]:\n connectivity[obj].append(closeobj)\n progress += increment\n while progress >= 0.0499:\n progress -= 0.05\n # Some residues might have no nearby hbonds - if so, place at\n # default state\n for obj in optlist:\n if len(obj.hbonds) == 0:\n if obj.residue.fixed:\n continue\n _LOGGER.debug(\n f\"{obj.residue} has no nearby partners - fixing.\"\n )\n obj.finalize()\n # Determine the distinct networks\n networks = []\n seen = []\n for obj1 in optlist:\n if obj1.residue.fixed:\n continue\n if obj1 in seen:\n continue\n network = util.analyze_connectivity(connectivity, obj1)\n for obj2 in network:\n if obj2 not in seen:\n seen.append(obj2)\n networks.append(network)\n # Initialize the 
output progress\n if len(networks) > 0:\n _LOGGER.debug(\"Optimizing hydrogen bonds\")\n progress = 0.0\n increment = 1.0 / len(networks)\n # Work on the networks\n for network in networks:\n txt = \"\"\n for obj in network:\n txt += f\"{obj}, \"\n _LOGGER.debug(f\"Starting network {txt[:-2]}\")\n # FIRST: Only optimizeable to backbone atoms\n _LOGGER.debug(\"* Optimizeable to backbone *\")\n hbondmap = {}\n for obj in network:\n for hbond in obj.hbonds:\n if hbond.atom2 not in self.atomlist:\n hbondmap[hbond] = hbond.dist\n hbondlist = util.sort_dict_by_value(hbondmap)\n hbondlist.reverse()\n for hbond in hbondlist:\n atom = hbond.atom1\n atom2 = hbond.atom2\n obj = self.resmap[atom.residue]\n\n if atom.residue.fixed:\n continue\n if atom.hdonor:\n obj.try_donor(atom, atom2)\n if atom.hacceptor:\n obj.try_acceptor(atom, atom2)\n # SECOND: Non-dual water Optimizeable to Optimizeable\n _LOGGER.debug(\"* Optimizeable to optimizeable *\")\n hbondmap = {}\n seenlist = []\n for obj in network:\n for hbond in obj.hbonds:\n if hbond.atom2 in self.atomlist:\n if not isinstance(hbond.atom1.residue, aa.WAT):\n if not isinstance(hbond.atom2.residue, aa.WAT):\n # Only get one hbond pair\n if (hbond.atom2, hbond.atom1) not in seenlist:\n hbondmap[hbond] = hbond.dist\n seenlist.append((hbond.atom1, hbond.atom2))\n hbondlist = util.sort_dict_by_value(hbondmap)\n hbondlist.reverse()\n for hbond in hbondlist:\n atom = hbond.atom1\n atom2 = hbond.atom2\n obj1 = self.resmap[atom.residue]\n obj2 = self.resmap[atom2.residue]\n # Atoms may no longer exist if already optimized\n if not atom.residue.has_atom(atom.name):\n continue\n if not atom2.residue.has_atom(atom2.name):\n continue\n res = 0\n if atom.hdonor and atom2.hacceptor:\n res = obj1.try_both(atom, atom2, obj2)\n if atom.hacceptor and atom2.hdonor and res == 0:\n obj2.try_both(atom2, atom, obj1)\n # THIRD: All water-water residues\n _LOGGER.debug(\"* Water to Water *\")\n hbondmap = {}\n seenlist = []\n for obj in network:\n for hbond in obj.hbonds:\n residue = hbond.atom1.residue\n if isinstance(residue, aa.WAT):\n if isinstance(hbond.atom2.residue, aa.WAT):\n if (hbond.atom2, hbond.atom1) not in seenlist:\n hbondmap[hbond] = hbond.dist\n seenlist.append((hbond.atom1, hbond.atom2))\n hbondlist = util.sort_dict_by_value(hbondmap)\n hbondlist.reverse()\n for hbond in hbondlist:\n atom = hbond.atom1\n atom2 = hbond.atom2\n obj1 = self.resmap[atom.residue]\n obj2 = self.resmap[atom2.residue]\n res = 0\n if atom.hdonor and atom2.hacceptor:\n res = obj1.try_both(atom, atom2, obj2)\n if atom.hacceptor and atom2.hdonor and res == 0:\n obj2.try_both(atom2, atom, obj1)\n # FOURTH: Complete all residues\n for obj in network:\n obj.complete()\n # STEP 5: Update progress meter\n progress += 100.0 * increment\n while progress >= 5.0:\n progress -= 5.0", "def isDisturbance(self):\n return True" ]
[ "0.58227944", "0.5271165", "0.5135637", "0.4930457", "0.48061916", "0.46520263", "0.46455866", "0.4638927", "0.46183428", "0.46048695", "0.45919403", "0.45686707", "0.45576572", "0.4542761", "0.4541472", "0.45262", "0.45204988", "0.45135644", "0.45062312", "0.45003635", "0.45003635", "0.449907", "0.44705045", "0.4460567", "0.44523862", "0.44522223", "0.4449781", "0.44423974", "0.44399044", "0.44267267" ]
0.56088495
1
Compute the gravity gradient torque if a gravity model is provided. This method computes the Newtonian attraction and the perturbing part of the gravity gradient for every cuboid defined in the dictionary inCub at time curr_date (the time of the current satellite position). The gravity torque is computed in the inertial frame in which the spacecraft is defined. The perturbing part is calculated using Orekit's methods defined in the GravityModel object. The current position, rotation and mass of the satellite are obtained from the StateObserver object.
def _compute_gravity_torque(self, curr_date):
    if self._to_add[0]:
        # return gravity gradient torque in satellite frame
        body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)
        body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())
        body2satRot = PyRotation(body2sat.q0,
                                 body2sat.q1,
                                 body2sat.q2,
                                 body2sat.q3)
        sat2bodyRot = body2satRot.revert()
        body2sat = body2satRot.getMatrix()
        sat2body = sat2bodyRot.getMatrix()

        satM = self.spacecraft_state.getMass()
        mCub = self.inCub['dm'] * satM
        # add booms
        if "dm_boom" in self.inCub:
            mCub = np.concatenate((mCub, self.inCub['dm_boom']), axis=0)  # boom store with mass
        CoM = self.inCub['CoM_np']

        dmPos_s = CoM + self.satPos_s

        gNewton = (-self.muGM / np.linalg.norm(dmPos_s, axis=1, keepdims=True)**3) * dmPos_s

        # rotate vectors:
        dmPos_b = np.einsum('ij,kj->ki', sat2body, dmPos_s)

        gDist = np.empty(dmPos_b.shape)
        for i in xrange(0, dmPos_b.shape[0]):
            gDist[i, :] = np.asarray(
                self.GravityModel.gradient(curr_date,
                                           Vector3D(float(dmPos_b[i, 0]),
                                                    float(dmPos_b[i, 1]),
                                                    float(dmPos_b[i, 2])),
                                           self.muGM))

        gDist_s = np.einsum('ij,kj->ki', body2sat, gDist)

        gT = np.sum(np.cross(CoM, mCub*(gNewton + gDist_s)), axis=0)

        self._gTorque = Vector3D(float(gT[0]), float(gT[1]), float(gT[2]))

    else:
        self._gTorque = Vector3D.ZERO
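The document above mixes Orekit calls with NumPy vectorisation. As a minimal, self-contained sketch of just the Newtonian part of that sum (the spherical-harmonic perturbation from GravityModel.gradient and the body/satellite frame rotations are omitted; the function and constant names below are illustrative and not part of the original class or of Orekit):

import numpy as np

MU_EARTH = 3.986004418e14  # gravitational parameter of the Earth [m^3 s^-2]

def newtonian_gravity_torque(com_offsets, masses, sat_pos):
    # com_offsets: (N, 3) cuboid centre-of-mass offsets in the satellite frame [m]
    # masses:      (N,)   cuboid masses [kg]
    # sat_pos:     (3,)   satellite position w.r.t. Earth's centre, same frame [m]
    pos = com_offsets + sat_pos                      # absolute cuboid positions
    r = np.linalg.norm(pos, axis=1, keepdims=True)   # distance of each cuboid from Earth's centre
    g = -MU_EARTH * pos / r**3                       # point-mass acceleration at each cuboid
    force = masses[:, None] * g                      # F_i = m_i * g_i
    return np.cross(com_offsets, force).sum(axis=0)  # tau = sum_i r_i x F_i

# illustrative call: 1000 kg satellite split into two 500 kg lumps on a ~700 km orbit
tau = newtonian_gravity_torque(np.array([[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]),
                               np.array([500.0, 500.0]),
                               np.array([7.078e6, 0.0, 0.0]))

In the original method, GravityModel.gradient adds the non-spherical field on top of this point-mass term before the cross products are summed.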
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n sat2body = body2sat.revert()\n\n satM = self.state_observer.spacecraftState.getMass()\n mCub = self.inCub['mass_frac'] * satM\n\n self._gTorque = Vector3D.ZERO\n\n for CoM in self.inCub['CoM']:\n\n S_dmPos = self.satPos_s.add(CoM)\n\n r2 = S_dmPos.getNormSq()\n gNewton = Vector3D(-self.muGM / (sqrt(r2) * r2), S_dmPos)\n\n B_dmPos = sat2body.applyTo(S_dmPos)\n\n gDist = Vector3D(self.GravityModel.gradient(curr_date,\n B_dmPos,\n self.muGM))\n\n g_Dist_s = body2sat.applyTo(gDist)\n\n dmForce = Vector3D(mCub, gNewton.add(g_Dist_s))\n self._gTorque = self._gTorque.add(self.V3_cross(CoM, dmForce))\n\n else:\n self._gTorque = Vector3D.ZERO", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. 
Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def _compute_gravity_torque(self):\n pass", "def compute_torques(self, rotation, omega, dt):\n # shift time @ which attitude integration currently is\n try:\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)\n omega = Vector3D(float(omega[0]), float(omega[1]), float(omega[2]))\n\n self._compute_gravity_torque(curr_date)\n self._compute_magnetic_torque(curr_date)\n self._compute_solar_torque(curr_date)\n self._compute_aero_torque(curr_date, omega)\n\n # external torque has to be set separately because it is received\n # through a ros subscriber\n return self._gTorque.add(\n self._mTorque.add(\n self._sTorque.add(\n self._aTorque)))\n except Exception:\n print traceback.print_exc()\n raise", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n inertial2Sat = self.spacecraft_state.getAttitude().getRotation()\n\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n self._sTorque = Vector3D.ZERO\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'],\n self.meshDA['Coefs'])\n\n for CoM, normal, area, coefs in iterator:\n position = self.satPos_s.add(CoM)\n\n # compute flux in inertial frame\n sunSatVector = \\\n position.subtract(sunPos)\n r2 = sunSatVector.getNormSq()\n\n rawP = ratio * self.K_REF / r2\n flux = Vector3D(rawP / sqrt(r2), sunSatVector)\n\n # compute Radiation Pressure Force:\n if flux.getNormSq() > Precision.SAFE_MIN:\n # illumination (we are not in umbra)\n # rotate flux to spacecraft frame:\n dot = self.V3_dot(normal, flux)\n\n if dot > 0:\n # the solar array is illuminated backward,\n # fix signs to compute contribution correctly\n dot = -dot\n normal = normal.negate()\n absorbCoeff = coefs[0]\n specularReflCoeff = coefs[1]\n diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff)\n try:\n assert(diffuseReflCoeff >= 0)\n except AssertionError:\n raise AssertionError(\n \"Negative diffuse reflection coefficient not possible!\")\n psr = flux.getNorm()\n # Vallado's equation uses different parameters which are\n # related to our parameters as:\n # cos (phi) = - dot / (psr*area)\n # n = N (n...unit vector)\n # s = -fluxSat / psr (s...unit vector)\n cN = 2 * area * dot * (diffuseReflCoeff / 3 -\n specularReflCoeff * dot / psr)\n cS = (area * dot / psr) * (specularReflCoeff - 1)\n Force = Vector3D(float(cN), normal, float(cS), flux)\n # Force already in spacecraft frame. 
No need to convert\n self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force))\n\n else:\n self._sTorque = Vector3D.ZERO", "def compute_torques(self, rotation, omega, dt):\n # shift time from integration start to time of attitude integration step\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)\n self.satPos_s = np.array([self.satPos_s.x,\n self.satPos_s.y,\n self.satPos_s.z], dtype='float64')\n\n self._compute_gravity_torque(curr_date)\n self._compute_magnetic_torque(curr_date)\n self._compute_solar_torque(curr_date)\n self._compute_aero_torque(curr_date, omega)\n\n return self._gTorque.add(\n self._mTorque.add(\n self._sTorque.add(\n self._aTorque)))", "def evolve(self):\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n gradp_x = self.cc_data.get_var(\"gradp_x\")\n gradp_y = self.cc_data.get_var(\"gradp_y\")\n\n # note: the base state quantities do not have valid ghost cells\n beta0 = self.base[\"beta0\"]\n beta0_edges = self.base[\"beta0-edges\"]\n\n rho0 = self.base[\"rho0\"]\n\n phi = self.cc_data.get_var(\"phi\")\n\n myg = self.cc_data.grid\n\n # ---------------------------------------------------------------------\n # create the limited slopes of rho, u and v (in both directions)\n # ---------------------------------------------------------------------\n limiter = self.rp.get_param(\"lm-atmosphere.limiter\")\n\n ldelta_rx = reconstruction.limit(rho, myg, 1, limiter)\n ldelta_ux = reconstruction.limit(u, myg, 1, limiter)\n ldelta_vx = reconstruction.limit(v, myg, 1, limiter)\n\n ldelta_ry = reconstruction.limit(rho, myg, 2, limiter)\n ldelta_uy = reconstruction.limit(u, myg, 2, limiter)\n ldelta_vy = reconstruction.limit(v, myg, 2, limiter)\n\n # ---------------------------------------------------------------------\n # get the advective velocities\n # ---------------------------------------------------------------------\n\n \"\"\"\n the advective velocities are the normal velocity through each cell\n interface, and are defined on the cell edges, in a MAC type\n staggered form\n\n n+1/2\n v\n i,j+1/2\n +------+------+\n | |\n n+1/2 | | n+1/2\n u + U + u\n i-1/2,j | i,j | i+1/2,j\n | |\n +------+------+\n n+1/2\n v\n i,j-1/2\n\n \"\"\"\n\n # this returns u on x-interfaces and v on y-interfaces. 
These\n # constitute the MAC grid\n if self.verbose > 0:\n print(\" making MAC velocities\")\n\n # create the coefficient to the grad (pi/beta) term\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n # create the source term\n source = self.aux_data.get_var(\"source_y\")\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n rhoprime = self.make_prime(rho, rho0)\n source.v()[:, :] = rhoprime.v()*g/rho.v()\n self.aux_data.fill_BC(\"source_y\")\n\n _um, _vm = lm_interface.mac_vels(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source)\n\n u_MAC = ai.ArrayIndexer(d=_um, grid=myg)\n v_MAC = ai.ArrayIndexer(d=_vm, grid=myg)\n\n # ---------------------------------------------------------------------\n # do a MAC projection to make the advective velocities divergence\n # free\n # ---------------------------------------------------------------------\n\n # we will solve D (beta_0^2/rho) G phi = D (beta_0 U^MAC), where\n # phi is cell centered, and U^MAC is the MAC-type staggered\n # grid of the advective velocities.\n\n if self.verbose > 0:\n print(\" MAC projection\")\n\n # create the coefficient array: beta0**2/rho\n # MZ!!!! probably don't need the buf here\n coeff.v(buf=1)[:, :] = 1.0/rho.v(buf=1)\n coeff.v(buf=1)[:, :] = coeff.v(buf=1)*beta0.v2d(buf=1)**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi-MAC\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi-MAC\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi-MAC\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi-MAC\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # MAC velocities are edge-centered. div{beta_0 U} is cell-centered.\n div_beta_U.v()[:, :] = \\\n beta0.v2d()*(u_MAC.ip(1) - u_MAC.v())/myg.dx + \\\n (beta0_edges.v2dp(1)*v_MAC.jp(1) -\n beta0_edges.v2d()*v_MAC.v())/myg.dy\n\n # solve the Poisson problem\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-12)\n\n # update the normal velocities with the pressure gradient -- these\n # constitute our advective velocities. 
Note that what we actually\n # solved for here is phi/beta_0\n phi_MAC = self.cc_data.get_var(\"phi-MAC\")\n phi_MAC[:, :] = mg.get_solution(grid=myg)\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n coeff_x = myg.scratch_array()\n b = (3, 1, 0, 0) # this seems more than we need\n coeff_x.v(buf=b)[:, :] = 0.5*(coeff.ip(-1, buf=b) + coeff.v(buf=b))\n\n coeff_y = myg.scratch_array()\n b = (0, 0, 3, 1)\n coeff_y.v(buf=b)[:, :] = 0.5*(coeff.jp(-1, buf=b) + coeff.v(buf=b))\n\n # we need the MAC velocities on all edges of the computational domain\n # here we do U = U - (beta_0/rho) grad (phi/beta_0)\n b = (0, 1, 0, 0)\n u_MAC.v(buf=b)[:, :] -= \\\n coeff_x.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.ip(-1, buf=b))/myg.dx\n\n b = (0, 0, 0, 1)\n v_MAC.v(buf=b)[:, :] -= \\\n coeff_y.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.jp(-1, buf=b))/myg.dy\n\n # ---------------------------------------------------------------------\n # predict rho to the edges and do its conservative update\n # ---------------------------------------------------------------------\n _rx, _ry = lm_interface.rho_states(myg.ng, myg.dx, myg.dy, self.dt,\n rho, u_MAC, v_MAC,\n ldelta_rx, ldelta_ry)\n\n rho_xint = ai.ArrayIndexer(d=_rx, grid=myg)\n rho_yint = ai.ArrayIndexer(d=_ry, grid=myg)\n\n rho_old = rho.copy()\n\n rho.v()[:, :] -= self.dt*(\n # (rho u)_x\n (rho_xint.ip(1)*u_MAC.ip(1) - rho_xint.v()*u_MAC.v())/myg.dx +\n # (rho v)_y\n (rho_yint.jp(1)*v_MAC.jp(1) - rho_yint.v()*v_MAC.v())/myg.dy)\n\n self.cc_data.fill_BC(\"density\")\n\n # update eint as a diagnostic\n eint = self.cc_data.get_var(\"eint\")\n gamma = self.rp.get_param(\"eos.gamma\")\n eint.v()[:, :] = self.base[\"p0\"].v2d()/(gamma - 1.0)/rho.v()\n\n # ---------------------------------------------------------------------\n # recompute the interface states, using the advective velocity\n # from above\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" making u, v edge states\")\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 2.0/(rho.v() + rho_old.v())\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n _ux, _vx, _uy, _vy = \\\n lm_interface.states(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source,\n u_MAC, v_MAC)\n\n u_xint = ai.ArrayIndexer(d=_ux, grid=myg)\n v_xint = ai.ArrayIndexer(d=_vx, grid=myg)\n u_yint = ai.ArrayIndexer(d=_uy, grid=myg)\n v_yint = ai.ArrayIndexer(d=_vy, grid=myg)\n\n # ---------------------------------------------------------------------\n # update U to get the provisional velocity field\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" doing provisional update of u, v\")\n\n # compute (U.grad)U\n\n # we want u_MAC U_x + v_MAC U_y\n advect_x = myg.scratch_array()\n advect_y = myg.scratch_array()\n\n advect_x.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(u_xint.ip(1) - u_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(u_yint.jp(1) - u_yint.v())/myg.dy\n\n advect_y.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(v_xint.ip(1) - v_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(v_yint.jp(1) - v_yint.v())/myg.dy\n\n proj_type = self.rp.get_param(\"lm-atmosphere.proj_type\")\n\n if proj_type == 1:\n u.v()[:, :] -= (self.dt*advect_x.v() + self.dt*gradp_x.v())\n v.v()[:, :] -= (self.dt*advect_y.v() + 
self.dt*gradp_y.v())\n\n elif proj_type == 2:\n u.v()[:, :] -= self.dt*advect_x.v()\n v.v()[:, :] -= self.dt*advect_y.v()\n\n # add the gravitational source\n rho_half = 0.5*(rho + rho_old)\n rhoprime = self.make_prime(rho_half, rho0)\n source[:, :] = rhoprime*g/rho_half\n self.aux_data.fill_BC(\"source_y\")\n\n v[:, :] += self.dt*source\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n if self.verbose > 0:\n print(\"min/max rho = {}, {}\".format(self.cc_data.min(\"density\"), self.cc_data.max(\"density\")))\n print(\"min/max u = {}, {}\".format(self.cc_data.min(\"x-velocity\"), self.cc_data.max(\"x-velocity\")))\n print(\"min/max v = {}, {}\".format(self.cc_data.min(\"y-velocity\"), self.cc_data.max(\"y-velocity\")))\n\n # ---------------------------------------------------------------------\n # project the final velocity\n # ---------------------------------------------------------------------\n\n # now we solve L phi = D (U* /dt)\n if self.verbose > 0:\n print(\" final projection\")\n\n # create the coefficient array: beta0**2/rho\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n mg.init_RHS(div_beta_U/self.dt)\n\n # use the old phi as our initial guess\n phiGuess = mg.soln_grid.scratch_array()\n phiGuess.v(buf=1)[:, :] = phi.v(buf=1)\n mg.init_solution(phiGuess)\n\n # solve\n mg.solve(rtol=1.e-12)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of p and update the velocities\n # this differs depending on what we projected.\n gradphi_x, gradphi_y = mg.get_solution_gradient(grid=myg)\n\n # U = U - (beta_0/rho) grad (phi/beta_0)\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= self.dt*coeff.v()*gradphi_x.v()\n v.v()[:, :] -= self.dt*coeff.v()*gradphi_y.v()\n\n # store gradp for the next step\n\n if proj_type == 1:\n gradp_x.v()[:, :] += gradphi_x.v()\n gradp_y.v()[:, :] += gradphi_y.v()\n\n elif proj_type == 2:\n gradp_x.v()[:, :] = gradphi_x.v()\n gradp_y.v()[:, :] = gradphi_y.v()\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n self.cc_data.fill_BC(\"gradp_x\")\n self.cc_data.fill_BC(\"gradp_y\")\n\n # increment the time\n if not self.in_preevolve:\n self.cc_data.t += self.dt\n self.n += 1", "def compute_gravity(self):\r\n # compute the gravity from the Gauss form.\r\n # if it fails, marks divergence\r\n try:\r\n self.gravsolver.solve()\r\n except:\r\n print(\"GRAVITY DIVERGED\")\r\n\r\n # write to log\r\n self.logfile.write(\"%s: STOPPED DUE TO DIVERGENCE IN GRAVITY \\n\" %\r\n (self.convert_time(time.time() -\r\n self.start_time)))\r\n self.diverged = True # set diverged to True, break the run\r\n return\r\n\r\n # split and update the gravity function with the answers\r\n # note the gravscale\r\n gravg, gravs = self.gravgs.split()\r\n\r\n # assign the 
result to the gravity function\r\n self.gravity.assign(project(gravg/self.gravscale, self.V))", "def compute_forces_mesh(self):\n f = self.ptclgrid.grid[:self.size,:self.size]*self.grad_phi_mesh()\n return f", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = self.inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n sunPos = np.array([sunPos.x, sunPos.y, sunPos.z], dtype='float64')\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = self.meshDA['Area_np']\n coefs = self.meshDA['Coefs_np']\n\n sunSatVector = self.satPos_s + CoM - sunPos\n r = np.linalg.norm(sunSatVector, axis=1)\n rawP = ratio * self.K_REF / (r**2)\n flux = (rawP / r)[:, None] * sunSatVector\n # eliminate arrays where zero flux\n fluxNorm = np.linalg.norm(flux, axis=1)\n Condflux = fluxNorm**2 > Precision.SAFE_MIN\n flux = flux[Condflux]\n normal = normal[Condflux]\n\n # dot product for multidimensional arrays:\n dot = np.einsum('ij,ij->i', flux, normal)\n dot[dot > 0] = dot[dot > 0] * (-1.0)\n if dot.size > 0:\n normal[dot > 0] = normal[dot > 0] * (-1.0)\n\n cN = 2 * area * dot * (coefs[:, 2] / 3 - coefs[:, 1] * dot / fluxNorm)\n cS = (area * dot / fluxNorm) * (coefs[:, 1] - 1)\n force = cN[:, None] * normal + cS[:, None] * flux\n\n sT = np.sum(np.cross(CoM, force), axis=0)\n\n self._sTorque = Vector3D(float(sT[0]), float(sT[1]), float(sT[2]))\n\n else:\n self._sTorque = Vector3D.ZERO", "def create_deltas_tensor(self, deltas):\n T = self.T\n N = self.N\n neighs = self.neighs\n self.deltas = {}\n for n in range(N):\n self.deltas[n] = self.get_empty_matrix((len(neighs[n]), T))\n for cc in deltas:\n t = int(cc[0])\n if t >= T:\n raise ValueError(\"Contact time above T!\")\n i = int(cc[1])\n j = int(cc[2])\n delta = cc[3]\n #lam = np.clip(lam, 0, 1 - self.err_max_lambda)\n #print(t,i,j,lam)\n index_i = neighs[j].index(i)\n self.deltas[j][index_i][t] = delta\n\n '''def create_delta_tensor(self, gamma):\n \"\"\"\n Deltas values for the computation of parameters of rate of contagion\n \"\"\"\n N = self.N\n self.deltas = {}\n for n in range(N):\n self.deltas[n] = self.logp_lam[n]/gamma\n '''", "def Evolve(self, data, t, dt, **kwargs):\n\n if self.grid.expansion:\n z = self.grid.cosm.TimeToRedshiftConverter(0, t, self.grid.zi)\n dz = dt / self.grid.cosm.dtdz(z)\n else:\n z = dz = 0\n\n if 'he_1' in self.grid.absorbers:\n i = self.grid.absorbers.index('he_1')\n self.chemnet.psi[...,i] *= data['he_2'] / data['he_1']\n\n # Make sure we've got number densities\n if 'n' not in data.keys():\n data['n'] = self.grid.particle_density(data, z)\n\n newdata = {}\n for field in data:\n newdata[field] = data[field].copy()\n\n if not kwargs:\n kwargs = self.rcs.copy()\n\n kwargs_by_cell = self._sort_kwargs_by_cell(kwargs)\n\n self.q_grid = np.zeros_like(self.zeros_gridxq)\n self.dqdt_grid = np.zeros_like(self.zeros_gridxq)\n\n # For debugging\n self.kwargs_by_cell = kwargs_by_cell\n\n # Loop over grid and solve chemistry\n for cell in range(self.grid.dims):\n\n # Construct q vector\n q = np.zeros(len(self.grid.evolving_fields))\n for i, species in enumerate(self.grid.evolving_fields):\n q[i] = data[species][cell]\n\n kwargs_cell = kwargs_by_cell[cell]\n\n if self.rtON:\n args = (cell, kwargs_cell['k_ion'], kwargs_cell['k_ion2'],\n kwargs_cell['k_heat'], kwargs_cell['k_heat_lya'],\n data['n'][cell], t)\n else:\n args = (cell, self.grid.zeros_absorbers,\n 
self.grid.zeros_absorbers2, self.grid.zeros_absorbers,\n 0.0, data['n'][cell], t)\n\n self.solver.set_initial_value(q, 0.0).set_f_params(args).set_jac_params(args)\n\n self.solver.integrate(dt)\n\n self.q_grid[cell] = q.copy()\n self.dqdt_grid[cell] = self.chemnet.dqdt.copy()\n\n for i, value in enumerate(self.solver.y):\n newdata[self.grid.evolving_fields[i]][cell] = self.solver.y[i]\n\n # Compute particle density\n newdata['n'] = self.grid.particle_density(newdata, z - dz)\n\n # Fix helium fractions if approx_He==True.\n if self.grid.pf['include_He']:\n if self.grid.pf['approx_He']:\n newdata['he_1'] = newdata['h_1']\n newdata['he_2'] = newdata['h_2']\n newdata['he_3'] = np.zeros_like(newdata['h_1'])\n\n return newdata", "def _compute_solar_torque(self):\n pass", "def update(self, gyro, accel, deltaT):\r\n gyro = np.array(gyro)\r\n accel = np.array(accel)\r\n q = self.quaternion\r\n qDot1 = 0.5 * (-q[1] * gyro[0] - q[2] * gyro[1] - q[3] * gyro[2])\r\n qDot2 = 0.5 * ( q[0] * gyro[0] + q[2] * gyro[2] - q[3] * gyro[1])\r\n qDot3 = 0.5 * ( q[0] * gyro[1] - q[1] * gyro[2] + q[3] * gyro[0])\r\n qDot4 = 0.5 * ( q[0] * gyro[2] + q[1] * gyro[1] - q[2] * gyro[0])\r\n\r\n qdot = [qDot1, qDot2, qDot3, qDot4]\r\n\r\n # Normalise accelerometer measurement\r\n if norm(accel) is 0:\r\n warnings.warn(\"accelerometer is zero\")\r\n else:\r\n accel /= norm(accel)\r\n\r\n # Auxiliary variables to avoid repeated calculations\r\n _2q0 = 2.0 * q[0]\r\n _2q1 = 2.0 * q[1]\r\n _2q2 = 2.0 * q[2]\r\n _2q3 = 2.0 * q[3]\r\n _4q0 = 4.0 * q[0]\r\n _4q1 = 4.0 * q[1]\r\n _4q2 = 4.0 * q[2]\r\n _8q1 = 8.0 * q[1]\r\n _8q2 = 8.0 * q[2]\r\n q0q0 = q[0] * q[0]\r\n q1q1 = q[1] * q[1]\r\n q2q2 = q[2] * q[2]\r\n q3q3 = q[3] * q[3]\r\n\r\n # Gradient descent algorithm corrective step\r\n s0 = _4q0 * q2q2 + _2q2 * accel[0] + _4q0 * q1q1 - _2q1 * accel[1]\r\n s1 = _4q1 * q3q3 - _2q3 * accel[0] + 4.0 * q0q0 * q[1]- _2q0 * accel[1] - _4q1 + _8q1 * q1q1 + _8q1 * q2q2 + _4q1 * accel[2]\r\n s2 = 4.0 * q0q0 * q[2] + _2q0 * accel[0] + _4q2 * q3q3 - _2q3 * accel[1] - _4q2 + _8q2 * q1q1 + _8q2 * q2q2 + _4q2 * accel[2]\r\n s3 = 4.0 * q1q1 * q[3] - _2q1 * accel[0] + 4.0 * q2q2 * q[3] - _2q2 * accel[1]\r\n\r\n s = np.array([s0, s1, s2, s3])\r\n s /= norm(s)\r\n\r\n # Apply Feedback Step\r\n qdot -= self.beta*s #(q * Quaternion(0, gyroscope[0], gyroscope[1], gyroscope[2])) * 0.5 - self.beta * step.T\r\n\r\n # Integrate to yield quaternion\r\n q += qdot * self.samplePeriod\r\n self.quaternion /= norm(q) # normalise quaternion\r", "def vlbi_grav_delay(dset):\n eph = apriori.get(\"ephemerides\", time=dset.time)\n grav_delay = np.zeros(dset.num_obs)\n\n # List of celestial bodies. 
Major moons are also recommended, like Titan, Ganymedes, ...\n bodies = [\n \"mercury barycenter\",\n \"venus barycenter\",\n \"earth\",\n \"moon\",\n \"mars barycenter\",\n \"jupiter barycenter\",\n \"saturn barycenter\",\n \"uranus barycenter\",\n \"neptune barycenter\",\n \"pluto barycenter\",\n \"sun\",\n ]\n\n bcrs_vel_earth = eph.vel_bcrs(\"earth\")\n\n baseline_gcrs = dset.site_pos_2.gcrs.pos - dset.site_pos_1.gcrs.pos\n src_dot_baseline = (dset.src_dir.unit_vector[:, None, :] @ baseline_gcrs.mat)[:, 0, 0]\n\n # Equation 11.6\n bcrs_site1 = eph.pos_bcrs(\"earth\") + dset.site_pos_1.gcrs.pos.val\n bcrs_site2 = eph.pos_bcrs(\"earth\") + dset.site_pos_2.gcrs.pos.val\n\n for body in bodies:\n try:\n GM_name = \"GM\" if body == \"earth\" else f\"GM_{body.split()[0]}\"\n GM_body = constant.get(GM_name, source=eph.ephemerides)\n except KeyError:\n log.warn(\n f\"The GM value of {body.split()[0].title()} is not defined for {eph.ephemerides}. \"\n f\"Correction set to zero.\"\n )\n continue\n bcrs_body_t1 = eph.pos_bcrs(body)\n\n # Equation 11.3\n delta_t = TimeDelta(\n np.maximum(0, dset.src_dir.unit_vector[:, None, :] @ (bcrs_body_t1 - bcrs_site1)[:, :, None])[:, 0, 0]\n * Unit.second2day\n / constant.c,\n fmt=\"jd\",\n scale=\"tdb\",\n )\n time_1J = dset.time.tdb - delta_t\n\n # Equation 11.4\n bcrs_body_t1J = eph.pos_bcrs(body, time=time_1J)\n vector_body_site1 = bcrs_site1 - bcrs_body_t1J\n\n # Equation 11.5\n vector_body_site2 = bcrs_site2 - bcrs_body_t1J - bcrs_vel_earth / constant.c * src_dot_baseline[:, None]\n\n # Needed for equation 11.1\n norm_body_site1 = np.linalg.norm(vector_body_site1, axis=1)\n src_dot_vector_body_site1 = (dset.src_dir.unit_vector[:, None, :] @ vector_body_site1[:, :, None])[:, 0, 0]\n nomJ = norm_body_site1 + src_dot_vector_body_site1\n denomJ = (\n np.linalg.norm(vector_body_site2, axis=1)\n + (dset.src_dir.unit_vector[:, None, :] @ vector_body_site2[:, :, None])[:, 0, 0]\n )\n\n # Main correction (equation 11.1)\n grav_delay += 2 * GM_body / constant.c ** 2 * np.log(nomJ / denomJ)\n\n # Higher order correction (equation 11.14)\n baseline_dot_vector_body_site1 = (baseline_gcrs.val[:, None, :] @ vector_body_site1[:, :, None])[:, 0, 0]\n grav_delay += (\n 4\n * GM_body ** 2\n / constant.c ** 4\n * (baseline_dot_vector_body_site1 / norm_body_site1 + src_dot_baseline)\n / (norm_body_site1 + src_dot_vector_body_site1) ** 2\n )\n\n # Denominator (equation 11.9)\n denominator = (\n 1\n + (\n (bcrs_vel_earth + dset.site_pos_2.gcrs.vel.val)[:, None, :]\n @ dset.src_dir.unit_vector[:, :, None]\n / constant.c\n )[:, 0, 0]\n )\n\n return grav_delay / denominator", "def solve(self,init=None,g_init=1e-3,g_step=5e-3,g_fin=None,evol=False,movingGrid=False):\n if(g_fin==None): g_fin=self.g\n #Check if all signs are correct\n if(g_fin<0):\n if(g_step>0): g_step*=-1.\n if(g_init>0): g_init*=-1.\n else:\n if(g_step<0): g_step*=-1.\n if(g_init<0): g_step*=-1.\n\n #If no initial distribution is given, start from the BCS ground state\n if(init==None): init=[1 if i<self.N else 0 for i in range(self.n)]\n var_init=np.array([-2.*init[i]-g_init/(1-2.*init[i])*np.sum([self.XXZ.Z(j,i)*(init[j]-init[i]) for j in range(self.n) if j!=i]) for i in range(self.n)])\n n_step=int((g_fin-g_init)/g_step)\n g=g_init\n\n #Define necessary variables if evol or movingGrid=True\n if(evol or movingGrid):\n var_evol=np.zeros([n_step,self.n])\n g_evol=np.zeros(n_step)\n if(movingGrid):\n rap_evol = np.zeros([n_step,self.N],dtype=complex)\n rap_evol[0] = [self.levels[i] for i in range(self.n) if 
init[i]!=0 ]\n rap=np.array([self.levels[i]+0.5*np.abs(np.random.rand()) for i in range(self.n) if init[i]!=0])\n grid=np.zeros(self.N+1,dtype=complex)\n grid[0]=1e3\n for k in range(self.N): grid[k+1]=rap[k]\n n_grid=n_step/20 #Calculates rapidities at 20 intermediate steps\n\n #Gradually increase the coupling constant g and solve iteratively at each step starting from the Taylor approximation from the previous step\n for i in range(n_step):\n var_new=self.newtonraphson(g,var_init)\n der=self.get_derivative(var_new,g)\n #var_init=self.taylor_expansion(g,g_step,var_new)\n var_init = var_new+g_step*der\n g+=g_step\n #print g\n\n #Save variables at current step if evol =True\n if(evol or movingGrid):\n var_evol[i]=var_init\n g_evol[i]=g\n if(movingGrid and i%n_grid==0 and i!=0):\n #Method for obtaining the rapidities starting from the set of Lambda_i\n rf=RootFinder(self.XXZ,var_evol[i]/g_evol[i],g_evol[i],self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap=lm.laguerre()\n rap_evol[i]=np.sort(lm.laguerre())\n for k in range(self.N): grid[k+1]=rap[k]\n grid[0]=10*max(rap)\n elif(movingGrid and i!=0):\n rf=RootFinder(self.XXZ,var_evol[i]/g_evol[i],g_evol[i],self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap_evol[i]=np.sort(lm.laguerre())\n \n \n #One final iterative solution at g=g_fin\n self.solution=self.newtonraphson(g_fin,var_init)\n #Calculate the occupation numbers\n self.occupation=0.5*(-1.-self.solution+g_fin*self.get_derivative(self.solution,g_fin))\n\n #One final calculation of the rapidities\n if(movingGrid):\n rf=RootFinder(self.XXZ,self.solution/g_fin,g_fin,self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap=lm.laguerre()\n self.rapidities=rap\n\n if movingGrid: return [g_evol,var_evol,rap_evol]\n if evol: return [g_evol,var_evol]\n return self.solution", "def calculateElementCoefficients(self):\n #\n #get u,grad(u), and grad(u)Xgrad(w) at the quadrature points\n #\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if self.q.has_key(('grad(u)',cj)):\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n #\n #get functions of (t,x,u) at the quadrature points\n #\n self.coefficients.evaluate(self.timeIntegration.t,self.q)\n log(\"Coefficients on element\",level=10,data=self.q)\n #\n # time integration is handled directly in ELLAM weak approximation, don't have a hook for\n # doing that via a time integration object (could if it were a direct Lagrange Galerkin formulation I believe)\n # however, need to set time integration's m_tmp if use that anywhere\n #if self.timeTerm:\n # self.timeIntegration.calculateElementCoefficients(self.q)\n\n #todo eventually can add nonlinear potential here\n\n #cek and mwf need to go through this section to clean up, some of next two blocks could go to calcQuad\n #\n #todo need non-diagonal dependence?\n for ci in range(self.nc):\n cfemIntegrals.calculateCFLADR(self.elementEffectiveDiametersArray,\n self.q[('dm',ci,ci)],\n self.q[('df',ci,ci)],#could just be velocity\n self.q[('cfl',ci)])", "def magnetic_reynolds(uu, param, grid, aa=list(), bb=list(), jj=list(),\n nghost=3, lmix=True):\n if len(bb) ==0 and len(aa) ==0 and len(jj) ==0:\n print('magnetic_reynolds WARNING: no aa, bb nor jj provided\\n'+\n 'aa or bb must be provided or aa for only hyper resistivity') \n #resistive force\n lres, lhyper3 = False, False\n for iresi in param.iresistivity:\n iresi = str.strip(iresi,'\\n')\n if 'hyper' not in iresi and len(iresi) > 0:\n lres = True\n if 
'hyper3' in iresi:\n lhyper3 = True\n fresi = np.zeros_like(uu)\n if lres:\n if lhyper3:\n lhyper3 = lhyper3==lmix\n if len(jj) == 0:\n if len(aa) == 0:\n print('magnetic_reynolds WARNING: calculating jj without aa\\n',\n 'provide aa or jj directly for accurate boundary values')\n jj = curl(bb,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y, \n coordinate_system=param.coord_system)\n else:\n jj = curl2(aa,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y, \n coordinate_system=param.coord_system)\n for j in range(0,3):\n jj[j, :nghost,:,:] = jj[j,-2*nghost:-nghost,:,:]\n jj[j,-nghost:,:,:] = jj[j, nghost: 2*nghost,:,:]\n jj[j,:, :nghost,:] = jj[j,:,-2*nghost:-nghost,:]\n jj[j,:,-nghost:,:] = jj[j,:, nghost: 2*nghost,:]\n jj[j,:,:, :nghost] = jj[j,:,:,-2*nghost:-nghost]\n jj[j,:,:,-nghost:] = jj[j,:,:, nghost: 2*nghost]\n fresi = fresi + param.eta*param.mu0*jj\n for iresi in param.iresistivity:\n iresi = str.strip(iresi,'\\n')\n if 'eta-const' not in iresi and 'hyper' not in iresi\\\n and len(iresi) > 0:\n print('magnetic_reynolds WARNING: '+iresi+' not implemented\\n'+\n 'terms may be missing from the standard resistive forces')\n if lhyper3:\n if len(aa) == 0:\n print('magnetic_reynolds WARNING: no aa provided\\n'+\n 'aa must be provided for hyper resistivity')\n return 1\n else:\n del6a = np.zeros_like(aa)\n for j in range(0,3):\n del6a[j] = del6(aa[j],grid.dx,grid.dy,grid.dz)\n del6a[j, :nghost,:,:] = del6a[j,-2*nghost:-nghost,:,:]\n del6a[j,-nghost:,:,:] = del6a[j, nghost: 2*nghost,:,:]\n del6a[j,:, :nghost,:] = del6a[j,:,-2*nghost:-nghost,:]\n del6a[j,:,-nghost:,:] = del6a[j,:, nghost: 2*nghost,:]\n del6a[j,:,:, :nghost] = del6a[j,:,:,-2*nghost:-nghost]\n del6a[j,:,:,-nghost:] = del6a[j,:,:, nghost: 2*nghost]\n #del6 for non-cartesian tba\n #del6a[j] = del6(aa[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n # coordinate_system=param.coord_system)\n #effective at l > 5 grid.dx? \n fresi = fresi + param.eta_hyper3*del6a\n del(del6a)\n fresi2 = np.sqrt(dot2(fresi))\n del(fresi)\n #advective force\n if len(bb) == 0:\n if len(aa) == 0:\n print('magnetic_reynolds WARNING: calculating uu x bb without bb\\n',\n 'provide aa or bb directly to proceed')\n return 1\n else:\n bb = curl(aa,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y, \n coordinate_system=param.coord_system)\n for j in range(0,3):\n bb[j, :nghost,:,:] = bb[j,-2*nghost:-nghost,:,:]\n bb[j,-nghost:,:,:] = bb[j, nghost: 2*nghost,:,:]\n bb[j,:, :nghost,:] = bb[j,:,-2*nghost:-nghost,:]\n bb[j,:,-nghost:,:] = bb[j,:, nghost: 2*nghost,:]\n bb[j,:,:, :nghost] = bb[j,:,:,-2*nghost:-nghost]\n bb[j,:,:,-nghost:] = bb[j,:,:, nghost: 2*nghost]\n advec = cross(uu,bb)\n advec2 = np.sqrt(dot2(advec))\n del(advec)\n #avoid division by zero\n if fresi2.max() > 0:\n fresi2[np.where(fresi2==0)] = fresi2[np.where(fresi2>0)].min()\n Rm = advec2/fresi2\n #set minimum floor to exclude zero-valued Rm \n if Rm.max() > 0:\n Rm[np.where(Rm==0)] = Rm[np.where(Rm>0)].min()\n else:\n print('Rm undefined')\n else:\n Rm = advec2\n print('Rm undefined')\n return Rm", "def preevolve(self):\n\n self.in_preevolve = True\n\n myg = self.cc_data.grid\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n self.cc_data.fill_BC(\"density\")\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n # 1. do the initial projection. 
This makes sure that our original\n # velocity field satisties div U = 0\n\n # the coefficient for the elliptic equation is beta_0^2/rho\n coeff = 1/rho\n beta0 = self.base[\"beta0\"]\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # next create the multigrid object. We defined phi with\n # the right BCs previously\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n # solve D (beta_0^2/rho) G (phi/beta_0) = D( beta_0 U )\n\n # set the RHS to divU and solve\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-10)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi = self.cc_data.get_var(\"phi\")\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of phi and update the\n # velocities\n # FIXME: this update only needs to be done on the interior\n # cells -- not ghost cells\n gradp_x, gradp_y = mg.get_solution_gradient(grid=myg)\n\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= coeff.v()*gradp_x.v()\n v.v()[:, :] -= coeff.v()*gradp_y.v()\n\n # fill the ghostcells\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n # 2. now get an approximation to gradp at n-1/2 by going through the\n # evolution.\n\n # store the current solution -- we'll restore it in a bit\n orig_data = patch.cell_center_data_clone(self.cc_data)\n\n # get the timestep\n self.method_compute_timestep()\n\n # evolve\n self.evolve()\n\n # update gradp_x and gradp_y in our main data object\n new_gp_x = self.cc_data.get_var(\"gradp_x\")\n new_gp_y = self.cc_data.get_var(\"gradp_y\")\n\n orig_gp_x = orig_data.get_var(\"gradp_x\")\n orig_gp_y = orig_data.get_var(\"gradp_y\")\n\n orig_gp_x[:, :] = new_gp_x[:, :]\n orig_gp_y[:, :] = new_gp_y[:, :]\n\n self.cc_data = orig_data\n\n if self.verbose > 0:\n print(\"done with the pre-evolution\")\n\n self.in_preevolve = False", "def _ice_d3gdt3(temp,pres):\n # Reduced variables\n tn = temp/_TTP\n pn = pres/_PTPE\n _PI0 = _PATM/_PTPE\n g_ttt = 0.\n \n # Residual terms including complex numbers\n sr = [_GCOEFFS[1], complex(0.0,0.0)]\n for (k,rk) in enumerate(_GCOEFFS[2]):\n sr[1] += rk * (pn-_PI0)**k\n for (tk,s) in zip(_GCOEFFS[3],sr):\n term = 1./(tk-tn)**2 - 1./(tk+tn)**2\n g_ttt += (s * term).real / _TTP**2\n return g_ttt", "def _compute_aero_torque(self, curr_date, omega):\n if self._to_add[3]:\n # assuming constant atmosphere condition over spacecraft\n # error is of order of 10^-17\n rho = self.AtmoModel.getDensity(curr_date, self.satPos_i, self.in_frame)\n vAtm_i = self.AtmoModel.getVelocity(curr_date, self.satPos_i, self.in_frame)\n\n satVel = self.inertial2Sat.applyTo(self.satVel_i)\n vAtm = self.inertial2Sat.applyTo(vAtm_i)\n\n dragCoeff = self.meshDA['Cd']\n liftRatio = 0.0 # no lift considered\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = np.asarray(self.meshDA['Area'])\n satVel = np.array([satVel.x, satVel.y, satVel.z])\n vAtm = np.array([vAtm.x, vAtm.y, 
vAtm.z])\n\n relativeVelocity = vAtm - (satVel + (np.cross(omega, CoM)))\n vNorm = np.linalg.norm(relativeVelocity, axis=1)\n vDir = np.reciprocal(vNorm[:, None]) * relativeVelocity\n\n dot = np.einsum('ij,ij->i', normal, vDir)\n\n dotCondition = dot < 0\n dot = dot[dotCondition]\n if dot.size > 0:\n vDir = vDir[dotCondition]\n vNorm = vNorm[dotCondition]\n normal = normal[dotCondition]\n area = area[dotCondition]\n CoM = CoM[dotCondition]\n\n coeff = 0.5 * rho * dragCoeff * (vNorm**2)\n oMr = 1.0 - liftRatio\n f = (coeff * area * dot)[:, None]\n\n aT = np.sum(np.cross(CoM, oMr * np.absolute(f) * vDir + 2 * liftRatio * f * normal), axis=0)\n\n self._aTorque = Vector3D(float(aT[0]), float(aT[1]), float(aT[2]))\n\n else:\n self._aTorque = Vector3D.ZERO", "def compute_force(X, V, bl, ip, box, gamma, kT, dt):\n N = len(X)\n F = np.zeros((N, 3))\n Fcube = np.zeros((N, N, 3))\n inv_box = np.zeros((3, 3))\n for i in range(3): inv_box[i, i] = 1.0 / box[i, i]\n g = np.zeros(3)\n rij = np.zeros(3)\n vij = np.zeros(3)\n a = 0.0\n nr = 0.0\n fpair = 0.0\n\n vir = 0.0\n sigma = np.zeros(3)\n volume = np.linalg.det(box)\n\n for i in range(N):\n for j in range(i):\n rij = X[i] - X[j]\n g = matvecmul(inv_box, rij)\n g = g - np.round_(g, 0, np.empty_like(g))\n rij = matvecmul(box, g)\n vij = V[i] - V[j]\n\n a = ip[bl[i]-1, bl[j]-1]\n nr = norm_numba(rij)\n\n fc = a * wr(nr)\n fpair = fc \\\n - gamma * wr(nr)**2 * dot_numba(rij, vij) / nr \\\n + sqrt(2.0*gamma*kT) * wr(nr) * np.random.randn() / sqrt(dt)\n Fcube[i, j, :] = fpair / nr * rij\n Fcube[j, i, :] = -fpair / nr * rij\n\n vir += Fcube[i, j, :] @ rij\n sigma += Fcube[i, j, :] * rij\n\n # kinetic part of stress tensor\n for i in range(N):\n sigma += V[i] * V[i]\n\n sigma = sigma / volume\n F = np.sum(Fcube, 1)\n\n return F, vir, sigma", "def main():\r\n #Drag Force Equation: 1/2 * rho * Cd * A * v^2\r\n\r\n #User-Defined Constants\r\n global m\r\n global v0\r\n global theta\r\n global rho #Fluid Density\r\n global A #Cross-sectional Area\r\n global Cd #Drag coefficient\r\n global tStep\r\n global g\r\n\r\n m = 1\r\n v0 = 30\r\n theta = math.radians(45)\r\n rho = 1.225\r\n A = 0.05\r\n Cd = 0.5 #A ball is approx. 
0.5\r\n tStep = 0.005\r\n g = 9.8\r\n\r\n\r\n #Data Structures\r\n global tHist\r\n global xHist\r\n global yHist\r\n global thetaHist\r\n global vHist\r\n global vXHist\r\n global vYHist\r\n tHist = [] #list for all time steps\r\n xHist = [] #list for all x position steps\r\n yHist = [] #list for all y position steps\r\n thetaHist = [] #List for all theta at every time step\r\n vHist = [] #list for all velocities at every time step\r\n vXHist = [] #list for all x-axis velocities at every time step\r\n vYHist = [] #list for all y-axis velocities at every time step\r\n\r\n #Initialize intial values\r\n tHist.append(0.0)\r\n xHist.append(0.0)\r\n yHist.append(0.0)\r\n thetaHist.append(theta)\r\n vHist.append(v0)\r\n vXHist.append(v0 * math.cos(theta))\r\n vYHist.append(v0 * math.sin(theta))\r\n vTheta = math.atan(vYHist[0] / vXHist[0])\r\n # print(\"t: \" + str(tHist[0]))\r\n # print(\"x: \" + str(xHist[0]))\r\n # print(\"y: \" + str(yHist[0]))\r\n # print(\"v: \" + str(vHist[0]))\r\n # print(\"Vx: \" + str(vXHist[0]))\r\n # print(\"Vy: \" + str(vYHist[0]))\r\n\r\n #Convenience variables\r\n global k\r\n\r\n counter = 1\r\n #Loop until the y-displacement becomes negative (projectile reaches ground again)\r\n while True:\r\n tHist.append(counter * tStep) #increment time\r\n print(\"t: \" + str(tHist[counter]))\r\n\r\n #This large hunk is the solution to the net force differential equation in the x-axis\r\n # oneOverVX = (1/vXHist[counter-1]) + (((rho*A*Cd*math.cos(thetaHist[counter-1]))/(2*m))*(tStep)) #STABLE\r\n # oneOverVX = (1/vXHist[counter-1]) + (((rho*A*Cd)/(2*m))*(tStep))\r\n # oneOverVX = (1/vHist[counter-1]) + (((rho*A*Cd*math.cos(thetaHist[counter-1]))/(2*m))*(tStep))\r\n oneOverVX = (1/vXHist[counter-1]) + ((rho*A*Cd)/(2*m*math.cos(thetaHist[counter-1]))*(tStep)) #This is one over the solution for velocity in the x-axis net force differential equation\r\n vXHist.append(1 / oneOverVX) #Adding the velocity to the list of velocities\r\n\r\n vY0 = vYHist[counter-1] #Convenience variable\r\n # k = 0.5 * rho * A * Cd * math.sin(abs(thetaHist[counter-1])) #STABLE\r\n # k = 0.5 * rho * A * Cd\r\n k = (rho * A * Cd) / (2 * math.sin(abs(thetaHist[counter-1]))) #Convenience variable\r\n print(\"k: \" + str(k))\r\n print(\"vX: \" + str(vXHist[counter]))\r\n rootGMK = math.sqrt(g*m*k) #Convenience variable\r\n if vYHist[counter-1] > 0.0: #If the projectile is going upwards\r\n #Solving the y-axis differential equation for velocity\r\n equationRight = -rootGMK * ((tStep/m) - (math.atan((k*vY0)/(rootGMK))/rootGMK))\r\n vYHist.append((math.tan(equationRight) * rootGMK) / k)\r\n elif vYHist[counter-1] < 0.0: #If the projectile is going downwards\r\n #Solving the y-axis differential equation for velocity\r\n\r\n # Hand-solved integral\r\n # exponent = -(2*tStep*rootGMK)/m\r\n # numerator = g*m*math.exp(exponent) - math.exp(exponent)*vY0*rootGMK - vY0*rootGMK - g*m\r\n # denominator = math.exp(exponent)*(vY0-rootGMK) - vY0*k - rootGMK\r\n # vYHist.append(numerator / denominator)\r\n\r\n #Wolfram Alpha arctanh integral\r\n arctanh =(vY0*math.sqrt(k))/(math.sqrt(g*m))\r\n print(\"arctanh: \" + str(arctanh))\r\n equationRight = (np.arctanh(arctanh))/(rootGMK) - (tStep/m)\r\n vYHist.append(np.tanh(rootGMK * equationRight) * ((math.sqrt(g*m))/(math.sqrt(k))))\r\n else: #If current y velocity is 0\r\n vYHist.append(vY0 - g*tStep)\r\n print(\"vY: \" + str(vYHist[counter]))\r\n\r\n vHist.append(math.hypot(vXHist[counter], vYHist[counter])) #Calculate the net velocity and add it to the velocities list\r\n 
print(\"v: \" + str(vHist[counter]))\r\n thetaHist.append(math.atan(vYHist[counter]/vXHist[counter])) #Calculate the current angle based on the velocities and add it to the theta list\r\n print(\"0: \" + str(math.degrees(thetaHist[counter])))\r\n\r\n x0 = xHist[counter-1]\r\n y0 = yHist[counter-1]\r\n\r\n # yIntegral = trigintegrate()\r\n\r\n \"\"\"\r\n Note: What I wanted to do here was to integrate the velocity functions over the time interval to find the exact\r\n changes in position. Unfortunately, I was running short of time and decided it was not worth it to move forward with\r\n this final step, and instead worked on the presentation and testing different cases.\r\n \"\"\"\r\n xHist.append(x0 + vXHist[counter]*tStep) #Calculate new x position using x = x0 + vt\r\n yHist.append(y0 + vYHist[counter]*tStep) #Calculate new y position using y = y0 + vt\r\n print(\"x: \" + str(xHist[counter]))\r\n print(\"y: \" + str(yHist[counter]))\r\n print()\r\n\r\n # xHist.append(xHist[counter-1] + vXHist[counter-1]*tStep + 0.5*aXHist[counter-1]*tStep**2)\r\n # yHist.append(yHist[counter-1] + vYHist[counter-1]*tStep + 0.5*aYHist[counter-1]*tStep**2)\r\n # vXHist.append(vXHist[counter-1] + aXHist[counter-1]*tStep)\r\n # vYHist.append(vYHist[counter-1] + aYHist[counter-1]*tStep)\r\n # vHist.append(math.hypot(vXHist[counter], vYHist[counter]))\r\n #\r\n # vTheta = math.atan(vYHist[counter] / vXHist[counter])\r\n # xDragAccel = -0.5*rho*Cd*A*vHist[counter]**2*math.cos(vTheta) / m\r\n # yDragAccel = -math.copysign(0.5*rho*Cd*A*vHist[counter]**2*math.sin(vTheta) / m, vYHist[counter])\r\n #\r\n # aXHist.append(xDragAccel)\r\n # aYHist.append(-g*tStep + yDragAccel)\r\n\r\n if vYHist[counter-1] > 0.0 and vYHist[counter] < 0.0: #Check if the projectile has reached it's peak by checking for a critical point\r\n print(\"max height reached at time=\" + str(tHist[counter]))\r\n # break\r\n\r\n # print(\"t: \" + str(tHist[counter]))\r\n # print(\"x: \" + str(xHist[counter]))\r\n # print(\"y: \" + str(yHist[counter]))\r\n # print(\"Vx: \" + str(vXHist[counter]))\r\n # print(\"Vy: \" + str(vYHist[counter]))\r\n # print(\"Ax: \" + str(aXHist[counter]))\r\n # print(\"Ay: \" + str(aYHist[counter]))\r\n\r\n if yHist[counter] < 0 or counter > 99999: #End the loop if the projectile has reached the ground (or limit the number of iterations to avoid computer death)\r\n break\r\n\r\n counter += 1\r\n\r\n plotData()", "def fvm(states: States, grid: Gridlines, topo: Topography, config: Config, runtime: DummyDict):\n # pylint: disable=invalid-name\n\n # calculate source term contributed from topography gradients\n states = topography_gradient(states, topo, config.params.gravity)\n\n # calculate slopes of piecewise linear approximation\n states = minmod_slope(states, grid, config.params.theta, runtime.tol)\n\n # interpolate to get discontinuous conservative quantities at cell faces\n states = get_discontinuous_cnsrv_q(states, grid)\n\n # fix non-physical negative depth\n states = correct_negative_depth(states, topo)\n\n # get non-conservative variables at cell faces\n states = decompose_variables(states, topo, runtime.epsilon)\n\n # get local speed at cell faces\n states = get_local_speed(states, config.params.gravity)\n\n # get discontinuous PDE flux at cell faces\n states = get_discontinuous_flux(states, topo, config.params.gravity)\n\n # get common/continuous numerical flux at cell faces\n states = central_scheme(states, runtime.tol)\n\n # get final right hand side\n states.rhs.w = \\\n (states.face.x.num_flux.w[:, :-1] - 
states.face.x.num_flux.w[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.w[:-1, :] - states.face.y.num_flux.w[1:, :]) / grid.y.delta + \\\n states.src.w\n\n states.rhs.hu = \\\n (states.face.x.num_flux.hu[:, :-1] - states.face.x.num_flux.hu[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.hu[:-1, :] - states.face.y.num_flux.hu[1:, :]) / grid.y.delta + \\\n states.src.hu\n\n states.rhs.hv = \\\n (states.face.x.num_flux.hv[:, :-1] - states.face.x.num_flux.hv[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.hv[:-1, :] - states.face.y.num_flux.hv[1:, :]) / grid.y.delta + \\\n states.src.hv\n\n # remove rounding errors\n states.rhs = remove_rounding_errors(states.rhs, runtime.tol)\n\n # obtain the maximum safe dt\n amax = nplike.max(nplike.maximum(states.face.x.plus.a, -states.face.x.minus.a))\n bmax = nplike.max(nplike.maximum(states.face.y.plus.a, -states.face.y.minus.a))\n max_dt = min(0.25*grid.x.delta/amax, 0.25*grid.y.delta/bmax)\n\n return states, max_dt", "def weight_update_conjugate_gradient(self, network):\n # compute beta: Fletcher-Reeves\n num = 0.0\n for l, layer in enumerate(network.layers):\n num += np.sum(self.dc_db[l] ** 2)\n num += np.sum(self.dc_dq[l] ** 2)\n num += np.sum(self.dc_drx_inp[l] ** 2)\n num += np.sum(self.dc_dry_inp[l] ** 2)\n num += np.sum(self.dc_drx_pos_out[l] ** 2)\n num += np.sum(self.dc_dry_pos_out[l] ** 2)\n num += np.sum(self.dc_drx_neg_out[l] ** 2)\n num += np.sum(self.dc_dry_neg_out[l] ** 2)\n\n # Initialize velocities to zero for momentum\n if self.vel_b is None or self.vel_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n # Take steepest descent step\n for l, layer in enumerate(network.layers):\n layer.b -= self.alpha * self.dc_db[l]\n layer.q -= self.alpha * self.dc_dq[l]\n layer.rx_inp -= self.alpha * self.dc_drx_inp[l]\n layer.ry_inp -= self.alpha * self.dc_dry_inp[l]\n layer.rx_pos_out -= self.alpha * self.dc_drx_pos_out[l]\n layer.ry_pos_out -= self.alpha * self.dc_dry_pos_out[l]\n layer.rx_neg_out -= self.alpha * self.dc_drx_neg_out[l]\n layer.ry_neg_out -= self.alpha * self.dc_dry_neg_out[l]\n\n else:\n # compute beta\n beta = num / self.denominator\n\n # compute s_n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] = -self.alpha * self.dc_db[l] + beta * self.ms_b[l]\n self.ms_q[l] = -self.alpha * self.dc_dq[l] + beta * self.ms_q[l]\n self.ms_rx_inp[l] = -self.alpha * self.dc_drx_inp[l] + beta * self.ms_rx_inp[l]\n self.ms_ry_inp[l] = -self.alpha * self.dc_dry_inp[l] + beta * self.ms_ry_inp[l]\n self.ms_rx_pos_out[l] = -self.alpha * self.dc_drx_pos_out[l] + beta * self.ms_rx_pos_out[l]\n self.ms_ry_pos_out[l] = -self.alpha * self.dc_dry_pos_out[l] + beta * self.ms_ry_pos_out[l]\n self.ms_rx_neg_out[l] = -self.alpha * self.dc_drx_neg_out[l] + beta * self.ms_rx_neg_out[l]\n self.ms_ry_neg_out[l] = -self.alpha * self.dc_dry_neg_out[l] + beta * self.ms_ry_neg_out[l]\n\n # Take step\n for l, layer in 
enumerate(network.layers):\n layer.b += self.alpha * self.ms_b[l]\n layer.q += self.alpha * self.ms_q[l]\n layer.rx_inp += self.alpha * self.ms_rx_inp[l]\n layer.ry_inp += self.alpha * self.ms_ry_inp[l]\n layer.rx_pos_out += self.alpha * self.ms_rx_pos_out[l]\n layer.ry_pos_out += self.alpha * self.ms_ry_pos_out[l]\n layer.rx_neg_out += self.alpha * self.ms_rx_neg_out[l]\n layer.ry_neg_out += self.alpha * self.ms_ry_neg_out[l]\n\n # store num for next iteration to be used as denominator\n self.denominator = num", "def method_compute_timestep(self):\n\n myg = self.cc_data.grid\n\n cfl = self.rp.get_param(\"driver.cfl\")\n\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n # the timestep is min(dx/|u|, dy|v|)\n xtmp = ytmp = 1.e33\n if not abs(u).max() == 0:\n xtmp = myg.dx/abs(u.v()).max()\n if not abs(v).max() == 0:\n ytmp = myg.dy/abs(v.v()).max()\n\n dt = cfl*min(xtmp, ytmp)\n\n # We need an alternate timestep that accounts for buoyancy, to\n # handle the case where the velocity is initially zero.\n rho = self.cc_data.get_var(\"density\")\n rho0 = self.base[\"rho0\"]\n rhoprime = self.make_prime(rho, rho0)\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n\n F_buoy = (abs(rhoprime*g).v()/rho.v()).max()\n\n dt_buoy = np.sqrt(2.0*myg.dx/F_buoy)\n\n self.dt = min(dt, dt_buoy)\n if self.verbose > 0:\n print(f\"timestep is {dt}\")", "def f_dyn(Y, Yd, Fe, Te, tau, Conn, Prop):\n SE = Conn[1]\n ce, Qe = Prop[3], Prop[5]\n R0, Q0, q = Y[0:3], Y[3:6], Y[6:]\n v0, w0, qd = Yd[0:3], Yd[3:6], Yd[6:]\n\n num_j = len(q) # Number of joints/links\n num_e = SE.shape[1] # Number of endpoints\n\n # Position and rotation matrices\n AA = calc_aa(Q0, q, Conn, Prop)\n RR = calc_pos(R0, AA, q, Conn, Prop)\n\n # Inertia matrice\n HH = calc_hh(RR, AA, Conn, Prop)\n\n # Calculation of velocity dependent terms using the recursive Newton-Eulero\n # inverse dynamics setting to zero all accelerations and forces\n zero_Ydd = np.zeros(6+num_j)\n zero_Fe = np.zeros((3, num_e))\n Force0 = r_ne(RR, AA, q, Yd, zero_Ydd, zero_Fe, zero_Fe, Conn, Prop)\n\n # Generalized external forces applied on base centroid and joints\n F0 = np.zeros(3)\n T0 = np.zeros(3)\n\n # Loop over all endpoints\n for ie in range(num_e):\n\n # If the endpoint is associated with the base\n if (SE[0, ie] == 0):\n\n A_0_ie = rpy2dc(Qe[:, ie]).T # Endpoint to base\n A_I_ie = AA[:, 0:3] @ A_0_ie # Endpoint to inertial\n\n # If the external load is given wrt the local frame\n if (SE[1, ie] == 0):\n R_0_ie = A_0_ie.T @ ce[:, ie]\n F0 += A_I_ie @ Fe[:, ie]\n T0 += A_I_ie @ (tilde(R_0_ie) @ Fe[:, ie] + Te[:, ie])\n\n # If the external load is given wrt the inertial frame\n else:\n R_0_ie = A_0_ie @ ce[:, ie]\n F0 += Fe[:, ie]\n T0 += (tilde(R_0_ie) @ Fe[:, ie] + Te[:, ie])\n\n # Assemble all terms\n Force = np.block([F0, T0, tau])\n\n # Generalized external forces applied to the link endpoints\n Fx = np.zeros(3)\n Tx = np.zeros(3)\n taux = np.zeros(num_j)\n\n # Loop over all endpoints\n for ie in range(num_e):\n\n i = SE[0, ie] # Link associated to the endpoint <ie>\n\n # If the endpoint is associated with a link\n if (i > 0):\n\n # Endpoint Jacobian - shape is (6 x num_j)\n JJ_tmp = calc_je(ie, RR, AA, q, Conn, Prop)\n JJ_tx_i = JJ_tmp[0:3, :] # Translational component\n JJ_rx_i = JJ_tmp[3:6, :] # Rotational component\n\n # Endpoint position wrt the base centroid\n A_I_i = AA[:, 3*i:3*(i+1)]\n A_I_ie = A_I_i @ rpy2dc(Qe[:, ie]).T\n R_0_ie = RR[:, i] - RR[:, 0] + A_I_i @ ce[:, ie]\n\n # If the external load is given wrt 
the local frame\n if (SE[1, ie] == 0):\n Fx += A_I_ie @ Fe[:, ie]\n Tx += tilde(R_0_ie) @ A_I_ie @ Fe[:, ie] + A_I_ie @ Te[:, ie]\n taux += + JJ_tx_i.T @ A_I_ie @ Fe[:, ie] \\\n + JJ_rx_i.T @ A_I_ie @ Te[:, ie]\n\n # If the external load is given wrt the inertial frame\n else:\n Fx += Fe[:, ie]\n Tx += tilde(R_0_ie) @ Fe[:, ie] + Te[:, ie]\n taux += JJ_tx_i.T @ Fe[:, ie] + JJ_rx_i.T @ Te[:, ie]\n\n # Assemble the link endpoint contributions\n Force_ee = np.block([Fx, Tx, taux])\n\n # Calculate the accelerations - eq. 3.29\n Ydd = np.linalg.inv(HH) @ (Force + Force_ee - Force0)\n\n return Ydd", "def optimize_force_field_parameters_Cv_FWHM(cgmodel, file_list, temperature_list, param_bounds_dict,\n frame_begin=0, frame_end=-1, sample_spacing=1, sparsify_stride=1, output_data='output.nc',\n verbose=False, n_cpu=12, min_eff_samples=50,\n n_trial_boot=200, num_intermediate_states=0, plotfile='optimize_FWHM_iterations.pdf',\n min_method='TNC'):\n\n # Parse the force field parameter change dict:\n x0 = []\n param_names = []\n bounds = []\n units = []\n \n for key,value in param_bounds_dict.items():\n # value should be [(bound_lo, bound_hi)]\n # key should be a valid force field parameter name\n param_names.append(key)\n # Every parameter except periodicity should have units\n # For now, changing periodicity is not supported.\n \n # TODO: add support for sums of periodic torsion terms\n units.append(value[0].unit)\n bounds.append((value[0].value_in_unit(units[-1]),value[1].value_in_unit(units[-1])))\n # Use mean value as starting guess:\n x0.append((value[1].value_in_unit(units[-1])+value[0].value_in_unit(units[-1]))/2)\n\n if verbose:\n print(f'param_names: {param_names}')\n print(f'unit: {units}')\n print(f'bounds: {bounds}')\n print(f'x0: {x0}')\n\n def get_reeval_FWHM(param_values, cgmodel, file_list, temperature_list, output_data,\n param_names, units, frame_begin, sample_spacing, sparsify_stride, frame_end,\n n_cpu, n_trial_boot, num_intermediate_states):\n \"\"\"\n Objective function to be minimized\n \"\"\"\n\n # Construct dictionary of parameter update instructions:\n param_dict = {}\n \n # if len(param_names) == 1:\n # # 1D optimization:\n # param_dict[param_names[0]] = param_values * units[0]\n\n for i in range(len(param_names)):\n param_dict[param_names[i]] = param_values[i] * units[i]\n \n if verbose:\n print(f'Current parameters: {param_dict}') \n \n # Re-evaluate energy with current force field parameters:\n # For bootstrapping, evaluate all frames between [frame_begin:sparsify_stride:frame_end], and\n # apply the sample_spacing only to the heat capacity part\n U_eval, simulation = eval_energy(\n cgmodel,\n file_list,\n temperature_list,\n param_dict,\n frame_begin=frame_begin,\n frame_stride=sparsify_stride,\n frame_end=frame_end,\n n_cpu=n_cpu,\n verbose=verbose,\n )\n\n # Evaluate heat capacity and full-width half-maximum from bootstrapping:\n (new_temperature_list, C_v_values, C_v_uncertainty,\n Tm_value, Tm_uncertainty,\n Cv_height_value, Cv_height_uncertainty,\n FWHM_value, FWHM_uncertainty,\n N_eff_values) = bootstrap_heat_capacity(\n U_kln=U_eval,\n output_data=output_data,\n frame_begin=frame_begin,\n frame_end=frame_end,\n sample_spacing=sample_spacing,\n sparsify_stride=sparsify_stride,\n num_intermediate_states=num_intermediate_states,\n n_trial_boot=n_trial_boot,\n plot_file=f'heat_capacity_boot_{param_names[0]}_{param_values}.pdf',\n )\n \n if verbose:\n print(f'Current FWHM: {FWHM_value} +/- {FWHM_uncertainty[0]}')\n print(f'Current minimum N_eff: 
{np.min(N_eff_values)}')\n \n # Check for minimum N_eff criteria.\n # If too small, the minimization should stop if we're using a gradient method.\n # If we're not using a gradient method, return a large value.\n \n if np.min(N_eff_values) < min_eff_samples:\n print(f'Insufficient number of effective samples ({np.min(N_eff_values)})')\n \n # print(f'Creating a cgmodel with current parameters...,end='')\n # Create the cgmodel\n # print('done')\n \n exit()\n \n return FWHM_value.value_in_unit(unit.kelvin)\n\n # Run optimization:\n\n # if len(param_names) == 1:\n # # Do scalar optimization:\n # opt_results = minimize_scalar(get_reeval_FWHM, x0,\n # args=(cgmodel, file_list, temperature_list, output_data, param_names, units,\n # frame_begin, sample_spacing, sparsify_stride, frame_end, n_cpu, n_trial_boot, num_intermediate_states),\n # method='bounded',\n # bounds=[bounds[0][0],bounds[0][1]],\n # options={'maxiter': 25},\n # )\n\n # else:\n # Do multivariate optimization:\n opt_results = minimize(get_reeval_FWHM, x0, jac='2-point',\n args=(cgmodel, file_list, temperature_list, output_data, param_names, units,\n frame_begin, sample_spacing, sparsify_stride, frame_end, n_cpu, n_trial_boot, num_intermediate_states),\n method=min_method,\n bounds=bounds,\n options={'maxfun': 25, 'finite_diff_rel_step': 0.005, 'eta': 0.5}, # This should be user input\n ) \n \n # TODO: plot the heat capacity curves at each iteration, and make a plot of all FWHM_values \n\n # Construct dictionary of optimal parameters:\n opt_param_dict = {} \n \n k = 0\n for key,value in param_bounds_dict.items():\n opt_param_dict[key] = opt_results.x[k] * units[k]\n k += 1\n \n return opt_param_dict, opt_results", "def preCondConjugateGradientSolver(b, x, linsys_setup, eps, i_max, plotInterval, mapDir):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz=True\n \n \n # Calculate residual r = b - (A^-1) x\n r = b - applyMat(x, linsys_setup)\n d = r\n\n\n delta_new = numpy.inner(r,r)\n \n\n\n\n delta_o = delta_new\n delta_array = numpy.zeros(shape=(i_max))\n \n # Iterate CG solver until converged\n i = 0\n #i_max = 300\n while (i < i_max) and (delta_new > delta_o*eps**2.):\n if i==0: t = time.time()\n \n if i%plotInterval == 0 and i != 0:\n print \"\\tNumber of iterations in the CG:\", i\n x0 = x[:nx*ny] # CMB\n x1 = x[nx*ny:nx*ny+1] # Monopole\n x2 = x[nx*ny+1:nx*ny+1+nCluster] # TSZ\n if ksz: x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n print \"\\tMonopole:\", x1\n print \"\\tTSZ:\", x2\n if ksz: print \"\\tKSZ:\", x3\n \n x0.shape = (ny,nx)\n a_l = numpy.fft.fft2(x0)\n a_l *= precond_2d\n x_test = numpy.real(numpy.fft.ifft2(a_l))\n plot(x_test,mapDir+'/CMB_%d.png'%i,'Reconstructed CMB', range=(-250., 250.))\n print delta_new, delta_o*eps**2.\n\n q = applyMat(d, linsys_setup)\n alpha = delta_new / (numpy.inner(d,q))\n x += alpha * d\n\n # What does this do? It's always false.\n if i/50. 
< numpy.int(i/50):\n r = b - applyMat(x, linsys_setup)\n else:\n r = r - alpha*q\n \n delta_old = delta_new\n delta_new = numpy.inner(r,r)\n beta = delta_new/delta_old\n d = r + beta * d\n #if i==0: print \"\\tEach iteration takes:\", time.time()-t\n i += 1\n\n x0 = x[:nx*ny].reshape((ny, nx))\n x1 = x[nx*ny:nx*ny+1]\n x2 = x[nx*ny+1:nx*ny+1+nCluster]\n if ksz:\n x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n else:\n x3 = None\n \n a_l = numpy.fft.fft2(x0) * precond_2d\n x0 = numpy.real(numpy.fft.ifft2(a_l))\n\n \n # CMB, monopole, TSZ, KSZ\n return x0, x1, x2, x3" ]
[ "0.75096387", "0.6571571", "0.6571571", "0.61444074", "0.53732836", "0.5364078", "0.53592175", "0.53389966", "0.5281263", "0.52668315", "0.52541155", "0.52250105", "0.52205896", "0.5189851", "0.5186091", "0.512015", "0.50682104", "0.5003378", "0.49752918", "0.49623924", "0.49581325", "0.49376437", "0.4918121", "0.49087563", "0.49076924", "0.48919708", "0.48911765", "0.48859563", "0.48802888", "0.4869652" ]
0.77637815
0
Compute the magnetic torque if a magnetic model is provided. This method converts the satellite's position into a longitude, latitude, altitude representation to determine the geomagnetic field at that position, and then computes the magnetic torque from those values.
def _compute_magnetic_torque(self, curr_date): if self._to_add[1]: gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date) topoframe = TopocentricFrame(self.earth, gP, 'ENU') topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date) lat = gP.getLatitude() lon = gP.getLongitude() alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km] # get B-field in geodetic system (X:East, Y:North, Z:Nadir) B_geo = FileDataHandler.mag_field_model.calculateField( degrees(lat), degrees(lon), alt).getFieldVector() # convert geodetic frame to inertial and from [nT] to [T] B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo)) B_b = self.inertial2Sat.applyTo(B_i) B_b = np.array([B_b.x, B_b.y, B_b.z]) dipoleVector = self.dipoleM.getDipoleVectors(B_b) torque = np.sum(np.cross(dipoleVector, B_b), axis=0) self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2])) else: self._mTorque = Vector3D.ZERO
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def torque(system, /, use_demag=True):\n if use_demag:\n total_field = (mm.consts.mu0 *\n (oc.compute(system.energy.demag.effective_field, system)\n + system.energy.zeeman.H))\n else:\n total_field = mm.consts.mu0 * np.array(system.energy.zeeman.H)\n norm_field = df.Field(system.m.mesh, dim=1,\n value=(system.m.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n moment = system.m * volume\n torque = (moment & total_field)\n return (df.integral(torque * df.dV / volume**2, direction='xyz'))", "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j*kr\n\n front_term = self.moment / (4. * np.pi * r**3) * np.exp(-ikr)\n symmetric_term = (\n spatial.repeat_scalar(self.dot_orientation(dxyz)) * dxyz *\n (-kr**2 + 3*ikr + 3) / r**2\n )\n oriented_term = (\n (kr**2 - ikr - 1) *\n np.kron(self.orientation, np.ones((dxyz.shape[0], 1)))\n )\n\n return front_term * (symmetric_term + oriented_term)", "def get_motor_load_torque(self):\n # Start with the brake normal\n # change to 17deg (tan 17?)\n # change to torque using the pitch of the thread on the ball screw\n # (^ make sure to take friction into account)\n # That should give us the torque acting on the motor. If this torque is greater than the motor max torque, it will slip\n # Take into account that the max holding torque is different from the max torque. How do we know if the motor is holding or moving? \n # How do we control the stepper motor? Where are the routines for that? \n pass", "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j*kr\n\n front_term = (\n self.current * self.length / (4 * np.pi * r**2) * (ikr + 1) *\n np.exp(-ikr)\n )\n return -front_term * self.cross_orientation(dxyz) / r", "def _compute_solar_torque(self):\n pass", "def _compute_gravity_torque(self):\n pass", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n Rq, phiq, Zq = coords.T\n if self._axisym:\n BRq = interp2d(\n Rq,\n Zq,\n self._R,\n self._Z,\n self._BR[:, 0, :],\n self._method,\n (0, 0),\n self._extrap,\n (None, None),\n **self._derivs[\"BR\"],\n )\n Bphiq = interp2d(\n Rq,\n Zq,\n self._R,\n self._Z,\n self._Bphi[:, 0, :],\n self._method,\n (0, 0),\n self._extrap,\n (None, None),\n **self._derivs[\"Bphi\"],\n )\n BZq = interp2d(\n Rq,\n Zq,\n self._R,\n self._Z,\n self._BZ[:, 0, :],\n self._method,\n (0, 0),\n self._extrap,\n (None, None),\n **self._derivs[\"BZ\"],\n )\n\n else:\n BRq = interp3d(\n Rq,\n phiq,\n Zq,\n self._R,\n self._phi,\n self._Z,\n self._BR,\n self._method,\n (0, 0, 0),\n self._extrap,\n (None, self._period, None),\n **self._derivs[\"BR\"],\n )\n Bphiq = interp3d(\n Rq,\n phiq,\n Zq,\n self._R,\n self._phi,\n self._Z,\n self._Bphi,\n self._method,\n (0, 0, 0),\n self._extrap,\n (None, self._period, None),\n **self._derivs[\"Bphi\"],\n )\n BZq = interp3d(\n Rq,\n phiq,\n Zq,\n self._R,\n self._phi,\n self._Z,\n self._BZ,\n self._method,\n (0, 0, 0),\n self._extrap,\n (None, self._period, None),\n **self._derivs[\"BZ\"],\n )\n B = jnp.array([BRq, Bphiq, BZq]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n return B", "def get_torque(self):\n return self.node.sdo[0x6077].phys # rate torque(mN.m) 
/1000", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n\n R, phi, Z = coords.T\n r = jnp.sqrt((R - self._R0) ** 2 + Z**2)\n theta = jnp.arctan2(Z, R - self._R0)\n br = -r * jnp.sin(theta)\n bp = jnp.zeros_like(br)\n bz = r * jnp.cos(theta)\n bmag = self._B0 * self._iota / self._R0\n B = bmag * jnp.array([br, bp, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B", "def mTorque(self):\n pass", "def make_torque(self):\n def torque_func(m):\n heff = self.field(m)\n total_torque = torque.landau_lifshitz(m, heff, self.damping)\n if self.stt != 0:\n total_torque += torque.slonczewski(m, self.Jc, self.stt)\n return total_torque\n self.torque = torque_func", "def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n sat2body = body2sat.revert()\n\n satM = self.state_observer.spacecraftState.getMass()\n mCub = self.inCub['mass_frac'] * satM\n\n self._gTorque = Vector3D.ZERO\n\n for CoM in self.inCub['CoM']:\n\n S_dmPos = self.satPos_s.add(CoM)\n\n r2 = S_dmPos.getNormSq()\n gNewton = Vector3D(-self.muGM / (sqrt(r2) * r2), S_dmPos)\n\n B_dmPos = sat2body.applyTo(S_dmPos)\n\n gDist = Vector3D(self.GravityModel.gradient(curr_date,\n B_dmPos,\n self.muGM))\n\n g_Dist_s = body2sat.applyTo(gDist)\n\n dmForce = Vector3D(mCub, gNewton.add(g_Dist_s))\n self._gTorque = self._gTorque.add(self.V3_cross(CoM, dmForce))\n\n else:\n self._gTorque = Vector3D.ZERO", "def _compute_aero_torque(self):\n pass", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n Rq, phiq, Zq = coords.T\n\n if (params is None) or (len(params) == 0):\n params = self._params\n r, p, z = coords.T\n funR = lambda x: self._potential(x, p, z, **params)\n funP = lambda x: self._potential(r, x, z, **params)\n funZ = lambda x: self._potential(r, p, x, **params)\n br = Derivative.compute_jvp(funR, 0, (jnp.ones_like(r),), r)\n bp = Derivative.compute_jvp(funP, 0, (jnp.ones_like(p),), p)\n bz = Derivative.compute_jvp(funZ, 0, (jnp.ones_like(z),), z)\n B = jnp.array([br, bp / r, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n return B", "def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n body2satRot = PyRotation(body2sat.q0,\n body2sat.q1,\n body2sat.q2,\n body2sat.q3)\n sat2bodyRot = body2satRot.revert()\n body2sat = body2satRot.getMatrix()\n sat2body = sat2bodyRot.getMatrix()\n\n satM = self.spacecraft_state.getMass()\n mCub = self.inCub['dm'] * satM\n # add booms\n if \"dm_boom\" in self.inCub:\n mCub = np.concatenate((mCub, self.inCub['dm_boom']), axis=0) # boom store with mass\n CoM = self.inCub['CoM_np']\n\n dmPos_s = CoM + self.satPos_s\n\n gNewton = (-self.muGM / np.linalg.norm(dmPos_s,\n axis=1,\n keepdims=True)**3) * dmPos_s\n\n # rotate vectors:\n dmPos_b = np.einsum('ij,kj->ki', sat2body, 
dmPos_s)\n\n gDist = np.empty(dmPos_b.shape)\n for i in xrange(0, dmPos_b.shape[0]):\n gDist[i, :] = np.asarray(\n self.GravityModel.gradient(curr_date,\n Vector3D(float(dmPos_b[i, 0]),\n float(dmPos_b[i, 1]),\n float(dmPos_b[i, 2])),\n self.muGM))\n\n gDist_s = np.einsum('ij,kj->ki', body2sat, gDist)\n\n gT = np.sum(np.cross(CoM, mCub*(gNewton + gDist_s)), axis=0)\n\n self._gTorque = Vector3D(float(gT[0]), float(gT[1]), float(gT[2]))\n\n else:\n self._gTorque = Vector3D.ZERO", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n bp = self._B0 * self._R0 / coords[:, 0]\n brz = jnp.zeros_like(bp)\n B = jnp.array([brz, bp, brz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B", "def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n bz = self._B0 * jnp.ones_like(coords[:, 2])\n brp = jnp.zeros_like(bz)\n B = jnp.array([brp, brp, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B", "def test_comp_magnetostrictive_tensor_1cell():\n\n # Physical quantities\n dim = 2\n Nt_tot = 1\n\n mu = 1\n Be = np.array([[[mu / 2, 0]]])\n He = np.array([[[-1 / 2, 0]]])\n mue = np.array([[mu]])\n\n Me = np.reshape(Be / mue - He, (dim, 1, Nt_tot))\n\n alphaij = [[1, 0, 0], [1, 0, 0]]\n\n alpha1 = 1\n alpha2 = 1\n\n # Computation\n tensor = ForceTensor()\n\n tensor_comp = tensor.comp_magnetrosctrictive_tensor(\n mue, Me, Nt_tot, alphaij\n ) # Should be equal to -alpha1*mu*MM' - alpha2*mu*M²*I2\n\n assert tensor_comp[0, 0, 0] == -mu * (alpha1 + alpha2)\n assert tensor_comp[0, 1, 0] == 0\n assert tensor_comp[1, 0, 0] == 0\n assert tensor_comp[1, 1, 0] == -mu * alpha2\n\n print(\"test_comp_magnetostrictive_tensor succeeded\")\n\n return True", "def magnetisation(field):\n norm_field = df.Field(field.mesh, dim=1, value=(field.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n return df.integral(field * df.dV / volume, direction='xyz')", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n return self._scalar * self._field.compute_magnetic_field(coords, params, basis)", "def magnetometer(self):\n self._mag[X] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_X_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_X_L_M), 16)\n self._mag[Y] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Y_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Y_L_M), 16)\n self._mag[Z] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Z_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Z_L_M), 16)\n\n return vector(self._mag)", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = self.inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n sunPos = np.array([sunPos.x, sunPos.y, sunPos.z], dtype='float64')\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = self.meshDA['Area_np']\n coefs = self.meshDA['Coefs_np']\n\n sunSatVector = self.satPos_s + CoM - 
sunPos\n r = np.linalg.norm(sunSatVector, axis=1)\n rawP = ratio * self.K_REF / (r**2)\n flux = (rawP / r)[:, None] * sunSatVector\n # eliminate arrays where zero flux\n fluxNorm = np.linalg.norm(flux, axis=1)\n Condflux = fluxNorm**2 > Precision.SAFE_MIN\n flux = flux[Condflux]\n normal = normal[Condflux]\n\n # dot product for multidimensional arrays:\n dot = np.einsum('ij,ij->i', flux, normal)\n dot[dot > 0] = dot[dot > 0] * (-1.0)\n if dot.size > 0:\n normal[dot > 0] = normal[dot > 0] * (-1.0)\n\n cN = 2 * area * dot * (coefs[:, 2] / 3 - coefs[:, 1] * dot / fluxNorm)\n cS = (area * dot / fluxNorm) * (coefs[:, 1] - 1)\n force = cN[:, None] * normal + cS[:, None] * flux\n\n sT = np.sum(np.cross(CoM, force), axis=0)\n\n self._sTorque = Vector3D(float(sT[0]), float(sT[1]), float(sT[2]))\n\n else:\n self._sTorque = Vector3D.ZERO", "def setMotorTorque(self, torque):\r\n if torque < 0.0:\r\n torque = 0.0\r\n elif torque > 1.0:\r\n torque = 1.0\r\n torque *= self.maxTorque\r\n if self.reverse:\r\n torque *= -1\r\n dTorque = 2\r\n if self.torque < torque:\r\n self.torque += dTorque\r\n elif self.torque > torque:\r\n self.torque -= dTorque\r\n \r\n for tire in self.tires:\r\n if tire.torque:\r\n tire.shape.setMotorTorque( self.torque )", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n inertial2Sat = self.spacecraft_state.getAttitude().getRotation()\n\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n self._sTorque = Vector3D.ZERO\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'],\n self.meshDA['Coefs'])\n\n for CoM, normal, area, coefs in iterator:\n position = self.satPos_s.add(CoM)\n\n # compute flux in inertial frame\n sunSatVector = \\\n position.subtract(sunPos)\n r2 = sunSatVector.getNormSq()\n\n rawP = ratio * self.K_REF / r2\n flux = Vector3D(rawP / sqrt(r2), sunSatVector)\n\n # compute Radiation Pressure Force:\n if flux.getNormSq() > Precision.SAFE_MIN:\n # illumination (we are not in umbra)\n # rotate flux to spacecraft frame:\n dot = self.V3_dot(normal, flux)\n\n if dot > 0:\n # the solar array is illuminated backward,\n # fix signs to compute contribution correctly\n dot = -dot\n normal = normal.negate()\n absorbCoeff = coefs[0]\n specularReflCoeff = coefs[1]\n diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff)\n try:\n assert(diffuseReflCoeff >= 0)\n except AssertionError:\n raise AssertionError(\n \"Negative diffuse reflection coefficient not possible!\")\n psr = flux.getNorm()\n # Vallado's equation uses different parameters which are\n # related to our parameters as:\n # cos (phi) = - dot / (psr*area)\n # n = N (n...unit vector)\n # s = -fluxSat / psr (s...unit vector)\n cN = 2 * area * dot * (diffuseReflCoeff / 3 -\n specularReflCoeff * dot / psr)\n cS = (area * dot / psr) * (specularReflCoeff - 1)\n Force = Vector3D(float(cN), normal, float(cS), flux)\n # Force already in spacecraft frame. 
No need to convert\n self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force))\n\n else:\n self._sTorque = Vector3D.ZERO", "def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))", "def get_motor_torques(\n self,\n motor_commands: np.ndarray,\n motor_control_mode=None) -> Tuple[np.ndarray, np.ndarray]:\n if not motor_control_mode:\n motor_control_mode = self._motor_control_mode\n\n motor_torques = None\n\n if motor_control_mode is robot_config.MotorControlMode.TORQUE:\n motor_torques = motor_commands\n\n if motor_control_mode is robot_config.MotorControlMode.POSITION:\n motor_torques = self._compute_pd_torques(\n desired_motor_angles=motor_commands,\n kp=self._kp,\n desired_motor_velocities=self._zero_array,\n kd=self._kd)\n \n if motor_torques is None:\n raise ValueError(\n \"{} is not a supported motor control mode\".format(motor_control_mode))\n\n # Apply the output filter to model actuator dynamics\n # BUG: Causes big instability in the sim\n # motor_torques = self._torque_filter(motor_torques)\n\n # Hard-code torque limits until the torque limit bug is fixed\n motor_torques = np.clip(motor_torques, -1.7, 1.7)\n\n # Apply motor damping and friction\n motor_torques -= (np.sign(self._previous_true_motor_velocity) *\n self._motor_torque_dependent_friction *\n motor_torques)\n motor_torques -= self._previous_true_motor_velocity * self._motor_damping\n\n # Rescale and clip the motor torques as needed.\n motor_torques = self._strength_ratios * motor_torques\n if (self._torque_lower_limits is not None or\n self._torque_upper_limits is not None):\n motor_torques = np.clip(motor_torques, self._torque_lower_limits,\n self._torque_upper_limits)\n\n return motor_torques, motor_torques", "def magnetic_tension(self, method='spectral'):\n import numpy as np\n gradB, B = self.magnetic_gradient_tensor(method=method, return_B=True)\n F = np.zeros_like(B)\n for i in range(3):\n for j in range(3):\n F[j] += B[i] * gradB[i,j]\n return F", "def set_hybrid_control(self, model, max_force_torque, timeout=5.0, stop_on_target_force=False):\n\n reduced_speed = np.deg2rad([100, 100, 100, 150, 150, 150])\n q_last = self.joint_angles()\n\n # Timeout for motion\n initime = rospy.get_time()\n xb = self.end_effector()\n failure_counter = 0\n\n while not rospy.is_shutdown() \\\n and (rospy.get_time() - initime) < timeout:\n\n # Transform wrench to the base_link frame\n Wb = self.get_ee_wrench()\n\n # Current Force in task-space\n Fb = -1 * Wb\n # Safety limits: max force\n if np.any(np.abs(Fb) > max_force_torque):\n rospy.logerr('Maximum force/torque exceeded {}'.format(np.round(Wb, 3)))\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return FORCE_TORQUE_EXCEEDED\n\n if stop_on_target_force and np.any(np.abs(Fb)[model.target_force != 0] > model.target_force[model.target_force != 0]):\n rospy.loginfo('Target F/T reached {}'.format(np.round(Wb, 3)) + ' Stopping!')\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return STOP_ON_TARGET_FORCE\n\n # Current position in task-space\n xb = self.end_effector()\n\n dxf = model.control_position_orientation(Fb, xb) # angular velocity\n\n # Limit linear/angular velocity\n dxf[:3] = np.clip(dxf[:3], -0.5, 0.5)\n dxf[3:] = 
np.clip(dxf[3:], -5., 5.)\n\n xc = transformations.pose_from_angular_velocity(xb, dxf, dt=model.dt)\n\n # Avoid extra acceleration when a point failed due to IK or other violation\n # So, this corrects the allowed time for the next point\n dt = model.dt * (failure_counter+1)\n\n q = self._solve_ik(xc)\n if q is None:\n rospy.logwarn(\"IK not found\")\n result = IK_NOT_FOUND\n else:\n q_speed = (q_last - q)/dt\n if np.any(np.abs(q_speed) > reduced_speed):\n rospy.logwarn(\"Exceeded reduced max speed %s deg/s, Ignoring command\" % np.round(np.rad2deg(q_speed), 0))\n result = SPEED_LIMIT_EXCEEDED\n else:\n result = self.set_joint_positions_flex(position=q, t=dt)\n\n if result != DONE:\n failure_counter += 1\n continue # Don't wait since there is not motion\n else:\n failure_counter = 0\n\n # Compensate the time allocated to the next command when there are failures\n for _ in range(failure_counter+1):\n self.rate.sleep()\n\n q_last = self.joint_angles()\n return DONE", "def gTorque(self):\n pass" ]
[ "0.6871455", "0.6338733", "0.6233411", "0.61869115", "0.61336684", "0.6113086", "0.6039602", "0.58993423", "0.58980733", "0.580256", "0.57689935", "0.5768379", "0.5749182", "0.57389605", "0.57320094", "0.56794477", "0.5643121", "0.5619001", "0.55776167", "0.55374926", "0.54764205", "0.54509246", "0.53950506", "0.53943104", "0.5343946", "0.5321389", "0.5306977", "0.52945024", "0.5270118", "0.52420545" ]
0.7265357
1
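The record above pairs the docstring with an Orekit-based implementation; once the body-frame field B_b and the dipole vectors of the rods and bar magnets are known, the physics reduces to summing the dipole torque relation tau = sum_i (m_i x B). Below is a minimal, framework-free sketch of that final step, assuming the field and dipole moments are already available as plain numpy arrays; the function and variable names are illustrative and are not part of the DipoleModel API.

```python
import numpy as np

def magnetic_torque(dipole_vectors, b_field_body):
    """Sum of m_i x B over all dipole contributions (hysteresis rods, bar magnets).

    dipole_vectors : (N, 3) array of dipole moments [A*m^2] in the body frame
    b_field_body   : (3,) magnetic flux density [T] in the body frame
    """
    dipole_vectors = np.asarray(dipole_vectors, dtype=float)
    b_field_body = np.asarray(b_field_body, dtype=float)
    # torque contribution of each dipole: tau_i = m_i x B
    torques = np.cross(dipole_vectors, b_field_body)
    return torques.sum(axis=0)

# example: two hysteresis rods plus one bar magnet in a ~30 uT field
m = [[0.05, 0.0, 0.0], [0.0, 0.05, 0.0], [0.0, 0.0, 0.3]]
B = [20e-6, -5e-6, 25e-6]
print(magnetic_torque(m, B))  # torque vector [N*m] in the body frame
```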
Initializes the dipole model. This method uses the simplified dipole model implemented in DipoleModel.py, which needs to initialize the induced magnetic density in the hysteresis rods. It also adds the hysteresis rods and bar magnets specified in the settings file to the satellite using the DipoleModel class.
def _initialize_dipole_model(self, model): for key, hyst in model['Hysteresis'].items(): direction = np.array([float(x) for x in hyst['dir'].split(" ")]) self.dipoleM.addHysteresis(direction, hyst['vol'], hyst['Hc'], hyst['Bs'], hyst['Br']) # initialize values for Hysteresis (need B-field @ initial position) spacecraft_state = self.state_observer.spacecraftState self.inertial2Sat = spacecraft_state.getAttitude().getRotation() self.satPos_i = spacecraft_state.getPVCoordinates().getPosition() gP = self.earth.transform(self.satPos_i, self.in_frame, self.in_date) topoframe = TopocentricFrame(self.earth, gP, 'ENU') topo2inertial = topoframe.getTransformTo(self.in_frame, self.in_date) lat = gP.getLatitude() lon = gP.getLongitude() alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km] # get B-field in geodetic system (X:East, Y:North, Z:Nadir) B_geo = FileDataHandler.mag_field_model.calculateField( degrees(lat), degrees(lon), alt).getFieldVector() # convert geodetic frame to inertial and from [nT] to [T] B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo)) B_b = self.inertial2Sat.applyTo(B_i) B_field = np.array([B_b.x, B_b.y, B_b.z]) self.dipoleM.initializeHysteresisModel(B_field) # add bar magnets to satellite for key, bar in model['BarMagnet'].items(): direction = np.array([float(x) for x in bar['dir'].split(" ")]) self.dipoleM.addBarMagnet(direction, bar['m'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n\n super().__init__(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=UnimodalVirtualSensorModel(\n virtual_sensor_model=[\n DoorVirtualSensorModel(modalities={\"image\"}),\n DoorVirtualSensorModel(modalities={\"pos\", \"sensors\"}),\n ],\n state_dim=3,\n ),\n )", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", "def __init__(\n self,\n dem_path: str,\n model_name: Optional[str] = None,\n ela: int = 2850,\n m: float = 0.006,\n plot: bool = True,\n ) -> None:\n\n # Load DEM ------------------------------------------------------------\n dem = open(dem_path)\n ele = dem.read(1).astype(np.float32)\n\n # Instance variables --------------------------------------------------\n self.model_name = Path(dem_path).stem if model_name is None else model_name\n self.dem_path = dem_path\n\n # Mass balance parameters\n self.m = m # Mass balance gradient\n self.ela_start = ela # Equilibrium line altitude\n self._setup_params() # Variable parameters (i, ela, steady_state)\n\n # 2D arrays\n self.ele_orig = np.copy(ele) # Original topography\n self._setup_ndarrays() # Variable arrays (ele, h, u ,hs)\n\n # Coordinate reference system and dem resolution\n self._dem_meta = dem.meta\n self.res = dem.res[0]\n\n # Geographical extent of the dem\n x0, y0, x1, y1 = dem.bounds\n self.extent = (x0, x1, y1, y0)\n\n # Setup statistics\n self._setup_stats()\n\n # Setup plot\n self.plot = plot", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def setup_d2d(self):\n\n self.config[\"d2d\"] = dict()\n\n self.config[\"d2d\"][LC.WHITE] = dict()\n self.config[\"d2d\"][LC.GROWTH] = dict()\n\n self.config[\"d2d\"][LC.WHITE][\"analog-gain\"] = 1.0\n self.config[\"d2d\"][LC.WHITE][\"digital-gain\"] = 1.0\n self.config[\"d2d\"][LC.GROWTH][\"analog-gain\"] = 1.0\n self.config[\"d2d\"][LC.GROWTH][\"digital-gain\"] = 1.0\n\n self.config[\"d2d\"][\"timestamp\"] = time.time()\n\n self.save_config_to_file()", "def __init__(self):\n self._read_calibration_data()\n self.set_oversamplings_and_mode(\n HumidityOversampling.x08,\n TemperatureOversampling.x08,\n PressureOversampling.x16,\n SensorMode.Normal)\n self.set_config(\n InactiveDuration.ms1000,\n FilterCoefficient.fc04)", "def initialize_variables(self):\n super(D2Model, self).initialize_variables()\n\n s = \"::: initializing 2D variables :::\"\n print_text(s, cls=self)\n\n # Depth below sea level :\n class Depth(Expression):\n def eval(self, values, x):\n values[0] = abs(min(0, x[2]))\n self.D = Depth(element=self.Q.ufl_element())\n \n # Enthalpy model\n self.theta_surface = Function(self.Q, name='theta_surface')\n self.theta_float = Function(self.Q, name='theta_float')\n self.theta_app = Function(self.Q, name='theta_app')\n self.theta = Function(self.Q, name='theta')\n self.theta0 = Function(self.Q, name='theta0')\n self.W0 = Function(self.Q, name='W0')\n self.thetahat = Function(self.Q, name='thetahat')\n self.uhat = Function(self.Q, name='uhat')\n self.vhat = Function(self.Q, name='vhat')\n self.what = Function(self.Q, name='what')\n self.mhat = Function(self.Q, name='mhat')\n self.rho_b = Function(self.Q, name='rho_b')\n\n # Age model \n self.age = Function(self.Q, name='age')\n self.a0 = Function(self.Q, name='a0')\n\n # Surface climate model\n self.precip = Function(self.Q, name='precip')\n\n # Stokes-balance model :\n self.u_s 
= Function(self.Q, name='u_s')\n self.u_t = Function(self.Q, name='u_t')\n self.F_id = Function(self.Q, name='F_id')\n self.F_jd = Function(self.Q, name='F_jd')\n self.F_ib = Function(self.Q, name='F_ib')\n self.F_jb = Function(self.Q, name='F_jb')\n self.F_ip = Function(self.Q, name='F_ip')\n self.F_jp = Function(self.Q, name='F_jp')\n self.F_ii = Function(self.Q, name='F_ii')\n self.F_ij = Function(self.Q, name='F_ij')\n self.F_iz = Function(self.Q, name='F_iz')\n self.F_ji = Function(self.Q, name='F_ji')\n self.F_jj = Function(self.Q, name='F_jj')\n self.F_jz = Function(self.Q, name='F_jz')\n self.tau_iz = Function(self.Q, name='tau_iz')\n self.tau_jz = Function(self.Q, name='tau_jz')", "def __init__(self, model, settings):\n super().__init__(model, settings)\n self.model_part = self.model.CreateModelPart(self.settings[\"model_part_name\"].GetString())\n self.model_part.ProcessInfo.SetValue(KM.DOMAIN_SIZE, self.settings[\"domain_size\"].GetInt())\n self.model_part.ProcessInfo.SetValue(KM.GRAVITY_Z, self.settings[\"gravity\"].GetDouble())\n self.EstimateDeltaTimeUtility = SW.EstimateTimeStepUtility(self.GetComputingModelPart(), self.settings[\"time_stepping\"])", "def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)", "def _setup(self):\n\n from AlGDock.topology import Topology\n self.top = Topology(self.args)\n self.top_RL = Topology(self.args, includeReceptor=True)\n\n # Initialize rmsd calculation function\n from AlGDock.RMSD import hRMSD\n self.get_rmsds = hRMSD(self.args.FNs['prmtop']['L'], \\\n self.top.inv_prmtop_atom_order_L)\n\n # Obtain reference pose\n if self.data['CD'].pose > -1:\n if ('starting_poses' in self.data['CD'].confs.keys()) and \\\n (self.data['CD'].confs['starting_poses'] is not None):\n starting_pose = np.copy(self.data['CD'].confs['starting_poses'][0])\n else:\n (confs, Es) = self._get_confs_to_rescore(site=False, \\\n minimize=False, sort=False)\n if self.args.params['CD']['pose'] < len(confs):\n starting_pose = np.copy(confs[self.args.params['CD']['pose']])\n self.data['CD'].confs['starting_poses'] = [np.copy(starting_pose)]\n else:\n self._clear('CD')\n self._store_infinite_f_RL()\n raise Exception('Pose index greater than number of poses')\n else:\n starting_pose = None\n\n from AlGDock.system import System\n self.system = System(self.args,\n self.log,\n self.top,\n self.top_RL,\n starting_pose=starting_pose)\n\n # Measure the binding site\n if (self.args.params['CD']['site'] == 'Measure'):\n self.args.params['CD']['site'] = 'Sphere'\n if self.args.params['CD']['site_measured'] is not None:\n (self.args.params['CD']['site_max_R'],self.args.params['CD']['site_center']) = \\\n self.args.params['CD']['site_measured']\n else:\n print '\\n*** Measuring the binding site ***'\n self.system.setParams(\n self.system.paramsFromAlpha(1.0, 'CD', site=False))\n (confs, Es) = self._get_confs_to_rescore(site=False, minimize=True)\n if len(confs) > 0:\n # Use the center of mass for configurations\n # within 20 RT of the lowest energy\n cutoffE = Es['total'][-1] + 20 * (R * self.T)\n coms = []\n for (conf, E) in reversed(zip(confs, Es['total'])):\n if E <= cutoffE:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, conf))\n coms.append(np.array(self.top.universe.centerOfMass()))\n else:\n break\n print ' %d configurations fit in the binding site' % len(coms)\n coms = np.array(coms)\n center = 
(np.min(coms, 0) + np.max(coms, 0)) / 2\n max_R = max(\n np.ceil(np.max(np.sqrt(np.sum(\n (coms - center)**2, 1))) * 10.) / 10., 0.6)\n self.args.params['CD']['site_max_R'] = max_R\n self.args.params['CD']['site_center'] = center\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, confs[-1]))\n if ((self.args.params['CD']['site_max_R'] is None) or \\\n (self.args.params['CD']['site_center'] is None)):\n raise Exception('No binding site parameters!')\n else:\n self.args.params['CD']['site_measured'] = \\\n (self.args.params['CD']['site_max_R'], \\\n self.args.params['CD']['site_center'])\n\n # Read the reference ligand and receptor coordinates\n import AlGDock.IO\n IO_crd = AlGDock.IO.crd()\n if self.args.FNs['inpcrd']['R'] is not None:\n if os.path.isfile(self.args.FNs['inpcrd']['L']):\n lig_crd = IO_crd.read(self.args.FNs['inpcrd']['L'], multiplier=0.1)\n self.data['CD'].confs['receptor'] = IO_crd.read(\\\n self.args.FNs['inpcrd']['R'], multiplier=0.1)\n elif self.args.FNs['inpcrd']['RL'] is not None:\n complex_crd = IO_crd.read(self.args.FNs['inpcrd']['RL'], multiplier=0.1)\n lig_crd = complex_crd[self.top_RL.L_first_atom:self.top_RL.L_first_atom + \\\n self.top.universe.numberOfAtoms(),:]\n self.data['CD'].confs['receptor'] = np.vstack(\\\n (complex_crd[:self.top_RL.L_first_atom,:],\\\n complex_crd[self.top_RL.L_first_atom + self.top.universe.numberOfAtoms():,:]))\n elif self.args.FNs['inpcrd']['L'] is not None:\n self.data['CD'].confs['receptor'] = None\n if os.path.isfile(self.args.FNs['inpcrd']['L']):\n lig_crd = IO_crd.read(self.args.FNs['inpcrd']['L'], multiplier=0.1)\n else:\n lig_crd = None\n\n if lig_crd is not None:\n self.data['CD'].confs['ligand'] = lig_crd[self.top.\n inv_prmtop_atom_order_L, :]\n self.top.universe.setConfiguration(\\\n Configuration(self.top.universe,self.data['CD'].confs['ligand']))\n if self.top_RL.universe is not None:\n self.top_RL.universe.setConfiguration(\\\n Configuration(self.top_RL.universe, \\\n np.vstack((self.data['CD'].confs['receptor'],self.data['CD'].confs['ligand']))))\n\n if self.args.params['CD']['rmsd'] is not False:\n if self.args.params['CD']['rmsd'] is True:\n if lig_crd is not None:\n rmsd_crd = lig_crd[self.top.inv_prmtop_atom_order_L, :]\n else:\n raise Exception('Reference structure for rmsd calculations unknown')\n else:\n rmsd_crd = IO_crd.read(self.args.params['CD']['rmsd'], \\\n natoms=self.top.universe.numberOfAtoms(), multiplier=0.1)\n rmsd_crd = rmsd_crd[self.top.inv_prmtop_atom_order_L, :]\n self.data['CD'].confs['rmsd'] = rmsd_crd\n\n self.get_rmsds.set_ref_configuration(self.data['CD'].confs['rmsd'])\n\n # If configurations are being rescored, start with a docked structure\n (confs, Es) = self._get_confs_to_rescore(site=False, minimize=False)\n if len(confs) > 0:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, confs[-1]))\n\n from AlGDock.simulation_iterator import SimulationIterator\n self.iterator = SimulationIterator(self.args, self.top, self.system)\n\n # Load progress\n from AlGDock.postprocessing import Postprocessing\n Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run(readOnly=True)\n\n self.calc_f_L(readOnly=True)\n self.calc_f_RL(readOnly=True)\n\n if self.args.random_seed > 0:\n np.random.seed(self.args.random_seed)", "def __init__(self, cpara, dz_soil):\n\n # --- grid ---\n self.z = np.linspace(0, cpara['grid']['zmax'], cpara['grid']['Nlayers']) # grid [m] above ground\n self.dz = self.z[1] - self.z[0] # gridsize 
[m]\n self.ones = np.ones(len(self.z)) # dummy\n\n # --- switches ---\n self.Switch_Eflow = cpara['ctr']['Eflow'] # True assumes constant U/ustar at upper boundary\n self.Switch_WMA = cpara['ctr']['WMA'] # True solves scalar profiles\n self.Switch_Ebal = cpara['ctr']['Ebal'] # True solves leaf energy balance\n\n logger.info('Eflow: %s, WMA: %s, Ebal: %s',\n self.Switch_Eflow,\n self.Switch_WMA,\n self.Switch_Ebal)\n\n # --- PlantTypes ---\n ptypes = []\n ptnames = list(cpara['planttypes'].keys())\n ptnames.sort()\n for pt in ptnames:\n ptypes.append(PlantType(self.z, cpara['planttypes'][pt], dz_soil, ctr=cpara['ctr'], loc=cpara['loc']))\n self.planttypes = ptypes\n \n # --- stand characteristics: sum over planttypes---\n\n # total leaf area index [m2 m-2]\n self.LAI = sum([pt.LAI for pt in self.planttypes])\n # total leaf area density [m2 m-3]\n self.lad = sum([pt.lad for pt in self.planttypes])\n\n # layerwise mean leaf characteristic dimension [m] for interception model\n self.leaf_length = sum([pt.leafp['lt'] * pt.lad for pt in self.planttypes]) / (self.lad + EPS)\n\n # root area density [m2 m-3]\n rad = np.zeros(np.shape(dz_soil))\n imax = 1\n for pt in self.planttypes:\n rad[:len(pt.Roots.rad)] += pt.Roots.rad\n imax = max(imax, len(pt.Roots.rad))\n \n self.ix_roots = np.array(range(imax)) # soil model layers corresponding to root zone\n self.rad = rad[self.ix_roots]\n \n # total root area index [m2 m-2]\n self.RAI = sum([pt.Roots.RAI for pt in self.planttypes])\n # distribution of roots across soil model layers [-]\n self.root_distr = self.rad * dz_soil[self.ix_roots] / (self.RAI + EPS)\n\n # canopy height [m]\n if len(np.where(self.lad > 0)[0]) > 0:\n f = np.where(self.lad > 0)[0][-1]\n self.hc = self.z[f].copy()\n else:\n self.hc = 0.0\n\n # --- create radiation, micromet, interception, and forestfloor model instances\n self.radiation = Radiation(cpara['radiation'], self.Switch_Ebal)\n\n self.micromet = Micromet(self.z, self.lad, self.hc, cpara['micromet'])\n\n self.interception = Interception(cpara['interception'], self.lad * self.dz)\n\n self.forestfloor = ForestFloor(cpara['forestfloor'])", "def __init__(self):\n logger.debug('Initializing %s model.' 
% self.__class__.__name__)\n self.dependent_attributes = ['_alpha',\n '_log_like',\n '_gradient','_K',\n '_log_det']\n self._previous_parameters = None # previous parameters from last call\n self.grad_method = None # could be {'finite_difference','adjoint'}\n self.noise_var_constraint = '+ve' # Gaussian noise variance constraint\n return", "def __init__(self, root, io):\n parts.hand.Hand.__init__(self, root=root, io=io)\n\n dxl_motors = OrderedDict({\n name: dict(conf)\n for name, conf in self.dxl_motors.items()\n })\n\n self.attach_dxl_motors(dxl_motors)\n\n \"\"\"\n self._load_sensor = self.io.find_module('force_gripper')\n self._load_sensor.offset = 4\n self._load_sensor.scale = 10000\n \"\"\"", "def __init__(self, para, ini_cond):\n\n # grid\n self.z = np.linspace(0, para['grid']['zmax'], para['grid']['Nlayers']) # grid [m] above ground\n self.dz = self.z[1] - self.z[0] # gridsize [m]\n self.ones = np.ones(len(self.z)) # dummy\n self.zref = para['zref'] # height of forcing data [m]\n \n # moss properties\n self.hc = para['hc'] # canopy height (m)\n self.lad = para['lad'] # shoot-area density (m2m-3)\n self.LAI = sum(self.lad*self.dz)\n \n self.canopy_nodes = np.where(self.lad > 0)[0]\n \n # hydraulic\n self.porosity = para['hydraulic']['porosity']\n self.pF = para['hydraulic']['pF']\n self.Ksat = para['hydraulic']['Ksat']\n self.freezing_curve = para['hydraulic']['freezing_curve']\n \n # radiation\n self.albedo = para['radiation'] # 'PAR', 'NIR'\n self.emissivity = para['radiation']['emissivity']\n self.clump = para['radiation']['clumping']\n self.leaf_angle = para['radiation']['leaf_angle']\n \n #self.radiation = para['radiation']\n \n # compute non-dimensional flow velocity Un = U/ust and momentum diffusivity\n Utop = ini_cond['Utop'] # U/ust at zref\n Ubot = 0.0 # no-slip\n self.Sc = para['Schmidt_nr']\n _, self.Un, self.Kmn, _ = closure_model_U_moss(self.z, self.lad, self.hc, Utop, Ubot) \n \n self.U = None\n self.Ks = None\n self.length_scale = para['length_scale']\n \n self.Switch_WMA = False\n \n # initial states\n self.T = ini_cond['T']\n self.Wtot = ini_cond['Wtot']\n self.Wliq, self.Wice, _ = frozen_water(self.T, self.Wot, fp=self.freezing_curve, To=0.0)\n self.h = water_retention(self.pF, theta=self.Wliq)", "def __init__(self, \n modeled_dem_name, \n modern_dem_name, \n outlet_id, \n category_file=None, \n category_values=None, \n weight_file=None,\n weight_values=None):\n\n # save dem names\n self.modern_dem_name = modern_dem_name\n self.modeled_dem_name = modeled_dem_name\n \n # Read and remember the modern DEM\n (self.grid, self.z) = self.read_topography(modern_dem_name)\n self.grid.set_watershed_boundary_condition_outlet_id(outlet_id,\n self.z, \n nodata_value=-9999)\n # Read and remember the modeled DEM \n (self.mgrid, self.mz) = self.read_topography(modeled_dem_name)\n self.mgrid.set_watershed_boundary_condition_outlet_id(outlet_id,\n self.mz, \n nodata_value=-9999)\n if self.mz.size != self.z.size:\n raise ValueError(('Size of provided DEMS is different.'))\n \n if category_file and category_values:\n raise ValueError(('Provide either an array-like structure of catetory ',\n 'values or a filename, not both.'))\n if weight_file and weight_values:\n raise ValueError(('Provide either an array-like structure of weight ',\n 'values or a filename, not both.'))\n if category_file:\n if os.path.exists(category_file):\n catagory_values = np.loadtxt(category_file)\n if catagory_values.size != self.z.size:\n raise ValueError(('Size of catagory array is different than the ',\n 
'provided DEM.'))\n if weight_file:\n if os.path.exists(weight_file):\n weight_values = np.loadtxt(weight_file)\n if weight_values.size != self.z.size:\n raise ValueError(('Size of weight array is different than the ',\n 'provided DEM.'))\n try:\n np.asarray(weight_values).size == self.z.size \n except TypeError:\n weight_values = np.ones_like(self.z)\n \n self.category_values = category_values\n self.weight_values = weight_values\n self.cat_vals = np.sort(np.unique(self.category_values[self.grid.core_nodes]))\n self.metric = {}", "def initialize(self):\n self.lib.Initialize()\n\n self.triggers = {'Internal': 0, 'External': 1, 'External Start': 6,\n 'External Exposure': 7, 'External FVB EM': 9,\n 'Software Trigger': 10,\n 'External Charge Shifting': 12}\n self.savetypes = {'Signed16bits': 1, 'Signed32bits': 2, 'Float': 3}\n\n # Initial values\n\n self.readout_packing_state = False\n self.readout_packing = self.readout_packing_state\n\n self.readout_mode_mode = 'Image'\n self.readout_mode = self.readout_mode_mode\n\n self.photon_counting_mode_state = False\n self.photon_counting_mode = self.photon_counting_mode_state\n\n self.frame_transfer_mode_state = False\n self.frame_transfer_mode = self.frame_transfer_mode_state\n\n self.fan_mode_index = 'onfull'\n self.fan_mode = self.fan_mode_index\n\n self.EM_gain_mode_index = 'RealGain'\n self.EM_gain_mode = self.EM_gain_mode_index\n\n self.cooled_on_shutdown_value = False\n self.cooled_on_shutdown = self.cooled_on_shutdown_value\n\n self.baseline_offset_value = 100\n self.baseline_offset = self.baseline_offset_value\n\n self.adv_trigger_mode_state = True\n self.adv_trigger_mode = self.adv_trigger_mode_state\n\n self.acq_mode = 'Single Scan'\n self.acquisition_mode = self.acq_mode\n\n self.amp_typ = 0\n\n self.horiz_shift_speed_index = 0\n self.horiz_shift_speed = self.horiz_shift_speed_index\n\n self.vert_shift_speed_index = 0\n self.vert_shift_speed = self.vert_shift_speed_index\n\n self.preamp_index = 0\n self.preamp = self.preamp_index\n\n self.temperature_sp = 0 * degC\n self.temperature_setpoint = self.temperature_sp\n\n self.auxout = np.zeros(4, dtype=bool)\n for i in np.arange(1, 5):\n self.out_aux_port[i] = False\n\n self.trigger_mode_index = 'Internal'\n self.trigger_mode = self.trigger_mode_index", "def set_all(self):\n\n self.ecm = EnergyConsumptionModel(\n vehicle_type=\"car\",\n vehicle_size=list(self.array.coords[\"size\"].values),\n powertrains=list(self.array.coords[\"powertrain\"].values),\n cycle=self.cycle,\n gradient=self.gradient,\n country=self.country,\n )\n\n diff = 1.0\n\n while diff > 0.0001:\n old_driving_mass = self[\"driving mass\"].sum().values\n self.set_vehicle_mass()\n self.set_power_parameters()\n self.set_component_masses()\n self.set_auxiliaries()\n self.set_power_battery_properties()\n self.set_battery_properties()\n self.set_energy_stored_properties()\n self.set_recuperation()\n\n if \"FCEV\" in self.array.powertrain.values:\n self.set_fuel_cell_power()\n self.set_fuel_cell_mass()\n\n # if user-provided values are passed,\n # they override the default values\n if \"capacity\" in self.energy_storage:\n self.override_battery_capacity()\n\n diff = (self[\"driving mass\"].sum().values - old_driving_mass) / self[\n \"driving mass\"\n ].sum()\n\n self.set_ttw_efficiency()\n self.calculate_ttw_energy()\n self.set_ttw_efficiency()\n\n self.set_range()\n\n if self.target_range:\n self.override_range()\n\n self.set_share_recuperated_energy()\n self.set_battery_fuel_cell_replacements()\n self.adjust_cost()\n\n 
self.set_electric_utility_factor()\n self.set_electricity_consumption()\n self.set_costs()\n self.set_hot_emissions()\n self.set_particulates_emission()\n self.set_noise_emissions()\n self.create_PHEV()\n if self.drop_hybrids:\n self.drop_hybrid()\n\n self.remove_energy_consumption_from_unavailable_vehicles()", "def setup(self):\n\n module = [m for m in Rt.modules if m.name == self.module_name][0]\n\n # Take CPACS file from the optimisation\n cpacs_path = module.cpacs_in\n tixi = open_tixi(cpacs_path)\n self.Model = load_surrogate(tixi)\n tixi.save(cpacs_path)\n\n df = self.Model.df\n df.set_index(\"Name\", inplace=True)\n for name in df.index:\n if df.loc[name, \"type\"] == \"obj\":\n self.add_output(name)\n elif df.loc[name, \"type\"] == \"des\":\n self.add_input(name)\n\n self.xd = df.loc[[name for name in df.index if df.loc[name, \"type\"] == \"des\"]]\n self.yd = df.loc[[name for name in df.index if df.loc[name, \"type\"] == \"obj\"]]", "def setup(self, path_to_conf_file):\n\n self.track = Track.SENSORS\n self.num_frames = 0\n\n with open(path_to_conf_file, 'r') as f:\n config = yaml.safe_load(f)\n\n for key, value in config.items():\n setattr(self, key, value)\n\n self.device = torch.device('cuda')\n\n self.image_model = CameraModel(config).to(self.device)\n self.image_model.load_state_dict(torch.load(self.main_model_dir))\n self.image_model.eval()\n\n self.vizs = []\n\n self.waypointer = None\n\n if self.log_wandb:\n wandb.init(project='carla_evaluate')\n \n self.steers = torch.tensor(np.linspace(-self.max_steers,self.max_steers,self.num_steers)).float().to(self.device)\n self.throts = torch.tensor(np.linspace(0,self.max_throts,self.num_throts)).float().to(self.device)\n\n self.prev_steer = 0\n self.lane_change_counter = 0\n self.stop_counter = 0", "def __init__(self, config_file=False,\n simulation='2D_square',\n D=1.56, J=5.88, ku=0.41, mu_s=3, B=(0, 0, 0), Demag=None,\n mesh_nx=50, mesh_ny=50, mesh_a=0.2715\n ):\n\n self.simulation = simulation\n\n if config_file:\n tmp_config = {}\n configs = execfile(config_file, tmp_config)\n\n self.D = configs[\"D\"] * const.meV\n self.J = configs[\"J\"] * const.meV\n self.ku = configs[\"ku\"] * const.meV\n self.mu_s = configs[\"mu_s\"] * const.mu_B\n self.m_field = configs[\"m_field\"]\n if configs[\"B\"] is not None:\n self.B = configs[\"B\"]\n\n else:\n self.D = D * const.meV\n self.J = J * const.meV\n self.ku = ku * const.meV\n self.mu_s = mu_s * const.mu_B\n self.B = B\n self.Demag = Demag\n\n self.mesh_nx = mesh_nx\n self.mesh_ny = mesh_ny\n self.mesh_a = mesh_a\n\n # Dictionary to translate a vector component into the corresponding\n # indexes in Fidimag arrays, i.e. 
x --> 0, y --> 1, z --> 2\n self.v_dict = {'x': 0, 'y': 1, 'z': 2}\n\n # Measure for dm / dt\n self.DEGREE_PER_NANOSECOND = 2 * np.pi / (360 * 1e-9)", "def __init__(self):\n super(Config, self).__init__()\n self.run_control[\"output_dir\"] = os.getcwd()+\"/output/arctan_baseline/single_turn_injection/tracking\"\n ring_tof = 1149.185123\n field_files = glob.glob(\"output/arctan_baseline/bump_quest_v9/find_bump_r_*_theta_90/find_bump_parameters_001.out\")\n substitutions = config.get_baseline_substitution(0.22, ring_tof)\n substitutions = self.ramp_fields(\n substitutions,\n field_files,\n [i for i, f in enumerate(field_files)],\n ring_tof,\n will_step = False\n )\n self.substitution_list = [substitutions] # used for plotting unperturbed CO\n\n self.run_control[\"find_closed_orbits_4d\"] = False\n self.run_control[\"find_da\"] = False\n self.run_control[\"find_bump_parameters\"] = False\n self.run_control[\"track_bump\"] = False\n self.run_control[\"track_beam\"] = True\n self.find_closed_orbits[\"subs_overrides\"][\"__n_turns__\"] = 0.11\n self.find_closed_orbits[\"subs_overrides\"][\"__do_bump__\"] = False\n self.find_closed_orbits[\"final_subs_overrides\"].update(substitutions)\n self.find_closed_orbits[\"max_iterations\"] = 0\n self.find_closed_orbits[\"do_minuit\"] = True\n self.find_closed_orbits[\"minuit_iterations\"] = 10 \n self.find_closed_orbits[\"us_cell\"] = 0\n self.find_closed_orbits[\"ds_cell\"] = 1\n\n self.find_da = {\n \"run_dir\":\"tmp/find_da/\",\n \"probe_files\":\"RINGPROBE01.h5\",\n \"subs_overrides\":{\"__n_turns__\":101.1, \"__do_magnet_field_maps__\":\"False\", \"__step_size__\":0.01},\n \"get_output_file\":\"get_da\",\n \"scan_output_file\":\"scan_da\",\n \"row_list\":None,\n \"scan_x_list\":[],\n \"scan_y_list\":[],\n \"x_seed\":1.,\n \"y_seed\":1.,\n \"min_delta\":0.9,\n \"max_delta\":1000.,\n \"required_n_hits\":100,\n \"dt_tolerance\":0.5, # fraction of closed orbit dt\n \"max_iterations\":15,\n \"decoupled\":True,\n }\n\n max_amp = 0.01\n self.track_beam_dummy = {\n \"run_dir\":\"tmp/track_beam/\",\n \"save_dir\":\"track_beam\",\n \"print_events\":[0, 1, -1],\n \"settings\":[{\n \"name\":\"forwards\",\n \"direction\":\"forwards\",\n \"probe_files\":\"RINGPROBE01.h5\", \n \"beam\":{\n \"type\":\"beam_gen\",\n \"closed_orbit_file\":\"closed_orbits_cache\",\n \"eigen_emittances\":[[0, 0]]*3,#+[[max_amp, max_amp]],\n \"n_per_dimension\":2,\n \"variables\":[\"x\",\"x'\",\"y\",\"y'\"],\n \"amplitude_dist\":\"uniform\", #\"grid\", # \n \"phase_dist\":\"uniform\", #\"grid\", # \n \"max_amplitude_4d\":max_amp, # amplitude_dist != grid\n \"energy\":3.0,\n },\n \"subs_overrides\":{\n \"__n_turns__\":45.1,\n \"__hdf5__\":\"True\",\n \"__do_magnet_field_maps__\":\"False\",\n \"__do_bump__\":\"True\",\n \"__step_size__\":0.01\n },\n },],\n }\n\n T0 = ring_tof\n self.track_beam = {\n \"run_dir\":\"tmp/track_beam/\",\n \"save_dir\":\"track_beam_rf_on_2\",\n \"print_events\":[i for i in range(1)],\n \"variables\":[\"x\", \"x'\", \"y\", \"y'\", \"t\", \"energy\"],\n \"settings\":[{\n \"name\":\"grid\",\n \"direction\":\"forwards\",\n \"probe_files\":\"RINGPROBE01.h5\", \n \"beam\":{\n \"type\":\"grid\",\n \"energy\":3.0,\n \"start\":[4357.646683446333, 0.0, -116.7090485272821, 0.0, 0.0, 941.272],\n \"stop\":[4357.646683446333, 0.0, -116.7090485272821, 0.0, T0, 941.272],\n \"nsteps\":[1, 1, 1, 1, 4+1, 1],\n },\n \"subs_overrides\":{\n \"__n_turns__\":100.1,\n \"__hdf5__\":True,\n \"__do_magnet_field_maps__\":False,\n \"__do_bump__\":True,\n \"__do_rf__\":True,\n 
\"__do_foil__\":False,\n \"__step_size__\":0.01\n },\n },],\n }", "def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')", "def initialise_fluids(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n c.fluid.val[air] = 0.8\n if not c.fluid.val_set[flue_gas]:\n c.fluid.val[flue_gas] = 0.2\n c.target.propagate_fluid_to_target(c, c.target)", "def setup_class(self):\n args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}\n\n # generate structure factors and retrieve associated cell information\n sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)\n sf_data = cctbx_tools.reformat_sf(sf)\n sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])\n \n # add random phase shifts\n hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()\n hklIp2[:,-1], self.shifts2 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n\n # retain subset of Millers\n for data in [hklIp1,hklIp2,hklIp3]:\n keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))\n data = data[keep_idx]\n \n self.data1, self.data2, self.data3 = hklIp1, hklIp2, hklIp3\n fshifts_list = np.random.uniform(size=(4,3))\n self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))", "def initialize_system(self):\n self.mfd.set_mesh(self.mesh)\n [[div_data, div_row, div_col], \n [div_t_data, div_t_row, div_t_col]] = self.mfd.build_div()\n [self.m_x_coo_data, \n m_x_coo_row, \n m_x_coo_col] = self.mfd.build_m(save_update_info=True)\n\n self.m_x_coo_length = len(self.m_x_coo_data)\n \n # The data for the bottom right should be zeros. \n [c_data, c_row, c_col] = self.mfd.build_bottom_right()\n \n [coupling_data, coupling_row, coupling_col] = self.mfd.build_coupling_terms()\n\n self.div = sparse.coo_matrix((np.array(div_data), \n (np.add(np.array(div_row), \n -self.mesh.get_number_of_faces()), \n np.array(div_col))))\n self.div = self.div.tocsr()\n\n lhs_data = self.m_x_coo_data\n lhs_row = m_x_coo_row\n lhs_col = m_x_coo_col\n \n lhs_data += div_data\n lhs_row += div_row\n lhs_col += div_col\n\n lhs_data += div_t_data\n lhs_row += div_t_row\n lhs_col += div_t_col \n \n self.c_start = len(lhs_data)\n \n lhs_data += c_data\n lhs_row += c_row\n lhs_col += c_col \n\n self.c_end = len(c_data)\n\n lhs_data += coupling_data\n lhs_row += coupling_row\n lhs_col += coupling_col\n\n # Convert m_x_coo_data to numpy array. \n self.m_x_coo_data = np.array(self.m_x_coo_data)\n\n self.lhs_coo = sparse.coo_matrix((np.array(lhs_data), \n (np.array(lhs_row), \n np.array(lhs_col))))\n\n # RHS construction is for Neumann and Dirichlet \n # boundaries specified by the mesh. 
\n self.rhs_mfd = self.mfd.build_rhs()", "def __init__(self):\n INSTALL_DIR = dirname(__file__)\n CONFIG_DIR = '/etc/Model2WADL/'\n logging.basicConfig(level=logging.ERROR)\n logging.config.fileConfig([join(CONFIG_DIR, 'logging.conf'), expanduser('~/.logging.conf'), 'logging.conf'])\n self.__log = logging.getLogger('thesis')\n\n self.__log.debug(\"Reading general configuration from Model2WADL.cfg\")\n self.__m2wConfig = ConfigParser.SafeConfigParser()\n self.__m2wConfig.read(\n [join(CONFIG_DIR, 'Physical2Virtual.cfg'), expanduser('~/.Physical2Virtual.cfg'), 'Physical2Virtual.cfg'])\n\n self.__baseURI = self.__m2wConfig.get(\"Config\", \"baseURI\")\n self.__basePackage = self.__m2wConfig.get(\"Config\", \"basePackage\")\n self.__schemaFile = self.__m2wConfig.get(\"Config\", \"schemaFile\")\n self.__model = None\n self.__input = None\n self.__output = None", "def __init__(self):\n super().__init__()\n\n # Gadget state\n \n self.isDoorOpen = False\n self.verified = True\n\n # Ev3dev initialization\n self.leds = Leds()\n self.sound = Sound()\n self.drive = MoveTank(OUTPUT_B, OUTPUT_C)\n \n self.ir_sensor = InfraredSensor()\n self.ir_sensor.mode = self.ir_sensor.MODE_IR_REMOTE\n self.color_sensor = ColorSensor()\n self.color_sensor.mode = 'COL-COLOR' # WHITE\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()", "def __init__(self):\n super().__init__()\n self.indices_dir = ''\n self.split_file = ''\n\n self.model = '' # string identifying the model\n self.experiment = '' # string to describe experiment\n self.maps = [data.ID_MAP_T1H2O, data.ID_MAP_FF, data.ID_MAP_B1] # the used maps\n self.patch_size = [1, 32, 32]\n\n # training configuration\n self.loss = 'mse' # string identifying the loss function (huber, mse or mae)\n self.learning_rate = 0.01 # the learning rate\n self.dropout_p = 0.2\n self.norm = 'bn' # none, bn\n\n # we use the mean absolute error as best model score\n self.best_model_score_is_positive = True\n self.best_model_score_name = 'mae'", "def __init__(self, path, model_index=1):\n self.shape_total_df = _load_neutron_total_shape(path)\n self.shape_dict_mcnp = None\n self.shape_df_mcnp = None\n self.shape_df_mcnp_norm = None\n self.shape_df_interp = None\n self.shape_tof_df_interp = None\n self.shape_tof_df_dir = None\n\n self.result_shape_fit = None\n self.param_df_dir = None\n self.param_df = None\n self.linear_df = None\n self.linear_df_dir = None\n self.model_param_names = None\n self.e_min = None\n self.e_max = None\n self.t_us_mcnp = None\n self.t_us_conv_proton = np.linspace(t_min_us, t_max_us, t_nbr).round(3)\n self.result_neutron_folder = None\n self._energy_list = None\n self._energy_list_dropped = None\n\n self.model_map = {1: 'ikeda_carpenter',\n 2: 'cole_windsor',\n 3: 'pseudo_voigt',\n 4: 'ikeda_carpenter_jparc',\n 5: 'cole_windsor_jparc',\n }\n self.model_index = None\n self.model_used = None\n self.model = None\n self.__set_model(model_index)\n\n if self.result_neutron_folder is None:\n self.result_neutron_folder = self._check_and_make_subdir('result', 'neutron_pulse', self.model_used)\n\n self.proton_pulse = ProtonPulse(path=proton_path)", "def __init__(self):\n\n super().__init__(\n filter_models=[\n DoorKalmanFilter(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=DoorVirtualSensorModel(modalities={\"image\"}),\n ),\n DoorKalmanFilter(\n dynamics_model=DoorDynamicsModel(),\n virtual_sensor_model=DoorVirtualSensorModel(\n modalities={\"pos\", \"sensors\"}\n ),\n ),\n ],\n state_dim=3,\n )" ]
[ "0.60838014", "0.5990392", "0.5922041", "0.5784273", "0.57757676", "0.5773712", "0.5773648", "0.5748722", "0.5730481", "0.5700419", "0.56870985", "0.5683414", "0.56764555", "0.56551784", "0.5640305", "0.56077576", "0.55869156", "0.5540809", "0.55335855", "0.55258167", "0.54779977", "0.5457308", "0.5443597", "0.5443282", "0.54384094", "0.54259914", "0.54250777", "0.5424373", "0.5421581", "0.54133105" ]
0.7723989
0
Update satellite state obtained from orbit propagation. This method should be called before each attitude integration step! It updates internal variables needed for disturbance torque computation.
def update_satellite_state(self, current_date): self.in_date = current_date self.spacecraft_state = self.state_observer.spacecraftState self.satPos_i = self.spacecraft_state.getPVCoordinates().getPosition() self.satVel_i = self.spacecraft_state.getPVCoordinates().getVelocity()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_satellite_state(self, integration_date):\n self.in_date = integration_date\n self.spacecraft_state = self.state_observer.spacecraftState\n\n self.satPos_i = self.spacecraft_state.getPVCoordinates().getPosition()\n self.satVel_i = self.spacecraft_state.getPVCoordinates().getVelocity()", "def update_state(self):\n self.last_position = self.current_position\n self.last_distance = self.current_distance\n self.last_collision_time_stamp = self.current_collision_time_stamp\n self.current_kinematics = self.airsim_client.simGetGroundTruthKinematics(vehicle_name=self.drone_name)\n self.current_position = self.current_kinematics.position + self.base_offset\n self.current_collision_time_stamp = self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name).time_stamp\n # print(\"DEBUG: simGetCollisionInfo:\", self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name))\n # self.pending_death = self.airsim_client.simIsRacerDisqualified(vehicle_name=self.drone_name)\n self.objective_status = self.current_objective.next_gate_status(self.last_position, self.current_position)\n if self.objective_status == GateStatus.CROSSED or self.objective_status == GateStatus.PASSED:\n if self.switch_to_next_objective(): # if track is finished (changes self.last_distance)\n self.track_complete = True\n self.current_distance = self.current_position.distance_to(self.current_objective.gate_pose.position)", "def update(self, dt):\n # get normal random iscipyut\n u = self.prng.normal(size=(1, self._x.shape[1]))\n # calculate state time derivative with state space equation\n dx_dt = self._A.dot(self._x) + self._B * u\n # apply update with Euler integration\n self._x += dx_dt * dt", "def update(self):\n\n SolidSolver.update(self)\n\n self.__nextStep()", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def synchronize_state(self):\n theta = self.unicycle_state[:,2]\n 
v = self.unicycle_state[:,3]\n self.position[:, :2] = self.unicycle_state[:,:2]\n self.orientation[:,2] = theta\n vx = v * np.cos(theta)\n vy = v * np.sin(theta)\n\n self.velocity[:, 0] = vx\n self.velocity[:, 1] = vy\n\n self.angular_velocity[:, 2] = self.unicycle_state[:, 4]", "def _update_(self):\n self._update_distance_()\n self._check_literature_name_()", "def temp_update(self):\n a_w = self.k / self.dx\n a_e = self.k / self.dx\n a_n = self.k / self.dy\n a_s = self.k / self.dy\n a_p = a_w + a_e + a_n + a_s + self.rho * self.cp * self.dx / self.dt\n for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):\n self.T[i,\n j] = (a_w * self.T[i - 1, j] + a_e * self.T[i + 1, j] +\n a_s * self.T[i, j - 1] + a_n * self.T[i, j + 1]) / a_p", "def update(self):\n self.brain.update()", "def update_state(self, a, obs, t):\n \n self.update_weights(a, obs, t) # only update weights, not particles \n self.update_running_average_weights(t) \n return None", "def _update_loc(self) -> None:\n self.state[:, :, Boids.Attr.LOC] += self.state[:, :, Boids.Attr.VEL]\n # wrap-around the simulated environment\n self.state[:, :, Boids.Attr.LOC] %= np.expand_dims(self.env_bounds, axis=1)", "def update_vehicle_state(self):\n sim_timestep = 1. / self.simulation_rate\n # Decompose v into x and y component.\n if self.v != self.commands['speed']:\n self.v = self.commands['speed']\n vx = numpy.cos(self.yaw) * self.v\n vy = numpy.sin(self.yaw) * self.v\n # Update vehicles position\n self.x += vx * sim_timestep\n self.y += vy * sim_timestep\n self.yaw += ((self.v / self.axles_distance) *\n numpy.tan(self.commands['steering_angle']) *\n sim_timestep)\n # Make sure self.yaw is never negative.\n # self.yaw 0..2pi\n if self.yaw > 2*numpy.pi:\n self.yaw = 0.\n elif self.yaw < 0.:\n self.yaw += 2*numpy.pi", "def updateState(self):\n\t\t# ask for current pose data\n\t\tcomm.write(b'id1 mav.pose_sensor get_local_data \\n')\n\t\t# update x value\n\t\tcomm.read_until(b'\"x\": ') # b'' as Telnet needs a bytes object instead of string since Python3\n\t\tread = comm.read_until(b',') # returns read values + finishing ','\n\t\tread = read[:-1] # cut that ','\n\t\tcurrent_state.x = float(read)\n\t\tself.state_x_label.set_text(\"%0.2f\" % current_state.x)\n\t\t# update y value\n\t\tcomm.read_until(b'\"y\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y = float(read)\n\t\tself.state_y_label.set_text(\"%0.2f\" % current_state.y)\n\t\t# update z value\n\t\tcomm.read_until(b'\"z\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.z = float(read)\n\t\tself.state_z_label.set_text(\"%0.2f\" % current_state.z)\n\t\t# update yaw value\n\t\tcomm.read_until(b'\"yaw\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.psi = float(read)\n\t\tself.state_psi_label.set_text(\"%0.2f\" % current_state.psi)\n\t\t# update pitch value\n\t\tcomm.read_until(b'\"pitch\": ')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.theta = float(read)\n\t\tself.state_theta_label.set_text(\"%0.2f\" % current_state.theta)\n\t\t# update roll value\n\t\tcomm.read_until(b'\"roll\": ')\n\t\tread = comm.read_until(b'}')\n\t\tread = read[:-1]\n\t\tcurrent_state.phi = float(read)\n\t\tself.state_phi_label.set_text(\"%0.2f\" % current_state.phi)\n\n\t\t# ask for current velocity data\n\t\tcomm.write(b'id1 mav.velocity_sensor get_local_data \\n')\n\t\t# update p value\n\t\tcomm.read_until(b'\"angular_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = 
read[:-1]\n\t\tcurrent_state.p = float(read)\n\t\tself.state_p_label.set_text(\"%0.2f\" % current_state.p)\n\t\t# update q value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.q = float(read)\n\t\tself.state_q_label.set_text(\"%0.2f\" % current_state.q)\n\t\t# update r value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.r = float(read)\n\t\tself.state_r_label.set_text(\"%0.2f\" % current_state.r)\n\n\t\t# update x_dot value\n\t\tcomm.read_until(b'\"world_linear_velocity\": [')\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.x_dot = float(read)\n\t\tself.state_x_dot_label.set_text(\"%0.2f\" % current_state.x_dot)\n\t\t# update y_dot value\n\t\tread = comm.read_until(b',')\n\t\tread = read[:-1]\n\t\tcurrent_state.y_dot = float(read)\n\t\tself.state_y_dot_label.set_text(\"%0.2f\" % current_state.y_dot)\n\t\t# update z_dot value\n\t\tread = comm.read_until(b']')\n\t\tread = read[:-1]\n\t\tcurrent_state.z_dot = float(read)\n\t\tself.state_z_dot_label.set_text(\"%0.2f\" % current_state.z_dot)\n\n\t\t# update first waypoint for trajectory in GUI\n\t\twaypoints_gui[0] = [current_state.x, current_state.y, current_state.z, current_state.psi]\n\n\t\treturn GLib.SOURCE_CONTINUE", "def update_satellite_state(self, current_date):\n pass", "def update_state(self, a, delta):\n # input check\n if delta >= MAX_STEER:\n delta = MAX_STEER\n elif delta <= -MAX_STEER:\n delta = -MAX_STEER\n\n self.x = self.x + self.v * np.cos(self.yaw) * DT\n self.y = self.y + self.v * np.sin(self.yaw) * DT\n self.yaw = self.yaw + self.v / self.WB * np.tan(delta) * DT\n self.v = self.v + a * DT\n\n if self.v > MAX_SPEED:\n self.v = MAX_SPEED\n elif self.v < MIN_SPEED:\n self.v = MIN_SPEED\n\n return self", "def update(self):\n if (not self._run) or (not self.IA.is_loaded()):\n return\n self.IA.BG_MAP.update(speed=self.speed)\n self.IA.O_ATUAL.update()\n self._desintegrator.update()", "def update(self):\n self._ba_attrs = self._api.get_current_data_point(self._ba_uuid)\n self._state = self._ba_attrs[\"temperature\"]", "def update(self,dt):\n t1 = time()\n\n if SPLIT:\n self.check_refine()\n if AMALGAMATE:\n self.check_amalg(self.nl_default)\n\n t = time()\n self.rebuild_lists()\n self.timing['nlist rebuild time'] = time() - t\n\n # Is this derivative step required?\n t = time()\n self.derivatives()\n self.timing['deriv time'] = time() - t\n \n t = time()\n self.step(self.gather_state,self.derivatives, \\\n self.gather_derivatives,self.scatter_state,dt)\n self.timing['integrate time'] = time() - t\n \n self.box.apply(self)\n\n if self.thermostat:\n self.apply_thermostat(self.thermostat_temp)\n \n self.timing['update time'] = time() - t1\n self.steps += 1", "def update(self):\n self.arest.update()", "def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_target.parameters(), self.q_network_local.parameters()):\n p_target.data.copy_(tau * p_local.data + (1.0-tau) * p_target.data)", "def _update_positions(self):\n self._velocities += self._accelerations * self.time_step\n self._positions += self._velocities * self.time_step", "def update(self, tfinal):\n t = 0; kk = 0\n nstep = int(np.round(tfinal/self.dt))+1 # number of time steps\n self.omega = np.zeros((nstep,self.npts))\n self.theta = np.zeros((nstep,self.npts))\n\n while t <(tfinal+1e-10):\n self.return_map()\n self.omega[kk] = self.y[0]\n self.theta[kk] = self.y[1]\n\n self.y = RK4(self.y, self.dt, t, self.deri)\n kk += 1; t += self.dt\n\n return self", "def 
_update(self):\n self._execute_lane_changes()\n self._execute_forward_movement()", "def update(self):\n # GPS data\n self.model.GPS_latitude.set(self._kernel.data.lat)\n self.model.GPS_longitude.set(self._kernel.data.lon)\n \n self.model.GPS_heading.set(self._kernel.data.gps_heading)\n self.model.GPS_speed.set(self._kernel.data.speed)\n self.model.GPS_altitude.set(self._kernel.data.altitude)\n \n self.model.GPS_fix.set(self._kernel.data.fix)\n self.model.GPS_satellite_count.set(self._kernel.data.num_sat)\n \n # compass data\n self.model.compass_heading.set(self._kernel.data.compass_heading)\n \n # time data\n self.model.time.set(self._kernel.data.timestamp.isoformat())\n self.model.date.set(self._kernel.data.datestamp.isoformat())\n \n # other data\n self.model.temperature.set(self._kernel.data.temperature)", "def update(self):\n if self.api is None:\n return\n self.api.update()\n\n if self.var_type == 'Time':\n self.var_state = self.api.result['timeRelease']\n return\n\n result = self.api.result[self.var_period.lower()]\n if self.var_type == 'Sky':\n sky = result['sky']\n self.var_state = sky['name']\n self.var_icon = get_sky_icon(sky['code'])\n else:\n temp = result['temperature']\n if self.var_detail == 'Max':\n self.var_state = round(float(temp['tmax']), 1)\n else:\n self.var_state = round(float(temp['tmin']), 1)", "def _update(self):\n # update current position based on speed\n distance = self.speed * self.update_period\n result = great_circle(distance=distance,\n azimuth=self._ahrs.heading,\n latitude=self._current_location.lat,\n longitude=self._current_location.lng)\n self._current_location = Point(result['latitude'], result['longitude'])\n self._gps.lat = self._current_location.lat\n self._gps.lng = self._current_location.lng\n\n if self.target_waypoint and not self.arrived:\n # update compass heading if we have a target waypoint\n self._ahrs.heading = heading_to_point(self._current_location,\n self.target_waypoint)\n # check if we have hit our target\n if self.distance_to_target <= self.TARGET_DISTANCE:\n try:\n # if there are waypoints qued up keep going\n self.move_to_waypoint(self.waypoints.popleft())\n except IndexError:\n # otherwise we have arrived\n self.arrived = True\n self.speed = 0\n logger.info('Arrived at Waypoint({}, {})'.format(self.target_waypoint.lat,\n self.target_waypoint.lng))\n\n else:\n # update heading and speed based on motor speeds\n self.speed = (self._left_motor.speed + self._right_motor.speed) // 2\n self._ahrs.heading += ((self._left_motor.speed - self._right_motor.speed) / 10)\n self._ahrs.heading = abs(self._ahrs.heading % 360)", "def update_location(self, loc, dt): #pylint: disable=invalid-name\n self.observer.date = dt\n self.sat.compute(self.observer)\n loc.az = float(self.sat.az)\n loc.al = float(self.sat.alt)", "def update_target_net(self):\n if self.n_steps % self.target_update_interval == 0:\n self.target_q.load_state_dict(self.working_q.state_dict())", "def update(self):\n self._state = None\n self._attributes = {}\n\n vehicles = self._api.get_vehicles(self._latitude, self._longitude)\n scooter = {}\n\n if vehicles:\n for vehicle in vehicles:\n location_vehicle = (vehicle[\"location\"][0], vehicle[\"location\"][1])\n location_hass = (self._latitude, self._longitude)\n vehicle[\"distance\"] = distance(location_vehicle, location_hass).m\n\n scooter = sorted(vehicles, key=lambda item: item[\"distance\"])[0]\n\n if scooter:\n self._state = round(scooter[\"distance\"])\n self._attributes[ATTR_LATITUDE] = round(scooter[\"location\"][0], 5)\n 
self._attributes[ATTR_LONGITUDE] = round(scooter[\"location\"][1], 5)\n self._attributes[ATTR_BATTERY_LEVEL] = round(scooter[\"battery\"])\n self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION", "def _update_step(self, *, observations: types.ObservationsTorch) -> None:" ]
[ "0.6674608", "0.63138056", "0.6144622", "0.61089504", "0.5971476", "0.5910543", "0.5902695", "0.5894484", "0.5882854", "0.5878823", "0.5864989", "0.5812776", "0.5812547", "0.5783059", "0.5753217", "0.5730742", "0.57175034", "0.5714535", "0.57135075", "0.57093894", "0.56997526", "0.5698025", "0.5696743", "0.56804293", "0.5666336", "0.56255466", "0.5611144", "0.5600216", "0.5589499", "0.5558427" ]
0.6535651
1
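The row above pairs the query text with the stored update_satellite_state() method, which caches the propagated position and velocity before an attitude step. As a point of reference only, the following sketch shows how such a refresh is typically driven from an attitude propagation loop. Everything except the two method names taken from the row (update_satellite_state, compute_torques) and the Orekit-style shiftedBy() date handling is an illustrative assumption: the sat object, its rotation and omega attributes, and the integrate_attitude() helper are hypothetical.

def propagate_attitude(sat, start_date, step_dt, n_steps):
    # Hypothetical driver loop: 'sat' is assumed to expose the stored
    # update_satellite_state() and compute_torques() methods, cached
    # 'rotation'/'omega' attributes, and an integrate_attitude() helper.
    date = start_date
    for _ in range(n_steps):
        # Refresh the cached orbit state first, matching the query's note that
        # the method "should be called before each attitude integration step".
        sat.update_satellite_state(date)
        torque = sat.compute_torques(sat.rotation, sat.omega, 0.0)
        sat.integrate_attitude(torque, step_dt)
        date = date.shiftedBy(step_dt)  # Orekit-style AbsoluteDate shift
    return sat

Calling update_satellite_state() once per step keeps the torque models reading a position and velocity that are consistent with the attitude interval being integrated.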
Compute disturbance torques acting on satellite. This method computes the disturbance torques, which are set to active in satellite's setting file.
def compute_torques(self, rotation, omega, dt): # shift time @ which attitude integration currently is try: curr_date = self.in_date.shiftedBy(dt) self.inertial2Sat = rotation self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i) omega = Vector3D(float(omega[0]), float(omega[1]), float(omega[2])) self._compute_gravity_torque(curr_date) self._compute_magnetic_torque(curr_date) self._compute_solar_torque(curr_date) self._compute_aero_torque(curr_date, omega) # external torque has to be set separately because it is received # through a ros subscriber return self._gTorque.add( self._mTorque.add( self._sTorque.add( self._aTorque))) except Exception: print traceback.print_exc() raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_torques(self, rotation, omega, dt):\n # shift time from integration start to time of attitude integration step\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)\n self.satPos_s = np.array([self.satPos_s.x,\n self.satPos_s.y,\n self.satPos_s.z], dtype='float64')\n\n self._compute_gravity_torque(curr_date)\n self._compute_magnetic_torque(curr_date)\n self._compute_solar_torque(curr_date)\n self._compute_aero_torque(curr_date, omega)\n\n return self._gTorque.add(\n self._mTorque.add(\n self._sTorque.add(\n self._aTorque)))", "def calculate_impedance_torques(self, position_error, orientation_error):\n desired_force = (np.multiply(np.array(position_error), np.array(self.impedance_kp[0:3]))\n - np.multiply(np.array(self.current_lin_velocity), self.impedance_kv[0:3]))\n\n desired_torque = (np.multiply(np.array(orientation_error), np.array(self.impedance_kp[3:6]))\n - np.multiply(np.array(self.current_ang_velocity), self.impedance_kv[3:6]))\n\n uncoupling = True\n if (uncoupling):\n decoupled_force = np.dot(self.lambda_x_matrix, desired_force)\n decoupled_torque = np.dot(self.lambda_r_matrix, desired_torque)\n decoupled_wrench = np.concatenate([decoupled_force, decoupled_torque])\n else:\n desired_wrench = np.concatenate([desired_force, desired_torque])\n decoupled_wrench = np.dot(self.lambda_matrix, desired_wrench)\n\n torques = np.dot(self.J_full.T, decoupled_wrench)\n\n if self.initial_joint is not None:\n # TODO where does 10 come from?\n joint_kp = 10\n joint_kv = np.sqrt(joint_kp) * 2\n pose_torques = np.dot(self.mass_matrix, (joint_kp * (\n self.initial_joint - self.current_joint_position) - joint_kv * self.current_joint_velocity))\n nullspace_torques = np.dot(self.nullspace_matrix.transpose(), pose_torques)\n torques += nullspace_torques\n self.torques = torques\n\n return torques", "def _compute_pd_torques(\n self,\n desired_motor_angles: np.ndarray,\n kp: np.ndarray,\n desired_motor_velocities,\n kd: np.ndarray,\n ) -> Tuple[np.ndarray, np.ndarray]:\n motor_angles, motor_velocities = self.get_motor_states()\n motor_torques = -kp * (motor_angles - desired_motor_angles) - kd * (\n motor_velocities - desired_motor_velocities)\n\n return motor_torques", "def _compute_solar_torque(self):\n pass", "def torque(system, /, use_demag=True):\n if use_demag:\n total_field = (mm.consts.mu0 *\n (oc.compute(system.energy.demag.effective_field, system)\n + system.energy.zeeman.H))\n else:\n total_field = mm.consts.mu0 * np.array(system.energy.zeeman.H)\n norm_field = df.Field(system.m.mesh, dim=1,\n value=(system.m.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n moment = system.m * volume\n torque = (moment & total_field)\n return (df.integral(torque * df.dV / volume**2, direction='xyz'))", "def compute_torques(self, caller):\n if caller == 'pose':\n self.pose_call = True\n if caller == 'vel':\n self.vel_call = True\n #If both vels and poses has called compute torques\n if self.pose_call and self.vel_call:\n #Reset checkers\n self.pose_call = False\n self.vel_call = False\n #Vels and poses\n # print \"Heard:\"\n # print \" \".join(str(n) for n in self.joints_vels)\n # print \" \".join(str(n) for n in self.joints_poses)\n #Compute B g and C matrices\n array_vels = np.asarray(self.joints_vels)[np.newaxis].T\n array_poses = np.asarray(self.joints_poses)[np.newaxis].T\n # print(\"array_vels\")\n # print(array_vels[2:4])\n # print(\"array_poses\")\n # print(array_poses[2:4])\n 
err_vels = array_vels[1:4] - self.target_vel\n err_poses = array_poses[1:4] - self.target_pose\n print(\"velocity error:\")\n print(err_vels)\n print(\"position error:\")\n print(err_poses)\n B = np.matrix([[0.0040055721446399998476906034738931*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.0013481452371199999142570291610355*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.011671172651879999466092491395841*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0039281369187899997198368480111341*sin(self.joints_poses[2]) + 0.042812399753418998939427354098797,\\\n 0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0058355863259399997330462456979205*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0019640684593949998599184240055671*sin(self.joints_poses[2]) + 0.01625959562072499985284632093574,\\\n 0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171],\\\n [0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0058355863259399997330462456979205*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0019640684593949998599184240055671*sin(self.joints_poses[2]) + 0.01625959562072499985284632093574,\\\n 0.0040085638208*cos(self.joints_poses[3]) + 0.01618298062072499985284632093574,\\\n 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171],\n [0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171,\\\n 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171,\\\n 0.0026403112045896820614231443819367]])\n\n C = np.matrix([[- 0.176*self.joints_vels[3]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])) - 1.0*self.joints_vels[2]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])),\\\n - 0.176*self.joints_vels[3]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])) - 1.0*self.joints_vels[1]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])) - 1.0*self.joints_vels[2]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 
0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])),\\\n -0.176*(self.joints_vels[1] + self.joints_vels[2] + self.joints_vels[3])*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3]))],\\\n [self.joints_vels[1]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])) - 0.0020042819104*self.joints_vels[3]*sin(self.joints_poses[3]),\\\n -0.0020042819104*self.joints_vels[3]*sin(self.joints_poses[3]),\\\n -0.0020042819104*sin(self.joints_poses[3])*(self.joints_vels[1] + self.joints_vels[2] + self.joints_vels[3])],\\\n [0.0020042819104*self.joints_vels[2]*sin(self.joints_poses[3]) + 0.176*self.joints_vels[1]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])),\\\n 0.0020042819104*sin(self.joints_poses[3])*(self.joints_vels[1] + self.joints_vels[2]),0]])\n\n g = np.array([[0.69474494555999997358275432901564*cos(self.joints_poses[1]) + 0.21649055273999998623105089912144*sin(self.joints_poses[1]) + 0.40336448984999999688544018994207*cos(self.joints_poses[1])*cos(self.joints_poses[2]) - 0.40336448984999999688544018994207*sin(self.joints_poses[1])*sin(self.joints_poses[2]) + 0.1384355808*cos(self.joints_poses[1])*cos(self.joints_poses[2])*cos(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[1])*sin(self.joints_poses[2])*sin(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[2])*sin(self.joints_poses[1])*sin(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[3])*sin(self.joints_poses[1])*sin(self.joints_poses[2])],\\\n [0.1384355808*cos(self.joints_poses[1] + self.joints_poses[2] + self.joints_poses[3]) + 0.40336448984999999688544018994207*cos(self.joints_poses[1] + self.joints_poses[2])],\\\n [ 0.1384355808*cos(self.joints_poses[1] + self.joints_poses[2] + self.joints_poses[3])]])\n #Compute control torque\n control_from_errors = self.target_acc -np.dot(self.KD, err_vels) - np.dot(self.KP, err_poses)\n print(\"Derivative contribution: \")\n print(np.dot(self.KD, err_vels))\n print(\"proportional contribution: \")\n print(np.dot(self.KP, err_poses))\n control_torque = np.dot(C, self.target_vel) + g + np.dot(B, control_from_errors)\n print(\"Torques: \")\n print(control_torque)\n #Create ROS message\n self.torques.layout.dim = [self.torques_layout]\n # self.torques.layout.dim.size = 6\n # self.torques.layout.dim.stride = 1\n self.torques.layout.data_offset = 0\n self.torques.data = [0.0, control_torque[0], control_torque[1], control_torque[2], 0.0, 0.0]\n self.torque_pub.publish(self.torques)", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n inertial2Sat = self.spacecraft_state.getAttitude().getRotation()\n\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n self._sTorque = 
Vector3D.ZERO\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'],\n self.meshDA['Coefs'])\n\n for CoM, normal, area, coefs in iterator:\n position = self.satPos_s.add(CoM)\n\n # compute flux in inertial frame\n sunSatVector = \\\n position.subtract(sunPos)\n r2 = sunSatVector.getNormSq()\n\n rawP = ratio * self.K_REF / r2\n flux = Vector3D(rawP / sqrt(r2), sunSatVector)\n\n # compute Radiation Pressure Force:\n if flux.getNormSq() > Precision.SAFE_MIN:\n # illumination (we are not in umbra)\n # rotate flux to spacecraft frame:\n dot = self.V3_dot(normal, flux)\n\n if dot > 0:\n # the solar array is illuminated backward,\n # fix signs to compute contribution correctly\n dot = -dot\n normal = normal.negate()\n absorbCoeff = coefs[0]\n specularReflCoeff = coefs[1]\n diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff)\n try:\n assert(diffuseReflCoeff >= 0)\n except AssertionError:\n raise AssertionError(\n \"Negative diffuse reflection coefficient not possible!\")\n psr = flux.getNorm()\n # Vallado's equation uses different parameters which are\n # related to our parameters as:\n # cos (phi) = - dot / (psr*area)\n # n = N (n...unit vector)\n # s = -fluxSat / psr (s...unit vector)\n cN = 2 * area * dot * (diffuseReflCoeff / 3 -\n specularReflCoeff * dot / psr)\n cS = (area * dot / psr) * (specularReflCoeff - 1)\n Force = Vector3D(float(cN), normal, float(cS), flux)\n # Force already in spacecraft frame. No need to convert\n self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force))\n\n else:\n self._sTorque = Vector3D.ZERO", "def turbulence(*args, attenuation: Union[float, bool]=0.0, frequency: Union[float, bool]=0.0,\n magnitude: Union[float, bool]=0.0, maxDistance: Union[float, bool]=0.0, name:\n Union[AnyStr, bool]=\"\", noiseLevel: Union[int, bool]=0, noiseRatio: Union[float,\n bool]=0.0, perVertex: bool=True, phase: Union[float, bool]=0.0, phaseX:\n Union[float, bool]=0.0, phaseY: Union[float, bool]=0.0, phaseZ: Union[float,\n bool]=0.0, position: Union[List[float, float, float], List[List[float, float,\n float]], bool]=None, torusSectionRadius: Union[float, bool]=0.0,\n volumeExclusion: bool=True, volumeOffset: Union[List[float, float, float],\n bool]=None, volumeShape: Union[AnyStr, bool]=\"\", volumeSweep: Union[float,\n bool]=0.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def calculate_all_distances(self):\n self.close_distance = self.calculate_distance(self.close_distance_factor)\n self.medium_distance = self.calculate_distance(self.medium_distance_factor)\n self.far_distance = self.calculate_distance(self.far_distance_factor)", "def get_motor_torques(\n self,\n motor_commands: np.ndarray,\n motor_control_mode=None) -> Tuple[np.ndarray, np.ndarray]:\n if not motor_control_mode:\n motor_control_mode = self._motor_control_mode\n\n motor_torques = None\n\n if motor_control_mode is robot_config.MotorControlMode.TORQUE:\n motor_torques = motor_commands\n\n if motor_control_mode is robot_config.MotorControlMode.POSITION:\n motor_torques = self._compute_pd_torques(\n desired_motor_angles=motor_commands,\n kp=self._kp,\n desired_motor_velocities=self._zero_array,\n kd=self._kd)\n \n if motor_torques is None:\n raise ValueError(\n \"{} is not a supported motor control mode\".format(motor_control_mode))\n\n # Apply the output filter to model actuator dynamics\n # BUG: Causes big instability in the sim\n # motor_torques = self._torque_filter(motor_torques)\n\n # Hard-code torque limits until the torque 
limit bug is fixed\n motor_torques = np.clip(motor_torques, -1.7, 1.7)\n\n # Apply motor damping and friction\n motor_torques -= (np.sign(self._previous_true_motor_velocity) *\n self._motor_torque_dependent_friction *\n motor_torques)\n motor_torques -= self._previous_true_motor_velocity * self._motor_damping\n\n # Rescale and clip the motor torques as needed.\n motor_torques = self._strength_ratios * motor_torques\n if (self._torque_lower_limits is not None or\n self._torque_upper_limits is not None):\n motor_torques = np.clip(motor_torques, self._torque_lower_limits,\n self._torque_upper_limits)\n\n return motor_torques, motor_torques", "def torque(cls):\n jobids = [calc.jobid() for calc in vasp.Vasp.calculators]\n\n qstat = ['[[shell:qstat {}][{}]]'.format(jobid, jobid)\n for jobid in jobids]\n qdel = ['[[shell:qdel {}][qdel]]'.format(jobid)\n for jobid in jobids]\n\n dirs = [calc.directory\n for calc in vasp.Vasp.calculators]\n\n s = '[[shell:xterm -e \"cd {}; ls && /bin/bash\"][{}]]'\n xterm = [s.format(d, os.path.relpath(d))\n for d in dirs]\n\n s = '[[elisp:(find-file \"{}\")][dired]]'\n dired = [s.format(d)\n for d in dirs]\n\n return '\\n'.join(['| {0} {1} | {2} | {3} |'.format(xt, dd, qs, qd)\n for xt, qs, qd, dd in zip(xterm, qstat, qdel, dired)])", "def motor_torques(self):\n raise NotImplementedError('Not yet implemented!')", "def setup_tacs_problems(self, comm):\n # Overwrite default check values\n if self.dtype == complex:\n self.rtol = 1e-8\n self.atol = 1e-3\n self.dh = 1e-50\n else:\n self.rtol = 1e-1\n self.atol = 1e-4\n self.dh = 1e-5\n\n # Instantiate FEA Assembler\n fea_assembler = pytacs.pyTACS(bdf_file, comm)\n\n # Set up constitutive objects and elements\n fea_assembler.initialize()\n\n # set transient problem options\n transientOptions = {\"timeIntegrator\": \"DIRK\", \"integrationOrder\": DIRK_order}\n\n # get some problem info\n n_vpn = fea_assembler.getVarsPerNode()\n\n # Create coarse load-specified transient problem\n coarse_prob = fea_assembler.createTransientProblem(\n name=\"load_coarse\",\n tInit=0.0,\n tFinal=1.0,\n numSteps=8,\n options=transientOptions,\n )\n # Create fine load-specified transient problem\n fine_prob = fea_assembler.createTransientProblem(\n name=\"load_fine\",\n tInit=0.0,\n tFinal=1.0,\n numSteps=32,\n options=transientOptions,\n )\n load_probs = [coarse_prob, fine_prob]\n\n for prob in load_probs:\n forces = np.zeros(n_vpn)\n ns = prob.getNumTimeSteps()\n for k in range(ns + 1):\n t_array = prob.getTimeStages(k)\n for s, t in enumerate(t_array):\n f = f_mag * t**5\n forces[2] = f # applied to z-direction\n prob.addLoadToNodes(\n timeStep=k,\n timeStage=s,\n nodeIDs=21,\n F=forces,\n nastranOrdering=True,\n )\n\n for problem in load_probs:\n problem.addFunction(\"mass\", functions.StructuralMass)\n problem.addFunction(\n \"ks_disp\",\n functions.KSDisplacement,\n direction=[0.0, 0.0, 100.0],\n ftype=\"discrete\",\n )\n\n return load_probs, fea_assembler", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = self.inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n sunPos = np.array([sunPos.x, sunPos.y, sunPos.z], dtype='float64')\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = self.meshDA['Area_np']\n coefs = self.meshDA['Coefs_np']\n\n sunSatVector = self.satPos_s + CoM - sunPos\n r = np.linalg.norm(sunSatVector, axis=1)\n rawP = ratio * self.K_REF / 
(r**2)\n flux = (rawP / r)[:, None] * sunSatVector\n # eliminate arrays where zero flux\n fluxNorm = np.linalg.norm(flux, axis=1)\n Condflux = fluxNorm**2 > Precision.SAFE_MIN\n flux = flux[Condflux]\n normal = normal[Condflux]\n\n # dot product for multidimensional arrays:\n dot = np.einsum('ij,ij->i', flux, normal)\n dot[dot > 0] = dot[dot > 0] * (-1.0)\n if dot.size > 0:\n normal[dot > 0] = normal[dot > 0] * (-1.0)\n\n cN = 2 * area * dot * (coefs[:, 2] / 3 - coefs[:, 1] * dot / fluxNorm)\n cS = (area * dot / fluxNorm) * (coefs[:, 1] - 1)\n force = cN[:, None] * normal + cS[:, None] * flux\n\n sT = np.sum(np.cross(CoM, force), axis=0)\n\n self._sTorque = Vector3D(float(sT[0]), float(sT[1]), float(sT[2]))\n\n else:\n self._sTorque = Vector3D.ZERO", "def turbines(self):\n return self.turbine_map.turbines", "def tora(self) -> typing.Union[None, typing.List[int]]:\n return self.distances('TORA')", "def computeForces(self, neighbors=[]): #computing forces to drive the agents and avoid collisions \n if not self.atGoal:\n if self.entry_state % 2 == 0 and len(self.entrancex) > 0 and self.id != 4 : #checks if assigned curve is entry and switches to state 1 to follow entry bezier curve\n time2=0.5 # time used to calculate driving force \n self.local_goal = [self.entrancex[0], self.entrancey[0]] #assigning waypoint as goal\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2))) #calculating direction vector\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez) #driving force\n self.entrancex = np.delete(self.entrancex,0) #eliminating the used waypoints from the list \n self.entrancey = np.delete(self.entrancey,0) #eliminating the used waypoints from the list \n \n elif self.force_state == 1 and (abs(self.pos[0] - self.goal[0]) >400 or abs(self.pos[1] - self.goal[1]) >400): #checks if force-based navigation is assigned, switches to state 2\n self.F = (self.gvel-self.vel)/self.ksi #driving force\n for neighbor in neighbors:\n if neighbor.id != self.id: #and not neighbor.atGoal: \n distSq = (neighbor.pos-self.pos).dot(neighbor.pos-self.pos)\n #print(distSq, self.dhorSq)\n if distSq < self.dhorSq: # neighbor is inside the sensing radius\n tau = self.ttc(neighbor)\n #print(tau, self.timehor)\n if tau < self.timehor: # will the two agents collide in less than timehor?\n dir = self.pos + self.vel*tau - neighbor.pos - neighbor.vel*tau \n length = sqrt(dir.dot(dir))\n if length > 0:\n dir = dir/length # the direction of the force\n mag = (self.timehor - tau)/(tau + 1e-6) # the magnitude of the force\n self.F += mag*dir # add the force\n \n else: #state 3 - following the exit bezier curve\n time2=0.5 # time used to calculate driving force\n self.local_goal = [self.exitx[0], self.exity[0]]\n if abs(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos)))) >10: #to reach first point of exit curve from agents previous state position\n self.F = ((self.local_goal - self.pos)/(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos) )))*self.prefspeed)/self.ksi\n else:\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2)))\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez)\n #print(self.pos, self.local_goal)\n if len(self.exitx) > 1 :\n self.exitx = np.delete(self.exitx,0)\n self.exity = 
np.delete(self.exity,0)", "def compute_duty_factor():\n [time,\n ankle_l_trajectory,\n ankle_r_trajectory,\n foot_l_contact,\n foot_r_contact,\n muscle_lh_activations,\n muscle_rh_activations,\n muscle_lh_forces,\n muscle_rh_forces,\n joint_lh_positions,\n joint_rh_positions] = load_data()\n \n print(np.sum(foot_l_contact)/len(foot_l_contact))\n print(np.sum(foot_r_contact)/len(foot_r_contact))\n\n return np.sum(foot_l_contact)/len(foot_l_contact)*0.5 + np.sum(foot_r_contact)/len(foot_r_contact)*0.5", "def correction(self):\r\n \r\n # empirical coefficients:\r\n k3, k2, k1, k0 = 0.0892, 0.0544, 0.2511, -0.0017\r\n \r\n # thrust as a function of the azimuth angle and the loads:\r\n thrust = self.qn*np.sin(Turbine.t) + self.qt*np.cos(Turbine.t)\r\n \r\n # interpolator function for the thrust:\r\n function = interp1d(Turbine.t, thrust, kind='cubic')\r\n \r\n # vectorize the function so that it takes an array of angles:\r\n __function__ = np.vectorize(function)\r\n \r\n # thrust coefficient integrating according to phi:\r\n self.cth = simps(__function__(Turbine.p), Turbine.p)\r\n \r\n # induction factor:\r\n self.a = k3*self.cth**3 + k2*self.cth**2 + k1*self.cth + k0\r\n \r\n # correction factor:\r\n if self.a <= 0.15:\r\n self.ka = 1.0/(1.0 - self.a)\r\n else:\r\n self.ka = (1./(1 - self.a))*(0.65 + 0.35*exp(-4.5*(self.a - 0.15)))", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. 
Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def ransac(cloud_s, cloud_t, \n depth_s, depth_t,\n A_prev, b_prev,\n n_iter, n_inlier_cutoff, d_cutoff):\n import random\n n_s = len(cloud_s)\n n_t = len(cloud_t)\n n_inliers = [0] * n_iter\n# Initialization\n A_init = A_prev\n b_init = b_prev\n pred_t = A_init.dot(cloud_s.T).T + b_init\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n max_inliers = sum(inliers)\n print(\"Have \" + str(n_s) + \" features that could be inliers\")\n print(\"Starting with \" + str(max_inliers) + \" inliers\")\n for iter in range(n_iter):\n assert n_s == n_t, \"clouds not of equal size in ransac()\"\n # TODO: replace this random choice with 3 corresponding feature descriptors\n points_inds = random.sample(range(n_s), 3)\n x_vals = np.array([cloud_s[i] for i in points_inds])\n y_vals = np.array([cloud_t[i] for i in points_inds])\n\n # Using Horn 1987, Closed-form solution of absolute orientation\n # using unit quaternions.\n A_init_tmp, b_init_tmp = horn_adjust(x_vals, y_vals)\n\n # TODO: find inliers to the transformation T\n pred_t = A_init_tmp.dot(cloud_s.T).T + b_init_tmp\n# TODO: should really be looking at the distance in the projected space!!\n inliers = [np.linalg.norm(pred_t[i,] - cloud_t[i,]) < d_cutoff for i in range(n_s)]\n n_inliers = sum(inliers)\n\n # TODO: do we want to refit on the inliers?\n if n_inliers > max_inliers:\n A_init = A_init_tmp\n b_init = b_init_tmp\n max_inliers = n_inliers\n print(\"Adjusting A and b again!\")\n print(A_init)\n print(b_init)\n\n # TODO: are we using n_inlier_cutoff in this way? Check the paper!\n if max_inliers < n_inlier_cutoff:\n raise Exception('insufficient inliers! Want ' + str(n_inlier_cutoff) +\n ' but got ' + str(max_inliers))\n #max_index = n_inliers.index(max(n_inliers)) \n # Compute the best transformation T_star\n# TODO: actually optimize over the depth field!! 
using spatial.KDTree and spatial.KDTree.query\n# Need to shift depth1XYZ by our initial transformation first\n depth1XYZ = A_init.dot(depth_s.T).T + b_init\n depth2XYZ = depth_t\n tree = spatial.KDTree(depth2XYZ)\n tree_q = tree.query(depth1XYZ)\n# Keep only matches within the cutoff.\n# depth_pair_inds has indeces for depth1XYZ and depth2XYZ\n cutoff = 0.01\n depth_pair_inds = [(i,tree_q[1][i]) for i in range(len(tree_q[0]))\n if tree_q[0][i] < cutoff]\n #depth_cloud_s = np.array([depth1XYZ[k[0]] for k in depth_pair_inds])\n depth_cloud_s = np.array([depth_s[k[0]] for k in depth_pair_inds])\n depth_cloud_t = np.array([depth2XYZ[k[1]] for k in depth_pair_inds])\n\n# A_d = list(range(n_s))\n# A, b = find_argmin_T(cloud_s, cloud_t, A_d,\n# A_init, b_init)\n A_d = list(range(depth_cloud_s.shape[0]))\n A, b = find_argmin_T(depth_cloud_s, depth_cloud_t, A_d,\n A_init, b_init)\n print(\"A_init value:\")\n print(A_init)\n print(\"b_init value:\")\n print(b_init)\n \n print(\"Returning A, b\")\n print(\"A value:\")\n print(A)\n print(\"b value:\")\n print(b)\n print(\"inliers:\")\n print(max_inliers)\n return(A, b)", "def joints_torque(self):\r\n return self._arm.joints_torque", "def doubt_check(self):\n DoubtIndex = 0.0\n DoubtIndex += (np.random.rand() * 0.2 + 0.8) * \\\n (((self.Call - 700) / 300) ** 3)\n\n if self.DiscardCount == 2:\n DoubtIndex += 0.1\n if self.Call >= 990:\n DoubtIndex += 0.6\n\n if self.DiscardCount == 3:\n DoubtIndex += 0.3\n if self.Call >= 900:\n DoubtIndex += 0.3\n\n if DoubtIndex > 0.85:\n self.Doubt = True\n\n return self.Doubt", "def set_t_FAST(self):\n\t\n\tself.N = 2**7\n\tdt = self.Orbit.Tobs/self.N\n\tself.t = np.linspace(0, self.N-1, self.N)*self.Orbit.Tobs/self.N\n\t\n\treturn", "def __get_bond_spot_rates__(self):\r\n for T in self.get_maturities():\r\n instrument=self.instruments[T]\r\n (par,coup,price,freq)=instrument\r\n\r\n if coup!=0:\r\n self.zero_rates[T]=self.__calculate_bond_spot_rate__(T,instrument)", "def constrain_buckling(self, method=1, ms=0.1):\n self.create_dvars()\n eltype = self.elements[0].type\n\n # reading constants\n dtable_E = self.dtables['STRE'][0]\n dtable_nu = self.dtables['STRnu'][0]\n\n if method == 1 and self.profile.lower() == 'z_t':\n # buckling equation\n deqatn = DEQATN(\n 'bf(t, b, h, E, nu, FA) = b-t/2.;'\n 'bw = h-t;'\n 'x = bf/bw;'\n 'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '\n '+ 249.62*x**2 -41.924*x + 6.4545;'\n 'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'\n 'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRZt']\n # reading constants\n dtable_b = self.dtables['STRZb'][0]\n dtable_h = self.dtables['STRZh'][0]\n # building DRESP1 that reads:\n # - axial stress\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n atta = OUTC['STRESS']['CBAR']['Axial']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,\n atta=atta, attb='', atti=eid)\n self.add_dresp(dresp_FA)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id]\n dresp2.dtable = [dtable_b, dtable_h, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_FA.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'z_t_b':\n # buckling equation\n deqatn = DEQATN(\n 'bf(t, b, h, E, nu, FA) = b-t/2.;'\n 'bw 
= h-t;'\n 'x = bf/bw;'\n 'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '\n '+ 249.62*x**2 -41.924*x + 6.4545;'\n 'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'\n 'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRZt']\n dvar_b = self.dvars['STRZb']\n # reading constants\n dtable_h = self.dtables['STRZh'][0]\n # building DRESP1 that reads:\n # - axial stress\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n atta = OUTC['STRESS']['CBAR']['Axial']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,\n atta=atta, attb='', atti=eid)\n self.add_dresp(dresp_FA)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id, dvar_b.id]\n dresp2.dtable = [dtable_h, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_FA.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'z_t_b_h':\n # buckling equation\n deqatn = DEQATN(\n 'bf(t, b, h, E, nu, FA) = b-t/2.;'\n 'bw = h-t;'\n 'x = bf/bw;'\n 'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '\n '+ 249.62*x**2 -41.924*x + 6.4545;'\n 'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'\n 'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRZt']\n dvar_b = self.dvars['STRZb']\n dvar_h = self.dvars['STRZh']\n # building DRESP1 that reads:\n # - axial stress\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n atta = OUTC['STRESS']['CBAR']['Axial']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,\n atta=atta, attb='', atti=eid)\n self.add_dresp(dresp_FA)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id, dvar_b.id, dvar_h.id]\n dresp2.dtable = [dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_FA.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'b_t':\n # buckling equation\n # - considers combined compression + shear\n # - disconsiders bending effects\n # - assumes 3 edges simply supported and one free unloaded edge\n deqatn = DEQATN('kc(t, h, L, E, nu, PC, PS) = 0.456 + (h/L)**2;'\n 'FCcr = kc*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FC = PC/(t*h);'\n 'Rc = FC/FCcr;'\n 'x = L/h;'\n 'ks = 0.0648*x**6 - 1.2338*x**5 + 9.4869*x**4 -'\n '37.697*x**3 + 81.88*x**2 - 93.218*x + 50.411;'\n 'ks = MAX(ks, 5.42);'\n 'FScr = ks*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FS = PS/(t*h);'\n 'Rs = FS/FScr;'\n 'MS = 2./(Rc + SQRT(Rc**2 + 4*Rs**2)) - 1.')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRBt']\n # reading constants\n dtable_h = self.dtables['STRBh'][0]\n dtable_L = self.dtables['STRBL'][0]\n # building DRESP1s that read:\n # - axial force\n # - shear along Plane 1 (y axis)\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n code_PC = OUTC['FORCE']['CBAR']['Axial force']\n code_PS = OUTC['FORCE']['CBAR']['Shear plane 1']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_PC = DRESP1('STRPC', 'FORCE', 'ELEM', region=None,\n 
atta=code_PC, attb='', atti=eid)\n dresp_PS = DRESP1('STRPS', 'FORCE', 'ELEM', region=None,\n atta=code_PS, attb='', atti=eid)\n self.add_dresp(dresp_PC)\n self.add_dresp(dresp_PS)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id]\n dresp2.dtable = [dtable_h, dtable_L, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_PC.id, dresp_PS.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n elif method == 1 and self.profile.lower() == 'b_t_h':\n # buckling equation\n # - considers combined compression + shear\n # - disconsiders bending effects\n # - assumes 3 edges simply supported and one free unloaded edge\n deqatn = DEQATN('kc(t, h, L, E, nu, PC, PS) = 0.456 + (h/L)**2;'\n 'FCcr = kc*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FC = PC/(t*h);'\n 'Rc = FC/FCcr;'\n 'x = L/h;'\n 'ks = 0.0648*x**6 - 1.2338*x**5 + 9.4869*x**4 -'\n '37.697*x**3 + 81.88*x**2 - 93.218*x + 50.411;'\n 'ks = MAX(ks, 5.42);'\n 'FScr = ks*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'\n 'FS = PS/(t*h);'\n 'Rs = FS/FScr;'\n 'MS = 2./(Rc + SQRT(Rc**2 + 4*Rs**2)) - 1.')\n self.add_deqatn(deqatn)\n # reading variables\n dvar_t = self.dvars['STRBt']\n dvar_h = self.dvars['STRBh']\n # reading constants\n dtable_L = self.dtables['STRBL'][0]\n # building DRESP1s that read:\n # - axial force\n # - shear along Plane 1 (y axis)\n OUTC = output_codes_SOL200.OUTC\n if eltype == 'CBAR':\n code_PC = OUTC['FORCE']['CBAR']['Axial force']\n code_PS = OUTC['FORCE']['CBAR']['Shear plane 1']\n else:\n raise NotImplementedError('element %s not implemented' %\n eltype)\n eid = self.get_central_element().eid\n dresp_PC = DRESP1('STRPC', 'FORCE', 'ELEM', region=None,\n atta=code_PC, attb='', atti=eid)\n dresp_PS = DRESP1('STRPS', 'FORCE', 'ELEM', region=None,\n atta=code_PS, attb='', atti=eid)\n self.add_dresp(dresp_PC)\n self.add_dresp(dresp_PS)\n # building DRESP2\n dresp2 = DRESP2('STRBUCK', deqatn.id)\n dresp2.dvars = [dvar_t.id, dvar_h.id]\n dresp2.dtable = [dtable_L, dtable_E, dtable_nu]\n dresp2.dresp1 = [dresp_PC.id, dresp_PS.id]\n self.add_dresp(dresp2)\n # applying constraint\n dcid = self.constraints['buckling']\n dconstr = self.add_constraint(dcid, dresp2, ms, None)\n\n else:\n raise NotImplementedError('Stringer %s profile not supported!' 
%\n self.profile)", "def ODEs(y, t, B, MdiscI, RdiscI, epsilon, delta, n=1.0, alpha=0.1, cs7=1.0,\n k=0.9):\n # Initial conditions\n Mdisc, omega = y\n \n # Constants\n Rdisc = RdiscI * 1.0e5 # Disc radius - cm\n tvisc = Rdisc / (alpha * cs7 * 1.0e7) # Viscous timescale - s\n mu = 1.0e15 * B * (R ** 3.0) # Magnetic Dipole Moment\n M0 = delta * MdiscI * Msol # Global Fallback Mass Budget - g\n tfb = epsilon * tvisc # Fallback timescale - s\n \n # Radii - Alfven, Corotation, Light Cylinder\n Rm = ((mu ** (4.0 / 7.0)) * (GM ** (-1.0 / 7.0)) * ((Mdisc / tvisc) **\n (-2.0 / 7.0)))\n Rc = (GM / (omega ** 2.0)) ** (2.0 / 3.0)\n Rlc = c / omega\n # Cap the Alfven radius\n if Rm >= k * Rlc:\n Rm = k * Rlc\n \n w = (Rm / Rc) ** (3.0 / 2.0) # Fastness parameter\n \n bigT = 0.5 * I * (omega ** 2.0) # Rotational energy\n modW = (0.6 * M * (c ** 2.0) * ((GM / (R * (c ** 2.0))) / (1.0 - 0.5 * (GM\n / (R * (c ** 2.0)))))) # Binding energy\n rot_param = bigT / modW # Rotation parameter\n \n # Dipole torque\n Ndip = (-1.0 * (mu ** 2.0) * (omega ** 3.0)) / (6.0 * (c ** 3.0))\n \n # Mass flow rates and efficiencies\n eta2 = 0.5 * (1.0 + np.tanh(n * (w - 1.0)))\n eta1 = 1.0 - eta2\n Mdotprop = eta2 * (Mdisc / tvisc) # Propelled\n Mdotacc = eta1 * (Mdisc / tvisc) # Accreted\n Mdotfb = (M0 / tfb) * (((t + tfb) / tfb) ** (-5.0 / 3.0)) # Fallback rate\n Mdotdisc = Mdotfb - Mdotprop - Mdotacc # Mass flow through the disc\n \n if rot_param > 0.27:\n Nacc = 0.0 # Prevents magnetar break-up\n else:\n # Accretion torque\n if Rm >= R:\n Nacc = ((GM * Rm) ** 0.5) * (Mdotacc - Mdotprop)\n else:\n Nacc = ((GM * R) ** 0.5) * (Mdotacc - Mdotprop)\n \n omegadot = (Nacc + Ndip) / I # Angular frequency time derivative\n \n return np.array([Mdotdisc, omegadot])", "def optimize_hydrogens(self):\n _LOGGER.debug(\"Optimization progress:\")\n optlist = self.optlist\n connectivity = {}\n # Initialize the detection progress\n if len(optlist) == 0:\n return\n _LOGGER.debug(\" Detecting potential hydrogen bonds\")\n progress = 0.0\n increment = 1.0 / len(optlist)\n for obj in optlist:\n connectivity[obj] = []\n for atom in obj.atomlist:\n closeatoms = self.debumper.cells.get_near_cells(atom)\n for closeatom in closeatoms:\n # Conditions for continuing\n if atom.residue == closeatom.residue:\n continue\n if not (closeatom.hacceptor or closeatom.hdonor):\n continue\n if atom.hdonor and not atom.hacceptor:\n if not closeatom.hacceptor:\n continue\n if atom.hacceptor:\n if not atom.hdonor and not closeatom.hdonor:\n continue\n dist = util.distance(atom.coords, closeatom.coords)\n if dist < 4.3:\n residue = atom.residue\n hbond = PotentialBond(atom, closeatom, dist)\n # Store the potential bond\n obj.hbonds.append(hbond)\n # Keep track of connectivity\n if closeatom in self.atomlist:\n closeobj = self.resmap[closeatom.residue]\n if closeobj not in connectivity[obj]:\n connectivity[obj].append(closeobj)\n progress += increment\n while progress >= 0.0499:\n progress -= 0.05\n # Some residues might have no nearby hbonds - if so, place at\n # default state\n for obj in optlist:\n if len(obj.hbonds) == 0:\n if obj.residue.fixed:\n continue\n _LOGGER.debug(\n f\"{obj.residue} has no nearby partners - fixing.\"\n )\n obj.finalize()\n # Determine the distinct networks\n networks = []\n seen = []\n for obj1 in optlist:\n if obj1.residue.fixed:\n continue\n if obj1 in seen:\n continue\n network = util.analyze_connectivity(connectivity, obj1)\n for obj2 in network:\n if obj2 not in seen:\n seen.append(obj2)\n networks.append(network)\n # Initialize the 
output progress\n if len(networks) > 0:\n _LOGGER.debug(\"Optimizing hydrogen bonds\")\n progress = 0.0\n increment = 1.0 / len(networks)\n # Work on the networks\n for network in networks:\n txt = \"\"\n for obj in network:\n txt += f\"{obj}, \"\n _LOGGER.debug(f\"Starting network {txt[:-2]}\")\n # FIRST: Only optimizeable to backbone atoms\n _LOGGER.debug(\"* Optimizeable to backbone *\")\n hbondmap = {}\n for obj in network:\n for hbond in obj.hbonds:\n if hbond.atom2 not in self.atomlist:\n hbondmap[hbond] = hbond.dist\n hbondlist = util.sort_dict_by_value(hbondmap)\n hbondlist.reverse()\n for hbond in hbondlist:\n atom = hbond.atom1\n atom2 = hbond.atom2\n obj = self.resmap[atom.residue]\n\n if atom.residue.fixed:\n continue\n if atom.hdonor:\n obj.try_donor(atom, atom2)\n if atom.hacceptor:\n obj.try_acceptor(atom, atom2)\n # SECOND: Non-dual water Optimizeable to Optimizeable\n _LOGGER.debug(\"* Optimizeable to optimizeable *\")\n hbondmap = {}\n seenlist = []\n for obj in network:\n for hbond in obj.hbonds:\n if hbond.atom2 in self.atomlist:\n if not isinstance(hbond.atom1.residue, aa.WAT):\n if not isinstance(hbond.atom2.residue, aa.WAT):\n # Only get one hbond pair\n if (hbond.atom2, hbond.atom1) not in seenlist:\n hbondmap[hbond] = hbond.dist\n seenlist.append((hbond.atom1, hbond.atom2))\n hbondlist = util.sort_dict_by_value(hbondmap)\n hbondlist.reverse()\n for hbond in hbondlist:\n atom = hbond.atom1\n atom2 = hbond.atom2\n obj1 = self.resmap[atom.residue]\n obj2 = self.resmap[atom2.residue]\n # Atoms may no longer exist if already optimized\n if not atom.residue.has_atom(atom.name):\n continue\n if not atom2.residue.has_atom(atom2.name):\n continue\n res = 0\n if atom.hdonor and atom2.hacceptor:\n res = obj1.try_both(atom, atom2, obj2)\n if atom.hacceptor and atom2.hdonor and res == 0:\n obj2.try_both(atom2, atom, obj1)\n # THIRD: All water-water residues\n _LOGGER.debug(\"* Water to Water *\")\n hbondmap = {}\n seenlist = []\n for obj in network:\n for hbond in obj.hbonds:\n residue = hbond.atom1.residue\n if isinstance(residue, aa.WAT):\n if isinstance(hbond.atom2.residue, aa.WAT):\n if (hbond.atom2, hbond.atom1) not in seenlist:\n hbondmap[hbond] = hbond.dist\n seenlist.append((hbond.atom1, hbond.atom2))\n hbondlist = util.sort_dict_by_value(hbondmap)\n hbondlist.reverse()\n for hbond in hbondlist:\n atom = hbond.atom1\n atom2 = hbond.atom2\n obj1 = self.resmap[atom.residue]\n obj2 = self.resmap[atom2.residue]\n res = 0\n if atom.hdonor and atom2.hacceptor:\n res = obj1.try_both(atom, atom2, obj2)\n if atom.hacceptor and atom2.hdonor and res == 0:\n obj2.try_both(atom2, atom, obj1)\n # FOURTH: Complete all residues\n for obj in network:\n obj.complete()\n # STEP 5: Update progress meter\n progress += 100.0 * increment\n while progress >= 5.0:\n progress -= 5.0", "def _compute_aero_torque(self):\n pass" ]
[ "0.56103945", "0.5272524", "0.5136728", "0.49325484", "0.4808587", "0.46530074", "0.46459928", "0.46395054", "0.4618196", "0.4606716", "0.45933738", "0.45713773", "0.45592108", "0.45434135", "0.45429534", "0.45280662", "0.45210528", "0.45133996", "0.45075887", "0.45007682", "0.45007682", "0.44990927", "0.44721466", "0.44606313", "0.44543698", "0.4452557", "0.44495058", "0.44427827", "0.44399157", "0.4427905" ]
0.5824297
0
Compute the gravity gradient torque if a gravity model is provided. This method computes the Newtonian attraction and the perturbing part of the gravity gradient for every cuboid defined in the dictionary inCub at time curr_date (the time of the current satellite position). The gravity torque is computed in the inertial frame in which the spacecraft is defined. The perturbing part is calculated using Orekit's methods defined in the GravityModel object. The current position, rotation and mass of the satellite are obtained from the StateObserver object.
def _compute_gravity_torque(self, curr_date):
    if self._to_add[0]:
        body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)
        body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())
        sat2body = body2sat.revert()

        satM = self.state_observer.spacecraftState.getMass()
        mCub = self.inCub['mass_frac'] * satM

        self._gTorque = Vector3D.ZERO

        for CoM in self.inCub['CoM']:
            S_dmPos = self.satPos_s.add(CoM)

            r2 = S_dmPos.getNormSq()
            gNewton = Vector3D(-self.muGM / (sqrt(r2) * r2), S_dmPos)

            B_dmPos = sat2body.applyTo(S_dmPos)

            gDist = Vector3D(self.GravityModel.gradient(curr_date,
                                                        B_dmPos,
                                                        self.muGM))

            g_Dist_s = body2sat.applyTo(gDist)

            dmForce = Vector3D(mCub, gNewton.add(g_Dist_s))
            self._gTorque = self._gTorque.add(self.V3_cross(CoM, dmForce))

    else:
        self._gTorque = Vector3D.ZERO
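For intuition, here is a minimal standalone sketch of the per-cuboid summation described above, keeping only the Newtonian point-mass term and omitting the Orekit GravityModel perturbation. The function and argument names (gravity_gradient_torque, com_offsets, mass_fracs) are illustrative assumptions rather than part of the original code.

import numpy as np

def gravity_gradient_torque(sat_pos, com_offsets, mass_fracs, sat_mass, mu):
    # Illustrative sketch: sum the Newtonian attraction over cuboids and
    # accumulate the resulting moments about the satellite centre of mass.
    #   sat_pos     -- satellite position in the working frame, shape (3,)
    #   com_offsets -- cuboid CoM offsets from the satellite CoM, shape (N, 3)
    #   mass_fracs  -- mass fraction of each cuboid, shape (N,)
    #   sat_mass    -- total spacecraft mass
    #   mu          -- gravitational parameter of the central body
    torque = np.zeros(3)
    for r_i, frac in zip(com_offsets, mass_fracs):
        dm_pos = sat_pos + r_i                      # cuboid position
        r = np.linalg.norm(dm_pos)
        g_newton = -mu / r**3 * dm_pos              # point-mass acceleration
        force = frac * sat_mass * g_newton          # force on this cuboid
        torque += np.cross(r_i, force)              # moment about the satellite CoM
    return torque

# Example: two equal point masses offset along the body x/z axes in low Earth orbit
tau = gravity_gradient_torque(
    sat_pos=np.array([7.0e6, 0.0, 0.0]),            # ~700 km altitude, metres
    com_offsets=np.array([[0.5, 0.0, 0.1], [-0.5, 0.0, -0.1]]),
    mass_fracs=np.array([0.5, 0.5]),
    sat_mass=100.0,
    mu=3.986004418e14,
)
print(tau)

The loop mirrors the structure of the method above: each cuboid contributes a moment r_i x (m_i * g(sat_pos + r_i)), and offsets with unequal lever arms relative to the local gravity field produce the net gravity gradient torque.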
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n body2satRot = PyRotation(body2sat.q0,\n body2sat.q1,\n body2sat.q2,\n body2sat.q3)\n sat2bodyRot = body2satRot.revert()\n body2sat = body2satRot.getMatrix()\n sat2body = sat2bodyRot.getMatrix()\n\n satM = self.spacecraft_state.getMass()\n mCub = self.inCub['dm'] * satM\n # add booms\n if \"dm_boom\" in self.inCub:\n mCub = np.concatenate((mCub, self.inCub['dm_boom']), axis=0) # boom store with mass\n CoM = self.inCub['CoM_np']\n\n dmPos_s = CoM + self.satPos_s\n\n gNewton = (-self.muGM / np.linalg.norm(dmPos_s,\n axis=1,\n keepdims=True)**3) * dmPos_s\n\n # rotate vectors:\n dmPos_b = np.einsum('ij,kj->ki', sat2body, dmPos_s)\n\n gDist = np.empty(dmPos_b.shape)\n for i in xrange(0, dmPos_b.shape[0]):\n gDist[i, :] = np.asarray(\n self.GravityModel.gradient(curr_date,\n Vector3D(float(dmPos_b[i, 0]),\n float(dmPos_b[i, 1]),\n float(dmPos_b[i, 2])),\n self.muGM))\n\n gDist_s = np.einsum('ij,kj->ki', body2sat, gDist)\n\n gT = np.sum(np.cross(CoM, mCub*(gNewton + gDist_s)), axis=0)\n\n self._gTorque = Vector3D(float(gT[0]), float(gT[1]), float(gT[2]))\n\n else:\n self._gTorque = Vector3D.ZERO", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. 
Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def _compute_gravity_torque(self):\n pass", "def compute_torques(self, rotation, omega, dt):\n # shift time @ which attitude integration currently is\n try:\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)\n omega = Vector3D(float(omega[0]), float(omega[1]), float(omega[2]))\n\n self._compute_gravity_torque(curr_date)\n self._compute_magnetic_torque(curr_date)\n self._compute_solar_torque(curr_date)\n self._compute_aero_torque(curr_date, omega)\n\n # external torque has to be set separately because it is received\n # through a ros subscriber\n return self._gTorque.add(\n self._mTorque.add(\n self._sTorque.add(\n self._aTorque)))\n except Exception:\n print traceback.print_exc()\n raise", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n inertial2Sat = self.spacecraft_state.getAttitude().getRotation()\n\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n self._sTorque = Vector3D.ZERO\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'],\n self.meshDA['Coefs'])\n\n for CoM, normal, area, coefs in iterator:\n position = self.satPos_s.add(CoM)\n\n # compute flux in inertial frame\n sunSatVector = \\\n position.subtract(sunPos)\n r2 = sunSatVector.getNormSq()\n\n rawP = ratio * self.K_REF / r2\n flux = Vector3D(rawP / sqrt(r2), sunSatVector)\n\n # compute Radiation Pressure Force:\n if flux.getNormSq() > Precision.SAFE_MIN:\n # illumination (we are not in umbra)\n # rotate flux to spacecraft frame:\n dot = self.V3_dot(normal, flux)\n\n if dot > 0:\n # the solar array is illuminated backward,\n # fix signs to compute contribution correctly\n dot = -dot\n normal = normal.negate()\n absorbCoeff = coefs[0]\n specularReflCoeff = coefs[1]\n diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff)\n try:\n assert(diffuseReflCoeff >= 0)\n except AssertionError:\n raise AssertionError(\n \"Negative diffuse reflection coefficient not possible!\")\n psr = flux.getNorm()\n # Vallado's equation uses different parameters which are\n # related to our parameters as:\n # cos (phi) = - dot / (psr*area)\n # n = N (n...unit vector)\n # s = -fluxSat / psr (s...unit vector)\n cN = 2 * area * dot * (diffuseReflCoeff / 3 -\n specularReflCoeff * dot / psr)\n cS = (area * dot / psr) * (specularReflCoeff - 1)\n Force = Vector3D(float(cN), normal, float(cS), flux)\n # Force already in spacecraft frame. 
No need to convert\n self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force))\n\n else:\n self._sTorque = Vector3D.ZERO", "def compute_torques(self, rotation, omega, dt):\n # shift time from integration start to time of attitude integration step\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)\n self.satPos_s = np.array([self.satPos_s.x,\n self.satPos_s.y,\n self.satPos_s.z], dtype='float64')\n\n self._compute_gravity_torque(curr_date)\n self._compute_magnetic_torque(curr_date)\n self._compute_solar_torque(curr_date)\n self._compute_aero_torque(curr_date, omega)\n\n return self._gTorque.add(\n self._mTorque.add(\n self._sTorque.add(\n self._aTorque)))", "def evolve(self):\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n gradp_x = self.cc_data.get_var(\"gradp_x\")\n gradp_y = self.cc_data.get_var(\"gradp_y\")\n\n # note: the base state quantities do not have valid ghost cells\n beta0 = self.base[\"beta0\"]\n beta0_edges = self.base[\"beta0-edges\"]\n\n rho0 = self.base[\"rho0\"]\n\n phi = self.cc_data.get_var(\"phi\")\n\n myg = self.cc_data.grid\n\n # ---------------------------------------------------------------------\n # create the limited slopes of rho, u and v (in both directions)\n # ---------------------------------------------------------------------\n limiter = self.rp.get_param(\"lm-atmosphere.limiter\")\n\n ldelta_rx = reconstruction.limit(rho, myg, 1, limiter)\n ldelta_ux = reconstruction.limit(u, myg, 1, limiter)\n ldelta_vx = reconstruction.limit(v, myg, 1, limiter)\n\n ldelta_ry = reconstruction.limit(rho, myg, 2, limiter)\n ldelta_uy = reconstruction.limit(u, myg, 2, limiter)\n ldelta_vy = reconstruction.limit(v, myg, 2, limiter)\n\n # ---------------------------------------------------------------------\n # get the advective velocities\n # ---------------------------------------------------------------------\n\n \"\"\"\n the advective velocities are the normal velocity through each cell\n interface, and are defined on the cell edges, in a MAC type\n staggered form\n\n n+1/2\n v\n i,j+1/2\n +------+------+\n | |\n n+1/2 | | n+1/2\n u + U + u\n i-1/2,j | i,j | i+1/2,j\n | |\n +------+------+\n n+1/2\n v\n i,j-1/2\n\n \"\"\"\n\n # this returns u on x-interfaces and v on y-interfaces. 
These\n # constitute the MAC grid\n if self.verbose > 0:\n print(\" making MAC velocities\")\n\n # create the coefficient to the grad (pi/beta) term\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n # create the source term\n source = self.aux_data.get_var(\"source_y\")\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n rhoprime = self.make_prime(rho, rho0)\n source.v()[:, :] = rhoprime.v()*g/rho.v()\n self.aux_data.fill_BC(\"source_y\")\n\n _um, _vm = lm_interface.mac_vels(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source)\n\n u_MAC = ai.ArrayIndexer(d=_um, grid=myg)\n v_MAC = ai.ArrayIndexer(d=_vm, grid=myg)\n\n # ---------------------------------------------------------------------\n # do a MAC projection to make the advective velocities divergence\n # free\n # ---------------------------------------------------------------------\n\n # we will solve D (beta_0^2/rho) G phi = D (beta_0 U^MAC), where\n # phi is cell centered, and U^MAC is the MAC-type staggered\n # grid of the advective velocities.\n\n if self.verbose > 0:\n print(\" MAC projection\")\n\n # create the coefficient array: beta0**2/rho\n # MZ!!!! probably don't need the buf here\n coeff.v(buf=1)[:, :] = 1.0/rho.v(buf=1)\n coeff.v(buf=1)[:, :] = coeff.v(buf=1)*beta0.v2d(buf=1)**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi-MAC\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi-MAC\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi-MAC\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi-MAC\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # MAC velocities are edge-centered. div{beta_0 U} is cell-centered.\n div_beta_U.v()[:, :] = \\\n beta0.v2d()*(u_MAC.ip(1) - u_MAC.v())/myg.dx + \\\n (beta0_edges.v2dp(1)*v_MAC.jp(1) -\n beta0_edges.v2d()*v_MAC.v())/myg.dy\n\n # solve the Poisson problem\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-12)\n\n # update the normal velocities with the pressure gradient -- these\n # constitute our advective velocities. 
Note that what we actually\n # solved for here is phi/beta_0\n phi_MAC = self.cc_data.get_var(\"phi-MAC\")\n phi_MAC[:, :] = mg.get_solution(grid=myg)\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n coeff_x = myg.scratch_array()\n b = (3, 1, 0, 0) # this seems more than we need\n coeff_x.v(buf=b)[:, :] = 0.5*(coeff.ip(-1, buf=b) + coeff.v(buf=b))\n\n coeff_y = myg.scratch_array()\n b = (0, 0, 3, 1)\n coeff_y.v(buf=b)[:, :] = 0.5*(coeff.jp(-1, buf=b) + coeff.v(buf=b))\n\n # we need the MAC velocities on all edges of the computational domain\n # here we do U = U - (beta_0/rho) grad (phi/beta_0)\n b = (0, 1, 0, 0)\n u_MAC.v(buf=b)[:, :] -= \\\n coeff_x.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.ip(-1, buf=b))/myg.dx\n\n b = (0, 0, 0, 1)\n v_MAC.v(buf=b)[:, :] -= \\\n coeff_y.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.jp(-1, buf=b))/myg.dy\n\n # ---------------------------------------------------------------------\n # predict rho to the edges and do its conservative update\n # ---------------------------------------------------------------------\n _rx, _ry = lm_interface.rho_states(myg.ng, myg.dx, myg.dy, self.dt,\n rho, u_MAC, v_MAC,\n ldelta_rx, ldelta_ry)\n\n rho_xint = ai.ArrayIndexer(d=_rx, grid=myg)\n rho_yint = ai.ArrayIndexer(d=_ry, grid=myg)\n\n rho_old = rho.copy()\n\n rho.v()[:, :] -= self.dt*(\n # (rho u)_x\n (rho_xint.ip(1)*u_MAC.ip(1) - rho_xint.v()*u_MAC.v())/myg.dx +\n # (rho v)_y\n (rho_yint.jp(1)*v_MAC.jp(1) - rho_yint.v()*v_MAC.v())/myg.dy)\n\n self.cc_data.fill_BC(\"density\")\n\n # update eint as a diagnostic\n eint = self.cc_data.get_var(\"eint\")\n gamma = self.rp.get_param(\"eos.gamma\")\n eint.v()[:, :] = self.base[\"p0\"].v2d()/(gamma - 1.0)/rho.v()\n\n # ---------------------------------------------------------------------\n # recompute the interface states, using the advective velocity\n # from above\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" making u, v edge states\")\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 2.0/(rho.v() + rho_old.v())\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n _ux, _vx, _uy, _vy = \\\n lm_interface.states(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source,\n u_MAC, v_MAC)\n\n u_xint = ai.ArrayIndexer(d=_ux, grid=myg)\n v_xint = ai.ArrayIndexer(d=_vx, grid=myg)\n u_yint = ai.ArrayIndexer(d=_uy, grid=myg)\n v_yint = ai.ArrayIndexer(d=_vy, grid=myg)\n\n # ---------------------------------------------------------------------\n # update U to get the provisional velocity field\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" doing provisional update of u, v\")\n\n # compute (U.grad)U\n\n # we want u_MAC U_x + v_MAC U_y\n advect_x = myg.scratch_array()\n advect_y = myg.scratch_array()\n\n advect_x.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(u_xint.ip(1) - u_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(u_yint.jp(1) - u_yint.v())/myg.dy\n\n advect_y.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(v_xint.ip(1) - v_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(v_yint.jp(1) - v_yint.v())/myg.dy\n\n proj_type = self.rp.get_param(\"lm-atmosphere.proj_type\")\n\n if proj_type == 1:\n u.v()[:, :] -= (self.dt*advect_x.v() + self.dt*gradp_x.v())\n v.v()[:, :] -= (self.dt*advect_y.v() + 
self.dt*gradp_y.v())\n\n elif proj_type == 2:\n u.v()[:, :] -= self.dt*advect_x.v()\n v.v()[:, :] -= self.dt*advect_y.v()\n\n # add the gravitational source\n rho_half = 0.5*(rho + rho_old)\n rhoprime = self.make_prime(rho_half, rho0)\n source[:, :] = rhoprime*g/rho_half\n self.aux_data.fill_BC(\"source_y\")\n\n v[:, :] += self.dt*source\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n if self.verbose > 0:\n print(\"min/max rho = {}, {}\".format(self.cc_data.min(\"density\"), self.cc_data.max(\"density\")))\n print(\"min/max u = {}, {}\".format(self.cc_data.min(\"x-velocity\"), self.cc_data.max(\"x-velocity\")))\n print(\"min/max v = {}, {}\".format(self.cc_data.min(\"y-velocity\"), self.cc_data.max(\"y-velocity\")))\n\n # ---------------------------------------------------------------------\n # project the final velocity\n # ---------------------------------------------------------------------\n\n # now we solve L phi = D (U* /dt)\n if self.verbose > 0:\n print(\" final projection\")\n\n # create the coefficient array: beta0**2/rho\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n mg.init_RHS(div_beta_U/self.dt)\n\n # use the old phi as our initial guess\n phiGuess = mg.soln_grid.scratch_array()\n phiGuess.v(buf=1)[:, :] = phi.v(buf=1)\n mg.init_solution(phiGuess)\n\n # solve\n mg.solve(rtol=1.e-12)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of p and update the velocities\n # this differs depending on what we projected.\n gradphi_x, gradphi_y = mg.get_solution_gradient(grid=myg)\n\n # U = U - (beta_0/rho) grad (phi/beta_0)\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= self.dt*coeff.v()*gradphi_x.v()\n v.v()[:, :] -= self.dt*coeff.v()*gradphi_y.v()\n\n # store gradp for the next step\n\n if proj_type == 1:\n gradp_x.v()[:, :] += gradphi_x.v()\n gradp_y.v()[:, :] += gradphi_y.v()\n\n elif proj_type == 2:\n gradp_x.v()[:, :] = gradphi_x.v()\n gradp_y.v()[:, :] = gradphi_y.v()\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n self.cc_data.fill_BC(\"gradp_x\")\n self.cc_data.fill_BC(\"gradp_y\")\n\n # increment the time\n if not self.in_preevolve:\n self.cc_data.t += self.dt\n self.n += 1", "def compute_gravity(self):\r\n # compute the gravity from the Gauss form.\r\n # if it fails, marks divergence\r\n try:\r\n self.gravsolver.solve()\r\n except:\r\n print(\"GRAVITY DIVERGED\")\r\n\r\n # write to log\r\n self.logfile.write(\"%s: STOPPED DUE TO DIVERGENCE IN GRAVITY \\n\" %\r\n (self.convert_time(time.time() -\r\n self.start_time)))\r\n self.diverged = True # set diverged to True, break the run\r\n return\r\n\r\n # split and update the gravity function with the answers\r\n # note the gravscale\r\n gravg, gravs = self.gravgs.split()\r\n\r\n # assign the 
result to the gravity function\r\n self.gravity.assign(project(gravg/self.gravscale, self.V))", "def compute_forces_mesh(self):\n f = self.ptclgrid.grid[:self.size,:self.size]*self.grad_phi_mesh()\n return f", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = self.inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n sunPos = np.array([sunPos.x, sunPos.y, sunPos.z], dtype='float64')\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = self.meshDA['Area_np']\n coefs = self.meshDA['Coefs_np']\n\n sunSatVector = self.satPos_s + CoM - sunPos\n r = np.linalg.norm(sunSatVector, axis=1)\n rawP = ratio * self.K_REF / (r**2)\n flux = (rawP / r)[:, None] * sunSatVector\n # eliminate arrays where zero flux\n fluxNorm = np.linalg.norm(flux, axis=1)\n Condflux = fluxNorm**2 > Precision.SAFE_MIN\n flux = flux[Condflux]\n normal = normal[Condflux]\n\n # dot product for multidimensional arrays:\n dot = np.einsum('ij,ij->i', flux, normal)\n dot[dot > 0] = dot[dot > 0] * (-1.0)\n if dot.size > 0:\n normal[dot > 0] = normal[dot > 0] * (-1.0)\n\n cN = 2 * area * dot * (coefs[:, 2] / 3 - coefs[:, 1] * dot / fluxNorm)\n cS = (area * dot / fluxNorm) * (coefs[:, 1] - 1)\n force = cN[:, None] * normal + cS[:, None] * flux\n\n sT = np.sum(np.cross(CoM, force), axis=0)\n\n self._sTorque = Vector3D(float(sT[0]), float(sT[1]), float(sT[2]))\n\n else:\n self._sTorque = Vector3D.ZERO", "def create_deltas_tensor(self, deltas):\n T = self.T\n N = self.N\n neighs = self.neighs\n self.deltas = {}\n for n in range(N):\n self.deltas[n] = self.get_empty_matrix((len(neighs[n]), T))\n for cc in deltas:\n t = int(cc[0])\n if t >= T:\n raise ValueError(\"Contact time above T!\")\n i = int(cc[1])\n j = int(cc[2])\n delta = cc[3]\n #lam = np.clip(lam, 0, 1 - self.err_max_lambda)\n #print(t,i,j,lam)\n index_i = neighs[j].index(i)\n self.deltas[j][index_i][t] = delta\n\n '''def create_delta_tensor(self, gamma):\n \"\"\"\n Deltas values for the computation of parameters of rate of contagion\n \"\"\"\n N = self.N\n self.deltas = {}\n for n in range(N):\n self.deltas[n] = self.logp_lam[n]/gamma\n '''", "def Evolve(self, data, t, dt, **kwargs):\n\n if self.grid.expansion:\n z = self.grid.cosm.TimeToRedshiftConverter(0, t, self.grid.zi)\n dz = dt / self.grid.cosm.dtdz(z)\n else:\n z = dz = 0\n\n if 'he_1' in self.grid.absorbers:\n i = self.grid.absorbers.index('he_1')\n self.chemnet.psi[...,i] *= data['he_2'] / data['he_1']\n\n # Make sure we've got number densities\n if 'n' not in data.keys():\n data['n'] = self.grid.particle_density(data, z)\n\n newdata = {}\n for field in data:\n newdata[field] = data[field].copy()\n\n if not kwargs:\n kwargs = self.rcs.copy()\n\n kwargs_by_cell = self._sort_kwargs_by_cell(kwargs)\n\n self.q_grid = np.zeros_like(self.zeros_gridxq)\n self.dqdt_grid = np.zeros_like(self.zeros_gridxq)\n\n # For debugging\n self.kwargs_by_cell = kwargs_by_cell\n\n # Loop over grid and solve chemistry\n for cell in range(self.grid.dims):\n\n # Construct q vector\n q = np.zeros(len(self.grid.evolving_fields))\n for i, species in enumerate(self.grid.evolving_fields):\n q[i] = data[species][cell]\n\n kwargs_cell = kwargs_by_cell[cell]\n\n if self.rtON:\n args = (cell, kwargs_cell['k_ion'], kwargs_cell['k_ion2'],\n kwargs_cell['k_heat'], kwargs_cell['k_heat_lya'],\n data['n'][cell], t)\n else:\n args = (cell, self.grid.zeros_absorbers,\n 
self.grid.zeros_absorbers2, self.grid.zeros_absorbers,\n 0.0, data['n'][cell], t)\n\n self.solver.set_initial_value(q, 0.0).set_f_params(args).set_jac_params(args)\n\n self.solver.integrate(dt)\n\n self.q_grid[cell] = q.copy()\n self.dqdt_grid[cell] = self.chemnet.dqdt.copy()\n\n for i, value in enumerate(self.solver.y):\n newdata[self.grid.evolving_fields[i]][cell] = self.solver.y[i]\n\n # Compute particle density\n newdata['n'] = self.grid.particle_density(newdata, z - dz)\n\n # Fix helium fractions if approx_He==True.\n if self.grid.pf['include_He']:\n if self.grid.pf['approx_He']:\n newdata['he_1'] = newdata['h_1']\n newdata['he_2'] = newdata['h_2']\n newdata['he_3'] = np.zeros_like(newdata['h_1'])\n\n return newdata", "def _compute_solar_torque(self):\n pass", "def update(self, gyro, accel, deltaT):\r\n gyro = np.array(gyro)\r\n accel = np.array(accel)\r\n q = self.quaternion\r\n qDot1 = 0.5 * (-q[1] * gyro[0] - q[2] * gyro[1] - q[3] * gyro[2])\r\n qDot2 = 0.5 * ( q[0] * gyro[0] + q[2] * gyro[2] - q[3] * gyro[1])\r\n qDot3 = 0.5 * ( q[0] * gyro[1] - q[1] * gyro[2] + q[3] * gyro[0])\r\n qDot4 = 0.5 * ( q[0] * gyro[2] + q[1] * gyro[1] - q[2] * gyro[0])\r\n\r\n qdot = [qDot1, qDot2, qDot3, qDot4]\r\n\r\n # Normalise accelerometer measurement\r\n if norm(accel) is 0:\r\n warnings.warn(\"accelerometer is zero\")\r\n else:\r\n accel /= norm(accel)\r\n\r\n # Auxiliary variables to avoid repeated calculations\r\n _2q0 = 2.0 * q[0]\r\n _2q1 = 2.0 * q[1]\r\n _2q2 = 2.0 * q[2]\r\n _2q3 = 2.0 * q[3]\r\n _4q0 = 4.0 * q[0]\r\n _4q1 = 4.0 * q[1]\r\n _4q2 = 4.0 * q[2]\r\n _8q1 = 8.0 * q[1]\r\n _8q2 = 8.0 * q[2]\r\n q0q0 = q[0] * q[0]\r\n q1q1 = q[1] * q[1]\r\n q2q2 = q[2] * q[2]\r\n q3q3 = q[3] * q[3]\r\n\r\n # Gradient descent algorithm corrective step\r\n s0 = _4q0 * q2q2 + _2q2 * accel[0] + _4q0 * q1q1 - _2q1 * accel[1]\r\n s1 = _4q1 * q3q3 - _2q3 * accel[0] + 4.0 * q0q0 * q[1]- _2q0 * accel[1] - _4q1 + _8q1 * q1q1 + _8q1 * q2q2 + _4q1 * accel[2]\r\n s2 = 4.0 * q0q0 * q[2] + _2q0 * accel[0] + _4q2 * q3q3 - _2q3 * accel[1] - _4q2 + _8q2 * q1q1 + _8q2 * q2q2 + _4q2 * accel[2]\r\n s3 = 4.0 * q1q1 * q[3] - _2q1 * accel[0] + 4.0 * q2q2 * q[3] - _2q2 * accel[1]\r\n\r\n s = np.array([s0, s1, s2, s3])\r\n s /= norm(s)\r\n\r\n # Apply Feedback Step\r\n qdot -= self.beta*s #(q * Quaternion(0, gyroscope[0], gyroscope[1], gyroscope[2])) * 0.5 - self.beta * step.T\r\n\r\n # Integrate to yield quaternion\r\n q += qdot * self.samplePeriod\r\n self.quaternion /= norm(q) # normalise quaternion\r", "def vlbi_grav_delay(dset):\n eph = apriori.get(\"ephemerides\", time=dset.time)\n grav_delay = np.zeros(dset.num_obs)\n\n # List of celestial bodies. 
Major moons are also recommended, like Titan, Ganymedes, ...\n bodies = [\n \"mercury barycenter\",\n \"venus barycenter\",\n \"earth\",\n \"moon\",\n \"mars barycenter\",\n \"jupiter barycenter\",\n \"saturn barycenter\",\n \"uranus barycenter\",\n \"neptune barycenter\",\n \"pluto barycenter\",\n \"sun\",\n ]\n\n bcrs_vel_earth = eph.vel_bcrs(\"earth\")\n\n baseline_gcrs = dset.site_pos_2.gcrs.pos - dset.site_pos_1.gcrs.pos\n src_dot_baseline = (dset.src_dir.unit_vector[:, None, :] @ baseline_gcrs.mat)[:, 0, 0]\n\n # Equation 11.6\n bcrs_site1 = eph.pos_bcrs(\"earth\") + dset.site_pos_1.gcrs.pos.val\n bcrs_site2 = eph.pos_bcrs(\"earth\") + dset.site_pos_2.gcrs.pos.val\n\n for body in bodies:\n try:\n GM_name = \"GM\" if body == \"earth\" else f\"GM_{body.split()[0]}\"\n GM_body = constant.get(GM_name, source=eph.ephemerides)\n except KeyError:\n log.warn(\n f\"The GM value of {body.split()[0].title()} is not defined for {eph.ephemerides}. \"\n f\"Correction set to zero.\"\n )\n continue\n bcrs_body_t1 = eph.pos_bcrs(body)\n\n # Equation 11.3\n delta_t = TimeDelta(\n np.maximum(0, dset.src_dir.unit_vector[:, None, :] @ (bcrs_body_t1 - bcrs_site1)[:, :, None])[:, 0, 0]\n * Unit.second2day\n / constant.c,\n fmt=\"jd\",\n scale=\"tdb\",\n )\n time_1J = dset.time.tdb - delta_t\n\n # Equation 11.4\n bcrs_body_t1J = eph.pos_bcrs(body, time=time_1J)\n vector_body_site1 = bcrs_site1 - bcrs_body_t1J\n\n # Equation 11.5\n vector_body_site2 = bcrs_site2 - bcrs_body_t1J - bcrs_vel_earth / constant.c * src_dot_baseline[:, None]\n\n # Needed for equation 11.1\n norm_body_site1 = np.linalg.norm(vector_body_site1, axis=1)\n src_dot_vector_body_site1 = (dset.src_dir.unit_vector[:, None, :] @ vector_body_site1[:, :, None])[:, 0, 0]\n nomJ = norm_body_site1 + src_dot_vector_body_site1\n denomJ = (\n np.linalg.norm(vector_body_site2, axis=1)\n + (dset.src_dir.unit_vector[:, None, :] @ vector_body_site2[:, :, None])[:, 0, 0]\n )\n\n # Main correction (equation 11.1)\n grav_delay += 2 * GM_body / constant.c ** 2 * np.log(nomJ / denomJ)\n\n # Higher order correction (equation 11.14)\n baseline_dot_vector_body_site1 = (baseline_gcrs.val[:, None, :] @ vector_body_site1[:, :, None])[:, 0, 0]\n grav_delay += (\n 4\n * GM_body ** 2\n / constant.c ** 4\n * (baseline_dot_vector_body_site1 / norm_body_site1 + src_dot_baseline)\n / (norm_body_site1 + src_dot_vector_body_site1) ** 2\n )\n\n # Denominator (equation 11.9)\n denominator = (\n 1\n + (\n (bcrs_vel_earth + dset.site_pos_2.gcrs.vel.val)[:, None, :]\n @ dset.src_dir.unit_vector[:, :, None]\n / constant.c\n )[:, 0, 0]\n )\n\n return grav_delay / denominator", "def solve(self,init=None,g_init=1e-3,g_step=5e-3,g_fin=None,evol=False,movingGrid=False):\n if(g_fin==None): g_fin=self.g\n #Check if all signs are correct\n if(g_fin<0):\n if(g_step>0): g_step*=-1.\n if(g_init>0): g_init*=-1.\n else:\n if(g_step<0): g_step*=-1.\n if(g_init<0): g_step*=-1.\n\n #If no initial distribution is given, start from the BCS ground state\n if(init==None): init=[1 if i<self.N else 0 for i in range(self.n)]\n var_init=np.array([-2.*init[i]-g_init/(1-2.*init[i])*np.sum([self.XXZ.Z(j,i)*(init[j]-init[i]) for j in range(self.n) if j!=i]) for i in range(self.n)])\n n_step=int((g_fin-g_init)/g_step)\n g=g_init\n\n #Define necessary variables if evol or movingGrid=True\n if(evol or movingGrid):\n var_evol=np.zeros([n_step,self.n])\n g_evol=np.zeros(n_step)\n if(movingGrid):\n rap_evol = np.zeros([n_step,self.N],dtype=complex)\n rap_evol[0] = [self.levels[i] for i in range(self.n) if 
init[i]!=0 ]\n rap=np.array([self.levels[i]+0.5*np.abs(np.random.rand()) for i in range(self.n) if init[i]!=0])\n grid=np.zeros(self.N+1,dtype=complex)\n grid[0]=1e3\n for k in range(self.N): grid[k+1]=rap[k]\n n_grid=n_step/20 #Calculates rapidities at 20 intermediate steps\n\n #Gradually increase the coupling constant g and solve iteratively at each step starting from the Taylor approximation from the previous step\n for i in range(n_step):\n var_new=self.newtonraphson(g,var_init)\n der=self.get_derivative(var_new,g)\n #var_init=self.taylor_expansion(g,g_step,var_new)\n var_init = var_new+g_step*der\n g+=g_step\n #print g\n\n #Save variables at current step if evol =True\n if(evol or movingGrid):\n var_evol[i]=var_init\n g_evol[i]=g\n if(movingGrid and i%n_grid==0 and i!=0):\n #Method for obtaining the rapidities starting from the set of Lambda_i\n rf=RootFinder(self.XXZ,var_evol[i]/g_evol[i],g_evol[i],self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap=lm.laguerre()\n rap_evol[i]=np.sort(lm.laguerre())\n for k in range(self.N): grid[k+1]=rap[k]\n grid[0]=10*max(rap)\n elif(movingGrid and i!=0):\n rf=RootFinder(self.XXZ,var_evol[i]/g_evol[i],g_evol[i],self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap_evol[i]=np.sort(lm.laguerre())\n \n \n #One final iterative solution at g=g_fin\n self.solution=self.newtonraphson(g_fin,var_init)\n #Calculate the occupation numbers\n self.occupation=0.5*(-1.-self.solution+g_fin*self.get_derivative(self.solution,g_fin))\n\n #One final calculation of the rapidities\n if(movingGrid):\n rf=RootFinder(self.XXZ,self.solution/g_fin,g_fin,self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap=lm.laguerre()\n self.rapidities=rap\n\n if movingGrid: return [g_evol,var_evol,rap_evol]\n if evol: return [g_evol,var_evol]\n return self.solution", "def calculateElementCoefficients(self):\n #\n #get u,grad(u), and grad(u)Xgrad(w) at the quadrature points\n #\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if self.q.has_key(('grad(u)',cj)):\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n #\n #get functions of (t,x,u) at the quadrature points\n #\n self.coefficients.evaluate(self.timeIntegration.t,self.q)\n log(\"Coefficients on element\",level=10,data=self.q)\n #\n # time integration is handled directly in ELLAM weak approximation, don't have a hook for\n # doing that via a time integration object (could if it were a direct Lagrange Galerkin formulation I believe)\n # however, need to set time integration's m_tmp if use that anywhere\n #if self.timeTerm:\n # self.timeIntegration.calculateElementCoefficients(self.q)\n\n #todo eventually can add nonlinear potential here\n\n #cek and mwf need to go through this section to clean up, some of next two blocks could go to calcQuad\n #\n #todo need non-diagonal dependence?\n for ci in range(self.nc):\n cfemIntegrals.calculateCFLADR(self.elementEffectiveDiametersArray,\n self.q[('dm',ci,ci)],\n self.q[('df',ci,ci)],#could just be velocity\n self.q[('cfl',ci)])", "def magnetic_reynolds(uu, param, grid, aa=list(), bb=list(), jj=list(),\n nghost=3, lmix=True):\n if len(bb) ==0 and len(aa) ==0 and len(jj) ==0:\n print('magnetic_reynolds WARNING: no aa, bb nor jj provided\\n'+\n 'aa or bb must be provided or aa for only hyper resistivity') \n #resistive force\n lres, lhyper3 = False, False\n for iresi in param.iresistivity:\n iresi = str.strip(iresi,'\\n')\n if 'hyper' not in iresi and len(iresi) > 0:\n lres = True\n if 
'hyper3' in iresi:\n lhyper3 = True\n fresi = np.zeros_like(uu)\n if lres:\n if lhyper3:\n lhyper3 = lhyper3==lmix\n if len(jj) == 0:\n if len(aa) == 0:\n print('magnetic_reynolds WARNING: calculating jj without aa\\n',\n 'provide aa or jj directly for accurate boundary values')\n jj = curl(bb,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y, \n coordinate_system=param.coord_system)\n else:\n jj = curl2(aa,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y, \n coordinate_system=param.coord_system)\n for j in range(0,3):\n jj[j, :nghost,:,:] = jj[j,-2*nghost:-nghost,:,:]\n jj[j,-nghost:,:,:] = jj[j, nghost: 2*nghost,:,:]\n jj[j,:, :nghost,:] = jj[j,:,-2*nghost:-nghost,:]\n jj[j,:,-nghost:,:] = jj[j,:, nghost: 2*nghost,:]\n jj[j,:,:, :nghost] = jj[j,:,:,-2*nghost:-nghost]\n jj[j,:,:,-nghost:] = jj[j,:,:, nghost: 2*nghost]\n fresi = fresi + param.eta*param.mu0*jj\n for iresi in param.iresistivity:\n iresi = str.strip(iresi,'\\n')\n if 'eta-const' not in iresi and 'hyper' not in iresi\\\n and len(iresi) > 0:\n print('magnetic_reynolds WARNING: '+iresi+' not implemented\\n'+\n 'terms may be missing from the standard resistive forces')\n if lhyper3:\n if len(aa) == 0:\n print('magnetic_reynolds WARNING: no aa provided\\n'+\n 'aa must be provided for hyper resistivity')\n return 1\n else:\n del6a = np.zeros_like(aa)\n for j in range(0,3):\n del6a[j] = del6(aa[j],grid.dx,grid.dy,grid.dz)\n del6a[j, :nghost,:,:] = del6a[j,-2*nghost:-nghost,:,:]\n del6a[j,-nghost:,:,:] = del6a[j, nghost: 2*nghost,:,:]\n del6a[j,:, :nghost,:] = del6a[j,:,-2*nghost:-nghost,:]\n del6a[j,:,-nghost:,:] = del6a[j,:, nghost: 2*nghost,:]\n del6a[j,:,:, :nghost] = del6a[j,:,:,-2*nghost:-nghost]\n del6a[j,:,:,-nghost:] = del6a[j,:,:, nghost: 2*nghost]\n #del6 for non-cartesian tba\n #del6a[j] = del6(aa[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n # coordinate_system=param.coord_system)\n #effective at l > 5 grid.dx? \n fresi = fresi + param.eta_hyper3*del6a\n del(del6a)\n fresi2 = np.sqrt(dot2(fresi))\n del(fresi)\n #advective force\n if len(bb) == 0:\n if len(aa) == 0:\n print('magnetic_reynolds WARNING: calculating uu x bb without bb\\n',\n 'provide aa or bb directly to proceed')\n return 1\n else:\n bb = curl(aa,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y, \n coordinate_system=param.coord_system)\n for j in range(0,3):\n bb[j, :nghost,:,:] = bb[j,-2*nghost:-nghost,:,:]\n bb[j,-nghost:,:,:] = bb[j, nghost: 2*nghost,:,:]\n bb[j,:, :nghost,:] = bb[j,:,-2*nghost:-nghost,:]\n bb[j,:,-nghost:,:] = bb[j,:, nghost: 2*nghost,:]\n bb[j,:,:, :nghost] = bb[j,:,:,-2*nghost:-nghost]\n bb[j,:,:,-nghost:] = bb[j,:,:, nghost: 2*nghost]\n advec = cross(uu,bb)\n advec2 = np.sqrt(dot2(advec))\n del(advec)\n #avoid division by zero\n if fresi2.max() > 0:\n fresi2[np.where(fresi2==0)] = fresi2[np.where(fresi2>0)].min()\n Rm = advec2/fresi2\n #set minimum floor to exclude zero-valued Rm \n if Rm.max() > 0:\n Rm[np.where(Rm==0)] = Rm[np.where(Rm>0)].min()\n else:\n print('Rm undefined')\n else:\n Rm = advec2\n print('Rm undefined')\n return Rm", "def preevolve(self):\n\n self.in_preevolve = True\n\n myg = self.cc_data.grid\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n self.cc_data.fill_BC(\"density\")\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n # 1. do the initial projection. 
This makes sure that our original\n # velocity field satisties div U = 0\n\n # the coefficient for the elliptic equation is beta_0^2/rho\n coeff = 1/rho\n beta0 = self.base[\"beta0\"]\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # next create the multigrid object. We defined phi with\n # the right BCs previously\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n # solve D (beta_0^2/rho) G (phi/beta_0) = D( beta_0 U )\n\n # set the RHS to divU and solve\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-10)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi = self.cc_data.get_var(\"phi\")\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of phi and update the\n # velocities\n # FIXME: this update only needs to be done on the interior\n # cells -- not ghost cells\n gradp_x, gradp_y = mg.get_solution_gradient(grid=myg)\n\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= coeff.v()*gradp_x.v()\n v.v()[:, :] -= coeff.v()*gradp_y.v()\n\n # fill the ghostcells\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n # 2. now get an approximation to gradp at n-1/2 by going through the\n # evolution.\n\n # store the current solution -- we'll restore it in a bit\n orig_data = patch.cell_center_data_clone(self.cc_data)\n\n # get the timestep\n self.method_compute_timestep()\n\n # evolve\n self.evolve()\n\n # update gradp_x and gradp_y in our main data object\n new_gp_x = self.cc_data.get_var(\"gradp_x\")\n new_gp_y = self.cc_data.get_var(\"gradp_y\")\n\n orig_gp_x = orig_data.get_var(\"gradp_x\")\n orig_gp_y = orig_data.get_var(\"gradp_y\")\n\n orig_gp_x[:, :] = new_gp_x[:, :]\n orig_gp_y[:, :] = new_gp_y[:, :]\n\n self.cc_data = orig_data\n\n if self.verbose > 0:\n print(\"done with the pre-evolution\")\n\n self.in_preevolve = False", "def _ice_d3gdt3(temp,pres):\n # Reduced variables\n tn = temp/_TTP\n pn = pres/_PTPE\n _PI0 = _PATM/_PTPE\n g_ttt = 0.\n \n # Residual terms including complex numbers\n sr = [_GCOEFFS[1], complex(0.0,0.0)]\n for (k,rk) in enumerate(_GCOEFFS[2]):\n sr[1] += rk * (pn-_PI0)**k\n for (tk,s) in zip(_GCOEFFS[3],sr):\n term = 1./(tk-tn)**2 - 1./(tk+tn)**2\n g_ttt += (s * term).real / _TTP**2\n return g_ttt", "def _compute_aero_torque(self, curr_date, omega):\n if self._to_add[3]:\n # assuming constant atmosphere condition over spacecraft\n # error is of order of 10^-17\n rho = self.AtmoModel.getDensity(curr_date, self.satPos_i, self.in_frame)\n vAtm_i = self.AtmoModel.getVelocity(curr_date, self.satPos_i, self.in_frame)\n\n satVel = self.inertial2Sat.applyTo(self.satVel_i)\n vAtm = self.inertial2Sat.applyTo(vAtm_i)\n\n dragCoeff = self.meshDA['Cd']\n liftRatio = 0.0 # no lift considered\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = np.asarray(self.meshDA['Area'])\n satVel = np.array([satVel.x, satVel.y, satVel.z])\n vAtm = np.array([vAtm.x, vAtm.y, 
vAtm.z])\n\n relativeVelocity = vAtm - (satVel + (np.cross(omega, CoM)))\n vNorm = np.linalg.norm(relativeVelocity, axis=1)\n vDir = np.reciprocal(vNorm[:, None]) * relativeVelocity\n\n dot = np.einsum('ij,ij->i', normal, vDir)\n\n dotCondition = dot < 0\n dot = dot[dotCondition]\n if dot.size > 0:\n vDir = vDir[dotCondition]\n vNorm = vNorm[dotCondition]\n normal = normal[dotCondition]\n area = area[dotCondition]\n CoM = CoM[dotCondition]\n\n coeff = 0.5 * rho * dragCoeff * (vNorm**2)\n oMr = 1.0 - liftRatio\n f = (coeff * area * dot)[:, None]\n\n aT = np.sum(np.cross(CoM, oMr * np.absolute(f) * vDir + 2 * liftRatio * f * normal), axis=0)\n\n self._aTorque = Vector3D(float(aT[0]), float(aT[1]), float(aT[2]))\n\n else:\n self._aTorque = Vector3D.ZERO", "def compute_force(X, V, bl, ip, box, gamma, kT, dt):\n N = len(X)\n F = np.zeros((N, 3))\n Fcube = np.zeros((N, N, 3))\n inv_box = np.zeros((3, 3))\n for i in range(3): inv_box[i, i] = 1.0 / box[i, i]\n g = np.zeros(3)\n rij = np.zeros(3)\n vij = np.zeros(3)\n a = 0.0\n nr = 0.0\n fpair = 0.0\n\n vir = 0.0\n sigma = np.zeros(3)\n volume = np.linalg.det(box)\n\n for i in range(N):\n for j in range(i):\n rij = X[i] - X[j]\n g = matvecmul(inv_box, rij)\n g = g - np.round_(g, 0, np.empty_like(g))\n rij = matvecmul(box, g)\n vij = V[i] - V[j]\n\n a = ip[bl[i]-1, bl[j]-1]\n nr = norm_numba(rij)\n\n fc = a * wr(nr)\n fpair = fc \\\n - gamma * wr(nr)**2 * dot_numba(rij, vij) / nr \\\n + sqrt(2.0*gamma*kT) * wr(nr) * np.random.randn() / sqrt(dt)\n Fcube[i, j, :] = fpair / nr * rij\n Fcube[j, i, :] = -fpair / nr * rij\n\n vir += Fcube[i, j, :] @ rij\n sigma += Fcube[i, j, :] * rij\n\n # kinetic part of stress tensor\n for i in range(N):\n sigma += V[i] * V[i]\n\n sigma = sigma / volume\n F = np.sum(Fcube, 1)\n\n return F, vir, sigma", "def main():\r\n #Drag Force Equation: 1/2 * rho * Cd * A * v^2\r\n\r\n #User-Defined Constants\r\n global m\r\n global v0\r\n global theta\r\n global rho #Fluid Density\r\n global A #Cross-sectional Area\r\n global Cd #Drag coefficient\r\n global tStep\r\n global g\r\n\r\n m = 1\r\n v0 = 30\r\n theta = math.radians(45)\r\n rho = 1.225\r\n A = 0.05\r\n Cd = 0.5 #A ball is approx. 
0.5\r\n tStep = 0.005\r\n g = 9.8\r\n\r\n\r\n #Data Structures\r\n global tHist\r\n global xHist\r\n global yHist\r\n global thetaHist\r\n global vHist\r\n global vXHist\r\n global vYHist\r\n tHist = [] #list for all time steps\r\n xHist = [] #list for all x position steps\r\n yHist = [] #list for all y position steps\r\n thetaHist = [] #List for all theta at every time step\r\n vHist = [] #list for all velocities at every time step\r\n vXHist = [] #list for all x-axis velocities at every time step\r\n vYHist = [] #list for all y-axis velocities at every time step\r\n\r\n #Initialize intial values\r\n tHist.append(0.0)\r\n xHist.append(0.0)\r\n yHist.append(0.0)\r\n thetaHist.append(theta)\r\n vHist.append(v0)\r\n vXHist.append(v0 * math.cos(theta))\r\n vYHist.append(v0 * math.sin(theta))\r\n vTheta = math.atan(vYHist[0] / vXHist[0])\r\n # print(\"t: \" + str(tHist[0]))\r\n # print(\"x: \" + str(xHist[0]))\r\n # print(\"y: \" + str(yHist[0]))\r\n # print(\"v: \" + str(vHist[0]))\r\n # print(\"Vx: \" + str(vXHist[0]))\r\n # print(\"Vy: \" + str(vYHist[0]))\r\n\r\n #Convenience variables\r\n global k\r\n\r\n counter = 1\r\n #Loop until the y-displacement becomes negative (projectile reaches ground again)\r\n while True:\r\n tHist.append(counter * tStep) #increment time\r\n print(\"t: \" + str(tHist[counter]))\r\n\r\n #This large hunk is the solution to the net force differential equation in the x-axis\r\n # oneOverVX = (1/vXHist[counter-1]) + (((rho*A*Cd*math.cos(thetaHist[counter-1]))/(2*m))*(tStep)) #STABLE\r\n # oneOverVX = (1/vXHist[counter-1]) + (((rho*A*Cd)/(2*m))*(tStep))\r\n # oneOverVX = (1/vHist[counter-1]) + (((rho*A*Cd*math.cos(thetaHist[counter-1]))/(2*m))*(tStep))\r\n oneOverVX = (1/vXHist[counter-1]) + ((rho*A*Cd)/(2*m*math.cos(thetaHist[counter-1]))*(tStep)) #This is one over the solution for velocity in the x-axis net force differential equation\r\n vXHist.append(1 / oneOverVX) #Adding the velocity to the list of velocities\r\n\r\n vY0 = vYHist[counter-1] #Convenience variable\r\n # k = 0.5 * rho * A * Cd * math.sin(abs(thetaHist[counter-1])) #STABLE\r\n # k = 0.5 * rho * A * Cd\r\n k = (rho * A * Cd) / (2 * math.sin(abs(thetaHist[counter-1]))) #Convenience variable\r\n print(\"k: \" + str(k))\r\n print(\"vX: \" + str(vXHist[counter]))\r\n rootGMK = math.sqrt(g*m*k) #Convenience variable\r\n if vYHist[counter-1] > 0.0: #If the projectile is going upwards\r\n #Solving the y-axis differential equation for velocity\r\n equationRight = -rootGMK * ((tStep/m) - (math.atan((k*vY0)/(rootGMK))/rootGMK))\r\n vYHist.append((math.tan(equationRight) * rootGMK) / k)\r\n elif vYHist[counter-1] < 0.0: #If the projectile is going downwards\r\n #Solving the y-axis differential equation for velocity\r\n\r\n # Hand-solved integral\r\n # exponent = -(2*tStep*rootGMK)/m\r\n # numerator = g*m*math.exp(exponent) - math.exp(exponent)*vY0*rootGMK - vY0*rootGMK - g*m\r\n # denominator = math.exp(exponent)*(vY0-rootGMK) - vY0*k - rootGMK\r\n # vYHist.append(numerator / denominator)\r\n\r\n #Wolfram Alpha arctanh integral\r\n arctanh =(vY0*math.sqrt(k))/(math.sqrt(g*m))\r\n print(\"arctanh: \" + str(arctanh))\r\n equationRight = (np.arctanh(arctanh))/(rootGMK) - (tStep/m)\r\n vYHist.append(np.tanh(rootGMK * equationRight) * ((math.sqrt(g*m))/(math.sqrt(k))))\r\n else: #If current y velocity is 0\r\n vYHist.append(vY0 - g*tStep)\r\n print(\"vY: \" + str(vYHist[counter]))\r\n\r\n vHist.append(math.hypot(vXHist[counter], vYHist[counter])) #Calculate the net velocity and add it to the velocities list\r\n 
print(\"v: \" + str(vHist[counter]))\r\n thetaHist.append(math.atan(vYHist[counter]/vXHist[counter])) #Calculate the current angle based on the velocities and add it to the theta list\r\n print(\"0: \" + str(math.degrees(thetaHist[counter])))\r\n\r\n x0 = xHist[counter-1]\r\n y0 = yHist[counter-1]\r\n\r\n # yIntegral = trigintegrate()\r\n\r\n \"\"\"\r\n Note: What I wanted to do here was to integrate the velocity functions over the time interval to find the exact\r\n changes in position. Unfortunately, I was running short of time and decided it was not worth it to move forward with\r\n this final step, and instead worked on the presentation and testing different cases.\r\n \"\"\"\r\n xHist.append(x0 + vXHist[counter]*tStep) #Calculate new x position using x = x0 + vt\r\n yHist.append(y0 + vYHist[counter]*tStep) #Calculate new y position using y = y0 + vt\r\n print(\"x: \" + str(xHist[counter]))\r\n print(\"y: \" + str(yHist[counter]))\r\n print()\r\n\r\n # xHist.append(xHist[counter-1] + vXHist[counter-1]*tStep + 0.5*aXHist[counter-1]*tStep**2)\r\n # yHist.append(yHist[counter-1] + vYHist[counter-1]*tStep + 0.5*aYHist[counter-1]*tStep**2)\r\n # vXHist.append(vXHist[counter-1] + aXHist[counter-1]*tStep)\r\n # vYHist.append(vYHist[counter-1] + aYHist[counter-1]*tStep)\r\n # vHist.append(math.hypot(vXHist[counter], vYHist[counter]))\r\n #\r\n # vTheta = math.atan(vYHist[counter] / vXHist[counter])\r\n # xDragAccel = -0.5*rho*Cd*A*vHist[counter]**2*math.cos(vTheta) / m\r\n # yDragAccel = -math.copysign(0.5*rho*Cd*A*vHist[counter]**2*math.sin(vTheta) / m, vYHist[counter])\r\n #\r\n # aXHist.append(xDragAccel)\r\n # aYHist.append(-g*tStep + yDragAccel)\r\n\r\n if vYHist[counter-1] > 0.0 and vYHist[counter] < 0.0: #Check if the projectile has reached it's peak by checking for a critical point\r\n print(\"max height reached at time=\" + str(tHist[counter]))\r\n # break\r\n\r\n # print(\"t: \" + str(tHist[counter]))\r\n # print(\"x: \" + str(xHist[counter]))\r\n # print(\"y: \" + str(yHist[counter]))\r\n # print(\"Vx: \" + str(vXHist[counter]))\r\n # print(\"Vy: \" + str(vYHist[counter]))\r\n # print(\"Ax: \" + str(aXHist[counter]))\r\n # print(\"Ay: \" + str(aYHist[counter]))\r\n\r\n if yHist[counter] < 0 or counter > 99999: #End the loop if the projectile has reached the ground (or limit the number of iterations to avoid computer death)\r\n break\r\n\r\n counter += 1\r\n\r\n plotData()", "def fvm(states: States, grid: Gridlines, topo: Topography, config: Config, runtime: DummyDict):\n # pylint: disable=invalid-name\n\n # calculate source term contributed from topography gradients\n states = topography_gradient(states, topo, config.params.gravity)\n\n # calculate slopes of piecewise linear approximation\n states = minmod_slope(states, grid, config.params.theta, runtime.tol)\n\n # interpolate to get discontinuous conservative quantities at cell faces\n states = get_discontinuous_cnsrv_q(states, grid)\n\n # fix non-physical negative depth\n states = correct_negative_depth(states, topo)\n\n # get non-conservative variables at cell faces\n states = decompose_variables(states, topo, runtime.epsilon)\n\n # get local speed at cell faces\n states = get_local_speed(states, config.params.gravity)\n\n # get discontinuous PDE flux at cell faces\n states = get_discontinuous_flux(states, topo, config.params.gravity)\n\n # get common/continuous numerical flux at cell faces\n states = central_scheme(states, runtime.tol)\n\n # get final right hand side\n states.rhs.w = \\\n (states.face.x.num_flux.w[:, :-1] - 
states.face.x.num_flux.w[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.w[:-1, :] - states.face.y.num_flux.w[1:, :]) / grid.y.delta + \\\n states.src.w\n\n states.rhs.hu = \\\n (states.face.x.num_flux.hu[:, :-1] - states.face.x.num_flux.hu[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.hu[:-1, :] - states.face.y.num_flux.hu[1:, :]) / grid.y.delta + \\\n states.src.hu\n\n states.rhs.hv = \\\n (states.face.x.num_flux.hv[:, :-1] - states.face.x.num_flux.hv[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.hv[:-1, :] - states.face.y.num_flux.hv[1:, :]) / grid.y.delta + \\\n states.src.hv\n\n # remove rounding errors\n states.rhs = remove_rounding_errors(states.rhs, runtime.tol)\n\n # obtain the maximum safe dt\n amax = nplike.max(nplike.maximum(states.face.x.plus.a, -states.face.x.minus.a))\n bmax = nplike.max(nplike.maximum(states.face.y.plus.a, -states.face.y.minus.a))\n max_dt = min(0.25*grid.x.delta/amax, 0.25*grid.y.delta/bmax)\n\n return states, max_dt", "def weight_update_conjugate_gradient(self, network):\n # compute beta: Fletcher-Reeves\n num = 0.0\n for l, layer in enumerate(network.layers):\n num += np.sum(self.dc_db[l] ** 2)\n num += np.sum(self.dc_dq[l] ** 2)\n num += np.sum(self.dc_drx_inp[l] ** 2)\n num += np.sum(self.dc_dry_inp[l] ** 2)\n num += np.sum(self.dc_drx_pos_out[l] ** 2)\n num += np.sum(self.dc_dry_pos_out[l] ** 2)\n num += np.sum(self.dc_drx_neg_out[l] ** 2)\n num += np.sum(self.dc_dry_neg_out[l] ** 2)\n\n # Initialize velocities to zero for momentum\n if self.vel_b is None or self.vel_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n # Take steepest descent step\n for l, layer in enumerate(network.layers):\n layer.b -= self.alpha * self.dc_db[l]\n layer.q -= self.alpha * self.dc_dq[l]\n layer.rx_inp -= self.alpha * self.dc_drx_inp[l]\n layer.ry_inp -= self.alpha * self.dc_dry_inp[l]\n layer.rx_pos_out -= self.alpha * self.dc_drx_pos_out[l]\n layer.ry_pos_out -= self.alpha * self.dc_dry_pos_out[l]\n layer.rx_neg_out -= self.alpha * self.dc_drx_neg_out[l]\n layer.ry_neg_out -= self.alpha * self.dc_dry_neg_out[l]\n\n else:\n # compute beta\n beta = num / self.denominator\n\n # compute s_n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] = -self.alpha * self.dc_db[l] + beta * self.ms_b[l]\n self.ms_q[l] = -self.alpha * self.dc_dq[l] + beta * self.ms_q[l]\n self.ms_rx_inp[l] = -self.alpha * self.dc_drx_inp[l] + beta * self.ms_rx_inp[l]\n self.ms_ry_inp[l] = -self.alpha * self.dc_dry_inp[l] + beta * self.ms_ry_inp[l]\n self.ms_rx_pos_out[l] = -self.alpha * self.dc_drx_pos_out[l] + beta * self.ms_rx_pos_out[l]\n self.ms_ry_pos_out[l] = -self.alpha * self.dc_dry_pos_out[l] + beta * self.ms_ry_pos_out[l]\n self.ms_rx_neg_out[l] = -self.alpha * self.dc_drx_neg_out[l] + beta * self.ms_rx_neg_out[l]\n self.ms_ry_neg_out[l] = -self.alpha * self.dc_dry_neg_out[l] + beta * self.ms_ry_neg_out[l]\n\n # Take step\n for l, layer in 
enumerate(network.layers):\n layer.b += self.alpha * self.ms_b[l]\n layer.q += self.alpha * self.ms_q[l]\n layer.rx_inp += self.alpha * self.ms_rx_inp[l]\n layer.ry_inp += self.alpha * self.ms_ry_inp[l]\n layer.rx_pos_out += self.alpha * self.ms_rx_pos_out[l]\n layer.ry_pos_out += self.alpha * self.ms_ry_pos_out[l]\n layer.rx_neg_out += self.alpha * self.ms_rx_neg_out[l]\n layer.ry_neg_out += self.alpha * self.ms_ry_neg_out[l]\n\n # store num for next iteration to be used as denominator\n self.denominator = num", "def method_compute_timestep(self):\n\n myg = self.cc_data.grid\n\n cfl = self.rp.get_param(\"driver.cfl\")\n\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n # the timestep is min(dx/|u|, dy|v|)\n xtmp = ytmp = 1.e33\n if not abs(u).max() == 0:\n xtmp = myg.dx/abs(u.v()).max()\n if not abs(v).max() == 0:\n ytmp = myg.dy/abs(v.v()).max()\n\n dt = cfl*min(xtmp, ytmp)\n\n # We need an alternate timestep that accounts for buoyancy, to\n # handle the case where the velocity is initially zero.\n rho = self.cc_data.get_var(\"density\")\n rho0 = self.base[\"rho0\"]\n rhoprime = self.make_prime(rho, rho0)\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n\n F_buoy = (abs(rhoprime*g).v()/rho.v()).max()\n\n dt_buoy = np.sqrt(2.0*myg.dx/F_buoy)\n\n self.dt = min(dt, dt_buoy)\n if self.verbose > 0:\n print(f\"timestep is {dt}\")", "def f_dyn(Y, Yd, Fe, Te, tau, Conn, Prop):\n SE = Conn[1]\n ce, Qe = Prop[3], Prop[5]\n R0, Q0, q = Y[0:3], Y[3:6], Y[6:]\n v0, w0, qd = Yd[0:3], Yd[3:6], Yd[6:]\n\n num_j = len(q) # Number of joints/links\n num_e = SE.shape[1] # Number of endpoints\n\n # Position and rotation matrices\n AA = calc_aa(Q0, q, Conn, Prop)\n RR = calc_pos(R0, AA, q, Conn, Prop)\n\n # Inertia matrice\n HH = calc_hh(RR, AA, Conn, Prop)\n\n # Calculation of velocity dependent terms using the recursive Newton-Eulero\n # inverse dynamics setting to zero all accelerations and forces\n zero_Ydd = np.zeros(6+num_j)\n zero_Fe = np.zeros((3, num_e))\n Force0 = r_ne(RR, AA, q, Yd, zero_Ydd, zero_Fe, zero_Fe, Conn, Prop)\n\n # Generalized external forces applied on base centroid and joints\n F0 = np.zeros(3)\n T0 = np.zeros(3)\n\n # Loop over all endpoints\n for ie in range(num_e):\n\n # If the endpoint is associated with the base\n if (SE[0, ie] == 0):\n\n A_0_ie = rpy2dc(Qe[:, ie]).T # Endpoint to base\n A_I_ie = AA[:, 0:3] @ A_0_ie # Endpoint to inertial\n\n # If the external load is given wrt the local frame\n if (SE[1, ie] == 0):\n R_0_ie = A_0_ie.T @ ce[:, ie]\n F0 += A_I_ie @ Fe[:, ie]\n T0 += A_I_ie @ (tilde(R_0_ie) @ Fe[:, ie] + Te[:, ie])\n\n # If the external load is given wrt the inertial frame\n else:\n R_0_ie = A_0_ie @ ce[:, ie]\n F0 += Fe[:, ie]\n T0 += (tilde(R_0_ie) @ Fe[:, ie] + Te[:, ie])\n\n # Assemble all terms\n Force = np.block([F0, T0, tau])\n\n # Generalized external forces applied to the link endpoints\n Fx = np.zeros(3)\n Tx = np.zeros(3)\n taux = np.zeros(num_j)\n\n # Loop over all endpoints\n for ie in range(num_e):\n\n i = SE[0, ie] # Link associated to the endpoint <ie>\n\n # If the endpoint is associated with a link\n if (i > 0):\n\n # Endpoint Jacobian - shape is (6 x num_j)\n JJ_tmp = calc_je(ie, RR, AA, q, Conn, Prop)\n JJ_tx_i = JJ_tmp[0:3, :] # Translational component\n JJ_rx_i = JJ_tmp[3:6, :] # Rotational component\n\n # Endpoint position wrt the base centroid\n A_I_i = AA[:, 3*i:3*(i+1)]\n A_I_ie = A_I_i @ rpy2dc(Qe[:, ie]).T\n R_0_ie = RR[:, i] - RR[:, 0] + A_I_i @ ce[:, ie]\n\n # If the external load is given wrt 
the local frame\n if (SE[1, ie] == 0):\n Fx += A_I_ie @ Fe[:, ie]\n Tx += tilde(R_0_ie) @ A_I_ie @ Fe[:, ie] + A_I_ie @ Te[:, ie]\n taux += + JJ_tx_i.T @ A_I_ie @ Fe[:, ie] \\\n + JJ_rx_i.T @ A_I_ie @ Te[:, ie]\n\n # If the external load is given wrt the inertial frame\n else:\n Fx += Fe[:, ie]\n Tx += tilde(R_0_ie) @ Fe[:, ie] + Te[:, ie]\n taux += JJ_tx_i.T @ Fe[:, ie] + JJ_rx_i.T @ Te[:, ie]\n\n # Assemble the link endpoint contributions\n Force_ee = np.block([Fx, Tx, taux])\n\n # Calculate the accelerations - eq. 3.29\n Ydd = np.linalg.inv(HH) @ (Force + Force_ee - Force0)\n\n return Ydd", "def optimize_force_field_parameters_Cv_FWHM(cgmodel, file_list, temperature_list, param_bounds_dict,\n frame_begin=0, frame_end=-1, sample_spacing=1, sparsify_stride=1, output_data='output.nc',\n verbose=False, n_cpu=12, min_eff_samples=50,\n n_trial_boot=200, num_intermediate_states=0, plotfile='optimize_FWHM_iterations.pdf',\n min_method='TNC'):\n\n # Parse the force field parameter change dict:\n x0 = []\n param_names = []\n bounds = []\n units = []\n \n for key,value in param_bounds_dict.items():\n # value should be [(bound_lo, bound_hi)]\n # key should be a valid force field parameter name\n param_names.append(key)\n # Every parameter except periodicity should have units\n # For now, changing periodicity is not supported.\n \n # TODO: add support for sums of periodic torsion terms\n units.append(value[0].unit)\n bounds.append((value[0].value_in_unit(units[-1]),value[1].value_in_unit(units[-1])))\n # Use mean value as starting guess:\n x0.append((value[1].value_in_unit(units[-1])+value[0].value_in_unit(units[-1]))/2)\n\n if verbose:\n print(f'param_names: {param_names}')\n print(f'unit: {units}')\n print(f'bounds: {bounds}')\n print(f'x0: {x0}')\n\n def get_reeval_FWHM(param_values, cgmodel, file_list, temperature_list, output_data,\n param_names, units, frame_begin, sample_spacing, sparsify_stride, frame_end,\n n_cpu, n_trial_boot, num_intermediate_states):\n \"\"\"\n Objective function to be minimized\n \"\"\"\n\n # Construct dictionary of parameter update instructions:\n param_dict = {}\n \n # if len(param_names) == 1:\n # # 1D optimization:\n # param_dict[param_names[0]] = param_values * units[0]\n\n for i in range(len(param_names)):\n param_dict[param_names[i]] = param_values[i] * units[i]\n \n if verbose:\n print(f'Current parameters: {param_dict}') \n \n # Re-evaluate energy with current force field parameters:\n # For bootstrapping, evaluate all frames between [frame_begin:sparsify_stride:frame_end], and\n # apply the sample_spacing only to the heat capacity part\n U_eval, simulation = eval_energy(\n cgmodel,\n file_list,\n temperature_list,\n param_dict,\n frame_begin=frame_begin,\n frame_stride=sparsify_stride,\n frame_end=frame_end,\n n_cpu=n_cpu,\n verbose=verbose,\n )\n\n # Evaluate heat capacity and full-width half-maximum from bootstrapping:\n (new_temperature_list, C_v_values, C_v_uncertainty,\n Tm_value, Tm_uncertainty,\n Cv_height_value, Cv_height_uncertainty,\n FWHM_value, FWHM_uncertainty,\n N_eff_values) = bootstrap_heat_capacity(\n U_kln=U_eval,\n output_data=output_data,\n frame_begin=frame_begin,\n frame_end=frame_end,\n sample_spacing=sample_spacing,\n sparsify_stride=sparsify_stride,\n num_intermediate_states=num_intermediate_states,\n n_trial_boot=n_trial_boot,\n plot_file=f'heat_capacity_boot_{param_names[0]}_{param_values}.pdf',\n )\n \n if verbose:\n print(f'Current FWHM: {FWHM_value} +/- {FWHM_uncertainty[0]}')\n print(f'Current minimum N_eff: 
{np.min(N_eff_values)}')\n \n # Check for minimum N_eff criteria.\n # If too small, the minimization should stop if we're using a gradient method.\n # If we're not using a gradient method, return a large value.\n \n if np.min(N_eff_values) < min_eff_samples:\n print(f'Insufficient number of effective samples ({np.min(N_eff_values)})')\n \n # print(f'Creating a cgmodel with current parameters...,end='')\n # Create the cgmodel\n # print('done')\n \n exit()\n \n return FWHM_value.value_in_unit(unit.kelvin)\n\n # Run optimization:\n\n # if len(param_names) == 1:\n # # Do scalar optimization:\n # opt_results = minimize_scalar(get_reeval_FWHM, x0,\n # args=(cgmodel, file_list, temperature_list, output_data, param_names, units,\n # frame_begin, sample_spacing, sparsify_stride, frame_end, n_cpu, n_trial_boot, num_intermediate_states),\n # method='bounded',\n # bounds=[bounds[0][0],bounds[0][1]],\n # options={'maxiter': 25},\n # )\n\n # else:\n # Do multivariate optimization:\n opt_results = minimize(get_reeval_FWHM, x0, jac='2-point',\n args=(cgmodel, file_list, temperature_list, output_data, param_names, units,\n frame_begin, sample_spacing, sparsify_stride, frame_end, n_cpu, n_trial_boot, num_intermediate_states),\n method=min_method,\n bounds=bounds,\n options={'maxfun': 25, 'finite_diff_rel_step': 0.005, 'eta': 0.5}, # This should be user input\n ) \n \n # TODO: plot the heat capacity curves at each iteration, and make a plot of all FWHM_values \n\n # Construct dictionary of optimal parameters:\n opt_param_dict = {} \n \n k = 0\n for key,value in param_bounds_dict.items():\n opt_param_dict[key] = opt_results.x[k] * units[k]\n k += 1\n \n return opt_param_dict, opt_results", "def preCondConjugateGradientSolver(b, x, linsys_setup, eps, i_max, plotInterval, mapDir):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz=True\n \n \n # Calculate residual r = b - (A^-1) x\n r = b - applyMat(x, linsys_setup)\n d = r\n\n\n delta_new = numpy.inner(r,r)\n \n\n\n\n delta_o = delta_new\n delta_array = numpy.zeros(shape=(i_max))\n \n # Iterate CG solver until converged\n i = 0\n #i_max = 300\n while (i < i_max) and (delta_new > delta_o*eps**2.):\n if i==0: t = time.time()\n \n if i%plotInterval == 0 and i != 0:\n print \"\\tNumber of iterations in the CG:\", i\n x0 = x[:nx*ny] # CMB\n x1 = x[nx*ny:nx*ny+1] # Monopole\n x2 = x[nx*ny+1:nx*ny+1+nCluster] # TSZ\n if ksz: x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n print \"\\tMonopole:\", x1\n print \"\\tTSZ:\", x2\n if ksz: print \"\\tKSZ:\", x3\n \n x0.shape = (ny,nx)\n a_l = numpy.fft.fft2(x0)\n a_l *= precond_2d\n x_test = numpy.real(numpy.fft.ifft2(a_l))\n plot(x_test,mapDir+'/CMB_%d.png'%i,'Reconstructed CMB', range=(-250., 250.))\n print delta_new, delta_o*eps**2.\n\n q = applyMat(d, linsys_setup)\n alpha = delta_new / (numpy.inner(d,q))\n x += alpha * d\n\n # What does this do? It's always false.\n if i/50. 
< numpy.int(i/50):\n r = b - applyMat(x, linsys_setup)\n else:\n r = r - alpha*q\n \n delta_old = delta_new\n delta_new = numpy.inner(r,r)\n beta = delta_new/delta_old\n d = r + beta * d\n #if i==0: print \"\\tEach iteration takes:\", time.time()-t\n i += 1\n\n x0 = x[:nx*ny].reshape((ny, nx))\n x1 = x[nx*ny:nx*ny+1]\n x2 = x[nx*ny+1:nx*ny+1+nCluster]\n if ksz:\n x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n else:\n x3 = None\n \n a_l = numpy.fft.fft2(x0) * precond_2d\n x0 = numpy.real(numpy.fft.ifft2(a_l))\n\n \n # CMB, monopole, TSZ, KSZ\n return x0, x1, x2, x3" ]
[ "0.77637815", "0.6571571", "0.6571571", "0.61444074", "0.53732836", "0.5364078", "0.53592175", "0.53389966", "0.5281263", "0.52668315", "0.52541155", "0.52250105", "0.52205896", "0.5189851", "0.5186091", "0.512015", "0.50682104", "0.5003378", "0.49752918", "0.49623924", "0.49581325", "0.49376437", "0.4918121", "0.49087563", "0.49076924", "0.48919708", "0.48911765", "0.48859563", "0.48802888", "0.4869652" ]
0.75096387
1
Compute magnetic torque if a magnetic model is provided. This method converts the satellite's position into a Longitude, Latitude, Altitude representation to determine the geomagnetic field at that position and then computes the magnetic torque based on those values.
def _compute_magnetic_torque(self, curr_date): if self._to_add[1]: gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date) topoframe = TopocentricFrame(self.earth, gP, 'ENU') topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date) lat = gP.getLatitude() lon = gP.getLongitude() alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km] # get B-field in geodetic system (X:East, Y:North, Z:Nadir) B_geo = FileDataHandler.mag_field_model.calculateField( degrees(lat), degrees(lon), alt).getFieldVector() # convert geodetic frame to inertial and from [nT] to [T] B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo)) B_b = self.inertial2Sat.applyTo(B_i) B_b = np.array([B_b.x, B_b.y, B_b.z]) dipoleVector = self.dipoleM.getDipoleVectors(B_b) torque = np.sum(np.cross(dipoleVector, B_b), axis=0) self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2])) else: self._mTorque = Vector3D.ZERO
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def torque(system, /, use_demag=True):\n if use_demag:\n total_field = (mm.consts.mu0 *\n (oc.compute(system.energy.demag.effective_field, system)\n + system.energy.zeeman.H))\n else:\n total_field = mm.consts.mu0 * np.array(system.energy.zeeman.H)\n norm_field = df.Field(system.m.mesh, dim=1,\n value=(system.m.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n moment = system.m * volume\n torque = (moment & total_field)\n return (df.integral(torque * df.dV / volume**2, direction='xyz'))", "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j*kr\n\n front_term = self.moment / (4. * np.pi * r**3) * np.exp(-ikr)\n symmetric_term = (\n spatial.repeat_scalar(self.dot_orientation(dxyz)) * dxyz *\n (-kr**2 + 3*ikr + 3) / r**2\n )\n oriented_term = (\n (kr**2 - ikr - 1) *\n np.kron(self.orientation, np.ones((dxyz.shape[0], 1)))\n )\n\n return front_term * (symmetric_term + oriented_term)", "def get_motor_load_torque(self):\n # Start with the brake normal\n # change to 17deg (tan 17?)\n # change to torque using the pitch of the thread on the ball screw\n # (^ make sure to take friction into account)\n # That should give us the torque acting on the motor. If this torque is greater than the motor max torque, it will slip\n # Take into account that the max holding torque is different from the max torque. How do we know if the motor is holding or moving? \n # How do we control the stepper motor? Where are the routines for that? \n pass", "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j*kr\n\n front_term = (\n self.current * self.length / (4 * np.pi * r**2) * (ikr + 1) *\n np.exp(-ikr)\n )\n return -front_term * self.cross_orientation(dxyz) / r", "def _compute_solar_torque(self):\n pass", "def _compute_gravity_torque(self):\n pass", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n Rq, phiq, Zq = coords.T\n if self._axisym:\n BRq = interp2d(\n Rq,\n Zq,\n self._R,\n self._Z,\n self._BR[:, 0, :],\n self._method,\n (0, 0),\n self._extrap,\n (None, None),\n **self._derivs[\"BR\"],\n )\n Bphiq = interp2d(\n Rq,\n Zq,\n self._R,\n self._Z,\n self._Bphi[:, 0, :],\n self._method,\n (0, 0),\n self._extrap,\n (None, None),\n **self._derivs[\"Bphi\"],\n )\n BZq = interp2d(\n Rq,\n Zq,\n self._R,\n self._Z,\n self._BZ[:, 0, :],\n self._method,\n (0, 0),\n self._extrap,\n (None, None),\n **self._derivs[\"BZ\"],\n )\n\n else:\n BRq = interp3d(\n Rq,\n phiq,\n Zq,\n self._R,\n self._phi,\n self._Z,\n self._BR,\n self._method,\n (0, 0, 0),\n self._extrap,\n (None, self._period, None),\n **self._derivs[\"BR\"],\n )\n Bphiq = interp3d(\n Rq,\n phiq,\n Zq,\n self._R,\n self._phi,\n self._Z,\n self._Bphi,\n self._method,\n (0, 0, 0),\n self._extrap,\n (None, self._period, None),\n **self._derivs[\"Bphi\"],\n )\n BZq = interp3d(\n Rq,\n phiq,\n Zq,\n self._R,\n self._phi,\n self._Z,\n self._BZ,\n self._method,\n (0, 0, 0),\n self._extrap,\n (None, self._period, None),\n **self._derivs[\"BZ\"],\n )\n B = jnp.array([BRq, Bphiq, BZq]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n return B", "def get_torque(self):\n return self.node.sdo[0x6077].phys # rate torque(mN.m) 
/1000", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n\n R, phi, Z = coords.T\n r = jnp.sqrt((R - self._R0) ** 2 + Z**2)\n theta = jnp.arctan2(Z, R - self._R0)\n br = -r * jnp.sin(theta)\n bp = jnp.zeros_like(br)\n bz = r * jnp.cos(theta)\n bmag = self._B0 * self._iota / self._R0\n B = bmag * jnp.array([br, bp, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B", "def mTorque(self):\n pass", "def make_torque(self):\n def torque_func(m):\n heff = self.field(m)\n total_torque = torque.landau_lifshitz(m, heff, self.damping)\n if self.stt != 0:\n total_torque += torque.slonczewski(m, self.Jc, self.stt)\n return total_torque\n self.torque = torque_func", "def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n sat2body = body2sat.revert()\n\n satM = self.state_observer.spacecraftState.getMass()\n mCub = self.inCub['mass_frac'] * satM\n\n self._gTorque = Vector3D.ZERO\n\n for CoM in self.inCub['CoM']:\n\n S_dmPos = self.satPos_s.add(CoM)\n\n r2 = S_dmPos.getNormSq()\n gNewton = Vector3D(-self.muGM / (sqrt(r2) * r2), S_dmPos)\n\n B_dmPos = sat2body.applyTo(S_dmPos)\n\n gDist = Vector3D(self.GravityModel.gradient(curr_date,\n B_dmPos,\n self.muGM))\n\n g_Dist_s = body2sat.applyTo(gDist)\n\n dmForce = Vector3D(mCub, gNewton.add(g_Dist_s))\n self._gTorque = self._gTorque.add(self.V3_cross(CoM, dmForce))\n\n else:\n self._gTorque = Vector3D.ZERO", "def _compute_aero_torque(self):\n pass", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n Rq, phiq, Zq = coords.T\n\n if (params is None) or (len(params) == 0):\n params = self._params\n r, p, z = coords.T\n funR = lambda x: self._potential(x, p, z, **params)\n funP = lambda x: self._potential(r, x, z, **params)\n funZ = lambda x: self._potential(r, p, x, **params)\n br = Derivative.compute_jvp(funR, 0, (jnp.ones_like(r),), r)\n bp = Derivative.compute_jvp(funP, 0, (jnp.ones_like(p),), p)\n bz = Derivative.compute_jvp(funZ, 0, (jnp.ones_like(z),), z)\n B = jnp.array([br, bp / r, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n return B", "def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n body2satRot = PyRotation(body2sat.q0,\n body2sat.q1,\n body2sat.q2,\n body2sat.q3)\n sat2bodyRot = body2satRot.revert()\n body2sat = body2satRot.getMatrix()\n sat2body = sat2bodyRot.getMatrix()\n\n satM = self.spacecraft_state.getMass()\n mCub = self.inCub['dm'] * satM\n # add booms\n if \"dm_boom\" in self.inCub:\n mCub = np.concatenate((mCub, self.inCub['dm_boom']), axis=0) # boom store with mass\n CoM = self.inCub['CoM_np']\n\n dmPos_s = CoM + self.satPos_s\n\n gNewton = (-self.muGM / np.linalg.norm(dmPos_s,\n axis=1,\n keepdims=True)**3) * dmPos_s\n\n # rotate vectors:\n dmPos_b = np.einsum('ij,kj->ki', sat2body, 
dmPos_s)\n\n gDist = np.empty(dmPos_b.shape)\n for i in xrange(0, dmPos_b.shape[0]):\n gDist[i, :] = np.asarray(\n self.GravityModel.gradient(curr_date,\n Vector3D(float(dmPos_b[i, 0]),\n float(dmPos_b[i, 1]),\n float(dmPos_b[i, 2])),\n self.muGM))\n\n gDist_s = np.einsum('ij,kj->ki', body2sat, gDist)\n\n gT = np.sum(np.cross(CoM, mCub*(gNewton + gDist_s)), axis=0)\n\n self._gTorque = Vector3D(float(gT[0]), float(gT[1]), float(gT[2]))\n\n else:\n self._gTorque = Vector3D.ZERO", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n bp = self._B0 * self._R0 / coords[:, 0]\n brz = jnp.zeros_like(bp)\n B = jnp.array([brz, bp, brz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B", "def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n bz = self._B0 * jnp.ones_like(coords[:, 2])\n brp = jnp.zeros_like(bz)\n B = jnp.array([brp, brp, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B", "def test_comp_magnetostrictive_tensor_1cell():\n\n # Physical quantities\n dim = 2\n Nt_tot = 1\n\n mu = 1\n Be = np.array([[[mu / 2, 0]]])\n He = np.array([[[-1 / 2, 0]]])\n mue = np.array([[mu]])\n\n Me = np.reshape(Be / mue - He, (dim, 1, Nt_tot))\n\n alphaij = [[1, 0, 0], [1, 0, 0]]\n\n alpha1 = 1\n alpha2 = 1\n\n # Computation\n tensor = ForceTensor()\n\n tensor_comp = tensor.comp_magnetrosctrictive_tensor(\n mue, Me, Nt_tot, alphaij\n ) # Should be equal to -alpha1*mu*MM' - alpha2*mu*M²*I2\n\n assert tensor_comp[0, 0, 0] == -mu * (alpha1 + alpha2)\n assert tensor_comp[0, 1, 0] == 0\n assert tensor_comp[1, 0, 0] == 0\n assert tensor_comp[1, 1, 0] == -mu * alpha2\n\n print(\"test_comp_magnetostrictive_tensor succeeded\")\n\n return True", "def magnetisation(field):\n norm_field = df.Field(field.mesh, dim=1, value=(field.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n return df.integral(field * df.dV / volume, direction='xyz')", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n return self._scalar * self._field.compute_magnetic_field(coords, params, basis)", "def magnetometer(self):\n self._mag[X] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_X_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_X_L_M), 16)\n self._mag[Y] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Y_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Y_L_M), 16)\n self._mag[Z] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Z_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Z_L_M), 16)\n\n return vector(self._mag)", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = self.inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n sunPos = np.array([sunPos.x, sunPos.y, sunPos.z], dtype='float64')\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = self.meshDA['Area_np']\n coefs = self.meshDA['Coefs_np']\n\n sunSatVector = self.satPos_s + CoM - 
sunPos\n r = np.linalg.norm(sunSatVector, axis=1)\n rawP = ratio * self.K_REF / (r**2)\n flux = (rawP / r)[:, None] * sunSatVector\n # eliminate arrays where zero flux\n fluxNorm = np.linalg.norm(flux, axis=1)\n Condflux = fluxNorm**2 > Precision.SAFE_MIN\n flux = flux[Condflux]\n normal = normal[Condflux]\n\n # dot product for multidimensional arrays:\n dot = np.einsum('ij,ij->i', flux, normal)\n dot[dot > 0] = dot[dot > 0] * (-1.0)\n if dot.size > 0:\n normal[dot > 0] = normal[dot > 0] * (-1.0)\n\n cN = 2 * area * dot * (coefs[:, 2] / 3 - coefs[:, 1] * dot / fluxNorm)\n cS = (area * dot / fluxNorm) * (coefs[:, 1] - 1)\n force = cN[:, None] * normal + cS[:, None] * flux\n\n sT = np.sum(np.cross(CoM, force), axis=0)\n\n self._sTorque = Vector3D(float(sT[0]), float(sT[1]), float(sT[2]))\n\n else:\n self._sTorque = Vector3D.ZERO", "def setMotorTorque(self, torque):\r\n if torque < 0.0:\r\n torque = 0.0\r\n elif torque > 1.0:\r\n torque = 1.0\r\n torque *= self.maxTorque\r\n if self.reverse:\r\n torque *= -1\r\n dTorque = 2\r\n if self.torque < torque:\r\n self.torque += dTorque\r\n elif self.torque > torque:\r\n self.torque -= dTorque\r\n \r\n for tire in self.tires:\r\n if tire.torque:\r\n tire.shape.setMotorTorque( self.torque )", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n inertial2Sat = self.spacecraft_state.getAttitude().getRotation()\n\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n self._sTorque = Vector3D.ZERO\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'],\n self.meshDA['Coefs'])\n\n for CoM, normal, area, coefs in iterator:\n position = self.satPos_s.add(CoM)\n\n # compute flux in inertial frame\n sunSatVector = \\\n position.subtract(sunPos)\n r2 = sunSatVector.getNormSq()\n\n rawP = ratio * self.K_REF / r2\n flux = Vector3D(rawP / sqrt(r2), sunSatVector)\n\n # compute Radiation Pressure Force:\n if flux.getNormSq() > Precision.SAFE_MIN:\n # illumination (we are not in umbra)\n # rotate flux to spacecraft frame:\n dot = self.V3_dot(normal, flux)\n\n if dot > 0:\n # the solar array is illuminated backward,\n # fix signs to compute contribution correctly\n dot = -dot\n normal = normal.negate()\n absorbCoeff = coefs[0]\n specularReflCoeff = coefs[1]\n diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff)\n try:\n assert(diffuseReflCoeff >= 0)\n except AssertionError:\n raise AssertionError(\n \"Negative diffuse reflection coefficient not possible!\")\n psr = flux.getNorm()\n # Vallado's equation uses different parameters which are\n # related to our parameters as:\n # cos (phi) = - dot / (psr*area)\n # n = N (n...unit vector)\n # s = -fluxSat / psr (s...unit vector)\n cN = 2 * area * dot * (diffuseReflCoeff / 3 -\n specularReflCoeff * dot / psr)\n cS = (area * dot / psr) * (specularReflCoeff - 1)\n Force = Vector3D(float(cN), normal, float(cS), flux)\n # Force already in spacecraft frame. 
No need to convert\n self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force))\n\n else:\n self._sTorque = Vector3D.ZERO", "def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))", "def get_motor_torques(\n self,\n motor_commands: np.ndarray,\n motor_control_mode=None) -> Tuple[np.ndarray, np.ndarray]:\n if not motor_control_mode:\n motor_control_mode = self._motor_control_mode\n\n motor_torques = None\n\n if motor_control_mode is robot_config.MotorControlMode.TORQUE:\n motor_torques = motor_commands\n\n if motor_control_mode is robot_config.MotorControlMode.POSITION:\n motor_torques = self._compute_pd_torques(\n desired_motor_angles=motor_commands,\n kp=self._kp,\n desired_motor_velocities=self._zero_array,\n kd=self._kd)\n \n if motor_torques is None:\n raise ValueError(\n \"{} is not a supported motor control mode\".format(motor_control_mode))\n\n # Apply the output filter to model actuator dynamics\n # BUG: Causes big instability in the sim\n # motor_torques = self._torque_filter(motor_torques)\n\n # Hard-code torque limits until the torque limit bug is fixed\n motor_torques = np.clip(motor_torques, -1.7, 1.7)\n\n # Apply motor damping and friction\n motor_torques -= (np.sign(self._previous_true_motor_velocity) *\n self._motor_torque_dependent_friction *\n motor_torques)\n motor_torques -= self._previous_true_motor_velocity * self._motor_damping\n\n # Rescale and clip the motor torques as needed.\n motor_torques = self._strength_ratios * motor_torques\n if (self._torque_lower_limits is not None or\n self._torque_upper_limits is not None):\n motor_torques = np.clip(motor_torques, self._torque_lower_limits,\n self._torque_upper_limits)\n\n return motor_torques, motor_torques", "def magnetic_tension(self, method='spectral'):\n import numpy as np\n gradB, B = self.magnetic_gradient_tensor(method=method, return_B=True)\n F = np.zeros_like(B)\n for i in range(3):\n for j in range(3):\n F[j] += B[i] * gradB[i,j]\n return F", "def set_hybrid_control(self, model, max_force_torque, timeout=5.0, stop_on_target_force=False):\n\n reduced_speed = np.deg2rad([100, 100, 100, 150, 150, 150])\n q_last = self.joint_angles()\n\n # Timeout for motion\n initime = rospy.get_time()\n xb = self.end_effector()\n failure_counter = 0\n\n while not rospy.is_shutdown() \\\n and (rospy.get_time() - initime) < timeout:\n\n # Transform wrench to the base_link frame\n Wb = self.get_ee_wrench()\n\n # Current Force in task-space\n Fb = -1 * Wb\n # Safety limits: max force\n if np.any(np.abs(Fb) > max_force_torque):\n rospy.logerr('Maximum force/torque exceeded {}'.format(np.round(Wb, 3)))\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return FORCE_TORQUE_EXCEEDED\n\n if stop_on_target_force and np.any(np.abs(Fb)[model.target_force != 0] > model.target_force[model.target_force != 0]):\n rospy.loginfo('Target F/T reached {}'.format(np.round(Wb, 3)) + ' Stopping!')\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return STOP_ON_TARGET_FORCE\n\n # Current position in task-space\n xb = self.end_effector()\n\n dxf = model.control_position_orientation(Fb, xb) # angular velocity\n\n # Limit linear/angular velocity\n dxf[:3] = np.clip(dxf[:3], -0.5, 0.5)\n dxf[3:] = 
np.clip(dxf[3:], -5., 5.)\n\n xc = transformations.pose_from_angular_velocity(xb, dxf, dt=model.dt)\n\n # Avoid extra acceleration when a point failed due to IK or other violation\n # So, this corrects the allowed time for the next point\n dt = model.dt * (failure_counter+1)\n\n q = self._solve_ik(xc)\n if q is None:\n rospy.logwarn(\"IK not found\")\n result = IK_NOT_FOUND\n else:\n q_speed = (q_last - q)/dt\n if np.any(np.abs(q_speed) > reduced_speed):\n rospy.logwarn(\"Exceeded reduced max speed %s deg/s, Ignoring command\" % np.round(np.rad2deg(q_speed), 0))\n result = SPEED_LIMIT_EXCEEDED\n else:\n result = self.set_joint_positions_flex(position=q, t=dt)\n\n if result != DONE:\n failure_counter += 1\n continue # Don't wait since there is not motion\n else:\n failure_counter = 0\n\n # Compensate the time allocated to the next command when there are failures\n for _ in range(failure_counter+1):\n self.rate.sleep()\n\n q_last = self.joint_angles()\n return DONE", "def gTorque(self):\n pass" ]
[ "0.68699807", "0.63400656", "0.6232671", "0.6188151", "0.6134176", "0.61125404", "0.60403585", "0.5898898", "0.58988893", "0.5801989", "0.5767733", "0.5767675", "0.57487917", "0.57399243", "0.57315844", "0.568015", "0.5644834", "0.5619654", "0.5577926", "0.5538078", "0.54779387", "0.5452449", "0.539544", "0.5392918", "0.5344376", "0.53228706", "0.53060395", "0.5294093", "0.52701694", "0.52411765" ]
0.72651374
0
Compute torque acting on the satellite due to solar radiation pressure. This method uses the getLightingRatio() method defined in Orekit and copies parts of the acceleration() method of the SolarRadiationPressure class and radiationPressureAcceleration() of the BoxAndSolarArraySpacecraft class to calculate the solar radiation pressure on the discretized surface of the satellite. This is done since the necessary Orekit methods cannot be accessed directly without creating a Spacecraft object.
def _compute_solar_torque(self, curr_date): if self._to_add[2]: inertial2Sat = self.spacecraft_state.getAttitude().getRotation() ratio = self.SolarModel.getLightingRatio(self.satPos_i, self.in_frame, curr_date) sunPos = inertial2Sat.applyTo( self.sun.getPVCoordinates(curr_date, self.in_frame).getPosition()) self._sTorque = Vector3D.ZERO iterator = itertools.izip(self.meshDA['CoM'], self.meshDA['Normal'], self.meshDA['Area'], self.meshDA['Coefs']) for CoM, normal, area, coefs in iterator: position = self.satPos_s.add(CoM) # compute flux in inertial frame sunSatVector = \ position.subtract(sunPos) r2 = sunSatVector.getNormSq() rawP = ratio * self.K_REF / r2 flux = Vector3D(rawP / sqrt(r2), sunSatVector) # compute Radiation Pressure Force: if flux.getNormSq() > Precision.SAFE_MIN: # illumination (we are not in umbra) # rotate flux to spacecraft frame: dot = self.V3_dot(normal, flux) if dot > 0: # the solar array is illuminated backward, # fix signs to compute contribution correctly dot = -dot normal = normal.negate() absorbCoeff = coefs[0] specularReflCoeff = coefs[1] diffuseReflCoeff = 1 - (absorbCoeff + specularReflCoeff) try: assert(diffuseReflCoeff >= 0) except AssertionError: raise AssertionError( "Negative diffuse reflection coefficient not possible!") psr = flux.getNorm() # Vallado's equation uses different parameters which are # related to our parameters as: # cos (phi) = - dot / (psr*area) # n = N (n...unit vector) # s = -fluxSat / psr (s...unit vector) cN = 2 * area * dot * (diffuseReflCoeff / 3 - specularReflCoeff * dot / psr) cS = (area * dot / psr) * (specularReflCoeff - 1) Force = Vector3D(float(cN), normal, float(cS), flux) # Force already in spacecraft frame. No need to convert self._sTorque = self._sTorque.add(self.V3_cross(CoM, Force)) else: self._sTorque = Vector3D.ZERO
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_solar_torque(self):\n pass", "def _compute_solar_torque(self, curr_date):\n if self._to_add[2]:\n ratio = self.SolarModel.getLightingRatio(self.satPos_i,\n self.in_frame,\n curr_date)\n\n sunPos = self.inertial2Sat.applyTo(\n self.sun.getPVCoordinates(curr_date,\n self.in_frame).getPosition())\n sunPos = np.array([sunPos.x, sunPos.y, sunPos.z], dtype='float64')\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = self.meshDA['Area_np']\n coefs = self.meshDA['Coefs_np']\n\n sunSatVector = self.satPos_s + CoM - sunPos\n r = np.linalg.norm(sunSatVector, axis=1)\n rawP = ratio * self.K_REF / (r**2)\n flux = (rawP / r)[:, None] * sunSatVector\n # eliminate arrays where zero flux\n fluxNorm = np.linalg.norm(flux, axis=1)\n Condflux = fluxNorm**2 > Precision.SAFE_MIN\n flux = flux[Condflux]\n normal = normal[Condflux]\n\n # dot product for multidimensional arrays:\n dot = np.einsum('ij,ij->i', flux, normal)\n dot[dot > 0] = dot[dot > 0] * (-1.0)\n if dot.size > 0:\n normal[dot > 0] = normal[dot > 0] * (-1.0)\n\n cN = 2 * area * dot * (coefs[:, 2] / 3 - coefs[:, 1] * dot / fluxNorm)\n cS = (area * dot / fluxNorm) * (coefs[:, 1] - 1)\n force = cN[:, None] * normal + cS[:, None] * flux\n\n sT = np.sum(np.cross(CoM, force), axis=0)\n\n self._sTorque = Vector3D(float(sT[0]), float(sT[1]), float(sT[2]))\n\n else:\n self._sTorque = Vector3D.ZERO", "def compute_torques(self, rotation, omega, dt):\n # shift time @ which attitude integration currently is\n try:\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)\n omega = Vector3D(float(omega[0]), float(omega[1]), float(omega[2]))\n\n self._compute_gravity_torque(curr_date)\n self._compute_magnetic_torque(curr_date)\n self._compute_solar_torque(curr_date)\n self._compute_aero_torque(curr_date, omega)\n\n # external torque has to be set separately because it is received\n # through a ros subscriber\n return self._gTorque.add(\n self._mTorque.add(\n self._sTorque.add(\n self._aTorque)))\n except Exception:\n print traceback.print_exc()\n raise", "def compute_torques(self, rotation, omega, dt):\n # shift time from integration start to time of attitude integration step\n curr_date = self.in_date.shiftedBy(dt)\n\n self.inertial2Sat = rotation\n self.satPos_s = self.inertial2Sat.applyTo(self.satPos_i)\n self.satPos_s = np.array([self.satPos_s.x,\n self.satPos_s.y,\n self.satPos_s.z], dtype='float64')\n\n self._compute_gravity_torque(curr_date)\n self._compute_magnetic_torque(curr_date)\n self._compute_solar_torque(curr_date)\n self._compute_aero_torque(curr_date, omega)\n\n return self._gTorque.add(\n self._mTorque.add(\n self._sTorque.add(\n self._aTorque)))", "def _compute_aero_torque(self, curr_date, omega):\n if self._to_add[3]:\n # assuming constant atmosphere condition over spacecraft\n # error is of order of 10^-17\n rho = self.AtmoModel.getDensity(curr_date, self.satPos_i, self.in_frame)\n vAtm_i = self.AtmoModel.getVelocity(curr_date, self.satPos_i, self.in_frame)\n\n satVel = self.inertial2Sat.applyTo(self.satVel_i)\n vAtm = self.inertial2Sat.applyTo(vAtm_i)\n\n dragCoeff = self.meshDA['Cd']\n liftRatio = 0.0 # no lift considered\n\n CoM = self.meshDA['CoM_np']\n normal = self.meshDA['Normal_np']\n area = np.asarray(self.meshDA['Area'])\n satVel = np.array([satVel.x, satVel.y, satVel.z])\n vAtm = np.array([vAtm.x, vAtm.y, vAtm.z])\n\n relativeVelocity = vAtm - (satVel + (np.cross(omega, CoM)))\n vNorm = 
np.linalg.norm(relativeVelocity, axis=1)\n vDir = np.reciprocal(vNorm[:, None]) * relativeVelocity\n\n dot = np.einsum('ij,ij->i', normal, vDir)\n\n dotCondition = dot < 0\n dot = dot[dotCondition]\n if dot.size > 0:\n vDir = vDir[dotCondition]\n vNorm = vNorm[dotCondition]\n normal = normal[dotCondition]\n area = area[dotCondition]\n CoM = CoM[dotCondition]\n\n coeff = 0.5 * rho * dragCoeff * (vNorm**2)\n oMr = 1.0 - liftRatio\n f = (coeff * area * dot)[:, None]\n\n aT = np.sum(np.cross(CoM, oMr * np.absolute(f) * vDir + 2 * liftRatio * f * normal), axis=0)\n\n self._aTorque = Vector3D(float(aT[0]), float(aT[1]), float(aT[2]))\n\n else:\n self._aTorque = Vector3D.ZERO", "def calculate_impedance_torques(self, position_error, orientation_error):\n desired_force = (np.multiply(np.array(position_error), np.array(self.impedance_kp[0:3]))\n - np.multiply(np.array(self.current_lin_velocity), self.impedance_kv[0:3]))\n\n desired_torque = (np.multiply(np.array(orientation_error), np.array(self.impedance_kp[3:6]))\n - np.multiply(np.array(self.current_ang_velocity), self.impedance_kv[3:6]))\n\n uncoupling = True\n if (uncoupling):\n decoupled_force = np.dot(self.lambda_x_matrix, desired_force)\n decoupled_torque = np.dot(self.lambda_r_matrix, desired_torque)\n decoupled_wrench = np.concatenate([decoupled_force, decoupled_torque])\n else:\n desired_wrench = np.concatenate([desired_force, desired_torque])\n decoupled_wrench = np.dot(self.lambda_matrix, desired_wrench)\n\n torques = np.dot(self.J_full.T, decoupled_wrench)\n\n if self.initial_joint is not None:\n # TODO where does 10 come from?\n joint_kp = 10\n joint_kv = np.sqrt(joint_kp) * 2\n pose_torques = np.dot(self.mass_matrix, (joint_kp * (\n self.initial_joint - self.current_joint_position) - joint_kv * self.current_joint_velocity))\n nullspace_torques = np.dot(self.nullspace_matrix.transpose(), pose_torques)\n torques += nullspace_torques\n self.torques = torques\n\n return torques", "def torque(system, /, use_demag=True):\n if use_demag:\n total_field = (mm.consts.mu0 *\n (oc.compute(system.energy.demag.effective_field, system)\n + system.energy.zeeman.H))\n else:\n total_field = mm.consts.mu0 * np.array(system.energy.zeeman.H)\n norm_field = df.Field(system.m.mesh, dim=1,\n value=(system.m.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n moment = system.m * volume\n torque = (moment & total_field)\n return (df.integral(torque * df.dV / volume**2, direction='xyz'))", "def get_torque(self):\n return self.node.sdo[0x6077].phys # rate torque(mN.m) /1000", "def make_torque(self):\n def torque_func(m):\n heff = self.field(m)\n total_torque = torque.landau_lifshitz(m, heff, self.damping)\n if self.stt != 0:\n total_torque += torque.slonczewski(m, self.Jc, self.stt)\n return total_torque\n self.torque = torque_func", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. 
Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO", "def calculate(self, atoms):\n pmat = self.get_polarizability(self.omega, Eext=np.array([1.0, 1.0, 1.0]))\n\n # Specific for raman calls, it expects just the tensor for a single\n # frequency and need only the real part\n\n # For static raman, imaginary part is zero??\n # Answer from Michael Walter: Yes, in the case of finite systems you may\n # choose the wavefunctions to be real valued. 
Then also the density\n # response function and hence the polarizability are real.\n\n # Convert from atomic units to e**2 Ang**2/eV\n return pmat[:, :, 0].real * (un.Bohr**2) / un.Ha", "def _compute_aero_torque(self, curr_date, omega):\n if self._to_add[3]:\n # assuming constant atmosphere condition over spacecraft\n # error is of order of 10^-17\n rho = self.AtmoModel.getDensity(curr_date, self.satPos_i, self.in_frame)\n vAtm_i = self.AtmoModel.getVelocity(curr_date, self.satPos_i, self.in_frame)\n\n satVel = self.inertial2Sat.applyTo(self.satVel_i)\n vAtm = self.inertial2Sat.applyTo(vAtm_i)\n\n self._aTorque = Vector3D.ZERO\n\n dragCoeff = self.meshDA['Cd']\n liftRatio = 0.0 # no lift considered\n\n iterator = itertools.izip(self.meshDA['CoM'],\n self.meshDA['Normal'],\n self.meshDA['Area'])\n\n for CoM, Normal, Area in iterator:\n CoMVelocity = satVel.add(self.V3_cross(omega, CoM))\n relativeVelocity = vAtm.subtract(CoMVelocity)\n\n vNorm2 = relativeVelocity.getNormSq()\n vNorm = sqrt(vNorm2)\n vDir = relativeVelocity.scalarMultiply(1.0 / vNorm)\n\n dot = self.V3_dot(Normal, vDir)\n if (dot < 0):\n coeff = 0.5 * rho * dragCoeff * vNorm2\n oMr = 1.0 - liftRatio\n # dA intercepts the incoming flux\n f = coeff * Area * dot\n force = Vector3D(float(oMr * abs(f)), vDir,\n float(liftRatio * f * 2), Normal)\n self._aTorque = self._aTorque.add(self.V3_cross(CoM, force))\n\n else:\n self._aTorque = Vector3D.ZERO", "def correction(self):\r\n \r\n # empirical coefficients:\r\n k3, k2, k1, k0 = 0.0892, 0.0544, 0.2511, -0.0017\r\n \r\n # thrust as a function of the azimuth angle and the loads:\r\n thrust = self.qn*np.sin(Turbine.t) + self.qt*np.cos(Turbine.t)\r\n \r\n # interpolator function for the thrust:\r\n function = interp1d(Turbine.t, thrust, kind='cubic')\r\n \r\n # vectorize the function so that it takes an array of angles:\r\n __function__ = np.vectorize(function)\r\n \r\n # thrust coefficient integrating according to phi:\r\n self.cth = simps(__function__(Turbine.p), Turbine.p)\r\n \r\n # induction factor:\r\n self.a = k3*self.cth**3 + k2*self.cth**2 + k1*self.cth + k0\r\n \r\n # correction factor:\r\n if self.a <= 0.15:\r\n self.ka = 1.0/(1.0 - self.a)\r\n else:\r\n self.ka = (1./(1 - self.a))*(0.65 + 0.35*exp(-4.5*(self.a - 0.15)))", "def _compute_aero_torque(self):\n pass", "def optimal_angle_and_tilt(sensors_metadata_clean, latitude, sun_properties, Max_Isol, panel_properties):\n # calculate panel tilt angle (B) for flat roofs (tilt < 5 degrees), slope roofs and walls.\n optimal_angle_flat = calc_optimal_angle(180, latitude, sun_properties.trr_mean) # assume surface azimuth = 180 (N,E), south facing\n sensors_metadata_clean['tilt']= np.vectorize(acos)(sensors_metadata_clean['Zdir']) #surface tilt angle in rad\n sensors_metadata_clean['tilt'] = np.vectorize(degrees)(sensors_metadata_clean['tilt']) #surface tilt angle in degrees\n sensors_metadata_clean['B'] = np.where(sensors_metadata_clean['tilt'] >= 5, sensors_metadata_clean['tilt'],\n degrees(optimal_angle_flat)) # panel tilt angle in degrees\n\n # calculate spacing and surface azimuth of the panels for flat roofs\n module_length = panel_properties['module_length']\n optimal_spacing_flat = calc_optimal_spacing(sun_properties, optimal_angle_flat, module_length)\n sensors_metadata_clean['array_s'] = np.where(sensors_metadata_clean['tilt'] >= 5, 0, optimal_spacing_flat)\n sensors_metadata_clean['surface_azimuth'] = np.vectorize(calc_surface_azimuth)(sensors_metadata_clean['Xdir'],\n sensors_metadata_clean['Ydir'],\n 
sensors_metadata_clean['B']) # degrees\n\n # calculate the surface area required to install one pv panel on flat roofs with defined tilt angle and array spacing\n if panel_properties['type'] == 'PV':\n module_width = module_length # for PV\n else:\n module_width = panel_properties['module_area']/module_length # for FP, ET\n module_flat_surface_area = module_width * (sensors_metadata_clean.array_s / 2 + module_length * cos(optimal_angle_flat))\n area_per_module = module_width * module_length\n\n # calculate the pv/solar collector module area within the area of each sensor point\n sensors_metadata_clean['area_installed_module'] = np.where(sensors_metadata_clean['tilt'] >= 5, sensors_metadata_clean.AREA_m2,\n area_per_module * (sensors_metadata_clean.AREA_m2 / module_flat_surface_area))\n\n # categorize the sensors by surface_azimuth, B, GB\n result = np.vectorize(calc_categoriesroof)(sensors_metadata_clean.surface_azimuth, sensors_metadata_clean.B,\n sensors_metadata_clean.total_rad_Whm2, Max_Isol)\n sensors_metadata_clean['CATteta_z'] = result[0]\n sensors_metadata_clean['CATB'] = result[1]\n sensors_metadata_clean['CATGB'] = result[2]\n return sensors_metadata_clean", "def _compute_gravity_torque(self):\n pass", "def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n _Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = 
np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint", "def update_rhs(self, h, a, r):\n return Tensors(\n t1=r.t1 - a.t1 / cc_denom(h.f, 2, 'dir', 'full'),\n t2=r.t2 - a.t2 / cc_denom(h.f, 4, 'dir', 'full'),\n t3=r.t3 - (a.t3 - a.t3.transpose([0, 1, 2, 4, 3, 5])) /\n cc_denom(h.f, 6, 'dir', 'full')\n )", "def update_rhs(self, h, a, r):\n return Tensors(\n t1=r.t1 - a.t1 / cc_denom(h.f, 2, 'dir', 'full'),\n t2=r.t2 - a.t2 / cc_denom(h.f, 4, 'dir', 'full'),\n t3=r.t3 - (a.t3 - a.t3.transpose([0, 1, 2, 4, 3, 5])) /\n cc_denom(h.f, 6, 'dir', 'full')\n )", "def temp_update(self):\n a_w = self.k / self.dx\n a_e = self.k / self.dx\n a_n = self.k / self.dy\n a_s = self.k / self.dy\n a_p = a_w + a_e + a_n + a_s + self.rho * self.cp * self.dx / self.dt\n for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):\n self.T[i,\n j] = (a_w * self.T[i - 1, j] + a_e * self.T[i + 1, j] +\n a_s * self.T[i, j - 1] + a_n * self.T[i, j + 1]) / a_p", "def comp_torque(self, output):\n\n N0 = output.elec.N0\n omega = 2 * pi * N0 / 60\n\n P = output.elec.Pem_av_ref\n losses = output.elec.Pj_losses # TODO update since there may also be other losses\n\n Tem_av_ref = (P - losses) / omega\n\n output.elec.Tem_av_ref = Tem_av_ref", "def f(r,t):\r\n x = r[0]\r\n y = r[2]\r\n z = r[4]\r\n vx = r[1]\r\n vy = r[3]\r\n vz = r[5]\r\n velocity = np.sqrt(vx**2+vy**2+vz**2)\r\n #if np.abs(z)>eps:\r\n velocity = np.sqrt((vx+c*radius*wy)**2+(vy-c*radius*wx)**2+(-e*vz)**2)\r\n \r\n # equations for a cricket ball in motion\r\n return np.array([vx, (-k_d*velocity*vx+k_l*(wy*vz-wz*vy)),\r\n vy, (-k_d*velocity*vy+k_l*(wz*vx-wx*vz)),\r\n vz,(-k_d*velocity*vz+k_l*(wz*vy-wy*vx)-g)], float)", "def solve_atmospheric_entry(self, radius, velocity, density, strength, angle,\n init_altitude=100e3, ts=0.01, dt=0.05, tmax=120, radians=False):\n # RK4 solver\n def RK4(f, u0, t0, t_max, dt, args=()):\n \"\"\" Implement RK4 time-stepping to solve du/dt = f(t, u), given the RHS vector f,\n initial condition u0, start time t0, termination time t_max, and the timestep dt\n \"\"\"\n u = np.array(u0)\n t = np.array(t0)\n u_all = [u0]\n t_all = [t0]\n while t+dt < t_max:\n k1 = dt*f(t, u, *args)\n k2 = dt*f(t + 0.5*dt, u + 0.5*k1, *args)\n k3 = dt*f(t + 0.5*dt, u + 0.5*k2, *args)\n k4 = dt*f(t + dt, u + k3, *args)\n u = u + (1/6)*(k1 + 2*k2 + 2*k3 + k4)\n u_all.append(u)\n t = t + dt\n t_all.append(t)\n if u[3] <= 0:\n break # terminate at ground\n return np.array(u_all), np.array(t_all)\n\n # initial condition\n v0 = velocity\n m0 = (4/3) * np.pi * radius**3 * density\n if radians:\n theta0 = angle\n else:\n theta0 = angle * np.pi / 180\n z0 = init_altitude\n x0 = 0\n r0 = radius\n state0 = np.array([v0, m0, theta0, z0, x0, r0])\n\n # run solver\n t0 = 0\n sol = RK4(self.system, state0, t0, tmax, ts, args=(strength, density))\n\n # convert angles back to degrees if specfied at input\n if not radians:\n sol[0][:, 2] = sol[0][:, 2] * 180 / np.pi\n\n # interpolate results at the output timestep\n if dt == ts:\n t_out = sol[1].T\n sol_out = sol[0].T\n else:\n t_sol = sol[1]\n N = floor(t_sol[-1] * 0.9999 / dt)\n t_out = np.hstack([np.linspace(t_sol[0], N*dt, N+1), t_sol[-1]])\n sol_out = 
np.array([np.interp(t_out, t_sol, sol[0][:, j])\n for j in range(len(state0))])\n\n return pd.DataFrame({'velocity': sol_out[0, :],\n 'mass': sol_out[1, :],\n 'angle': sol_out[2, :],\n 'altitude': sol_out[3, :],\n 'distance': sol_out[4, :],\n 'radius': sol_out[5, :],\n 'time': t_out}, index=range(len(t_out)))", "def get_motor_load_torque(self):\n # Start with the brake normal\n # change to 17deg (tan 17?)\n # change to torque using the pitch of the thread on the ball screw\n # (^ make sure to take friction into account)\n # That should give us the torque acting on the motor. If this torque is greater than the motor max torque, it will slip\n # Take into account that the max holding torque is different from the max torque. How do we know if the motor is holding or moving? \n # How do we control the stepper motor? Where are the routines for that? \n pass", "def get_torque(self, theta, modulus):\n\n\t\treturn self.get_k(modulus)*theta", "def _calc_solar_from_clouds_and_angle(hr, ds_path):\n # Solar radiation [W/m^2] incident on top of atmosphere\n Q_o = 1368.0\n # Cloud model based on Dobson and Smith, table 5\n # SEA -- May 2010 : redid the cloud parametrization based on UBC\n # Solar data (/ocean/shared/SoG/met/solar/) fitting Q to cos_Z\n # (not Q/cos_Z as Kate did). Allen and Wolfe (2013). (0) no\n # clouds, (1) 1/10 cloud fraction (10) 100% clouds. Four sig\n # figs are what comes out of matlab but standard deviations are\n # 40W/m2 for low cloud fraction to 120 W/m2 for 6-9 cloud\n # fraction to 85 W/m2 for completely cloudy.\n cloud_consts = SimpleNamespace(\n A=numpy.array(\n [\n 0.6337,\n 0.6149,\n 0.5861,\n 0.5512,\n 0.5002,\n 0.4649,\n 0.4225,\n 0.3669,\n 0.2468,\n 0.1981,\n 0.0841,\n ]\n ),\n B=numpy.array(\n [\n 0.1959,\n 0.2119,\n 0.2400,\n 0.2859,\n 0.3192,\n 0.3356,\n 0.3339,\n 0.3490,\n 0.4427,\n 0.3116,\n 0.2283,\n ]\n ),\n )\n # Local standard time\n ## WARNING: .to(\"PST\") may be fragile and incorrect for summer-time dates\n lst = hr.to(\"PST\")\n # day_time is in seconds, LST\n day_time = (lst - lst.floor(\"day\")).seconds\n # hour of day as degrees from noon\n hour = (day_time / 3600 - 12) * 15\n # day is year-day\n day = (lst - lst.floor(\"year\")).days\n # solar declination [radians]\n declination = (\n 23.45 * numpy.pi / 180 * numpy.sin((284 + day) / 365.25 * 2 * numpy.pi)\n )\n # Latitude of approximate centre of model domain in radians\n lat = numpy.pi * 50 / 180\n # solar elevation\n elev_sin = numpy.sin(declination) * numpy.sin(lat)\n elev_cos = numpy.cos(declination) * numpy.cos(lat)\n cos_Z = elev_sin + elev_cos * numpy.cos(numpy.pi / 180 * hour)\n # cos of -hour_angle in radians\n hour_angle = numpy.tan(lat) * numpy.tan(declination)\n # assume we are south of the Arctic Circle\n day_length = numpy.arccos(-hour_angle) / 15 * 2 * 180 / numpy.pi\n sunrise = 12 - 0.5 * day_length # hours\n sunset = 12 + 0.5 * day_length # hours\n Qso = Q_o * (1 + 0.033 * numpy.cos(day / 365.25 * 2 * numpy.pi))\n with xarray.open_dataset(ds_path) as ds:\n cf_value = ds.percentcloud * 10\n fcf = numpy.floor(cf_value).astype(int) # integer below cf value\n fcf = xarray.where(fcf == 10, 9, fcf).data\n ccf = fcf + 1 # integer above cf value\n if (sunrise > day_time / 3600) or (day_time / 3600 > sunset):\n # nighttime\n return xarray.zeros_like(ds.percentcloud)\n return (\n Qso\n * (\n cloud_consts.A[fcf] * (ccf - cf_value)\n + cloud_consts.A[ccf] * (cf_value - fcf)\n + (\n cloud_consts.B[fcf] * (ccf - cf_value)\n + cloud_consts.B[ccf] * (cf_value - fcf)\n )\n * cos_Z\n )\n * cos_Z\n 
)", "def _calc_R_T_amp(self, polarization, n, delta):\n t_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n r_amp = np.zeros((len(self.structure), len(self.structure)), dtype=complex)\n# # debugging statement\n# print(\"\\nr_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,r_amp[i][j]))\n# # debugging statement\n# print(\"\\nt_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,t_amp[i][j]))\n\n for i in range(len(self.structure)-1):\n t_amp[i,i+1] = self._t_at_interface(polarization, n[i], n[i+1])\n r_amp[i,i+1] = self._r_at_interface(polarization, n[i], n[i+1])\n# # debugging statement\n# print(\"\\nmod r_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,r_amp[i][j]))\n# # debugging statement\n# print(\"\\nmod t_amp is:\")\n# for i in range(len(self.structure)):\n# for j in range(len(self.structure)):\n# print(\"{}{} {}\".format(i,j,t_amp[i][j]))\n\n M = np.zeros((len(self.structure),2,2),dtype=complex)\n# # debugging statement\n# print(\"\\nThe 'M' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"M{}{}{} ---> {}\".format(i,j,k,M[i][j][k]))\n\n m_r_amp = np.zeros((len(self.structure),2,2), dtype=complex)\n m_t_amp = np.zeros((len(self.structure),2,2), dtype=complex)\n for i in range(1,len(self.structure)-1):\n m_t_amp[i] = self._make_2x2(np.exp(-1j*delta[i]), 0., 0., np.exp(1j*delta[i]), dtype=complex)\n m_r_amp[i] = self._make_2x2(1., r_amp[i,i+1], r_amp[i,i+1], 1., dtype=complex)\n\n# # debugging statement\n# print(\"\\nThe temporary 'm_r_amp' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_r_amp{}{}{} ---> {}\".format(i,j,k,m_r_amp[i][j][k]))\n\n# # debugging statement\n# print(\"\\nThe temporary 'm_t_amp' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_t_amp{}{}{} ---> {}\".format(i,j,k,m_t_amp[i][j][k]))\n\n m_temp = np.dot(m_t_amp, m_r_amp)\n\n# # debugging statement\n# print(\"\\nThe 'm_temp' matrix is:\")\n# for i in m_temp:\n# print i\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"m_temp{}{}{} ---> {}\".format(i,j,k,m_temp[i][j][k]))\n\n for i in range(1,len(self.structure)-1):\n M[i] = 1/t_amp[i,i+1] * np.dot(self._make_2x2(np.exp(-1j*delta[i]),\n 0., 0., np.exp(1j*delta[i]),\n dtype=complex),\n self._make_2x2(1., r_amp[i,i+1], \\\n r_amp[i,i+1], 1., \\\n dtype=complex))\n# # debugging statement\n# print(\"\\nThe modified 'M' matrix is:\")\n# for i in range(len(self.structure)):\n# for j in range(2):\n# for k in range(2):\n# print(\"mod M{}{}{} ---> {}\".format(i,j,k,M[i][j][k]))\n\n M_prime = self._make_2x2(1., 0., 0., 1., dtype=complex)\n\n# # debugging statement\n# print(\"\\nThe first modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"1st mod M_prime{}{} ---> {}\".format(i,j,M_prime[i][j]))\n\n for i in range(1, len(self.structure)-1):\n# print(\"\\n'M_prime' #{} is:\\n{}\".format(i,M_prime))\n M_prime = np.dot(M_prime, M[i])\n\n# # debugging statement\n# print(\"\\nThe second modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"2nd mod M_prime{}{} ---> {}\".format(i,j,M_prime[i][j]))\n\n# print(\"\\nr_amp01 is ---> 
{}\".format(r_amp[0,1]))\n# print(\"t_amp01 is ---> {}\".format(t_amp[0,1]))\n\n mod_M_prime = self._make_2x2(1.,r_amp[0,1], r_amp[0,1], 1., dtype=complex)/t_amp[0,1]\n\n# # debugging statement\n# print(\"\\nThe third modified 'M_prime' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"3rd mod M_prime{}{} ---> {}\".format(i, j, mod_M_prime[i][j]))\n\n M_prime = np.dot(self._make_2x2(1., r_amp[0,1], r_amp[0,1], 1., \\\n dtype=complex)/t_amp[0,1], M_prime)\n\n# # debugging statement\n# print(\"\\nThe 'M_final' matrix is:\")\n# for i in range(2):\n# for j in range(2):\n# print(\"M_final{}{} ---> {}\".format(i, j, M_prime[i][j]))\n\n t = 1/M_prime[0,0]\n r = M_prime[0,1]/M_prime[0,0]\n\n# # debugging statement\n# print(\"\\n't' ---> {}\".format(t))\n# print(\"'r' ---> {}\".format(r))\n\n return (r, t)", "def _compute_gravity_torque(self, curr_date):\n if self._to_add[0]:\n # return gravity gradient torque in satellite frame\n body2inertial = self.earth.getBodyFrame().getTransformTo(self.in_frame, curr_date)\n body2sat = self.inertial2Sat.applyTo(body2inertial.getRotation())\n body2satRot = PyRotation(body2sat.q0,\n body2sat.q1,\n body2sat.q2,\n body2sat.q3)\n sat2bodyRot = body2satRot.revert()\n body2sat = body2satRot.getMatrix()\n sat2body = sat2bodyRot.getMatrix()\n\n satM = self.spacecraft_state.getMass()\n mCub = self.inCub['dm'] * satM\n # add booms\n if \"dm_boom\" in self.inCub:\n mCub = np.concatenate((mCub, self.inCub['dm_boom']), axis=0) # boom store with mass\n CoM = self.inCub['CoM_np']\n\n dmPos_s = CoM + self.satPos_s\n\n gNewton = (-self.muGM / np.linalg.norm(dmPos_s,\n axis=1,\n keepdims=True)**3) * dmPos_s\n\n # rotate vectors:\n dmPos_b = np.einsum('ij,kj->ki', sat2body, dmPos_s)\n\n gDist = np.empty(dmPos_b.shape)\n for i in xrange(0, dmPos_b.shape[0]):\n gDist[i, :] = np.asarray(\n self.GravityModel.gradient(curr_date,\n Vector3D(float(dmPos_b[i, 0]),\n float(dmPos_b[i, 1]),\n float(dmPos_b[i, 2])),\n self.muGM))\n\n gDist_s = np.einsum('ij,kj->ki', body2sat, gDist)\n\n gT = np.sum(np.cross(CoM, mCub*(gNewton + gDist_s)), axis=0)\n\n self._gTorque = Vector3D(float(gT[0]), float(gT[1]), float(gT[2]))\n\n else:\n self._gTorque = Vector3D.ZERO", "def driftRHS_3D(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.sqrt(f[0]**2 + f[1]**2 + f[2]**2)\n f = f/fs\n return -f*drift_velocity(fs)" ]
[ "0.69949347", "0.6673097", "0.60990465", "0.59711844", "0.58253074", "0.5795665", "0.57630336", "0.5722515", "0.5703465", "0.5680917", "0.5680917", "0.5657641", "0.5528349", "0.5487832", "0.5445919", "0.5436321", "0.5404575", "0.53043115", "0.52985704", "0.52985704", "0.5266157", "0.52504313", "0.52427036", "0.5234059", "0.5219405", "0.5214757", "0.5205143", "0.5197736", "0.5153261", "0.5143777" ]
0.6856325
1
Wait for task to complete (async 'accepted' task). Notes: certain operations use an async task pattern, where a 202 response on the initial POST is returned along with a self link to query. The operation is complete when one of the following is true (depends on the REST API): the self link returns 200 (as opposed to 202).
def _wait_for_task(self, task_url): response, status_code = self._client.make_request( http_utils.parse_url(task_url)['path'], advanced_return=True ) # check for async task pattern success/failure if status_code != constants.HTTP_STATUS_CODE['OK']: raise Exception('Successful status code not returned: %s' % status_code) if 'status' in response and response['status'].upper() not in ['FINISHED', 'COMPLETED']: raise Exception('Successful status message not returned: %s' % response['status']) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_wait(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n first_response = self.generate_task_dictionary(\n tid, state=\"waiting\", completed=False\n )\n\n responses = [\n {\"json\": first_response},\n {\"json\": self.generate_task_dictionary(tid)},\n ]\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, responses)\n\n task = self.client.site(site).task(tid).wait()\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"done\")", "async def handle_async(req):\n return await logic_async(req)", "def answer_waiting_call(self) -> None:", "async def wait_until_done(self) -> None:\n ...", "async def wait_async(self):\n await self._future", "def wait_for_task(task):\n task_done = False\n while not task_done:\n if task.info.state == 'success':\n return task.info.result\n\n if task.info.state == 'error':\n print \"there was an error\"\n #task_error = task.info.error.msg\n task_done = True", "def _async_wait(self, **kwargs):\n # Reset first query ID. It will be found from the logs below.\n self.query_id = None\n r = super().execute(self.q.query, async_=True, **kwargs)\n\n # Thrift statuses, with human names.\n statuses = TOperationState._VALUES_TO_NAMES\n\n full_error = []\n # Start marker of the useful error message\n short_prefix = 'info=['\n short_error = None\n\n status = None\n full_status_interval = 5\n last_full_status = datetime.datetime.utcnow()\n while status in [\n None,\n TOperationState.INITIALIZED_STATE,\n TOperationState.PENDING_STATE,\n TOperationState.RUNNING_STATE\n ]:\n\n time.sleep(0.5)\n new_status = self.poll().operationState\n\n if new_status != status:\n if status is not None:\n # Keep the last line of status, which was written with \\r\n print()\n print(f\"Status change. 
Was {statuses[status] if status is not None else 'None'},\" +\n f\" is now {statuses[new_status]}.\")\n status = new_status\n\n logs = self.fetch_logs()\n for message in logs:\n # Need to extract the query ID to talk to yarn\n if self.query_id is None:\n # The id is displayed many times, in a few different formats.\n # It looks like the earliest and most repeated is eg.\n # (queryId=hive_20190628102710_bf894b75-f6d4-4da2-b0a5-2a2d44045711)\n m = re.search(r'\\(queryId=(?P<qid>hive.*?)\\)', message)\n if m:\n self.query_id = m.group('qid')\n\n # If ERROR begins a line, let's remember and all after the full error message (contains massive\n # useless stacktrace and all attempts).\n # Extract as well the one relevant human-friendly line.\n if message.strip().startswith('ERROR') or (full_error and not message.strip().startswith('INFO')):\n full_error.append(message)\n if short_prefix in message and not short_error:\n short_error = message.partition(short_prefix)[2]\n # Sometimes the error is only one line long, without error_prefix\n if not short_error and full_error:\n short_error = full_error[0]\n\n logging.debug(message)\n if last_full_status + datetime.timedelta(seconds=full_status_interval) < datetime.datetime.utcnow():\n last_full_status = datetime.datetime.utcnow()\n try:\n self._print_progress_info()\n except Exception as e:\n # Whatever happens, trying to display progress should never stop the actual query run.\n # Furthermore, some of those errors are transient (mostly at query start)\n print(\"Error fetching progress info (query is probably not actually started yet): \" + str(e),\n end='\\r')\n if full_error:\n self.q.full_error('\\n'.join(full_error))\n self.q.short_error(short_error)\n print(self.q.short_error())\n\n print(f\"Final status is {statuses[status]}.\")\n return r", "def custom_wait_for_completion(task_description, output):\n state = 'UNSUBMITTED'\n while not (state == 'COMPLETED' or state =='FAILED'):\n output.add_live_msg(ms.STATUS.format(state))\n time.sleep(5)\n \n #search for the task in task_list\n for task in task_description:\n current_task = gs.isTask(task)\n if current_task:\n state = current_task.state\n if state == 'RUNNING' or state == 'FAILED': \n break\n \n return state", "def wait(self, task: RemoteTask) -> None:\n raise NotImplementedError()", "def wait_task(self, task_id, time_before_retry = 100):\n while True:\n res = AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/task/%d/\" % (self.url_index_name, task_id), self.client.timeout)\n if (res[\"status\"] == \"published\"):\n return res\n time.sleep(time_before_retry / 1000.)", "def apply_async(self):\n should_send = self._state in (TASK_PENDING, TASK_SENDING)\n if self._state == TASK_PENDING:\n self.set_state(TASK_SENDING)\n try:\n self._set_queue_kwargs()\n self.workflow_context.internal.handler.wait_for_result(\n self, self._task_target)\n if should_send:\n self.set_state(TASK_SENT)\n self.workflow_context.internal.handler.send_task(\n self, self._task_target, self._task_queue)\n except (exceptions.NonRecoverableError,\n exceptions.RecoverableError) as e:\n self.error = serialize_known_exception(e)\n self.set_state(TASK_FAILED)\n self.async_result.result = e\n return self.async_result", "def complete():\n\n # Check if the client is connected.\n if not session.is_connected():\n return dict(ok=False, error=\"Client not connected\")\n\n # Extract the result ftom the request body.\n body = request.json or {}\n result = body.get(\"result\")\n operation = 
body.get(\"operation\")\n\n # Mark the operation as solved.\n if manager.solve(session.get_sid(), operation, result):\n return dict(ok=True)\n\n return dict(ok=False, error=\"Operation not queued by this client\")", "def task_verify_site(self, req, resp, json_data):\n action = json_data.get('action', None)\n\n if action != 'verify_site':\n self.error(\n req.context,\n \"Task body ended up in wrong handler: action %s in task_verify_site\"\n % action)\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Error\",\n retry=False)\n\n try:\n task = self.create_task(json_data, req.context)\n resp.text = json.dumps(task.to_dict())\n resp.append_header('Location',\n \"/api/v1.0/tasks/%s\" % str(task.task_id))\n resp.status = falcon.HTTP_201\n except errors.InvalidFormat as ex:\n self.error(req.context, ex.msg)\n self.return_error(resp,\n falcon.HTTP_400,\n message=ex.msg,\n retry=False)", "def handle_task(msg):\n\n task = simplejson.loads(msg.body)\n\n if twistedae.taskqueue.is_deferred_eta(task['eta']):\n return False\n\n req = urllib2.Request(\n url='http://%(host)s:%(port)s%(url)s' % task,\n data=task['payload'],\n headers={'Content-Type': 'text/plain'}\n )\n\n try:\n res = urllib2.urlopen(req)\n except urllib2.URLError, err_obj:\n reason = getattr(err_obj, 'reason', err_obj)\n logging.error(\"failed task %s %s\" % (task, reason))\n return False\n\n return True", "def is_async(self) -> bool:", "def fusion_api_wait_for_task_to_complete(self, uri=None, api=None, headers=None, retries=5, sleep_time=5, param=''):\n task_attempts = 0\n for _ in range(0, retries):\n time.sleep(sleep_time)\n task_attempts += 1\n response = self.task.get(uri=uri, api=api, headers=headers, param=param)\n if response[\"percentComplete\"] == 100:\n break\n if task_attempts == retries:\n raise Exception(\n \"Task did not complete after %d tries.\" % retries)\n return response", "def run(request, resulthash):\n try:\n stored = models.HBTask.objects.get(resulthash=resulthash)\n except:\n stored = None\n thisone = {'Error', 'not found in database'}\n \n # Finished, and reported back\n if stored.status == models.HBTask.OK_STATUS:\n thisone = True\n\n # Submitted, have not heard from since\n elif stored.status == models.HBTask.PENDING_STATUS:\n obj = HbObject(hash=resulthash)\n status,fullstatus = check_stored_status(obj)\n thisone = fullstatus or True \n\n # resulted in error\n elif stored.status == models.HBTask.ERROR_STATUS:\n thisone = {'Error','something'}\n\n # no status: submit now\n else:\n # print 'Now status : ',stored.status\n # print 'Now submit task : ',stored.celery_taskname\n\n # to submit hb task\n todo = getattr(tasks,stored.hb_taskname)\n # celery_result = todo.delay(**json.loads(stored.parameters))\n parameters = json.loads(stored.parameters)\n \n action = todo(**parameters)\n\n if not action.ready_to_go:\n thisone = {'Warning':'Not all dependencies are met',\n 'dependency_status':action.dependency_status()}\n\n # Add me as waiting for a few\n todo = [d.split(':')[1] for d in action.dependencies_todo]\n dep = models.HBTask.objects.filter(resulthash__in=todo)\n for d in dep:\n w,isnew = models.Waiting.objects.get_or_create(todo=stored,dependency=d)\n # print 'Created ? 
',w,isnew\n # submit dependency to run\n run(None,resulthash=d.resulthash)\n else:\n action.submit()\n time.sleep(0.5)\n obj = HbObject(hash=resulthash)\n status,fullstatus = check_stored_status(obj)\n thisone = fullstatus or True \n\n return JsonResponse({'result':thisone})\n # return JsonResponse(thisone)", "async def get_task_result(task_id: TaskId):", "def Task(self) -> _n_10_t_0[TResult]:", "def on_get(self, req, resp, task_id):\n task_result = AsyncResult(task_id)\n result = {'status': task_result.status, 'result': task_result.result}\n resp.status = falcon.HTTP_200\n resp.body = json.dumps(result)", "def _wait_for_task(self, task_ref):\n return self.wait_for_task(task_ref)", "def _wait_for_task(self, task_ref):\n return self.wait_for_task(task_ref)", "def async_get():\n print(\"Updating YT Search Database\")\n status = async_get.schedule()\n return status", "def __await__(self):\n return self.waiter.__await__()", "def __await__(self):\n return self.waiter.__await__()", "async def execute(self):\n return True", "async def execute(self):\n return True", "async def execute(self):\n return True", "async def execute(self):\n return True", "async def execute(self):\n return True" ]
[ "0.61062074", "0.60079914", "0.60069877", "0.58521867", "0.581347", "0.5812886", "0.5768431", "0.5765323", "0.5762925", "0.5761387", "0.57408994", "0.573335", "0.5708843", "0.5695533", "0.5674839", "0.56686956", "0.56040645", "0.55940294", "0.5587207", "0.5552761", "0.5543241", "0.5543241", "0.55256265", "0.55056244", "0.55056244", "0.5503209", "0.5503209", "0.5503209", "0.5503209", "0.5503209" ]
0.6627306
0
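
The record above pairs a query describing the async 'accepted' (202) task pattern with one concrete `_wait_for_task` implementation. A minimal, generic sketch of the same polling idea follows; it assumes plain `requests` and a JSON body carrying a 'status' field, and the helper name, status values, and timeout handling are illustrative rather than taken from any particular REST API.

    import time
    import requests

    def wait_for_accepted_task(task_url, interval=2.0, timeout=120.0):
        """Poll an async task's self link until it reports completion.

        Illustrative helper: the field names and status values below are
        assumptions, not taken from any specific API.
        """
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            response = requests.get(task_url)
            if response.status_code == 200:
                # The self link now returns 200 instead of 202: the task is done.
                body = response.json()
                status = str(body.get('status', 'FINISHED')).upper()
                if status in ('FINISHED', 'COMPLETED'):
                    return body
                if status in ('FAILED', 'ERROR'):
                    raise RuntimeError('Task failed with status: %s' % status)
            elif response.status_code != 202:
                raise RuntimeError('Unexpected status code: %s' % response.status_code)
            # Still 202 (accepted): wait and poll again.
            time.sleep(interval)
        raise TimeoutError('Task did not complete within %.0f seconds' % timeout)

Usage would be a single call such as wait_for_accepted_task('https://host/api/task/123'), with the URL taken from the self link returned by the initial 202 response.
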
Test migration of field group references in field definitions
def test_field_fieldgroup_references(self): # Create field group self.test_group = RecordGroup_migration.create( self.testcoll, test_group_id, test_group_create_values ) # Create field definition referencing field group self.test_field = RecordField.create( self.testcoll, test_field_id, test_field_group_create_values ) # Apply migration to collection migrate_coll_data(self.testcoll) # Read field definition and check for inline field list field_data = self.check_entity_values( "_field", test_field_id, check_values=test_field_group_migrated_values ) self.assertNotIn("annal:group_ref", field_data) self.check_entity_does_not_exist("_group", test_group_id) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_field_rules():", "def test_group_field(self):\n field = self.record.find('field[@name=\\'groups_id\\']')\n self.assertEqual(field.attrib['eval'],\n '[(4, ref(\\'nh_clinical.group_nhc_admin\\'))]',\n 'Incorrect eval on groups id')", "def test_groups_group_ref_put(self):\n pass", "def test_defining_only_or_defer_on_nonexistant_fields_fails(self):", "def test_groups_group_ref_get(self):\n pass", "def test_entities__Field__1():\n zope.interface.verify.verifyObject(IField, Field())", "def test_normal_fields_can_be_defined_between_primary_keys(self):", "def test_products_ref_groups_put(self):\n pass", "def test_mutate_field(self):\n # Test adding a field\n with self.assertRaises(ValueError):\n self.email.add_field('', '')\n\n self.email.add_field(self.key, self.regex)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertTrue(found_key)\n self.assertEqual(found_regex, self.regex)\n\n # Test getting a field\n with self.assertRaises(LookupError):\n self.email.get_field('')\n\n field = self.email.get_field(self.key)\n self.assertEqual(\n field, {'key': self.key, 'regex': self.regex, 'value': []})\n\n # Test removing a field\n with self.assertRaises(LookupError):\n self.email.remove_field('')\n\n self.email.remove_field(self.key)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertFalse(found_key)\n self.assertNotEqual(found_regex, self.regex)", "def test_fields(self):\n\n class Foo(Model):\n field1 = StringField()\n field2 = IntegralField()\n\n assert hasattr(Foo, \"_fields\")\n assert type(Foo._fields) is dict\n\n assert not hasattr(Foo, \"field1\")\n assert \"field1\" in Foo._fields\n assert type(Foo._fields[\"field1\"]) is StringField\n\n assert not hasattr(Foo, \"field2\")\n assert \"field2\" in Foo._fields\n assert type(Foo._fields[\"field2\"]) is IntegralField", "def test_entities__Entity__addField__2(entities, entity, field):\n entity.addField(field)\n assert IDummy == entities[u'Field-1'].interface", "def test_replace_group(self):\n pass", "def test_update_group(self):\n pass", "def test_entities__Entity__addField__1(entities, entity, field):\n entity.addField(field)\n assert ['Field-1'] == list(entities.keys())\n assert field is entities[u'Field-1']", "def test_replace_groups(self):\n pass", "def test_entities__Entity__getFields__1(entity_with_field, schemaized_field):\n entity = entity_with_field\n entity.setFieldOrder(['dummy2', schemaized_field.__name__, 'dummy'])\n assert ([('dummy2', IDummy['dummy2']),\n (schemaized_field.__name__, schemaized_field),\n ('dummy', IDummy['dummy'])] == list(entity.getFields()))", "def test_update_groups_Returns_false_for_invalid_field(\n self, mock_modify_groups_file , mock_read_groups_file\n ):\n mock_read_groups_file.return_value = self.groups_data\n mock_modify_groups_file.return_value = True \n crud = CRUD()\n self.assertFalse(crud.update_groups(\"1\",\"invalidField\",\"nomgroupe\"))", "def test_field_types(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n db_relations = connection.introspection.get_relations(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n expected_field_type = None\n if db_relations.get(i):\n 
expected_field_type = u'ForeignKey'\n else:\n expected_field_type = get_field_type(db_cols[i].type_code)\n\n self.assertEqual(\n mb_fields[i].get_internal_type(),\n expected_field_type\n )", "def test_groups_group_ref_delete(self):\n pass", "def test_entities__Entity__getField__3(entity_with_field, schemaized_field):\n assert (schemaized_field ==\n entity_with_field.getField(schemaized_field.__name__))", "def _validate_fields(self, change_fields):\n pass", "def test_prep_fields(self):\n pass", "def test_all_field_dependencies_data(self, all_field_dependencies):\n # Select one dependency\n field_dependency = all_field_dependencies[4]\n # Tests whether the proper dependency is returned\n assert field_dependency == self.test_field_dep", "def test_entities__Entity__getFields__2(entity_with_field, schemaized_field):\n entity = entity_with_field\n entity.setFieldOrder(['dummy2', schemaized_field.__name__, 'dummy'])\n assert ([('dummy', IDummy['dummy']),\n ('dummy2', IDummy['dummy2']),\n (schemaized_field.__name__, schemaized_field)] ==\n list(entity.getFields(sorted=False)))", "def test_products_ref_groups_get(self):\n pass", "def test_fields(self):\n expected = (\n 'id',\n # Incoming foreign keys from subclasses\n 'routeredirect', # conman.redirects.models.RouteRedirect\n 'routesubclass', # tests.models.RouteSubclass\n 'templateroute', # tests.models.TemplateRoute\n 'urlconfroute', # tests.models.URLConfRoute\n 'urlredirect', # conman.redirects.models.URLRedirect\n 'viewroute', # tests.models.ViewRoute\n ) + NODE_BASE_FIELDS\n fields = field_names(Route)\n self.assertCountEqual(fields, expected)", "def test_aliases(self):\n field = self.base_field\n self.assertFalse(field.get('aliases'))\n self.assertEqual([], SchemaField(field).aliases)\n field['aliases'] = []\n self.assertEqual([], SchemaField(field).aliases)\n field['aliases'] = ['alias1', 'Alias2']\n sch = SchemaField(field)\n self.assertEqual(field['aliases'], sch.aliases)\n # test some related method\n self.assertTrue(sch.has_alias('alias1'))\n self.assertTrue(sch.has_alias('Alias2'))\n self.assertFalse(sch.has_alias('alias2'))\n self.assertTrue(sch.has_alias('alias2', icase=True))\n self.assertFalse(sch.has_alias(field['name']))\n self.assertTrue(sch.has_name_or_alias(field['name'], 'aaaa'))\n self.assertFalse(sch.has_name_or_alias(field['name'].lower(), 'aaaa'))\n self.assertTrue(sch.has_name_or_alias(field['name'].lower(), 'aaaa', icase=True))\n self.assertFalse(sch.has_name_or_alias('aaaa', 'alias2'))\n self.assertTrue(sch.has_name_or_alias('aaaa', 'alias2', icase=True))", "def test_change_relation_types(self):\n pass", "def db_fields(self):", "def test_patch_group(self):\n pass" ]
[ "0.61829984", "0.6167331", "0.6130675", "0.6105369", "0.6037458", "0.5969111", "0.59682834", "0.5849834", "0.5825737", "0.58188885", "0.5818871", "0.57729965", "0.5756657", "0.57528496", "0.57460386", "0.5722967", "0.5712481", "0.5708259", "0.5704885", "0.56987375", "0.5659786", "0.56534463", "0.5639173", "0.5606452", "0.5563066", "0.556083", "0.5552144", "0.5541039", "0.55325186", "0.5517654" ]
0.81420827
0
Test migration of field without tooltip
def test_field_comment_tooltip(self): # Create field definition self.test_field = RecordField.create( self.testcoll, test_field_id, test_field_tooltip_create_values ) # Apply migration to collection migrate_coll_data(self.testcoll) # Read field definition and check for inline field list field_data = self.check_entity_values( "_field", test_field_id, check_values=test_field_tooltip_migrated_values ) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_column_name(self):\n field = self.base_field\n sch = SchemaField(field)\n self.assertEqual(sch.name, sch.column_name)\n self.assertNotEqual(sch.column_name, sch.title)", "def test_validation_column(self):\n assert self.check.validation_column == \"foo_bar_is_unique_identifier\"", "def column_verbose_name():\r\n class PersonTable(tables.Table):\r\n \"\"\"\r\n The test_colX columns are to test that the accessor is used to\r\n determine the field on the model, rather than the column name.\r\n \"\"\"\r\n first_name = tables.Column()\r\n fn1 = tables.Column(accessor='first_name')\r\n fn2 = tables.Column(accessor='first_name.upper')\r\n fn3 = tables.Column(accessor='last_name', verbose_name='OVERRIDE')\r\n last_name = tables.Column()\r\n ln1 = tables.Column(accessor='last_name')\r\n ln2 = tables.Column(accessor='last_name.upper')\r\n ln3 = tables.Column(accessor='last_name', verbose_name='OVERRIDE')\r\n region = tables.Column(accessor='occupation.region.name')\r\n r1 = tables.Column(accessor='occupation.region.name')\r\n r2 = tables.Column(accessor='occupation.region.name.upper')\r\n r3 = tables.Column(accessor='occupation.region.name', verbose_name='OVERRIDE')\r\n trans_test = tables.Column()\r\n trans_test_lazy = tables.Column()\r\n\r\n # The Person model has a ``first_name`` and ``last_name`` field, but only\r\n # the ``last_name`` field has an explicit ``verbose_name`` set. This means\r\n # that we should expect that the two columns that use the ``last_name``\r\n # field should both use the model's ``last_name`` field's ``verbose_name``,\r\n # however both fields that use the ``first_name`` field should just use a\r\n # capitalized version of the column name as the column header.\r\n table = PersonTable(Person.objects.all())\r\n # Should be generated (capitalized column name)\r\n assert 'first name' == table.columns['first_name'].verbose_name\r\n assert 'first name' == table.columns['fn1'].verbose_name\r\n assert 'first name' == table.columns['fn2'].verbose_name\r\n assert 'OVERRIDE' == table.columns['fn3'].verbose_name\r\n # Should use the model field's verbose_name\r\n assert 'surname' == table.columns['last_name'].verbose_name\r\n assert 'surname' == table.columns['ln1'].verbose_name\r\n assert 'surname' == table.columns['ln2'].verbose_name\r\n assert 'OVERRIDE' == table.columns['ln3'].verbose_name\r\n assert 'name' == table.columns['region'].verbose_name\r\n assert 'name' == table.columns['r1'].verbose_name\r\n assert 'name' == table.columns['r2'].verbose_name\r\n assert 'OVERRIDE' == table.columns['r3'].verbose_name\r\n assert \"translation test\" == table.columns[\"trans_test\"].verbose_name\r\n assert \"translation test lazy\" == table.columns[\"trans_test_lazy\"].verbose_name\r\n\r\n # -------------------------------------------------------------------------\r\n\r\n # Now we'll try using a table with Meta.model\r\n class PersonTable(tables.Table):\r\n class Meta:\r\n model = Person\r\n # Issue #16\r\n table = PersonTable([])\r\n assert \"translation test\" == table.columns[\"trans_test\"].verbose_name\r\n assert \"translation test lazy\" == table.columns[\"trans_test_lazy\"].verbose_name", "def test_prep_fields(self):\n pass", "def test_Migration_columns(self):\n migration = self.DBSession.query(Migration).filter_by().first()\n if self.engine.dialect.name == 'sqlite': # pragma: no cover\n # pysqlite driver always convert the strings collumns to unicode\n self.assertIsInstance(migration.version_num, unicode)\n else: # pragma: no cover\n 
self.assertIsInstance(migration.version_num, str)", "def test_uuid_editable(self):\n uuid_field = Game._meta.get_field_by_name('uuid')[0]\n self.assertEqual(uuid_field.editable, True)", "def test_should_name_field(self):\n self.assertIn(\"name\", self.fields)", "def test_verbose_name_post(self): \n field_verboses = {\n \"title\": \"Название статьи\",\n \"text\": \"Текст статьи\",\n \"pub_date\": \"Дата публикации\",\n \"group\": \"Название группы\",\n \"author\": \"Автор статьи\",\n }\n for value, expected in field_verboses.items():\n with self.subTest(value=value):\n self.assertEqual(self.post._meta.get_field(value).verbose_name, expected)", "def test_help_text_post(self): \n field_help_text = {\n \"title\": \"Дайте название статье\",\n \"group\": \"Укажите группу для статьи\",\n } \n for value, expected in field_help_text.items():\n with self.subTest(value=value):\n self.assertEqual(self.post._meta.get_field(value).help_text, expected)", "def test_uuid_uneditable(self):\n id_field = Movie._meta.get_field_by_name('id')[0]\n self.assertEqual(id_field.editable, False)", "def test_entities__Field__1():\n zope.interface.verify.verifyObject(IField, Field())", "def test_entities__Entity__getRawField__3(entity_with_field, field):\n assert field == entity_with_field.getRawField(field.__name__)", "def test_favourite_fields(self):\n\n fav = Favourite.objects.get(id=1)\n\n # test the type of former_barcode field\n fav_type = fav._meta.get_field('former_barcode').get_internal_type()\n self.assertEqual(fav_type, 'CharField')\n # label former_barcode\n max_length = fav._meta.get_field('former_barcode').max_length\n self.assertEqual(max_length, 80)\n # test blank field in label former_barcode\n fav_blank = fav._meta.get_field('former_barcode').blank\n self.assertFalse(fav_blank)\n # test null field in label former_barcode\n fav_null = fav._meta.get_field('former_barcode').null\n self.assertFalse(fav_null)\n\n # test the type of favourite_barcode field\n fav_type = fav._meta.get_field('favourite_barcode').get_internal_type()\n self.assertEqual(fav_type, 'CharField')\n # label favourite_barcode\n max_length = fav._meta.get_field('favourite_barcode').max_length\n self.assertEqual(max_length, 80)\n # test blank field in label favourite_barcode\n fav_blank = fav._meta.get_field('favourite_barcode').blank\n self.assertFalse(fav_blank)\n # test null field in label favourite_barcode\n fav_null = fav._meta.get_field('favourite_barcode').null\n self.assertFalse(fav_null)\n\n # test the type of email_user field\n fav_type = fav._meta.get_field('email_user').get_internal_type()\n self.assertEqual(fav_type, 'CharField')\n # label email_user\n max_length = fav._meta.get_field('email_user').max_length\n self.assertEqual(max_length, 150)\n # test blank field in label email_user\n fav_blank = fav._meta.get_field('email_user').blank\n self.assertFalse(fav_blank)\n # test null field in label email_user\n fav_null = fav._meta.get_field('email_user').null\n self.assertFalse(fav_null)", "def test_issue_edit_label(self):\n pass", "def test_bool_field():", "def check_property(self, descriptor):\r\n self.assertEqual(descriptor.get_html(), descriptor.render('studio_view').content)", "def test_datetime_field():", "def test_table_false_positives(self):\n pass", "def generate_altered_fields(self):\n result = super(MigrationAutodetector, self).generate_altered_fields()\n self.generate_sql_changes()\n return result", "def test_normal_fields_can_be_defined_between_primary_keys(self):", "def db_fields(self):", "def 
test_entities__Entity__getRawField__1(entity):\n with pytest.raises(KeyError):\n entity.getRawField('asdf')", "def test_serve_user_field(self):\n pass", "def test_text_field():", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n self.assertEqual(\n self.form.fields[\"texture\"].help_text,\n \"One word descriptions seperated by commas.\",\n )", "def test_entities__Entity__getRawField__2(entity):\n assert IDummy['dummy2'] == entity.getRawField('dummy2')", "def post_migrate_function(obj):\n obj.a = obj.a + u\"-modified\"\n return True", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n self.assertEqual(\n self.form.fields[\"company_url\"].help_text,\n \"Please ensure this is a valid web address.\",\n )", "def test_fields_updated_with_computed(self):\n pass", "def test_product_fields(self):\n\n prd = Product.objects.get(id=1)\n\n # test the type of name field\n prd_type = prd._meta.get_field('name').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label name\n max_length = prd._meta.get_field('name').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label name\n prd_blank = prd._meta.get_field('name').blank\n self.assertTrue(prd_blank)\n # test null field in label name\n prd_null = prd._meta.get_field('name').null\n self.assertTrue(prd_null)\n\n # test the type of description field\n prd_type = prd._meta.get_field('description').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label description\n max_length = prd._meta.get_field('description').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label description\n prd_blank = prd._meta.get_field('description').blank\n self.assertTrue(prd_blank)\n # test null field in label description\n prd_null = prd._meta.get_field('description').null\n self.assertTrue(prd_null)\n\n # test the type of nutrition_grade field\n prd_type = prd._meta.get_field('nutrition_grade').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label nutrition_grade\n max_length = prd._meta.get_field('nutrition_grade').max_length\n self.assertEqual(max_length, 1)\n # test blank field in label nutrition_grade\n prd_blank = prd._meta.get_field('nutrition_grade').blank\n self.assertTrue(prd_blank)\n # test null field in label nutrition_grade\n prd_null = prd._meta.get_field('nutrition_grade').null\n self.assertTrue(prd_null)\n\n # test the type of barcode field\n prd_type = prd._meta.get_field('barcode').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label barcode\n max_length = prd._meta.get_field('barcode').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label barcode\n prd_blank = prd._meta.get_field('barcode').blank\n self.assertFalse(prd_blank)\n # test null field in label barcode\n prd_null = prd._meta.get_field('barcode').null\n self.assertFalse(prd_null)\n\n # test the type of url field\n prd_type = prd._meta.get_field('url').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url\n max_length = prd._meta.get_field('url').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url\n prd_blank = prd._meta.get_field('url').blank\n self.assertTrue(prd_blank)\n # test null field in label url\n prd_null 
= prd._meta.get_field('url').null\n self.assertTrue(prd_null)\n\n # test the type of url_pic field\n prd_type = prd._meta.get_field('url_pic').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url_pic\n max_length = prd._meta.get_field('url_pic').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url_pic\n prd_blank = prd._meta.get_field('url_pic').blank\n self.assertTrue(prd_blank)\n # test null field in label url_pic\n prd_null = prd._meta.get_field('url_pic').null\n self.assertTrue(prd_null)\n\n # test the type of store field\n prd_type = prd._meta.get_field('store').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label store\n max_length = prd._meta.get_field('store').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label store\n prd_blank = prd._meta.get_field('store').blank\n self.assertTrue(prd_blank)\n # test null field in label store\n prd_null = prd._meta.get_field('store').null\n self.assertTrue(prd_null)\n\n # test the type of fat field\n prd_type = prd._meta.get_field('fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label fat max digits\n max_digits = prd._meta.get_field('fat').max_digits\n self.assertEqual(max_digits, 5)\n # label fat decimal places\n dec_places = prd._meta.get_field('fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label fat\n prd_blank = prd._meta.get_field('fat').blank\n self.assertTrue(prd_blank)\n # test null field in label fat\n prd_null = prd._meta.get_field('fat').null\n self.assertTrue(prd_null)\n\n # test the type of saturated_fat field\n prd_type = prd._meta.get_field('saturated_fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label saturated_fat max digits\n max_digits = prd._meta.get_field('saturated_fat').max_digits\n self.assertEqual(max_digits, 5)\n # label saturated_fat decimal places\n dec_places = prd._meta.get_field('saturated_fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label saturated_fat\n prd_blank = prd._meta.get_field('saturated_fat').blank\n self.assertTrue(prd_blank)\n # test null field in label saturated_fat\n prd_null = prd._meta.get_field('saturated_fat').null\n self.assertTrue(prd_null)\n\n # test the type of sugar field\n prd_type = prd._meta.get_field('sugar').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label sugar max digits\n max_digits = prd._meta.get_field('sugar').max_digits\n self.assertEqual(max_digits, 5)\n # label sugar decimal places\n dec_places = prd._meta.get_field('sugar').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label sugar\n prd_blank = prd._meta.get_field('sugar').blank\n self.assertTrue(prd_blank)\n # test null field in label sugar\n prd_null = prd._meta.get_field('sugar').null\n self.assertTrue(prd_null)\n\n # test the type of salt\n prd_type = prd._meta.get_field('salt').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label salt max digits\n max_digits = prd._meta.get_field('salt').max_digits\n self.assertEqual(max_digits, 5)\n # label salt decimal places\n dec_places = prd._meta.get_field('salt').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label salt\n prd_blank = prd._meta.get_field('salt').blank\n self.assertTrue(prd_blank)\n # test null field in label salt\n prd_null = prd._meta.get_field('salt').null\n self.assertTrue(prd_null)\n\n # test the type of prd_cat\n prd_type = 
prd._meta.get_field('prd_cat').get_internal_type()\n self.assertEqual(prd_type, 'ForeignKey')\n # label db_column\n fk = prd._meta.get_field('prd_cat').db_column\n self.assertEqual(fk, 'prd_cat')\n # test blank field in label prd_cat\n prd_blank = prd._meta.get_field('prd_cat').blank\n self.assertFalse(prd_blank)\n # test null field in label prd_cat\n prd_null = prd._meta.get_field('prd_cat').null\n self.assertFalse(prd_null)\n\n # Favourite table ----------------------------------------------------" ]
[ "0.6383244", "0.6045873", "0.5803746", "0.57448345", "0.57064986", "0.5700613", "0.5686739", "0.5685216", "0.5632432", "0.5609656", "0.55931246", "0.5588414", "0.5569909", "0.55542284", "0.5545484", "0.55209637", "0.55139685", "0.55029345", "0.5502049", "0.5484399", "0.5468578", "0.54618156", "0.5431891", "0.5420319", "0.5415872", "0.5410062", "0.53791946", "0.5359344", "0.534949", "0.5345656" ]
0.69139206
0
Test migration of view fields
def test_migrate_view_fields(self): self.test_view = RecordView.create( self.testcoll, test_view_id, test_view_create_values ) migrate_coll_data(self.testcoll) # Read field definition and check for inline field list view_data = self.check_entity_values( "_view", test_view_id, check_values=test_view_migrated_values ) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_prep_fields(self):\n pass", "def test_Migration_columns(self):\n migration = self.DBSession.query(Migration).filter_by().first()\n if self.engine.dialect.name == 'sqlite': # pragma: no cover\n # pysqlite driver always convert the strings collumns to unicode\n self.assertIsInstance(migration.version_num, unicode)\n else: # pragma: no cover\n self.assertIsInstance(migration.version_num, str)", "def test_favourite_fields(self):\n\n fav = Favourite.objects.get(id=1)\n\n # test the type of former_barcode field\n fav_type = fav._meta.get_field('former_barcode').get_internal_type()\n self.assertEqual(fav_type, 'CharField')\n # label former_barcode\n max_length = fav._meta.get_field('former_barcode').max_length\n self.assertEqual(max_length, 80)\n # test blank field in label former_barcode\n fav_blank = fav._meta.get_field('former_barcode').blank\n self.assertFalse(fav_blank)\n # test null field in label former_barcode\n fav_null = fav._meta.get_field('former_barcode').null\n self.assertFalse(fav_null)\n\n # test the type of favourite_barcode field\n fav_type = fav._meta.get_field('favourite_barcode').get_internal_type()\n self.assertEqual(fav_type, 'CharField')\n # label favourite_barcode\n max_length = fav._meta.get_field('favourite_barcode').max_length\n self.assertEqual(max_length, 80)\n # test blank field in label favourite_barcode\n fav_blank = fav._meta.get_field('favourite_barcode').blank\n self.assertFalse(fav_blank)\n # test null field in label favourite_barcode\n fav_null = fav._meta.get_field('favourite_barcode').null\n self.assertFalse(fav_null)\n\n # test the type of email_user field\n fav_type = fav._meta.get_field('email_user').get_internal_type()\n self.assertEqual(fav_type, 'CharField')\n # label email_user\n max_length = fav._meta.get_field('email_user').max_length\n self.assertEqual(max_length, 150)\n # test blank field in label email_user\n fav_blank = fav._meta.get_field('email_user').blank\n self.assertFalse(fav_blank)\n # test null field in label email_user\n fav_null = fav._meta.get_field('email_user').null\n self.assertFalse(fav_null)", "def test_VIEW_pass(self):\n for V in self.mod.views.itervalues():\n self.assertTrue(V.isset)", "def test_uuid_editable(self):\n uuid_field = Game._meta.get_field_by_name('uuid')[0]\n self.assertEqual(uuid_field.editable, True)", "def db_fields(self):", "def generate_altered_fields(self):\n result = super(MigrationAutodetector, self).generate_altered_fields()\n self.generate_sql_changes()\n return result", "def test_set_user_field(self):\n pass", "def test_fields_updated_with_computed(self):\n pass", "def test_domain_and_target_type(self):\n t = View(shape=None, index=None, domain_type=\"some fancy type\")\n assert t.domain_type == \"some fancy type\"\n assert t.target_type == \"some fancy type\"", "def test_select_field():", "def test_make_form_field():", "def migration():", "def test_fields(self):\n\n class Foo(Model):\n field1 = StringField()\n field2 = IntegralField()\n\n assert hasattr(Foo, \"_fields\")\n assert type(Foo._fields) is dict\n\n assert not hasattr(Foo, \"field1\")\n assert \"field1\" in Foo._fields\n assert type(Foo._fields[\"field1\"]) is StringField\n\n assert not hasattr(Foo, \"field2\")\n assert \"field2\" in Foo._fields\n assert type(Foo._fields[\"field2\"]) is IntegralField", "def test_field_names(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, 
len(mb_model._meta.fields)):\n self.assertEqual(\n mb_fields[i].column,\n db_cols[i].name\n )", "def test_uuid_uneditable(self):\n id_field = Movie._meta.get_field_by_name('id')[0]\n self.assertEqual(id_field.editable, False)", "def test_serve_user_field(self):\n pass", "def test_fields(self):\n expected = (\n 'id',\n # Incoming foreign keys from subclasses\n 'routeredirect', # conman.redirects.models.RouteRedirect\n 'routesubclass', # tests.models.RouteSubclass\n 'templateroute', # tests.models.TemplateRoute\n 'urlconfroute', # tests.models.URLConfRoute\n 'urlredirect', # conman.redirects.models.URLRedirect\n 'viewroute', # tests.models.ViewRoute\n ) + NODE_BASE_FIELDS\n fields = field_names(Route)\n self.assertCountEqual(fields, expected)", "def test_normal_fields_can_be_defined_between_primary_keys(self):", "def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):\n session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1, node2, node3 = self.cluster.nodelist()\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t \"\n \"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, 1, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected column, view row is removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # update unselected with ts=3, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # insert livenesssInfo, view row should be alive\n self.update_view(session, \"INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be alive because of base livenessInfo alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # add selected column, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # update unselected, view row should be alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;\", flush)\n 
assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n # delete with ts=3, view row should be alive due to unselected@ts4\n self.update_view(session, \"DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n\n # remove unselected, view row should be removed\n self.update_view(session, \"UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=7, view row is alive\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, 1, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, 1])\n\n # remove selected with ts=7, view row is dead\n self.update_view(session, \"UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;\", flush)\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")\n\n # add selected with ts=5, view row is alive (selected column should not affects each other)\n self.update_view(session, \"UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;\", flush)\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n\n start = time.time()\n # add selected with ttl=30 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET a=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, 1, None, None, None])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, 1, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n start = time.time()\n # update unselected with ttl=30, view row should be alive\n update_time = self.update_view(session, \"UPDATE t USING TTL 30 SET f=1 WHERE k=1 AND c=1;\", flush)\n try:\n assert_one(session, \"SELECT * FROM t\", [1, 1, None, None, None, 1])\n assert_one(session, \"SELECT * FROM mv\", [1, 1, None, None])\n except AssertionError as ae:\n if (time.time() - start) >= 30:\n pytest.fail(\"Please increase the 30 TTL which expired before we could test due to a slow env.\")\n else:\n raise ae\n\n wait_time = update_time + 30 - time.time()\n if wait_time > 0:\n time.sleep(wait_time)\n\n # view row still alive due to base livenessInfo\n assert_none(session, \"SELECT * FROM t\")\n assert_none(session, \"SELECT * FROM mv\")", "def test_int_field():", "def test_should_name_field(self):\n self.assertIn(\"name\", self.fields)", "def test_migrate_list_fields(self):\n self.test_list = RecordList.create(\n self.testcoll, test_list_id, test_list_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_list\", test_list_id, check_values=test_list_migrated_values\n )\n return", "def MigrateV2View(view, log):\n newView, warnFlag = {\n View.Administrator: ([qtiv2.core.View.proctor], True),\n View.AdminAuthority: ([qtiv2.core.View.proctor], True),\n 
View.Assessor: ([qtiv2.core.View.scorer], True),\n View.Author: ([qtiv2.core.View.author], False),\n View.Candidate: ([qtiv2.core.View.candidate], False),\n View.Invigilator: ([qtiv2.core.View.proctor], False),\n View.Proctor: ([qtiv2.core.View.proctor], False),\n View.InvigilatorProctor: ([qtiv2.core.View.proctor], False),\n View.Psychometrician: ([qtiv2.core.View.testConstructor], True),\n View.Scorer: ([qtiv2.core.View.scorer], False),\n View.Tutor: ([qtiv2.core.View.tutor], False),\n View.All: ([\n qtiv2.core.View.author,\n qtiv2.core.View.candidate,\n qtiv2.core.View.proctor,\n qtiv2.core.View.scorer,\n qtiv2.core.View.testConstructor,\n qtiv2.core.View.tutor], False)\n }[view]\n if warnFlag:\n log.append(\"Warning: changing view %s to %s\" % (\n View.to_str(view), qtiv2.core.View.list_to_str(newView)))\n return newView", "def test_field_rules():", "def test_column_attributes_handled_correctly(self):\r\n\r\n #check class attibutes\r\n self.assertHasAttr(TestModel, '_columns')\r\n self.assertHasAttr(TestModel, 'vid')\r\n self.assertHasAttr(TestModel, 'text')\r\n\r\n #check instance attributes\r\n inst = TestModel()\r\n self.assertHasAttr(inst, 'vid')\r\n self.assertHasAttr(inst, 'text')\r\n self.assertIsNone(inst.vid)\r\n self.assertIsNone(inst.text)", "def test_entities__Field__1():\n zope.interface.verify.verifyObject(IField, Field())", "def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting 1 materialized view == got\" + str(result)", "def test_create_view(self):\n config = config_factory()\n db = config.arango_db\n\n # Create analyzer\n analyzer = ArangoAnalyzer(\"analyzer_sample\")\n analyzer.set_stopwords(\n language=\"english\",\n custom_stopwords=[\"stop\", \"word\"],\n include_default=False,\n )\n analyzer.type = ArangoAnalyzer._TYPE_TEXT\n\n analyzer.create(db)\n\n # Create Link - a view can hvae 0 to * links\n link = Link(name=\"TextNode\") # Name of a collection in the database\n linkAnalyzers = AnalyzerList([\"identity\"])\n link.analyzers = linkAnalyzers\n\n # A link can have 0..* fields\n field = Field(\n \"text\",\n AnalyzerList([\"text_en\", \"invalid_analyzer\", \"analyzer_sample\"]),\n ) # text_en is a predifined analyzer from arango\n field.analyzers.filter_invalid_analyzers(\n db, verbose=1\n ) # filters out the analyzer that are not defined in the database\n\n assert (\n str(field.analyzers)\n == \"AnalyzerList(analyzerList=['text_en', 'analyzer_sample'], database=None)\"\n )\n\n link.add_field(field)\n\n ## Show the dict format of all the fields in a link\n assert link.get_fields_dict() == {\n \"text\": {\"analyzers\": [\"text_en\", \"analyzer_sample\"]}\n }\n\n # create view\n view = View(\"sample_view\", view_type=\"arangosearch\")\n ## add the link (can have 0 or 1 link)\n view.add_link(link)\n\n ## can have 0..* primary sort\n view.add_primary_sort(\"text\", asc=False)\n view.add_stored_value([\"text\", \"timestamp\"], compression=\"lz4\")\n\n assert view.summary() == {\n \"name\": \"sample_view\",\n \"viewType\": \"arangosearch\",\n \"properties\": {\n \"cleanupintervalstep\": 0,\n \"cleanupIntervalStep\": 0,\n \"commitIntervalMsec\": 1000,\n \"consolidationIntervalMsec\": 0,\n \"consolidationPolicy\": {\n \"type\": \"tier\",\n \"segmentsMin\": 1,\n \"segmentsMax\": 10,\n \"segmentsBytesMax\": 5368709120,\n \"segmentsBytesFloor\": 2097152,\n \"minScore\": 0,\n 
},\n \"primarySortCompression\": \"lz4\",\n \"writebufferIdle\": 64,\n \"writebufferActive\": 0,\n \"writebufferMaxSize\": 33554432,\n },\n \"links\": {\n \"TextNode\": {\n \"analyzers\": [\"identity\"],\n \"fields\": {\n \"text\": {\"analyzers\": [\"text_en\", \"analyzer_sample\"]}\n },\n \"includeAllFields\": False,\n \"trackListPositions\": False,\n \"inBackground\": False,\n }\n },\n \"primarySort\": [{\"field\": \"text\", \"asc\": False}],\n \"storedValues\": [\n {\"fields\": [\"text\"], \"compression\": \"lz4\"},\n {\"fields\": [\"timestamp\"], \"compression\": \"lz4\"},\n ],\n }\n\n ## creates the view in the database\n view.create(db)\n\n assert db.view(\"sample_view\")[\"name\"] == \"sample_view\"", "def test_product_fields(self):\n\n prd = Product.objects.get(id=1)\n\n # test the type of name field\n prd_type = prd._meta.get_field('name').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label name\n max_length = prd._meta.get_field('name').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label name\n prd_blank = prd._meta.get_field('name').blank\n self.assertTrue(prd_blank)\n # test null field in label name\n prd_null = prd._meta.get_field('name').null\n self.assertTrue(prd_null)\n\n # test the type of description field\n prd_type = prd._meta.get_field('description').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label description\n max_length = prd._meta.get_field('description').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label description\n prd_blank = prd._meta.get_field('description').blank\n self.assertTrue(prd_blank)\n # test null field in label description\n prd_null = prd._meta.get_field('description').null\n self.assertTrue(prd_null)\n\n # test the type of nutrition_grade field\n prd_type = prd._meta.get_field('nutrition_grade').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label nutrition_grade\n max_length = prd._meta.get_field('nutrition_grade').max_length\n self.assertEqual(max_length, 1)\n # test blank field in label nutrition_grade\n prd_blank = prd._meta.get_field('nutrition_grade').blank\n self.assertTrue(prd_blank)\n # test null field in label nutrition_grade\n prd_null = prd._meta.get_field('nutrition_grade').null\n self.assertTrue(prd_null)\n\n # test the type of barcode field\n prd_type = prd._meta.get_field('barcode').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label barcode\n max_length = prd._meta.get_field('barcode').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label barcode\n prd_blank = prd._meta.get_field('barcode').blank\n self.assertFalse(prd_blank)\n # test null field in label barcode\n prd_null = prd._meta.get_field('barcode').null\n self.assertFalse(prd_null)\n\n # test the type of url field\n prd_type = prd._meta.get_field('url').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url\n max_length = prd._meta.get_field('url').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url\n prd_blank = prd._meta.get_field('url').blank\n self.assertTrue(prd_blank)\n # test null field in label url\n prd_null = prd._meta.get_field('url').null\n self.assertTrue(prd_null)\n\n # test the type of url_pic field\n prd_type = prd._meta.get_field('url_pic').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url_pic\n max_length = prd._meta.get_field('url_pic').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url_pic\n 
prd_blank = prd._meta.get_field('url_pic').blank\n self.assertTrue(prd_blank)\n # test null field in label url_pic\n prd_null = prd._meta.get_field('url_pic').null\n self.assertTrue(prd_null)\n\n # test the type of store field\n prd_type = prd._meta.get_field('store').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label store\n max_length = prd._meta.get_field('store').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label store\n prd_blank = prd._meta.get_field('store').blank\n self.assertTrue(prd_blank)\n # test null field in label store\n prd_null = prd._meta.get_field('store').null\n self.assertTrue(prd_null)\n\n # test the type of fat field\n prd_type = prd._meta.get_field('fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label fat max digits\n max_digits = prd._meta.get_field('fat').max_digits\n self.assertEqual(max_digits, 5)\n # label fat decimal places\n dec_places = prd._meta.get_field('fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label fat\n prd_blank = prd._meta.get_field('fat').blank\n self.assertTrue(prd_blank)\n # test null field in label fat\n prd_null = prd._meta.get_field('fat').null\n self.assertTrue(prd_null)\n\n # test the type of saturated_fat field\n prd_type = prd._meta.get_field('saturated_fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label saturated_fat max digits\n max_digits = prd._meta.get_field('saturated_fat').max_digits\n self.assertEqual(max_digits, 5)\n # label saturated_fat decimal places\n dec_places = prd._meta.get_field('saturated_fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label saturated_fat\n prd_blank = prd._meta.get_field('saturated_fat').blank\n self.assertTrue(prd_blank)\n # test null field in label saturated_fat\n prd_null = prd._meta.get_field('saturated_fat').null\n self.assertTrue(prd_null)\n\n # test the type of sugar field\n prd_type = prd._meta.get_field('sugar').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label sugar max digits\n max_digits = prd._meta.get_field('sugar').max_digits\n self.assertEqual(max_digits, 5)\n # label sugar decimal places\n dec_places = prd._meta.get_field('sugar').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label sugar\n prd_blank = prd._meta.get_field('sugar').blank\n self.assertTrue(prd_blank)\n # test null field in label sugar\n prd_null = prd._meta.get_field('sugar').null\n self.assertTrue(prd_null)\n\n # test the type of salt\n prd_type = prd._meta.get_field('salt').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label salt max digits\n max_digits = prd._meta.get_field('salt').max_digits\n self.assertEqual(max_digits, 5)\n # label salt decimal places\n dec_places = prd._meta.get_field('salt').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label salt\n prd_blank = prd._meta.get_field('salt').blank\n self.assertTrue(prd_blank)\n # test null field in label salt\n prd_null = prd._meta.get_field('salt').null\n self.assertTrue(prd_null)\n\n # test the type of prd_cat\n prd_type = prd._meta.get_field('prd_cat').get_internal_type()\n self.assertEqual(prd_type, 'ForeignKey')\n # label db_column\n fk = prd._meta.get_field('prd_cat').db_column\n self.assertEqual(fk, 'prd_cat')\n # test blank field in label prd_cat\n prd_blank = prd._meta.get_field('prd_cat').blank\n self.assertFalse(prd_blank)\n # test null field in label prd_cat\n prd_null = 
prd._meta.get_field('prd_cat').null\n self.assertFalse(prd_null)\n\n # Favourite table ----------------------------------------------------" ]
[ "0.64087135", "0.6328817", "0.60160935", "0.6006993", "0.59865445", "0.59838164", "0.59082943", "0.590558", "0.5854425", "0.58354616", "0.58255655", "0.5821566", "0.5816661", "0.57960916", "0.5781735", "0.5768386", "0.5765273", "0.5749321", "0.5749269", "0.5725564", "0.57224756", "0.57212037", "0.5707461", "0.57048035", "0.5688792", "0.56753486", "0.5675025", "0.56671923", "0.56506896", "0.5633957" ]
0.8429944
0
Test migration of list fields
def test_migrate_list_fields(self): self.test_list = RecordList.create( self.testcoll, test_list_id, test_list_create_values ) migrate_coll_data(self.testcoll) # Read field definition and check for inline field list view_data = self.check_entity_values( "_list", test_list_id, check_values=test_list_migrated_values ) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_field():", "def test_single_field_is_listified(self):\r\n ss = SelectStatement('table', 'field')\r\n self.assertEqual(ss.fields, ['field'])", "def test__list_fields(self):\n correct_fields = [\n \"distance\",\n \"verbose\",\n \"min_core_neighbors\",\n \"num_features\",\n \"unpacked_features\",\n \"num_distance_components\",\n \"training_time\",\n \"radius\",\n \"num_unpacked_features\",\n \"num_examples\",\n \"cluster_id\",\n \"num_clusters\",\n \"features\",\n ]\n\n self.assertItemsEqual(self.model._list_fields(), correct_fields)", "def test_prep_fields(self):\n pass", "def test_list(self):\n pass", "def test_list(self):\n pass", "def test_column(self):\n c = Column('foo')\n self.assertEqual(list(c), [])", "def test_migrate_view_fields(self):\n self.test_view = RecordView.create(\n self.testcoll, test_view_id, test_view_create_values\n )\n migrate_coll_data(self.testcoll)\n # Read field definition and check for inline field list\n view_data = self.check_entity_values(\n \"_view\", test_view_id, check_values=test_view_migrated_values\n )\n return", "def test_field_names(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n self.assertEqual(\n mb_fields[i].column,\n db_cols[i].name\n )", "def test_column_data(self):\n c = Column('foo', range(3))\n self.assertEqual(list(c), [0, 1, 2])", "def db_fields(self):", "def test_single_field_is_listified(self):\r\n ds = DeleteStatement('table', 'field')\r\n self.assertEqual(len(ds.fields), 1)\r\n self.assertEqual(ds.fields[0].field, 'field')", "def test_Migration_columns(self):\n migration = self.DBSession.query(Migration).filter_by().first()\n if self.engine.dialect.name == 'sqlite': # pragma: no cover\n # pysqlite driver always convert the strings collumns to unicode\n self.assertIsInstance(migration.version_num, unicode)\n else: # pragma: no cover\n self.assertIsInstance(migration.version_num, str)", "def test_list_format(self) -> None:\n r = self.perform_request('list', False)\n self.assert_json_schema(r.json(), self.get_list_schema())", "def test_list_default(self):\r\n default = [1,2]\r\n prop = String(default=default, required=True)\r\n self.assertEqual(prop.to_database(None), prop.to_database(default))", "def set_list_fields(self,fields):\n # import pdb;pdb.set_trace()\n \n if not fields:\n fields = []\n \n list_fields_temp = [x for x in fields] # make a copy\n \n if not isinstance(fields,list):\n fields = [fields]\n \n for field in fields:\n if isinstance(field,str):\n # assume to be a field name\n field = {'name':field,'label':'{}'.format(self.make_label(field)),'class':'','search':True,'type':\"TEXT\",'list':True}\n if not isinstance(field,dict):\n continue # it must be a dict\n for x in range(len(list_fields_temp)-1,-1,-1): # turkey shoot loop\n default_field_dict = {'label':'','class':'','search':True}\n if not isinstance(list_fields_temp[x],dict) or 'name' not in list_fields_temp[x]:\n # bad element got into fields somehow...\n del list_fields_temp[x]\n continue\n if list_fields_temp[x].get('name',False) == field.get('name',None):\n default_field_dict = {'label':'','class':'','search':True,'type':'TEXT','default':'','list':True}\n for k in default_field_dict.keys():\n if k in field:\n default_field_dict.update({k:field[k]})\n elif k == 'type':\n field_type = \"TEXT\"\n try:\n field_type = self.table.get_column_type(field['name'])\n except 
KeyError:\n # the field name may be defined in the query \n pass\n default_field_dict.update({k:field_type})\n \n break\n \n list_fields_temp[x].update(default_field_dict)\n list_fields_temp[x]['label'] = list_fields_temp[x]['label'] if list_fields_temp[x]['label'] else self.make_label(list_fields_temp[x]['name'])\n if list_fields_temp[x]['search']:\n self.has_search_fields = True\n \n fields = list_fields_temp", "def test_to_python(self):\r\n column = columns.List(JsonTestColumn)\r\n val = [1, 2, 3]\r\n db_val = column.to_database(val)\r\n assert db_val.value == [json.dumps(v) for v in val]\r\n py_val = column.to_python(db_val.value)\r\n assert py_val == val", "def test_columns_list_element_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=[[], \"a\"])", "def _update_allowed_fields(self) -> list:\n raise NotImplementedError('Each model has to have its list of update allowed fields')", "def migration():", "def test_columns_str_to_list(self):\n\n x = BaseTransformer(columns=\"a\")\n\n expected_attributes = {\"columns\": [\"a\"]}\n\n h.test_object_attributes(\n obj=x,\n expected_attributes=expected_attributes,\n msg=\"String put in list for columns\",\n )", "def test_list_properties(self):\n pass", "def test_field_types(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n db_relations = connection.introspection.get_relations(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n expected_field_type = None\n if db_relations.get(i):\n expected_field_type = u'ForeignKey'\n else:\n expected_field_type = get_field_type(db_cols[i].type_code)\n\n self.assertEqual(\n mb_fields[i].get_internal_type(),\n expected_field_type\n )", "def test_get_list(self):\n pass", "def test_list_options(self):\n pass", "def test_many_values(self):\n write this test!", "def test_cell_list_fields_success(self, mock_printlist, mock_list):\n self.shell('cell-list -r 1 --fields id name')\n mock_list.assert_called_once_with()\n mock_printlist.assert_called_once_with(mock.ANY,\n list({'id': 'ID',\n 'name': 'Name'}))", "def test_column_type(self):\n c = Column('foo', range(3), type=int)\n self.assertEqual(list(c), [0, 1, 2])", "def test_update_listfield_operator(self):\n\n class BlogPost(Document):\n tags = ListField(StringField())\n\n BlogPost.drop_collection()\n\n post = BlogPost(tags=[\"test\"])\n post.save()\n\n # ListField operator\n BlogPost.objects.update(push__tags=\"mongo\")\n post.reload()\n assert \"mongo\" in post.tags\n\n BlogPost.objects.update_one(push_all__tags=[\"db\", \"nosql\"])\n post.reload()\n assert \"db\" in post.tags\n assert \"nosql\" in post.tags\n\n tags = post.tags[:-1]\n BlogPost.objects.update(pop__tags=1)\n post.reload()\n assert post.tags == tags\n\n BlogPost.objects.update_one(add_to_set__tags=\"unique\")\n BlogPost.objects.update_one(add_to_set__tags=\"unique\")\n post.reload()\n assert post.tags.count(\"unique\") == 1\n\n BlogPost.drop_collection()", "def test_list_rules(self):\n pass" ]
[ "0.7982737", "0.7013082", "0.6694529", "0.6347495", "0.6340831", "0.6340831", "0.6202872", "0.62022257", "0.61912197", "0.6155615", "0.61215365", "0.61195683", "0.61160946", "0.6111083", "0.6088671", "0.607299", "0.59409857", "0.59221065", "0.59152704", "0.5907704", "0.5889256", "0.5874118", "0.5868125", "0.58275205", "0.580078", "0.5786647", "0.57819504", "0.57745266", "0.57673067", "0.57650167" ]
0.80628663
0
Sets the reference_currency of this CreditSupportAnnex.
def reference_currency(self, reference_currency): if self.local_vars_configuration.client_side_validation and reference_currency is None: # noqa: E501 raise ValueError("Invalid value for `reference_currency`, must not be `None`") # noqa: E501 self._reference_currency = reference_currency
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def card_currency(self, card_currency):\n\n self._card_currency = card_currency", "def billing_currency(self, billing_currency):\n\n self._billing_currency = billing_currency", "def reference(self, reference):\n\n self._reference = reference", "def reference(self, reference):\n\n self._reference = reference", "def reference_number(self, reference_number):\n\n self._reference_number = reference_number", "def set_reference(self, reference):\n\t\tif ((reference == 0) or (reference == 1)):\n\t\t\tself._reference = reference\n\t\telse:\n\t\t\tsys.stderr.write(\"\\nERROR : %s reference must be 0 (internal) or 1 (external) so it can't be %s !\\n\" % (self._target_id, reference))\n\t\t\tsys.exit(1)", "def currency(self, currency):\n\n self._currency = currency", "def currency(self, currency):\n\n self._currency = currency", "def currency(self, currency):\n\n self._currency = currency", "def currency(self, currency):\n\n self._currency = currency", "def reference_id(self, reference_id):\n\n self._reference_id = reference_id", "def contact_reference(self, contact_reference):\n\n self._contact_reference = contact_reference", "def currency(self, currency: str):\n\n self._currency = currency", "def set_reference_id(self, reference_id):\n self.reference_id = reference_id", "def update_reference_value(self, reference_value: float):\n self.__reference_value = reference_value", "def reference(self, reference):\n if reference is not None and len(reference) > 100:\n raise ValueError(\"Invalid value for `reference`, length must be less than or equal to `100`\")\n if reference is not None and len(reference) < 3:\n raise ValueError(\"Invalid value for `reference`, length must be greater than or equal to `3`\")\n\n self._reference = reference", "def base_currency(self, base_currency):\n\n self._base_currency = base_currency", "def setReferencePrimary(self, reference: ghidra.program.model.symbol.Reference) -> None:\n ...", "def currency(self, currency):\n if currency is None:\n raise ValueError(\"Invalid value for `currency`, must not be `None`\")\n\n self._currency = currency", "def set_adjustment_charge_currency(self, currency):\n self.single_selection_from_kendo_dropdown(self.currency_kendo_dropdown_locator, currency)", "def requested_currency(self, requested_currency):\n\n self._requested_currency = requested_currency", "def payment_reference_id(self, payment_reference_id):\n\n self._payment_reference_id = payment_reference_id", "def currency(self, currency):\n if self.local_vars_configuration.client_side_validation and currency is None: # noqa: E501\n raise ValueError(\"Invalid value for `currency`, must not be `None`\") # noqa: E501\n\n self._currency = currency", "def vendor_reference(self, vendor_reference):\n\n self._vendor_reference = vendor_reference", "def buy_currency(self, buy_currency):\n\n self._buy_currency = buy_currency", "def instrument_ref(self, instrument_ref):\n\n self._instrument_ref = instrument_ref", "def card_balance_in_card_currency(self, card_balance_in_card_currency):\n\n self._card_balance_in_card_currency = card_balance_in_card_currency", "def set_document_reference_number(self, reference_number):\n self.set_value_into_input_field(self.reference_number_text_field_locator, reference_number)", "def currency_conversion(self, currency_conversion):\n\n self._currency_conversion = currency_conversion", "def currency_conversion(self, currency_conversion):\n\n self._currency_conversion = currency_conversion" ]
[ "0.6202353", "0.620083", "0.6186801", "0.6186801", "0.596234", "0.59369206", "0.5925179", "0.5925179", "0.5925179", "0.5925179", "0.5810195", "0.5784062", "0.5707103", "0.5687867", "0.56441146", "0.560263", "0.5576023", "0.5565525", "0.5519472", "0.548651", "0.5457715", "0.54562765", "0.5363538", "0.531972", "0.53121775", "0.5292159", "0.52860004", "0.5285114", "0.52202386", "0.52202386" ]
0.8066907
0
Sets the collateral_currencies of this CreditSupportAnnex.
def collateral_currencies(self, collateral_currencies): if self.local_vars_configuration.client_side_validation and collateral_currencies is None: # noqa: E501 raise ValueError("Invalid value for `collateral_currencies`, must not be `None`") # noqa: E501 self._collateral_currencies = collateral_currencies
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def currencies(self, currencies):\n\n self._currencies = currencies", "def quote_currencies(self):\n pass", "def ciphers(self, ciphers):\n\n self._ciphers = ciphers", "def reset_currencies(self):\n self.currency_data = read_csv('base_currency_data.txt')\n for _ in range(2):\n del self.currency_data[0]\n self.currencies = [currency[0] for currency in self.currency_data]\n self.home_currency = 'Select'\n self.foreign_currency = 'Select'\n self.new_foreign_currency = 'Select'", "def currency_lookups(self, currency_lookups):\n\n self._currency_lookups = currency_lookups", "def add_currency(self, iso_4217_ccy=None):\n self.__input_mandatory_fields[self.__data_sets.MONEY_DATA_CURRENCY] = self.__data_types.MONEY_DATA_CURRENCY\n\n self.__input_data_structure.update({self.__data_sets.MONEY_DATA_CURRENCY: iso_4217_ccy})\n self.__input_data_set.update(self.__input_data_structure)", "def map_to_currencies(self, raw_currencies: HitbtcRawCurrencies) -> HitbtcCurrencies:\n\n currencies = list(map(self.map_to_currency, raw_currencies))\n return currencies", "def currencies(self):\r\n return currencies.Currencies(self)", "def set_charges_grid_currency(self, currency):\n column_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::th[@data-title='Currency']\" % self.charges_grid_div_id)\n column_element = self.wait().until(EC.presence_of_element_located(column_locator), 'column locator not found before specified time out')\n column_index = int(column_element.get_attribute(\"data-index\")) + 1\n first_row_column_locator = (By.XPATH, \"//div[contains(@id, '%s')]/descendant::div[@class='k-grid-content']/descendant::td[%s]/span[@class='k-grid-cell']\" % (self.charges_grid_div_id, str(column_index)))\n first_row_column_element = self.wait().until(EC.presence_of_element_located(first_row_column_locator), 'first row column locator not found before specified time out')\n self.script_executor_click(first_row_column_element)\n self.wait_for_ajax_spinner_load()\n dropdown_item_locator = (By.XPATH, \"//ul[@id='CurrencyId_listbox']/descendant::li[contains(text(), '%s')]\" % currency)\n dropdown_item_element = self.wait().until(EC.presence_of_element_located(dropdown_item_locator), 'dropdown item locator not found before specified time out')\n self.script_executor_click(dropdown_item_element)\n self.wait_for_ajax_spinner_load()", "def set_cylindrical(self, cyl_ang):\n self.FCYL = 1\n self.CIL_ANG = cyl_ang", "def get_currencies(self):\n return self.__call__('currencies', 'getcurrencies')", "def base_currency(self, base_currency):\n\n self._base_currency = base_currency", "def supported_countries(self, supported_countries):\n\n self._supported_countries = supported_countries", "def currency(self, currency):\n\n self._currency = currency", "def currency(self, currency):\n\n self._currency = currency", "def currency(self, currency):\n\n self._currency = currency", "def currency(self, currency):\n\n self._currency = currency", "def currency_conversion(self, currency_conversion):\n\n self._currency_conversion = currency_conversion", "def currency_conversion(self, currency_conversion):\n\n self._currency_conversion = currency_conversion", "def card_currency(self, card_currency):\n\n self._card_currency = card_currency", "def orbital_eccentricity(self, orbital_eccentricity):\n\n self._orbital_eccentricity = orbital_eccentricity", "def collaborators(self, collaborators):\n\n self._collaborators = collaborators", "def update_all_currencies():\n rates_to_update = []\n rates_to_create = []\n for currency in 
settings.ALLOWED_CURRENCIES:\n try:\n rate = update_currency_data_from_rss(currency, commit=False)\n if rate.id is None:\n rates_to_create.append(rate)\n else:\n rates_to_update.append(rate)\n except AttributeError as e:\n logger.error(str(e))\n\n Rate.objects.bulk_create(rates_to_create)\n Rate.objects.bulk_update(\n rates_to_update, fields=[\"exchange_rate\", \"parser_update_date\", \"description\"]\n )", "def requested_currency(self, requested_currency):\n\n self._requested_currency = requested_currency", "def get_currencies(self) -> list:\n return self.client.currencies.get_all()", "def returnCurrencies(self):\n pass", "async def set_currency(self, currency: str):\n json = {**self.get_data(), \"currencyId\": currency}\n return await self.easee.put(f\"/api/sites/{self.id}\", json=json)", "def get_available_currencies(self):\n auth = self.get_auth()\n currencies = []\n content = self._get_response(\n self.CURRENCY_URL,\n auth=auth\n )\n for option in content:\n currency = Currency.objects.get_or_create(\n iso_code=option['iso_code'],\n name=option['name']\n )[0]\n currencies.append(currency)\n return currencies", "def currencies(self) -> localedata.LocaleDataDict:\n return self._data['currency_names']", "def set_currency(self, currency):\n self.single_selection_from_kendo_dropdown(self.currency_kendo_dropdown_locator, currency)\n self.wait_for_ajax_spinner_load()" ]
[ "0.67481405", "0.5265579", "0.5176979", "0.5093953", "0.5061187", "0.49187213", "0.48065504", "0.47883865", "0.4766928", "0.4766007", "0.4744785", "0.4650204", "0.46441692", "0.4619848", "0.4619848", "0.4619848", "0.4619848", "0.4618425", "0.4618425", "0.46050042", "0.4600184", "0.45751792", "0.45749754", "0.45589155", "0.4518282", "0.44742545", "0.44706622", "0.44579425", "0.44502026", "0.44372928" ]
0.76613015
0
Sets the isda_agreement_version of this CreditSupportAnnex.
def isda_agreement_version(self, isda_agreement_version): if self.local_vars_configuration.client_side_validation and isda_agreement_version is None: # noqa: E501 raise ValueError("Invalid value for `isda_agreement_version`, must not be `None`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and isda_agreement_version is not None and len(isda_agreement_version) > 128): raise ValueError("Invalid value for `isda_agreement_version`, length must be less than or equal to `128`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and isda_agreement_version is not None and len(isda_agreement_version) < 0): raise ValueError("Invalid value for `isda_agreement_version`, length must be greater than or equal to `0`") # noqa: E501 self._isda_agreement_version = isda_agreement_version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ata_version(self, ata_version: SmartSsdAtaVersion):\n\n self._ata_version = ata_version", "def guideline_version(self, guideline_version):\n\n self._guideline_version = guideline_version", "def sata_version(self, sata_version: SmartSsdSataVersion):\n\n self._sata_version = sata_version", "def sdas_data_date(self, sdas_data_date):\n\n self._sdas_data_date = sdas_data_date", "def setidd(self, iddinfo, iddindex, block, idd_version):\n self.idd_info = iddinfo\n self.block = block\n self.idd_index = iddindex\n self.idd_version = idd_version", "def a_flag(self):\n if self.datalogflag:\n self.datalog = DEFAULT_DATALOG_AQ", "def is_acd(self, is_acd):\n \n self._is_acd = is_acd", "def set_doc_version(self, doc, value):\n if not self.doc_version_set:\n self.doc_version_set = True\n m = self.VERS_STR_REGEX.match(value)\n if m is None:\n raise SPDXValueError('Document::Version')\n else:\n doc.version = version.Version(major=int(m.group(1)),\n minor=int(m.group(2)))\n return True\n else:\n raise CardinalityError('Document::Version')", "def setAnchorDateYear(self, value):\n return self._set(anchorDateYear=value)", "def ata_version(self) -> SmartSsdAtaVersion:\n return self._ata_version", "def set_pkg_vers(self, doc, version):\n self.assert_package_exists()\n if not self.package_vers_set:\n self.package_vers_set = True\n doc.package.version = version\n return True\n else:\n raise CardinalityError('Package::Version')", "def version(self, version):\n \n self._version = version", "def set_full_accession(self):\n return self.STUDY_VERSION_ACCESSION.format(self.study.phs, self.i_version, self.i_participant_set)", "def version(self, version):\n self._version = version", "def version(self, version):\n self._version = version", "def set_version(self, version, dataset_name=None):\n if dataset_name is None:\n self._version = version\n return self._version\n\n # resolve dataset name\n dataset = self.__getitem__(dataset_name)\n if dataset is None:\n raise KeyError(\"Dataset %s does not exist\" % dataset_name)\n dataset.attrs[\"version\"] = version\n return version", "def hxdp_build_version(self, hxdp_build_version):\n\n self._hxdp_build_version = hxdp_build_version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version", "def version(self, version):\n\n self._version = version" ]
[ "0.5928529", "0.52493036", "0.509096", "0.49936894", "0.4745028", "0.46790195", "0.4664767", "0.46101287", "0.4549267", "0.4519262", "0.44122893", "0.43925443", "0.43877813", "0.43829527", "0.43829527", "0.43659112", "0.4359275", "0.43583745", "0.43583745", "0.43583745", "0.43583745", "0.43583745", "0.43583745", "0.43583745", "0.43583745", "0.43583745", "0.43583745", "0.43583745", "0.43583745", "0.43583745" ]
0.83479697
0
Sets the margin_call_frequency of this CreditSupportAnnex.
def margin_call_frequency(self, margin_call_frequency): if self.local_vars_configuration.client_side_validation and margin_call_frequency is None: # noqa: E501 raise ValueError("Invalid value for `margin_call_frequency`, must not be `None`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and margin_call_frequency is not None and len(margin_call_frequency) > 32): raise ValueError("Invalid value for `margin_call_frequency`, length must be less than or equal to `32`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and margin_call_frequency is not None and len(margin_call_frequency) < 0): raise ValueError("Invalid value for `margin_call_frequency`, length must be greater than or equal to `0`") # noqa: E501 self._margin_call_frequency = margin_call_frequency
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_margin(self, margin):\n _pal.lib.geometry_set_margin(self._geometry, c.c_float(margin))", "def exposureMargin(self, exposureMargin):\n\n self._exposureMargin = exposureMargin", "def change_margin(self, margin):\n self.margin = margin * self._MM_IN_MICRONS\n\n self._create_drawing_area()\n self.partition(self.rows, self.cols)", "def setMarginPercentage(self, perc):\n\t\tself.marginPercentage = perc", "def set_margin(self, margin, margin_text=None):\n self._margin = margin\n if margin_text is None:\n self._margin_text = margin\n else:\n self._margin_text = margin_text\n self._final = None # Force rebuild", "def margin(self, parameter, margin):\n #Use array type to multipy values by margin\n array = numpy.array(self.dataframe[parameter])\n array = array * (1+margin)\n self.dataframe[parameter] = array\n logging.info('Margin of {} successfully applied to the parameter values'.format(margin))", "def set_report_freq(self, freq):\n if freq < 0:\n freq = 0\n self.report_freq = freq", "def frequency(self, frequency: int):\n\n self._frequency = frequency", "def set_if_center_freq(self, center_freq):\n # self.center_freq = freq_calc.get_center_freq()\n self.center_freq = center_freq\n self.seq.add_param(self.mng_name, \"ifCenterFreq\",\n str(self.center_freq), 1)", "def frequency(self, frequency: int):\n self._freq = freq", "def setmargin(self, margin: Union [int, float, str, 'Margin']) -> 'Margin':\r\n\r\n # inefficient way to check conditions, changing eventually\r\n if True not in {isinstance(margin, _) for _ in (int, float, str, Margin)}:\r\n raise TypeError\r\n if isinstance(margin, int):\r\n if margin < 0:\r\n # margin can't be negative\r\n raise ValueError\r\n self.margin = int(margin)\r\n return self", "def setCarrierFrequency(self, frequency):\n if self._params['modulationMode'] != \"IQMixer\":\n print \"WARNING ! Carrier Frequency change also Tone Frequency in %s mode\" % self._params['modulationMode']\n self._MWSource.setFrequency(frequency)", "def frequency(self, frequency):\n\n self._frequency = frequency", "def set_Freq(self,freq):\n super(self.__class__, self).setFreq(self, freq)", "def setCenterFrequency(self,centerFreq: int) -> None:\n\n if not self.debug:\n self.myFieldFox.write(\"SENS:FREQ:CENT \" + str(centerFreq))\n\n return", "def daily_limit(self, daily_limit):\n\n self._daily_limit = daily_limit", "def exposureMarginTotal(self, exposureMarginTotal):\n\n self._exposureMarginTotal = exposureMarginTotal", "def marginType(self, marginType):\n\n self._marginType = marginType", "def set_frequency(self, f=1e9):\r\n self.f = f", "def set_freq(self, freq):\n\n return self._service.exposed_set_freq(freq)", "def set_refclock(self, frequency):\n\n self.refclock_freq = frequency\n self.clock_freq = self.freqmult*self.refclock_freq\n if (self.clock_freq < 99.999e6 or self.clock_freq > 500.001e6):\n warn('Clock frequency out of range. 
Use set_freqmult to set clock \\\n frequency between 100MHz and 500MHz')\n print ('Refclock =', \"{:.2e}\".format(frequency), 'Hz \\nFreqmult =', self.freqmult,\n '\\nClock Frequency =', \"{:.2e}\".format(self.clock_freq), 'Hz')", "def api_call_counter(self, api_call_counter):\n\n self._api_call_counter = api_call_counter", "def set_center_freq(self, *args):\n return _uhd_swig.usrp_source_sptr_set_center_freq(self, *args)", "def set_center_freq(self, *args):\n return _uhd_swig.usrp_source_set_center_freq(self, *args)", "def set_center_freq(self, *args):\n return _uhd_swig.usrp_sink_set_center_freq(self, *args)", "def setFreq(self,newfreq):\n\t\tself.freq = newfreq;", "def spanMargin(self, spanMargin):\n\n self._spanMargin = spanMargin", "def set_center_freq(self, *args):\n return _uhd_swig.usrp_sink_sptr_set_center_freq(self, *args)", "def clock_speed(self, clock_speed):\n\n self._clock_speed = clock_speed", "def set_frequency(self, f=1e9):\r\n _debug('simq03b_api.set_frequency')\r\n \r\n self.write('SOUR:FREQ:CW '+str(f))" ]
[ "0.5699754", "0.56821644", "0.53351855", "0.53324115", "0.5319535", "0.5294425", "0.52547044", "0.5080364", "0.50522316", "0.5033701", "0.5031883", "0.49959642", "0.498956", "0.49374285", "0.4922234", "0.48966405", "0.4871515", "0.481583", "0.4808186", "0.47932616", "0.47717464", "0.47668904", "0.4740724", "0.47192937", "0.4693931", "0.46806848", "0.4670353", "0.46549392", "0.46543533", "0.46409246" ]
0.7814718
0
Sets the valuation_agent of this CreditSupportAnnex.
def valuation_agent(self, valuation_agent): if self.local_vars_configuration.client_side_validation and valuation_agent is None: # noqa: E501 raise ValueError("Invalid value for `valuation_agent`, must not be `None`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and valuation_agent is not None and len(valuation_agent) > 256): raise ValueError("Invalid value for `valuation_agent`, length must be less than or equal to `256`") # noqa: E501 if (self.local_vars_configuration.client_side_validation and valuation_agent is not None and len(valuation_agent) < 0): raise ValueError("Invalid value for `valuation_agent`, length must be greater than or equal to `0`") # noqa: E501 self._valuation_agent = valuation_agent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def agent_requirement(self, agent_requirement):\n\n self._agent_requirement = agent_requirement", "def setAgility(self, agility):\n self.ag = agility", "def valuation_date_times(self, valuation_date_times):\n\n self._valuation_date_times = valuation_date_times", "def set_etacalc(self, etacalc):\n self.__etacalc = etacalc", "def agent_status(self, agent_status):\n\n self._agent_status = agent_status", "def election_date(self, election_date):\n\n self._election_date = election_date", "def with_agent(self, agent: Agent):\n self.agent = agent\n return self", "def education(self, education):\n\n self.logger.debug(\"In 'education' setter.\")\n\n self._education = education", "def acquisition_date(self, acquisition_date):\n\n self._acquisition_date = acquisition_date", "def setEvolution(self,evolution):\n\t\tself.evolution = evolution", "def expiration_date(self, expiration_date):\n\n self._expiration_date = expiration_date", "def expense(self, expense):\n self._expense = expense", "def agent_id(self, agent_id):\n\n self._agent_id = agent_id", "def occupation(self, occupation):\n\n self.logger.debug(\"In 'occupation' setter.\")\n\n self._occupation = occupation", "def causative_agent(self, causative_agent):\n if causative_agent is None:\n raise ValueError(\"Invalid value for `causative_agent`, must not be `None`\") # noqa: E501\n\n self._causative_agent = causative_agent", "def set_agent_env(self, param, value):\n logging.info(\"setting agent_env param:[%s] = value:[%s]\", param, value)\n self.agent_env[param] = value", "def set_voltage(self, v):\n self.environment.set_voltage(self.neuron_id, v)", "def maturity(self, maturity: int):\n\n self._maturity = maturity", "def allowance(self, allowance):\n\n self._allowance = allowance", "def date_validity(self, date_validity):\n self._date_validity = date_validity", "def set_agents(self, agents):\n if self.single_agent_mode:\n raise ValueError(\n \"Setting agent in single agent mode or human mode is not allowed.\"\n )\n\n self.agents = agents\n # If at least one agent needs raw data, we set self.allow_raw_data = True\n for agent in self.agents:\n if agent.use_raw:\n self.allow_raw_data = True\n break", "def election_state(self, election_state):\n\n self._election_state = election_state", "def setUA(self, useragent):\n\t\tpass", "def _set_value_date_32A(self, val):\n self.swift_obj.ValueDateCurrencyInterbankSettledAmount = val\n self.swift_obj.ValueDateCurrencyInterbankSettledAmount.swiftTag = \"32A\"", "def installment_amount(self, installment_amount: Amount):\n\n self._installment_amount = installment_amount", "def design_company(self, design_company):\n\n self._design_company = design_company", "def candidate_office_state(self, candidate_office_state):\n\n self._candidate_office_state = candidate_office_state", "def coverage_end_date(self, coverage_end_date):\n\n self._coverage_end_date = coverage_end_date", "def coverage_end_date(self, coverage_end_date):\n\n self._coverage_end_date = coverage_end_date", "def set_eht(self, target_eht):\n self.target_eht = round(target_eht, 2)\n # Setting SEM to target EHT must be implemented in child class!" ]
[ "0.5399857", "0.5215203", "0.5160706", "0.51547", "0.48660296", "0.47691083", "0.4741191", "0.47110885", "0.46982107", "0.46744177", "0.4664191", "0.46602476", "0.4644078", "0.4594802", "0.44882673", "0.44703478", "0.44610634", "0.44596305", "0.44565627", "0.4448571", "0.44395933", "0.44312093", "0.44258642", "0.44152364", "0.4396921", "0.4382175", "0.43718207", "0.43633935", "0.43633935", "0.4361425" ]
0.7366433
0
Sets the threshold_amount of this CreditSupportAnnex.
def threshold_amount(self, threshold_amount): if self.local_vars_configuration.client_side_validation and threshold_amount is None: # noqa: E501 raise ValueError("Invalid value for `threshold_amount`, must not be `None`") # noqa: E501 self._threshold_amount = threshold_amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)", "def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass", "def setThreshold(self, value):\n return self._set(threshold=value)", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def threshold(self,thresholdValue):\n # TO DO\n pass", "def setLowerThreshold(self, lower_threshold):\r\n\t\tself.LowerThreshold = lower_threshold", "def matrix_filtering_threshold(self, matrix_filtering_threshold):\n\n self._matrix_filtering_threshold = matrix_filtering_threshold", "def setThresholdLevel(self, *args):\n return _libsbml.Input_setThresholdLevel(self, *args)", "def set_ThresholdValue(self, value):\n super(UpdateTriggerInputSet, self)._set_input('ThresholdValue', value)", "def setErrorThreshold(self, threshold):\n return self._set(errorThreshold=threshold)", "def setErrorThreshold(self, threshold):\n return self._set(errorThreshold=threshold)", "def set_sigma_threshold(self, sigma_threshold):\n\n if sigma_threshold < 0:\n raise ValueError(\"The sigma threshold cannot be smaller than 0.\")\n\n core.xc_func_set_sigma_threshold(self.xc_func, ctypes.c_double(sigma_threshold))", "def setThresholdLabel(self, p):\n return self._set(thresholdLabel=p)", "def setThresholdLabel(self, p):\n return self._set(thresholdLabel=p)", "def setThresholdLabel(self, p):\n return self._set(thresholdLabel=p)", "def token_cleanup_threshold(self, token_cleanup_threshold):\n\n self._token_cleanup_threshold = token_cleanup_threshold", "def set_threshold(self, cat, t):\n self.con.execute(\"update ct set threshold=%f where category='%s'\" \n % (t, cat))", "def set_dens_threshold(self, dens_threshold):\n\n if dens_threshold < 0:\n raise ValueError(\"The density threshold cannot be smaller than 0.\")\n\n core.xc_func_set_dens_threshold(self.xc_func, ctypes.c_double(dens_threshold))", "def set_amount(self, amount):\n self.amount = amount", "def copy(self, threshold):\n self.indicator = threshold['indicator']\n self.stage = threshold['stage']\n self.begin = threshold['begin']\n self.end = threshold['end']\n self.quality = threshold['quality']\n self.weight = threshold['weight']\n return self", "async def threshold(self, ctx, threshold: int):\n if threshold > 0:\n self.bot.db.execute(\"UPDATE starboards SET threshold = ? WHERE channel_id = ?\", (threshold, ctx.channel.id))\n await ctx.say(\"star.threshold\", threshold)\n await self.set_topic(ctx.channel.id)\n else:\n await ctx.say(\"star.unsigned\", threshold)", "def updateThreshold(self, t):\n\n budget = self.budget\n self.threshold = self.init_threshold * self.diameter * ((budget-t) / self.budget)**self.decay_factor", "def set_credit_amount(self, credit_amount):\n self.set_value_into_input_field(self.credit_amount_textbox_locator, credit_amount)", "def apply_threshold(heatmap, threshold):\n heatmap[heatmap <= threshold] = 0\n\n return heatmap", "def set_tau_threshold(self, tau_threshold):\n\n if tau_threshold < 0:\n raise ValueError(\"The tau threshold cannot be smaller than 0.\")\n\n core.xc_func_set_tau_threshold(self.xc_func, ctypes.c_double(tau_threshold))", "def amount(self, amount):\n\n self._amount = amount" ]
[ "0.76420635", "0.7484654", "0.7002138", "0.69110656", "0.69110656", "0.69110656", "0.69110656", "0.69110656", "0.6605099", "0.62813085", "0.6248368", "0.6243392", "0.6088954", "0.59891135", "0.59891135", "0.57627714", "0.5746402", "0.5746402", "0.5746402", "0.5658464", "0.56583554", "0.5653518", "0.5610976", "0.5596041", "0.5558965", "0.5489286", "0.5482587", "0.54579717", "0.54312235", "0.54282457" ]
0.7795437
0
Sets the rounding_decimal_places of this CreditSupportAnnex.
def rounding_decimal_places(self, rounding_decimal_places): if self.local_vars_configuration.client_side_validation and rounding_decimal_places is None: # noqa: E501 raise ValueError("Invalid value for `rounding_decimal_places`, must not be `None`") # noqa: E501 self._rounding_decimal_places = rounding_decimal_places
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rounding_decimal(self, rounding_decimal):\n\n self._rounding_decimal = rounding_decimal", "def rounding_decimal(self, rounding_decimal):\n\n self._rounding_decimal = rounding_decimal", "def set_decimal_precision(self, decimal_precision):\n assert (decimal_precision >= 0.0) and (decimal_precision <= 5.0), 'Decimal precision must be between 0 and 5'\n self.decimal_precision = int(round(decimal_precision))", "def round(self, round):\n\n self._round = round", "def set_precision(prec = None):\n context = decimal.getcontext()\n oldprec = context.prec\n if prec is not None:\n context.prec = prec\n return oldprec", "def round_decimal(cls, value: Dec) -> Dec:\n # This check for numbers which are smaller than the precision allows will\n # be commented out for now as it seems to kill economic activity.\n # if value < Dec('1E-8'):\n # return Dec(0)\n return round(value, cls.currency_precision)", "def set_tolerance(self, tol):\n self.precision = tol\n return", "def sparkline_decimal_precision(self, sparkline_decimal_precision):\n\n self._sparkline_decimal_precision = sparkline_decimal_precision", "def setRoundingRadius( self, radius ):\n self._roundingRadius = radius\n self.setDirty()", "def precision_changed(self, new_precision):\n super(PyDMSpinbox, self).precision_changed(new_precision)\n self.setDecimals(self.precision)", "def width_decimal(self, width_decimal):\n\n self._width_decimal = width_decimal", "def _round_decimals(self, dps):\n\n pass", "def __round(num):\n return float(round(decimal.Decimal(num), DataGen.precision))", "def decimals(self, decimals):\n if decimals is None:\n raise ValueError(\"Invalid value for `decimals`, must not be `None`\")\n\n self._decimals = decimals", "def round(self):\n return self._round", "def set_str_precision(cls, dp=3):\n cls.str_decimal_places = dp", "def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):\n strvalue = super().adapt_decimalfield_value(value, max_digits, decimal_places)\n return Decimal(strvalue)", "def set_num_precision(number, precision, mode='int'):\n fmt = '{:.%ie}' % (precision - 1)\n value = float(fmt.format(number))\n if mode == 'int':\n return int(value)\n else:\n return value", "def round_float(cls, value: float) -> Dec:\n # This check for numbers which are smaller than the precision allows will\n # be commented out for now as it seems to kill economic activity.\n # if value < 1E-8:\n # return Dec(0)\n return round(Dec(value), cls.currency_precision)", "def roundingRadius( self ):\n return self._roundingRadius", "def _set_precision(self, precision) :\n self.__precision = self.parent().action().filter(precision)", "def SetPrecision(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_SetPrecision(self, *args)", "def _set_precision(self, precision) :\n self.__precision = self.parent().monoid().filter(precision)", "def change_precision(self, precision):\n if precision <= 0:\n print(\"Precision must be positive\")\n else:\n self.precision = precision\n self.input_equation(self.equation)\n print(f'Precision: {precision}')", "def setValue(self, value):\n super().setValue(int(value * self._precision))", "def float_round(x, prec=2, base=.05):\n return round(base * round(float(x) / base), prec)", "def test_rounding_2(self):\n cr, uid = self.cr, self.uid\n self._add_services(49.95, 15.00, 0.90, 34.95, 99.95)\n for method, total in ((\"round_globally\", 230.81),\n (\"round_per_line\", 230.83)):\n self.company.write({\"tax_calculation_rounding_method\": method})\n voucher = self.account_obj.prepare_voucher(\n 
cr, uid, [self.account_id])\n\n invoice = self._create_invoice()\n self.assertAlmostEquals(\n invoice.amount_total,\n total,\n \"Expected invoice to round to 230.81 globally\",\n )\n self.assertAlmostEquals(\n voucher[\"context\"][\"default_amount\"],\n invoice.amount_total,\n msg=\"Exepcted default voucher amount to match invoice\")", "def test_rounding_1(self):\n cr, uid = self.cr, self.uid\n self._add_services(20, 49.95, 32.95, 9.95, -9.95)\n\n for method, total in ((\"round_globally\", 118.31),\n (\"round_per_line\", 118.32)):\n self.company.write({\"tax_calculation_rounding_method\": method})\n\n voucher = self.account_obj.prepare_voucher(\n cr, uid, [self.account_id])\n\n invoice = self._create_invoice()\n self.assertAlmostEquals(\n invoice.amount_total,\n total,\n msg=\"Expected invoice to round to 118.31 globally\"\n )\n self.assertAlmostEquals(\n voucher[\"context\"][\"default_amount\"],\n invoice.amount_total,\n msg=\"Exepcted default voucher amount to match invoice\")", "def SetPrecision(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivide_SetPrecision(self, *args)", "def round_decimals_down(self, number, decimals: int = 2):\n if not isinstance(decimals, int):\n raise TypeError(\"decimal places must be an integer\")\n elif decimals < 0:\n raise ValueError(\"decimal places has to be 0 or more\")\n elif decimals == 0:\n return math.floor(number)\n\n factor = 10 ** decimals\n return math.floor(number * factor) / factor" ]
[ "0.70312047", "0.70312047", "0.6526627", "0.6234227", "0.5845742", "0.58131725", "0.57996356", "0.56278586", "0.5584064", "0.55155945", "0.5437357", "0.5435349", "0.5397775", "0.5376162", "0.5353457", "0.53360766", "0.53161436", "0.5277059", "0.522207", "0.52174044", "0.5159012", "0.51395726", "0.5103835", "0.5019622", "0.50065416", "0.49229878", "0.48969567", "0.48809123", "0.48489287", "0.48371506" ]
0.7307036
0
Sets the initial_margin_amount of this CreditSupportAnnex.
def initial_margin_amount(self, initial_margin_amount): if self.local_vars_configuration.client_side_validation and initial_margin_amount is None: # noqa: E501 raise ValueError("Invalid value for `initial_margin_amount`, must not be `None`") # noqa: E501 self._initial_margin_amount = initial_margin_amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_margin(self, margin):\n _pal.lib.geometry_set_margin(self._geometry, c.c_float(margin))", "def set_margin(self, value):\n value = u.decimal(value)\n if u.isempty(value):\n self.sale_price = self.cost_price\n else:\n cp = self.cost_price or zero\n self.sale_price = u.decimal(cp/((cem-value)/cem), True)", "def set_margin(self, margin, margin_text=None):\n self._margin = margin\n if margin_text is None:\n self._margin_text = margin\n else:\n self._margin_text = margin_text\n self._final = None # Force rebuild", "def exposureMargin(self, exposureMargin):\n\n self._exposureMargin = exposureMargin", "def change_margin(self, margin):\n self.margin = margin * self._MM_IN_MICRONS\n\n self._create_drawing_area()\n self.partition(self.rows, self.cols)", "def setmargin(self, margin: Union [int, float, str, 'Margin']) -> 'Margin':\r\n\r\n # inefficient way to check conditions, changing eventually\r\n if True not in {isinstance(margin, _) for _ in (int, float, str, Margin)}:\r\n raise TypeError\r\n if isinstance(margin, int):\r\n if margin < 0:\r\n # margin can't be negative\r\n raise ValueError\r\n self.margin = int(margin)\r\n return self", "def setMarginPercentage(self, perc):\n\t\tself.marginPercentage = perc", "def initial_segment(self, initial_segment):\n\n self._initial_segment = initial_segment", "def set_initial_concentration(self, concentration):\n self.current_concentration = concentration", "def exposureMarginTotal(self, exposureMarginTotal):\n\n self._exposureMarginTotal = exposureMarginTotal", "def setInitialAmount(self, *args):\n return _libsbml.Species_setInitialAmount(self, *args)", "def onchange_on_margin(self):\n\n for tax in self:\n\n tax.amount_type = 'margin' if tax.on_margin else 'percent'\n tax.price_include = tax.on_margin\n tax.type_tax_use = 'sale' if tax.on_margin else 'none'\n\n if tax.on_margin:\n tax.include_base_amount = False", "def initial_point(self, initial_point: Sequence[float] | None) -> None:\n self._initial_point = initial_point", "def SetMarginsXY(self, x, y):\r\n \r\n self.SetMargins(x, x, y, y)", "def set_initial(self, value):\n # TODO: Make an Initial Stock Adjust here\n pass", "def set_initial_offset(self, offset):\n self.initial_offset = max(\n min(\n (len(self) + 0.5) * self.item_heights - self.my_surface.get_height(),\n offset\n ),\n 0\n )", "def set_margin_timed(self, value):\n value = u.decimal(value)\n if u.isempty(value):\n self.sale_price_timed = self.cost_price\n else:\n cp = self.cost_price or zero\n self.sale_price_timed = u.decimal(cp/((cem-value)/cem), True)", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def margin(self, parameter, margin):\n #Use array type to multipy values by margin\n array = numpy.array(self.dataframe[parameter])\n array = array * (1+margin)\n self.dataframe[parameter] = array\n logging.info('Margin of {} successfully applied to the parameter values'.format(margin))", "def margin_size(self, value: int) -> None:\n\n if not isinstance(value, int):\n raise TypeError(\"The margin size must be an integer\")\n\n margin_spacing = (2 * value) + (2 * self._border_thickness)\n\n if margin_spacing >= self.widget_width:\n raise ValueError(\n \"The size of the borders and margins combined can total the same or more\"\n \"than the widget's width.\"\n )\n\n if margin_spacing >= self.widget_height:\n raise ValueError(\n \"The size of the borders and margins combined can total the same or more\"\n \"than the widget's height.\"\n )\n\n 
self._margin_size = value\n self._set_progress(self._progress) # For a render pass", "def set_xmin(self, xmin):\n self.__xmin = xmin", "def leftMargin(self, leftMargin=10):\n if not(0 <= leftMargin <= 65535) or type(leftMargin) is not int:\n raise ValueError('leftMargin must be a int between 0 and 65535')\n else:\n nH = leftMargin >> 8\n nL = leftMargin - (nH << 8)\n self._write(self.__class__.__GS + 'L' + chr(nL) + chr(nH))", "def setMinimumWidth( self, value ):\n self._minimumWidth = value", "def marginType(self, marginType):\n\n self._marginType = marginType", "def getMargin(self):\n assert False", "def margin(self):\n sp = self.sale_price or zero\n if u.isempty(sp):\n return zero\n cp = self.cost_price or zero\n return u.decimal((um-(cp/sp))*cem, True)", "def set_adjustment_charge_begin_date(self, begin_date):\n self.set_value_into_input_field(self.adjustment_begin_date_locator, begin_date)", "def resetAlignmentCenter(self):\n cent = self.TiltSeries_._TiltAlignmentParas.cent\n imdimX = self.TiltSeries_._imdimX\n imdimY = self.TiltSeries_._imdimY\n print(imdimX, imdimY)\n if cent[0] != imdimX//2+1 or cent[1] != imdimY//2+1:\n #rint \"Centers do not match: cent=\"+str(cent)+\", imdim=\"+str(imdim)\n self.TiltSeries_._TiltAlignmentParas.cent = [imdimX//2+1, imdimY//2+1]", "def setInitialConcentration(self, *args):\n return _libsbml.Species_setInitialConcentration(self, *args)" ]
[ "0.63743806", "0.6039284", "0.5959661", "0.5892813", "0.58600277", "0.5603433", "0.5558049", "0.5511332", "0.55063444", "0.53762347", "0.5342674", "0.51481956", "0.5045504", "0.49929357", "0.49733338", "0.4962956", "0.4943264", "0.48602268", "0.48602268", "0.4854743", "0.48407254", "0.48058677", "0.48025793", "0.48017472", "0.47654855", "0.47621715", "0.47118032", "0.4688906", "0.46818277", "0.46345907" ]
0.81832016
0
Sets the minimum_transfer_amount of this CreditSupportAnnex.
def minimum_transfer_amount(self, minimum_transfer_amount): if self.local_vars_configuration.client_side_validation and minimum_transfer_amount is None: # noqa: E501 raise ValueError("Invalid value for `minimum_transfer_amount`, must not be `None`") # noqa: E501 self._minimum_transfer_amount = minimum_transfer_amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_minimum(self, min_value):\n\n self._progress.setMinimum(min_value)", "def buy_min_amount(self, buy_min_amount):\n\n self._buy_min_amount = buy_min_amount", "def min_value(self, min_value):\n\n self._min_value = min_value", "def min_value(self, min_value):\n\n self._min_value = min_value", "def min_value(self, min_value):\n\n self._min_value = min_value", "def minimum_number(self, minimum_number):\n\n self._minimum_number = minimum_number", "def min(self, min):\n\n self._min = min", "def min(self, min):\n\n self._min = min", "def _send_minimum(self):\n content = {'minimum': self.minimum.isoformat()}\n self.send_action('set_minimum', content)", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def userMinimum(self, new_min: float) -> None:\n self._user_minimum = new_min\n self.reset_limits()", "def minimum_temperature(self, minimum_temperature):\n\n self._minimum_temperature = minimum_temperature", "def set_min(self, min):\n self.set_val((min, self.val[1]))", "def minimum_item_count(self, minimum_item_count):\n\n self._minimum_item_count = minimum_item_count", "def minimum_size(self, minimum_size):\n\n self._minimum_size = minimum_size", "def minimum_agent_version(self, minimum_agent_version):\n\n self._minimum_agent_version = minimum_agent_version", "def sell_min_amount(self, sell_min_amount):\n\n self._sell_min_amount = sell_min_amount", "def _set_minimum(self):\n self._level_gen.minimum_length = self._minimum_length_spinbox.value()\n self._refresh_view()", "def setMinGain(self, channel, gain, group=\"I\", unitCode=0):\n if group in nogainGroups: # E is expansion, GAIN is set on source unit, so return max\n raise Exception('Gain not available on Expansion Bus')\n resp = self.XAPCommand(\"MIN\", channel, group, gain, unitCode=unitCode)\n return resp", "def set_locked_temp_min(self, value: int = 0):\r\n if self._temperature_scale == \"F\":\r\n self._locked_temp_min = celsius_to_kelvin(\r\n fahrenheit_to_celsius(value)\r\n )\r\n elif self._temperature_scale == \"C\":\r\n self._locked_temp_min = celsius_to_kelvin(value)\r\n else:\r\n self._locked_temp_min = value\r\n\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"locked_temp_min\", value))", "def set_minVal(self, val):\n self.minVal = val", "def minimum_subtotal(self, minimum_subtotal):\n\n self._minimum_subtotal = minimum_subtotal", "def _set_minimum_value(self, value):\n # Check if the current value is not None, and if so, throw an error\n # because this property should not be set twice\n if self._get_minimum_value() is not None:\n raise RuntimeError('maximum value should not be set twice')\n\n # If the value is None, ignore it\n if value is None:\n return\n\n # Set the value\n self._minimum_value = value", "async def update_trade_minimums(self):\n\n trade_base_btc_pair = '{}-BTC'.format(config['trade_base'])\n\n if config['trade_base'] != 'BTC':\n trade_base_rate = self.base_rates[trade_base_btc_pair]\n else:\n trade_base_rate = 1.0\n\n base_mult = await self.get_pair_base_mult(config['trade_base'], trade_base_btc_pair)\n self.min_trade_size = trade_base_rate * config['trade_min_size_btc'] * base_mult\n self.min_safe_trade_size = self.min_trade_size * (1.0 + config['trade_min_safe_percent'])", "def minimum_health_capacity(self, minimum_health_capacity):\n self._minimum_health_capacity = minimum_health_capacity", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = 
(self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def take_money(self, amount, min_amount=0):\n if self.money_amount < min_amount or amount <= 0:\n return 0\n amount = min(amount, self.money_amount)\n\n if amount <= 0:\n return 0\n\n result = self._service.decrease(money=amount)\n amount = result[\"money\"] if result and \"money\" in result else 0\n\n self.money_amount -= amount\n\n return amount", "def setMinimumWidth( self, value ):\n self._minimumWidth = value" ]
[ "0.69782037", "0.6906807", "0.6490221", "0.6490221", "0.6490221", "0.640613", "0.6306588", "0.6306588", "0.6292194", "0.60590804", "0.60590804", "0.6001845", "0.59859973", "0.5823921", "0.5805904", "0.576669", "0.55387044", "0.5535776", "0.5526052", "0.54994684", "0.5489253", "0.54652596", "0.54581153", "0.5449066", "0.5427712", "0.54181004", "0.53710055", "0.53710055", "0.5369563", "0.53503317" ]
0.8311381
0
Plot the land uses of the resting points
def plotLandUse(layer, x):
    # features of the layer
    features = layer.getFeatures()
    # Create empty list for landuses
    list_lu = []
    # Iterate over features and add to a list
    for feature in features:
        list_lu.append(feature['Landuse'])
    list_lu.sort()
    # bins of the landuse numbers
    # bins = [10,11,30, 60, 70,90, 100,110,120,122,130,140,150,152,160,180,210]
    if(x=="Hist"):
        # Create histogram
        plt.hist(list_lu, density = True, color="orange")
        plt.xlabel('Landuse type')
        plt.xticks(rotation = "vertical", size = "x-small", stretch = 'condensed')
        plt.ylabel('Frequency')
        plt.title('Histogram of landuses - resting points (Distance below mean-variance)')
        plt.grid(True)
        plt.tight_layout()
        plt.subplots_adjust(bottom = 0.45)
        plt.show()
    # Create Piechart autopct='%1.2f',lambda pct: func(pct, data)
    elif(x=="Pie"):
        counts = Counter(list_lu)
        keys = counts.keys()
        values = counts.values()
        colours = ["goldenrod","navajowhite","yellowgreen","darkgoldenrod", "forestgreen","olive","limegreen","lime", "green","coral","gold", "olivedrab","black", "blue","darkseagreen","lightskyblue"]
        fig, ax = plt.subplots()
        data = [float(v) for v in values]
        wedges, texts, autotexts = ax.pie(data, labels=None,autopct='%1.2f', colors = colours)
        ax.legend(wedges, keys, title = "Landuse types", loc="left", bbox_to_anchor=(1, 0.8))
        #plt.setp(autotexts)
        ax.set_title("Landuses resting points (Threshold: Distance < [Mean-Variance])")
        fig.subplots_adjust(left=0.0125)
        plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_landmarks(image, landmarks):\n plt.imshow(image)\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\n plt.pause(0.001) # pause a bit so that plots are updated", "def show_landmarks(image, landmarks):\n plt.imshow(image)\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\n plt.pause(0.001) # pause a bit so that plots are updated", "def show_landmarks(image, landmarks):\n plt.imshow(image)\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\n plt.pause(0.001) # pause a bit so that plots are updated", "def show_landmarks(image, landmarks):\n\n if isinstance(image, str):\n # image = io.imread(image)\n image = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)\n\n plt.imshow(image)\n\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\n plt.pause(0.001) # pause a bit so that plots are updated", "def show_landmarks(image, landmarks):\n if Image.isImageType(image):\n image = np.asarray(image)\n plt.imshow(image)\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\n plt.pause(0.001) # pause a bit so that plots are updated", "def plot(self):\n\t\tself.plotOfXray().plot()", "def land(self, n, e):\n pass", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def plot(self):\n # Find only unmasked data :\n # xyz, sData, sColor, _ = self._select_unmasked()\n xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n self.mesh = visu.Markers(name='Sources')\n self.mesh.set_data(xyz, edge_color=self.edgecolor, face_color=sColor,\n size=sData, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n self.mesh.set_gl_state('translucent')", "def plot(self):\n #prepare the marker list\n marker = itertools.cycle((',', '+', '.', 'o', '*',\n '^', 'v', '<', '>', '8',\n 's', 'p', 'h', 'H', 'D',\n 'd'))\n # first categorised with plane\n for each_plane in self.plane_list:\n if self.is_literal:\n label = \"[\" + \"{0} {1} {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"]\"\n else:\n label = \"{\"+\"{0}, {1}, {2}\".format(each_plane[0], each_plane[1], each_plane[2]) + \"}\"\n x_list = []\n y_list = []\n if self.is_literal:\n tmp = [each_plane]\n opposite_plane = [-item for item in each_plane]\n tmp.append(opposite_plane)\n else:\n tmp = PoleFigure.get_permutations(each_plane)\n # second categorised with grain ID\n my_marker = \".\" # default marker\n for i in range(len(self.__data)):\n each_euler = self.__data[i]\n if self.unique_marker:\n my_marker = marker.next()\n plt.rcParams['text.usetex'] = False # otherwise, '^' will cause trouble\n euler = EulerAngle(each_euler[0], each_euler[1], each_euler[2])\n rot_m = np.dot(self.__ref, euler.rotation_matrix)\n self.__data[i] = RotationMatrix(rot_m).euler_angle\n for each_pole in tmp:\n tmp_pole = np.array(each_pole) / self.lattice_vector\n tmp_pole /= np.linalg.norm(tmp_pole)\n coord = np.dot(rot_m, tmp_pole)\n if coord[2] < 0:\n continue # not pointing up, moving on\n else:\n x = coord[0] / (1.0 + float(coord[2]))\n y = coord[1] / (1.0 + float(coord[2]))\n # need to rotate 90 degree\n x_list.append(y)\n y_list.append(-x)\n # start plotting\n if self.__clr_list is not None:\n clr = 
self.__clr_list.next()\n else:\n clr = np.random.rand(3, 1)\n plt.scatter(x_list, y_list, marker=my_marker, c=clr, label=label, edgecolor='none')\n # label x/y axis\n plt.text(1.1, 0.0, \"y\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n plt.text(0.0, -1.1, \"x\", horizontalalignment='center', verticalalignment='center', fontsize=15)\n # set legend\n plt.legend(loc='upper left', numpoints=1, ncol=6, fontsize=8, bbox_to_anchor=(0, 0))\n plt.title(self.title)\n plt.savefig(self.title + \".\" + self.output)\n plt.close()", "def plot_slice(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,abs(self.psi)[:,:,math.floor(res/2)])\n plt.show()", "def plot(self, routePoints=None):\n return plot(routePoints, self.profiles)", "def show_landmarks(image, landmarks, save_fig=False, save_path=None):\n\n plt.imshow(image)\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')\n if save_fig and save_path is not None:\n plt.savefig(save_path)\n plt.pause(0.001) # pause a bit so that plots are updated", "def show_landmarks(image, landmarks, ground_truth=None):\n plt.imshow(image, cmap='gray')\n plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r', label=\"Prediction\")\n if ground_truth is not None:\n plt.scatter(ground_truth[:, 0], ground_truth[:, 1], s=10, marker='.', c='b', label=\"Ground Truth\")\n # plt.figlegend('', ('Red', 'Blue'), 'center left')\n plt.pause(0.001) # pause a bit so that plots are updated", "def plot_landings(df):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_position(default_timeseries_position) \n\n Fn = df['CatchMT'].groupby([df.Year, df.Reg, df.Sreg]).mean()\n\n all_data = Fn.loc[:, 'All', 'All']\n ma_data = Fn.loc[:, '1', 'All']\n gb_data = Fn.loc[:, '2', 'All']\n\n # Don't plot the first year. Also, the data is shifted by one year.\n # For some reason, restricting the year range above results in a series\n # that still have a multi-index. 
This seems like the cleanest way to do\n # that.\n all_data = all_data.iloc[2:]\n ma_data = ma_data.iloc[2:]\n gb_data = gb_data.iloc[2:]\n all_data.index = all_data.index - 1\n ma_data.index = ma_data.index - 1\n gb_data.index = gb_data.index - 1\n\n all_data.plot(ax=ax, label='All') \n ma_data.plot(ax=ax, label='Mid Atlantic')\n gb_data.plot(ax=ax, label='Georges Bank')\n\n ax.legend(loc='best')\n\n content = io.BytesIO()\n plt.savefig(content, format='png')\n content.seek(0)\n image_cache['landings']['landings'] = content\n\n plt.close()", "def plot(arrivals_file, region): # pragma: no cover\n region = [float(s) for s in region.split()]\n reg = Region(*region)\n\n arrivals = pd.read_csv(arrivals_file, header=None, names=column_names,\n sep=' ')\n arr_file_base = os.path.splitext(arrivals_file.name)[0]\n # import IPython; IPython.embed(); import sys; sys.exit()\n source = _source_or_stations_in_region(\n arrivals, reg, SOURCE_LATITUDE, SOURCE_LONGITUDE,\n 'sources_in_region_{}.png'.format(arr_file_base))\n\n station = _source_or_stations_in_region(\n arrivals, reg, STATION_LATITUDE, STATION_LONGITUDE,\n 'stations_in_region_{}.png'.format(arr_file_base))\n\n # sources and stations both in region\n sources_and_stations = arrivals[source & station]\n\n fig = plt.figure()\n\n _plot_on_map(sources_and_stations,\n SOURCE_LONGITUDE, SOURCE_LATITUDE,\n marker='*', color='r')\n _plot_on_map(sources_and_stations,\n STATION_LONGITUDE, STATION_LATITUDE,\n marker='^', color='b')\n\n plt.title('Sources and stations in \\n region {}'.format(region))\n # plt.xlabel('Longitude')\n # plt.ylabel('Latitude')\n fig.savefig('sources_and_stations_in_region_{}.png'.format(arr_file_base))\n\n # rays originating and terminating in region\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n for i, arr in enumerate(sources_and_stations.iterrows()):\n dat = arr[1]\n ax.add_line(Line2D([dat[SOURCE_LONGITUDE], dat[STATION_LONGITUDE]],\n [dat[SOURCE_LATITUDE], dat[STATION_LATITUDE]],\n color='b', zorder=i))\n ANZ.drawcoastlines(linewidth=2.0, color='k',\n zorder=sources_and_stations.shape[0]+1)\n\n # ax.set_xlim(reg.leftlon - 5, reg.rightlon + 5)\n # ax.set_ylim(reg.bottomlat - 5, reg.upperlat + 5)\n _draw_paras_merids(ANZ)\n plt.title('Ray paths in \\n region {}'.format(region))\n # plt.xlabel('Longitude')\n # plt.ylabel('Latitude')\n fig.savefig('rays_in_region_{}.png'.format(arr_file_base))", "def plot_lanes(self, img, ax):\n left_fit, right_fit, left_fit_curve, right_fit_curve, left_lane_inds, right_lane_inds, out_img, nonzerox, nonzeroy = self.detect_lanes(img)\n # Visualization\n ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n\n out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n ax.imshow(out_img)\n ax.plot(left_fitx, ploty, color='yellow')\n ax.plot(right_fitx, ploty, color='yellow')\n return left_fit, right_fit, left_fit_curve, right_fit_curve", "def plot(self):\n pass", "def world_map_template():\n fig, ax = plt.subplots(figsize=(20, 10))\n ax.plot(\n laea_x(np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(-np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(-np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.pi / 
3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(-np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(-np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(-2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(-2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(0, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(0, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), 0),\n laea_y(np.linspace(-np.pi, np.pi), 0),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), np.pi / 6),\n laea_y(np.linspace(-np.pi, np.pi), np.pi / 6),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), -np.pi / 6),\n laea_y(np.linspace(-np.pi, np.pi), -np.pi / 6),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), np.pi / 3),\n laea_y(np.linspace(-np.pi, np.pi), np.pi / 3),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), -np.pi / 3),\n laea_y(np.linspace(-np.pi, np.pi), -np.pi / 3),\n color=\"k\",\n zorder=10,\n )\n ax.text(0, 1.47, r\"$|0\\rangle$\", fontsize=20)\n ax.text(0, -1.53, r\"$|1\\rangle$\", fontsize=20)\n ax.text(0.05, 0.05, r\"$|+\\rangle$\", fontsize=20)\n ax.text(2.9, 0, r\"$|-\\rangle$\", fontsize=20)\n ax.text(-3.2, 0, r\"$|-\\rangle$\", fontsize=20)\n\n return fig, ax", "def main():\n colors = {\n 0: 'w',\n 1: 'g',\n 2: 'r',\n 3: 'c',\n 4: 'm',\n 5: 'y',\n 6: 'k',\n 7: 'b',\n UNKNOWN_EMOTION: '0.1'\n }\n\n plot_data = { emotion: ([], []) for emotion in EMOTIONS }\n\n subjects = get_subjects()\n for subject in subjects:\n image_sequences = get_image_sequences(subject)\n for image_sequence in image_sequences:\n emotion = read_emotion(subject, image_sequence)\n X, Y = read_peak_landmarks(subject, image_sequence)\n\n plot_data[emotion][0].append(X)\n plot_data[emotion][1].append(Y)\n\n for emotion in EMOTIONS:\n if emotion == UNKNOWN_EMOTION or len(plot_data[emotion][0]) == 0:\n continue\n\n X = np.concatenate(plot_data[emotion][0])\n Y = np.concatenate(plot_data[emotion][1])\n plt.scatter(X, Y, color=colors[emotion], alpha=0.5, s=20, lw=0, label=EMOTIONS[emotion])\n\n plt.xlabel('X pixel position of landmark.')\n plt.ylabel('Y pixel position of landmark.')\n plt.legend()\n plt.grid(True)\n plt.show()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def plotFeatures(self):\n fl=np.array(self.xp)*0.0+0.25*self.farr.max()\n self.splines=self.axes.plot(self.xp, fl , ls='', marker='|', ms=20, color='#00FF00')\n #set up the text position\n tsize=0.83\n self.ymin, self.ymax = self.axes.get_ylim()\n ppp=(self.ymax-self.ymin)/(self.arcfigure.figure.get_figheight()*self.arcfigure.figure.get_dpi())\n f=self.ymax-10*tsize*ppp\n for x,w in zip(self.xp, self.wp):\n w='%6.2f' % float(w)\n self.axes.text(x, f, w, size='small', rotation='vertical', color='#00FF00')", "def sample_pin_position_range():\n #Create a sample goniometer\n g = TopazInHouseGoniometer()\n\n #Initialize the leg limits\n g.relative_sample_position = column([0.0, 0.0, 0.0])\n g.getplatepos(0.0, 0.0, 0.0)\n g.calculate_leg_xy_limits(visualize=True)\n\n# if True:\n# pylab.show()\n# return\n\n n 
= 17\n positions = np.linspace(-8, 8, n) #Range calculated in mm\n allowed = np.zeros( (n,n,n) )\n for (ix, x) in enumerate(positions):\n print \"Calculating x\", x\n for (iy, y) in enumerate(positions):\n for (iz, z) in enumerate(positions):\n #Set up\n g.relative_sample_position = column([x, y, z])\n allowed[ix,iy,iz] = g.are_angles_allowed([0., 0., 0.], return_reason=False)\n\n #Do a plot\n\n pylab.figure(1, figsize=[15,15])\n pylab.title(\"Allowable XZ sample positions\")\n for (iy, y) in enumerate(positions):\n print \"At y of\", y, \", # of points = \", np.sum( allowed[:, iy,:])\n if iy < 16:\n pylab.subplot(4,4,iy+1)\n pylab.pcolor(positions, positions, allowed[:, iy, :].transpose(), norm=pylab.Normalize(0, 1))\n pylab.xlabel(\"x\")\n pylab.ylabel(\"z\")\n pylab.title(\"y = %.3f mm\" % y)\n pylab.draw()\n pylab.axis('equal')\n pylab.show()\n #pylab.", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def plot_scatter_points(self):\n self.plot(1)", "def get_landmarks(self, image): # from https://www.paulvangent.com/2016/08/05/emotion-recognition-using-facial-landmarks/\n # Ask the detector to find the bounding boxes of each face. The 1 in the\n # second argument indicates that we should upsample the image 1 time. This\n # will make everything bigger and allow us to detect more faces.\n detections = self.detector(image, 1)\n if len(detections) < 1: # Number of faces detected = 0\n # print(\"Number of faces detected: {}\".format(len(detections)))\n return None\n # Draw Facial Landmarks with the predictor class\n shape = self.predictor(image, detections[0])\n xlist = []\n ylist = []\n for i in range(68): # Store X and Y coordinates in two lists\n xlist.append(float(shape.part(i).x))\n ylist.append(float(shape.part(i).y))\n\n landmarks_vectorised = []\n landmarks_vectorised = self.our_ft_landmark(xlist, ylist)# Extaraction des features\n\n xmean = np.mean(xlist)\n ymean = np.mean(ylist)\n xcentral = [(x-xmean) for x in xlist]\n ycentral = [(y-ymean) for y in ylist]\n \n for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):\n landmarks_vectorised.append(w)\n landmarks_vectorised.append(z)\n # landmarks_vectorised.append(x)\n # landmarks_vectorised.append(y)\n meannp = np.asarray((ymean, xmean))\n coornp = np.asarray((z, w))\n dist = np.linalg.norm(coornp-meannp)# Distance euclidienne\n landmarks_vectorised.append(dist)\n landmarks_vectorised.append((math.atan2(y, x)*360)/(2*math.pi))# Calcule de l'ongle entre le moyenne et un point\n\n return landmarks_vectorised", "def land(self):\n self.drone.land()", "def _plot_map(self):\n\n # Plot points if they exist\n\n if len(self._laserX) > 0:\n self._plot_laser()\n\n if len(self._goalX) > 0:\n self._plot_goal()\n\n if len(self._summitX) > 0:\n self._plot_summit()\n\n self._plot_objects()\n\n # Update Plot\n self._fig.canvas.draw_idle()\n\n plt.pause(0.01)" ]
[ "0.65083843", "0.65083843", "0.65083843", "0.63315463", "0.63290757", "0.615328", "0.615054", "0.60093755", "0.5987699", "0.59867996", "0.5930163", "0.58884436", "0.5883454", "0.587664", "0.58760786", "0.5844076", "0.5840335", "0.58364946", "0.5829448", "0.58185834", "0.58177054", "0.57857263", "0.57656926", "0.576437", "0.57448727", "0.5734852", "0.5722001", "0.5721624", "0.57086796", "0.56786686" ]
0.6548161
0
Switches between two operations depending on a scalar value (int or bool). Note that both `then_expression` and `else_expression` should be symbolic tensors of the same shape. Arguments
def switch(condition, then_expression, else_expression):
    x_shape = copy.copy(then_expression.get_shape())
    x = tf.cond(tf.cast(condition, 'bool'), lambda: then_expression, lambda: else_expression)
    x.set_shape(x_shape)
    return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ifelse(\n self,\n true_expr: ir.Value,\n false_expr: ir.Value,\n ) -> ir.Value:\n # Result will be the result of promotion of true/false exprs. These\n # might be conflicting types; same type resolution as case expressions\n # must be used.\n return ops.Where(self, true_expr, false_expr).to_expr()", "def IF(logical_statement, expression_true, expression_false):\n if(type(logical_statement) == bool):\n if(logical_statement == True):\n return(expression_true)\n else:\n return(expression_false)\n else:\n print('Invalid type: logical statement does not evaluate to True or False.')", "def switch(cond, ift, iff):\n if (cf.use_theano and (isinstance(cond, theano.graph.basic.Variable)\n or isinstance(ift, theano.graph.basic.Variable)\n or isinstance(iff, theano.graph.basic.Variable))):\n return T.switch(cond, ift, iff)\n else:\n return np.where(cond, ift, iff)", "def ifelse(condition, then_branch, else_branch, name=None, outshape=None):\n # First check if we can replace an Theano conditional by a Python one\n if is_theano_object(condition) and is_constant(condition):\n condition = bool(condition.data)\n\n # Now the actual function\n if (cf.use_theano\n and not isinstance(condition, builtins.bool)\n and (isinstance(condition, theano.graph.basic.Variable)\n or isinstance(then_branch, theano.graph.basic.Variable)\n or isinstance(else_branch, theano.graph.basic.Variable))):\n # Theano function\n if isinstance(then_branch, LazyEval):\n then_branch = then_branch.eval()\n if isinstance(else_branch, LazyEval):\n else_branch = else_branch.eval()\n if outshape is None:\n # We call `bool` on the condition, in case it's a Python boolean\n # (even shim.ge & friends can return bools)\n return theano.ifelse.ifelse(bool(condition), then_branch,\n else_branch, name)\n else:\n return theano.ifelse.ifelse(bool(condition), then_branch.reshape(outshape),\n else_branch.reshape(outshape), name)\n else:\n # Python function\n if condition:\n if isinstance(then_branch, LazyEval):\n then_branch = then_branch.eval()\n return then_branch\n else:\n if isinstance(else_branch, LazyEval):\n else_branch = else_branch.eval()\n return else_branch", "def toggle(condition, if_true, if_false):\n return (if_true if condition else if_false)", "def _pick_scalar_condition(pred, cond_true, cond_false):\n # Note: This function is only valid if all of pred, cond_true, and cond_false\n # are scalars. 
This means its semantics are arguably more like tf.cond than\n # tf.select even though we use tf.select to implement it.\n pred_ = _static_value(pred)\n if pred_ is None:\n return array_ops.where_v2(pred, cond_true, cond_false)\n return cond_true if pred_ else cond_false", "def switch(cond, ift, iff):", "def ifelse(test, if_true, if_false):\n if test:\n return if_true\n else:\n return if_false", "def if_then(condition: Callable[[], bool], then_source: ObservableBase,\n else_source: ObservableBase = None) -> ObservableBase:\n from ..operators.observable.ifthen import if_then\n return if_then(condition, then_source, else_source)", "def _op(\n x: Union[bool, dts.Boolean, tps.BooleanValue],\n y: Union[bool, dts.Boolean, tps.BooleanValue],\n ) -> T:", "def test_or_else(\n self,\n start: Result[int, int],\n first: t.Callable[[int], Result[int, int]],\n second: t.Callable[[int], Result[int, int]],\n exp: Result[int, int],\n ) -> None:\n assert start.or_else(first).or_else(second) == exp", "def ifelse_lift_single_if_through_acceptable_ops(main_node):\r\n if not (isinstance(main_node.op, acceptable_ops)):\r\n return False\r\n all_inp_nodes = set()\r\n for inp in main_node.inputs:\r\n all_inp_nodes.add(inp.owner)\r\n ifnodes = [x for x in list(all_inp_nodes)\r\n if x and isinstance(x.op, IfElse)]\r\n # if we have multiple ifs as inputs .. it all becomes quite complicated\r\n # :)\r\n if len(ifnodes) != 1:\r\n return False\r\n node = ifnodes[0]\r\n op = node.op\r\n\r\n ts = node.inputs[1:][:op.n_outs]\r\n fs = node.inputs[1:][op.n_outs:]\r\n\r\n outs = main_node.outputs\r\n mop = main_node.op\r\n true_ins = []\r\n false_ins = []\r\n\r\n for x in main_node.inputs:\r\n if x in node.outputs:\r\n idx = node.outputs.index(x)\r\n true_ins.append(ts[idx])\r\n false_ins.append(fs[idx])\r\n else:\r\n true_ins.append(x)\r\n false_ins.append(x)\r\n true_eval = mop(*true_ins, **dict(return_list=True))\r\n false_eval = mop(*false_ins, **dict(return_list=True))\r\n #true_eval = clone(outs, replace = dict(zip(node.outputs, ts)))\r\n #false_eval = clone(outs, replace = dict(zip(node.outputs, fs)))\r\n\r\n nw_outs = ifelse(node.inputs[0], true_eval, false_eval, return_list=True)\r\n return nw_outs", "def iff(bool,trueValue,falseValue):\n if bool:\n return trueValue\n else:\n return falseValue", "def Switch(*args):\n arg_list = list(args)\n arg_list.reverse()\n #\n while arg_list:\n cond, expr = arg_list.pop(), arg_list.pop()\n if cond:\n return expr\n return None", "def binop(x, y, op):\n\n if (x is None) and (y is None):\n return None\n\n x = x if (x is not None) else 0\n y = y if (y is not None) else 0\n return op(x,y)", "def logical_eval(value, logic, check, var):\r\n # Logical statement aquired from input\r\n if logic == '=':\r\n return value == check\r\n # All other string logical expressions can be evaluated with eval()\r\n else:\r\n return eval(var)", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def evaluate_operation(\n statement: ast.BinOp,\n) -> Optional[Union[int, float, str, bytes]]:\n if isinstance(statement.left, ast.BinOp):\n left = evaluate_operation(statement.left)\n else:\n left = evaluate_node(statement.left)\n\n if isinstance(statement.right, ast.BinOp):\n right = evaluate_operation(statement.right)\n else:\n right = evaluate_node(statement.right)\n\n op = _AST_OPS_TO_OPERATORS.get(type(statement.op))\n\n evaluation = None\n if op is not None:\n with suppress(Exception):\n evaluation = op(left, right)\n\n return evaluation", "def if_function(condition, true_result, 
false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def cond(\n scheduler: Scheduler,\n parent_job: Job,\n sexpr: SchedulerExpression,\n cond_expr: Any,\n then_expr: Any,\n *rest: Any,\n) -> Promise:\n exprs = (cond_expr, then_expr) + rest\n\n def then(args):\n i, cond_value = args\n\n if cond_value:\n # Return 'then' clause.\n return scheduler.evaluate(exprs[i + 1], parent_job=parent_job)\n\n elif len(exprs) - i == 3:\n # No more expresses, so return 'otherwise' clause.\n return scheduler.evaluate(exprs[i + 2], parent_job=parent_job)\n\n else:\n # Recurse to next conditional clause.\n return scheduler.evaluate((i + 2, exprs[i + 2]), parent_job=parent_job).then(then)\n\n # Evaluate conditional clause.\n return scheduler.evaluate((0, cond_expr), parent_job=parent_job).then(then)", "def where(cond, x_1, x_2):\n cond = cond.type(dtype_float) \n return (cond * x_1) + ((1-cond) * x_2)", "def syntax_if_elif_else():\n s = 0.1\n if s < 0:\n print(\"s is smaller than 0\")\n elif s > 1:\n print(\"s is greater than 1\")\n else:\n print(\"s is between 0 and 1\")\n\n ## Output\n # s is between 0 and 1", "def scalar_function(x, y):\n #Your code here\n if x<=y:\n fs = x*y\n else:\n fs = x/y\n return fs\n raise NotImplementedError", "def eval_if_else(item, motif_node_dict):\n # evaluate the `if` branch first\n true_branch = item.iftrue\n if type(true_branch).__name__ == 'FuncCall':\n motif_node, left = eval_function_call(true_branch, motif_node_dict) \n elif type(true_branch).__name__ == 'Assignment':\n left = eval_assignment(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Decl':\n left = eval_declaration(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Return':\n left = eval_return(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Compound':\n left = eval_function_body(true_branch, motif_node_dict)\n else:\n left = None\n # evaluate the `else` branch if it exists\n false_branch = item.iffalse\n if type(false_branch).__name__ == 'FuncCall':\n motif_node, right = eval_function_call(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Assignment':\n right = eval_assignment(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Decl':\n right = eval_declaration(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Return':\n right = eval_return(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Compound':\n right = eval_function_body(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'If': # else if case\n right = eval_if_else(false_branch, motif_node_dict)\n else:\n right = None\n\n if left or right:\n # only under certain circumstances do we actually create alternation node\n if eval_if_condition(item.cond):\n return provenance.create_alternation_node(left, right)\n else:\n # if only one branch is not None, we need not create a group node\n if not left:\n return right\n if not right:\n return left\n return provenance.create_group_node(left, right)\n else:\n return None", "def or_else_call(\n self,\n function: Callable[[], _ValueType],\n ) -> Union[_FirstType, 
_ValueType]:", "def wrap_if_constant(x):\n\n if is_python_constant(x):\n return const(x)\n else:\n assert isinstance(x, Expr), \"Expected expression, got \" + str(x)\n return x", "def expression(self, item):\n a = item.get(\"assertion\", item.get(\"expression\"))\n\n values = item[\"values\"]\n\n left = self.resolve(values[0])\n\n if a == \"equals\" or a == \"equal\":\n right = self.resolve(values[1])\n return left == right\n elif a == \"less\":\n right = self.resolve(values[1])\n return left < right\n elif a == \"less_equal\":\n right = self.resolve(values[1])\n return left <= right\n elif a == \"not\":\n return not left\n elif a == \"or\":\n if left is True:\n return True\n\n for i in range(1, len(values)):\n result = self.resolve(values[i])\n if result is True:\n return True\n\n return False\n elif a == \"and\":\n if left is False:\n return False\n\n for i in range(1, len(values)):\n result = self.resolve(values[i])\n if result is False:\n return False\n\n return True\n elif a == \"sum\":\n result = left\n\n assert type(left) in (int, float, str, list)\n # Sum supports flattened values since this only occurs when\n # a string like \"{a} {b} {c}\" is compiled. Everything else,\n # including arithmetic is compiled as a nested expression.\n for i in range(1, len(values)):\n r = self.resolve(values[i])\n\n if type(r) in (int, float, list) and type(result) in (\n int,\n float,\n list,\n ):\n result += r\n else:\n result = f\"{str(result)}{str(r)}\"\n\n return result\n elif a == \"subtraction\":\n right = self.resolve(values[1])\n assert type(left) in (int, float)\n assert type(right) in (int, float)\n return left - right\n elif a == \"multiplication\":\n right = self.resolve(values[1])\n assert type(left) in (int, float, str)\n assert type(right) in (int, float, str)\n return left * right\n elif a == \"modulus\":\n right = self.resolve(values[1])\n assert type(left) in (int, float)\n assert type(right) in (int, float)\n return left % right\n elif a == \"division\":\n right = self.resolve(values[1])\n assert type(left) in (int, float, str)\n assert type(right) in (int, float, str)\n return left / right\n elif a == \"exponential\":\n right = self.resolve(values[1])\n assert type(left) in (int, float)\n assert type(right) in (int, float)\n return left ** right\n else:\n assert False, f\"Unsupported operation: {a}\"" ]
[ "0.63129026", "0.60874236", "0.608386", "0.60220104", "0.59478444", "0.59066546", "0.58980834", "0.58406264", "0.56265366", "0.5610067", "0.5571444", "0.55693847", "0.55487734", "0.5547", "0.5502982", "0.54745007", "0.5456266", "0.5454656", "0.5417349", "0.5417349", "0.5417349", "0.5417349", "0.54079", "0.53816575", "0.5381615", "0.5365841", "0.53297764", "0.53145", "0.5300022", "0.5298388" ]
0.7355844
0
Returns a session that will use CPU's only
def make_session(num_cpu=None, make_default=False):
    if num_cpu is None:
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    tf_config = tf.ConfigProto(
        inter_op_parallelism_threads=num_cpu,
        intra_op_parallelism_threads=num_cpu)
    tf_config.gpu_options.allocator_type = 'BFC'
    if make_default:
        return tf.InteractiveSession(config=tf_config)
    else:
        return tf.Session(config=tf_config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def single_threaded_session():\n return make_session(num_cpu=1)", "def make_session(num_cpu, graph=None):\n tf_config = tf.compat.v1.ConfigProto(\n allow_soft_placement=True,\n inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n\n # Prevent tensorflow from taking all the gpu memory.\n tf_config.gpu_options.allow_growth = True\n\n return tf.compat.v1.Session(config=tf_config, graph=graph)", "def make_session(config=None, num_cpu=None, make_default=False, graph=None):\n if num_cpu is None:\n num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))\n if config is None:\n config = tf.ConfigProto(\n allow_soft_placement=True,\n inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n config.gpu_options.allow_growth = True\n\n if make_default:\n return tf.InteractiveSession(config=config, graph=graph)\n else:\n return tf.Session(config=config, graph=graph)", "def cpu(device_id=0):\n return Context('cpu', device_id)", "def cpu(device_id=0):\n return Context('cpu', device_id)", "def get_session():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n return tf.Session(config=config)", "def create_cpu():\n return CPU()", "def get_cpu(self):\n pass", "def get_session():\n # See: https://www.tensorflow.org/tutorials/using_gpu#allowing_gpu_memory_growth\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n session = tf.Session(config=config)\n return session", "def get_cpu_profiler() -> SimpleCPUProfiler:\n return _cpu_instance", "def get_active_test_session():\n result=None\n sql=\"SELECT * FROM sessions WHERE is_over=0\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n result=c.fetchone()\n conn.close()\n return result", "def _get_session_from_cache(thread_ident: int) -> requests.Session:\n return _GLOBAL_BACKEND_FACTORY()", "def cpu(self):\n self.share = self.share.cpu()\n return self", "def get_cpu_mode(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuMode', self.handle)", "def get_session(gpu_fraction=0.6):\n num_threads = os.environ.get('OMP_NUM_THREADS')\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)\n\n if num_threads:\n return tf.Session(config=tf.ConfigProto(\n gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))\n else:\n return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))", "def get_session() -> requests.Session:\n return _get_session_from_cache(thread_ident=threading.get_ident())", "def session(self):\n session = None\n connected = False\n while not connected:\n try:\n session = self.session_pool.pop()\n except IndexError:\n connection = connect(self.address, self.ssl_context, **self.config)\n session = Session(self, connection)\n connected = True\n else:\n if session.healthy:\n #session.connection.reset()\n connected = session.healthy\n return session", "def build_session():\n return requests.Session()", "def make_session():\n import aiohttp\n conn = aiohttp.TCPConnector(limit_per_host=int(\n os.getenv('AIO_CONN_LIMIT', 10)))\n timeout = aiohttp.ClientTimeout(\n total=int(os.getenv('AIO_TOTAL_TIMEOUT', 80)),\n connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n sock_read=int(os.getenv('AOI_READ_TIMEOUT', 30)),\n sock_connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n )\n s = aiohttp.ClientSession(connector=conn, timeout=timeout)\n return s", "def get_cpu_mode(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuMode', self.handle)", "def startSession():\n sess = tf.InteractiveSession()\n 
tf.global_variables_initializer().run()\n return sess", "def get_tf_session(gpumem):\n tf.reset_default_graph()\n tf_config = tf.ConfigProto(inter_op_parallelism_threads=1,\n intra_op_parallelism_threads=1)\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpumem)\n session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n def get_available_gpus():\n from tensorflow.python.client import device_lib\n local_device_protos = device_lib.list_local_devices()\n return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']\n print(\"AVAILABLE GPUS: \", get_available_gpus())\n return session", "def get(**kwargs):\n # Hack to get the session object from Streamlit.\n\n ctx = get_report_ctx()\n\n this_session = None\n\n session_info = Server.get_current()._get_session_info(ctx.session_id)\n this_session = session_info.session\n\n if this_session is None:\n raise RuntimeError(\n \"Oh noes. Couldn't get your Streamlit Session object\"\n \"Are you doing something fancy with threads?\"\n )\n\n # Got the session object! Now let's attach some state into it.\n\n if not hasattr(this_session, \"_custom_session_state\"):\n this_session._custom_session_state = SessionState(**kwargs)\n\n return this_session._custom_session_state", "def session_for_read():\n return enginefacade.reader.using(_CONTEXT)", "def session():\n def session():\n return BaseUrlSession()\n return session", "def get_current_session(self) -> SessionType:", "def getSession():\n return call(\"getSession\")", "def open_session(self):\n return self.Session()", "def cpu(self):\r\n return self._cpu", "def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")" ]
[ "0.7916806", "0.6607453", "0.63830155", "0.6224698", "0.6224698", "0.6165562", "0.61295897", "0.61049616", "0.60454756", "0.600789", "0.6007467", "0.5951829", "0.5934167", "0.59015", "0.58531624", "0.57040143", "0.56954604", "0.5674512", "0.5665763", "0.562259", "0.56143194", "0.55907685", "0.55847234", "0.5576154", "0.55462366", "0.55270654", "0.54696316", "0.54399717", "0.54312897", "0.54218316" ]
0.66379386
1
Returns a session which will only use a single CPU
def single_threaded_session():
    return make_session(num_cpu=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_session(num_cpu=None, make_default=False):\n if num_cpu is None:\n num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))\n tf_config = tf.ConfigProto(\n inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n tf_config.gpu_options.allocator_type = 'BFC'\n if make_default:\n return tf.InteractiveSession(config=tf_config)\n else:\n return tf.Session(config=tf_config)", "def make_session(num_cpu, graph=None):\n tf_config = tf.compat.v1.ConfigProto(\n allow_soft_placement=True,\n inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n\n # Prevent tensorflow from taking all the gpu memory.\n tf_config.gpu_options.allow_growth = True\n\n return tf.compat.v1.Session(config=tf_config, graph=graph)", "def make_session(config=None, num_cpu=None, make_default=False, graph=None):\n if num_cpu is None:\n num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))\n if config is None:\n config = tf.ConfigProto(\n allow_soft_placement=True,\n inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n config.gpu_options.allow_growth = True\n\n if make_default:\n return tf.InteractiveSession(config=config, graph=graph)\n else:\n return tf.Session(config=config, graph=graph)", "def get_session():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n return tf.Session(config=config)", "def _get_session_from_cache(thread_ident: int) -> requests.Session:\n return _GLOBAL_BACKEND_FACTORY()", "def get_session(gpu_fraction=0.6):\n num_threads = os.environ.get('OMP_NUM_THREADS')\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)\n\n if num_threads:\n return tf.Session(config=tf.ConfigProto(\n gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))\n else:\n return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))", "def get_session():\n # See: https://www.tensorflow.org/tutorials/using_gpu#allowing_gpu_memory_growth\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n session = tf.Session(config=config)\n return session", "def session(self):\n session = None\n connected = False\n while not connected:\n try:\n session = self.session_pool.pop()\n except IndexError:\n connection = connect(self.address, self.ssl_context, **self.config)\n session = Session(self, connection)\n connected = True\n else:\n if session.healthy:\n #session.connection.reset()\n connected = session.healthy\n return session", "def get_active_test_session():\n result=None\n sql=\"SELECT * FROM sessions WHERE is_over=0\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n result=c.fetchone()\n conn.close()\n return result", "def create_cpu():\n return CPU()", "def cpu(self):\n self.share = self.share.cpu()\n return self", "def get_session() -> requests.Session:\n return _get_session_from_cache(thread_ident=threading.get_ident())", "def cpu(device_id=0):\n return Context('cpu', device_id)", "def cpu(device_id=0):\n return Context('cpu', device_id)", "def get_cpu_profiler() -> SimpleCPUProfiler:\n return _cpu_instance", "def get_tf_session(gpumem):\n tf.reset_default_graph()\n tf_config = tf.ConfigProto(inter_op_parallelism_threads=1,\n intra_op_parallelism_threads=1)\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpumem)\n session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n def get_available_gpus():\n from tensorflow.python.client import device_lib\n local_device_protos = 
device_lib.list_local_devices()\n return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']\n print(\"AVAILABLE GPUS: \", get_available_gpus())\n return session", "def get_session(config=None):\n sess = tf.get_default_session()\n if sess is None:\n sess = make_session(config=config, make_default=True)\n return sess", "def new_session(self):\n return self._SessionLocal()", "def make_session():\n import aiohttp\n conn = aiohttp.TCPConnector(limit_per_host=int(\n os.getenv('AIO_CONN_LIMIT', 10)))\n timeout = aiohttp.ClientTimeout(\n total=int(os.getenv('AIO_TOTAL_TIMEOUT', 80)),\n connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n sock_read=int(os.getenv('AOI_READ_TIMEOUT', 30)),\n sock_connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n )\n s = aiohttp.ClientSession(connector=conn, timeout=timeout)\n return s", "def get_cpu(self):\n pass", "def backend(anonymous_session):\n return anonymous_session", "def session():\n def session():\n return BaseUrlSession()\n return session", "def build_session():\n return requests.Session()", "def session(self):\n return self.ssession()", "def open_session(self):\n return self.Session()", "def get_current_session(self) -> SessionType:", "def get_session(self):\n return ESSession(self)", "def _get_session(self):\n if current_uow and not self._outside_uow:\n return current_uow.get_session(self.provider.name)\n else:\n new_connection = self.provider.get_connection()\n if not new_connection.is_active:\n new_connection.begin()\n return new_connection", "def new_session(self):\n return self.Session()", "def get(**kwargs):\n # Hack to get the session object from Streamlit.\n\n ctx = get_report_ctx()\n\n this_session = None\n\n session_info = Server.get_current()._get_session_info(ctx.session_id)\n this_session = session_info.session\n\n if this_session is None:\n raise RuntimeError(\n \"Oh noes. Couldn't get your Streamlit Session object\"\n \"Are you doing something fancy with threads?\"\n )\n\n # Got the session object! Now let's attach some state into it.\n\n if not hasattr(this_session, \"_custom_session_state\"):\n this_session._custom_session_state = SessionState(**kwargs)\n\n return this_session._custom_session_state" ]
[ "0.6815164", "0.67610806", "0.652727", "0.6317698", "0.6214333", "0.61678463", "0.6132221", "0.6112054", "0.6099076", "0.59487903", "0.59289527", "0.59029776", "0.5881761", "0.5881761", "0.58647627", "0.5776358", "0.57622933", "0.5718971", "0.57157207", "0.5713068", "0.5638183", "0.56323713", "0.56230086", "0.56129944", "0.5611254", "0.55815256", "0.55335766", "0.55271035", "0.5517806", "0.5494408" ]
0.843942
0
Linear interpolation between initial_p and final_p over schedule_timesteps. After this many timesteps pass final_p is returned.
def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
    self.schedule_timesteps = schedule_timesteps
    self.final_p = final_p
    self.initial_p = initial_p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpolate(self, previous_goalpoint, next_goalpoint, t_prev, t_next):\n des_pos = previous_goalpoint + (next_goalpoint - previous_goalpoint) * (self.curr_delta_time- t_prev)/ (t_next - t_prev)\n\n # print 'current_delta_time: ', self.curr_delta_time\n # print \"interpolated pos:\", des_pos\n\n return des_pos", "def linear_schedule(initial_value: float) -> Callable[[float], float]:\n\n def func(progress_remaining: float) -> float:\n \"\"\"\n Progress will decrease from 1 (beginning) to 0.\n\n :param progress_remaining: (float)\n :return: (float) current learning rate\n \"\"\"\n return progress_remaining * initial_value\n\n return func", "def _lerp(self, start_value, end_value):\n # @todo: can probably replace this with np.interp(self.step_lerp_pcts, [0, 1], [start_value, end_value])\n return (1.0-self.step_lerp_pcts)*start_value + self.step_lerp_pcts*end_value", "def linear_schedule(initial_value: float) -> Callable[[float], float]:\n def func(progress_remaining: float) -> float:\n \"\"\"\n Progress will decrease from 1 (beginning) to 0.\n\n :param progress_remaining:\n :return: current learning rate\n \"\"\"\n return progress_remaining * initial_value\n\n return func", "def linear_schedule(initial_value: float) -> Callable[[float], float]:\n def func(progress_remaining: float) -> float:\n \"\"\"\n Progress will decrease from 1 (beginning) to 0.\n\n :param progress_remaining:\n :return: current learning rate\n \"\"\"\n return progress_remaining * initial_value\n\n return func", "def linear(initial, final, T):\n def f(t):\n if t < T:\n r = t / float(T)\n p = (1 - r) * initial + r * final\n else:\n p = final\n return p\n return f", "def linear_interpolate(self, last_goal, goal):\n # We interpolate to reach the commanded desired position in self.ramp_ratio % of the time we have this goal\n delta_x_per_step = (goal - last_goal) / self.interpolation_steps\n self.linear = np.array(\n [(last_goal + i * delta_x_per_step) for i in range(1, int(self.interpolation_steps) + 1)])", "def compute_spline(self, initial_state, final_state):\r\n a, b, c, s = self._initialize_spline(initial_state, final_state)\r\n final_state_pred = self._motion_update_one_shot(initial_state, a, b, c, s)\r\n\r\n converge = self._check_converge(final_state, final_state_pred)\r\n total_iter = 0\r\n # pdb.set_trace()\r\n while (total_iter < self.max_iter) & (converge is not True): # (total_iter < self.max_iter) \r\n \r\n \r\n correction = self._compute_correction(initial_state, final_state, a, b, c, s)\r\n a = a - correction[0]\r\n b = b - correction[1]\r\n # c = c - correction[2]\r\n s = s - correction[2]\r\n \r\n final_state_pred = self._motion_update_one_shot(initial_state, a, b, c, s)\r\n\r\n converge = self._check_converge(final_state, final_state_pred)\r\n total_iter = total_iter +1\r\n\r\n # print(total_iter)\r\n # print(final_state_pred)\r\n # print(s)\r\n\r\n # sometimes it converge to negative s (travel distance) which \r\n # is invalid..., need to figure it out...\r\n if (converge == True) & (s > 0):\r\n final_state_pred, point_list = self._path_sampling_one_shot(initial_state, a, b, c, s)\r\n else:\r\n point_list = [[-1,-1]]\r\n\r\n return point_list", "def getLinearSchedule(self, schedule, startTime):\n\t\ttime_step = self.time_step\n\t\tvmax = self.maximam_velocity_vector\n\t\ttrips = self.no_of_trips_vector\n\t\tdst = self.dst\n\n\t\tfor fIndex,fItem in enumerate(schedule):\n\t\t\tforwardDirection = True\n\t\t\ttripNo = 1\n\t\t\tfor tIndex, tItem in enumerate(fItem):\n\t\t\t\tposition = (vmax[fIndex] * 
((time_step * tIndex) + startTime[fIndex]))\n\t\t\t\trangeStart = game_utility.findRangeStart(position, dst)\n\t\t\t\tif(position > dst and (rangeStart/dst)%2 != 0):\n\t\t\t\t\t# RETURNING FERRY\n\t\t\t\t\tposition = dst - (position - rangeStart)\n\t\t\t\t\tif(forwardDirection):\n\t\t\t\t\t\t#tripNo = tripNo + 1\n\t\t\t\t\t\tforwardDirection = False\n\t\t\t\t\t\t#print(\"return\", position)\n\t\t\t\telif (position > dst and (rangeStart/dst)%2 == 0):\n\t\t\t\t\t# MOVING FORWARD FERRY\n\t\t\t\t\tposition = position - rangeStart;\n\t\t\t\t\tif(not forwardDirection):\n\t\t\t\t\t\ttripNo = tripNo + 1\n\t\t\t\t\t\tforwardDirection = True\n\t\t\t\t\t\t#print(\"forward\", position)\n\t\t\t\t#print(format(max(game_utility.normalize(position, dst), 0.0), '.2f'))\n\t\t\t\t#print(rangeStart)\n\t\t\t\tif(tripNo > trips[fIndex]):\n\t\t\t\t\tposition = 0\n\t\t\t\tschedule[fIndex][tIndex] = format(max(game_utility.normalize(position, dst), 0.0), '.2f')\n\t\treturn schedule", "def linear_interpolation(self, pt1, pt2, unknown):\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity", "def linear_evolve(self,nt=1):\n for l in range(nt):\n y_temp = np.empty(self.y.shape[0])\n for i in range(self.y.shape[0]):\n \n # idx left to the departure point\n j = int(np.floor((self.x[i]-self.u[i]*self.dt)/self.dx))\n # idx right to the departure point\n k = j+1\n print i, j, k\n # linear interpolation\n alpha = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n y_temp[i] = (1-alpha)*self.y[j] + alpha*self.y[k]\n # copy array to current time\n self.y = np.copy(y_temp)\n stop\n #return current varibale\n return self.y", "def linear_annealing(self, n, total, p_initial, p_final):\n if n == total:\n print \"exploration period over\"\n if n >= total:\n return p_final\n else:\n return p_initial - (n * (p_initial - p_final)) / (total)", "def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):\n final_lr = initial_lr * 0.1\n init_lr = (initial_lr - final_lr)\n lr = final_lr + init_lr - (init_lr * (min(1., epoch / (float(total_num_epochs)))))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def lerp(a, b, t):\n return (1 - t) * a + t * b", "def _interp_array(self, start: np.ndarray, end: np.ndarray,\n num_steps: int):\n alpha = np.linspace(0., 1., num_steps)\n beta = 1 - alpha\n return np.einsum('a,bc->abc', alpha, end) + np.einsum('a,bc->abc', beta,\n start)", "def linear_schedule(initial_value: Union[float, str]) -> Callable[[float], float]:\n if isinstance(initial_value, str):\n initial_value = float(initial_value)\n\n def func(progress_remaining: float) -> float:\n \"\"\"\n Progress will decrease from 1 (beginning) to 0\n :param progress_remaining: (float)\n :return: (float)\n \"\"\"\n return progress_remaining * initial_value\n\n return func", "def linear(min_iterations, i, start = start_temp, final = final_temp):\n\n\ttemperature = start - i * (start - final) / min_iterations\n\n\treturn temperature", "def interpolate(self, t):\n try:\n n = len(self.t)\n except TypeError:\n # self.t is not a sequence. 
Re-raise the exception\n # with an appropriate error message.\n raise TypeError(\"Please run the simulation first\")\n else:\n if (n < 2):\n raise ValueError(\"Not enough simulation steps\")\n tmin = self.t[0]\n tmax = self.t[n-1]\n if t < tmin or t > tmax:\n raise ValueError(\"Requested time is outside the simulated interval\")\n if self.adaptiveStepSize:\n nbelow = bisect_right(self.t, t) - 1 \n else:\n dt = (tmax - tmin)*1.0/(n - 1)\n nbelow = int(math.floor((t - tmin)/dt))\n nabove = nbelow + 1\n if nabove >= n:\n nabove = n - 1\n nbelow = nabove - 1\n x = interpolate_Hermite(t, self.t[nbelow], self.x[nbelow], self.v[nbelow],\n self.t[nabove], self.x[nabove], self.v[nabove])\n v = interpolate_linear(t, self.t[nbelow], self.v[nbelow],\n self.t[nabove], self.v[nabove])\n return x, v", "def linear_interpolate_release(mvi, j):\n set_system_state(mvi, 1)\n lam1 = mvi.system.lambda_()[j]\n set_system_state(mvi, 2)\n lam2 = mvi.system.lambda_()[j]\n\n # If either of the following loops are entered, there are likely going to\n # be problems.\n if (lam1 < 0) and (lam2 < 0):\n #add_constraints(mvi, mvi._state1_releases)\n #print mvi.lambda1c[j]\n #print mvi\n #raise Exception(\"Bad release interpolation.\")\n print 'WARNING: BAD INTERPOLANT'\n return mvi.t1, mvi.q1\n\n if lam1 < 0:\n return mvi.t1, mvi.q1\n\n tr = mvi.t1 - (lam1/(lam2-lam1))*(mvi.t2-mvi.t1)\n frac = (tr-mvi.t1)/(mvi.t2-mvi.t1)\n qr = frac*(mvi.q2-mvi.q1)+mvi.q1\n\n return tr, qr", "def linear_schedule(progress):\n return 1 - progress", "def __init__(self,\n schedule_timesteps,\n final_p,\n framework,\n initial_p=1.0,\n power=2.0):\n super().__init__(framework=framework)\n assert schedule_timesteps > 0\n self.schedule_timesteps = schedule_timesteps\n self.final_p = final_p\n self.initial_p = initial_p\n self.power = power", "def interpolate_linear(self, transect):\n\n u = np.copy(self.u_mps)\n v = np.copy(self.v_mps)\n\n valid = np.isnan(u) == False\n\n # Check for valid data\n if sum(valid) > 1 and sum(self.valid_data[0, :]) > 1:\n\n # Compute ens_time\n ens_time = np.nancumsum(transect.date_time.ens_duration_sec)\n\n # Apply linear interpolation\n self.u_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=u[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)\n # Apply linear interpolation\n self.v_processed_mps = np.interp(x=ens_time,\n xp=ens_time[self.valid_data[0, :]],\n fp=v[self.valid_data[0, :]],\n left=np.nan,\n right=np.nan)", "def interpolate(a, b):\n x = 1\n i = 1\n f = b[0]\n while i < n:\n b = b*(x-a[i])\n i += 1\n f += (b[i] - f(a[i]))/a[i]) * b\n return f", "def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):\n lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def lerp(self, t):\n pass", "def linear_interpolate_value_change(t0, v0, t1, v1, dt):\n return (v1 - v0)/float(t1-t0) * dt", "def linear_interpolation(left, right, alpha):\n\n return left + alpha * (right - left)", "def get_linear_distribution_for_variable_section(x_initial, x_final, N):\n n = np.arange(N, dtype=float)/N\n n_shift = np.arange(1, N+1, 1, dtype=float)/N\n return (x_final-x_initial)*n + x_initial, (x_final-x_initial)*n_shift + x_initial", "def lerp(a, b, momentum=0.001, momentum_nontrainable=1., trainable=True):\n assert 0.0 < momentum < 1.0, 'momentum must be in range (0.0, 1.0)'\\\n f'but got {momentum}'\n assert 0.0 < momentum_nontrainable <= 1.0, (\n 'momentum_nontrainable must be in range (0.0, 
1.0] but got '\n f'{momentum_nontrainable}')\n if momentum > 0.5:\n warnings.warn(\n 'The value of momentum in EMA is usually a small number,'\n 'which is different from the conventional notion of '\n f'momentum but got {momentum}. Please make sure the '\n f'value is correct.')\n m = momentum if trainable else momentum_nontrainable\n return b + (a - b) * m", "def _compute_newton_step(lambdas, p_norm, w_norm):\n return lambdas.candidate + (p_norm / w_norm) ** 2 * (p_norm - 1)" ]
[ "0.5953423", "0.59146756", "0.5886902", "0.58812714", "0.58812714", "0.5858246", "0.58575207", "0.5845823", "0.5793997", "0.56578064", "0.5634574", "0.55303574", "0.5486555", "0.5473343", "0.5472917", "0.5465888", "0.5437795", "0.540126", "0.54007995", "0.53590375", "0.5346209", "0.52747446", "0.5243442", "0.5243271", "0.5222809", "0.52005017", "0.5197274", "0.51740515", "0.51522017", "0.51346505" ]
0.6360331
1
Test that syntax error are caught when reading a mapping.
def test_read_mapping_errors(content):
    with pytest.raises(IOError):
        vermouth.map_input._read_mapping_partial(content.split('\n'), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_parse_mapping_file_handles_errors(self):\r\n # Empty file\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n [])\r\n # string\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n 'my_mapping_file.txt')\r\n # invalid format (no header line with leading # sign)\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n ['sampleID\\ta\\tb',\r\n '1\\tf\\t43',\r\n '2\\tt\\t44'])\r\n # invalid format (no non-header lines)\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n ['#sampleID\\ta\\tb'])\r\n # invalid format (no header line)\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n ['1\\tf\\t43',\r\n '2\\tt\\t44'])", "def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")", "def test_input_reader_errors():\n with pytest.raises(TypeError):\n load_input_reader(\"not_a_dictionary\")\n with pytest.raises(errors.MapcheteDriverError):\n load_input_reader({})\n with pytest.raises(errors.MapcheteDriverError):\n load_input_reader({\"abstract\": {\"format\": \"invalid_format\"}})", "def test_bad_data_fail(self):\n with self.assertRaises(ValueError):\n mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.map'))", "def test_rule1_SectionNotFound(self):\n odml.terminology.terminologies['map'] = parse(\"S1[T1]\")\n src = parse(\"s1[t1] mapping [T2]\")\n with self.assertRaises(mapping.MappingError):\n self.check(src, None)", "def test_output_reader_errors():\n with pytest.raises(TypeError):\n load_output_reader(\"not_a_dictionary\")\n with pytest.raises(errors.MapcheteDriverError):\n load_output_reader({\"format\": \"invalid_driver\"})", "def test_lexing_error():\n with pytest.raises(SyntaxError):\n lex._lexer(None, None)._load_text(\"TEST\")._throw_lexing_error()", "def testMaplErrorHandling(self):\n try:\n action = Parser.parse_as(error_load1.split(\"\\n\"), mapl.MAPLAction, self.domain)\n self.fail(\"Action with duplicate precondition didn't raise exception\")\n except ParseError, e:\n self.assertEqual(e.token.string, \":precondition\")\n self.assertEqual(e.token.line, 6)\n\n try:\n action = Parser.parse_as(error_load2.split(\"\\n\"), mapl.MAPLAction, self.domain)\n self.fail(\"Action with duplicate replan condition didn't raise exception\")\n except ParseError, e:\n self.assertEqual(e.token.string, \":replan\")\n self.assertEqual(e.token.line, 7)\n\n try:\n action = Parser.parse_as(error_load3.split(\"\\n\"), mapl.MAPLAction, self.domain)\n self.fail(\"Action with duplicate effect statement didn't raise exception\")\n except ParseError, e:\n self.assertEqual(e.token.string, \":effect\")\n self.assertEqual(e.token.line, 8)\n\n try:\n action = Parser.parse_as(error_load4.split(\"\\n\"), mapl.MAPLAction, self.domain)\n self.fail(\"Action with duplicate parameters didn't raise exception\")\n except ParseError, e:\n self.assertEqual(e.token.string, \"?p\")\n self.assertEqual(e.token.line, 4)", "def test_bad_data_fail2(self):\n with self.assertRaises(ValueError):\n mapreader.get_data(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data_corrupt_header.map'))", "def test_check_mapping_data_invalid_mapping_file_format(self):\r\n\r\n mapping_data = ['Sample1\\tAAAA\\tACTG\\tFile1\\ts.1'.split('\\t'),\r\n 'Sample2\\tCCCC\\tACTG'.split('\\t'),\r\n 'Sample3\\tTTTT\\tACTG\\tFile3\\ts.3'.split('\\t')\r\n ]\r\n\r\n headers = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'InputFileNames', 'Description']\r\n\r\n filename_column = 
'InputFileNames'\r\n\r\n self.assertRaises(IndexError, check_mapping_data, mapping_data,\r\n headers, filename_column)", "def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)", "def test_read_raw_suggested(fname):\n with pytest.raises(ValueError, match='Try reading'):\n read_raw(fname)", "def test_output_writer_errors():\n with pytest.raises(TypeError):\n load_output_writer(\"not_a_dictionary\")\n with pytest.raises(errors.MapcheteDriverError):\n load_output_writer({\"format\": \"invalid_driver\"})", "def test_check_mapping_data_invalid_sampleids(self):\r\n\r\n mapping_data = ['Sample1\\tAAAA\\tACTG\\tFile1\\ts.1'.split('\\t'),\r\n 'Sam&ple2\\tCCCC\\tACTG\\tFile2\\ts.2'.split('\\t'),\r\n 'Sample3\\tTTTT\\tACTG\\tFile3\\ts.3'.split('\\t')\r\n ]\r\n\r\n headers = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'InputFileNames', 'Description']\r\n\r\n filename_column = 'InputFileNames'\r\n\r\n self.assertRaises(ValueError, check_mapping_data, mapping_data,\r\n headers, filename_column)", "def test_read_type_error():\n filename = {}\n with pytest.raises(TypeError):\n read_file(filename)", "def test_error_no_mapping(self, app, data_queues):\n res = self._call(app, [1], status=400)\n detail = {\n \"\": (\n '\"[1]\" is not a mapping type: Does not implement dict-like'\n \" functionality.\"\n )\n }\n self.check_response(\n data_queues, res, \"parse_error\", details={\"validation\": detail}\n )", "def test_get_invalid_line(self):\n ars = self.ar[2009][11]['general']\n self.assertRaises(KeyError, ars.__getitem__, 'invalid_section')", "def test_syntax_errors(self):\r\n bad_math_list = [\r\n '11+',\r\n '11*',\r\n 'f((x)',\r\n 'sqrt(x^)',\r\n '3f(x)', # Not 3*f(x)\r\n '3|4',\r\n '3|||4'\r\n ]\r\n bad_exceptions = {}\r\n for math in bad_math_list:\r\n try:\r\n preview.latex_preview(math)\r\n except pyparsing.ParseException:\r\n pass # This is what we were expecting. 
(not excepting :P)\r\n except Exception as error: # pragma: no cover\r\n bad_exceptions[math] = error\r\n else: # pragma: no cover\r\n # If there is no exception thrown, this is a problem\r\n bad_exceptions[math] = None\r\n\r\n self.assertEquals({}, bad_exceptions)", "def test_parseMetadataMap_empty(self):\n self.assertRaises(QiimeParseError, MetadataMap.parseMetadataMap, [])", "def test_syntax_error(self):\n\n self.assertRaises(SyntaxError, self.table.where, 'foo bar')", "def test_check_map_errors(self):\r\n\r\n # Bad header\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_golay_header)\r\n # non DNA characters\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_golay_dna)\r\n # Duplicate barcodes\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_golay_dup_bcs)\r\n # Duplicate SampleIDs\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_golay_dup_sids)\r\n # More than one SampleID, no barcodes or added demultiplex specified\r\n self.assertRaises(ValueError, check_map,\r\n self.valid_mapping_data_no_bcs_added_demultiplex, barcode_type=0)\r\n # No barcodes, added_demultiplex has duplicates\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_no_bcs_added_demultiplex, barcode_type=0,\r\n added_demultiplex_field=\"Added_Demultiplex\")\r\n # Barcodes plus added demultiplex results in duplications\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_bcs_added_demultiplex,\r\n added_demultiplex_field=\"Added_Demultiplex\")\r\n # Missing a barcode\r\n self.assertRaises(ValueError, check_map,\r\n self.invalid_mapping_data_golay_missing_bc,\r\n barcode_type=\"variable_length\")", "def test_parseMetadataMap_empty(self):\r\n self.assertRaises(QiimeParseError, MetadataMap.parseMetadataMap, [])", "def test_bad_structures(bad_structures, mapper):\n for index, structure in enumerate(bad_structures):\n # This is for helping devs finding any errors that may occur\n print(f\"Trying structure number {index} from 'test_bad_structures.json'\")\n with pytest.raises(ValidationError):\n StructureResource(**mapper(MAPPER).map_back(structure))", "def test_map_err(\n self, start: Result[str, int], exp: Result[str, str]\n ) -> None:\n assert start.map_err(str) == exp", "def test_sam_parser_throw(self):\n # self.cleanup = False\n\n some_file = '%s/fake_results' % GOLDEN_DIR\n\n try:\n with open(some_file, 'r') as something:\n for line in something:\n parse_sam_line(line)\n # pylint: disable=broad-except\n except Exception:\n pass\n else:\n self.fail('Exception should have been called when parsing a non-SAM file.')", "def test_bad_data_fail3(self):\n with self.assertRaises(ValueError):\n # create a map file with a header larger than 1024 to see the exception\n map = mapreader.get_data(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data.map'))\n for i in range(map._nlabl):\n label = getattr(map, '_label_{}'.format(i))\n y = 11\n for j in range(1, y):\n setattr(map, '_label_{}'.format(j), label)\n map._nlabl = y\n with open('rm.map', 'w') as f:\n map.write(f)", "def test_missing_column(self):\n df = pd.DataFrame({\"notlat\": [1, 2, 3], \"lon\": [11, 12, 13]})\n with self.assertRaises(Exception) as ctx:\n st.map(df)\n\n self.assertTrue(\"Map data must contain a column named\" in str(ctx.exception))", "def test_format_otu_map_error_on_bad_prefix(self):\r\n self.assertRaises(ValueError, list,\r\n format_otu_map(self.otu_map1, 'my_otu_'))", "def 
test_sample_ids_from_metadata_description(self):\n self.assertRaises(ValueError, sample_ids_from_metadata_description,\n self.tutorial_mapping_f, \"Treatment:Foo\")\n self.tutorial_mapping_f.seek(0)\n self.assertRaises(ValueError, sample_ids_from_metadata_description,\n self.tutorial_mapping_f, \"DOB:!20061218,!20070314,!20071112,\"\n \"!20080116\")", "def test_incompatible_rules():\n\n grammar = \"\"\"\n A: B | C;\n B: 'enumeration';\n C: value=INT;\n \"\"\"\n with pytest.raises(TextXSyntaxError):\n metamodel_from_str(grammar)" ]
[ "0.7488966", "0.71245766", "0.71224445", "0.7086689", "0.685664", "0.68011135", "0.67126226", "0.6616215", "0.6579855", "0.6512076", "0.64878446", "0.64554673", "0.64214325", "0.64154875", "0.64133835", "0.6383648", "0.6362922", "0.6348759", "0.63308614", "0.6324306", "0.63193077", "0.6317401", "0.62844765", "0.62487286", "0.6199227", "0.61846876", "0.61509234", "0.61411375", "0.61354536", "0.61217886" ]
0.7941152
0
Test that regular mapping files are read as expected.
def test_read_mapping_file(case):
    reference = collections.defaultdict(lambda: collections.defaultdict(dict))
    for from_ff, to_ff in itertools.product(case.from_ff, case.to_ff):
        reference[from_ff][to_ff][case.name] = (
            case.mapping, case.weights, case.extra
        )
    ffs = case_to_dummy_ffs(case.from_ff + case.to_ff, [case.name],
                            case.mapping, case.weights, case.extra)
    reference = vermouth.map_input._default_to_dict(reference)
    mappings = vermouth.map_input.read_backmapping_file(
        ['[ molecule ]'] + case.string.split('\n'),
        ffs
    )
    compare_old_new_mappings(mappings, reference)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_read_mapping_directory(ref_mapping_directory):\n dirpath, ref_mappings = ref_mapping_directory\n from_names = list(ref_mappings.keys())\n to_names = []\n block_names = []\n mapping = {}\n weights = {}\n\n\n for k in ref_mappings:\n to_names.extend(ref_mappings[k].keys())\n for to in ref_mappings[k]:\n block_names.extend(ref_mappings[k][to].keys())\n for block_name in ref_mappings[k][to]:\n m, w, _ = ref_mappings[k][to][block_name]\n mapping.update(m)\n weights.update(w)\n force_fields = case_to_dummy_ffs(from_names + to_names, block_names,\n mapping, weights, [])\n\n\n mappings = vermouth.map_input.read_mapping_directory(dirpath, force_fields)\n compare_old_new_mappings(mappings, ref_mappings)", "def test_read_mapping_errors(content):\n with pytest.raises(IOError):\n vermouth.map_input._read_mapping_partial(content.split('\\n'), 1)", "def test_check_mapping_file_correct_file(self):\r\n\r\n # Use valid data, default parameters\r\n check_mapping_file(mapping_fp=self.correct_mapping_fp,\r\n output_dir=self.output_dir,\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_data_correct_input)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_correct_input)\r\n self.assertEqual(log_data, self.expected_log_data_correct_input)\r\n\r\n # With additional parameters added should not change results using\r\n # same valid input data\r\n check_mapping_file(mapping_fp=self.correct_mapping_fp,\r\n output_dir=self.output_dir,\r\n has_barcodes=True,\r\n char_replace=\"A\",\r\n verbose=False,\r\n variable_len_barcodes=True,\r\n disable_primer_check=True,\r\n added_demultiplex_field=None)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.correct_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, 
self.expected_html_data_correct_input)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_correct_input)\r\n self.assertEqual(log_data, self.expected_log_data_correct_input)", "def test_check_mapping_file_errors(self):\r\n\r\n # Use data with errors, default parameters\r\n check_mapping_file(mapping_fp=self.errors_mapping_fp,\r\n output_dir=self.output_dir,\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_errors_output)\r\n self.assertEqual(corrected_data,\r\n self.expected_data_errors_corrected_output)\r\n self.assertEqual(log_data, self.expected_data_log_errors_output)", "def test_filter_mapping_file(self):\r\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers,\r\n ['a', 'b', 'c', 'd', 'e', 'f']), (self.map_headers, self.map_data))\r\n self.assertEqual(\r\n filter_mapping_file(self.map_data, self.map_headers, ['a']),\r\n (['SampleID', 'Description'], ['a\\tx'.split('\\t')]))", "def test_format_mapping_file(self):\r\n headers = ['SampleID', 'col1', 'col0', 'Description']\r\n samples =\\\r\n [['bsample', 'v1_3', 'v0_3', 'd1'],\r\n ['asample', 'aval', 'another', 'd2']]\r\n comments = ['this goes after headers', 'this too']\r\n self.assertEqual(format_mapping_file(headers, samples, comments),\r\n example_mapping_file)\r\n # need file or stringIO for roundtrip test\r\n # roundtrip = parse_mapping_file(format_mapping_file(headers,samples,comments))\r\n # self.assertEqual(roundtrip, [headers,samples,comments])\r", "def test_map(self, audio_store_and_expected_files):\n audio_store = audio_store_and_expected_files[0]\n expected_files = audio_store_and_expected_files[1]\n\n # Check number of files.\n assert len(audio_store.file_map) == expected_files\n\n # Ensure the keys are as expected.\n key_list = list(audio_store.file_map.keys())\n assert key_list == [x + 1 for x in range(len(key_list))]\n\n # Ensure the values are as expected.\n for key, file in audio_store.file_map.items():\n\n # Check the extension.\n ext = os.path.splitext(file)[1].replace('.', '')\n assert ext in audio_store.audio_extensions\n\n # File should exist.\n assert os.path.exists(\n os.path.join(\n audio_store.top_dir, audio_store.audio_dir, file))", "def test_filter_mapping_file(self):\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers,\\\n ['a','b','c','d','e','f']), (self.map_headers, self.map_data))\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers, ['a']),\n (['SampleID','Description'],['a\\tx'.split('\\t')]))", "def test_store_mapping(self):\r\n\r\n expected = [\"1:\\t0\\t2\\t5\\t6\\n\",\r\n \"3:\\n\",\r\n 
\"4:\\n\",\r\n \"8:\\t7\\n\"]\r\n\r\n self.files_to_remove.append(\"/tmp/test_store_mapping_mapping.txt\")\r\n store_mapping(self.mapping, \"/tmp/\", prefix=\"test_store_mapping\")\r\n observed = list(open(\"/tmp/test_store_mapping_mapping.txt\", \"U\"))\r\n self.assertItemsEqual(observed, expected)", "def test_parse_mapping_file_handles_file_handle(self):\r\n fd, fp = mkstemp(prefix='test_parse_mapping_file',\r\n suffix='.txt')\r\n close(fd)\r\n self.files_to_remove.append(fp)\r\n open(fp, 'w').write('\\n'.join(['#sample\\ta\\tb',\r\n '#comment line to skip',\r\n 'x \\t y \\t z ', ' ',\r\n '#more skip',\r\n 'i\\tj\\tk']))\r\n obs = parse_mapping_file(open(fp))\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n self.assertEqual(obs, exp)", "def test_filter_mapping_file_from_mapping_f(self):\r\n actual = filter_mapping_file_from_mapping_f(\r\n self.tutorial_mapping_f, [\"PC.354\", \"PC.355\"])\r\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\r\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\r\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\"\"\"\r\n self.assertEqual(actual, expected)", "def test_check_mapping_file_warnings(self):\r\n\r\n check_mapping_file(mapping_fp=self.warnings_mapping_fp,\r\n output_dir=self.output_dir,\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.warnings_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.warnings_mapping_fp).replace('.txt', '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.warnings_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_output_warnings)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_warnings_output)\r\n self.assertEqual(log_data, self.expected_log_warnings_output)", "def test_mapping(self):\n\n # Input PatternDS has constraints fake, file and pattern.\n # Use fake from first input as animal constraint.\n the_process_unit = ProcessUnit([self.a_pattern_ds], '/a/new/pattern/%animal%/%file%/%pattern%.file',\n 'echo', map_dict={'animal': ('fake', 0)})\n output = the_process_unit.execute(simulate=True)\n\n all_files = [thing for thing in output.files]\n\n self.assertEqual(len(all_files), 1)\n self.assertEqual(all_files[0].full_path, '/a/new/pattern/fake_1/file_1/pattern_1.file')", "def test_filter_mapping_file_from_mapping_f(self):\n actual = filter_mapping_file_from_mapping_f(self.tutorial_mapping_f,[\"PC.354\",\"PC.355\"])\n expected = 
\"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\"\"\"\n self.assertEqual(actual,expected)", "def test_check_mapping_file_multiple_problems(self):\r\n\r\n check_mapping_file(mapping_fp=self.errors_warnings_mapping_fp,\r\n output_dir=self.output_dir,\r\n added_demultiplex_field=\"DoesNotExist\",\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt',\r\n '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_errors_warnings_output)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_errors_warnings)\r\n self.assertEqual(log_data, self.expected_log_errors_warnings_output)", "def test_check_mapping_data_valid_data(self):\r\n\r\n mapping_data = ['Sample1\\tAAAA\\tACTG\\tFile1\\ts.1'.split('\\t'),\r\n 'Sample2\\tCCCC\\tACTG\\tFile2\\ts.2'.split('\\t'),\r\n 'Sample3\\tTTTT\\tACTG\\tFile3\\ts.3'.split('\\t')\r\n ]\r\n\r\n headers = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'InputFileNames', 'Description']\r\n\r\n filename_column = 'InputFileNames'\r\n\r\n expected_data = {'File3': 'Sample3',\r\n 'File2': 'Sample2',\r\n 'File1': 'Sample1'}\r\n\r\n actual_data = check_mapping_data(\r\n mapping_data,\r\n headers,\r\n filename_column)\r\n\r\n self.assertEqual(actual_data, expected_data)", "def test_tb_full_mapping_iter_02():\n resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n fastq_file_2 = resource_path + \"tb.Human.SRR1658573_2.fastq\"\n\n files = [\n gem_file,\n fastq_file_2\n ]\n\n metadata = {\n 'assembly': 'test',\n # 'enzyme_name': 'MboI',\n 'windows': ((1, 25), (1, 50), (1, 75), (1, 100))\n }\n\n gem_file = files[1]\n\n print(gem_file)\n\n tfm2 = tbFullMappingTool()\n tfm2_files, tfm2_meta = tfm2.run(files, [], metadata) # pylint: disable=unused-variable\n\n map25 = resource_path + \"tb.Human.SRR1658573_2_full_1-25.map\"\n map50 = resource_path + \"tb.Human.SRR1658573_2_full_1-50.map\"\n map75 = resource_path + \"tb.Human.SRR1658573_2_full_1-75.map\"\n map100 = resource_path + \"tb.Human.SRR1658573_2_full_1-100.map\"\n\n assert os.path.isfile(map25) is True\n assert os.path.getsize(map25) > 0\n assert os.path.isfile(map50) is True\n assert os.path.getsize(map50) > 0\n assert os.path.isfile(map75) is True\n assert os.path.getsize(map75) > 0\n assert os.path.isfile(map100) is True\n assert os.path.getsize(map100) > 0", "def test_tb_full_mapping_iter_01():\n 
resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n fastq_file_1 = resource_path + \"tb.Human.SRR1658573_1.fastq\"\n\n files = [\n gem_file,\n fastq_file_1\n ]\n\n metadata = {\n 'assembly': 'test',\n # 'enzyme_name': 'MboI',\n 'windows': ((1, 25), (1, 50), (1, 75), (1, 100))\n }\n\n gem_file = files[1]\n\n print(gem_file)\n\n tfm1 = tbFullMappingTool()\n tfm1_files, tfm1_meta = tfm1.run(files, [], metadata) # pylint: disable=unused-variable\n\n map25 = resource_path + \"tb.Human.SRR1658573_1_full_1-25.map\"\n map50 = resource_path + \"tb.Human.SRR1658573_1_full_1-50.map\"\n map75 = resource_path + \"tb.Human.SRR1658573_1_full_1-75.map\"\n map100 = resource_path + \"tb.Human.SRR1658573_1_full_1-100.map\"\n\n assert os.path.isfile(map25) is True\n assert os.path.getsize(map25) > 0\n assert os.path.isfile(map50) is True\n assert os.path.getsize(map50) > 0\n assert os.path.isfile(map75) is True\n assert os.path.getsize(map75) > 0\n assert os.path.isfile(map100) is True\n assert os.path.getsize(map100) > 0", "def test_reading_user_map_definition_from_file():\n with open(\"tempfile.buf\", \"w\") as f:\n f.write(\"\"\"\n 1. key : string\n 2. bpm : int\n \"\"\")\n\n with open(\"tempfile.buf\") as f:\n assert Map(\n MapEntrySpec(1, \"key\", String),\n MapEntrySpec(2, \"bpm\", UnsignedInt)\n ) == Map.from_open_file(f)\n\n os.remove(\"tempfile.buf\")", "def test_parse_mapping_file_handles_filepath(self):\r\n fd, fp = mkstemp(prefix='test_parse_mapping_file',\r\n suffix='.txt')\r\n close(fd)\r\n self.files_to_remove.append(fp)\r\n open(fp, 'w').write('\\n'.join(['#sample\\ta\\tb',\r\n '#comment line to skip',\r\n 'x \\t y \\t z ', ' ',\r\n '#more skip',\r\n 'i\\tj\\tk']))\r\n obs = parse_mapping_file(fp)\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n self.assertEqual(obs, exp)", "def test_read_mapping_file_multiple(reference_multi):\n content, reference = reference_multi\n from_names = list(reference.keys())\n to_names = []\n block_names = []\n\n for k in reference:\n to_names.extend(reference[k].keys())\n for to in reference[k]:\n block_names.extend(reference[k][to].keys())\n force_fields = case_to_dummy_ffs(from_names + to_names, block_names,\n {(0, 'X1'): [(0, 'A')], (0, 'X2'): [(0, 'B')], (0, 'X3'): [(0, 'D')]},\n {(0, 'A'): {(0, 'X1'): 1.0}, (0, 'B'): {(0, 'X2'): 1.0}, (0, 'C'): {(0, 'X2'): 1.0}, (0, 'D'): {(0, 'X3'): 1.0}},\n [])\n mappings = vermouth.map_input.read_backmapping_file(content, force_fields)\n compare_old_new_mappings(mappings, reference)", "def test_bad_data_fail(self):\n with self.assertRaises(ValueError):\n mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.map'))", "def test_check_map(self):\r\n\r\n header, mapping_data = check_map(self.valid_mapping_data_golay)\r\n\r\n expected_header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n expected_mapping_data =\\\r\n [['s1', 'AACTCGTCGATG', 'ATTCGATART', 's1_description'],\r\n ['s2', 'agcAGCACTTGT', 'ATTCGATART', 's2_description'],\r\n ['s3', 'ACCGCAGAGTCA', 'YATGCTGCCTCCCGTAGGAGT', 's3_description']]\r\n\r\n self.assertEquals(header, expected_header)\r\n self.assertEquals(mapping_data, expected_mapping_data)", "def test_get_file_map_type():\n for file in sd.get_file_map(\"resources/\"):\n assert file.endswith(\".txt\")", "def test_check_mapping_data_invalid_mapping_file_format(self):\r\n\r\n 
mapping_data = ['Sample1\\tAAAA\\tACTG\\tFile1\\ts.1'.split('\\t'),\r\n 'Sample2\\tCCCC\\tACTG'.split('\\t'),\r\n 'Sample3\\tTTTT\\tACTG\\tFile3\\ts.3'.split('\\t')\r\n ]\r\n\r\n headers = ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence',\r\n 'InputFileNames', 'Description']\r\n\r\n filename_column = 'InputFileNames'\r\n\r\n self.assertRaises(IndexError, check_mapping_data, mapping_data,\r\n headers, filename_column)", "def test_get_file_map_len():\n file_map = sd.get_file_map(\"resources/\")\n files = glob.glob(\"resources/\" + \"/**\" + \".txt\", recursive=True)\n assert len(file_map) == len(files)", "def read_mapfiles():\n mappings = []\n\n # matches stuff like\n # \"/GLOW/*\" glow\n # \"/cms/Role=pilot/Capability=NULL\" cmspilot\n # and extracts the stuff between the quotes, and the username in the second field\n regex = re.compile(r'^\\s*[\"](/[^\"]+)[\"]\\s+([A-Za-z0-9_]+)\\s*(?:$|[#])')\n for filepath in [DEFAULT_VOMS_MAPFILE, VOMS_MAPFILE]:\n try:\n with open(filepath, \"r\", encoding=\"latin-1\") as filehandle:\n for line in filehandle:\n match = regex.match(line)\n if not match:\n continue\n else:\n mappings.append(Mapping(match.group(1), match.group(2)))\n except EnvironmentError as err:\n if err.errno == errno.ENOENT:\n continue\n else:\n raise\n\n return mappings", "def test_write(self):\n map_to_write = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_write_map.map')\n written_maps = glob.glob(map_to_write)\n self.assertEqual(len(written_maps), 0)\n with open(map_to_write, 'w') as f:\n map_ = mapreader.get_data(self.map_file)\n map_.write(f)\n written_maps = glob.glob(map_to_write)\n self.assertEqual(len(written_maps), 1)\n map(os.remove, written_maps)", "def test_parse_mapping_file_handles_errors(self):\r\n # Empty file\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n [])\r\n # string\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n 'my_mapping_file.txt')\r\n # invalid format (no header line with leading # sign)\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n ['sampleID\\ta\\tb',\r\n '1\\tf\\t43',\r\n '2\\tt\\t44'])\r\n # invalid format (no non-header lines)\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n ['#sampleID\\ta\\tb'])\r\n # invalid format (no header line)\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n ['1\\tf\\t43',\r\n '2\\tt\\t44'])", "def test_check_map(self):\r\n s = \"\"\"#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\tX\\tDescription\r\n#fake data\r\nx\\tAA\\tAC\\t3\\tsample_x\r\ny\\t\"AC\"\\tAC\\t4\\t\"sample_y\"\r\nz\\tGG\\tGC\\t5\\tsample_z\"\"\"\r\n f = StringIO(s)\r\n f.name = 'test.xls'\r\n headers, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers = check_map(f,\r\n disable_primer_check=False)\r\n\r\n self.assertEqual(\r\n barcode_to_sample_id,\r\n {'AA': 'x',\r\n 'AC': 'y',\r\n 'GG': 'z'})\r\n\r\n self.assertEqual(errors, [])\r\n self.assertEqual(warnings, [])" ]
[ "0.7734215", "0.74426365", "0.74284154", "0.73957855", "0.738895", "0.7286489", "0.7281214", "0.7254501", "0.7174775", "0.7063158", "0.7009144", "0.69992447", "0.695043", "0.6936193", "0.68889594", "0.6878551", "0.685513", "0.6841759", "0.6801524", "0.6760218", "0.67302", "0.67237806", "0.67214453", "0.6704912", "0.66857", "0.66723204", "0.6661093", "0.6652583", "0.66297466", "0.6585858" ]
0.74782133
1