id (string, lengths 1-7) | text (string, lengths 6-1.03M) | dataset_id (stringclasses, 1 value) |
---|---|---|
3239040
|
<gh_stars>0
from PageCheck import *
from Extrator import *
from bs4 import BeautifulSoup
import requests
import os
import codecs


class wikiMatrice:
    def __init__(self):
        self.url = " "

    def saisirUrl(self):
        url = input("Please enter a URL: ")
        page = PageCheck(url)
        if page.urlChek() != " ":
            extract = Extractor(url)
            extract.extraction()
            print("the number of tables is {}".format(extract.countTable(url)))
        else:
            print("the URL is not valid")

    def lister(self):
        with open("urls.txt", "r") as f:
            files = f.read().split("\n")
        for file in files:
            page = PageCheck(file)
            url = page.urlChek()
            if url != " ":
                extract = Extractor(url)
                extract.extraction()
                print("the number of tables is {}".format(extract.countTable(url)))
            else:
                print("the URL is not valid")

    def interface(self):
        choice = input("Enter 1 to add a URL\nEnter 2 to extract the tables from the URLs listed in the file: ")
        if choice == '1':
            self.saisirUrl()
        elif choice == '2':
            self.lister()
        else:
            print("Please enter a valid number")


# test of the table function
if __name__ == "__main__":
    wiki = wikiMatrice()
    #wiki.interface()
    #wiki.lister()
    wiki.interface()
|
StarcoderdataPython
|
38628
|
# Create an event model for an event-listings site.
# The model must have the following fields:
# Event name (name), at most 200 characters
# Date and time the event takes place (start_at)
# Event description (description)
# Email address of the event organiser (contact)
# User who created the event (author;
# the related_name of this field must be events)
# Name of the venue (location), at most 400 characters
from django.db import models
from django.contrib.auth import get_user_model


class Event(models.Model):
    name = models.CharField(max_length=200)
    start_at = models.DateTimeField('event start')
    description = models.TextField()
    contact = models.EmailField()
    author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE,
                               related_name="events")
    location = models.CharField(max_length=400)
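# Hypothetical usage once the migration is applied (not part of the original
# exercise; the values below are placeholders):
#
#     from django.utils import timezone
#     Event.objects.create(
#         name="Django meetup",
#         start_at=timezone.now(),
#         description="Monthly community meetup",
#         contact="organizer@example.com",
#         author=some_user,          # any instance of get_user_model()
#         location="Main hall, 1 Example Street",
#     )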
|
StarcoderdataPython
|
3372908
|
import numpy as np
from ..base import BaseSKI
from tods.feature_analysis.StatisticalVecSum import StatisticalVecSumPrimitive
class StatisticalVecSumSKI(BaseSKI):
def __init__(self, **hyperparams):
super().__init__(primitive=StatisticalVecSumPrimitive, **hyperparams)
self.fit_available = False
self.predict_available = False
self.produce_available = True
|
StarcoderdataPython
|
3324837
|
import hashlib
def hash_check(
type,
asset,
filename,
down_folder,
subfolder,
hash,
k=None,
b=None,
):
if type == "hdris":
file = down_folder + filename
else:
file = (
f"{subfolder}/{asset}_{k}/textures/{filename}"
if not b
else f"{subfolder}/{asset}_{k}/{filename}"
)
with open(file, "rb") as f:
file_hash = hashlib.md5()
while chunk := f.read(8192):
file_hash.update(chunk)
if hash != file_hash.hexdigest():
return False
return True
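# Hypothetical self-check (not part of the original project): write a small file,
# compute its md5 with hashlib, and confirm hash_check() accepts it. The "hdris"
# branch only needs down_folder + filename to locate the file.
if __name__ == "__main__":
    import os
    import tempfile

    tmp_dir = tempfile.mkdtemp()
    with open(os.path.join(tmp_dir, "sample.bin"), "wb") as f:
        f.write(b"hello world")
    expected = hashlib.md5(b"hello world").hexdigest()
    print(hash_check("hdris", None, "sample.bin", tmp_dir + os.sep, None, expected))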
|
StarcoderdataPython
|
3365581
|
<filename>Algorithms_old/Easy/solve_me_second/solution/solution.py
def main(input_data):
input_data = [map(int, item.split(" ")) for item in
input_data.split("\n")[1:]]
return "\n".join(map(str, [a + b for (a, b) in input_data]))
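# Example (hypothetical input, not from the original task): the first line holds
# the number of pairs, each following line two integers to be summed, e.g.
#   main("2\n1 5\n10 20") -> "6\n30"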
if __name__ == "__main__":
from fileinput import input
def get_input():
return "\n".join([line.strip() for line in input()])
print(main(get_input()))
|
StarcoderdataPython
|
1683652
|
<gh_stars>100-1000
import pytest
from tri_struct import merged
from iommi._db_compat import field_defaults_factory
@pytest.mark.django
def test_field_defaults_factory():
from django.db import models
base = dict(parse_empty_string_as_none=True, required=True, display_name=None)
assert field_defaults_factory(models.CharField(null=False, blank=False)) == merged(
base, dict(parse_empty_string_as_none=False)
)
assert field_defaults_factory(models.CharField(null=False, blank=True)) == merged(
base, dict(parse_empty_string_as_none=False, required=False)
)
assert field_defaults_factory(models.CharField(null=True, blank=False)) == merged(base, dict(required=False))
assert field_defaults_factory(models.CharField(null=True, blank=True)) == merged(base, dict(required=False))
@pytest.mark.django
def test_field_defaults_factory_boolean():
from django.db import models
django_null_default = not models.BooleanField().null
base = dict(parse_empty_string_as_none=django_null_default, display_name=None)
assert field_defaults_factory(models.BooleanField(null=False, blank=False)) == merged(
base, dict(parse_empty_string_as_none=False)
)
assert field_defaults_factory(models.BooleanField(null=False, blank=True)) == merged(
base, dict(parse_empty_string_as_none=False)
)
assert field_defaults_factory(models.BooleanField(null=True, blank=False)) == base
assert field_defaults_factory(models.BooleanField(null=True, blank=True)) == base
|
StarcoderdataPython
|
4822350
|
import docker, logging, subprocess, random, io, os, time
import shutil
from django.conf import settings
from corere.main import git as g
from corere.main import models as m
from corere.main import constants as c
from django.db.models import Q
logger = logging.getLogger(__name__)
#TODO: Better error checking. stderr has contents even when it's successful.
def build_repo2docker_image(manuscript):
logger.debug("Begin build_repo2docker_image for manuscript: " + str(manuscript.id))
path = g.get_submission_repo_path(manuscript)
sub_version = manuscript.get_max_submission_version_id()
image_name = ("jupyter-" + str(manuscript.id) + "-" + manuscript.slug + "-version" + str(sub_version))[:128] + ":" + settings.DOCKER_GEN_TAG + "-" + str(manuscript.id)
run_string = "jupyter-repo2docker --no-run --json-logs --image-name '" + image_name + "' '" + path + "'"
#this happens first so we create the folder and reset the file (with 'w' instead of 'a')
build_log_path = get_build_log_path(manuscript)
os.makedirs(os.path.dirname(build_log_path), exist_ok=True)
with open(build_log_path, 'w+') as logfile:
result = subprocess.run([run_string], shell=True, stdout=logfile, stderr=subprocess.STDOUT)
logger.debug("build_repo2docker_image for manuscript: "+ str(manuscript.id) + ". Result:" + str(result))
manuscript.manuscript_containerinfo.repo_image_name = image_name
manuscript.manuscript_containerinfo.submission_version = sub_version
manuscript.manuscript_containerinfo.manuscript = manuscript
manuscript.manuscript_containerinfo.save()
def delete_repo2docker_image(manuscript):
logger.debug("Begin delete_repo2docker_image for manuscript: " + str(manuscript.id))
client = docker.from_env()
client.images.remove(image=manuscript.manuscript_containerinfo.repo_image_name, force=True)
def _write_oauthproxy_email_list_to_working_directory(manuscript):
logger.debug("Begin _write_oauthproxy_email_list_to_working_directory for manuscript: " + str(manuscript.id))
container_info = manuscript.manuscript_containerinfo
client = docker.from_env()
#TODO: I need to write a file with the list of emails allowed to access the container to the filesystem where docker can use it to build
email_file_path = settings.DOCKER_BUILD_FOLDER + "/oauthproxy-" + str(manuscript.id) + "/authenticated_emails.txt"
os.makedirs(os.path.dirname(email_file_path), exist_ok=True) #make folder for build context
email_file = open(email_file_path, 'w')
#Get the list of emails allowed to access the notebook
#For now I think I'm just going to get a list of users in the 4 role groups
user_email_list = m.User.objects.filter( Q(groups__name__startswith=c.GROUP_MANUSCRIPT_AUTHOR_PREFIX + " " + str(manuscript.id))
| Q(groups__name__startswith=c.GROUP_MANUSCRIPT_EDITOR_PREFIX + " " + str(manuscript.id))
| Q(groups__name__startswith=c.GROUP_MANUSCRIPT_CURATOR_PREFIX + " " + str(manuscript.id))
| Q(groups__name__startswith=c.GROUP_MANUSCRIPT_VERIFIER_PREFIX + " " + str(manuscript.id))
#| Q(is_superuser=True) #This seems to return the same user a ton of times
).values('email')
for ue in user_email_list:
email_file.write(ue.get("email")+"\n")
email_file.close()
def _write_oauth_proxy_html_templates_to_working_directory(manuscript):
logger.debug("Begin _write_oauth_proxy_html_templates_to_working_directory for manuscript: " + str(manuscript.id))
container_info = manuscript.manuscript_containerinfo
client = docker.from_env()
email_file_path = settings.DOCKER_BUILD_FOLDER + "/oauthproxy-" + str(manuscript.id) + "/email-templates"
if os.path.exists(email_file_path) and os.path.isdir(email_file_path):
shutil.rmtree(email_file_path)
#/Users/madunlap/Documents/GitHub/dataverse-corere/corere /main/static/oauth2-proxy/email-templates
shutil.copytree(settings.BASE_DIR + "/main/static/oauth2-proxy/email-templates", email_file_path )
def build_oauthproxy_image(manuscript):
logger.debug("Begin build_oauthproxy_image for manuscript: " + str(manuscript.id))
container_info = manuscript.manuscript_containerinfo
client = docker.from_env()
_write_oauthproxy_email_list_to_working_directory(manuscript)
_write_oauth_proxy_html_templates_to_working_directory(manuscript)
docker_build_folder = settings.DOCKER_BUILD_FOLDER + "/oauthproxy-" + str(manuscript.id) + "/"
dockerfile_path = docker_build_folder + "dockerfile"
docker_string = "FROM " + settings.DOCKER_OAUTH_PROXY_BASE_IMAGE + "\n" \
+ "COPY authenticated_emails.txt /opt/bitnami/oauth2-proxy/authenticated_emails.txt \n" \
+ "ADD email-templates /opt/bitnami/oauth2-proxy/email-templates"
with open(dockerfile_path, 'w') as f:
f.write(docker_string)
#run_string = "jupyter-repo2docker --no-run --json-logs --image-name '" + image_name + "' '" + path + "'"
container_info.proxy_image_name = ("oauthproxy-" + str(manuscript.id) + "-" + manuscript.slug)[:128] + ":" + settings.DOCKER_GEN_TAG + "-" + str(manuscript.id)
container_info.save()
run_string = "docker build . -t " + container_info.proxy_image_name
with open(get_build_log_path(manuscript), 'a+') as logfile:
result = subprocess.run([run_string], shell=True, stdout=logfile, stderr=subprocess.STDOUT, cwd=docker_build_folder)
logger.debug("build_oauthproxy_image result:" + str(result))
def delete_oauth2proxy_image(manuscript):
logger.debug("Begin delete_oauth2proxy_image for manuscript: " + str(manuscript.id))
client = docker.from_env()
client.images.remove(image=manuscript.manuscript_containerinfo.proxy_image_name, force=True)
def start_repo2docker_container(manuscript):
logger.debug("Begin start_repo2docker_container for manuscript: " + str(manuscript.id))
container_info = manuscript.manuscript_containerinfo
if(not container_info.repo_container_ip):
container_info.repo_container_ip = "0.0.0.0"
#NOTE: THIS IS COPIED FROM start_oauthproxy_container. We have to know the proxy port here though so we can set the allow_origin.
#If the info previously exists for the container, it is kept; a port is only generated when missing.
if(not container_info.proxy_container_port):
while True:
#TODO: Random is pretty inefficient if the space is maxed. We should maybe start at a random port and increment up (see the sketch at the end of this module).
if(settings.CONTAINER_PROTOCOL == 'https'):
container_info.proxy_container_port = random.randint(50020-20, 50039-20)
else:
container_info.proxy_container_port = random.randint(50020, 50039)
if not m.ContainerInfo.objects.filter(proxy_container_port=container_info.proxy_container_port).exists():
break
if(not container_info.proxy_container_address):
container_info.proxy_container_address = settings.CONTAINER_ADDRESS
print("PUBLIC ADDRESS BEFORE REPO2DOCKER LAUNCH")
print(container_info.container_public_address())
client = docker.from_env()
#origin_addr = settings.CONTAINER_PROTOCOL + "://" + container_inf.proxy_container_address + ":" + str(container_info.proxy_container_port) #note, not adding 20
#run_string = "jupyter notebook --ip " + container_info.repo_container_ip + " --NotebookApp.token='' --NotebookApp.password='' --NotebookApp.allow_origin='"+ origin_addr +"'"
run_string = "jupyter notebook --ip " + container_info.repo_container_ip + " --NotebookApp.token='' --NotebookApp.password='' --NotebookApp.allow_origin='"+container_info.container_public_address() +"' " #trailing space is important!!!
#run_string = "jupyter notebook --ip " + container_info.repo_container_ip + " --NotebookApp.token='' --NotebookApp.password='' --NotebookApp.allow_origin='*'"
#run_string = "jupyter notebook --ip " + container_info.repo_container_ip + " --NotebookApp.token='' --NotebookApp.password='' --NotebookApp.allow_origin='"+container_info.container_public_address() +"/view/globus_logo_white.png'"
#TODO: Maybe set the '*' to specify only corere's host.
run_string += "--NotebookApp.tornado_settings=\"{ 'headers': { 'Content-Security-Policy': \\\"frame-ancestors 'self' *\\\" } }\""
#Add this if you need direct access. Defeats the whole point of a proxy. # ports={'8888/tcp': "60000"},
container = client.containers.run(container_info.repo_image_name, run_string, detach=True, network=container_info.container_network_name())
while container.status != "created": #This is a really lazy means of waiting for the container to complete
print(container.status)
time.sleep(.1)
#TODO: This never seems to have any contents. Maybe because there is nothing logged yet when the container has only just been created?
print(container.logs())
print(container.logs(), file=open(get_build_log_path(manuscript), "a"))
notebook_network = client.networks.get(container_info.container_network_name())
notebook_network.disconnect(container, force=True) #we disconnect it and then re-add it with the correct ip. I couldn't find a way to start the container with no network and then just add this.
notebook_network.connect(container, ipv4_address=container_info.network_ip_substring + ".2")
container_info.repo_container_id = container.id
container_info.save()
def stop_delete_repo2docker_container(manuscript):
logger.debug("Begin stop_delete_repo2docker_container for manuscript: " + str(manuscript.id))
stop_delete_container(manuscript.manuscript_containerinfo.repo_container_id)
#We need the request to get the server address to pass to oauth2-proxy. Technically we only need this when creating the back button but we require it anyways.
def start_oauthproxy_container(manuscript, request):
logger.debug("Begin start_oauthproxy_container for manuscript: " + str(manuscript.id))
container_info = manuscript.manuscript_containerinfo
#NOTE: THIS LOGIC IS RARELY CALLED BECAUSE WE ALREADY DO THE SAME LOGIC IN REPO2DOCKER. WE HAVE TO KNOW THE PORT BEFORE LAUNCHING THAT CONTAINER TO SET allow-origin.
#If the info previously exists for the container, it is kept; a port is only generated when missing.
if(not container_info.proxy_container_port):
while True:
#TODO: Random is pretty inefficient if the space is maxed. We should maybe start at a random port and increment up.
if(settings.CONTAINER_PROTOCOL == 'https'):
container_info.proxy_container_port = random.randint(50020-20, 50039-20)
else:
container_info.proxy_container_port = random.randint(50020, 50039)
if not m.ContainerInfo.objects.filter(proxy_container_port=container_info.proxy_container_port).exists():
break
if(not container_info.proxy_container_address):
container_info.proxy_container_address = settings.CONTAINER_ADDRESS
run_string = ""
client = docker.from_env()
emails_file_path = "/opt/bitnami/oauth2-proxy/authenticated_emails.txt"
template_files_path = "/opt/bitnami/oauth2-proxy/email-templates"
latest_submission = manuscript.get_latest_submission()
#Note: We have hijacked "footer" to instead pass the corere server address to our custom oauth2-proxy template
#Note: host.docker.internal may have issues on linux.
#Note: whitelist-domain is used to allow redirects after using the oauth2 sign-in direct url
command = "--http-address=" + "'0.0.0.0:4180'" + " " \
+ "--https-address=" + "'0.0.0.0:443'" + " " \
+ "--redirect-url=" + "'" + container_info.container_public_address() + "/oauth2/callback' " \
+ "--upstream=" + "'http://" +container_info.network_ip_substring+ ".2:8888" + "/' " \
+ "--upstream=" + "'"+ settings.CONTAINER_PROTOCOL + "://"+ settings.CONTAINER_TO_CORERE_ADDRESS +"/submission/" + str(latest_submission.id) + "/notebooklogin/' " \
+ "--provider=" + "'oidc'" + " " \
+ "--provider-display-name=" + "'Globus'" + " " \
+ "--oidc-issuer-url=" + "'https://auth.globus.org'" + " " \
+ "--cookie-name=" + "'_oauth2_proxy'" + " " \
+ "--client-id=" + "'" + settings.SOCIAL_AUTH_GLOBUS_KEY + "'" + " " \
+ "--client-secret=" + "'" + settings.SOCIAL_AUTH_GLOBUS_SECRET + "'" + " " \
+ "--cookie-secret=" + "'" + settings.OAUTHPROXY_COOKIE_SECRET + "'" + " " \
+ "--cookie-refresh=" + "'0s'" + " " \
+ "--cookie-expire=" + "'168h'" + " " \
+ "--authenticated-emails-file=" + "'" + emails_file_path + "'" + " " \
+ "--custom-templates-dir='" + template_files_path + "' " \
+ "--banner=" + "'" + "Please re-authenticate to access the environment for Manuscript: " + manuscript.get_display_name() + "'" + " " \
+ "--footer=" + "'" + settings.CONTAINER_PROTOCOL + "://" + settings.SERVER_ADDRESS + "'" + " " \
+ "--whitelist-domain=" + "'" + settings.SERVER_ADDRESS + "'" + " "
if(settings.CONTAINER_PROTOCOL == 'https'):
command += "--cookie-secure=" + "'true'" + " "
else:
command += "--cookie-secure=" + "'false'" + " "
container = client.containers.run(container_info.proxy_image_name, command, ports={'4180/tcp': container_info.proxy_container_port}, detach=True)
#network=container_info.container_network_name())
while container.status != "created": #This is a really lazy means of waiting for the container to complete
#print(container.status)
time.sleep(.1)
#TODO: This never seems to have any contents. Maybe because there is nothing logged yet when the container has only just been created?
#print(container.logs())
print(container.logs(), file=open(get_build_log_path(manuscript), "a"))
container_info.proxy_container_id = container.id
container_info.save()
# #Janky log access code
# import time
# time.sleep(5)
# print(container.logs()) #Should find a better way to stream these logs. Though you can get to them via docker.
notebook_network = client.networks.get(container_info.container_network_name())
notebook_network.connect(container, ipv4_address=container_info.network_ip_substring + ".3")
return container_info.container_public_address()
def update_oauthproxy_container_authenticated_emails(manuscript):
logger.debug("Begin update_oauthproxy_container_authenticated_emails for manuscript: " + str(manuscript.id))
container_info = manuscript.manuscript_containerinfo
_write_oauthproxy_email_list_to_working_directory(manuscript)
docker_build_folder = settings.DOCKER_BUILD_FOLDER + "/oauthproxy-" + str(manuscript.id) + "/"
run_string = "docker cp authenticated_emails.txt " + container_info.proxy_container_id +":/opt/bitnami/oauth2-proxy/authenticated_emails.txt"
result = subprocess.run([run_string], shell=True, capture_output=True, cwd=docker_build_folder)
logger.debug("update_oauthproxy_container_authenticated_emails result:" + str(result))
def stop_delete_oauthproxy_container(manuscript):
logger.debug("Begin stop_delete_oauthproxy_container for manuscript: " + str(manuscript.id))
stop_delete_container(manuscript.manuscript_containerinfo.proxy_container_id)
def stop_delete_container(container_id):
client = docker.from_env()
container = client.containers.get(container_id)
container.stop(timeout=2) #should I just use kill?
container.remove()
#Note: this does not handle recreation like the container code.
def start_network(manuscript):
logger.debug("Begin start_network for manuscript: " + str(manuscript.id))
while True: #get an unused subnet.
network_part_2 = random.randint(10, 255)
network_sub = "10." + str(network_part_2) + ".255"
if not m.ContainerInfo.objects.filter(network_ip_substring=network_sub).exists():
break
client = docker.from_env()
container_info = manuscript.manuscript_containerinfo
container_info.network_ip_substring = network_sub
ipam_pool = docker.types.IPAMPool(
subnet=network_sub + '.0/16',
gateway=network_sub + '.1'
)
ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
network = client.networks.create(container_info.container_network_name(), driver="bridge", ipam=ipam_config)
container_info.network_id = network.id
container_info.save()
def delete_network(manuscript):
logger.debug("Begin delete_network for manuscript: " + str(manuscript.id))
client = docker.from_env()
network = client.networks.get(manuscript.manuscript_containerinfo.network_id)
network.remove()
def delete_manuscript_docker_stack(manuscript):
logger.debug("Begin delete_manuscript_docker_stack for manuscript: " + str(manuscript.id))
try:
stop_delete_oauthproxy_container(manuscript)
stop_delete_repo2docker_container(manuscript)
delete_network(manuscript)
delete_repo2docker_image(manuscript)
delete_oauth2proxy_image(manuscript)
manuscript.manuscript_containerinfo.delete()
return("Manuscript stack and ContainerInfo deleted")
except m.ContainerInfo.DoesNotExist:
return("No ContainerInfo found, so stack was not deleted. Possibly it was never created.")
#This deletes the stack via tags based on manuscript id, not via info from ContainerInfo
#In the end it's probably not much different, but it's designed to be used only by admins
#TODO: If you delete the last stack with this method, starting up a new stack is very slow.
# I assume this has to do with deletion of intermediates, or the docker network prune.
# It would be good to fix this.
def delete_manuscript_docker_stack_crude(manuscript):
logger.debug("Begin delete_manuscript_docker_stack_crude for manuscript: " + str(manuscript.id))
try:
#delete containers via tags
run_string = "docker ps -a | grep ':" + settings.DOCKER_GEN_TAG + "-" + str(manuscript.id) + "' | awk '{print $1}' | xargs docker rm -f"
print(subprocess.run([run_string], shell=True, capture_output=True))
#delete images via tags. note the lack of a colon.
run_string = "docker images | grep '" + settings.DOCKER_GEN_TAG + "-" + str(manuscript.id) + "' | awk '{print $3}' | xargs docker rmi"
print(subprocess.run([run_string], shell=True, capture_output=True))
#delete all unused networks
run_string = "docker network prune -f"
print(subprocess.run([run_string], shell=True, capture_output=True))
manuscript.manuscript_containerinfo.delete()
return("Manuscript stack and ContainerInfo deleted")
except m.ContainerInfo.DoesNotExist:
return("No ContainerInfo found, so stack was not deleted. Possibly it was never created.")
def build_manuscript_docker_stack(manuscript, request, refresh_notebook_if_up=False):
logger.debug("Begin build_manuscript_docker_stack for manuscript: " + str(manuscript.id))
if (not (hasattr(manuscript, 'manuscript_containerinfo'))):
m.ContainerInfo().manuscript = manuscript
manuscript.manuscript_containerinfo.build_in_progress = True
manuscript.manuscript_containerinfo.save()
build_repo2docker_image(manuscript)
build_oauthproxy_image(manuscript)
start_network(manuscript)
start_repo2docker_container(manuscript)
start_oauthproxy_container(manuscript, request)
manuscript.manuscript_containerinfo.build_in_progress = False
manuscript.manuscript_containerinfo.save()
def refresh_notebook_stack(manuscript):
logger.debug("Begin refresh_notebook_stack for manuscript: " + str(manuscript.id))
if (not (hasattr(manuscript, 'manuscript_containerinfo'))):
m.ContainerInfo().manuscript = manuscript
manuscript.manuscript_containerinfo.build_in_progress = True
manuscript.manuscript_containerinfo.save()
stop_delete_repo2docker_container(manuscript)
delete_repo2docker_image(manuscript)
build_repo2docker_image(manuscript)
start_repo2docker_container(manuscript)
manuscript.manuscript_containerinfo.build_in_progress = False
manuscript.manuscript_containerinfo.save()
def get_build_log_path(manuscript):
return settings.DOCKER_BUILD_FOLDER + "/docker-build-logs/" + str(manuscript.id) + ".log"
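# Sketch of the port selection the TODOs above suggest (start at a random offset
# and walk the range instead of re-rolling). This is a hypothetical helper, not
# called anywhere in the original module; it reuses the 50020-50039 range and the
# same ContainerInfo uniqueness check.
def _pick_free_proxy_port(low=50020, high=50039):
    span = high - low + 1
    start = random.randint(0, span - 1)
    for offset in range(span):
        port = low + (start + offset) % span
        if not m.ContainerInfo.objects.filter(proxy_container_port=port).exists():
            return port
    raise RuntimeError("no free proxy port available in range {}-{}".format(low, high))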
|
StarcoderdataPython
|
4830829
|
# -*- coding: utf-8 -*-
# Created by restran on 2017/9/15
from __future__ import unicode_literals, absolute_import
import string
import subprocess
def run_shell_cmd(cmd):
try:
(stdout, stderr) = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).communicate()
if stdout is None:
stdout = ''
if stderr is None:
stderr = ''
return '%s%s' % (stdout, stderr)
except Exception as e:
print(e)
print('!!!error!!!')
return ''
def get_raw_plain_text(raw_data, decoded_data):
"""
The ciphertext may contain digits, symbols and other non-letter characters that
some decoding algorithms simply ignore. When producing the plaintext, those
characters (and the original letter case) are restored in their original positions.
:param raw_data:
:param decoded_data:
:return:
"""
index = 0
plain = []
for i, c in enumerate(raw_data):
if c in string.ascii_lowercase:
new_c = decoded_data[index].lower()
index += 1
elif c in string.ascii_uppercase:
new_c = decoded_data[index].upper()
index += 1
else:
new_c = c
plain.append(new_c)
return ''.join(plain)
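# Hypothetical demonstration (not part of the original module): a decoder that
# only handled the letters produced 'helloworld'; the digits, punctuation and
# original letter case are restored from the raw ciphertext.
if __name__ == '__main__':
    raw = 'Uryyb, Jbeyq 123!'
    decoded_letters = 'helloworld'
    print(get_raw_plain_text(raw, decoded_letters))  # -> 'Hello, World 123!'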
|
StarcoderdataPython
|
1605359
|
<reponame>bionet/ted.python<gh_stars>1-10
#!/usr/bin/env python
"""
Test and compare pure Python and Cython implementations of
the Bjork-Pereyra Algorithm.
"""
# Copyright (c) 2009-2015, <NAME>
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
from numpy import fliplr, vander, abs, arange, array
from scipy.special import chebyt
from time import time
import sys
import bionet.ted.bpa_python as bpa_python
if 'linux' in sys.platform:
import bionet.ted.bpa_cython_linux2 as bpa_cython
elif sys.platform == 'darwin':
import bionet.ted.bpa_cython_darwin as bpa_cython
else:
raise RuntimeError('cannot import binary BPA module')
# Try to find the coefficients of a Chebyshev polynomial by solving
# a Vandermonde system. This test case should exhibit good accuracy for
# N less than about 13:
N = 12
i = arange(N)
a = 1.0/(i+2)
T = chebyt(N-1)
f = T(a)
V = fliplr(vander(a))
c = array(T)[::-1]
start = time()
c_solve = bpa_python.bpa(V, f)
end = time()
print('Python implementation results:')
print('original c = ', c)
print('solved c = ', c_solve)
print('error = ', abs(c-c_solve))
print('time = ', end-start)
start = time()
c_solve = bpa_cython.bpa(V, f)
end = time()
print('Cython implementation results:')
print('original c = ', c)
print('solved c = ', c_solve)
print('error = ', abs(c-c_solve))
print('time = ', end-start)
|
StarcoderdataPython
|
85051
|
<reponame>KarlHammar/High-threshold-QEC-toric-RL<filename>predict_script.py
import numpy as np
import time
import os
import torch
import _pickle as cPickle
from src.RL import RL
from src.toric_model import Toric_code
from NN import NN_11, NN_17
from ResNet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
start = time.time()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
##########################################################################
# common system sizes are 3,5,7 and 9
# grid size must be odd!
system_size = 5
# valid network names:
# NN_11
# NN_17
# ResNet18
# ResNet34
# ResNet50
# ResNet101
# ResNet152
network = NN_11
# this file is stored in the network folder and contains the trained agent.
NETWORK_FILE_NAME = 'FirstLongNN11s5'
num_of_predictions = 100
# initialize RL class
rl = RL(Network=network,
Network_name=NETWORK_FILE_NAME,
system_size=system_size,
device=device)
# initial syndrome error generation
# generate syndrome with error probability 0.1
prediction_list_p_error = [0.1,0.11,0.12,0.13,0.14,0.15,0.16,0.17,0.18,0.19]
# generate syndrome with a fixed amount of errors
minimum_nbr_of_qubit_errors = 0 #int(system_size/2)+1 # minimum number of errors for a logical qubit flip
# Generate folder structure, all results are stored in the data folder
timestamp = time.strftime("%y_%m_%d__%H_%M_%S__")
PATH = 'data/prediction__' +str(NETWORK_FILE_NAME) +'__'+ timestamp
if not os.path.exists(PATH):
os.makedirs(PATH)
# Path for the network to use for the prediction
PATH2 = 'network/'+str(NETWORK_FILE_NAME)+'.pt'
print('Prediction')
error_corrected_list, ground_state_list, average_number_of_steps_list, mean_q_list, failed_syndroms, ground_state_list, prediction_list_p_error, failure_rate = rl.prediction(
num_of_predictions=num_of_predictions,
num_of_steps=75,
PATH=PATH2,
prediction_list_p_error=prediction_list_p_error,
minimum_nbr_of_qubit_errors=minimum_nbr_of_qubit_errors,
plot_one_episode=False)
# runtime of prediction
runtime = time.time()-start
runtime = runtime / 3600
print(error_corrected_list, 'error corrected')
print(ground_state_list, 'ground state conserved')
print(average_number_of_steps_list, 'average number of steps')
print(mean_q_list, 'mean q value')
print(runtime, 'h runtime')
# save training settings in txt file
data_all = np.array([[NETWORK_FILE_NAME, failure_rate, num_of_predictions, error_corrected_list[0], ground_state_list[0],average_number_of_steps_list[0], mean_q_list[0], len(failed_syndroms)/2, runtime]])
np.savetxt(PATH + '/data_all.txt', data_all, header='network, failure_rate, error corrected, ground state conserved, average number of steps, mean q value, number of failed syndroms, runtime (h)', delimiter=',', fmt="%s")
|
StarcoderdataPython
|
1734500
|
from __clrclasses__.System import Enum as _n_0_t_0
from __clrclasses__.System import IComparable as _n_0_t_1
from __clrclasses__.System import IFormattable as _n_0_t_2
from __clrclasses__.System import IConvertible as _n_0_t_3
from __clrclasses__.System import Array as _n_0_t_4
from __clrclasses__.System import Attribute as _n_0_t_5
from __clrclasses__.System import Version as _n_0_t_6
from __clrclasses__.System import Byte as _n_0_t_7
from __clrclasses__.System.Collections import ICollection as _n_1_t_0
from __clrclasses__.System.Collections import IEnumerator as _n_1_t_1
from __clrclasses__.System.Runtime.InteropServices import _Attribute as _n_2_t_0
from __clrclasses__.System.Security import CodeAccessPermission as _n_3_t_0
from __clrclasses__.System.Security import IPermission as _n_3_t_1
from __clrclasses__.System.Security import IStackWalk as _n_3_t_2
from __clrclasses__.System.Security import PermissionSet as _n_3_t_3
from __clrclasses__.System.Security import SecurityZone as _n_3_t_4
from __clrclasses__.System.Security.AccessControl import AccessControlActions as _n_4_t_0
from __clrclasses__.System.Security.Cryptography import CspParameters as _n_5_t_0
from __clrclasses__.System.Security.Cryptography.X509Certificates import X509Certificate as _n_6_t_0
import typing
class CodeAccessSecurityAttribute(SecurityAttribute, _n_2_t_0):
pass
class EnvironmentPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission, IBuiltInPermission):
def __init__(self, state: PermissionState) -> EnvironmentPermission:...
def __init__(self, flag: EnvironmentPermissionAccess, pathList: str) -> EnvironmentPermission:...
def AddPathList(self, flag: EnvironmentPermissionAccess, pathList: str):...
def GetPathList(self, flag: EnvironmentPermissionAccess) -> str:...
def SetPathList(self, flag: EnvironmentPermissionAccess, pathList: str):...
class EnvironmentPermissionAccess(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
AllAccess: int
NoAccess: int
Read: int
value__: int
Write: int
class EnvironmentPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def All(self) -> str:"""All { get; set; } -> str"""
@property
def Read(self) -> str:"""Read { get; set; } -> str"""
@property
def Write(self) -> str:"""Write { get; set; } -> str"""
def __init__(self, action: SecurityAction) -> EnvironmentPermissionAttribute:...
class FileDialogPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission, IBuiltInPermission):
@property
def Access(self) -> FileDialogPermissionAccess:"""Access { get; set; } -> FileDialogPermissionAccess"""
def __init__(self, state: PermissionState) -> FileDialogPermission:...
def __init__(self, access: FileDialogPermissionAccess) -> FileDialogPermission:...
class FileDialogPermissionAccess(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
_None: int
Open: int
OpenSave: int
Save: int
value__: int
class FileDialogPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Open(self) -> bool:"""Open { get; set; } -> bool"""
@property
def Save(self) -> bool:"""Save { get; set; } -> bool"""
def __init__(self, action: SecurityAction) -> FileDialogPermissionAttribute:...
class FileIOPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission, IBuiltInPermission):
@property
def AllFiles(self) -> FileIOPermissionAccess:"""AllFiles { get; set; } -> FileIOPermissionAccess"""
@property
def AllLocalFiles(self) -> FileIOPermissionAccess:"""AllLocalFiles { get; set; } -> FileIOPermissionAccess"""
def __init__(self, state: PermissionState) -> FileIOPermission:...
def __init__(self, access: FileIOPermissionAccess, path: str) -> FileIOPermission:...
def __init__(self, access: FileIOPermissionAccess, pathList: _n_0_t_4[str]) -> FileIOPermission:...
def __init__(self, access: FileIOPermissionAccess, control: _n_4_t_0, path: str) -> FileIOPermission:...
def __init__(self, access: FileIOPermissionAccess, control: _n_4_t_0, pathList: _n_0_t_4[str]) -> FileIOPermission:...
def AddPathList(self, access: FileIOPermissionAccess, path: str):...
def AddPathList(self, access: FileIOPermissionAccess, pathList: _n_0_t_4[str]):...
def GetPathList(self, access: FileIOPermissionAccess) -> _n_0_t_4[str]:...
def SetPathList(self, access: FileIOPermissionAccess, path: str):...
def SetPathList(self, access: FileIOPermissionAccess, pathList: _n_0_t_4[str]):...
class FileIOPermissionAccess(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
AllAccess: int
Append: int
NoAccess: int
PathDiscovery: int
Read: int
value__: int
Write: int
class FileIOPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def All(self) -> str:"""All { get; set; } -> str"""
@property
def AllFiles(self) -> FileIOPermissionAccess:"""AllFiles { get; set; } -> FileIOPermissionAccess"""
@property
def AllLocalFiles(self) -> FileIOPermissionAccess:"""AllLocalFiles { get; set; } -> FileIOPermissionAccess"""
@property
def Append(self) -> str:"""Append { get; set; } -> str"""
@property
def ChangeAccessControl(self) -> str:"""ChangeAccessControl { get; set; } -> str"""
@property
def PathDiscovery(self) -> str:"""PathDiscovery { get; set; } -> str"""
@property
def Read(self) -> str:"""Read { get; set; } -> str"""
@property
def ViewAccessControl(self) -> str:"""ViewAccessControl { get; set; } -> str"""
@property
def ViewAndModify(self) -> str:"""ViewAndModify { get; set; } -> str"""
@property
def Write(self) -> str:"""Write { get; set; } -> str"""
def __init__(self, action: SecurityAction) -> FileIOPermissionAttribute:...
class GacIdentityPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IBuiltInPermission):
def __init__(self, state: PermissionState) -> GacIdentityPermission:...
def __init__(self) -> GacIdentityPermission:...
class GacIdentityPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
def __init__(self, action: SecurityAction) -> GacIdentityPermissionAttribute:...
class HostProtectionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def ExternalProcessMgmt(self) -> bool:"""ExternalProcessMgmt { get; set; } -> bool"""
@property
def ExternalThreading(self) -> bool:"""ExternalThreading { get; set; } -> bool"""
@property
def MayLeakOnAbort(self) -> bool:"""MayLeakOnAbort { get; set; } -> bool"""
@property
def Resources(self) -> HostProtectionResource:"""Resources { get; set; } -> HostProtectionResource"""
@property
def SecurityInfrastructure(self) -> bool:"""SecurityInfrastructure { get; set; } -> bool"""
@property
def SelfAffectingProcessMgmt(self) -> bool:"""SelfAffectingProcessMgmt { get; set; } -> bool"""
@property
def SelfAffectingThreading(self) -> bool:"""SelfAffectingThreading { get; set; } -> bool"""
@property
def SharedState(self) -> bool:"""SharedState { get; set; } -> bool"""
@property
def Synchronization(self) -> bool:"""Synchronization { get; set; } -> bool"""
@property
def UI(self) -> bool:"""UI { get; set; } -> bool"""
def __init__(self) -> HostProtectionAttribute:...
def __init__(self, action: SecurityAction) -> HostProtectionAttribute:...
class HostProtectionResource(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
All: int
ExternalProcessMgmt: int
ExternalThreading: int
MayLeakOnAbort: int
_None: int
SecurityInfrastructure: int
SelfAffectingProcessMgmt: int
SelfAffectingThreading: int
SharedState: int
Synchronization: int
UI: int
value__: int
class IsolatedStorageContainment(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
AdministerIsolatedStorageByUser: int
ApplicationIsolationByMachine: int
ApplicationIsolationByRoamingUser: int
ApplicationIsolationByUser: int
AssemblyIsolationByMachine: int
AssemblyIsolationByRoamingUser: int
AssemblyIsolationByUser: int
DomainIsolationByMachine: int
DomainIsolationByRoamingUser: int
DomainIsolationByUser: int
_None: int
UnrestrictedIsolatedStorage: int
value__: int
class IsolatedStorageFilePermission(IsolatedStoragePermission, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission, IBuiltInPermission):
def __init__(self, state: PermissionState) -> IsolatedStorageFilePermission:...
class IsolatedStorageFilePermissionAttribute(IsolatedStoragePermissionAttribute, _n_2_t_0):
def __init__(self, action: SecurityAction) -> IsolatedStorageFilePermissionAttribute:...
class IsolatedStoragePermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission):
@property
def UsageAllowed(self) -> IsolatedStorageContainment:"""UsageAllowed { get; set; } -> IsolatedStorageContainment"""
@property
def UserQuota(self) -> int:"""UserQuota { get; set; } -> int"""
class IsolatedStoragePermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def UsageAllowed(self) -> IsolatedStorageContainment:"""UsageAllowed { get; set; } -> IsolatedStorageContainment"""
@property
def UserQuota(self) -> int:"""UserQuota { get; set; } -> int"""
class IUnrestrictedPermission():
def IsUnrestricted(self) -> bool:...
class KeyContainerPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission, IBuiltInPermission):
@property
def AccessEntries(self) -> KeyContainerPermissionAccessEntryCollection:"""AccessEntries { get; } -> KeyContainerPermissionAccessEntryCollection"""
@property
def Flags(self) -> KeyContainerPermissionFlags:"""Flags { get; } -> KeyContainerPermissionFlags"""
def __init__(self, flags: KeyContainerPermissionFlags) -> KeyContainerPermission:...
def __init__(self, state: PermissionState) -> KeyContainerPermission:...
def __init__(self, flags: KeyContainerPermissionFlags, accessList: _n_0_t_4[KeyContainerPermissionAccessEntry]) -> KeyContainerPermission:...
class KeyContainerPermissionAccessEntry(object):
@property
def Flags(self) -> KeyContainerPermissionFlags:"""Flags { get; set; } -> KeyContainerPermissionFlags"""
@property
def KeyContainerName(self) -> str:"""KeyContainerName { get; set; } -> str"""
@property
def KeySpec(self) -> int:"""KeySpec { get; set; } -> int"""
@property
def KeyStore(self) -> str:"""KeyStore { get; set; } -> str"""
@property
def ProviderName(self) -> str:"""ProviderName { get; set; } -> str"""
@property
def ProviderType(self) -> int:"""ProviderType { get; set; } -> int"""
def __init__(self, keyContainerName: str, flags: KeyContainerPermissionFlags) -> KeyContainerPermissionAccessEntry:...
def __init__(self, parameters: _n_5_t_0, flags: KeyContainerPermissionFlags) -> KeyContainerPermissionAccessEntry:...
def __init__(self, keyStore: str, providerName: str, providerType: int, keyContainerName: str, keySpec: int, flags: KeyContainerPermissionFlags) -> KeyContainerPermissionAccessEntry:...
class KeyContainerPermissionAccessEntryCollection(_n_1_t_0, typing.Iterable[typing.Any]):
@property
def Item(self) -> KeyContainerPermissionAccessEntry:"""Item { get; } -> KeyContainerPermissionAccessEntry"""
def Add(self, accessEntry: KeyContainerPermissionAccessEntry) -> int:...
def Clear(self):...
def IndexOf(self, accessEntry: KeyContainerPermissionAccessEntry) -> int:...
def Remove(self, accessEntry: KeyContainerPermissionAccessEntry):...
class KeyContainerPermissionAccessEntryEnumerator(_n_1_t_1):
pass
class KeyContainerPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Flags(self) -> KeyContainerPermissionFlags:"""Flags { get; set; } -> KeyContainerPermissionFlags"""
@property
def KeyContainerName(self) -> str:"""KeyContainerName { get; set; } -> str"""
@property
def KeySpec(self) -> int:"""KeySpec { get; set; } -> int"""
@property
def KeyStore(self) -> str:"""KeyStore { get; set; } -> str"""
@property
def ProviderName(self) -> str:"""ProviderName { get; set; } -> str"""
@property
def ProviderType(self) -> int:"""ProviderType { get; set; } -> int"""
def __init__(self, action: SecurityAction) -> KeyContainerPermissionAttribute:...
class KeyContainerPermissionFlags(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
AllFlags: int
ChangeAcl: int
Create: int
Decrypt: int
Delete: int
Export: int
Import: int
NoFlags: int
Open: int
Sign: int
value__: int
ViewAcl: int
class PermissionSetAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def File(self) -> str:"""File { get; set; } -> str"""
@property
def Hex(self) -> str:"""Hex { get; set; } -> str"""
@property
def Name(self) -> str:"""Name { get; set; } -> str"""
@property
def UnicodeEncoded(self) -> bool:"""UnicodeEncoded { get; set; } -> bool"""
@property
def XML(self) -> str:"""XML { get; set; } -> str"""
def __init__(self, action: SecurityAction) -> PermissionSetAttribute:...
def CreatePermissionSet(self) -> _n_3_t_3:...
class PermissionState(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
_None: int
Unrestricted: int
value__: int
class PrincipalPermission(_n_3_t_1, IUnrestrictedPermission, IBuiltInPermission):
def __init__(self, state: PermissionState) -> PrincipalPermission:...
def __init__(self, name: str, role: str) -> PrincipalPermission:...
def __init__(self, name: str, role: str, isAuthenticated: bool) -> PrincipalPermission:...
class PrincipalPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Authenticated(self) -> bool:"""Authenticated { get; set; } -> bool"""
@property
def Name(self) -> str:"""Name { get; set; } -> str"""
@property
def Role(self) -> str:"""Role { get; set; } -> str"""
def __init__(self, action: SecurityAction) -> PrincipalPermissionAttribute:...
class PublisherIdentityPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IBuiltInPermission):
@property
def Certificate(self) -> _n_6_t_0:"""Certificate { get; set; } -> X509Certificate"""
def __init__(self, state: PermissionState) -> PublisherIdentityPermission:...
def __init__(self, certificate: _n_6_t_0) -> PublisherIdentityPermission:...
class PublisherIdentityPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def CertFile(self) -> str:"""CertFile { get; set; } -> str"""
@property
def SignedFile(self) -> str:"""SignedFile { get; set; } -> str"""
@property
def X509Certificate(self) -> str:"""X509Certificate { get; set; } -> str"""
def __init__(self, action: SecurityAction) -> PublisherIdentityPermissionAttribute:...
class ReflectionPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission, IBuiltInPermission):
@property
def Flags(self) -> ReflectionPermissionFlag:"""Flags { get; set; } -> ReflectionPermissionFlag"""
def __init__(self, state: PermissionState) -> ReflectionPermission:...
def __init__(self, flag: ReflectionPermissionFlag) -> ReflectionPermission:...
class ReflectionPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Flags(self) -> ReflectionPermissionFlag:"""Flags { get; set; } -> ReflectionPermissionFlag"""
@property
def MemberAccess(self) -> bool:"""MemberAccess { get; set; } -> bool"""
@property
def ReflectionEmit(self) -> bool:"""ReflectionEmit { get; set; } -> bool"""
@property
def RestrictedMemberAccess(self) -> bool:"""RestrictedMemberAccess { get; set; } -> bool"""
@property
def TypeInformation(self) -> bool:"""TypeInformation { get; set; } -> bool"""
def __init__(self, action: SecurityAction) -> ReflectionPermissionAttribute:...
class ReflectionPermissionFlag(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
AllFlags: int
MemberAccess: int
NoFlags: int
ReflectionEmit: int
RestrictedMemberAccess: int
TypeInformation: int
value__: int
class RegistryPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission, IBuiltInPermission):
def __init__(self, state: PermissionState) -> RegistryPermission:...
def __init__(self, access: RegistryPermissionAccess, pathList: str) -> RegistryPermission:...
def __init__(self, access: RegistryPermissionAccess, control: _n_4_t_0, pathList: str) -> RegistryPermission:...
def AddPathList(self, access: RegistryPermissionAccess, pathList: str):...
def AddPathList(self, access: RegistryPermissionAccess, control: _n_4_t_0, pathList: str):...
def GetPathList(self, access: RegistryPermissionAccess) -> str:...
def SetPathList(self, access: RegistryPermissionAccess, pathList: str):...
class RegistryPermissionAccess(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
AllAccess: int
Create: int
NoAccess: int
Read: int
value__: int
Write: int
class RegistryPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def All(self) -> str:"""All { get; set; } -> str"""
@property
def ChangeAccessControl(self) -> str:"""ChangeAccessControl { get; set; } -> str"""
@property
def Create(self) -> str:"""Create { get; set; } -> str"""
@property
def Read(self) -> str:"""Read { get; set; } -> str"""
@property
def ViewAccessControl(self) -> str:"""ViewAccessControl { get; set; } -> str"""
@property
def ViewAndModify(self) -> str:"""ViewAndModify { get; set; } -> str"""
@property
def Write(self) -> str:"""Write { get; set; } -> str"""
def __init__(self, action: SecurityAction) -> RegistryPermissionAttribute:...
class ResourcePermissionBase(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission):
Any: int
Local: int
class ResourcePermissionBaseEntry(object):
@property
def PermissionAccess(self) -> int:"""PermissionAccess { get; } -> int"""
@property
def PermissionAccessPath(self) -> _n_0_t_4[str]:"""PermissionAccessPath { get; } -> Array"""
def __init__(self) -> ResourcePermissionBaseEntry:...
def __init__(self, permissionAccess: int, permissionAccessPath: _n_0_t_4[str]) -> ResourcePermissionBaseEntry:...
class SecurityAction(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
Assert: int
Demand: int
Deny: int
InheritanceDemand: int
LinkDemand: int
PermitOnly: int
RequestMinimum: int
RequestOptional: int
RequestRefuse: int
value__: int
class SecurityAttribute(_n_0_t_5, _n_2_t_0):
@property
def Action(self) -> SecurityAction:"""Action { get; set; } -> SecurityAction"""
@property
def Unrestricted(self) -> bool:"""Unrestricted { get; set; } -> bool"""
def CreatePermission(self) -> _n_3_t_1:...
class SecurityPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission, IBuiltInPermission):
@property
def Flags(self) -> SecurityPermissionFlag:"""Flags { get; set; } -> SecurityPermissionFlag"""
def __init__(self, state: PermissionState) -> SecurityPermission:...
def __init__(self, flag: SecurityPermissionFlag) -> SecurityPermission:...
class SecurityPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Assertion(self) -> bool:"""Assertion { get; set; } -> bool"""
@property
def BindingRedirects(self) -> bool:"""BindingRedirects { get; set; } -> bool"""
@property
def ControlAppDomain(self) -> bool:"""ControlAppDomain { get; set; } -> bool"""
@property
def ControlDomainPolicy(self) -> bool:"""ControlDomainPolicy { get; set; } -> bool"""
@property
def ControlEvidence(self) -> bool:"""ControlEvidence { get; set; } -> bool"""
@property
def ControlPolicy(self) -> bool:"""ControlPolicy { get; set; } -> bool"""
@property
def ControlPrincipal(self) -> bool:"""ControlPrincipal { get; set; } -> bool"""
@property
def ControlThread(self) -> bool:"""ControlThread { get; set; } -> bool"""
@property
def Execution(self) -> bool:"""Execution { get; set; } -> bool"""
@property
def Flags(self) -> SecurityPermissionFlag:"""Flags { get; set; } -> SecurityPermissionFlag"""
@property
def Infrastructure(self) -> bool:"""Infrastructure { get; set; } -> bool"""
@property
def RemotingConfiguration(self) -> bool:"""RemotingConfiguration { get; set; } -> bool"""
@property
def SerializationFormatter(self) -> bool:"""SerializationFormatter { get; set; } -> bool"""
@property
def SkipVerification(self) -> bool:"""SkipVerification { get; set; } -> bool"""
@property
def UnmanagedCode(self) -> bool:"""UnmanagedCode { get; set; } -> bool"""
def __init__(self, action: SecurityAction) -> SecurityPermissionAttribute:...
class SecurityPermissionFlag(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
AllFlags: int
Assertion: int
BindingRedirects: int
ControlAppDomain: int
ControlDomainPolicy: int
ControlEvidence: int
ControlPolicy: int
ControlPrincipal: int
ControlThread: int
Execution: int
Infrastructure: int
NoFlags: int
RemotingConfiguration: int
SerializationFormatter: int
SkipVerification: int
UnmanagedCode: int
value__: int
class SiteIdentityPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IBuiltInPermission):
@property
def Site(self) -> str:"""Site { get; set; } -> str"""
def __init__(self, state: PermissionState) -> SiteIdentityPermission:...
def __init__(self, site: str) -> SiteIdentityPermission:...
class SiteIdentityPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Site(self) -> str:"""Site { get; set; } -> str"""
def __init__(self, action: SecurityAction) -> SiteIdentityPermissionAttribute:...
class StorePermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission):
@property
def Flags(self) -> StorePermissionFlags:"""Flags { get; set; } -> StorePermissionFlags"""
def __init__(self, flag: StorePermissionFlags) -> StorePermission:...
def __init__(self, state: PermissionState) -> StorePermission:...
class StorePermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def AddToStore(self) -> bool:"""AddToStore { get; set; } -> bool"""
@property
def CreateStore(self) -> bool:"""CreateStore { get; set; } -> bool"""
@property
def DeleteStore(self) -> bool:"""DeleteStore { get; set; } -> bool"""
@property
def EnumerateCertificates(self) -> bool:"""EnumerateCertificates { get; set; } -> bool"""
@property
def EnumerateStores(self) -> bool:"""EnumerateStores { get; set; } -> bool"""
@property
def Flags(self) -> StorePermissionFlags:"""Flags { get; set; } -> StorePermissionFlags"""
@property
def OpenStore(self) -> bool:"""OpenStore { get; set; } -> bool"""
@property
def RemoveFromStore(self) -> bool:"""RemoveFromStore { get; set; } -> bool"""
def __init__(self, action: SecurityAction) -> StorePermissionAttribute:...
class StorePermissionFlags(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
AddToStore: int
AllFlags: int
CreateStore: int
DeleteStore: int
EnumerateCertificates: int
EnumerateStores: int
NoFlags: int
OpenStore: int
RemoveFromStore: int
value__: int
class StrongNameIdentityPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IBuiltInPermission):
@property
def Name(self) -> str:"""Name { get; set; } -> str"""
@property
def PublicKey(self) -> StrongNamePublicKeyBlob:"""PublicKey { get; set; } -> StrongNamePublicKeyBlob"""
@property
def Version(self) -> _n_0_t_6:"""Version { get; set; } -> Version"""
def __init__(self, state: PermissionState) -> StrongNameIdentityPermission:...
def __init__(self, blob: StrongNamePublicKeyBlob, name: str, version: _n_0_t_6) -> StrongNameIdentityPermission:...
class StrongNameIdentityPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Name(self) -> str:"""Name { get; set; } -> str"""
@property
def PublicKey(self) -> str:"""PublicKey { get; set; } -> str"""
@property
def Version(self) -> str:"""Version { get; set; } -> str"""
def __init__(self, action: SecurityAction) -> StrongNameIdentityPermissionAttribute:...
class StrongNamePublicKeyBlob(object):
def __init__(self, publicKey: _n_0_t_4[_n_0_t_7]) -> StrongNamePublicKeyBlob:...
class TypeDescriptorPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission):
@property
def Flags(self) -> TypeDescriptorPermissionFlags:"""Flags { get; set; } -> TypeDescriptorPermissionFlags"""
def __init__(self, flag: TypeDescriptorPermissionFlags) -> TypeDescriptorPermission:...
def __init__(self, state: PermissionState) -> TypeDescriptorPermission:...
class TypeDescriptorPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Flags(self) -> TypeDescriptorPermissionFlags:"""Flags { get; set; } -> TypeDescriptorPermissionFlags"""
@property
def RestrictedRegistrationAccess(self) -> bool:"""RestrictedRegistrationAccess { get; set; } -> bool"""
def __init__(self, action: SecurityAction) -> TypeDescriptorPermissionAttribute:...
class TypeDescriptorPermissionFlags(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
NoFlags: int
RestrictedRegistrationAccess: int
value__: int
class UIPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IUnrestrictedPermission, IBuiltInPermission):
@property
def Clipboard(self) -> UIPermissionClipboard:"""Clipboard { get; set; } -> UIPermissionClipboard"""
@property
def Window(self) -> UIPermissionWindow:"""Window { get; set; } -> UIPermissionWindow"""
def __init__(self, state: PermissionState) -> UIPermission:...
def __init__(self, windowFlag: UIPermissionWindow, clipboardFlag: UIPermissionClipboard) -> UIPermission:...
def __init__(self, windowFlag: UIPermissionWindow) -> UIPermission:...
def __init__(self, clipboardFlag: UIPermissionClipboard) -> UIPermission:...
class UIPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Clipboard(self) -> UIPermissionClipboard:"""Clipboard { get; set; } -> UIPermissionClipboard"""
@property
def Window(self) -> UIPermissionWindow:"""Window { get; set; } -> UIPermissionWindow"""
def __init__(self, action: SecurityAction) -> UIPermissionAttribute:...
class UIPermissionClipboard(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
AllClipboard: int
NoClipboard: int
OwnClipboard: int
value__: int
class UIPermissionWindow(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
AllWindows: int
NoWindows: int
SafeSubWindows: int
SafeTopLevelWindows: int
value__: int
class UrlIdentityPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IBuiltInPermission):
@property
def Url(self) -> str:"""Url { get; set; } -> str"""
def __init__(self, state: PermissionState) -> UrlIdentityPermission:...
def __init__(self, site: str) -> UrlIdentityPermission:...
class UrlIdentityPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Url(self) -> str:"""Url { get; set; } -> str"""
def __init__(self, action: SecurityAction) -> UrlIdentityPermissionAttribute:...
class ZoneIdentityPermission(_n_3_t_0, _n_3_t_1, _n_3_t_2, IBuiltInPermission):
@property
def SecurityZone(self) -> _n_3_t_4:"""SecurityZone { get; set; } -> SecurityZone"""
def __init__(self, state: PermissionState) -> ZoneIdentityPermission:...
def __init__(self, zone: _n_3_t_4) -> ZoneIdentityPermission:...
class ZoneIdentityPermissionAttribute(CodeAccessSecurityAttribute, _n_2_t_0):
@property
def Zone(self) -> _n_3_t_4:"""Zone { get; set; } -> SecurityZone"""
def __init__(self, action: SecurityAction) -> ZoneIdentityPermissionAttribute:...
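# Hypothetical usage from Python under the CLR (IronPython / Python.NET), not part
# of the generated stubs: demand read access to an environment variable before use.
#
#     perm = EnvironmentPermission(EnvironmentPermissionAccess.Read, "TEMP")
#     perm.Demand()   # Demand() comes from the CodeAccessPermission base class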
|
StarcoderdataPython
|
14708
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-03 15:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0027_auto_20170103_1130'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='navn')),
('url', models.URLField(blank=True, help_text='Valgfri. Link som kan klikkes på kalenderen.', max_length=255, null=True, verbose_name='link')),
('start', models.DateTimeField(null=True, verbose_name='Start')),
('end', models.DateTimeField(blank=True, null=True, verbose_name='Slut')),
('lan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Lan', verbose_name='lan')),
],
options={
'verbose_name_plural': 'begivenheder',
'verbose_name': 'begivenhed',
},
),
migrations.AlterField(
model_name='tournament',
name='end',
field=models.DateTimeField(blank=True, null=True, verbose_name='Slut'),
),
]
|
StarcoderdataPython
|
1749694
|
# -*- coding: utf-8 -*-
# Copyright 2017-2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Base class for resources based on DB models. """
from __future__ import absolute_import, unicode_literals
# stdlib imports
from logging import getLogger
from typing import Text
# 3rd party imports
import jsonschema
from serafin import Fieldspec, serialize
# local imports
from .resource import RestResource
from .util import iter_public_props
L = getLogger(__name__)
class ModelResource(RestResource):
""" Base class for resources based on DB models. """
model = None
spec = Fieldspec('*')
schema = {}
read_only = []
class AlreadyExists(RuntimeError):
""" Raised when an object already exists. """
pass
class ValidationError(RuntimeError):
""" Raised by .validate() if it fails. """
def __init__(self, jsonschema_error):
self.detail = jsonschema_error
super(ModelResource.ValidationError, self).__init__(
str(jsonschema_error)
)
def validate(self, data, schema=None):
""" Validate the *data* according to the given *schema*.
:param Dict[str, Any] data:
A dictionary of data. Probably coming from the user in some way.
:param Dict[str, Any] schema:
JSONSchema describing the required data structure.
:raises ModelResource.ValidationError:
If the validation fails. No value is returned.
"""
try:
jsonschema.validate(data, schema or self.schema)
except jsonschema.ValidationError as ex:
raise ModelResource.ValidationError(ex)
def serialize(self, item, spec=None):
""" Serialize an item or items into a dict.
        This will just call serafin.serialize using the model spec (defined
        class-wide on the resource - '*' by default).
        :param item: The item (or list of items) to serialize.
:return Dict[Any, Any]:
A dict with python native content. Can be easily dumped to any
format like JSON or YAML.
"""
if spec is None:
spec = self.spec
return serialize(item, spec)
def deserialize(self, data):
""" Convert JSON data into model field types.
The value returned by this function can be used directly to create new
item and update existing ones.
"""
return data
def implements(self, rest_verb):
# type: (Text) -> bool
""" Check whether this model resource implements a given REST verb.
Args:
rest_verb (str):
The REST verb you want to check. Possible values are *create*,
*query*, *get*, *update* and *delete*.
Returns:
bool: **True** if the given REST verb is implemented, **False**
otherwise.
"""
test = {
'create': lambda: self.create_item(None, {}, {}),
'query': lambda: self.query_items(None, {}, {}),
'get': lambda: self.get_item(None, {}, {}),
'update': lambda: self.update_item(None, {}, {}),
'delete': lambda: self.delete_item(None, {}, {}),
}.get(rest_verb)
if test:
try:
test()
return True
except NotImplementedError:
return False
except:
return True
else:
return False
def item_for_request(self, request):
""" Create new model item. """
del request # Unused here
raise NotImplementedError(
"All resources must implement .item_for_request()"
)
@property
def public_props(self):
""" All public properties on the resource model. """
if not hasattr(self, '_public_props'):
self._public_props = [
name for name, _ in iter_public_props(self.model)
]
return self._public_props
def create_item(self, request, params, payload):
""" Create new model item. """
raise NotImplementedError("Must implement .create_item()")
def update_item(self, request, params, payload):
""" Update existing model item. """
raise NotImplementedError("Must implement .update_item()")
def delete_item(self, request, params, payload):
""" Delete model instance. """
raise NotImplementedError("Must implement .delete_item()")
def query_items(self, request, params, payload):
""" Return a model query with the given filters.
The query can be further customised like any ndb query.
:return google.appengine.ext.ndb.Query:
The query with the given filters already applied.
"""
raise NotImplementedError("Must implement .query_items()")
def get_item(self, request, params, payload):
""" Get an item associated with the request.
This is used by all detail views/actions to get the item that the
request is concerned with (usually from the URL). This is an
        implementation detail and is highly dependent on the underlying web
framework used.
:param request:
HTTP request.
:return RestResource:
The item associated with the request.
"""
raise NotImplementedError("{}.get_item() not implemented".format(
self.__class__.__name__
))
def rest_query(self, request, params, payload):
""" Query existing records as a list. """
try:
fields = params.pop('_fields', '*')
filters = self.deserialize(params)
items = self.query_items(request, filters, payload)
spec = Fieldspec(self.spec).restrict(Fieldspec(fields))
return 200, [self.serialize(x, spec) for x in items]
except NotImplementedError:
return 404, {'detail': 'Not Found'}
def rest_create(self, request, params, payload):
""" Create a new record. """
try:
self.validate(payload, self.schema)
values = self.deserialize(payload)
item = self.create_item(request, params, values)
return self.serialize(item)
except ModelResource.ValidationError as ex:
return 400, {'detail': str(ex)}
except ModelResource.AlreadyExists:
return 400, {'detail': 'Already exists'}
except NotImplementedError:
return 404, {'detail': 'Not Found'}
def rest_get(self, request, params, payload):
""" Get one record with the given id. """
try:
fields = Fieldspec(params.get('_fields', '*'))
spec = Fieldspec(self.spec).restrict(fields)
item = self.get_item(request, params, payload)
if item is not None:
return 200, self.serialize(item, spec)
else:
return 404, {'detail': "Not Found"}
except NotImplementedError:
return 404, {'detail': 'Not Found'}
def rest_update(self, request, params, payload):
""" Update existing item. """
schema = {}
schema.update(self.schema)
if 'required' in schema:
del schema['required']
try:
self.validate(payload, schema)
values = self.deserialize(payload)
read_only = (
frozenset(self.read_only or []) | frozenset(self.public_props or [])
)
for name in read_only:
values.pop(name, None)
item = self.update_item(request, params, values)
if item is not None:
return 200, self.serialize(item)
else:
return 404, {'detail': "Not Found"}
except ModelResource.ValidationError as ex:
return 400, {'detail': str(ex)}
except NotImplementedError:
return 404, {'detail': 'Not Found'}
def rest_delete(self, request, params, payload):
""" DELETE detail. """
try:
self.delete_item(request, params, payload)
return 204, {}
except NotImplementedError:
return 404, {'detail': 'Not Found'}
# Used only in type hint comments
del Text
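# Usage sketch (illustrative only; `BlogPost` and its query API are hypothetical,
# not part of this module): a concrete resource subclasses ModelResource, points
# it at a model plus a JSONSchema, and implements the verbs it supports, e.g.
#
#   class BlogPostResource(ModelResource):
#       model = BlogPost
#       schema = {'type': 'object', 'properties': {'title': {'type': 'string'}}}
#
#       def query_items(self, request, params, payload):
#           return BlogPost.query(**params)
#
# Verbs left unimplemented keep raising NotImplementedError, so implements()
# reports them as unsupported and the corresponding rest_* handlers return 404.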
|
StarcoderdataPython
|
153013
|
from flask import jsonify, request, Response, json, Blueprint
import datetime
ap = Blueprint('endpoint', __name__)
parcels = []
# GET parcels
@ap.route('/api/v1/parcels')
def get_parcels():
'''
returns a list of all requests
'''
if len(parcels) == 0:
return jsonify({'msg': 'No parcels yet'}), 200
return jsonify({'parcels': parcels, 'count': len(parcels)}), 200
# GET parcels/id
@ap.route('/api/v1/parcels/<int:id>')
def get_a_parcel(id):
'''
return order request details for a specific order
'''
theparcel = []
for parcel in parcels:
if parcel['id'] == id:
theparcel.append(parcel)
if len(theparcel) == 0:
return jsonify({"msg": "parcel delivery request not found"}), 404
return jsonify(theparcel[0]), 200
# POST /parcels
@ap.route('/api/v1/parcels', methods=['POST'])
def add_parcel():
'''
creates a new parcel order
'''
if not request.content_type == 'application/json':
return jsonify({"failed": 'Content-type must be application/json'}), 401
request_data = request.get_json()
if is_valid_request(request_data):
parcel = {
'id': len(parcels) + 1,
'pickup_address': request_data['pickup_address'],
'destination_address': request_data['destination_address'],
'comment_description': request_data['comment_description'],
'status': request_data['status'],
'current_location': request_data['current_location'],
'created': datetime.datetime.now(),
'user_id': request_data['user_id'],
'recipient_address': request_data['recipient_address'],
'recipient_phone': request_data['recipient_phone'],
'recipient_email': request_data['recipient_email']
}
parcels.append(parcel)
response = Response(response=json.dumps({
'msg': "Parcel delivery successfully created", 'request_id':
parcel.get('id')}),
status=201, mimetype="application/json")
response.headers['Location'] = "parcels/" + str(parcel['id'])
return response
else:
response = Response(json.dumps({"error":
"Invalid Parcel delivery order object"}),
status=400, mimetype="application/json")
return response
# PUT /parcels/<parcelId>/cancel
@ap.route('/api/v1/parcels/<int:id>/cancel', methods=['PUT'])
def cancel_parcel_request(id):
'''
cancels a specific request given its identifier
'''
if is_order_delivered(id):
return jsonify({"msg": "Not allowed parcel request has already been delivered"}), 403
    cancelled_parcel = {}
    for parcel in parcels:
if parcel['id'] == id:
cancelled_parcel = {
'id': parcel['id'],
'pickup_address': parcel['pickup_address'],
'destination_address': parcel['destination_address'],
'comment_description': parcel['comment_description'],
'status': "cancelled",
'current_location': parcel['current_location'],
'created': parcel['created'],
'user_id': parcel['user_id'],
'recipient_address': parcel['recipient_address'],
'recipient_phone': parcel['recipient_phone'],
'recipient_email': parcel['recipient_email']
}
parcel.update(cancelled_parcel)
if len(cancelled_parcel) == 0:
return jsonify({"msg": "parcel request does not exist"}), 404
return jsonify({"msg": "parcel request was cancelled successfully",
"status": cancelled_parcel.get("status"),
"id": cancelled_parcel.get("id")}), 200
def is_valid_request(newparcel):
if "destination_address" in newparcel and "pickup_address" in newparcel \
and "comment_description" in newparcel and "created" in newparcel and \
"user_id" in newparcel and "recipient_address" in newparcel and "recipient_phone" in newparcel and \
"recipient_email" in newparcel and "status" in newparcel:
return True
else:
return False
def is_order_delivered(id):
'''
checks that we cannot cancel an already delivered order
'''
for parcel in parcels:
if parcel['id'] == id:
if parcel['status'] == 'delivered':
return True
return False
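# Usage sketch (not part of the original module; host/port are whatever your
# Flask app uses): register the blueprint and exercise the endpoints, e.g.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(ap)
#   app.run()
#
# then POST a JSON body containing all the fields referenced in add_parcel()
# and is_valid_request() to /api/v1/parcels with Content-Type: application/json,
# and GET /api/v1/parcels to list what was created.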
|
StarcoderdataPython
|
3336949
|
import asyncio
import time
import os
from multiprocessing import Process,Lock,Value,Manager
import queue
import sqlite3
import threading
import collections
import platform
from ctypes import c_bool
import copy
"""
This is an asynchronous, event-driven server. Because the I/O side also has to
stay efficient, the code is fairly verbose; suggestions for improvement are welcome.
"""
__author__ = "chriskaliX"
class Receive:
    _dict = dict()      # shared with the outside (the parent process)
    _dict_tmp = dict()  # internal, process-local accumulator
class EchoServerProtocol:
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
if len(data) > 100:
Receive.counter(Receive._dict_tmp, addr[0])
def error_received(self,data,addr):
pass
@staticmethod
async def start_datagram_proxy(ip,port):
loop = asyncio.get_event_loop()
return await loop.create_datagram_endpoint(
lambda: Receive.EchoServerProtocol(),
local_addr=(ip, port))
@staticmethod
def run(ip,port,_dict,signal):
        # Point Receive._dict at the shared (Manager) dict
Receive._dict = _dict
        # On Linux, use uvloop for better performance
if platform.system() == "Linux":
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.get_event_loop()
def getvalue(signal):
while True:
time.sleep(1)
if signal.value:
Receive._dict.update(Receive._dict_tmp)
Receive._dict_tmp.clear()
signal.value = False
# loop.call_soon_threadsafe(loop.stop)
# break
threading.Thread(target=getvalue,args=(signal,)).start()
coro = Receive.start_datagram_proxy(ip,port)
transport, _ = loop.run_until_complete(coro)
loop.run_forever()
@staticmethod
def counter(_dict,key):
        _dict[key] = _dict.get(key, 0) + 1
# How the class is invoked and used
if __name__ == '__main__':
    # Dict used to exchange data with the child process
_dict = Manager().dict()
    # Signal flag used to pull the dict back from the child process
signal = Manager().Value(c_bool,False)
# Performance
#
# Q&A:
    # Q: Why not operate on the Manager dict directly?
    # A: It is slow.
    # See https://stackoverflow.com/questions/10721915/shared-memory-objects-in-multiprocessing
    # https://www.codesd.com/item/python-manager-dict-is-very-slow-compared-to-dict.html
    #
    # Q: Why keep a count at all?
    # A: Not every vulnerable server amplifies well. For NTP monlist, SSDP and the
    # like the reply packet sizes are fairly fixed, so packets-per-second matters
    # more. Recording each IP together with how many packets it returned filters
    # the vulnerable servers by quality to some degree.
    # Start the process and listen
pro = Process(target=Receive.run,args=('127.0.0.1',9999,_dict,signal))
pro.start()
time.sleep(20)
    # Set signal.value to True to pull the contents of _dict across
signal.value = True
while True:
print(_dict)
print(pro.is_alive())
time.sleep(1)
|
StarcoderdataPython
|
3324534
|
import connexion
import six
from openapi_server import query_manager
from openapi_server.utils.vars import CONSTRAINT_TYPE_NAME, CONSTRAINT_TYPE_URI
from openapi_server.models.constraint import Constraint # noqa: E501
from openapi_server import util
def constraints_get(username=None, label=None, page=None, per_page=None): # noqa: E501
"""List all instances of Constraint
Gets a list of all instances of Constraint (more information in https://w3id.org/okn/o/sd#Constraint) # noqa: E501
:param username: Name of the user graph to query
:type username: str
:param label: Filter by label
:type label: str
:param page: Page number
:type page: int
:param per_page: Items per page
:type per_page: int
:rtype: List[Constraint]
"""
return query_manager.get_resource(
username=username,
label=label,
page=page,
per_page=per_page,
rdf_type_uri=CONSTRAINT_TYPE_URI,
rdf_type_name=CONSTRAINT_TYPE_NAME,
kls=Constraint)
def constraints_id_delete(id, user=None): # noqa: E501
"""Delete an existing Constraint
Delete an existing Constraint (more information in https://w3id.org/okn/o/sd#Constraint) # noqa: E501
:param id: The ID of the Constraint to be retrieved
:type id: str
:param user: Username
:type user: str
:rtype: None
"""
return query_manager.delete_resource(id=id,
user=user,
rdf_type_uri=CONSTRAINT_TYPE_URI,
rdf_type_name=CONSTRAINT_TYPE_NAME,
kls=Constraint)
def constraints_id_get(id, username=None): # noqa: E501
"""Get a single Constraint by its id
Gets the details of a given Constraint (more information in https://w3id.org/okn/o/sd#Constraint) # noqa: E501
:param id: The ID of the Constraint to be retrieved
:type id: str
:param username: Name of the user graph to query
:type username: str
:rtype: Constraint
"""
return query_manager.get_resource(id=id,
username=username,
rdf_type_uri=CONSTRAINT_TYPE_URI,
rdf_type_name=CONSTRAINT_TYPE_NAME,
kls=Constraint)
def constraints_id_put(id, user=None, constraint=None): # noqa: E501
"""Update an existing Constraint
Updates an existing Constraint (more information in https://w3id.org/okn/o/sd#Constraint) # noqa: E501
:param id: The ID of the Constraint to be retrieved
:type id: str
:param user: Username
:type user: str
    :param constraint: An old Constraint to be updated
:type constraint: dict | bytes
:rtype: Constraint
"""
if connexion.request.is_json:
constraint = Constraint.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.put_resource(id=id,
user=user,
body=constraint,
rdf_type_uri=CONSTRAINT_TYPE_URI,
rdf_type_name=CONSTRAINT_TYPE_NAME,
kls=Constraint)
def constraints_post(user=None, constraint=None): # noqa: E501
"""Create one Constraint
Create a new instance of Constraint (more information in https://w3id.org/okn/o/sd#Constraint) # noqa: E501
:param user: Username
:type user: str
    :param constraint: Information about the Constraint to be created
:type constraint: dict | bytes
:rtype: Constraint
"""
if connexion.request.is_json:
constraint = Constraint.from_dict(connexion.request.get_json()) # noqa: E501
return query_manager.post_resource(
user=user,
body=constraint,
rdf_type_uri=CONSTRAINT_TYPE_URI,
rdf_type_name=CONSTRAINT_TYPE_NAME,
kls=Constraint)
|
StarcoderdataPython
|
5915
|
from .stacking import StackingClassifier, stack_features
from .multitask import MultiTaskEstimator
|
StarcoderdataPython
|
4820619
|
import pytest
from datadog_checks.envoy import Envoy
from .common import DEFAULT_INSTANCE, FLAKY_METRICS, PROMETHEUS_METRICS, requires_new_environment
pytestmark = [requires_new_environment]
@pytest.mark.e2e
def test_e2e(dd_agent_check):
aggregator = dd_agent_check(DEFAULT_INSTANCE, rate=True)
for metric in PROMETHEUS_METRICS:
formatted_metric = "envoy.{}".format(metric)
if metric in FLAKY_METRICS:
aggregator.assert_metric(formatted_metric, at_least=0)
continue
aggregator.assert_metric(formatted_metric)
aggregator.assert_service_check('envoy.openmetrics.health', Envoy.OK)
|
StarcoderdataPython
|
3229923
|
<gh_stars>0
# a = 'vsem privet'
# print(a.isdigit())
#
# b = '15,7'
# print(b.isdigit())
#
# c = '156'
# print(c.isdigit())
#
# d = '15e6' # 15000000
# print(d.isdigit())
#
# r = '15000000'
# print(r.isdigit())
#
# avg_mark = input('Enter the student average grade\n')
# # if avg_mark.isdigit():
# #     avg_mark = float(avg_mark)
# #     print('Input looks valid', type(avg_mark), avg_mark)
# try:
#     avg_mark = float(avg_mark)
#     print('Input looks valid', type(avg_mark), avg_mark)
# except ValueError:
#     print('Not a valid number', avg_mark)
# class_pupils = input('Enter pupil names separated by commas\n')
class_pupils = ['1', '2', '3', '4', '5', '6']
correct_result = ['1', '2', '3', '4', '5', '6']
# print('Class pupils:', class_pupils)
result = ['1', '2', '3', '4', '5', '6']
assert result == correct_result, 'The algorithm is implemented incorrectly'
|
StarcoderdataPython
|
3204388
|
<filename>src/part2_automation/t6_api_testing/locustfile.py
from locust import HttpUser, task, between
class WebsiteTestUser(HttpUser):
wait_time = between(0.5, 3.0)
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
pass
def on_stop(self):
""" on_stop is called when the TaskSet is stopping """
pass
@task(1)
def welcome_message(self):
self.client.get('http://localhost:5000')
# http://localhost:8089
# https://betterprogramming.pub/introduction-to-locust-an-open-source-load-testing-tool-in-python-2b2e89ea1ff
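# To run this load test (standard Locust CLI, assuming Locust is installed and
# the target app from t6_api_testing is listening on localhost:5000):
#
#   locust -f locustfile.py
#
# then open the web UI linked above (http://localhost:8089) to start the swarm.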
|
StarcoderdataPython
|
3332746
|
<gh_stars>10-100
#!/usr/bin/env python2.7
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import fileinput
import os
import re
import sys
from common import (FUCHSIA_ROOT, fx_format)
def main():
parser = argparse.ArgumentParser(
description='Removes references to `migrated_manifest` from BUILD.gn files')
parser.add_argument('--root',
help='Path to the directory to inspect',
default=FUCHSIA_ROOT)
args = parser.parse_args()
build_files = []
for base, _, files in os.walk(args.root):
for file in files:
if file == 'BUILD.gn':
build_files.append(os.path.join(base, file))
for build_path in build_files:
# Number of currently open curly brackets while processing a target to
# remove.
        # A value less than or equal to 0 means no target is currently being
# erased.
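        # Illustrative example (not taken from a real BUILD.gn) of the kind of
        # target the loop below erases by counting braces, as shown here:
        #
        #   migrated_manifest("foo") {
        #     deps = [ ":foo" ]
        #   }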
curly_bracket_depth = 0
modified = False
for line in fileinput.FileInput(build_path, inplace=True):
if '//build/unification/images/migrated_manifest.gni' in line:
continue
            target_match = re.match(r'\s*migrated_manifest\(', line)
if target_match:
curly_bracket_depth = 1
modified = True
continue
if curly_bracket_depth > 0:
curly_bracket_depth += line.count('{') - line.count('}')
if curly_bracket_depth >= 0:
# Keep erasing.
continue
sys.stdout.write(line)
if modified:
fx_format(build_path)
return 0
if __name__ == '__main__':
sys.exit(main())
|
StarcoderdataPython
|
1685422
|
<reponame>tallzilla/project-euler<filename>euler001.py
#!/bin/python3
import sys, logging
t = int(input().strip())
cum_sum_cache = dict()
for a0 in range(t):
final_sum = 0
n = int(input().strip())
nearest_three_factor = int((n-1) / 3.0)
three_multiple = nearest_three_factor * (nearest_three_factor / 2 + 0.5)
final_sum += three_multiple * 3
nearest_five_factor = int((n-1) / 5.0)
five_multiple = nearest_five_factor * (nearest_five_factor / 2 + 0.5)
final_sum += five_multiple * 5
nearest_fifteen_factor = int((n-1) / 15.0)
fifteen_multiple = nearest_fifteen_factor * (nearest_fifteen_factor / 2 + 0.5)
final_sum -= fifteen_multiple * 15
#this works but is slow
#final_sum = 0
#for x in range(n):
# if x % 5 == 0 or x % 3 == 0:
# final_sum += x
print(int(final_sum))
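# Worked example of the closed form above (n = 10): multiples of 3 below 10 sum
# to 3*(1+2+3) = 18, multiples of 5 sum to 5*1 = 5, and there are no multiples
# of 15 below 10, so the total is 18 + 5 - 0 = 23.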
|
StarcoderdataPython
|
1611513
|
import numpy as np
from math import ceil
def deriveSizeFromScale(img_shape, scale):
output_shape = []
for k in range(2):
output_shape.append(int(ceil(scale[k] * img_shape[k])))
return output_shape
def deriveScaleFromSize(img_shape_in, img_shape_out):
scale = []
for k in range(2):
scale.append(1.0 * img_shape_out[k] / img_shape_in[k])
return scale
def cubic(x):
x = np.array(x).astype(np.float64)
absx = np.absolute(x)
absx2 = np.multiply(absx, absx)
absx3 = np.multiply(absx2, absx)
f = np.multiply(1.5*absx3 - 2.5*absx2 + 1, absx <= 1) + np.multiply(-0.5*absx3 + 2.5*absx2 - 4*absx + 2, (1 < absx) & (absx <= 2))
return f
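# Note: assuming the usual reading, this is the Keys cubic convolution kernel
# with a = -0.5 (the default bicubic kernel of MATLAB's imresize). Spot checks:
# cubic(0.0) == 1.0 and cubic(1.0) == cubic(2.0) == 0.0.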
def contributions(in_length, out_length, scale, kernel, k_width):
if scale < 1:
h = lambda x: scale * kernel(scale * x)
kernel_width = 1.0 * k_width / scale
else:
h = kernel
kernel_width = k_width
x = np.arange(1, out_length+1).astype(np.float64)
u = x / scale + 0.5 * (1 - 1 / scale)
left = np.floor(u - kernel_width / 2)
P = int(ceil(kernel_width)) + 2
ind = np.expand_dims(left, axis=1) + np.arange(P) - 1 # -1 because indexing from 0
indices = ind.astype(np.int32)
weights = h(np.expand_dims(u, axis=1) - indices - 1) # -1 because indexing from 0
weights = np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1))
aux = np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))).astype(np.int32)
indices = aux[np.mod(indices, aux.size)]
ind2store = np.nonzero(np.any(weights, axis=0))
weights = weights[:, ind2store]
indices = indices[:, ind2store]
return weights, indices
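# Reading of the code above (not a guarantee): after the trailing fancy
# indexing, `weights` and `indices` both have shape (out_length, 1, P), so each
# output sample is a weighted sum of P mirror-padded input samples. That is
# exactly what imresizemex/imresizevec below consume.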
def imresizemex(inimg, weights, indices, dim):
in_shape = inimg.shape
w_shape = weights.shape
out_shape = list(in_shape)
out_shape[dim] = w_shape[0]
outimg = np.zeros(out_shape)
if dim == 0:
for i_img in range(in_shape[1]):
for i_w in range(w_shape[0]):
w = weights[i_w, :]
ind = indices[i_w, :]
im_slice = inimg[ind, i_img].astype(np.float64)
outimg[i_w, i_img] = np.sum(np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0)
elif dim == 1:
for i_img in range(in_shape[0]):
for i_w in range(w_shape[0]):
w = weights[i_w, :]
ind = indices[i_w, :]
im_slice = inimg[i_img, ind].astype(np.float64)
outimg[i_img, i_w] = np.sum(np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0)
if inimg.dtype == np.uint8:
outimg = np.clip(outimg, 0, 255)
return np.around(outimg).astype(np.uint8)
else:
return outimg
def imresizevec(inimg, weights, indices, dim):
wshape = weights.shape
if dim == 0:
weights = weights.reshape((wshape[0], wshape[2], 1, 1))
outimg = np.sum(weights*((inimg[indices].squeeze(axis=1)).astype(np.float64)), axis=1)
elif dim == 1:
weights = weights.reshape((1, wshape[0], wshape[2], 1))
outimg = np.sum(weights*((inimg[:, indices].squeeze(axis=2)).astype(np.float64)), axis=2)
if inimg.dtype == np.uint8:
outimg = np.clip(outimg, 0, 255)
return np.around(outimg).astype(np.uint8)
else:
return outimg
def resizeAlongDim(A, dim, weights, indices, mode="vec"):
if mode == "org":
out = imresizemex(A, weights, indices, dim)
else:
out = imresizevec(A, weights, indices, dim)
return out
def imresize(I, scalar_scale=None, output_shape=None, mode="vec"):
kernel = cubic
kernel_width = 4.0
# Fill scale and output_size
if scalar_scale is not None:
scalar_scale = float(scalar_scale)
scale = [scalar_scale, scalar_scale]
output_size = deriveSizeFromScale(I.shape, scale)
elif output_shape is not None:
scale = deriveScaleFromSize(I.shape, output_shape)
output_size = list(output_shape)
else:
print('Error: scalar_scale OR output_shape should be defined!')
return
scale_np = np.array(scale)
order = np.argsort(scale_np)
weights = []
indices = []
for k in range(2):
w, ind = contributions(I.shape[k], output_size[k], scale[k], kernel, kernel_width)
weights.append(w)
indices.append(ind)
B = np.copy(I)
flag2D = False
if B.ndim == 2:
B = np.expand_dims(B, axis=2)
flag2D = True
for k in range(2):
dim = order[k]
B = resizeAlongDim(B, dim, weights[dim], indices[dim], mode)
if flag2D:
B = np.squeeze(B, axis=2)
return B
def convertDouble2Byte(I):
B = np.clip(I, 0.0, 1.0)
B = 255*B
return np.around(B).astype(np.uint8)
if __name__ == '__main__':
import matplotlib.pyplot as plt
x = np.linspace(-2.5,2.5,100)
plt.figure(figsize=(10,10))
plt.plot(x,cubic(x))
plt.show()
x = np.linspace(-2,2,6)[1:-1]
w = 0.25*cubic(0.25*x)
w /= w.sum()
im = np.random.random((32,32))
im_small = imresize(im, 0.25)
|
StarcoderdataPython
|
1638955
|
"""Make BIDS compatible directory structures and infer meta data from MNE."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os
import errno
import shutil as sh
import pandas as pd
from collections import defaultdict, OrderedDict
import numpy as np
from mne import Epochs
from mne.io.constants import FIFF
from mne.io.pick import channel_type
from mne.io import BaseRaw
from mne.channels.channels import _unit2human
from mne.externals.six import string_types
from mne.utils import check_version
from datetime import datetime
from warnings import warn
from .pick import coil_type
from .utils import (make_bids_filename, make_bids_folders,
make_dataset_description, _write_json, _write_tsv,
_read_events, _mkdir_p, age_on_date,
copyfile_brainvision, copyfile_eeglab,
_infer_eeg_placement_scheme)
from .io import (_parse_ext, _read_raw, ALLOWED_EXTENSIONS)
ALLOWED_KINDS = ['meg', 'eeg', 'ieeg']
# Orientation of the coordinate system dependent on manufacturer
ORIENTATION = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.pdf': 'ALS',
'.ds': 'ALS'}
UNITS = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.pdf': 'm', '.ds': 'cm'}
meg_manufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa',
'.fif': 'Elekta', '.pdf': '4D Magnes', '.ds': 'CTF',
'.meg4': 'CTF'}
eeg_manufacturers = {'.vhdr': 'BrainProducts', '.eeg': 'BrainProducts',
'.edf': 'Mixed', '.bdf': 'Biosemi', '.set': 'Mixed',
'.fdt': 'Mixed', '.cnt': 'Neuroscan'}
# Merge the manufacturer dictionaries in a python2 / python3 compatible way
MANUFACTURERS = dict()
MANUFACTURERS.update(meg_manufacturers)
MANUFACTURERS.update(eeg_manufacturers)
# List of synthetic channels by manufacturer that are to be excluded from the
# channel list. Currently this is only for stimulus channels.
IGNORED_CHANNELS = {'KIT/Yokogawa': ['STI 014'],
'BrainProducts': ['STI 014'],
'Mixed': ['STI 014'],
'Biosemi': ['STI 014'],
'Neuroscan': ['STI 014']}
def _channels_tsv(raw, fname, overwrite=False, verbose=True):
"""Create a channels.tsv file and save it.
Parameters
----------
raw : instance of Raw
The data as MNE-Python Raw object.
fname : str
Filename to save the channels.tsv to.
overwrite : bool
Whether to overwrite the existing file.
Defaults to False.
verbose : bool
Set verbose output to true or false.
"""
map_chs = defaultdict(lambda: 'OTHER')
map_chs.update(meggradaxial='MEGGRADAXIAL',
megrefgradaxial='MEGREFGRADAXIAL',
meggradplanar='MEGGRADPLANAR',
megmag='MEGMAG', megrefmag='MEGREFMAG',
eeg='EEG', misc='MISC', stim='TRIG', emg='EMG',
ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG')
map_desc = defaultdict(lambda: 'Other type of channel')
map_desc.update(meggradaxial='Axial Gradiometer',
megrefgradaxial='Axial Gradiometer Reference',
meggradplanar='Planar Gradiometer',
megmag='Magnetometer',
megrefmag='Magnetometer Reference',
stim='Trigger', eeg='ElectroEncephaloGram',
ecog='Electrocorticography',
seeg='StereoEEG',
ecg='ElectroCardioGram',
eog='ElectroOculoGram',
emg='ElectroMyoGram',
misc='Miscellaneous')
get_specific = ('mag', 'ref_meg', 'grad')
# get the manufacturer from the file in the Raw object
manufacturer = None
if hasattr(raw, 'filenames'):
# XXX: Hack for EEGLAB bug in MNE-Python 0.16; fixed in MNE-Python
# 0.17, ... remove the hack after upgrading dependencies in MNE-BIDS
if raw.filenames[0] is None: # hack
ext = '.set' # hack
else:
_, ext = _parse_ext(raw.filenames[0], verbose=verbose)
manufacturer = MANUFACTURERS[ext]
ignored_indexes = [raw.ch_names.index(ch_name) for ch_name in raw.ch_names
if ch_name in
IGNORED_CHANNELS.get(manufacturer, list())]
status, ch_type, description = list(), list(), list()
for idx, ch in enumerate(raw.info['ch_names']):
status.append('bad' if ch in raw.info['bads'] else 'good')
_channel_type = channel_type(raw.info, idx)
if _channel_type in get_specific:
_channel_type = coil_type(raw.info, idx)
ch_type.append(map_chs[_channel_type])
description.append(map_desc[_channel_type])
low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass'])
units = [_unit2human.get(ch_i['unit'], 'n/a') for ch_i in raw.info['chs']]
units = [u if u not in ['NA'] else 'n/a' for u in units]
n_channels = raw.info['nchan']
sfreq = raw.info['sfreq']
df = pd.DataFrame(OrderedDict([
('name', raw.info['ch_names']),
('type', ch_type),
('units', units),
('description', description),
('sampling_frequency', np.full((n_channels), sfreq)),
('low_cutoff', np.full((n_channels), low_cutoff)),
('high_cutoff', np.full((n_channels), high_cutoff)),
('status', status)]))
df.drop(ignored_indexes, inplace=True)
_write_tsv(fname, df, overwrite, verbose)
return fname
def _events_tsv(events, raw, fname, trial_type, overwrite=False,
verbose=True):
"""Create an events.tsv file and save it.
This function will write the mandatory 'onset', and 'duration' columns as
well as the optional 'event_value' and 'event_sample'. The 'event_value'
corresponds to the marker value as found in the TRIG channel of the
recording. In addition, the 'trial_type' field can be written.
Parameters
----------
events : array, shape = (n_events, 3)
The first column contains the event time in samples and the third
column contains the event id. The second column is ignored for now but
typically contains the value of the trigger channel either immediately
before the event or immediately after.
raw : instance of Raw
The data as MNE-Python Raw object.
fname : str
Filename to save the events.tsv to.
trial_type : dict | None
Dictionary mapping a brief description key to an event id (value). For
example {'Go': 1, 'No Go': 2}.
overwrite : bool
Whether to overwrite the existing file.
Defaults to False.
verbose : bool
Set verbose output to true or false.
Notes
-----
The function writes durations of zero for each event.
"""
# Start by filling all data that we know into a df
first_samp = raw.first_samp
sfreq = raw.info['sfreq']
events[:, 0] -= first_samp
data = OrderedDict([('onset', events[:, 0]),
('duration', np.zeros(events.shape[0])),
('trial_type', events[:, 2]),
('event_value', events[:, 2]),
('event_sample', events[:, 0])])
df = pd.DataFrame.from_dict(data)
# Now check if trial_type is specified or should be removed
if trial_type:
trial_type_map = {v: k for k, v in trial_type.items()}
df.trial_type = df.trial_type.map(trial_type_map)
else:
df.drop(labels=['trial_type'], axis=1, inplace=True)
# Onset column needs to be specified in seconds
df.onset /= sfreq
_write_tsv(fname, df, overwrite, verbose)
return fname
def _participants_tsv(raw, subject_id, group, fname, overwrite=False,
verbose=True):
"""Create a participants.tsv file and save it.
This will append any new participant data to the current list if it
exists. Otherwise a new file will be created with the provided information.
Parameters
----------
raw : instance of Raw
The data as MNE-Python Raw object.
subject_id : str
The subject name in BIDS compatible format ('01', '02', etc.)
group : str
Name of group participant belongs to.
fname : str
Filename to save the participants.tsv to.
overwrite : bool
Whether to overwrite the existing file.
Defaults to False.
If there is already data for the given `subject_id` and overwrite is
False, an error will be raised.
verbose : bool
Set verbose output to true or false.
"""
subject_id = 'sub-' + subject_id
data = {'participant_id': [subject_id]}
subject_info = raw.info['subject_info']
if subject_info is not None:
genders = {0: 'U', 1: 'M', 2: 'F'}
sex = genders[subject_info.get('sex', 0)]
# determine the age of the participant
age = subject_info.get('birthday', None)
meas_date = raw.info.get('meas_date', None)
if isinstance(meas_date, (tuple, list, np.ndarray)):
meas_date = meas_date[0]
if meas_date is not None and age is not None:
bday = datetime(age[0], age[1], age[2])
meas_datetime = datetime.fromtimestamp(meas_date)
subject_age = age_on_date(bday, meas_datetime)
else:
subject_age = "n/a"
data.update({'age': [subject_age], 'sex': [sex], 'group': [group]})
df = pd.DataFrame(data=data,
columns=['participant_id', 'age', 'sex', 'group'])
if os.path.exists(fname):
orig_df = pd.read_csv(fname, sep='\t')
# whether the data exists identically in the current DataFrame
exact_included = df.values.tolist()[0] in orig_df.values.tolist()
# whether the subject id is in the existing DataFrame
sid_included = subject_id in orig_df['participant_id'].values
# if the subject data provided is different to the currently existing
# data and overwrite is not True raise an error
if (sid_included and not exact_included) and not overwrite:
raise OSError(errno.EEXIST, '"%s" already exists in the '
'participant list. Please set overwrite to '
'True.' % subject_id)
# otherwise add the new data
df = orig_df.append(df)
# and drop any duplicates as we want overwrite = True to force the old
# data to be overwritten
df.drop_duplicates(subset='participant_id', keep='last',
inplace=True)
df = df.sort_values(by='participant_id')
# overwrite is forced to True as all issues with overwrite == False have
# been handled by this point
_write_tsv(fname, df, True, verbose)
return fname
def _scans_tsv(raw, raw_fname, fname, overwrite=False, verbose=True):
"""Create a scans.tsv file and save it.
Parameters
----------
raw : instance of Raw
The data as MNE-Python Raw object.
raw_fname : str
Relative path to the raw data file.
fname : str
Filename to save the scans.tsv to.
overwrite : bool
Defaults to False.
Whether to overwrite the existing data in the file.
If there is already data for the given `fname` and overwrite is False,
an error will be raised.
verbose : bool
Set verbose output to true or false.
"""
# get measurement date from the data info
meas_date = raw.info['meas_date']
if isinstance(meas_date, (tuple, list, np.ndarray)):
meas_date = meas_date[0]
acq_time = datetime.fromtimestamp(
meas_date).strftime('%Y-%m-%dT%H:%M:%S')
else:
acq_time = 'n/a'
df = pd.DataFrame(data={'filename': ['%s' % raw_fname],
'acq_time': [acq_time]},
columns=['filename', 'acq_time'])
if os.path.exists(fname):
orig_df = pd.read_csv(fname, sep='\t')
# if the file name is already in the file raise an error
if raw_fname in orig_df['filename'].values and not overwrite:
raise OSError(errno.EEXIST, '"%s" already exists in the '
'scans list. Please set overwrite to '
'True.' % raw_fname)
# otherwise add the new data
df = orig_df.append(df)
# and drop any duplicates as we want overwrite = True to force the old
# data to be overwritten
df.drop_duplicates(subset='filename', keep='last', inplace=True)
df = df.sort_values(by='acq_time')
# overwrite is forced to True as all issues with overwrite == False have
# been handled by this point
_write_tsv(fname, df, True, verbose)
return fname
def _coordsystem_json(raw, unit, orient, manufacturer, fname,
overwrite=False, verbose=True):
"""Create a coordsystem.json file and save it.
Parameters
----------
raw : instance of Raw
The data as MNE-Python Raw object.
unit : str
Units to be used in the coordsystem specification.
orient : str
Used to define the coordinate system for the head coils.
manufacturer : str
Used to define the coordinate system for the MEG sensors.
fname : str
Filename to save the coordsystem.json to.
overwrite : bool
Whether to overwrite the existing file.
Defaults to False.
verbose : bool
Set verbose output to true or false.
"""
dig = raw.info['dig']
coords = dict()
fids = {d['ident']: d for d in dig if d['kind'] ==
FIFF.FIFFV_POINT_CARDINAL}
if fids:
if FIFF.FIFFV_POINT_NASION in fids:
coords['NAS'] = fids[FIFF.FIFFV_POINT_NASION]['r'].tolist()
if FIFF.FIFFV_POINT_LPA in fids:
coords['LPA'] = fids[FIFF.FIFFV_POINT_LPA]['r'].tolist()
if FIFF.FIFFV_POINT_RPA in fids:
coords['RPA'] = fids[FIFF.FIFFV_POINT_RPA]['r'].tolist()
hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI}
if hpi:
for ident in hpi.keys():
coords['coil%d' % ident] = hpi[ident]['r'].tolist()
coord_frame = set([dig[ii]['coord_frame'] for ii in range(len(dig))])
if len(coord_frame) > 1:
err = 'All HPI and Fiducials must be in the same coordinate frame.'
raise ValueError(err)
fid_json = {'MEGCoordinateSystem': manufacturer,
'MEGCoordinateUnits': unit, # XXX validate this
'HeadCoilCoordinates': coords,
'HeadCoilCoordinateSystem': orient,
'HeadCoilCoordinateUnits': unit # XXX validate this
}
_write_json(fid_json, fname, overwrite)
return fname
def _sidecar_json(raw, task, manufacturer, fname, kind, eeg_ref=None,
eeg_gnd=None, overwrite=False, verbose=True):
"""Create a sidecar json file depending on the kind and save it.
The sidecar json file provides meta data about the data of a certain kind.
Parameters
----------
raw : instance of Raw
The data as MNE-Python Raw object.
task : str
Name of the task the data is based on.
manufacturer : str
Manufacturer of the acquisition system. For MEG also used to define the
coordinate system for the MEG sensors.
fname : str
Filename to save the sidecar json to.
kind : str
Type of the data as in ALLOWED_KINDS.
eeg_ref : str
Description of the type of reference used and (when applicable) of
location of the reference electrode. Defaults to None.
eeg_gnd : str
Description of the location of the ground electrode. Defaults to None.
overwrite : bool
Whether to overwrite the existing file.
Defaults to False.
verbose : bool
Set verbose output to true or false. Defaults to true.
"""
sfreq = raw.info['sfreq']
powerlinefrequency = raw.info.get('line_freq', None)
if powerlinefrequency is None:
warn('No line frequency found, defaulting to 50 Hz')
powerlinefrequency = 50
if not eeg_ref:
eeg_ref = 'n/a'
if not eeg_gnd:
eeg_gnd = 'n/a'
if isinstance(raw, BaseRaw):
rec_type = 'continuous'
elif isinstance(raw, Epochs):
rec_type = 'epoched'
else:
rec_type = 'n/a'
# determine whether any channels have to be ignored:
n_ignored = len([ch_name for ch_name in
IGNORED_CHANNELS.get(manufacturer, list()) if
ch_name in raw.ch_names])
# all ignored channels are trigger channels at the moment...
n_megchan = len([ch for ch in raw.info['chs']
if ch['kind'] == FIFF.FIFFV_MEG_CH])
n_megrefchan = len([ch for ch in raw.info['chs']
if ch['kind'] == FIFF.FIFFV_REF_MEG_CH])
n_eegchan = len([ch for ch in raw.info['chs']
if ch['kind'] == FIFF.FIFFV_EEG_CH])
n_ecogchan = len([ch for ch in raw.info['chs']
if ch['kind'] == FIFF.FIFFV_ECOG_CH])
n_seegchan = len([ch for ch in raw.info['chs']
if ch['kind'] == FIFF.FIFFV_SEEG_CH])
n_eogchan = len([ch for ch in raw.info['chs']
if ch['kind'] == FIFF.FIFFV_EOG_CH])
n_ecgchan = len([ch for ch in raw.info['chs']
if ch['kind'] == FIFF.FIFFV_ECG_CH])
n_emgchan = len([ch for ch in raw.info['chs']
if ch['kind'] == FIFF.FIFFV_EMG_CH])
n_miscchan = len([ch for ch in raw.info['chs']
if ch['kind'] == FIFF.FIFFV_MISC_CH])
n_stimchan = len([ch for ch in raw.info['chs']
if ch['kind'] == FIFF.FIFFV_STIM_CH]) - n_ignored
# Define modality-specific JSON dictionaries
ch_info_json_common = [
('TaskName', task),
('Manufacturer', manufacturer),
('PowerLineFrequency', powerlinefrequency),
('SamplingFrequency', sfreq),
('SoftwareFilters', 'n/a'),
('RecordingDuration', raw.times[-1]),
('RecordingType', rec_type)]
ch_info_json_meg = [
('DewarPosition', 'n/a'),
('DigitizedLandmarks', False),
('DigitizedHeadPoints', False),
('MEGChannelCount', n_megchan),
('MEGREFChannelCount', n_megrefchan)]
ch_info_json_eeg = [
('EEGReference', eeg_ref),
('EEGGround', eeg_gnd),
('EEGPlacementScheme', _infer_eeg_placement_scheme(raw)),
('Manufacturer', manufacturer)]
ch_info_json_ieeg = [
('ECOGChannelCount', n_ecogchan),
('SEEGChannelCount', n_seegchan)]
ch_info_ch_counts = [
('EEGChannelCount', n_eegchan),
('EOGChannelCount', n_eogchan),
('ECGChannelCount', n_ecgchan),
('EMGChannelCount', n_emgchan),
('MiscChannelCount', n_miscchan),
('TriggerChannelCount', n_stimchan)]
# Stitch together the complete JSON dictionary
ch_info_json = ch_info_json_common
if kind == 'meg':
append_kind_json = ch_info_json_meg
elif kind == 'eeg':
append_kind_json = ch_info_json_eeg
elif kind == 'ieeg':
append_kind_json = ch_info_json_ieeg
else:
raise ValueError('Unexpected "kind": {}'
' Use one of: {}'.format(kind, ALLOWED_KINDS))
ch_info_json += append_kind_json
ch_info_json += ch_info_ch_counts
ch_info_json = OrderedDict(ch_info_json)
_write_json(ch_info_json, fname, overwrite, verbose)
return fname
def raw_to_bids(subject_id, task, raw_file, output_path, session_id=None,
acquisition=None, run=None, kind='meg', events_data=None,
event_id=None, hpi=None, electrode=None, hsp=None,
eeg_ref=None, eeg_gnd=None, config=None,
overwrite=False, verbose=True):
"""Walk over a folder of files and create BIDS compatible folder.
Parameters
----------
subject_id : str
The subject name in BIDS compatible format ('01', '02', etc.)
task : str
Name of the task the data is based on.
raw_file : str | instance of mne.Raw
The raw data. If a string, it is assumed to be the path to the raw data
file. Otherwise it must be an instance of mne.Raw
output_path : str
The path of the BIDS compatible folder
session_id : str | None
The session name in BIDS compatible format.
acquisition : str | None
Acquisition parameter for the dataset.
run : int | None
The run number for this dataset.
kind : str, one of ('meg', 'eeg', 'ieeg')
The kind of data being converted. Defaults to "meg".
events_data : str | array | None
The events file. If a string, a path to the events file. If an array,
the MNE events array (shape n_events, 3). If None, events will be
inferred from the stim channel using `mne.find_events`.
event_id : dict | None
The event id dict used to create a 'trial_type' column in events.tsv
hpi : None | str
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
electrode : None | str
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
eeg_ref : str
Description of the type of reference used and (when applicable) of
location of the reference electrode. Defaults to None.
eeg_gnd : str
Description of the location of the ground electrode. Defaults to None.
config : str | None
A path to the configuration file to use if the data is from a BTi
system.
overwrite : bool
Whether to overwrite existing files or data in files.
Defaults to False.
If overwrite is True, any existing files with the same BIDS parameters
will be overwritten with the exception of the `participants.tsv` and
`scans.tsv` files. For these files, parts of pre-existing data that
match the current data will be replaced.
If overwrite is False, no existing data will be overwritten or
replaced.
verbose : bool
If verbose is True, this will print a snippet of the sidecar files. If
False, no content will be printed.
Notes
-----
    For the participants.tsv file, the raw.info['subject_info'] should be
updated and raw.info['meas_date'] should not be None to compute the age
of the participant correctly.
"""
if isinstance(raw_file, string_types):
# We must read in the raw data
raw = _read_raw(raw_file, electrode=electrode, hsp=hsp, hpi=hpi,
config=config, verbose=verbose)
_, ext = _parse_ext(raw_file, verbose=verbose)
raw_fname = raw_file
elif isinstance(raw_file, BaseRaw):
# We got a raw mne object, get back the filename if possible
# Assume that if no filename attr exists, it's a fif file.
raw = raw_file.copy()
if hasattr(raw, 'filenames'):
_, ext = _parse_ext(raw.filenames[0], verbose=verbose)
raw_fname = raw.filenames[0]
else:
# FIXME: How to get the filename if no filenames attribute?
raw_fname = 'unknown_file_name'
ext = '.fif'
else:
raise ValueError('raw_file must be an instance of str or BaseRaw, '
'got %s' % type(raw_file))
data_path = make_bids_folders(subject=subject_id, session=session_id,
kind=kind, root=output_path,
overwrite=False, verbose=verbose)
if session_id is None:
ses_path = os.sep.join(data_path.split(os.sep)[:-1])
else:
ses_path = make_bids_folders(subject=subject_id, session=session_id,
root=output_path, make_dir=False,
overwrite=False, verbose=verbose)
# create filenames
scans_fname = make_bids_filename(
subject=subject_id, session=session_id, suffix='scans.tsv',
prefix=ses_path)
participants_fname = make_bids_filename(prefix=output_path,
suffix='participants.tsv')
coordsystem_fname = make_bids_filename(
subject=subject_id, session=session_id, acquisition=acquisition,
suffix='coordsystem.json', prefix=data_path)
data_meta_fname = make_bids_filename(
subject=subject_id, session=session_id, task=task, run=run,
acquisition=acquisition, suffix='%s.json' % kind, prefix=data_path)
if ext in ['.fif', '.ds', '.vhdr', '.edf', '.bdf', '.set', '.cnt']:
raw_file_bids = make_bids_filename(
subject=subject_id, session=session_id, task=task, run=run,
acquisition=acquisition, suffix='%s%s' % (kind, ext))
else:
raw_folder = make_bids_filename(
subject=subject_id, session=session_id, task=task, run=run,
acquisition=acquisition, suffix='%s' % kind)
raw_file_bids = make_bids_filename(
subject=subject_id, session=session_id, task=task, run=run,
acquisition=acquisition, suffix='%s%s' % (kind, ext),
prefix=raw_folder)
events_tsv_fname = make_bids_filename(
subject=subject_id, session=session_id, task=task,
acquisition=acquisition, run=run, suffix='events.tsv',
prefix=data_path)
channels_fname = make_bids_filename(
subject=subject_id, session=session_id, task=task, run=run,
acquisition=acquisition, suffix='channels.tsv', prefix=data_path)
# Read in Raw object and extract metadata from Raw object if needed
orient = ORIENTATION.get(ext, 'n/a')
unit = UNITS.get(ext, 'n/a')
manufacturer = MANUFACTURERS.get(ext, 'n/a')
if manufacturer == 'Mixed':
manufacturer = 'n/a'
# save all meta data
_participants_tsv(raw, subject_id, "n/a", participants_fname, overwrite,
verbose)
_scans_tsv(raw, os.path.join(kind, raw_file_bids), scans_fname,
overwrite, verbose)
# TODO: Implement coordystem.json and electrodes.tsv for EEG and iEEG
if kind == 'meg':
_coordsystem_json(raw, unit, orient, manufacturer, coordsystem_fname,
overwrite, verbose)
events = _read_events(events_data, raw)
if len(events) > 0:
_events_tsv(events, raw, events_tsv_fname, event_id, overwrite,
verbose)
make_dataset_description(output_path, name=" ", verbose=verbose)
_sidecar_json(raw, task, manufacturer, data_meta_fname, kind, eeg_ref,
eeg_gnd, overwrite, verbose)
_channels_tsv(raw, channels_fname, overwrite, verbose)
# set the raw file name to now be the absolute path to ensure the files
# are placed in the right location
raw_file_bids = os.path.join(data_path, raw_file_bids)
if os.path.exists(raw_file_bids) and not overwrite:
raise OSError(errno.EEXIST, '"%s" already exists. Please set '
'overwrite to True.' % raw_file_bids)
_mkdir_p(os.path.dirname(raw_file_bids))
if verbose:
print('Writing data files to %s' % raw_file_bids)
if ext not in ALLOWED_EXTENSIONS:
raise ValueError('ext must be in %s, got %s'
% (''.join(ALLOWED_EXTENSIONS), ext))
# Copy the imaging data files
if ext in ['.fif']:
n_rawfiles = len(raw.filenames)
if n_rawfiles > 1:
# TODO Update MNE requirement to version 0.17 when it's released
if check_version('mne', '0.17.dev'):
split_naming = 'bids'
raw.save(raw_file_bids, split_naming=split_naming,
overwrite=True)
else:
raise NotImplementedError(
'Renaming split fif files is not supported on your '
'version of MNE. Please upgrade to at least "0.17.dev". '
'Please contact MNE developers if you have '
'any questions.')
else:
# TODO insert arg `split_naming=split_naming`
# when MNE releases 0.17
raw.save(raw_file_bids, overwrite=True)
# CTF data is saved in a directory
elif ext == '.ds':
sh.copytree(raw_fname, raw_file_bids)
# BrainVision is multifile, copy over all of them and fix pointers
elif ext == '.vhdr':
copyfile_brainvision(raw_fname, raw_file_bids)
# EEGLAB .set might be accompanied by a .fdt - find out and copy it too
elif ext == '.set':
copyfile_eeglab(raw_fname, raw_file_bids)
else:
sh.copyfile(raw_fname, raw_file_bids)
# KIT data requires the marker file to be copied over too
if hpi is not None:
if isinstance(hpi, list):
# No currently accepted way to name multiple marker files. See:
# https://github.com/bids-standard/bids-specification/issues/45
raise ValueError('Only single marker coils supported currently')
_, marker_ext = _parse_ext(hpi)
marker_fname = make_bids_filename(
subject=subject_id, session=session_id, task=task, run=run,
acquisition=acquisition, suffix='markers%s' % marker_ext,
prefix=os.path.join(data_path, raw_folder))
sh.copyfile(hpi, marker_fname)
return output_path
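# Usage sketch (illustrative; the paths and IDs below are hypothetical):
#
#   raw_to_bids(subject_id='01', task='rest', raw_file='sample_raw.fif',
#               output_path='bids_out', kind='meg', overwrite=True)
#
# This reads the raw file, writes the sidecar files (participants.tsv,
# scans.tsv, channels.tsv, events.tsv, the *_meg.json sidecar and
# coordsystem.json) and copies the data file into the BIDS folder tree.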
|
StarcoderdataPython
|
35160
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from enum import Enum
class Type(Enum):
STRING = 'string'
NUMBER = 'number'
BOOLEAN = 'boolean'
DATE = 'date'
DATETIME = 'datetime'
TIMEOFDAY = 'timeofday'
|
StarcoderdataPython
|
4825121
|
from collections import OrderedDict
import copy
import hashlib
import io
import itertools
import logging
import os, os.path
import platform
import random
import shutil
import subprocess
import sys
import struct
import time
import zipfile
from World import World
from Spoiler import Spoiler
from Rom import Rom
from Patches import patch_rom
from Cosmetics import patch_cosmetics
from DungeonList import create_dungeons
from Fill import distribute_items_restrictive, ShuffleError
from Item import Item
from ItemPool import generate_itempool
from Hints import buildGossipHints
from Utils import default_output_path, is_bundled, subprocess_args, data_path
from version import __version__
from N64Patch import create_patch_file, apply_patch_file
from SettingsList import setting_infos, logic_tricks
from Rules import set_rules, set_shop_rules
from Plandomizer import Distribution
from Search import Search, RewindableSearch
from EntranceShuffle import set_entrances
from LocationList import set_drop_location_names
class dummy_window():
def __init__(self):
pass
def update_status(self, text):
pass
def update_progress(self, val):
pass
def main(settings, window=dummy_window()):
start = time.process_time()
logger = logging.getLogger('')
old_tricks = settings.allowed_tricks
settings.load_distribution()
# compare pointers to lists rather than contents, so even if the two are identical
# we'll still log the error and note the dist file overrides completely.
if old_tricks and old_tricks is not settings.allowed_tricks:
logger.error('Tricks are set in two places! Using only the tricks from the distribution file.')
for trick in logic_tricks.values():
settings.__dict__[trick['name']] = trick['name'] in settings.allowed_tricks
# we load the rom before creating the seed so that errors get caught early
if settings.compress_rom == 'None' and not settings.create_spoiler:
raise Exception('`No Output` must have spoiler enabled to produce anything.')
if settings.compress_rom != 'None':
window.update_status('Loading ROM')
rom = Rom(settings.rom)
else:
rom = None
if not settings.world_count:
settings.world_count = 1
elif settings.world_count < 1 or settings.world_count > 255:
raise Exception('World Count must be between 1 and 255')
# Bounds-check the player_num settings, in case something's gone wrong we want to know.
if settings.player_num < 1:
raise Exception(f'Invalid player num: {settings.player_num}; must be between (1, {settings.world_count})')
if settings.player_num > settings.world_count:
if settings.compress_rom not in ['None', 'Patch']:
raise Exception(f'Player Num is {settings.player_num}; must be between (1, {settings.world_count})')
settings.player_num = settings.world_count
logger.info('OoT Randomizer Version %s - Seed: %s', __version__, settings.seed)
settings.remove_disabled()
logger.info('(Original) Settings string: %s\n', settings.settings_string)
random.seed(settings.numeric_seed)
settings.resolve_random_settings(cosmetic=False)
logger.debug(settings.get_settings_display())
max_attempts = 10
for attempt in range(1, max_attempts + 1):
try:
spoiler = generate(settings, window)
break
except ShuffleError as e:
logger.warning('Failed attempt %d of %d: %s', attempt, max_attempts, e)
if attempt >= max_attempts:
raise
else:
logger.info('Retrying...\n\n')
settings.reset_distribution()
return patch_and_output(settings, window, spoiler, rom, start)
def generate(settings, window):
logger = logging.getLogger('')
worlds = []
for i in range(0, settings.world_count):
worlds.append(World(i, settings))
window.update_status('Creating the Worlds')
for id, world in enumerate(worlds):
logger.info('Generating World %d.' % (id + 1))
window.update_progress(0 + 1*(id + 1)/settings.world_count)
logger.info('Creating Overworld')
if settings.logic_rules == 'glitched':
overworld_data = os.path.join(data_path('Glitched World'), 'Overworld.json')
else:
overworld_data = os.path.join(data_path('World'), 'Overworld.json')
# Compile the json rules based on settings
world.load_regions_from_json(overworld_data)
create_dungeons(world)
world.create_internal_locations()
if settings.shopsanity != 'off':
world.random_shop_prices()
world.set_scrub_prices()
window.update_progress(0 + 4*(id + 1)/settings.world_count)
logger.info('Calculating Access Rules.')
set_rules(world)
window.update_progress(0 + 5*(id + 1)/settings.world_count)
logger.info('Generating Item Pool.')
generate_itempool(world)
set_shop_rules(world)
set_drop_location_names(world)
world.fill_bosses()
if settings.triforce_hunt:
settings.distribution.configure_triforce_hunt(worlds)
logger.info('Setting Entrances.')
set_entrances(worlds)
window.update_status('Placing the Items')
logger.info('Fill the world.')
distribute_items_restrictive(window, worlds)
window.update_progress(35)
spoiler = Spoiler(worlds)
if settings.create_spoiler:
window.update_status('Calculating Spoiler Data')
logger.info('Calculating playthrough.')
create_playthrough(spoiler)
window.update_progress(50)
if settings.create_spoiler or settings.hints != 'none':
window.update_status('Calculating Hint Data')
logger.info('Calculating hint data.')
update_required_items(spoiler)
buildGossipHints(spoiler, worlds)
window.update_progress(55)
spoiler.build_file_hash()
return spoiler
def patch_and_output(settings, window, spoiler, rom, start):
logger = logging.getLogger('')
logger.info('Patching ROM.')
worlds = spoiler.worlds
cosmetics_log = None
settings_string_hash = hashlib.sha1(settings.settings_string.encode('utf-8')).hexdigest().upper()[:5]
if settings.output_file:
outfilebase = settings.output_file
elif settings.world_count > 1:
outfilebase = 'OoT_%s_%s_W%d' % (settings_string_hash, settings.seed, settings.world_count)
else:
outfilebase = 'OoT_%s_%s' % (settings_string_hash, settings.seed)
output_dir = default_output_path(settings.output_dir)
if settings.compress_rom == 'Patch':
rng_state = random.getstate()
file_list = []
window.update_progress(65)
for world in worlds:
if settings.world_count > 1:
window.update_status('Patching ROM: Player %d' % (world.id + 1))
patchfilename = '%sP%d.zpf' % (outfilebase, world.id + 1)
else:
window.update_status('Patching ROM')
patchfilename = '%s.zpf' % outfilebase
random.setstate(rng_state)
patch_rom(spoiler, world, rom)
cosmetics_log = patch_cosmetics(settings, rom)
rom.update_header()
window.update_progress(65 + 20*(world.id + 1)/settings.world_count)
window.update_status('Creating Patch File')
output_path = os.path.join(output_dir, patchfilename)
file_list.append(patchfilename)
create_patch_file(rom, output_path)
rom.restore()
window.update_progress(65 + 30*(world.id + 1)/settings.world_count)
if settings.create_cosmetics_log and cosmetics_log:
window.update_status('Creating Cosmetics Log')
if settings.world_count > 1:
cosmetics_log_filename = "%sP%d_Cosmetics.txt" % (outfilebase, world.id + 1)
else:
cosmetics_log_filename = '%s_Cosmetics.txt' % outfilebase
cosmetics_log.to_file(os.path.join(output_dir, cosmetics_log_filename))
file_list.append(cosmetics_log_filename)
cosmetics_log = None
if settings.world_count > 1:
window.update_status('Creating Patch Archive')
output_path = os.path.join(output_dir, '%s.zpfz' % outfilebase)
with zipfile.ZipFile(output_path, mode="w") as patch_archive:
for file in file_list:
file_path = os.path.join(output_dir, file)
patch_archive.write(file_path, file.replace(outfilebase, ''), compress_type=zipfile.ZIP_DEFLATED)
for file in file_list:
os.remove(os.path.join(output_dir, file))
logger.info("Created patchfile at: %s" % output_path)
window.update_progress(95)
elif settings.compress_rom != 'None':
window.update_status('Patching ROM')
patch_rom(spoiler, worlds[settings.player_num - 1], rom)
cosmetics_log = patch_cosmetics(settings, rom)
window.update_progress(65)
window.update_status('Saving Uncompressed ROM')
if settings.world_count > 1:
filename = "%sP%d.z64" % (outfilebase, settings.player_num)
else:
filename = '%s.z64' % outfilebase
output_path = os.path.join(output_dir, filename)
rom.write_to_file(output_path)
if settings.compress_rom == 'True':
window.update_status('Compressing ROM')
logger.info('Compressing ROM.')
if is_bundled():
compressor_path = "."
else:
compressor_path = "Compress"
if platform.system() == 'Windows':
if 8 * struct.calcsize("P") == 64:
compressor_path += "\\Compress.exe"
else:
compressor_path += "\\Compress32.exe"
elif platform.system() == 'Linux':
if platform.uname()[4] == 'aarch64' or platform.uname()[4] == 'arm64':
compressor_path += "/Compress_ARM64"
else:
compressor_path += "/Compress"
elif platform.system() == 'Darwin':
compressor_path += "/Compress.out"
else:
compressor_path = ""
logger.info('OS not supported for compression')
output_compress_path = output_path[:output_path.rfind('.')] + '-comp.z64'
if compressor_path != "":
run_process(window, logger, [compressor_path, output_path, output_compress_path])
os.remove(output_path)
logger.info("Created compressed rom at: %s" % output_compress_path)
else:
logger.info("Created uncompressed rom at: %s" % output_path)
window.update_progress(95)
if not settings.create_spoiler or settings.output_settings:
settings.distribution.update_spoiler(spoiler, False)
window.update_status('Creating Settings Log')
settings_path = os.path.join(output_dir, '%s_Settings.json' % outfilebase)
settings.distribution.to_file(settings_path, False)
logger.info("Created settings log at: %s" % ('%s_Settings.json' % outfilebase))
if settings.create_spoiler:
settings.distribution.update_spoiler(spoiler, True)
window.update_status('Creating Spoiler Log')
spoiler_path = os.path.join(output_dir, '%s_Spoiler.json' % outfilebase)
settings.distribution.to_file(spoiler_path, True)
logger.info("Created spoiler log at: %s" % ('%s_Spoiler.json' % outfilebase))
if settings.create_cosmetics_log and cosmetics_log:
window.update_status('Creating Cosmetics Log')
if settings.world_count > 1 and not settings.output_file:
filename = "%sP%d_Cosmetics.txt" % (outfilebase, settings.player_num)
else:
filename = '%s_Cosmetics.txt' % outfilebase
cosmetic_path = os.path.join(output_dir, filename)
cosmetics_log.to_file(cosmetic_path)
logger.info("Created cosmetic log at: %s" % cosmetic_path)
if settings.enable_distribution_file:
window.update_status('Copying Distribution File')
try:
filename = os.path.join(output_dir, '%s_Distribution.json' % outfilebase)
shutil.copyfile(settings.distribution_file, filename)
logger.info("Copied distribution file to: %s" % filename)
except:
logger.info('Distribution file copy failed.')
window.update_progress(100)
if cosmetics_log and cosmetics_log.error:
window.update_status('Success: Rom patched successfully. Some cosmetics could not be applied.')
else:
window.update_status('Success: Rom patched successfully')
logger.info('Done. Enjoy.')
logger.debug('Total Time: %s', time.process_time() - start)
return worlds[settings.player_num - 1]
def from_patch_file(settings, window=dummy_window()):
start = time.process_time()
logger = logging.getLogger('')
    # we load the rom before creating the seed so that errors get caught early
if settings.compress_rom == 'None' or settings.compress_rom == 'Patch':
raise Exception('Output Type must be a ROM when patching from a patch file.')
window.update_status('Loading ROM')
rom = Rom(settings.rom)
logger.info('Patching ROM.')
filename_split = os.path.basename(settings.patch_file).split('.')
if settings.output_file:
outfilebase = settings.output_file
else:
outfilebase = filename_split[0]
extension = filename_split[-1]
output_dir = default_output_path(settings.output_dir)
output_path = os.path.join(output_dir, outfilebase)
window.update_status('Patching ROM')
if extension == 'zpf':
subfile = None
else:
subfile = 'P%d.zpf' % (settings.player_num)
if not settings.output_file:
output_path += 'P%d' % (settings.player_num)
apply_patch_file(rom, settings.patch_file, subfile)
cosmetics_log = None
if settings.repatch_cosmetics:
cosmetics_log = patch_cosmetics(settings, rom)
window.update_progress(65)
window.update_status('Saving Uncompressed ROM')
uncompressed_output_path = output_path + '.z64'
rom.write_to_file(uncompressed_output_path)
if settings.compress_rom == 'True':
window.update_status('Compressing ROM')
logger.info('Compressing ROM.')
if is_bundled():
compressor_path = "."
else:
compressor_path = "Compress"
if platform.system() == 'Windows':
if 8 * struct.calcsize("P") == 64:
compressor_path += "\\Compress.exe"
else:
compressor_path += "\\Compress32.exe"
elif platform.system() == 'Linux':
compressor_path += "/Compress"
elif platform.system() == 'Darwin':
compressor_path += "/Compress.out"
else:
compressor_path = ""
logger.info('OS not supported for compression')
output_compress_path = output_path + '-comp.z64'
if compressor_path != "":
run_process(window, logger, [compressor_path, uncompressed_output_path, output_compress_path])
os.remove(uncompressed_output_path)
logger.info("Created compressed rom at: %s" % output_compress_path)
else:
logger.info("Created uncompressed rom at: %s" % output_path)
window.update_progress(95)
if settings.create_cosmetics_log and cosmetics_log:
window.update_status('Creating Cosmetics Log')
if settings.world_count > 1 and not settings.output_file:
filename = "%sP%d_Cosmetics.txt" % (outfilebase, settings.player_num)
else:
filename = '%s_Cosmetics.txt' % outfilebase
cosmetic_path = os.path.join(output_dir, filename)
cosmetics_log.to_file(cosmetic_path)
logger.info("Created cosmetic log at: %s" % cosmetic_path)
window.update_progress(100)
if cosmetics_log and cosmetics_log.error:
window.update_status('Success: Rom patched successfully. Some cosmetics could not be applied.')
else:
window.update_status('Success: Rom patched successfully')
logger.info('Done. Enjoy.')
logger.debug('Total Time: %s', time.process_time() - start)
return True
def cosmetic_patch(settings, window=dummy_window()):
start = time.process_time()
logger = logging.getLogger('')
if settings.patch_file == '':
raise Exception('Cosmetic Only must have a patch file supplied.')
window.update_status('Loading ROM')
rom = Rom(settings.rom)
logger.info('Patching ROM.')
filename_split = os.path.basename(settings.patch_file).split('.')
if settings.output_file:
outfilebase = settings.output_file
else:
outfilebase = filename_split[0]
extension = filename_split[-1]
output_dir = default_output_path(settings.output_dir)
output_path = os.path.join(output_dir, outfilebase)
window.update_status('Patching ROM')
if extension == 'zpf':
subfile = None
else:
subfile = 'P%d.zpf' % (settings.player_num)
apply_patch_file(rom, settings.patch_file, subfile)
window.update_progress(65)
# clear changes from the base patch file
patched_base_rom = copy.copy(rom.buffer)
rom.changed_address = {}
rom.changed_dma = {}
rom.force_patch = []
window.update_status('Patching ROM')
patchfilename = '%s_Cosmetic.zpf' % output_path
cosmetics_log = patch_cosmetics(settings, rom)
window.update_progress(80)
window.update_status('Creating Patch File')
    # diff the new patch file against the already-patched base ROM
rom.original.buffer = patched_base_rom
rom.update_header()
create_patch_file(rom, patchfilename)
logger.info("Created patchfile at: %s" % patchfilename)
window.update_progress(95)
if settings.create_cosmetics_log and cosmetics_log:
window.update_status('Creating Cosmetics Log')
if settings.world_count > 1 and not settings.output_file:
filename = "%sP%d_Cosmetics.txt" % (outfilebase, settings.player_num)
else:
filename = '%s_Cosmetics.txt' % outfilebase
cosmetic_path = os.path.join(output_dir, filename)
cosmetics_log.to_file(cosmetic_path)
logger.info("Created cosmetic log at: %s" % cosmetic_path)
window.update_progress(100)
if cosmetics_log and cosmetics_log.error:
window.update_status('Success: Rom patched successfully. Some cosmetics could not be applied.')
else:
window.update_status('Success: Rom patched successfully')
logger.info('Done. Enjoy.')
logger.debug('Total Time: %s', time.process_time() - start)
return True
def run_process(window, logger, args):
process = subprocess.Popen(args, **subprocess_args(True))
filecount = None
while True:
line = process.stdout.readline()
if line != b'':
find_index = line.find(b'files remaining')
if find_index > -1:
files = int(line[:find_index].strip())
                if filecount is None:
filecount = files
window.update_progress(65 + 30*(1 - files/filecount))
logger.info(line.decode('utf-8').strip('\n'))
else:
break
def copy_worlds(worlds):
worlds = [world.copy() for world in worlds]
Item.fix_worlds_after_copy(worlds)
return worlds
def update_required_items(spoiler):
worlds = spoiler.worlds
# get list of all of the progressive items that can appear in hints
# all_locations: all progressive items. have to collect from these
# item_locations: only the ones that should appear as "required"/WotH
all_locations = [location for world in worlds for location in world.get_filled_locations()]
# Set to test inclusion against
item_locations = {location for location in all_locations if location.item.majoritem and not location.locked and location.item.name != 'Triforce Piece'}
    # if the playthrough was generated, filter the list of locations to the
    # locations in the playthrough. The required locations are a subset of these
    # locations. We can't use the locations directly since they belong to the
    # copied spoiler world, so we must compare via name and world id
if spoiler.playthrough:
translate = lambda loc: worlds[loc.world.id].get_location(loc.name)
spoiler_locations = set(map(translate, itertools.chain.from_iterable(spoiler.playthrough.values())))
item_locations &= spoiler_locations
required_locations = []
search = Search([world.state for world in worlds])
for location in search.iter_reachable_locations(all_locations):
# Try to remove items one at a time and see if the game is still beatable
if location in item_locations:
old_item = location.item
location.item = None
            # copies state! This is very important as we're in the middle of a search
            # already; beneficially, it has a search state it can start from
if not search.can_beat_game():
required_locations.append(location)
location.item = old_item
search.state_list[location.item.world.id].collect(location.item)
    # Filter the required locations to only include the locations in each world
required_locations_dict = {}
for world in worlds:
required_locations_dict[world.id] = list(filter(lambda location: location.world.id == world.id, required_locations))
spoiler.required_locations = required_locations_dict
def create_playthrough(spoiler):
worlds = spoiler.worlds
if worlds[0].check_beatable_only and not Search([world.state for world in worlds]).can_beat_game():
raise RuntimeError('Uncopied is broken too.')
# create a copy as we will modify it
old_worlds = worlds
worlds = copy_worlds(worlds)
# if we only check for beatable, we can do this sanity check first before writing down spheres
if worlds[0].check_beatable_only and not Search([world.state for world in worlds]).can_beat_game():
raise RuntimeError('Cannot beat game. Something went terribly wrong here!')
search = RewindableSearch([world.state for world in worlds])
# Get all item locations in the worlds
item_locations = search.progression_locations()
# Omit certain items from the playthrough
internal_locations = {location for location in item_locations if location.internal}
# Generate a list of spheres by iterating over reachable locations without collecting as we go.
# Collecting every item in one sphere means that every item
# in the next sphere is collectable. Will contain every reachable item this way.
logger = logging.getLogger('')
logger.debug('Building up collection spheres.')
collection_spheres = []
entrance_spheres = []
remaining_entrances = set(entrance for world in worlds for entrance in world.get_shuffled_entrances())
while True:
search.checkpoint()
# Not collecting while the generator runs means we only get one sphere at a time
# Otherwise, an item we collect could influence later item collection in the same sphere
collected = list(search.iter_reachable_locations(item_locations))
if not collected: break
# Gather the new entrances before collecting items.
collection_spheres.append(collected)
accessed_entrances = set(filter(search.spot_access, remaining_entrances))
entrance_spheres.append(accessed_entrances)
remaining_entrances -= accessed_entrances
for location in collected:
            # Collect the item into the state of the world it belongs to
search.state_list[location.item.world.id].collect(location.item)
logger.info('Collected %d spheres', len(collection_spheres))
# Reduce each sphere in reverse order, by checking if the game is beatable
# when we remove the item. We do this to make sure that progressive items
# like bow and slingshot appear as early as possible rather than as late as possible.
required_locations = []
for sphere in reversed(collection_spheres):
for location in sphere:
# we remove the item at location and check if the game is still beatable in case the item could be required
old_item = location.item
# Uncollect the item and location.
search.state_list[old_item.world.id].remove(old_item)
search.unvisit(location)
# Generic events might show up or not, as usual, but since we don't
# show them in the final output, might as well skip over them. We'll
# still need them in the final pass, so make sure to include them.
if location.internal:
required_locations.append(location)
continue
location.item = None
# An item can only be required if it isn't already obtained or if it's progressive
if search.state_list[old_item.world.id].item_count(old_item.name) < old_item.world.max_progressions[old_item.name]:
# Test whether the game is still beatable from here.
logger.debug('Checking if %s is required to beat the game.', old_item.name)
if not search.can_beat_game():
# still required, so reset the item
location.item = old_item
required_locations.append(location)
# Reduce each entrance sphere in reverse order, by checking if the game is beatable when we disconnect the entrance.
required_entrances = []
for sphere in reversed(entrance_spheres):
for entrance in sphere:
# we disconnect the entrance and check if the game is still beatable
old_connected_region = entrance.disconnect()
# we use a new search to ensure the disconnected entrance is no longer used
sub_search = Search([world.state for world in worlds])
# Test whether the game is still beatable from here.
logger.debug('Checking if reaching %s, through %s, is required to beat the game.', old_connected_region.name, entrance.name)
if not sub_search.can_beat_game():
# still required, so reconnect the entrance
entrance.connect(old_connected_region)
required_entrances.append(entrance)
# Regenerate the spheres as we might not reach places the same way anymore.
search.reset() # search state has no items, okay to reuse sphere 0 cache
collection_spheres = []
entrance_spheres = []
remaining_entrances = set(required_entrances)
collected = set()
while True:
# Not collecting while the generator runs means we only get one sphere at a time
# Otherwise, an item we collect could influence later item collection in the same sphere
collected.update(search.iter_reachable_locations(required_locations))
if not collected: break
internal = collected & internal_locations
if internal:
# collect only the internal events but don't record them in a sphere
for location in internal:
search.state_list[location.item.world.id].collect(location.item)
# Remaining locations need to be saved to be collected later
collected -= internal
continue
# Gather the new entrances before collecting items.
collection_spheres.append(list(collected))
accessed_entrances = set(filter(search.spot_access, remaining_entrances))
entrance_spheres.append(accessed_entrances)
remaining_entrances -= accessed_entrances
for location in collected:
            # Collect the item into the state of the world it belongs to
search.state_list[location.item.world.id].collect(location.item)
collected.clear()
logger.info('Collected %d final spheres', len(collection_spheres))
# Then we can finally output our playthrough
spoiler.playthrough = OrderedDict((str(i + 1), {location: location.item for location in sphere}) for i, sphere in enumerate(collection_spheres))
if worlds[0].entrance_shuffle != 'off':
spoiler.entrance_playthrough = OrderedDict((str(i + 1), list(sphere)) for i, sphere in enumerate(entrance_spheres))
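

# --- Illustrative sketch (not part of the randomizer) ----------------------
# A minimal, self-contained toy version of the "collection spheres" idea the
# comments in create_playthrough() describe: repeatedly gather every location
# whose requirements are met by the items collected so far, record that batch
# as one sphere, then collect its items and continue. Location and item names
# here are made up for the example; only the looping structure mirrors the
# code above.
def _sphere_sketch():
    # location -> (items required to reach it, item found there)
    world = {
        'Chest A': (set(), 'Sword'),
        'Chest B': ({'Sword'}, 'Bow'),
        'Chest C': ({'Sword', 'Bow'}, 'Medallion'),
    }
    collected, spheres = set(), []
    remaining = dict(world)
    while remaining:
        sphere = [loc for loc, (reqs, _) in remaining.items() if reqs <= collected]
        if not sphere:
            break  # nothing reachable anymore; the rest is unbeatable
        spheres.append(sphere)
        for loc in sphere:
            collected.add(remaining.pop(loc)[1])
    return spheres  # [['Chest A'], ['Chest B'], ['Chest C']]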
|
StarcoderdataPython
|
3261923
|
"""db/models/ip.py
Database Model for the IP item
"""
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base
from .target import Target
Base = declarative_base()
class IP(Base):
__tablename__ = 'ips'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
ip = sa.Column(sa.VARCHAR(40))
target = sa.Column(sa.VARCHAR(20), sa.ForeignKey(Target.slug))
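

# --- Usage sketch (illustrative; not part of the original module) ----------
# Assumes a throwaway SQLite database. The foreign key above points at
# Target.slug; SQLite does not enforce foreign keys by default, so this
# sketch works even though only the `ips` table is created here.
def _ip_model_demo():
    from sqlalchemy.orm import Session
    engine = sa.create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)  # creates the `ips` table
    with Session(engine) as session:
        session.add(IP(ip="192.0.2.10", target="example-target"))
        session.commit()
        return session.query(IP).count()  # -> 1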
|
StarcoderdataPython
|
34469
|
<gh_stars>100-1000
# The following comments couldn't be translated into the new config version:
#
# keep only muon-related info here
#
import FWCore.ParameterSet.Config as cms
process = cms.Process("MISO")
process.load("Configuration.EventContent.EventContent_cff")
# service = MessageLogger {
# untracked vstring destinations = { "cout" }
# untracked vstring debugModules = { "muIsoDepositTk",
# "muIsoDepositCalByAssociatorHits",
# "muIsoDepositCalByAssociatorTowers",
# "muIsoDepositCal" }
# untracked vstring categories = { "RecoMuon" , "MuonIsolation" }
#
# untracked PSet cout = {
# untracked string threshold = "DEBUG"
# untracked int32 lineLength = 132
# untracked bool noLineBreaks = true
# untracked PSet DEBUG = {untracked int32 limit = 0 }
# untracked PSet RecoMuon = { untracked int32 limit = 10000000}
# untracked PSet MuonIsolation = { untracked int32 limit = 10000000}
# }
# }
process.load("FWCore.MessageLogger.MessageLogger_cfi")
#process.load("RecoLocalMuon.Configuration.RecoLocalMuon_cff")
#process.load("RecoMuon.Configuration.RecoMuon_cff")
process.load("Configuration.StandardSequences.Services_cff")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.load("Configuration.StandardSequences.FakeConditions_cff")
#process.load("Configuration.StandardSequences.RawToDigi_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
#has everything(?) one needs
# pick muIsolation sequence for "standard" iso reco for tracker and global muons
process.load("RecoMuon.MuonIsolationProducers.muIsolation_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2000)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/mc/2007/12/7/RelVal-RelValBJets_Pt_50_120-1197045102/0002/0A21A5F4-02A5-DC11-89F5-000423DD2F34.root')
)
process.source = cms.Source ("PoolSource",
fileNames = cms.untracked.vstring (
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/10438122-2A5F-DD11-A77F-000423D985E4.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/12F34420-2A5F-DD11-AB6E-000423D6CA6E.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/244E7C0B-315F-DD11-ACFC-001617E30F58.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/2ADD8A12-315F-DD11-8AB8-000423D6C8E6.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/34A291FB-305F-DD11-833E-001617C3B6CC.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/383E09CA-2C5F-DD11-9A28-000423D6BA18.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/40F0F8A4-2A5F-DD11-BC72-001617C3B64C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/4AD39C8C-2A5F-DD11-B935-001617C3B710.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/4C0D4911-315F-DD11-A20D-001617DBD332.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/4C32E425-2A5F-DD11-B819-000423D6C8EE.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/50881CBB-2A5F-DD11-92C6-001617C3B6E8.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/52B83F75-2A5F-DD11-AD56-001617C3B6CC.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/544DC99A-2A5F-DD11-9160-001617C3B6E2.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/62F7698D-2A5F-DD11-907A-001617C3B6DC.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/7C8A2791-2A5F-DD11-814D-001617DBCF6A.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/7EDA5005-315F-DD11-8019-001617C3B706.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/8A91E518-2A5F-DD11-B49A-000423D6B42C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/8CC497AE-2A5F-DD11-AE43-000423DD2F34.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9A469FA8-2A5F-DD11-9909-001617C3B6FE.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9A5BE3A4-2A5F-DD11-A61B-001617DF785A.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9AC2141C-2A5F-DD11-ADF5-000423D6A6F4.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/9CCFA319-2A5F-DD11-B0AA-000423D94700.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/A0F6C41D-2A5F-DD11-8685-000423D6BA18.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/B0159DAC-2A5F-DD11-98A8-001617E30D00.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/B05C32FC-305F-DD11-A957-001617C3B70E.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/C6ADD999-2A5F-DD11-AF9F-0016177CA7A0.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/C8AEE585-2A5F-DD11-BB37-001617C3B77C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/CC5178C4-2A5F-DD11-BCE6-001617E30F4C.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/CE9FE020-2A5F-DD11-9846-000423D6CA72.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/D24BFA7E-2A5F-DD11-8F79-001617C3B70E.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/D62761FA-305F-DD11-A108-0016177CA778.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/DA0DDFB6-2A5F-DD11-987A-001617DBD5B2.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/E64386FE-305F-DD11-BA68-0019DB29C614.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/E6BC0D37-2A5F-DD11-9ACB-000423D6B444.root',
'/store/relval/CMSSW_2_1_0_pre11/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/STARTUP_V4_v2/0000/F251D794-2A5F-DD11-BA5D-00161757BF42.root'
),
secondaryFileNames = cms.untracked.vstring (
)
)
process.RECO = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
fileName = cms.untracked.string('file:isoTest.root')
)
process.p1 = cms.Path(process.muIsolation)
process.outpath = cms.EndPath(process.RECO)
process.RECO.outputCommands.append('drop *_*_*_*')
process.RECO.outputCommands.extend(process.RecoMuonRECO.outputCommands)
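
# Usage note (not part of the original configuration): a config like this is
# normally executed with the CMSSW driver, e.g. `cmsRun <this_config>.py`;
# the muIsolation sequence scheduled in p1 then runs over the listed input
# files and the RECO output module writes file:isoTest.root.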
|
StarcoderdataPython
|
62202
|
import unittest
import invoiced
import responses
class TestTask(unittest.TestCase):
def setUp(self):
self.client = invoiced.Client('api_key')
def test_endpoint(self):
task = invoiced.Task(self.client, 123)
self.assertEqual('/tasks/123', task.endpoint())
@responses.activate
def test_create(self):
responses.add('POST', 'https://api.invoiced.com/tasks',
status=201,
json={"id": 123, "user_id": 234, "customer_id": 345,
"name": "<NAME>", "action": "phone",
"due_date": 1234567890})
task = invoiced.Task(self.client)
task = task.create(customer_id=345, user_id=234, name="<NAME>",
action="phone", due_date=1234567890)
self.assertIsInstance(task, invoiced.Task)
self.assertEqual(task.id, 123)
self.assertEqual(task.customer_id, 345)
self.assertEqual(task.name, "<NAME>")
@responses.activate
def test_retrieve(self):
responses.add('GET', 'https://api.invoiced.com/tasks/123',
status=200,
json={"id": 123, "user_id": 234, "customer_id": 345,
"name": "<NAME>", "action": "phone",
"due_date": 1234567890})
task = invoiced.Task(self.client)
task = task.retrieve(123)
self.assertIsInstance(task, invoiced.Task)
self.assertEqual(task.id, 123)
self.assertEqual(task.action, "phone")
def test_update_no_params(self):
task = invoiced.Task(self.client, 123)
self.assertFalse(task.save())
@responses.activate
def test_update(self):
responses.add('PATCH', 'https://api.invoiced.com/tasks/123',
status=200,
json={"id": 123, "user_id": 234, "customer_id": 345,
"name": "<NAME>", "action": "phone",
"due_date": 1234567890})
task = invoiced.Task(self.client, 123)
        task.name = "2nd Call"
self.assertTrue(task.save())
self.assertEqual(task.name, "2nd Call")
@responses.activate
def test_list(self):
responses.add('GET', 'https://api.invoiced.com/tasks',
status=200,
json=[{"id": 123, "user_id": 234, "customer_id": 345,
"name": "<NAME>", "action": "phone",
"due_date": 1234567890}],
adding_headers={
'x-total-count': '15',
'link': '<https://api.invoiced.com/tasks?per_page=25&page=1>; rel="self", <https://api.invoiced.com/tasks?per_page=25&page=1>; rel="first", <https://api.invoiced.com/tasks?per_page=25&page=1>; rel="last"'}) # noqa
task = invoiced.Task(self.client)
tasks, metadata = task.list()
self.assertIsInstance(tasks, list)
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0].id, 123)
self.assertIsInstance(metadata, invoiced.List)
self.assertEqual(metadata.total_count, 15)
@responses.activate
def test_delete(self):
responses.add('DELETE', 'https://api.invoiced.com/tasks/123',
status=204)
task = invoiced.Task(self.client, 123)
self.assertTrue(task.delete())
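

# --- Usage sketch (illustrative; not part of the test suite) ---------------
# Mirrors the calls exercised by the tests above, but against the live API.
# The API key and all field values below are made-up examples.
def _task_workflow_demo():
    client = invoiced.Client('api_key')
    task = invoiced.Task(client).create(customer_id=345, user_id=234,
                                        name="Initial Call", action="phone",
                                        due_date=1234567890)
    task.name = "Follow-up Call"
    task.save()                              # PATCH /tasks/<id>
    tasks, metadata = invoiced.Task(client).list()
    task.delete()                            # DELETE /tasks/<id>
    return len(tasks), metadata.total_count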
|
StarcoderdataPython
|
3382689
|
# Generated by Django 3.1.2 on 2021-01-28 10:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('elegislative_app', '0013_auto_20210122_1047'),
]
operations = [
migrations.AddField(
model_name='user',
name='is_arocc_manager',
            field=models.BooleanField(default=False, verbose_name='Can Manage Agenda, Resolution, Ordinance, Comments & Recommendation and Committee Reports'),
),
]
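

# Usage sketch (illustrative; not part of the migration). Once applied, the
# flag added above can drive permission checks; the import path below is an
# assumption about where the custom user model lives.
def _arocc_managers():
    from elegislative_app.models import User
    return User.objects.filter(is_arocc_manager=True)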
|
StarcoderdataPython
|
49350
|
# generated by 'clang2py'
# flags '-c -d -l ftd2xx64.dll ftd2xx.h -vvv -o _ftd2xx64.py'
# -*- coding: utf-8 -*-
#
# TARGET arch is: []
# WORD_SIZE is: 4
# POINTER_SIZE is: 8
# LONGDOUBLE_SIZE is: 8
#
import ctypes
# if local wordsize is same as target, keep ctypes pointer function.
if ctypes.sizeof(ctypes.c_void_p) == 8:
POINTER_T = ctypes.POINTER
else:
# required to access _ctypes
import _ctypes
    # Emulate a pointer class using the appropriate c_int32/c_int64 type
    # The new class should have :
    # ['__module__', 'from_param', '_type_', '__dict__', '__weakref__', '__doc__']
    # but the class should be limited to a unique instance for each base type
    # so that if A == B, POINTER_T(A) == POINTER_T(B)
ctypes._pointer_t_type_cache = {}
def POINTER_T(pointee):
# a pointer should have the same length as LONG
fake_ptr_base_type = ctypes.c_uint32
# specific case for c_void_p
if pointee is None: # VOID pointer type. c_void_p.
pointee = type(None) # ctypes.c_void_p # ctypes.c_ulong
clsname = 'c_void'
else:
clsname = pointee.__name__
if clsname in ctypes._pointer_t_type_cache:
return ctypes._pointer_t_type_cache[clsname]
# make template
class _T(_ctypes._SimpleCData,):
_type_ = 'L'
_subtype_ = pointee
def _sub_addr_(self):
return self.value
def __repr__(self):
return '%s(%d)'%(clsname, self.value)
def contents(self):
raise TypeError('This is not a ctypes pointer.')
def __init__(self, **args):
                raise TypeError('This is not a ctypes pointer. It is not instantiable.')
_class = type('LP_%d_%s'%(8, clsname), (_T,),{})
ctypes._pointer_t_type_cache[clsname] = _class
return _class
c_int128 = ctypes.c_ubyte*16
c_uint128 = c_int128
void = None
if ctypes.sizeof(ctypes.c_longdouble) == 8:
c_long_double_t = ctypes.c_longdouble
else:
c_long_double_t = ctypes.c_ubyte*8
_libraries = {}
_libraries['ftd2xx64.dll'] = ctypes.CDLL('ftd2xx64.dll')
PULONG = POINTER_T(ctypes.c_uint32)
PUCHAR = POINTER_T(ctypes.c_ubyte)
DWORD = ctypes.c_uint32
BOOL = ctypes.c_int32
WORD = ctypes.c_uint16
LPWORD = POINTER_T(ctypes.c_uint16)
LPLONG = POINTER_T(ctypes.c_int32)
LPDWORD = POINTER_T(ctypes.c_uint32)
LPVOID = POINTER_T(None)
ULONG = ctypes.c_uint32
UCHAR = ctypes.c_ubyte
USHORT = ctypes.c_uint16
class struct__SECURITY_ATTRIBUTES(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('nLength', ctypes.c_uint32),
('PADDING_0', ctypes.c_ubyte * 4),
('lpSecurityDescriptor', POINTER_T(None)),
('bInheritHandle', ctypes.c_int32),
('PADDING_1', ctypes.c_ubyte * 4),
]
LPSECURITY_ATTRIBUTES = POINTER_T(struct__SECURITY_ATTRIBUTES)
class struct__OVERLAPPED(ctypes.Structure):
pass
class union__OVERLAPPED_0(ctypes.Union):
pass
class struct__OVERLAPPED_0_0(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Offset', ctypes.c_uint32),
('OffsetHigh', ctypes.c_uint32),
]
union__OVERLAPPED_0._pack_ = True # source:False
union__OVERLAPPED_0._fields_ = [
('_0', struct__OVERLAPPED_0_0),
('Pointer', POINTER_T(None)),
]
struct__OVERLAPPED._pack_ = True # source:False
struct__OVERLAPPED._fields_ = [
('Internal', ctypes.c_uint64),
('InternalHigh', ctypes.c_uint64),
('_2', union__OVERLAPPED_0),
('hEvent', POINTER_T(None)),
]
LPOVERLAPPED = POINTER_T(struct__OVERLAPPED)
PVOID = POINTER_T(None)
PCHAR = POINTER_T(ctypes.c_char)
LPCTSTR = POINTER_T(ctypes.c_char)
HANDLE = POINTER_T(None)
FT_HANDLE = POINTER_T(None)
FT_STATUS = ctypes.c_uint32
# values for enumeration 'c__Ea_FT_OK'
FT_OK = 0
FT_INVALID_HANDLE = 1
FT_DEVICE_NOT_FOUND = 2
FT_DEVICE_NOT_OPENED = 3
FT_IO_ERROR = 4
FT_INSUFFICIENT_RESOURCES = 5
FT_INVALID_PARAMETER = 6
FT_INVALID_BAUD_RATE = 7
FT_DEVICE_NOT_OPENED_FOR_ERASE = 8
FT_DEVICE_NOT_OPENED_FOR_WRITE = 9
FT_FAILED_TO_WRITE_DEVICE = 10
FT_EEPROM_READ_FAILED = 11
FT_EEPROM_WRITE_FAILED = 12
FT_EEPROM_ERASE_FAILED = 13
FT_EEPROM_NOT_PRESENT = 14
FT_EEPROM_NOT_PROGRAMMED = 15
FT_INVALID_ARGS = 16
FT_NOT_SUPPORTED = 17
FT_OTHER_ERROR = 18
FT_DEVICE_LIST_NOT_READY = 19
c__Ea_FT_OK = ctypes.c_int # enum
PFT_EVENT_HANDLER = POINTER_T(ctypes.CFUNCTYPE(None, ctypes.c_uint32, ctypes.c_uint32))
FT_DEVICE = ctypes.c_uint32
# values for enumeration 'c__Ea_FT_DEVICE_BM'
FT_DEVICE_BM = 0
FT_DEVICE_AM = 1
FT_DEVICE_100AX = 2
FT_DEVICE_UNKNOWN = 3
FT_DEVICE_2232C = 4
FT_DEVICE_232R = 5
FT_DEVICE_2232H = 6
FT_DEVICE_4232H = 7
FT_DEVICE_232H = 8
FT_DEVICE_X_SERIES = 9
FT_DEVICE_4222H_0 = 10
FT_DEVICE_4222H_1_2 = 11
FT_DEVICE_4222H_3 = 12
FT_DEVICE_4222_PROG = 13
FT_DEVICE_900 = 14
FT_DEVICE_930 = 15
FT_DEVICE_UMFTPD3A = 16
c__Ea_FT_DEVICE_BM = ctypes.c_int # enum
FT_Open = _libraries['ftd2xx64.dll'].FT_Open
FT_Open.restype = FT_STATUS
# FT_Open(deviceNumber, pHandle)
FT_Open.argtypes = [ctypes.c_int32, POINTER_T(POINTER_T(None))]
FT_Open.__doc__ = \
"""FT_STATUS FT_Open(c_int32 deviceNumber, LP_LP_None pHandle)
ftd2xx.h:334"""
FT_OpenEx = _libraries['ftd2xx64.dll'].FT_OpenEx
FT_OpenEx.restype = FT_STATUS
# FT_OpenEx(pArg1, Flags, pHandle)
FT_OpenEx.argtypes = [PVOID, DWORD, POINTER_T(POINTER_T(None))]
FT_OpenEx.__doc__ = \
"""FT_STATUS FT_OpenEx(PVOID pArg1, DWORD Flags, LP_LP_None pHandle)
ftd2xx.h:340"""
FT_ListDevices = _libraries['ftd2xx64.dll'].FT_ListDevices
FT_ListDevices.restype = FT_STATUS
# FT_ListDevices(pArg1, pArg2, Flags)
FT_ListDevices.argtypes = [PVOID, PVOID, DWORD]
FT_ListDevices.__doc__ = \
"""FT_STATUS FT_ListDevices(PVOID pArg1, PVOID pArg2, DWORD Flags)
ftd2xx.h:347"""
FT_Close = _libraries['ftd2xx64.dll'].FT_Close
FT_Close.restype = FT_STATUS
# FT_Close(ftHandle)
FT_Close.argtypes = [FT_HANDLE]
FT_Close.__doc__ = \
"""FT_STATUS FT_Close(FT_HANDLE ftHandle)
ftd2xx.h:354"""
FT_Read = _libraries['ftd2xx64.dll'].FT_Read
FT_Read.restype = FT_STATUS
# FT_Read(ftHandle, lpBuffer, dwBytesToRead, lpBytesReturned)
FT_Read.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD]
FT_Read.__doc__ = \
"""FT_STATUS FT_Read(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD dwBytesToRead, LPDWORD lpBytesReturned)
ftd2xx.h:359"""
FT_Write = _libraries['ftd2xx64.dll'].FT_Write
FT_Write.restype = FT_STATUS
# FT_Write(ftHandle, lpBuffer, dwBytesToWrite, lpBytesWritten)
FT_Write.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD]
FT_Write.__doc__ = \
"""FT_STATUS FT_Write(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD dwBytesToWrite, LPDWORD lpBytesWritten)
ftd2xx.h:367"""
FT_IoCtl = _libraries['ftd2xx64.dll'].FT_IoCtl
FT_IoCtl.restype = FT_STATUS
# FT_IoCtl(ftHandle, dwIoControlCode, lpInBuf, nInBufSize, lpOutBuf, nOutBufSize, lpBytesReturned, lpOverlapped)
FT_IoCtl.argtypes = [FT_HANDLE, DWORD, LPVOID, DWORD, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
FT_IoCtl.__doc__ = \
"""FT_STATUS FT_IoCtl(FT_HANDLE ftHandle, DWORD dwIoControlCode, LPVOID lpInBuf, DWORD nInBufSize, LPVOID lpOutBuf, DWORD nOutBufSize, LPDWORD lpBytesReturned, LPOVERLAPPED lpOverlapped)
ftd2xx.h:375"""
FT_SetBaudRate = _libraries['ftd2xx64.dll'].FT_SetBaudRate
FT_SetBaudRate.restype = FT_STATUS
# FT_SetBaudRate(ftHandle, BaudRate)
FT_SetBaudRate.argtypes = [FT_HANDLE, ULONG]
FT_SetBaudRate.__doc__ = \
"""FT_STATUS FT_SetBaudRate(FT_HANDLE ftHandle, ULONG BaudRate)
ftd2xx.h:387"""
FT_SetDivisor = _libraries['ftd2xx64.dll'].FT_SetDivisor
FT_SetDivisor.restype = FT_STATUS
# FT_SetDivisor(ftHandle, Divisor)
FT_SetDivisor.argtypes = [FT_HANDLE, USHORT]
FT_SetDivisor.__doc__ = \
"""FT_STATUS FT_SetDivisor(FT_HANDLE ftHandle, USHORT Divisor)
ftd2xx.h:393"""
FT_SetDataCharacteristics = _libraries['ftd2xx64.dll'].FT_SetDataCharacteristics
FT_SetDataCharacteristics.restype = FT_STATUS
# FT_SetDataCharacteristics(ftHandle, WordLength, StopBits, Parity)
FT_SetDataCharacteristics.argtypes = [FT_HANDLE, UCHAR, UCHAR, UCHAR]
FT_SetDataCharacteristics.__doc__ = \
"""FT_STATUS FT_SetDataCharacteristics(FT_HANDLE ftHandle, UCHAR WordLength, UCHAR StopBits, UCHAR Parity)
ftd2xx.h:399"""
FT_SetFlowControl = _libraries['ftd2xx64.dll'].FT_SetFlowControl
FT_SetFlowControl.restype = FT_STATUS
# FT_SetFlowControl(ftHandle, FlowControl, XonChar, XoffChar)
FT_SetFlowControl.argtypes = [FT_HANDLE, USHORT, UCHAR, UCHAR]
FT_SetFlowControl.__doc__ = \
"""FT_STATUS FT_SetFlowControl(FT_HANDLE ftHandle, USHORT FlowControl, UCHAR XonChar, UCHAR XoffChar)
ftd2xx.h:407"""
FT_ResetDevice = _libraries['ftd2xx64.dll'].FT_ResetDevice
FT_ResetDevice.restype = FT_STATUS
# FT_ResetDevice(ftHandle)
FT_ResetDevice.argtypes = [FT_HANDLE]
FT_ResetDevice.__doc__ = \
"""FT_STATUS FT_ResetDevice(FT_HANDLE ftHandle)
ftd2xx.h:415"""
FT_SetDtr = _libraries['ftd2xx64.dll'].FT_SetDtr
FT_SetDtr.restype = FT_STATUS
# FT_SetDtr(ftHandle)
FT_SetDtr.argtypes = [FT_HANDLE]
FT_SetDtr.__doc__ = \
"""FT_STATUS FT_SetDtr(FT_HANDLE ftHandle)
ftd2xx.h:420"""
FT_ClrDtr = _libraries['ftd2xx64.dll'].FT_ClrDtr
FT_ClrDtr.restype = FT_STATUS
# FT_ClrDtr(ftHandle)
FT_ClrDtr.argtypes = [FT_HANDLE]
FT_ClrDtr.__doc__ = \
"""FT_STATUS FT_ClrDtr(FT_HANDLE ftHandle)
ftd2xx.h:425"""
FT_SetRts = _libraries['ftd2xx64.dll'].FT_SetRts
FT_SetRts.restype = FT_STATUS
# FT_SetRts(ftHandle)
FT_SetRts.argtypes = [FT_HANDLE]
FT_SetRts.__doc__ = \
"""FT_STATUS FT_SetRts(FT_HANDLE ftHandle)
ftd2xx.h:430"""
FT_ClrRts = _libraries['ftd2xx64.dll'].FT_ClrRts
FT_ClrRts.restype = FT_STATUS
# FT_ClrRts(ftHandle)
FT_ClrRts.argtypes = [FT_HANDLE]
FT_ClrRts.__doc__ = \
"""FT_STATUS FT_ClrRts(FT_HANDLE ftHandle)
ftd2xx.h:435"""
FT_GetModemStatus = _libraries['ftd2xx64.dll'].FT_GetModemStatus
FT_GetModemStatus.restype = FT_STATUS
# FT_GetModemStatus(ftHandle, pModemStatus)
FT_GetModemStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetModemStatus.__doc__ = \
"""FT_STATUS FT_GetModemStatus(FT_HANDLE ftHandle, LP_c_uint32 pModemStatus)
ftd2xx.h:440"""
FT_SetChars = _libraries['ftd2xx64.dll'].FT_SetChars
FT_SetChars.restype = FT_STATUS
# FT_SetChars(ftHandle, EventChar, EventCharEnabled, ErrorChar, ErrorCharEnabled)
FT_SetChars.argtypes = [FT_HANDLE, UCHAR, UCHAR, UCHAR, UCHAR]
FT_SetChars.__doc__ = \
"""FT_STATUS FT_SetChars(FT_HANDLE ftHandle, UCHAR EventChar, UCHAR EventCharEnabled, UCHAR ErrorChar, UCHAR ErrorCharEnabled)
ftd2xx.h:446"""
FT_Purge = _libraries['ftd2xx64.dll'].FT_Purge
FT_Purge.restype = FT_STATUS
# FT_Purge(ftHandle, Mask)
FT_Purge.argtypes = [FT_HANDLE, ULONG]
FT_Purge.__doc__ = \
"""FT_STATUS FT_Purge(FT_HANDLE ftHandle, ULONG Mask)
ftd2xx.h:455"""
FT_SetTimeouts = _libraries['ftd2xx64.dll'].FT_SetTimeouts
FT_SetTimeouts.restype = FT_STATUS
# FT_SetTimeouts(ftHandle, ReadTimeout, WriteTimeout)
FT_SetTimeouts.argtypes = [FT_HANDLE, ULONG, ULONG]
FT_SetTimeouts.__doc__ = \
"""FT_STATUS FT_SetTimeouts(FT_HANDLE ftHandle, ULONG ReadTimeout, ULONG WriteTimeout)
ftd2xx.h:461"""
FT_GetQueueStatus = _libraries['ftd2xx64.dll'].FT_GetQueueStatus
FT_GetQueueStatus.restype = FT_STATUS
# FT_GetQueueStatus(ftHandle, dwRxBytes)
FT_GetQueueStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetQueueStatus.__doc__ = \
"""FT_STATUS FT_GetQueueStatus(FT_HANDLE ftHandle, LP_c_uint32 dwRxBytes)
ftd2xx.h:468"""
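
# --- Usage sketch (hand-written illustration; not produced by clang2py) ----
# A minimal open/configure/write/read round trip using the bindings defined
# above. Device index 0, 115200 baud, 8N1 framing and 1 second timeouts are
# arbitrary example values; error handling is reduced to checking FT_OK.
def _d2xx_roundtrip_sketch(payload=b'ping'):
    handle = ctypes.c_void_p()
    if FT_Open(0, ctypes.byref(handle)) != FT_OK:
        raise RuntimeError('FT_Open failed')
    try:
        FT_SetBaudRate(handle, 115200)
        FT_SetDataCharacteristics(handle, 8, 0, 0)  # 8 data bits, 1 stop bit, no parity
        FT_SetTimeouts(handle, 1000, 1000)          # read/write timeouts in ms
        written = ctypes.c_uint32(0)
        FT_Write(handle, payload, len(payload), ctypes.byref(written))
        buf = ctypes.create_string_buffer(64)
        returned = ctypes.c_uint32(0)
        FT_Read(handle, buf, ctypes.sizeof(buf), ctypes.byref(returned))
        return buf.raw[:returned.value]
    finally:
        FT_Close(handle)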
FT_SetEventNotification = _libraries['ftd2xx64.dll'].FT_SetEventNotification
FT_SetEventNotification.restype = FT_STATUS
# FT_SetEventNotification(ftHandle, Mask, Param)
FT_SetEventNotification.argtypes = [FT_HANDLE, DWORD, PVOID]
FT_SetEventNotification.__doc__ = \
"""FT_STATUS FT_SetEventNotification(FT_HANDLE ftHandle, DWORD Mask, PVOID Param)
ftd2xx.h:474"""
FT_GetStatus = _libraries['ftd2xx64.dll'].FT_GetStatus
FT_GetStatus.restype = FT_STATUS
# FT_GetStatus(ftHandle, dwRxBytes, dwTxBytes, dwEventDWord)
FT_GetStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32), POINTER_T(ctypes.c_uint32), POINTER_T(ctypes.c_uint32)]
FT_GetStatus.__doc__ = \
"""FT_STATUS FT_GetStatus(FT_HANDLE ftHandle, LP_c_uint32 dwRxBytes, LP_c_uint32 dwTxBytes, LP_c_uint32 dwEventDWord)
ftd2xx.h:481"""
FT_SetBreakOn = _libraries['ftd2xx64.dll'].FT_SetBreakOn
FT_SetBreakOn.restype = FT_STATUS
# FT_SetBreakOn(ftHandle)
FT_SetBreakOn.argtypes = [FT_HANDLE]
FT_SetBreakOn.__doc__ = \
"""FT_STATUS FT_SetBreakOn(FT_HANDLE ftHandle)
ftd2xx.h:489"""
FT_SetBreakOff = _libraries['ftd2xx64.dll'].FT_SetBreakOff
FT_SetBreakOff.restype = FT_STATUS
# FT_SetBreakOff(ftHandle)
FT_SetBreakOff.argtypes = [FT_HANDLE]
FT_SetBreakOff.__doc__ = \
"""FT_STATUS FT_SetBreakOff(FT_HANDLE ftHandle)
ftd2xx.h:494"""
FT_SetWaitMask = _libraries['ftd2xx64.dll'].FT_SetWaitMask
FT_SetWaitMask.restype = FT_STATUS
# FT_SetWaitMask(ftHandle, Mask)
FT_SetWaitMask.argtypes = [FT_HANDLE, DWORD]
FT_SetWaitMask.__doc__ = \
"""FT_STATUS FT_SetWaitMask(FT_HANDLE ftHandle, DWORD Mask)
ftd2xx.h:499"""
FT_WaitOnMask = _libraries['ftd2xx64.dll'].FT_WaitOnMask
FT_WaitOnMask.restype = FT_STATUS
# FT_WaitOnMask(ftHandle, Mask)
FT_WaitOnMask.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_WaitOnMask.__doc__ = \
"""FT_STATUS FT_WaitOnMask(FT_HANDLE ftHandle, LP_c_uint32 Mask)
ftd2xx.h:505"""
FT_GetEventStatus = _libraries['ftd2xx64.dll'].FT_GetEventStatus
FT_GetEventStatus.restype = FT_STATUS
# FT_GetEventStatus(ftHandle, dwEventDWord)
FT_GetEventStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetEventStatus.__doc__ = \
"""FT_STATUS FT_GetEventStatus(FT_HANDLE ftHandle, LP_c_uint32 dwEventDWord)
ftd2xx.h:511"""
FT_ReadEE = _libraries['ftd2xx64.dll'].FT_ReadEE
FT_ReadEE.restype = FT_STATUS
# FT_ReadEE(ftHandle, dwWordOffset, lpwValue)
FT_ReadEE.argtypes = [FT_HANDLE, DWORD, LPWORD]
FT_ReadEE.__doc__ = \
"""FT_STATUS FT_ReadEE(FT_HANDLE ftHandle, DWORD dwWordOffset, LPWORD lpwValue)
ftd2xx.h:517"""
FT_WriteEE = _libraries['ftd2xx64.dll'].FT_WriteEE
FT_WriteEE.restype = FT_STATUS
# FT_WriteEE(ftHandle, dwWordOffset, wValue)
FT_WriteEE.argtypes = [FT_HANDLE, DWORD, WORD]
FT_WriteEE.__doc__ = \
"""FT_STATUS FT_WriteEE(FT_HANDLE ftHandle, DWORD dwWordOffset, WORD wValue)
ftd2xx.h:524"""
FT_EraseEE = _libraries['ftd2xx64.dll'].FT_EraseEE
FT_EraseEE.restype = FT_STATUS
# FT_EraseEE(ftHandle)
FT_EraseEE.argtypes = [FT_HANDLE]
FT_EraseEE.__doc__ = \
"""FT_STATUS FT_EraseEE(FT_HANDLE ftHandle)
ftd2xx.h:531"""
class struct_ft_program_data(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Signature1', ctypes.c_uint32),
('Signature2', ctypes.c_uint32),
('Version', ctypes.c_uint32),
('VendorId', ctypes.c_uint16),
('ProductId', ctypes.c_uint16),
('Manufacturer', POINTER_T(ctypes.c_char)),
('ManufacturerId', POINTER_T(ctypes.c_char)),
('Description', POINTER_T(ctypes.c_char)),
('SerialNumber', POINTER_T(ctypes.c_char)),
('MaxPower', ctypes.c_uint16),
('PnP', ctypes.c_uint16),
('SelfPowered', ctypes.c_uint16),
('RemoteWakeup', ctypes.c_uint16),
('Rev4', ctypes.c_ubyte),
('IsoIn', ctypes.c_ubyte),
('IsoOut', ctypes.c_ubyte),
('PullDownEnable', ctypes.c_ubyte),
('SerNumEnable', ctypes.c_ubyte),
('USBVersionEnable', ctypes.c_ubyte),
('USBVersion', ctypes.c_uint16),
('Rev5', ctypes.c_ubyte),
('IsoInA', ctypes.c_ubyte),
('IsoInB', ctypes.c_ubyte),
('IsoOutA', ctypes.c_ubyte),
('IsoOutB', ctypes.c_ubyte),
('PullDownEnable5', ctypes.c_ubyte),
('SerNumEnable5', ctypes.c_ubyte),
('USBVersionEnable5', ctypes.c_ubyte),
('USBVersion5', ctypes.c_uint16),
('AIsHighCurrent', ctypes.c_ubyte),
('BIsHighCurrent', ctypes.c_ubyte),
('IFAIsFifo', ctypes.c_ubyte),
('IFAIsFifoTar', ctypes.c_ubyte),
('IFAIsFastSer', ctypes.c_ubyte),
('AIsVCP', ctypes.c_ubyte),
('IFBIsFifo', ctypes.c_ubyte),
('IFBIsFifoTar', ctypes.c_ubyte),
('IFBIsFastSer', ctypes.c_ubyte),
('BIsVCP', ctypes.c_ubyte),
('UseExtOsc', ctypes.c_ubyte),
('HighDriveIOs', ctypes.c_ubyte),
('EndpointSize', ctypes.c_ubyte),
('PullDownEnableR', ctypes.c_ubyte),
('SerNumEnableR', ctypes.c_ubyte),
('InvertTXD', ctypes.c_ubyte),
('InvertRXD', ctypes.c_ubyte),
('InvertRTS', ctypes.c_ubyte),
('InvertCTS', ctypes.c_ubyte),
('InvertDTR', ctypes.c_ubyte),
('InvertDSR', ctypes.c_ubyte),
('InvertDCD', ctypes.c_ubyte),
('InvertRI', ctypes.c_ubyte),
('Cbus0', ctypes.c_ubyte),
('Cbus1', ctypes.c_ubyte),
('Cbus2', ctypes.c_ubyte),
('Cbus3', ctypes.c_ubyte),
('Cbus4', ctypes.c_ubyte),
('RIsD2XX', ctypes.c_ubyte),
('PullDownEnable7', ctypes.c_ubyte),
('SerNumEnable7', ctypes.c_ubyte),
('ALSlowSlew', ctypes.c_ubyte),
('ALSchmittInput', ctypes.c_ubyte),
('ALDriveCurrent', ctypes.c_ubyte),
('AHSlowSlew', ctypes.c_ubyte),
('AHSchmittInput', ctypes.c_ubyte),
('AHDriveCurrent', ctypes.c_ubyte),
('BLSlowSlew', ctypes.c_ubyte),
('BLSchmittInput', ctypes.c_ubyte),
('BLDriveCurrent', ctypes.c_ubyte),
('BHSlowSlew', ctypes.c_ubyte),
('BHSchmittInput', ctypes.c_ubyte),
('BHDriveCurrent', ctypes.c_ubyte),
('IFAIsFifo7', ctypes.c_ubyte),
('IFAIsFifoTar7', ctypes.c_ubyte),
('IFAIsFastSer7', ctypes.c_ubyte),
('AIsVCP7', ctypes.c_ubyte),
('IFBIsFifo7', ctypes.c_ubyte),
('IFBIsFifoTar7', ctypes.c_ubyte),
('IFBIsFastSer7', ctypes.c_ubyte),
('BIsVCP7', ctypes.c_ubyte),
('PowerSaveEnable', ctypes.c_ubyte),
('PullDownEnable8', ctypes.c_ubyte),
('SerNumEnable8', ctypes.c_ubyte),
('ASlowSlew', ctypes.c_ubyte),
('ASchmittInput', ctypes.c_ubyte),
('ADriveCurrent', ctypes.c_ubyte),
('BSlowSlew', ctypes.c_ubyte),
('BSchmittInput', ctypes.c_ubyte),
('BDriveCurrent', ctypes.c_ubyte),
('CSlowSlew', ctypes.c_ubyte),
('CSchmittInput', ctypes.c_ubyte),
('CDriveCurrent', ctypes.c_ubyte),
('DSlowSlew', ctypes.c_ubyte),
('DSchmittInput', ctypes.c_ubyte),
('DDriveCurrent', ctypes.c_ubyte),
('ARIIsTXDEN', ctypes.c_ubyte),
('BRIIsTXDEN', ctypes.c_ubyte),
('CRIIsTXDEN', ctypes.c_ubyte),
('DRIIsTXDEN', ctypes.c_ubyte),
('AIsVCP8', ctypes.c_ubyte),
('BIsVCP8', ctypes.c_ubyte),
('CIsVCP8', ctypes.c_ubyte),
('DIsVCP8', ctypes.c_ubyte),
('PullDownEnableH', ctypes.c_ubyte),
('SerNumEnableH', ctypes.c_ubyte),
('ACSlowSlewH', ctypes.c_ubyte),
('ACSchmittInputH', ctypes.c_ubyte),
('ACDriveCurrentH', ctypes.c_ubyte),
('ADSlowSlewH', ctypes.c_ubyte),
('ADSchmittInputH', ctypes.c_ubyte),
('ADDriveCurrentH', ctypes.c_ubyte),
('Cbus0H', ctypes.c_ubyte),
('Cbus1H', ctypes.c_ubyte),
('Cbus2H', ctypes.c_ubyte),
('Cbus3H', ctypes.c_ubyte),
('Cbus4H', ctypes.c_ubyte),
('Cbus5H', ctypes.c_ubyte),
('Cbus6H', ctypes.c_ubyte),
('Cbus7H', ctypes.c_ubyte),
('Cbus8H', ctypes.c_ubyte),
('Cbus9H', ctypes.c_ubyte),
('IsFifoH', ctypes.c_ubyte),
('IsFifoTarH', ctypes.c_ubyte),
('IsFastSerH', ctypes.c_ubyte),
('IsFT1248H', ctypes.c_ubyte),
('FT1248CpolH', ctypes.c_ubyte),
('FT1248LsbH', ctypes.c_ubyte),
('FT1248FlowControlH', ctypes.c_ubyte),
('IsVCPH', ctypes.c_ubyte),
('PowerSaveEnableH', ctypes.c_ubyte),
('PADDING_0', ctypes.c_ubyte),
]
FT_PROGRAM_DATA = struct_ft_program_data
PFT_PROGRAM_DATA = POINTER_T(struct_ft_program_data)
FT_EE_Program = _libraries['ftd2xx64.dll'].FT_EE_Program
FT_EE_Program.restype = FT_STATUS
# FT_EE_Program(ftHandle, pData)
FT_EE_Program.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA]
FT_EE_Program.__doc__ = \
"""FT_STATUS FT_EE_Program(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData)
ftd2xx.h:700"""
FT_EE_ProgramEx = _libraries['ftd2xx64.dll'].FT_EE_ProgramEx
FT_EE_ProgramEx.restype = FT_STATUS
# FT_EE_ProgramEx(ftHandle, pData, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EE_ProgramEx.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EE_ProgramEx.__doc__ = \
"""FT_STATUS FT_EE_ProgramEx(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:706"""
FT_EE_Read = _libraries['ftd2xx64.dll'].FT_EE_Read
FT_EE_Read.restype = FT_STATUS
# FT_EE_Read(ftHandle, pData)
FT_EE_Read.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA]
FT_EE_Read.__doc__ = \
"""FT_STATUS FT_EE_Read(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData)
ftd2xx.h:716"""
FT_EE_ReadEx = _libraries['ftd2xx64.dll'].FT_EE_ReadEx
FT_EE_ReadEx.restype = FT_STATUS
# FT_EE_ReadEx(ftHandle, pData, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EE_ReadEx.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EE_ReadEx.__doc__ = \
"""FT_STATUS FT_EE_ReadEx(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:722"""
FT_EE_UASize = _libraries['ftd2xx64.dll'].FT_EE_UASize
FT_EE_UASize.restype = FT_STATUS
# FT_EE_UASize(ftHandle, lpdwSize)
FT_EE_UASize.argtypes = [FT_HANDLE, LPDWORD]
FT_EE_UASize.__doc__ = \
"""FT_STATUS FT_EE_UASize(FT_HANDLE ftHandle, LPDWORD lpdwSize)
ftd2xx.h:732"""
FT_EE_UAWrite = _libraries['ftd2xx64.dll'].FT_EE_UAWrite
FT_EE_UAWrite.restype = FT_STATUS
# FT_EE_UAWrite(ftHandle, pucData, dwDataLen)
FT_EE_UAWrite.argtypes = [FT_HANDLE, PUCHAR, DWORD]
FT_EE_UAWrite.__doc__ = \
"""FT_STATUS FT_EE_UAWrite(FT_HANDLE ftHandle, PUCHAR pucData, DWORD dwDataLen)
ftd2xx.h:738"""
FT_EE_UARead = _libraries['ftd2xx64.dll'].FT_EE_UARead
FT_EE_UARead.restype = FT_STATUS
# FT_EE_UARead(ftHandle, pucData, dwDataLen, lpdwBytesRead)
FT_EE_UARead.argtypes = [FT_HANDLE, PUCHAR, DWORD, LPDWORD]
FT_EE_UARead.__doc__ = \
"""FT_STATUS FT_EE_UARead(FT_HANDLE ftHandle, PUCHAR pucData, DWORD dwDataLen, LPDWORD lpdwBytesRead)
ftd2xx.h:745"""
class struct_ft_eeprom_header(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('deviceType', ctypes.c_uint32),
('VendorId', ctypes.c_uint16),
('ProductId', ctypes.c_uint16),
('SerNumEnable', ctypes.c_ubyte),
('PADDING_0', ctypes.c_ubyte),
('MaxPower', ctypes.c_uint16),
('SelfPowered', ctypes.c_ubyte),
('RemoteWakeup', ctypes.c_ubyte),
('PullDownEnable', ctypes.c_ubyte),
('PADDING_1', ctypes.c_ubyte),
]
FT_EEPROM_HEADER = struct_ft_eeprom_header
PFT_EEPROM_HEADER = POINTER_T(struct_ft_eeprom_header)
class struct_ft_eeprom_232b(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('common', FT_EEPROM_HEADER),
]
FT_EEPROM_232B = struct_ft_eeprom_232b
PFT_EEPROM_232B = POINTER_T(struct_ft_eeprom_232b)
class struct_ft_eeprom_2232(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('common', FT_EEPROM_HEADER),
('AIsHighCurrent', ctypes.c_ubyte),
('BIsHighCurrent', ctypes.c_ubyte),
('AIsFifo', ctypes.c_ubyte),
('AIsFifoTar', ctypes.c_ubyte),
('AIsFastSer', ctypes.c_ubyte),
('BIsFifo', ctypes.c_ubyte),
('BIsFifoTar', ctypes.c_ubyte),
('BIsFastSer', ctypes.c_ubyte),
('ADriverType', ctypes.c_ubyte),
('BDriverType', ctypes.c_ubyte),
('PADDING_0', ctypes.c_ubyte * 2),
]
FT_EEPROM_2232 = struct_ft_eeprom_2232
PFT_EEPROM_2232 = POINTER_T(struct_ft_eeprom_2232)
class struct_ft_eeprom_232r(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('common', FT_EEPROM_HEADER),
('IsHighCurrent', ctypes.c_ubyte),
('UseExtOsc', ctypes.c_ubyte),
('InvertTXD', ctypes.c_ubyte),
('InvertRXD', ctypes.c_ubyte),
('InvertRTS', ctypes.c_ubyte),
('InvertCTS', ctypes.c_ubyte),
('InvertDTR', ctypes.c_ubyte),
('InvertDSR', ctypes.c_ubyte),
('InvertDCD', ctypes.c_ubyte),
('InvertRI', ctypes.c_ubyte),
('Cbus0', ctypes.c_ubyte),
('Cbus1', ctypes.c_ubyte),
('Cbus2', ctypes.c_ubyte),
('Cbus3', ctypes.c_ubyte),
('Cbus4', ctypes.c_ubyte),
('DriverType', ctypes.c_ubyte),
]
FT_EEPROM_232R = struct_ft_eeprom_232r
PFT_EEPROM_232R = POINTER_T(struct_ft_eeprom_232r)
class struct_ft_eeprom_2232h(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('common', FT_EEPROM_HEADER),
('ALSlowSlew', ctypes.c_ubyte),
('ALSchmittInput', ctypes.c_ubyte),
('ALDriveCurrent', ctypes.c_ubyte),
('AHSlowSlew', ctypes.c_ubyte),
('AHSchmittInput', ctypes.c_ubyte),
('AHDriveCurrent', ctypes.c_ubyte),
('BLSlowSlew', ctypes.c_ubyte),
('BLSchmittInput', ctypes.c_ubyte),
('BLDriveCurrent', ctypes.c_ubyte),
('BHSlowSlew', ctypes.c_ubyte),
('BHSchmittInput', ctypes.c_ubyte),
('BHDriveCurrent', ctypes.c_ubyte),
('AIsFifo', ctypes.c_ubyte),
('AIsFifoTar', ctypes.c_ubyte),
('AIsFastSer', ctypes.c_ubyte),
('BIsFifo', ctypes.c_ubyte),
('BIsFifoTar', ctypes.c_ubyte),
('BIsFastSer', ctypes.c_ubyte),
('PowerSaveEnable', ctypes.c_ubyte),
('ADriverType', ctypes.c_ubyte),
('BDriverType', ctypes.c_ubyte),
('PADDING_0', ctypes.c_ubyte * 3),
]
FT_EEPROM_2232H = struct_ft_eeprom_2232h
PFT_EEPROM_2232H = POINTER_T(struct_ft_eeprom_2232h)
class struct_ft_eeprom_4232h(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('common', FT_EEPROM_HEADER),
('ASlowSlew', ctypes.c_ubyte),
('ASchmittInput', ctypes.c_ubyte),
('ADriveCurrent', ctypes.c_ubyte),
('BSlowSlew', ctypes.c_ubyte),
('BSchmittInput', ctypes.c_ubyte),
('BDriveCurrent', ctypes.c_ubyte),
('CSlowSlew', ctypes.c_ubyte),
('CSchmittInput', ctypes.c_ubyte),
('CDriveCurrent', ctypes.c_ubyte),
('DSlowSlew', ctypes.c_ubyte),
('DSchmittInput', ctypes.c_ubyte),
('DDriveCurrent', ctypes.c_ubyte),
('ARIIsTXDEN', ctypes.c_ubyte),
('BRIIsTXDEN', ctypes.c_ubyte),
('CRIIsTXDEN', ctypes.c_ubyte),
('DRIIsTXDEN', ctypes.c_ubyte),
('ADriverType', ctypes.c_ubyte),
('BDriverType', ctypes.c_ubyte),
('CDriverType', ctypes.c_ubyte),
('DDriverType', ctypes.c_ubyte),
]
FT_EEPROM_4232H = struct_ft_eeprom_4232h
PFT_EEPROM_4232H = POINTER_T(struct_ft_eeprom_4232h)
class struct_ft_eeprom_232h(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('common', FT_EEPROM_HEADER),
('ACSlowSlew', ctypes.c_ubyte),
('ACSchmittInput', ctypes.c_ubyte),
('ACDriveCurrent', ctypes.c_ubyte),
('ADSlowSlew', ctypes.c_ubyte),
('ADSchmittInput', ctypes.c_ubyte),
('ADDriveCurrent', ctypes.c_ubyte),
('Cbus0', ctypes.c_ubyte),
('Cbus1', ctypes.c_ubyte),
('Cbus2', ctypes.c_ubyte),
('Cbus3', ctypes.c_ubyte),
('Cbus4', ctypes.c_ubyte),
('Cbus5', ctypes.c_ubyte),
('Cbus6', ctypes.c_ubyte),
('Cbus7', ctypes.c_ubyte),
('Cbus8', ctypes.c_ubyte),
('Cbus9', ctypes.c_ubyte),
('FT1248Cpol', ctypes.c_ubyte),
('FT1248Lsb', ctypes.c_ubyte),
('FT1248FlowControl', ctypes.c_ubyte),
('IsFifo', ctypes.c_ubyte),
('IsFifoTar', ctypes.c_ubyte),
('IsFastSer', ctypes.c_ubyte),
('IsFT1248', ctypes.c_ubyte),
('PowerSaveEnable', ctypes.c_ubyte),
('DriverType', ctypes.c_ubyte),
('PADDING_0', ctypes.c_ubyte * 3),
]
FT_EEPROM_232H = struct_ft_eeprom_232h
PFT_EEPROM_232H = POINTER_T(struct_ft_eeprom_232h)
class struct_ft_eeprom_x_series(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('common', FT_EEPROM_HEADER),
('ACSlowSlew', ctypes.c_ubyte),
('ACSchmittInput', ctypes.c_ubyte),
('ACDriveCurrent', ctypes.c_ubyte),
('ADSlowSlew', ctypes.c_ubyte),
('ADSchmittInput', ctypes.c_ubyte),
('ADDriveCurrent', ctypes.c_ubyte),
('Cbus0', ctypes.c_ubyte),
('Cbus1', ctypes.c_ubyte),
('Cbus2', ctypes.c_ubyte),
('Cbus3', ctypes.c_ubyte),
('Cbus4', ctypes.c_ubyte),
('Cbus5', ctypes.c_ubyte),
('Cbus6', ctypes.c_ubyte),
('InvertTXD', ctypes.c_ubyte),
('InvertRXD', ctypes.c_ubyte),
('InvertRTS', ctypes.c_ubyte),
('InvertCTS', ctypes.c_ubyte),
('InvertDTR', ctypes.c_ubyte),
('InvertDSR', ctypes.c_ubyte),
('InvertDCD', ctypes.c_ubyte),
('InvertRI', ctypes.c_ubyte),
('BCDEnable', ctypes.c_ubyte),
('BCDForceCbusPWREN', ctypes.c_ubyte),
('BCDDisableSleep', ctypes.c_ubyte),
('I2CSlaveAddress', ctypes.c_uint16),
('PADDING_0', ctypes.c_ubyte * 2),
('I2CDeviceId', ctypes.c_uint32),
('I2CDisableSchmitt', ctypes.c_ubyte),
('FT1248Cpol', ctypes.c_ubyte),
('FT1248Lsb', ctypes.c_ubyte),
('FT1248FlowControl', ctypes.c_ubyte),
('RS485EchoSuppress', ctypes.c_ubyte),
('PowerSaveEnable', ctypes.c_ubyte),
('DriverType', ctypes.c_ubyte),
('PADDING_1', ctypes.c_ubyte),
]
FT_EEPROM_X_SERIES = struct_ft_eeprom_x_series
PFT_EEPROM_X_SERIES = POINTER_T(struct_ft_eeprom_x_series)
FT_EEPROM_Read = _libraries['ftd2xx64.dll'].FT_EEPROM_Read
FT_EEPROM_Read.restype = FT_STATUS
# FT_EEPROM_Read(ftHandle, eepromData, eepromDataSize, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EEPROM_Read.argtypes = [FT_HANDLE, POINTER_T(None), DWORD, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EEPROM_Read.__doc__ = \
"""FT_STATUS FT_EEPROM_Read(FT_HANDLE ftHandle, LP_None eepromData, DWORD eepromDataSize, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:968"""
FT_EEPROM_Program = _libraries['ftd2xx64.dll'].FT_EEPROM_Program
FT_EEPROM_Program.restype = FT_STATUS
# FT_EEPROM_Program(ftHandle, eepromData, eepromDataSize, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EEPROM_Program.argtypes = [FT_HANDLE, POINTER_T(None), DWORD, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EEPROM_Program.__doc__ = \
"""FT_STATUS FT_EEPROM_Program(FT_HANDLE ftHandle, LP_None eepromData, DWORD eepromDataSize, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:980"""
FT_SetLatencyTimer = _libraries['ftd2xx64.dll'].FT_SetLatencyTimer
FT_SetLatencyTimer.restype = FT_STATUS
# FT_SetLatencyTimer(ftHandle, ucLatency)
FT_SetLatencyTimer.argtypes = [FT_HANDLE, UCHAR]
FT_SetLatencyTimer.__doc__ = \
"""FT_STATUS FT_SetLatencyTimer(FT_HANDLE ftHandle, UCHAR ucLatency)
ftd2xx.h:992"""
FT_GetLatencyTimer = _libraries['ftd2xx64.dll'].FT_GetLatencyTimer
FT_GetLatencyTimer.restype = FT_STATUS
# FT_GetLatencyTimer(ftHandle, pucLatency)
FT_GetLatencyTimer.argtypes = [FT_HANDLE, PUCHAR]
FT_GetLatencyTimer.__doc__ = \
"""FT_STATUS FT_GetLatencyTimer(FT_HANDLE ftHandle, PUCHAR pucLatency)
ftd2xx.h:998"""
FT_SetBitMode = _libraries['ftd2xx64.dll'].FT_SetBitMode
FT_SetBitMode.restype = FT_STATUS
# FT_SetBitMode(ftHandle, ucMask, ucEnable)
FT_SetBitMode.argtypes = [FT_HANDLE, UCHAR, UCHAR]
FT_SetBitMode.__doc__ = \
"""FT_STATUS FT_SetBitMode(FT_HANDLE ftHandle, UCHAR ucMask, UCHAR ucEnable)
ftd2xx.h:1004"""
FT_GetBitMode = _libraries['ftd2xx64.dll'].FT_GetBitMode
FT_GetBitMode.restype = FT_STATUS
# FT_GetBitMode(ftHandle, pucMode)
FT_GetBitMode.argtypes = [FT_HANDLE, PUCHAR]
FT_GetBitMode.__doc__ = \
"""FT_STATUS FT_GetBitMode(FT_HANDLE ftHandle, PUCHAR pucMode)
ftd2xx.h:1011"""
FT_SetUSBParameters = _libraries['ftd2xx64.dll'].FT_SetUSBParameters
FT_SetUSBParameters.restype = FT_STATUS
# FT_SetUSBParameters(ftHandle, ulInTransferSize, ulOutTransferSize)
FT_SetUSBParameters.argtypes = [FT_HANDLE, ULONG, ULONG]
FT_SetUSBParameters.__doc__ = \
"""FT_STATUS FT_SetUSBParameters(FT_HANDLE ftHandle, ULONG ulInTransferSize, ULONG ulOutTransferSize)
ftd2xx.h:1017"""
FT_SetDeadmanTimeout = _libraries['ftd2xx64.dll'].FT_SetDeadmanTimeout
FT_SetDeadmanTimeout.restype = FT_STATUS
# FT_SetDeadmanTimeout(ftHandle, ulDeadmanTimeout)
FT_SetDeadmanTimeout.argtypes = [FT_HANDLE, ULONG]
FT_SetDeadmanTimeout.__doc__ = \
"""FT_STATUS FT_SetDeadmanTimeout(FT_HANDLE ftHandle, ULONG ulDeadmanTimeout)
ftd2xx.h:1024"""
FT_GetDeviceInfo = _libraries['ftd2xx64.dll'].FT_GetDeviceInfo
FT_GetDeviceInfo.restype = FT_STATUS
# FT_GetDeviceInfo(ftHandle, lpftDevice, lpdwID, SerialNumber, Description, Dummy)
FT_GetDeviceInfo.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32), LPDWORD, PCHAR, PCHAR, LPVOID]
FT_GetDeviceInfo.__doc__ = \
"""FT_STATUS FT_GetDeviceInfo(FT_HANDLE ftHandle, LP_c_uint32 lpftDevice, LPDWORD lpdwID, PCHAR SerialNumber, PCHAR Description, LPVOID Dummy)
ftd2xx.h:1053"""
FT_StopInTask = _libraries['ftd2xx64.dll'].FT_StopInTask
FT_StopInTask.restype = FT_STATUS
# FT_StopInTask(ftHandle)
FT_StopInTask.argtypes = [FT_HANDLE]
FT_StopInTask.__doc__ = \
"""FT_STATUS FT_StopInTask(FT_HANDLE ftHandle)
ftd2xx.h:1063"""
FT_RestartInTask = _libraries['ftd2xx64.dll'].FT_RestartInTask
FT_RestartInTask.restype = FT_STATUS
# FT_RestartInTask(ftHandle)
FT_RestartInTask.argtypes = [FT_HANDLE]
FT_RestartInTask.__doc__ = \
"""FT_STATUS FT_RestartInTask(FT_HANDLE ftHandle)
ftd2xx.h:1068"""
FT_SetResetPipeRetryCount = _libraries['ftd2xx64.dll'].FT_SetResetPipeRetryCount
FT_SetResetPipeRetryCount.restype = FT_STATUS
# FT_SetResetPipeRetryCount(ftHandle, dwCount)
FT_SetResetPipeRetryCount.argtypes = [FT_HANDLE, DWORD]
FT_SetResetPipeRetryCount.__doc__ = \
"""FT_STATUS FT_SetResetPipeRetryCount(FT_HANDLE ftHandle, DWORD dwCount)
ftd2xx.h:1073"""
FT_ResetPort = _libraries['ftd2xx64.dll'].FT_ResetPort
FT_ResetPort.restype = FT_STATUS
# FT_ResetPort(ftHandle)
FT_ResetPort.argtypes = [FT_HANDLE]
FT_ResetPort.__doc__ = \
"""FT_STATUS FT_ResetPort(FT_HANDLE ftHandle)
ftd2xx.h:1079"""
FT_CyclePort = _libraries['ftd2xx64.dll'].FT_CyclePort
FT_CyclePort.restype = FT_STATUS
# FT_CyclePort(ftHandle)
FT_CyclePort.argtypes = [FT_HANDLE]
FT_CyclePort.__doc__ = \
"""FT_STATUS FT_CyclePort(FT_HANDLE ftHandle)
ftd2xx.h:1084"""
FT_W32_CreateFile = _libraries['ftd2xx64.dll'].FT_W32_CreateFile
FT_W32_CreateFile.restype = FT_HANDLE
# FT_W32_CreateFile(lpszName, dwAccess, dwShareMode, lpSecurityAttributes, dwCreate, dwAttrsAndFlags, hTemplate)
FT_W32_CreateFile.argtypes = [LPCTSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE]
FT_W32_CreateFile.__doc__ = \
"""FT_HANDLE FT_W32_CreateFile(LPCTSTR lpszName, DWORD dwAccess, DWORD dwShareMode, LPSECURITY_ATTRIBUTES lpSecurityAttributes, DWORD dwCreate, DWORD dwAttrsAndFlags, HANDLE hTemplate)
ftd2xx.h:1094"""
FT_W32_CloseHandle = _libraries['ftd2xx64.dll'].FT_W32_CloseHandle
FT_W32_CloseHandle.restype = BOOL
# FT_W32_CloseHandle(ftHandle)
FT_W32_CloseHandle.argtypes = [FT_HANDLE]
FT_W32_CloseHandle.__doc__ = \
"""BOOL FT_W32_CloseHandle(FT_HANDLE ftHandle)
ftd2xx.h:1105"""
FT_W32_ReadFile = _libraries['ftd2xx64.dll'].FT_W32_ReadFile
FT_W32_ReadFile.restype = BOOL
# FT_W32_ReadFile(ftHandle, lpBuffer, nBufferSize, lpBytesReturned, lpOverlapped)
FT_W32_ReadFile.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
FT_W32_ReadFile.__doc__ = \
"""BOOL FT_W32_ReadFile(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD nBufferSize, LPDWORD lpBytesReturned, LPOVERLAPPED lpOverlapped)
ftd2xx.h:1110"""
FT_W32_WriteFile = _libraries['ftd2xx64.dll'].FT_W32_WriteFile
FT_W32_WriteFile.restype = BOOL
# FT_W32_WriteFile(ftHandle, lpBuffer, nBufferSize, lpBytesWritten, lpOverlapped)
FT_W32_WriteFile.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
FT_W32_WriteFile.__doc__ = \
"""BOOL FT_W32_WriteFile(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD nBufferSize, LPDWORD lpBytesWritten, LPOVERLAPPED lpOverlapped)
ftd2xx.h:1119"""
FT_W32_GetLastError = _libraries['ftd2xx64.dll'].FT_W32_GetLastError
FT_W32_GetLastError.restype = DWORD
# FT_W32_GetLastError(ftHandle)
FT_W32_GetLastError.argtypes = [FT_HANDLE]
FT_W32_GetLastError.__doc__ = \
"""DWORD FT_W32_GetLastError(FT_HANDLE ftHandle)
ftd2xx.h:1128"""
FT_W32_GetOverlappedResult = _libraries['ftd2xx64.dll'].FT_W32_GetOverlappedResult
FT_W32_GetOverlappedResult.restype = BOOL
# FT_W32_GetOverlappedResult(ftHandle, lpOverlapped, lpdwBytesTransferred, bWait)
FT_W32_GetOverlappedResult.argtypes = [FT_HANDLE, LPOVERLAPPED, LPDWORD, BOOL]
FT_W32_GetOverlappedResult.__doc__ = \
"""BOOL FT_W32_GetOverlappedResult(FT_HANDLE ftHandle, LPOVERLAPPED lpOverlapped, LPDWORD lpdwBytesTransferred, BOOL bWait)
ftd2xx.h:1133"""
FT_W32_CancelIo = _libraries['ftd2xx64.dll'].FT_W32_CancelIo
FT_W32_CancelIo.restype = BOOL
# FT_W32_CancelIo(ftHandle)
FT_W32_CancelIo.argtypes = [FT_HANDLE]
FT_W32_CancelIo.__doc__ = \
"""BOOL FT_W32_CancelIo(FT_HANDLE ftHandle)
ftd2xx.h:1141"""
class struct__FTCOMSTAT(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('fCtsHold', ctypes.c_uint32, 1),
('fDsrHold', ctypes.c_uint32, 1),
('fRlsdHold', ctypes.c_uint32, 1),
('fXoffHold', ctypes.c_uint32, 1),
('fXoffSent', ctypes.c_uint32, 1),
('fEof', ctypes.c_uint32, 1),
('fTxim', ctypes.c_uint32, 1),
('fReserved', ctypes.c_uint32, 25),
('cbInQue', ctypes.c_uint32),
('cbOutQue', ctypes.c_uint32),
]
FTCOMSTAT = struct__FTCOMSTAT
LPFTCOMSTAT = POINTER_T(struct__FTCOMSTAT)
class struct__FTDCB(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('DCBlength', ctypes.c_uint32),
('BaudRate', ctypes.c_uint32),
('fBinary', ctypes.c_uint32, 1),
('fParity', ctypes.c_uint32, 1),
('fOutxCtsFlow', ctypes.c_uint32, 1),
('fOutxDsrFlow', ctypes.c_uint32, 1),
('fDtrControl', ctypes.c_uint32, 2),
('fDsrSensitivity', ctypes.c_uint32, 1),
('fTXContinueOnXoff', ctypes.c_uint32, 1),
('fOutX', ctypes.c_uint32, 1),
('fInX', ctypes.c_uint32, 1),
('fErrorChar', ctypes.c_uint32, 1),
('fNull', ctypes.c_uint32, 1),
('fRtsControl', ctypes.c_uint32, 2),
('fAbortOnError', ctypes.c_uint32, 1),
('fDummy2', ctypes.c_uint32, 17),
('wReserved', ctypes.c_uint16),
('XonLim', ctypes.c_uint16),
('XoffLim', ctypes.c_uint16),
('ByteSize', ctypes.c_ubyte),
('Parity', ctypes.c_ubyte),
('StopBits', ctypes.c_ubyte),
('XonChar', ctypes.c_char),
('XoffChar', ctypes.c_char),
('ErrorChar', ctypes.c_char),
('EofChar', ctypes.c_char),
('EvtChar', ctypes.c_char),
('wReserved1', ctypes.c_uint16),
]
FTDCB = struct__FTDCB
LPFTDCB = POINTER_T(struct__FTDCB)
class struct__FTTIMEOUTS(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('ReadIntervalTimeout', ctypes.c_uint32),
('ReadTotalTimeoutMultiplier', ctypes.c_uint32),
('ReadTotalTimeoutConstant', ctypes.c_uint32),
('WriteTotalTimeoutMultiplier', ctypes.c_uint32),
('WriteTotalTimeoutConstant', ctypes.c_uint32),
]
FTTIMEOUTS = struct__FTTIMEOUTS
LPFTTIMEOUTS = POINTER_T(struct__FTTIMEOUTS)
FT_W32_ClearCommBreak = _libraries['ftd2xx64.dll'].FT_W32_ClearCommBreak
FT_W32_ClearCommBreak.restype = BOOL
# FT_W32_ClearCommBreak(ftHandle)
FT_W32_ClearCommBreak.argtypes = [FT_HANDLE]
FT_W32_ClearCommBreak.__doc__ = \
"""BOOL FT_W32_ClearCommBreak(FT_HANDLE ftHandle)
ftd2xx.h:1203"""
FT_W32_ClearCommError = _libraries['ftd2xx64.dll'].FT_W32_ClearCommError
FT_W32_ClearCommError.restype = BOOL
# FT_W32_ClearCommError(ftHandle, lpdwErrors, lpftComstat)
FT_W32_ClearCommError.argtypes = [FT_HANDLE, LPDWORD, LPFTCOMSTAT]
FT_W32_ClearCommError.__doc__ = \
"""BOOL FT_W32_ClearCommError(FT_HANDLE ftHandle, LPDWORD lpdwErrors, LPFTCOMSTAT lpftComstat)
ftd2xx.h:1208"""
FT_W32_EscapeCommFunction = _libraries['ftd2xx64.dll'].FT_W32_EscapeCommFunction
FT_W32_EscapeCommFunction.restype = BOOL
# FT_W32_EscapeCommFunction(ftHandle, dwFunc)
FT_W32_EscapeCommFunction.argtypes = [FT_HANDLE, DWORD]
FT_W32_EscapeCommFunction.__doc__ = \
"""BOOL FT_W32_EscapeCommFunction(FT_HANDLE ftHandle, DWORD dwFunc)
ftd2xx.h:1215"""
FT_W32_GetCommModemStatus = _libraries['ftd2xx64.dll'].FT_W32_GetCommModemStatus
FT_W32_GetCommModemStatus.restype = BOOL
# FT_W32_GetCommModemStatus(ftHandle, lpdwModemStatus)
FT_W32_GetCommModemStatus.argtypes = [FT_HANDLE, LPDWORD]
FT_W32_GetCommModemStatus.__doc__ = \
"""BOOL FT_W32_GetCommModemStatus(FT_HANDLE ftHandle, LPDWORD lpdwModemStatus)
ftd2xx.h:1221"""
FT_W32_GetCommState = _libraries['ftd2xx64.dll'].FT_W32_GetCommState
FT_W32_GetCommState.restype = BOOL
# FT_W32_GetCommState(ftHandle, lpftDcb)
FT_W32_GetCommState.argtypes = [FT_HANDLE, LPFTDCB]
FT_W32_GetCommState.__doc__ = \
"""BOOL FT_W32_GetCommState(FT_HANDLE ftHandle, LPFTDCB lpftDcb)
ftd2xx.h:1227"""
FT_W32_GetCommTimeouts = _libraries['ftd2xx64.dll'].FT_W32_GetCommTimeouts
FT_W32_GetCommTimeouts.restype = BOOL
# FT_W32_GetCommTimeouts(ftHandle, pTimeouts)
FT_W32_GetCommTimeouts.argtypes = [FT_HANDLE, POINTER_T(struct__FTTIMEOUTS)]
FT_W32_GetCommTimeouts.__doc__ = \
"""BOOL FT_W32_GetCommTimeouts(FT_HANDLE ftHandle, LP_struct__FTTIMEOUTS pTimeouts)
ftd2xx.h:1233"""
FT_W32_PurgeComm = _libraries['ftd2xx64.dll'].FT_W32_PurgeComm
FT_W32_PurgeComm.restype = BOOL
# FT_W32_PurgeComm(ftHandle, dwMask)
FT_W32_PurgeComm.argtypes = [FT_HANDLE, DWORD]
FT_W32_PurgeComm.__doc__ = \
"""BOOL FT_W32_PurgeComm(FT_HANDLE ftHandle, DWORD dwMask)
ftd2xx.h:1239"""
FT_W32_SetCommBreak = _libraries['ftd2xx64.dll'].FT_W32_SetCommBreak
FT_W32_SetCommBreak.restype = BOOL
# FT_W32_SetCommBreak(ftHandle)
FT_W32_SetCommBreak.argtypes = [FT_HANDLE]
FT_W32_SetCommBreak.__doc__ = \
"""BOOL FT_W32_SetCommBreak(FT_HANDLE ftHandle)
ftd2xx.h:1245"""
FT_W32_SetCommMask = _libraries['ftd2xx64.dll'].FT_W32_SetCommMask
FT_W32_SetCommMask.restype = BOOL
# FT_W32_SetCommMask(ftHandle, ulEventMask)
FT_W32_SetCommMask.argtypes = [FT_HANDLE, ULONG]
FT_W32_SetCommMask.__doc__ = \
"""BOOL FT_W32_SetCommMask(FT_HANDLE ftHandle, ULONG ulEventMask)
ftd2xx.h:1250"""
FT_W32_GetCommMask = _libraries['ftd2xx64.dll'].FT_W32_GetCommMask
FT_W32_GetCommMask.restype = BOOL
# FT_W32_GetCommMask(ftHandle, lpdwEventMask)
FT_W32_GetCommMask.argtypes = [FT_HANDLE, LPDWORD]
FT_W32_GetCommMask.__doc__ = \
"""BOOL FT_W32_GetCommMask(FT_HANDLE ftHandle, LPDWORD lpdwEventMask)
ftd2xx.h:1256"""
FT_W32_SetCommState = _libraries['ftd2xx64.dll'].FT_W32_SetCommState
FT_W32_SetCommState.restype = BOOL
# FT_W32_SetCommState(ftHandle, lpftDcb)
FT_W32_SetCommState.argtypes = [FT_HANDLE, LPFTDCB]
FT_W32_SetCommState.__doc__ = \
"""BOOL FT_W32_SetCommState(FT_HANDLE ftHandle, LPFTDCB lpftDcb)
ftd2xx.h:1262"""
FT_W32_SetCommTimeouts = _libraries['ftd2xx64.dll'].FT_W32_SetCommTimeouts
FT_W32_SetCommTimeouts.restype = BOOL
# FT_W32_SetCommTimeouts(ftHandle, pTimeouts)
FT_W32_SetCommTimeouts.argtypes = [FT_HANDLE, POINTER_T(struct__FTTIMEOUTS)]
FT_W32_SetCommTimeouts.__doc__ = \
"""BOOL FT_W32_SetCommTimeouts(FT_HANDLE ftHandle, LP_struct__FTTIMEOUTS pTimeouts)
ftd2xx.h:1268"""
FT_W32_SetupComm = _libraries['ftd2xx64.dll'].FT_W32_SetupComm
FT_W32_SetupComm.restype = BOOL
# FT_W32_SetupComm(ftHandle, dwReadBufferSize, dwWriteBufferSize)
FT_W32_SetupComm.argtypes = [FT_HANDLE, DWORD, DWORD]
FT_W32_SetupComm.__doc__ = \
"""BOOL FT_W32_SetupComm(FT_HANDLE ftHandle, DWORD dwReadBufferSize, DWORD dwWriteBufferSize)
ftd2xx.h:1274"""
FT_W32_WaitCommEvent = _libraries['ftd2xx64.dll'].FT_W32_WaitCommEvent
FT_W32_WaitCommEvent.restype = BOOL
# FT_W32_WaitCommEvent(ftHandle, pulEvent, lpOverlapped)
FT_W32_WaitCommEvent.argtypes = [FT_HANDLE, PULONG, LPOVERLAPPED]
FT_W32_WaitCommEvent.__doc__ = \
"""BOOL FT_W32_WaitCommEvent(FT_HANDLE ftHandle, PULONG pulEvent, LPOVERLAPPED lpOverlapped)
ftd2xx.h:1281"""
class struct__ft_device_list_info_node(ctypes.Structure):
_pack_ = True # source:False
_fields_ = [
('Flags', ctypes.c_uint32),
('Type', ctypes.c_uint32),
('ID', ctypes.c_uint32),
('LocId', ctypes.c_uint32),
('SerialNumber', ctypes.c_char * 16),
('Description', ctypes.c_char * 64),
('ftHandle', POINTER_T(None)),
]
FT_DEVICE_LIST_INFO_NODE = struct__ft_device_list_info_node
# values for enumeration 'c__Ea_FT_FLAGS_OPENED'
FT_FLAGS_OPENED = 1
FT_FLAGS_HISPEED = 2
c__Ea_FT_FLAGS_OPENED = ctypes.c_int # enum
FT_CreateDeviceInfoList = _libraries['ftd2xx64.dll'].FT_CreateDeviceInfoList
FT_CreateDeviceInfoList.restype = FT_STATUS
# FT_CreateDeviceInfoList(lpdwNumDevs)
FT_CreateDeviceInfoList.argtypes = [LPDWORD]
FT_CreateDeviceInfoList.__doc__ = \
"""FT_STATUS FT_CreateDeviceInfoList(LPDWORD lpdwNumDevs)
ftd2xx.h:1310"""
FT_GetDeviceInfoList = _libraries['ftd2xx64.dll'].FT_GetDeviceInfoList
FT_GetDeviceInfoList.restype = FT_STATUS
# FT_GetDeviceInfoList(pDest, lpdwNumDevs)
FT_GetDeviceInfoList.argtypes = [POINTER_T(struct__ft_device_list_info_node), LPDWORD]
FT_GetDeviceInfoList.__doc__ = \
"""FT_STATUS FT_GetDeviceInfoList(LP_struct__ft_device_list_info_node pDest, LPDWORD lpdwNumDevs)
ftd2xx.h:1315"""
FT_GetDeviceInfoDetail = _libraries['ftd2xx64.dll'].FT_GetDeviceInfoDetail
FT_GetDeviceInfoDetail.restype = FT_STATUS
# FT_GetDeviceInfoDetail(dwIndex, lpdwFlags, lpdwType, lpdwID, lpdwLocId, lpSerialNumber, lpDescription, pftHandle)
FT_GetDeviceInfoDetail.argtypes = [DWORD, LPDWORD, LPDWORD, LPDWORD, LPDWORD, LPVOID, LPVOID, POINTER_T(POINTER_T(None))]
FT_GetDeviceInfoDetail.__doc__ = \
"""FT_STATUS FT_GetDeviceInfoDetail(DWORD dwIndex, LPDWORD lpdwFlags, LPDWORD lpdwType, LPDWORD lpdwID, LPDWORD lpdwLocId, LPVOID lpSerialNumber, LPVOID lpDescription, LP_LP_None pftHandle)
ftd2xx.h:1321"""
FT_GetDriverVersion = _libraries['ftd2xx64.dll'].FT_GetDriverVersion
FT_GetDriverVersion.restype = FT_STATUS
# FT_GetDriverVersion(ftHandle, lpdwVersion)
FT_GetDriverVersion.argtypes = [FT_HANDLE, LPDWORD]
FT_GetDriverVersion.__doc__ = \
"""FT_STATUS FT_GetDriverVersion(FT_HANDLE ftHandle, LPDWORD lpdwVersion)
ftd2xx.h:1338"""
FT_GetLibraryVersion = _libraries['ftd2xx64.dll'].FT_GetLibraryVersion
FT_GetLibraryVersion.restype = FT_STATUS
# FT_GetLibraryVersion(lpdwVersion)
FT_GetLibraryVersion.argtypes = [LPDWORD]
FT_GetLibraryVersion.__doc__ = \
"""FT_STATUS FT_GetLibraryVersion(LPDWORD lpdwVersion)
ftd2xx.h:1344"""
FT_Rescan = _libraries['ftd2xx64.dll'].FT_Rescan
FT_Rescan.restype = FT_STATUS
# FT_Rescan()
FT_Rescan.argtypes = []
FT_Rescan.__doc__ = \
"""FT_STATUS FT_Rescan()
ftd2xx.h:1350"""
FT_Reload = _libraries['ftd2xx64.dll'].FT_Reload
FT_Reload.restype = FT_STATUS
# FT_Reload(wVid, wPid)
FT_Reload.argtypes = [WORD, WORD]
FT_Reload.__doc__ = \
"""FT_STATUS FT_Reload(WORD wVid, WORD wPid)
ftd2xx.h:1355"""
FT_GetComPortNumber = _libraries['ftd2xx64.dll'].FT_GetComPortNumber
FT_GetComPortNumber.restype = FT_STATUS
# FT_GetComPortNumber(ftHandle, lpdwComPortNumber)
FT_GetComPortNumber.argtypes = [FT_HANDLE, LPLONG]
FT_GetComPortNumber.__doc__ = \
"""FT_STATUS FT_GetComPortNumber(FT_HANDLE ftHandle, LPLONG lpdwComPortNumber)
ftd2xx.h:1361"""
FT_EE_ReadConfig = _libraries['ftd2xx64.dll'].FT_EE_ReadConfig
FT_EE_ReadConfig.restype = FT_STATUS
# FT_EE_ReadConfig(ftHandle, ucAddress, pucValue)
FT_EE_ReadConfig.argtypes = [FT_HANDLE, UCHAR, PUCHAR]
FT_EE_ReadConfig.__doc__ = \
"""FT_STATUS FT_EE_ReadConfig(FT_HANDLE ftHandle, UCHAR ucAddress, PUCHAR pucValue)
ftd2xx.h:1372"""
FT_EE_WriteConfig = _libraries['ftd2xx64.dll'].FT_EE_WriteConfig
FT_EE_WriteConfig.restype = FT_STATUS
# FT_EE_WriteConfig(ftHandle, ucAddress, ucValue)
FT_EE_WriteConfig.argtypes = [FT_HANDLE, UCHAR, UCHAR]
FT_EE_WriteConfig.__doc__ = \
"""FT_STATUS FT_EE_WriteConfig(FT_HANDLE ftHandle, UCHAR ucAddress, UCHAR ucValue)
ftd2xx.h:1379"""
FT_EE_ReadECC = _libraries['ftd2xx64.dll'].FT_EE_ReadECC
FT_EE_ReadECC.restype = FT_STATUS
# FT_EE_ReadECC(ftHandle, ucOption, lpwValue)
FT_EE_ReadECC.argtypes = [FT_HANDLE, UCHAR, LPWORD]
FT_EE_ReadECC.__doc__ = \
"""FT_STATUS FT_EE_ReadECC(FT_HANDLE ftHandle, UCHAR ucOption, LPWORD lpwValue)
ftd2xx.h:1386"""
FT_GetQueueStatusEx = _libraries['ftd2xx64.dll'].FT_GetQueueStatusEx
FT_GetQueueStatusEx.restype = FT_STATUS
# FT_GetQueueStatusEx(ftHandle, dwRxBytes)
FT_GetQueueStatusEx.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetQueueStatusEx.__doc__ = \
"""FT_STATUS FT_GetQueueStatusEx(FT_HANDLE ftHandle, LP_c_uint32 dwRxBytes)
ftd2xx.h:1393"""
FT_ComPortIdle = _libraries['ftd2xx64.dll'].FT_ComPortIdle
FT_ComPortIdle.restype = FT_STATUS
# FT_ComPortIdle(ftHandle)
FT_ComPortIdle.argtypes = [FT_HANDLE]
FT_ComPortIdle.__doc__ = \
"""FT_STATUS FT_ComPortIdle(FT_HANDLE ftHandle)
ftd2xx.h:1399"""
FT_ComPortCancelIdle = _libraries['ftd2xx64.dll'].FT_ComPortCancelIdle
FT_ComPortCancelIdle.restype = FT_STATUS
# FT_ComPortCancelIdle(ftHandle)
FT_ComPortCancelIdle.argtypes = [FT_HANDLE]
FT_ComPortCancelIdle.__doc__ = \
"""FT_STATUS FT_ComPortCancelIdle(FT_HANDLE ftHandle)
ftd2xx.h:1404"""
FT_VendorCmdGet = _libraries['ftd2xx64.dll'].FT_VendorCmdGet
FT_VendorCmdGet.restype = FT_STATUS
# FT_VendorCmdGet(ftHandle, Request, Buf, Len)
FT_VendorCmdGet.argtypes = [FT_HANDLE, UCHAR, POINTER_T(ctypes.c_ubyte), USHORT]
FT_VendorCmdGet.__doc__ = \
"""FT_STATUS FT_VendorCmdGet(FT_HANDLE ftHandle, UCHAR Request, LP_c_ubyte Buf, USHORT Len)
ftd2xx.h:1409"""
FT_VendorCmdSet = _libraries['ftd2xx64.dll'].FT_VendorCmdSet
FT_VendorCmdSet.restype = FT_STATUS
# FT_VendorCmdSet(ftHandle, Request, Buf, Len)
FT_VendorCmdSet.argtypes = [FT_HANDLE, UCHAR, POINTER_T(ctypes.c_ubyte), USHORT]
FT_VendorCmdSet.__doc__ = \
"""FT_STATUS FT_VendorCmdSet(FT_HANDLE ftHandle, UCHAR Request, LP_c_ubyte Buf, USHORT Len)
ftd2xx.h:1417"""
FT_VendorCmdGetEx = _libraries['ftd2xx64.dll'].FT_VendorCmdGetEx
FT_VendorCmdGetEx.restype = FT_STATUS
# FT_VendorCmdGetEx(ftHandle, wValue, Buf, Len)
FT_VendorCmdGetEx.argtypes = [FT_HANDLE, USHORT, POINTER_T(ctypes.c_ubyte), USHORT]
FT_VendorCmdGetEx.__doc__ = \
"""FT_STATUS FT_VendorCmdGetEx(FT_HANDLE ftHandle, USHORT wValue, LP_c_ubyte Buf, USHORT Len)
ftd2xx.h:1425"""
FT_VendorCmdSetEx = _libraries['ftd2xx64.dll'].FT_VendorCmdSetEx
FT_VendorCmdSetEx.restype = FT_STATUS
# FT_VendorCmdSetEx(ftHandle, wValue, Buf, Len)
FT_VendorCmdSetEx.argtypes = [FT_HANDLE, USHORT, POINTER_T(ctypes.c_ubyte), USHORT]
FT_VendorCmdSetEx.__doc__ = \
"""FT_STATUS FT_VendorCmdSetEx(FT_HANDLE ftHandle, USHORT wValue, LP_c_ubyte Buf, USHORT Len)
ftd2xx.h:1433"""
__all__ = \
['struct_ft_eeprom_232r', 'FT_SetDtr', 'FT_INVALID_BAUD_RATE',
'FT_EEPROM_NOT_PRESENT', 'FT_DEVICE_232R', 'PULONG',
'FT_GetBitMode', 'FT_EE_ReadECC', 'PFT_EEPROM_2232H',
'FT_EEPROM_2232', 'FT_EE_UARead', 'FT_CyclePort',
'FT_EEPROM_X_SERIES', 'FT_W32_ReadFile', 'FT_DEVICE_4222_PROG',
'FT_WriteEE', 'struct_ft_eeprom_4232h', 'FT_VendorCmdGet',
'FT_EE_ReadEx', 'FT_DEVICE_930', 'FT_EraseEE', 'PFT_EEPROM_4232H',
'FT_DEVICE_NOT_FOUND', 'PFT_EEPROM_232B', 'FT_W32_SetCommMask',
'PUCHAR', 'FT_SetBreakOff', 'FT_EE_ProgramEx',
'FT_ComPortCancelIdle', 'c__Ea_FT_OK', 'PFT_EEPROM_X_SERIES',
'struct__FTDCB', 'FT_W32_GetOverlappedResult',
'FT_EEPROM_READ_FAILED', 'FT_SetWaitMask', 'FT_DEVICE',
'FT_EE_Read', 'FT_W32_CancelIo', 'FT_DEVICE_NOT_OPENED',
'FT_DEVICE_NOT_OPENED_FOR_ERASE', 'c__Ea_FT_FLAGS_OPENED',
'FT_GetDeviceInfoDetail', 'union__OVERLAPPED_0', 'FT_ListDevices',
'LPLONG', 'FT_W32_GetCommMask', 'FT_DEVICE_X_SERIES',
'FT_W32_ClearCommBreak', 'FT_ClrRts', 'FT_INVALID_PARAMETER',
'struct_ft_eeprom_232h', 'FT_GetDriverVersion',
'FT_INSUFFICIENT_RESOURCES', 'FT_RestartInTask',
'FT_W32_ClearCommError', 'FT_OTHER_ERROR', 'FT_SetRts',
'FT_DEVICE_4222H_0', 'FT_GetQueueStatusEx',
'FT_SetDataCharacteristics', 'struct_ft_eeprom_2232', 'PVOID',
'FT_W32_GetCommModemStatus', 'FT_DEVICE_100AX',
'FT_W32_WriteFile', 'FT_GetDeviceInfo', 'LPFTDCB',
'FT_EEPROM_WRITE_FAILED', 'FT_W32_GetCommTimeouts',
'PFT_PROGRAM_DATA', 'LPFTTIMEOUTS', 'FT_EEPROM_Read', 'BOOL',
'FT_DEVICE_4222H_1_2', 'FT_DEVICE_LIST_INFO_NODE',
'FT_GetComPortNumber', 'FT_INVALID_ARGS', 'FT_EE_WriteConfig',
'struct_ft_program_data', 'FT_DEVICE_LIST_NOT_READY',
'FT_WaitOnMask', 'FT_FAILED_TO_WRITE_DEVICE',
'FT_SetDeadmanTimeout', 'FT_StopInTask', 'struct__FTCOMSTAT',
'FT_EEPROM_NOT_PROGRAMMED', 'FT_GetModemStatus', 'LPDWORD',
'struct_ft_eeprom_2232h', 'FT_SetFlowControl', 'FT_EEPROM_2232H',
'PFT_EEPROM_2232', 'FT_EE_Program', 'FT_VendorCmdSet', 'FT_Purge',
'LPCTSTR', 'FT_GetQueueStatus', 'FT_SetEventNotification',
'FT_EEPROM_Program', 'FT_W32_PurgeComm', 'FT_GetLatencyTimer',
'FT_DEVICE_232H', 'FT_SetDivisor', 'PCHAR', 'HANDLE',
'struct_ft_eeprom_header', 'FTTIMEOUTS', 'FT_IO_ERROR',
'FT_ReadEE', 'USHORT', 'struct_ft_eeprom_x_series', 'FT_STATUS',
'FT_Close', 'struct__OVERLAPPED', 'FT_DEVICE_UMFTPD3A',
'FT_W32_CreateFile', 'struct__ft_device_list_info_node',
'FT_ComPortIdle', 'c__Ea_FT_DEVICE_BM', 'FT_Reload', 'WORD',
'FT_EE_ReadConfig', 'FT_SetBaudRate', 'FT_EEPROM_232B', 'FT_OK',
'ULONG', 'FT_OpenEx', 'FT_SetUSBParameters',
'FT_W32_GetLastError', 'FT_W32_EscapeCommFunction', 'FT_Open',
'FT_DEVICE_NOT_OPENED_FOR_WRITE', 'FT_SetChars',
'FT_DEVICE_4232H', 'struct__FTTIMEOUTS', 'FT_DEVICE_BM',
'FT_EEPROM_HEADER', 'struct__OVERLAPPED_0_0', 'FT_HANDLE',
'PFT_EVENT_HANDLER', 'FT_ClrDtr', 'FT_W32_SetCommState',
'FT_W32_WaitCommEvent', 'FT_GetLibraryVersion', 'FT_SetBitMode',
'FT_DEVICE_AM', 'struct_ft_eeprom_232b', 'FT_EEPROM_232R',
'FT_EEPROM_4232H', 'FT_Write', 'FT_W32_GetCommState',
'FT_DEVICE_2232H', 'PFT_EEPROM_HEADER', 'FT_W32_CloseHandle',
'PFT_EEPROM_232H', 'FT_W32_SetCommTimeouts', 'FT_EE_UASize',
'LPVOID', 'FT_DEVICE_900', 'LPOVERLAPPED',
'FT_CreateDeviceInfoList', 'LPSECURITY_ATTRIBUTES',
'struct__SECURITY_ATTRIBUTES', 'FT_W32_SetupComm',
'FT_VendorCmdGetEx', 'LPFTCOMSTAT', 'FT_VendorCmdSetEx',
'FT_EEPROM_ERASE_FAILED', 'FT_PROGRAM_DATA',
'FT_SetResetPipeRetryCount', 'UCHAR', 'FT_DEVICE_2232C',
'FT_FLAGS_HISPEED', 'FT_DEVICE_UNKNOWN', 'FT_SetLatencyTimer',
'FT_ResetDevice', 'FT_GetEventStatus', 'DWORD',
'FT_INVALID_HANDLE', 'FT_GetStatus', 'FT_EE_UAWrite',
'FT_SetBreakOn', 'FT_FLAGS_OPENED', 'FT_W32_SetCommBreak',
'FT_Rescan', 'LPWORD', 'FT_DEVICE_4222H_3', 'FT_SetTimeouts',
'PFT_EEPROM_232R', 'FT_IoCtl', 'FT_GetDeviceInfoList',
'FT_NOT_SUPPORTED', 'FT_ResetPort', 'FTDCB', 'FT_EEPROM_232H',
'FTCOMSTAT', 'FT_Read']
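# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated bindings): query the D2XX
# library version and count attached devices. It assumes ftd2xx64.dll is
# loadable on this machine and relies only on FT_OK (0) and the two functions
# bound above; everything else here is illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _version = ctypes.c_uint32(0)
    if FT_GetLibraryVersion(ctypes.byref(_version)) == FT_OK:
        print("D2XX library version: 0x%08X" % _version.value)

    _num_devs = ctypes.c_uint32(0)
    if FT_CreateDeviceInfoList(ctypes.byref(_num_devs)) == FT_OK:
        print("Attached FTDI devices: %d" % _num_devs.value)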
|
StarcoderdataPython
|
3307143
|
<gh_stars>10-100
# --------------------------------------------------------
# (c) Copyright 2014 by <NAME>.
# Licensed under BSD 3-clause licence.
# --------------------------------------------------------
from pymonad.Applicative import *
class Monad(Applicative):
"""
Represents a "context" in which calculations can be executed.
You won't create `Monad` instances directly. Instead, sub-classes implement
specific contexts. Monads allow you to bind together a series of calculations
while maintaining the context of that specific monad.
"""
def __init__(self, value):
""" Wraps `value` in the Monad's context. """
super(Monad, self).__init__(value)
def bind(self, function):
""" Applies `function` to the result of a previous monadic calculation. """
raise NotImplementedError
def __rshift__(self, function):
"""
The 'bind' operator. The following are equivalent::
monadValue >> someFunction
monadValue.bind(someFunction)
"""
if callable(function):
result = self.bind(function)
if not isinstance(result, Monad): raise TypeError("Operator '>>' must return a Monad instance.")
return result
else:
if not isinstance(function, Monad): raise TypeError("Operator '>>' must return a Monad instance.")
return self.bind(lambda _: function)
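# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): `_Just` is a made-up
# minimal subclass used only to illustrate how `bind` / the `>>` operator
# chain calculations. It assumes the underlying Container stores the wrapped
# value as `self.value`, as pymonad 1.x does.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _Just(Monad):
        def bind(self, function):
            return function(self.value)

    chained = _Just(3) >> (lambda x: _Just(x + 1)) >> (lambda y: _Just(y * 2))
    print(chained.value)  # expected output: 8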
|
StarcoderdataPython
|
1776313
|
from pyne.material import Material as pymat
import copy
from collections import Counter
class Materialflow(pymat):
""" Class contains information about burnable material flow. Based on PyNE
Material.
"""
def __init__(
self,
comp=None,
mass=-1.0,
density=1.0,
atoms_per_molecule=-1.0,
metadata=None,
vol=1.0,
temp=900,
mass_flowrate=0.0,
void_frac=0.0,
burnup=0.0):
""" Initializes the Materialflow object.
Parameters
----------
PyNE.Material : class
PyNE Material parent class containing nuclide vector, density,
mass, atoms_per_molecule, metadata
temp : float
temperature of the material flow (K)
mass_flowrate : float
mass flow rate of the material flow (g/s)
void_frac : float
void fraction in the material (%)
burnup : float
material burnup at the end of depletion step [MWd/kgU]
"""
        # initialize parent class attributes (nuclide vector, mass, density, ...)
        super().__init__(comp, mass, density, atoms_per_molecule, metadata)
        # initialize all object attributes
self.vol = vol
self.temp = temp
self.mass_flowrate = mass_flowrate
self.void_frac = void_frac
self.burnup = burnup
def get_mass(self):
"""Returns total mass of the material descibed in Materialflow object.
Returns
-------
float
The mass of the object.
"""
return self.mass
def print_attr(self):
"""Prints various attributes of Materialflow object.
"""
print("Volume %f cm3" % self.vol)
print("Mass %f g" % self.mass)
print("Density %f g/cm3" % self.density)
print("Atoms per molecule %f " % self.atoms_per_molecule)
print("Meta %s " % self.metadata)
print("Mass flowrate %f g/s" % self.mass_flowrate)
print("Temperature %f K" % self.temp)
print("Void fraction %f " % self.void_frac)
print("Burnup %f MWd/kgU" % self.burnup)
print("U-235 mass %f g" % self[922350000])
def scale_matflow(self, f=1.0):
"""Returns nuclide vector dictionary, obtained from object attrs and
then scaled by factor.
Parameters
----------
f : float
Scaling factor.
Returns
-------
dict
Materialflow nuclide component dictionary of relative mass.
The keys of `new_mat_comp` are preserved from PyNE Material
            (integers representing nuclides in id-form). The values are floats
            giving each nuclide's mass (mass fraction times total mass), scaled
            by the factor f.
"""
old_dict = dict(self.comp)
new_mat_comp = {}
for key, value in old_dict.items():
new_mat_comp[key] = f * self.mass * value
return new_mat_comp
def copy_pymat_attrs(self, src):
"""Copies PyNE attributites from source object (`src`) to target
object.
Parameters
----------
src : obj
Materialflow object to copy attributes from.
"""
setattr(self, 'density', copy.deepcopy(src.density))
setattr(self,
'atoms_per_molecule',
copy.deepcopy(src.atoms_per_molecule))
self.metadata = src.metadata
def __deepcopy__(self, memo):
"""Return a deep copy of compound object `self`.
Parameters
----------
self : obj
Compound object.
memo : dict, optional
Id-to-object correspondence to control for recursion.
Returns
-------
obj
New compound object copied from `self`.
"""
        # Initiate a new object by copying the class from self
cls = self.__class__
result = cls.__new__(cls)
# Copy nuclide vector from self
result = Materialflow(self.scale_matflow())
# Copy Materialflow density and atoms_per_molecule
result.copy_pymat_attrs(self)
# Copy other object attributes such as volume, burnup, etc
for k, v in self.__dict__.items():
if 'comp' not in k:
setattr(result, k, copy.deepcopy(v))
return result
def __eq__(self, other):
"""Overrides Python ``=`` operation to compare two Materialflow
objects. Compares objects total mass, density, atoms_per_molecule,
temperature, mass flowrate, and masses of important isotopes:
uranium-235 and uranium-238.
Parameters
----------
other : obj
Materialflow object to compare with.
Returns
-------
bool
Are the objects equal?
"""
if not isinstance(other, Materialflow):
# don't attempt to compare against unrelated types
return NotImplemented
return self.mass == other.mass and self.vol == other.vol \
and self.density == other.density \
and self.atoms_per_molecule == other.atoms_per_molecule \
and self.temp == other.temp \
and self.mass_flowrate == other.mass_flowrate \
and self[922350000] == other[922350000] \
and self[922380000] == other[922380000]
#
# Materialflow math operation Overloads
#
def __add__(x, y):
"""Overrides Python adding operation for Materialflow objects.
Parameters
----------
x : obj
Materialflow object #1.
y : obj
Materialflow object #2.
Returns
-------
obj
Materialflow which is a sum of isotope masses from `x` and `y`.
"""
cls = x.__class__
result = cls.__new__(cls)
result.mass = x.mass + y.mass
x_comp = Counter(x)
y_comp = Counter(y)
x_comp.update(y_comp)
result.comp = dict(x_comp)
result.norm_comp()
result.mass_flowrate = x.mass_flowrate + y.mass_flowrate
# result.temp = (x.temp*x.mass + y.temp*y.mass)/result.mass # averaged
result.temp = x.temp
        # Burnup is simply mass-averaged but should be renormalized by heavy metal mass
result.burnup = (x.burnup*x.mass + y.burnup*y.mass)/result.mass
# result.density = result.mass/result.vol
result.density = x.density
result.vol = result.mass/result.density
result.void_frac = (x.void_frac*x.vol + y.void_frac*y.vol)/result.vol
return result
def __rmul__(self, scaling_factor):
"""Overrides Python multiplication operation for Materialflow objects.
Parameters
----------
scaling_factor : float or int
Scaling factor.
Returns
-------
obj
            Materialflow object which has the mass of each isotope and the
            mass_flowrate scaled by `scaling_factor`.
"""
if isinstance(scaling_factor, (int, float)):
result = copy.deepcopy(self)
result.mass = scaling_factor * self.mass
result.norm_comp()
result.vol = scaling_factor * self.vol
result.mass_flowrate = scaling_factor * self.mass_flowrate
# result.temp = (x.temp*x.mass + y.temp*y.mass)/result.mass
return result
        else:
            return NotImplemented
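# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module):
# requires PyNE to be installed; the composition, mass, density and flow rate
# below are made-up numbers, with nuclides given in PyNE id-form.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    fuel = Materialflow({922350000: 0.05, 922380000: 0.95},
                        mass=1000.0,
                        density=19.1,
                        mass_flowrate=2.0)
    fuel.print_attr()
    makeup = 0.1 * fuel   # a flow with 10% of the mass, volume and flow rate
    print(makeup.get_mass())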
|
StarcoderdataPython
|
3268551
|
import math
class Cpf(object):
""" Etapas:
- Primeira: Verificar se o CPF informado contém todos os dígidos nem são repetidos
- Segunda: Confirmar se o primeiro dígito do verificador está correto
- Terceira: Confirmar se o segundo dígito do verificador está correto
- Quarta: Validar o CPF
- Quinta: Informar o estado emissor do CPF
"""
_cpf = []
def __init__(self, cpf):
self._cpf = self.str2int(cpf)
if(not self.verificar()):
self._cpf = False
def str2int(self, string):
inteiro = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for i in range(len(string)):
inteiro[i] = int(string[i])
return inteiro
def verificar_valores_heterogenicos(self):
controle = False
for i in range(11):
if (self._cpf[1] != self._cpf[i]):
controle = True
return controle
def verificar_digito(self, digito):
quociente = sum = 0
resto_divisao = None
for i in range(8 + digito):
""" # Dígito verificador
# Primeiro Dígito
# Distribua os 9 primeiros dígitos em um quadro (Ex.: cpf[i]);
# Atribuindo pesos respectivos a eles: seguindo a fórmula: 8 + 1 - i;
# Multiplique os valores de cada coluna (Ex.: sum += (cpf[i] * (9 + 1 - i)))
# Esquematizando:
# 01 01 01 04 04 04 07 07 07
# x 10 09 08 07 06 05 04 03 02
# ------------------------------------
# 10 09 08 28 24 20 28 21 14
# Segundo Dígito
# Distribua os 10 primeiros dígitos em um quadro (Ex.: cpf[i]);
# Atribuindo pesos respectivos a eles: seguindo a fórmula: 8 + 2 - i;
# Multiplique os valores de cada coluna (Ex.: sum += (cpf[i] * (9 + 2- i)))
# Esquematizando:
# 01 01 01 04 04 04 07 07 07 03
# x 11 10 09 08 07 06 05 04 03 02
# -----------------------------------------
# 11 10 09 32 28 24 35 28 21 06
"""
sum += (self._cpf[i] * (9 + digito - i))
sum /= 11
quociente = int(sum)
resto_divisao = math.ceil((sum - quociente) * 10)
if (resto_divisao < 2):
return (0)
else:
return (11 - resto_divisao)
def verificar(self):
return (
(len(self._cpf) == 11) and
            (self.verificar_valores_heterogenicos()) and
(self._cpf[9] == self.verificar_digito(1)) and
(self._cpf[10] == self.verificar_digito(2))
)
def estado_emissor(self):
estado = self._cpf[8]
if estado == 0:
return 'Rio Grande do Sul'
elif estado == 1:
return 'Distrito Federal, Goiás, Mato Grosso do Sul ou Tocantins;'
elif estado == 2:
return 'Pará, Amazonas, Acre, Amapá, Rondônia ou Roraima'
elif estado == 3:
return 'Ceará, Maranhão ou Piauí'
elif estado == 4:
return 'Pernambuco, Rio Grande do Norte, Paraíba ou Alagoas'
elif estado == 5:
return 'Bahia ou Sergipe'
elif estado == 6:
return 'Minas Gerais'
elif estado == 7:
return 'Rio de Janeiro ou Espírito Santo'
elif estado == 8:
return 'São Paulo'
elif estado == 9:
return 'Paraná ou Santa Catarina'
else:
return 'Error: Emissor do CPF desconhecido'
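# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative): "11144477735" is a well-known test CPF
# that satisfies both check digits; it does not belong to a real person.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    cpf = Cpf("11144477735")
    if cpf.verificar():
        print("Valid CPF, issued in:", cpf.estado_emissor())
    else:
        print("Invalid CPF")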
|
StarcoderdataPython
|
3306676
|
<filename>oslo/data/indexing/cached_indexing.py
# coding=utf-8
# Copyright 2021 TUNiB Inc.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE.apache-2.0 file in the root directory of this source tree.
from functools import lru_cache
from typing import List, Union
import numpy as np
from oslo.data.indexing.lazy_indexing import IndexedDataset
class IndexedCachedDataset(IndexedDataset):
"""
Copy of ``IndexedCachedDataset`` from ``fairseq``.
Args:
path (str): dataset path
Attributes:
cache (np.array): in-memory cached array
cache_index (dict): indices of cached samples.
"""
def __init__(self, path: str):
super().__init__(path)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
"""
Check indexed dataset supports cache prefetching
Returns:
bool: whether support prefetching or not
"""
return True
def prefetch(self, indices: List[int]) -> None:
"""
Prefetch dataset by given indices
Args:
indices (List[int]): dataset indices
"""
if all(i in self.cache_index for i in indices):
# If all indices are cached, quit method.
return
if not self.data_file:
# If dataset is not loaded, load dataset from external memory.
self.read_data(self.path)
# Sort indices to compute total size from ``data_offsets``, contiguous array.
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
# Create cache array
self.cache = np.empty(
total_size,
dtype=self.dtype,
)
# Ensure cache_index is cleared array.
self.cache_index.clear()
ptx = 0
for i in indices:
# store total array size of from start to end.
self.cache_index[i] = ptx
# get slice from total cached array by ptx size
size = self.data_offsets[i + 1] - self.data_offsets[i]
array = self.cache[ptx : ptx + size]
# sets the file's current position at the offset.
self.data_file.seek(self.data_offsets[i] * self.element_size)
# read data into array
self.data_file.readinto(array)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
@lru_cache(maxsize=8)
    def __getitem__(self, idx: Union[int, slice]) -> Union[np.ndarray, List]:
        """
        Get item(s) from a given index or slice
        Args:
            idx (Union[int, slice]): index or slice
        Returns:
            Union[np.ndarray, List]: loaded samples
        """
if isinstance(idx, int):
# check index is valid
self.check_index(idx)
# compute tensor size
tensor_size = self.sizes[self.dim_offsets[idx] : self.dim_offsets[idx + 1]]
# create empty array to hold the data
array = np.empty(tensor_size, dtype=self.dtype)
# load data from cached array (not file access)
ptx = self.cache_index[idx]
# copy cached data to array
np.copyto(array, self.cache[ptx : ptx + array.size])
return array
elif isinstance(idx, slice):
            # Hack just to make this work; can optimize later if necessary
return [self[i] for i in range(*idx.indices(len(self)))]
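# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative): the path below is hypothetical and must
# point to an indexed dataset written by the matching builder; prefetch()
# loads the requested samples into memory so later item access avoids file I/O.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import sys

    _path = sys.argv[1] if len(sys.argv) > 1 else "data/train_text_document"
    _dataset = IndexedCachedDataset(_path)
    _dataset.prefetch([0, 1, 2])
    print(_dataset[0])   # served from the in-memory cache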
|
StarcoderdataPython
|
1626345
|
<gh_stars>0
# -*- coding: utf-8 -*-
import scrapy
import re
import time
from zufang.items import ZufangItem
class LianjiaSpider(scrapy.Spider):
name = 'lianjia'
city_name = ['zz', 'bj', 'hz', 'sh', 'sz', 'gz']
    allowed_domains = ['%s.lianjia.com' % i for i in city_name]
start_urls = ['https://zz.lianjia.com/zufang/']
page = 1
url = 'https://{}.lianjia.com/zufang/pg{}/'
sign = False
page_data = 1
city_num = 0
def parse(self, response):
if not self.sign:
data = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-data')[0].extract()
# print('*'*50)
c = re.compile(r'\d+')
s = c.search(data)
self.page_data = int(s.group())
self.sign = True
print(self.page_data)
else:
alldiv_list = response.xpath('//div[@class="list-wrap"]/ul[@id="house-lst"]/li')
for odiv in alldiv_list:
details = odiv.xpath('.//div[@class="info-panel"]/h2/a/@href').extract()[0]
# print(details)
yield scrapy.Request(url=details, callback=self.detail_page, dont_filter=True)
if self.page <= self.page_data:
self.page += 1
url = self.url.format(self.city_name[self.city_num], self.page)
yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
else:
# print(self.page_data, '***')
self.city_num += 1
self.sign = False
self.page = 1
# try:
# url = self.url.format(self.city_name[self.city_num], self.page)
# except Exception as e:
# print(e)
# with open('url.txt', 'w') as f:
# f.write(url + '\n')
url = self.url.format(self.city_name[self.city_num], self.page)
yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
def detail_page(self, response):
city = response.xpath('//div[@class="fl l-txt"]/a[2]/text()').extract()[0][:-2]
address_temp = response.xpath('//div[@class="zf-room"]/p[7]/a/text()').extract()
address = ''.join(address_temp)
name = response.xpath('//div[@class="zf-room"]/p[6]/a/text()').extract()[0]
price_temp = response.xpath('//div[@class="price "]//span[1]/text()').extract()
price = ''.join(price_temp)
price_num = price_temp[0]
area = response.xpath('//div[@class="zf-room"]/p[1]/text()').extract()[0]
area_num = area[:-2]
type = response.xpath('//div[@class="zf-room"]/p[2]/text()').extract()[0].replace(' ', '')
floor = response.xpath('//div[@class="zf-room"]/p[3]/text()').extract()[0]
direction = response.xpath('//div[@class="zf-room"]/p[4]/text()').extract()[0]
date_temp = response.xpath('//div[@class="zf-room"]/p[8]/text()').extract()[0]
insert_time = self.detail_date(date_temp)
detail_page = response.url
try:
img = response.xpath('//div[@class="thumbnail"]/ul/li[1]/@data-src').extract()[0]
except Exception as e:
img = 'none'
source = 'lianjia'
item = ZufangItem()
for field in item.fields.keys():
item[field] = eval(field)
yield item
def detail_date(self, date_temp):
c = re.compile(r'\d+')
date = c.search(date_temp).group()
t = int(time.time())
data_middle = time.localtime(t - (int(date) * 3600 * 24))
date_finish = '-'.join('%s' % a for a in [data_middle.tm_year, data_middle.tm_mon, data_middle.tm_mday])
return date_finish
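# ---------------------------------------------------------------------------
# Hedged usage note (illustrative): the spider is normally started from the
# Scrapy project root with the CLI, e.g.
#
#   scrapy crawl lianjia -o listings.json
#
# Each listing parsed in detail_page() is emitted as a ZufangItem and handled
# by whatever pipelines the project configures.
# ---------------------------------------------------------------------------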
|
StarcoderdataPython
|
81060
|
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.analyses.milhdbk217f.models.relay_unit_test.py is part of The RAMSTK
# Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for the relay module."""
# Third Party Imports
import pytest
# RAMSTK Package Imports
from ramstk.analyses.milhdbk217f import relay
@pytest.mark.unit
@pytest.mark.parametrize("subcategory_id", [1, 2])
@pytest.mark.parametrize("type_id", [1, 2])
@pytest.mark.parametrize(
"environment_active_id",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
)
def test_get_part_count_lambda_b(subcategory_id, type_id, environment_active_id):
"""get_part_count_lambda_b() should return a float value for the parts count base
hazard rate on success."""
_lambda_b = relay.get_part_count_lambda_b(
subcategory_id=subcategory_id,
type_id=type_id,
environment_active_id=environment_active_id,
)
assert isinstance(_lambda_b, float)
if subcategory_id == 1 and type_id == 1:
assert (
_lambda_b
== [
0.13,
0.28,
2.1,
1.1,
3.8,
1.1,
1.4,
1.9,
2.0,
7.0,
0.66,
3.5,
10.0,
0.0,
][environment_active_id - 1]
)
@pytest.mark.unit
def test_get_part_count_lambda_b_no_subcategory():
"""get_part_count_lambda_b() should raise a KeyError when passed an unknown
subcategory ID."""
with pytest.raises(KeyError):
relay.get_part_count_lambda_b(
subcategory_id=13, type_id=1, environment_active_id=2
)
@pytest.mark.unit
def test_get_part_count_lambda_b_no_type():
"""get_part_count_lambda_b() should raise a KeyError when passed an unknown type
ID."""
with pytest.raises(KeyError):
relay.get_part_count_lambda_b(
subcategory_id=1, type_id=11, environment_active_id=2
)
@pytest.mark.unit
def test_get_part_count_lambda_b_no_environment():
"""get_part_count_lambda_b() should raise an IndexError when passed an unknown
active environment ID."""
with pytest.raises(IndexError):
relay.get_part_count_lambda_b(
subcategory_id=1, type_id=1, environment_active_id=21
)
@pytest.mark.unit
@pytest.mark.usefixtures("test_attributes_relay")
def test_calculate_part_count(test_attributes_relay):
"""calculate_part_count() should return a float value for the parts count base
hazard rate on success."""
_lambda_b = relay.calculate_part_count(**test_attributes_relay)
assert isinstance(_lambda_b, float)
assert _lambda_b == 2.1
@pytest.mark.unit
@pytest.mark.parametrize("quality_id", [1, 7])
@pytest.mark.parametrize("n_cycles", [0.5, 100.0, 1103.4])
def test_calculate_cycling_factor(quality_id, n_cycles):
"""calculate_cycling_factor() should return a float value for piCYC on success or
0.0 if passed an unknown combination of arguments."""
_pi_cyc = relay.calculate_cycling_factor(quality_id, n_cycles)
assert isinstance(_pi_cyc, float)
if quality_id == 1 and n_cycles == 0.5:
assert _pi_cyc == 0.1
elif quality_id == 7 and n_cycles == 100.0:
assert _pi_cyc == 10.0
elif quality_id == 7 and n_cycles == 1103.4:
assert _pi_cyc == pytest.approx(121.749156)
elif quality_id == 1 and n_cycles == 100.0:
assert _pi_cyc == 0.0
@pytest.mark.unit
@pytest.mark.parametrize("technology_id", [1, 2, 3, 4])
def test_calculate_load_stress_factor(technology_id):
"""calculate_load_stress_factor() should return a float value for piL on
success."""
_pi_l = relay.calculate_load_stress_factor(technology_id, 0.382)
assert isinstance(_pi_l, float)
if technology_id == 1:
assert _pi_l == 0.22800625
elif technology_id == 2:
assert _pi_l == 0.912025
elif technology_id == 3:
assert _pi_l == 3.6481
elif technology_id == 4:
assert _pi_l == 0.0
@pytest.mark.unit
@pytest.mark.parametrize("subcategory_id", [1, 2])
@pytest.mark.parametrize("quality_id", [1, 7])
def test_get_environment_factor(subcategory_id, quality_id):
"""get_environment_factor() should return a float value for piE on success."""
_pi_e = relay.get_environment_factor(subcategory_id, quality_id, 1)
assert isinstance(_pi_e, float)
if subcategory_id == 1 and quality_id == 1:
assert _pi_e == 1.0
elif subcategory_id == 1 and quality_id == 7:
assert _pi_e == 2.0
elif subcategory_id == 2:
assert _pi_e == 1.0
@pytest.mark.unit
def test_get_environment_factor_no_subcategory():
"""get_environment_factor() should raise a KeyError if passed an unknown
subcategory ID."""
with pytest.raises(KeyError):
relay.get_environment_factor(12, 1, 1)
@pytest.mark.unit
def test_get_environment_factor_no_environment():
"""get_environment_factor() should raise an IndexError if passed an unknown active
environment ID."""
with pytest.raises(IndexError):
relay.get_environment_factor(1, 1, 21)
@pytest.mark.unit
@pytest.mark.parametrize("quality_id", [1, 7])
def test_get_application_construction_factor(quality_id):
"""get_application_construction_factor() should return a float value for piF on
success."""
_pi_f = relay.get_application_construction_factor(quality_id, 1, 1, 1)
assert isinstance(_pi_f, float)
assert _pi_f == {1: 4.0, 7: 8.0}[quality_id]
@pytest.mark.unit
def test_get_application_construction_factor_no_contact_rating():
"""get_application_construction_factor() should raise a KeyError if passed an
unknown contact rating ID."""
with pytest.raises(KeyError):
relay.get_application_construction_factor(1, 15, 1, 1)
@pytest.mark.unit
def test_get_application_construction_factor_no_construction():
"""get_application_construction_factor() should raise a KeyError if passed an
unknown construction ID."""
with pytest.raises(KeyError):
relay.get_application_construction_factor(1, 1, 15, 1)
@pytest.mark.unit
def test_get_application_construction_factor_no_application():
"""get_application_construction_factor() should raise a KeyError if passed an
unknown application ID."""
with pytest.raises(KeyError):
relay.get_application_construction_factor(1, 1, 1, 15)
@pytest.mark.unit
@pytest.mark.parametrize("subcategory_id", [1, 2, 3])
@pytest.mark.parametrize("type_id", [1, 2])
def test_calculate_part_stress_lambda_b(subcategory_id, type_id):
"""calculate_part_stress_lambda_b() should return a float value for the base hazard
rate on success."""
_lambda_b = relay.calculate_part_stress_lambda_b(subcategory_id, type_id, 38.2)
assert isinstance(_lambda_b, float)
if subcategory_id == 1 and type_id == 1:
assert _lambda_b == pytest.approx(0.0064130981)
elif subcategory_id == 1 and type_id == 2:
assert _lambda_b == pytest.approx(0.0061869201)
elif subcategory_id == 2:
assert _lambda_b == [0.4, 0.5, 0.5][type_id - 1]
@pytest.mark.unit
@pytest.mark.usefixtures("test_attributes_relay")
@pytest.mark.parametrize("subcategory_id", [1, 2])
def test_calculate_part_stress(
subcategory_id,
test_attributes_relay,
):
"""calculate_part_stress() should return the attributes with updated values on
success."""
test_attributes_relay["type_id"] = 1
test_attributes_relay["subcategory_id"] = subcategory_id
_attributes = relay.calculate_part_stress(**test_attributes_relay)
assert isinstance(_attributes, dict)
@pytest.mark.unit
@pytest.mark.parametrize("subcategory_id", [1, 4])
def test_set_default_quality(subcategory_id):
"""should return the default quality for the selected subcategory ID."""
_quality = relay._set_default_quality(subcategory_id)
assert _quality == {1: 1, 4: 5}[subcategory_id]
@pytest.mark.unit
@pytest.mark.parametrize("type_id", [1, 2])
def test_set_default_load_type(type_id):
"""should return the default load type for the selected type ID."""
_load_type = relay._set_default_load_type(-1, type_id)
assert _load_type == {1: 1, 2: 2}[type_id]
@pytest.mark.unit
@pytest.mark.parametrize("type_id", [1, 4])
def test_set_default_contact_form(type_id):
"""should return the default contact form for the selected type ID."""
_contact_form = relay._set_default_contact_form(-1, type_id)
assert _contact_form == {1: 6, 4: 1}[type_id]
@pytest.mark.unit
@pytest.mark.parametrize("type_id", [1, 2, 3, 4, 5, 6])
def test_set_default_contact_rating(type_id):
"""should return the default contact rating for the selected type ID."""
_contact_rating = relay._set_default_contact_rating(-2, type_id)
assert _contact_rating == {1: 2, 2: 4, 3: 2, 4: 1, 5: 2, 6: 2}[type_id]
@pytest.mark.unit
@pytest.mark.parametrize("type_id", [1, 2, 3, 4, 5, 6])
def test_set_default_application(type_id):
"""should return the default application for the selected type ID."""
_application = relay._set_default_application(0, type_id)
assert _application == {1: 1, 2: 1, 3: 8, 4: 1, 5: 6, 6: 3}[type_id]
@pytest.mark.unit
@pytest.mark.parametrize("type_id", [1, 2, 3, 4, 5, 6])
def test_set_default_construction(type_id):
"""should return the default construction for the selected type ID."""
_construction = relay._set_default_construction(0, type_id)
assert _construction == {1: 2, 2: 4, 3: 2, 4: 2, 5: 1, 6: 2}[type_id]
@pytest.mark.unit
@pytest.mark.parametrize("type_id", [1, 4])
def test_set_default_duty_cycle(type_id):
"""should return the default duty cycle for the selected type ID."""
_duty_cycle = relay._set_default_duty_cycle(0.0, type_id)
assert _duty_cycle == {1: 10.0, 4: 20.0}[type_id]
@pytest.mark.unit
@pytest.mark.parametrize("type_id", [1, 4])
def test_set_default_rated_temperature(type_id):
"""should return the default rated temperature for the selected type ID."""
_rated_temperature = relay._set_default_rated_temperature(0.0, type_id)
assert _rated_temperature == {1: 125.0, 4: 85.0}[type_id]
@pytest.mark.unit
@pytest.mark.usefixtures("test_attributes_relay")
def test_set_default_values(test_attributes_relay):
"""should set default values for each parameter <= 0.0."""
test_attributes_relay["application_id"] = 0
test_attributes_relay["construction_id"] = 0
test_attributes_relay["contact_form_id"] = 0
test_attributes_relay["contact_rating_id"] = 0
test_attributes_relay["current_ratio"] = 0.0
test_attributes_relay["duty_cycle"] = -2.5
test_attributes_relay["quality_id"] = 0
test_attributes_relay["subcategory_id"] = 1
test_attributes_relay["technology_id"] = -1
test_attributes_relay["temperature_rated_max"] = 0.0
test_attributes_relay["type_id"] = 1
_attributes = relay.set_default_values(**test_attributes_relay)
assert isinstance(_attributes, dict)
assert _attributes["application_id"] == 1
assert _attributes["construction_id"] == 2
assert _attributes["contact_form_id"] == 6
assert _attributes["contact_rating_id"] == 2
assert _attributes["current_ratio"] == 0.5
assert _attributes["duty_cycle"] == 10.0
assert _attributes["quality_id"] == 1
assert _attributes["technology_id"] == 1
assert _attributes["temperature_rated_max"] == 125.0
@pytest.mark.unit
@pytest.mark.usefixtures("test_attributes_relay")
def test_set_default_values_none_needed(test_attributes_relay):
"""should not set default values for each parameter > 0.0."""
test_attributes_relay["application_id"] = 2
test_attributes_relay["construction_id"] = 4
test_attributes_relay["contact_form_id"] = 2
test_attributes_relay["contact_rating_id"] = 1
test_attributes_relay["current_ratio"] = 0.3
test_attributes_relay["duty_cycle"] = 45.0
test_attributes_relay["quality_id"] = 2
test_attributes_relay["subcategory_id"] = 1
test_attributes_relay["technology_id"] = 2
test_attributes_relay["temperature_rated_max"] = 105.0
test_attributes_relay["type_id"] = 1
_attributes = relay.set_default_values(**test_attributes_relay)
assert isinstance(_attributes, dict)
assert _attributes["application_id"] == 2
assert _attributes["construction_id"] == 4
assert _attributes["contact_form_id"] == 2
assert _attributes["contact_rating_id"] == 1
assert _attributes["current_ratio"] == 0.3
assert _attributes["duty_cycle"] == 45.0
assert _attributes["quality_id"] == 2
assert _attributes["technology_id"] == 2
assert _attributes["temperature_rated_max"] == 105.0
|
StarcoderdataPython
|
137169
|
import os
import subprocess
import requests
from flask import Flask, request
app = Flask(__name__)
@app.route('/', methods = ['POST'])
def run_transform():
payload_url = request.args.get("payload_url")
r = requests.get(payload_url, allow_redirects=True)
    with open('payload.json', 'wb') as payload_file:
        payload_file.write(r.content)
    transform_dw = request.get_data()
    with open('transform.dw', 'wb') as transform_file:
        transform_file.write(transform_dw)
dataweave = subprocess.run(["/root/.dw/dw", "-i", "payload", "payload.json", "-f", "transform.dw"], stdout=subprocess.PIPE, text=True)
return dataweave.stdout
if __name__ == "__main__":
app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))
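# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative; the URL and DataWeave script are made up):
# POST the transform script as the request body and pass the payload location
# as the ``payload_url`` query parameter, e.g.
#
#   curl -X POST "http://localhost:8080/?payload_url=https://example.com/payload.json" \
#        --data-binary @transform.dw
#
# The service downloads the payload, runs the DataWeave CLI on it and returns
# the transformed output.
# ---------------------------------------------------------------------------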
|
StarcoderdataPython
|
4840566
|
<filename>samples/openapi3/client/features/dynamic-servers/python-experimental/dynamic_servers/__init__.py
# coding: utf-8
# flake8: noqa
"""
OpenAPI Extension with dynamic servers
This specification shows how to use dynamic servers. # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
__version__ = "1.0.0"
# import ApiClient
from dynamic_servers.api_client import ApiClient
# import Configuration
from dynamic_servers.configuration import Configuration
# import exceptions
from dynamic_servers.exceptions import OpenApiException
from dynamic_servers.exceptions import ApiAttributeError
from dynamic_servers.exceptions import ApiTypeError
from dynamic_servers.exceptions import ApiValueError
from dynamic_servers.exceptions import ApiKeyError
from dynamic_servers.exceptions import ApiException
|
StarcoderdataPython
|
1737395
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.portouts import PortOutsData
XML_NAME_PORTOUTS = "LNPResponseWrapper"
XPATH_PORTOUTS = "/portouts"
class PortOuts(BaseResource, PortOutsData):
"""Local number portability orders from winning carriers for account"""
_node_name = XML_NAME_PORTOUTS
_xpath = XPATH_PORTOUTS
def __init__(self, parent=None, client=None):
super().__init__(parent, client)
PortOutsData.__init__(self, self)
def list(self, params):
return self._get_data(params=params).lnp_port_info_for_given_status
|
StarcoderdataPython
|
31446
|
<reponame>ZhiruiFeng/CarsMemory
#!/usr/bin/env python
# aws s3
|
StarcoderdataPython
|
3214506
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) <NAME> <<EMAIL>>
Kernel class for module results
"""
class WSResult(object):
results = None
def __init__(self):
self.results = []
def put(self, item):
""" Put item to results """
self.results.append(item)
return self
def as_string(self):
""" Return results as string """
result = ""
for row in self.results:
result += row + "\n"
return result
def get_all(self):
""" Get list of all results """
return self.results
def unique(self):
""" Remove dups from results list """
self.results = list(set(self.results))
return self
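# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative): collect results, drop duplicates and
# render them as text.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    result = WSResult()
    result.put("admin.php").put("index.php").put("admin.php")
    print(result.unique().as_string())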
|
StarcoderdataPython
|
3398385
|
<gh_stars>1-10
#! /usr/bin/env python
import setuptools
from business_rules import __version__ as version
setuptools.setup(
name='business-rules-ext',
version=version,
description='Python DSL for setting up business intelligence rules that can be configured without code',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/maciejpolanczyk/django-business-rules-ext/',
packages=['business_rules'],
license='BSD License',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
StarcoderdataPython
|
27736
|
"""
WLS filter: Edge-preserving smoothing based on the weighted least squares
optimization framework, as described in Farbman, Fattal, Lischinski, and
Szeliski, "Edge-Preserving Decompositions for Multi-Scale Tone and Detail
Manipulation", ACM Transactions on Graphics, 27(3), August 2008.
Given an input image IN, we seek a new image OUT, which, on the one hand,
is as close as possible to IN, and, at the same time, is as smooth as
possible everywhere, except across significant gradients in L.
"""
import cv2
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve
def wlsFilter(IN, Lambda=1.0, Alpha=1.2):
"""
IN : Input image (2D grayscale image, type float)
Lambda : Balances between the data term and the smoothness term.
             Increasing Lambda will produce smoother images.
Default value is 1.0
Alpha : Gives a degree of control over the affinities by
             non-linearly scaling the gradients. Increasing Alpha
will result in sharper preserved edges. Default value: 1.2
"""
L = np.log(IN+1e-22) # Source image for the affinity matrix. log_e(IN)
smallNum = 1e-6
height, width = IN.shape
k = height * width
# Compute affinities between adjacent pixels based on gradients of L
dy = np.diff(L, n=1, axis=0) # axis=0 is vertical direction
dy = -Lambda/(np.abs(dy)**Alpha + smallNum)
dy = np.pad(dy, ((0,1),(0,0)), 'constant') # add zeros row
dy = dy.flatten(order='F')
dx = np.diff(L, n=1, axis=1)
dx = -Lambda/(np.abs(dx)**Alpha + smallNum)
dx = np.pad(dx, ((0,0),(0,1)), 'constant') # add zeros col
dx = dx.flatten(order='F')
# Construct a five-point spatially inhomogeneous Laplacian matrix
B = np.concatenate([[dx], [dy]], axis=0)
d = np.array([-height, -1])
A = spdiags(B, d, k, k)
e = dx
w = np.pad(dx, (height, 0), 'constant'); w = w[0:-height]
s = dy
n = np.pad(dy, (1, 0), 'constant'); n = n[0:-1]
D = 1.0 - (e + w + s + n)
A = A + A.transpose() + spdiags(D, 0, k, k)
# Solve
OUT = spsolve(A, IN.flatten(order='F'))
return np.reshape(OUT, (height, width), order='F')
# Unit test
if __name__ == '__main__':
image = cv2.imread('1.png')
if image.shape[2] == 4: # Format RGBA
image = image[:,:, 0:3] # Discard alpha channel
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image1 = 1.0*image / np.max(image)
result = wlsFilter(image1)
cv2.imshow('1', result)
cv2.waitKey(0)
|
StarcoderdataPython
|
93352
|
<reponame>naviocean/imgclsmob
"""
DABNet for image segmentation, implemented in Gluon.
Original paper: 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
"""
__all__ = ['DABNet', 'dabnet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3, conv3x3_block, ConvBlock, NormActivation, Concurrent, InterpolationBlock,\
DualPathSequential, PReLU2
class DwaConvBlock(HybridBlock):
"""
Depthwise asymmetric separable convolution block.
Parameters:
----------
channels : int
Number of input/output channels.
kernel_size : int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int
Padding value for convolution layer.
dilation : int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
def __init__(self,
channels,
kernel_size,
strides,
padding,
dilation=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
activation=(lambda: nn.Activation("relu")),
**kwargs):
super(DwaConvBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = ConvBlock(
in_channels=channels,
out_channels=channels,
kernel_size=(kernel_size, 1),
strides=strides,
padding=(padding, 0),
dilation=(dilation, 1),
groups=channels,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=activation)
self.conv2 = ConvBlock(
in_channels=channels,
out_channels=channels,
kernel_size=(1, kernel_size),
strides=strides,
padding=(0, padding),
dilation=(1, dilation),
groups=channels,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=activation)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
def dwa_conv3x3_block(channels,
strides=1,
padding=1,
dilation=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
activation=(lambda: nn.Activation("relu")),
**kwargs):
"""
3x3 version of the depthwise asymmetric separable convolution block.
Parameters:
----------
channels : int
Number of input/output channels.
strides : int, default 1
Strides of the convolution.
padding : int, default 1
Padding value for convolution layer.
dilation : int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
return DwaConvBlock(
channels=channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=activation,
**kwargs)
class DABBlock(HybridBlock):
"""
DABNet specific base block.
Parameters:
----------
channels : int
Number of input/output channels.
dilation : int
Dilation value for a dilated branch in the unit.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
channels,
dilation,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABBlock, self).__init__(**kwargs)
mid_channels = channels // 2
with self.name_scope():
self.norm_activ1 = NormActivation(
in_channels=channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(channels)))
self.conv1 = conv3x3_block(
in_channels=channels,
out_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels)))
self.branches = Concurrent(stack=True)
self.branches.add(dwa_conv3x3_block(
channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels))))
self.branches.add(dwa_conv3x3_block(
channels=mid_channels,
padding=dilation,
dilation=dilation,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels))))
self.norm_activ2 = NormActivation(
in_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels)))
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=channels)
def hybrid_forward(self, F, x):
identity = x
x = self.norm_activ1(x)
x = self.conv1(x)
x = self.branches(x)
x = x.sum(axis=1)
x = self.norm_activ2(x)
x = self.conv2(x)
x = x + identity
return x
class DownBlock(HybridBlock):
"""
DABNet specific downsample block for the main branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DownBlock, self).__init__(**kwargs)
self.expand = (in_channels < out_channels)
mid_channels = out_channels - in_channels if self.expand else out_channels
with self.name_scope():
self.conv = conv3x3(
in_channels=in_channels,
out_channels=mid_channels,
strides=2)
if self.expand:
self.pool = nn.MaxPool2D(
pool_size=2,
strides=2)
self.norm_activ = NormActivation(
in_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
def hybrid_forward(self, F, x):
y = self.conv(x)
if self.expand:
z = self.pool(x)
y = F.concat(y, z, dim=1)
y = self.norm_activ(y)
return y
class DABUnit(HybridBlock):
"""
DABNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
dilations : list of int
Dilations for blocks.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
dilations,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABUnit, self).__init__(**kwargs)
mid_channels = out_channels // 2
with self.name_scope():
self.down = DownBlock(
in_channels=in_channels,
out_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.blocks = nn.HybridSequential(prefix="")
for i, dilation in enumerate(dilations):
self.blocks.add(DABBlock(
channels=mid_channels,
dilation=dilation,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
def hybrid_forward(self, F, x):
x = self.down(x)
y = self.blocks(x)
x = F.concat(y, x, dim=1)
return x
class DABStage(HybridBlock):
"""
DABNet stage.
Parameters:
----------
x_channels : int
Number of input/output channels for x.
y_in_channels : int
Number of input channels for y.
y_out_channels : int
Number of output channels for y.
dilations : list of int
Dilations for blocks.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
x_channels,
y_in_channels,
y_out_channels,
dilations,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABStage, self).__init__(**kwargs)
self.use_unit = (len(dilations) > 0)
with self.name_scope():
self.x_down = nn.AvgPool2D(
pool_size=3,
strides=2,
padding=1)
if self.use_unit:
self.unit = DABUnit(
in_channels=y_in_channels,
out_channels=(y_out_channels - x_channels),
dilations=dilations,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.norm_activ = NormActivation(
in_channels=y_out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(y_out_channels)))
def hybrid_forward(self, F, y, x):
x = self.x_down(x)
if self.use_unit:
y = self.unit(y)
y = F.concat(y, x, dim=1)
y = self.norm_activ(y)
return y, x
class DABInitBlock(HybridBlock):
"""
DABNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
self.conv3 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class DABNet(HybridBlock):
"""
DABNet model from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
Parameters:
----------
channels : list of int
Number of output channels for each unit (for y-branch).
init_block_channels : int
Number of output channels for the initial unit.
dilations : list of list of int
Dilations for blocks.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default False
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (1024, 2048)
Spatial size of the expected input image.
classes : int, default 19
Number of segmentation classes.
"""
def __init__(self,
channels,
init_block_channels,
dilations,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
aux=False,
fixed_size=False,
in_channels=3,
in_size=(1024, 2048),
classes=19,
**kwargs):
super(DABNet, self).__init__(**kwargs)
assert (aux is not None)
assert (fixed_size is not None)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.classes = classes
self.fixed_size = fixed_size
with self.name_scope():
self.features = DualPathSequential(
return_two=False,
first_ordinals=1,
last_ordinals=0)
self.features.add(DABInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
y_in_channels = init_block_channels
for i, (y_out_channels, dilations_i) in enumerate(zip(channels, dilations)):
self.features.add(DABStage(
x_channels=in_channels,
y_in_channels=y_in_channels,
y_out_channels=y_out_channels,
dilations=dilations_i,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
y_in_channels = y_out_channels
self.classifier = conv1x1(
in_channels=y_in_channels,
out_channels=classes)
self.up = InterpolationBlock(scale_factor=8)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.fixed_size else x.shape[2:]
y = self.features(x, x)
y = self.classifier(y)
y = self.up(y, in_size)
return y
def get_dabnet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DABNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
channels = [35, 131, 259]
dilations = [[], [2, 2, 2], [4, 4, 8, 8, 16, 16]]
bn_epsilon = 1e-3
net = DABNet(
channels=channels,
init_block_channels=init_block_channels,
dilations=dilations,
bn_epsilon=bn_epsilon,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def dabnet_cityscapes(classes=19, **kwargs):
"""
DABNet model for Cityscapes from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
Parameters:
----------
classes : int, default 19
Number of segmentation classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dabnet(classes=classes, model_name="dabnet_cityscapes", **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
import mxnet as mx
pretrained = False
fixed_size = True
in_size = (1024, 2048)
classes = 19
models = [
dabnet_cityscapes,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != dabnet_cityscapes or weight_count == 756643)
batch = 4
x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
y = net(x)
assert (y.shape == (batch, classes, in_size[0], in_size[1]))
if __name__ == "__main__":
_test()
|
StarcoderdataPython
|
3233188
|
#!/usr/bin/python3 -u
import os
import numpy as np
import matplotlib as mpl; mpl.use('Agg'); print("plot WITHOUT Xserver"); # this makes it run without Xserver (e.g. on supercomputer) # see http://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server
import matplotlib.pyplot as plt
import sys
'''
import basUtils
import elements
import GridUtils as GU
import ProbeParticleUtils as PPU
import PPPlot
'''
import pyProbeParticle as PPU
import pyProbeParticle.GridUtils as GU
import pyProbeParticle.PPPlot as PPPlot
from pyProbeParticle import basUtils
from pyProbeParticle import elements
#import pyProbeParticle.core as PPC
import pyProbeParticle.HighLevel as PPH
import pyProbeParticle.cpp_utils as cpp_utils
# =============== arguments definition
from optparse import OptionParser
parser = OptionParser()
parser.add_option( "-k", action="store", type="float", help="tip stiffenss [N/m]" )
parser.add_option( "--krange", action="store", type="float", help="tip stiffenss range (min,max,n) [N/m]", nargs=3)
parser.add_option( "-q", action="store", type="float", help="tip charge [e]" )
parser.add_option( "--qrange", action="store", type="float", help="tip charge range (min,max,n) [e]", nargs=3)
parser.add_option( "-a", action="store", type="float", help="oscilation amplitude [A]" )
parser.add_option( "--arange", action="store", type="float", help="oscilation amplitude range (min,max,n) [A]", nargs=3)
parser.add_option( "--iets", action="store", type="float", help="mass [a.u.]; bias offset [eV]; peak width [eV] ", nargs=3 )
parser.add_option( "--tip_base_q", action="store", type="float", help="tip_base charge [e]" )
parser.add_option( "--tip_base_qrange", action="store", type="float", help="tip_base charge range (min,max,n) [e]", nargs=3)
parser.add_option( "--Fz", action="store_true", default=False, help="plot images for Fz " )
parser.add_option( "--df", action="store_true", default=False, help="plot images for dfz " )
parser.add_option( "--save_df" , action="store_true", default=False, help="save frequency shift as df.xsf " )
parser.add_option( "--pos", action="store_true", default=False, help="save probe particle positions" )
parser.add_option( "--atoms", action="store_true", default=False, help="plot atoms to images" )
parser.add_option( "--bonds", action="store_true", default=False, help="plot bonds to images" )
parser.add_option( "--cbar", action="store_true", default=False, help="plot bonds to images" )
parser.add_option( "--WSxM", action="store_true", default=False, help="save frequency shift into WsXM *.dat files" )
parser.add_option( "--bI", action="store_true", default=False, help="plot images for Boltzmann current" )
parser.add_option( "--npy" , action="store_true" , help="load and save fields in npy instead of xsf" , default=False)
parser.add_option( "--2Dnp" , action="store_true" , help="save fields in 2D npy instead of array" , default=False)
parser.add_option( "--noPBC", action="store_false", help="pbc False", default=True)
(options, args) = parser.parse_args()
opt_dict = vars(options)
print("opt_dict: ")
print(opt_dict)
if options.npy:
data_format ="npy"
else:
data_format ="xsf"
# =============== Setup
# dgdfgdfg
print(" >> OVEWRITING SETTINGS by params.ini ")
PPU.loadParams( 'params.ini' )
#PPPlot.params = PPU.params
print(" >> OVEWRITING SETTINGS by command line arguments ")
# Ks
if opt_dict['krange'] is not None:
Ks = np.linspace( opt_dict['krange'][0], opt_dict['krange'][1], int( opt_dict['krange'][2] ) )
elif opt_dict['k'] is not None:
Ks = [ opt_dict['k'] ]
else:
Ks = [ PPU.params['stiffness'][0] ]
# Qs
if opt_dict['qrange'] is not None:
Qs = np.linspace( opt_dict['qrange'][0], opt_dict['qrange'][1], int( opt_dict['qrange'][2] ) )
elif opt_dict['q'] is not None:
Qs = [ opt_dict['q'] ]
else:
Qs = [ PPU.params['charge'] ]
# Amps
if opt_dict['arange'] is not None:
Amps = np.linspace( opt_dict['arange'][0], opt_dict['arange'][1], int( opt_dict['arange'][2] ) )
elif opt_dict['a'] is not None:
Amps = [ opt_dict['a'] ]
else:
Amps = [ PPU.params['Amplitude'] ]
# TbQs
if opt_dict['tip_base_qrange'] is not None:
TbQs = np.linspace( opt_dict['tip_base_qrange'][0], opt_dict['tip_base_qrange'][1], int( opt_dict['tip_base_qrange'][2] ) )
elif opt_dict['tip_base_q'] is not None:
TbQs = [ opt_dict['tip_base_q'] ]
else:
TbQs = [ float(PPU.params['tip_base'][1]) ]
print("Ks =", Ks)
print("Qs =", Qs)
print("Amps =", Amps)
print("TbQs =", TbQs)
#sys.exit(" STOPPED ")
print(" ============= RUN ")
dz = PPU.params['scanStep'][2]
xTips,yTips,zTips,lvecScan = PPU.prepareScanGrids( )
extent = ( xTips[0], xTips[-1], yTips[0], yTips[-1] )
atoms_str=""
atoms = None
bonds = None
if opt_dict['atoms'] or opt_dict['bonds']:
atoms_str="_atoms"
atoms, tmp1, tmp2 = basUtils.loadAtoms( 'input_plot.xyz' )
    del tmp1, tmp2
# print "atoms ", atoms
if os.path.isfile( 'atomtypes.ini' ):
print(">> LOADING LOCAL atomtypes.ini")
FFparams=PPU.loadSpecies( 'atomtypes.ini' )
else:
FFparams = PPU.loadSpecies( cpp_utils.PACKAGE_PATH+'/defaults/atomtypes.ini' )
iZs,Rs,Qstmp=PPH.parseAtoms(atoms, autogeom = False,PBC = False, FFparams=FFparams)
atom_colors = basUtils.getAtomColors(iZs,FFparams=FFparams)
Rs=Rs.transpose().copy()
atoms= [iZs,Rs[0],Rs[1],Rs[2],atom_colors]
#print "atom_colors: ", atom_colors
if opt_dict['bonds']:
bonds = basUtils.findBonds(atoms,iZs,1.0,FFparams=FFparams)
#print "bonds ", bonds
atomSize = 0.15
cbar_str =""
if opt_dict['cbar']:
cbar_str="_cbar"
for iq,Q in enumerate( Qs ):
for ik,K in enumerate( Ks ):
dirname = "Q%1.2fK%1.2f" %(Q,K)
if opt_dict['pos']:
try:
PPpos, lvec, nDim = GU.load_vec_field( dirname+'/PPpos' ,data_format=data_format)
print(" plotting PPpos : ")
PPPlot.plotDistortions( dirname+"/xy"+atoms_str+cbar_str, PPpos[:,:,:,0], PPpos[:,:,:,1], slices = list(range( 0, len(PPpos))), BG=PPpos[:,:,:,2], extent=extent, atoms=atoms, bonds=bonds, atomSize=atomSize, markersize=2.0, cbar=opt_dict['cbar'] )
del PPpos
except:
print("error: ", sys.exc_info())
print("cannot load : " + ( dirname+'/PPpos_?.' + data_format ))
if opt_dict['iets'] is not None:
#try:
eigvalK, lvec, nDim = GU.load_vec_field( dirname+'/eigvalKs' ,data_format=data_format)
M = opt_dict['iets'][0]
E0 = opt_dict['iets'][1]
w = opt_dict['iets'][2]
print(" plotting IETS M=%f V=%f w=%f " %(M,E0,w))
hbar = 6.58211951440e-16 # [eV.s]
aumass = 1.66053904020e-27 # [kg]
eVA2_to_Nm = 16.0217662 # [eV/A^2] / [N/m]
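            # Harmonic-oscillator estimate of the vibrational quantum:
            # E_vib = hbar * omega = hbar * sqrt(k / m), with the force-constant
            # eigenvalues converted from eV/A^2 to N/m and the mass from atomic
            # mass units to kg, so Evib comes out in eV.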
Evib = hbar * np.sqrt( ( eVA2_to_Nm * eigvalK )/( M * aumass ) )
IETS = PPH.symGauss(Evib[:,:,:,0], E0, w) + PPH.symGauss(Evib[:,:,:,1], E0, w) + PPH.symGauss(Evib[:,:,:,2], E0, w)
PPPlot.plotImages( dirname+"/IETS"+atoms_str+cbar_str, IETS, slices = list(range(0,len(IETS))), zs=zTips, extent=extent, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
PPPlot.plotImages( dirname+"/Evib"+atoms_str+cbar_str, Evib[:,:,:,0], slices = list(range(0,len(IETS))), zs=zTips, extent=extent, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
PPPlot.plotImages( dirname+"/Kvib"+atoms_str+cbar_str, 16.0217662 * eigvalK[:,:,:,0], slices = list(range(0,len(IETS))), zs=zTips, extent=extent, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
print("Preparing data for plotting denominators: avoidning negative frequencies via nearest neighbours Uniform Filter")
from scipy.ndimage import uniform_filter
for i in range(len(eigvalK)):
for l in [0]: #range(len(eigvalK[0,0,0])):
eigvalK[i,:,:,l]=uniform_filter(eigvalK[i,:,:,l].copy(), size=3, mode='nearest')
tmp_bool=False
for i in range(len(eigvalK)):
for j in range(len(eigvalK[0])):
for k in range(len(eigvalK[0,0])):
for l in [0]: #range(len(eigvalK[0,0,0])):
if (eigvalK[i,j,k,l] < 0):
print("BEWARE: Negative value at: i,j,k,l:",i,j,k,l)
tmp_bool = True
if tmp_bool:
print("if many negative values appear change FF grid or scanning grid")
denomin = 1/(hbar * np.sqrt( ( eVA2_to_Nm * eigvalK )/( M * aumass ) ))
print("plotting denominators fpr frustrated translation: 1/w1 + 1/w2")
PPPlot.plotImages( dirname+"/denomin"+atoms_str+cbar_str, denomin[:,:,:,0]+denomin[:,:,:,1] , slices = list(range(0,len(denomin))), zs=zTips, extent=extent, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
if opt_dict['WSxM']:
GU.saveWSxM_3D(dirname+"/denomin" , denomin[:,:,:,0]+denomin[:,:,:,1] , extent, slices = list(range(0,len(denomin))) )
GU.saveWSxM_3D(dirname+"/IETS" , IETS , extent, slices = list(range(0,len(IETS))) )
del eigvalK; del Evib; del IETS; del denomin;
#except:
# print "error: ", sys.exc_info()
# print "cannot load : " + ( dirname+'/eigvalKs_?.' + data_format )
if ( ( opt_dict['df'] or opt_dict['save_df'] or opt_dict['WSxM'] or opt_dict['2Dnp']) ):
try :
fzs, lvec, nDim = GU.load_scal_field( dirname+'/OutFz' , data_format=data_format)
if not ( (len(TbQs) == 1 ) and ( TbQs[0] == 0.0 ) ):
print("loading tip_base forces")
try:
fzt, lvect, nDimt = GU.load_scal_field( './OutFzTip_base' , data_format=data_format)
except:
print("error: ", sys.exc_info())
print("cannot load : ", './OutFzTip_base.'+data_format)
for iA,Amp in enumerate( Amps ):
for iT, TbQ in enumerate( TbQs ):
if (TbQ == 0.0 ):
AmpStr = "/Amp%2.2f" %Amp
print("Amp= ",AmpStr)
dirNameAmp = dirname+AmpStr
if not os.path.exists( dirNameAmp ):
os.makedirs( dirNameAmp )
dfs = PPU.Fz2df( fzs, dz = dz, k0 = PPU.params['kCantilever'], f0=PPU.params['f0Cantilever'], n= int(Amp/dz) )
else:
AmpStr = "/Amp%2.2f_qTip%2.2f" %(Amp,TbQ)
print("Amp= ",AmpStr)
dirNameAmp = dirname+AmpStr
if not os.path.exists( dirNameAmp ):
os.makedirs( dirNameAmp )
dfs = PPU.Fz2df( fzs + TbQ*fzt, dz = dz, k0 = PPU.params['kCantilever'], f0=PPU.params['f0Cantilever'], n= int(Amp/dz) )
if opt_dict['save_df']:
GU.save_scal_field( dirNameAmp+'/df', dfs, lvec, data_format=data_format )
if opt_dict['df']:
print(" plotting df : ")
PPPlot.plotImages( dirNameAmp+"/df"+atoms_str+cbar_str,
dfs, slices = list(range( 0,
len(dfs))), zs=zTips, extent=extent, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
if opt_dict['WSxM']:
print(" printing df into WSxM files :")
GU.saveWSxM_3D( dirNameAmp+"/df" , dfs , extent , slices=None)
if opt_dict['2Dnp']:
print(" printing df into separate np files :")
for iz in range(len(dfs)):
np.save(dirNameAmp+"/df_%03d.npy" %iz ,dfs[iz])
del dfs
del fzs
except:
print("error: ", sys.exc_info())
print("cannot load : ", dirname+'/OutFz.'+data_format)
if opt_dict['Fz'] :
try :
fzs, lvec, nDim = GU.load_scal_field( dirname+'/OutFz' , data_format=data_format)
print(" plotting Fz : ")
PPPlot.plotImages(dirname+"/Fz"+atoms_str+cbar_str,
fzs, slices = list(range( 0,
len(fzs))), zs=zTips, extent=extent, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
if opt_dict['WSxM']:
print(" printing Fz into WSxM files :")
GU.saveWSxM_3D( dirname+"/Fz" , fzs , extent , slices=None)
del fzs
except:
print("error: ", sys.exc_info())
print("cannot load : ", dirname+'/OutFz.'+data_format)
if opt_dict['bI']:
try:
I, lvec, nDim = GU.load_scal_field( dirname+'/OutI_boltzmann', data_format=data_format )
print(" plotting Boltzmann current: ")
PPPlot.plotImages(
dirname+"/OutI"+atoms_str+cbar_str,
I, slices = list(range( 0,
len(I))), zs=zTips, extent=extent, atoms=atoms, bonds=bonds, atomSize=atomSize, cbar=opt_dict['cbar'] )
del I
except:
print("error: ", sys.exc_info())
print("cannot load : " + ( dirname+'/OutI_boltzmann.'+data_format ))
print(" ***** ALL DONE ***** ")
#plt.show() # for interactive plotting you have to comment "import matplotlib as mpl; mpl.use('Agg');" at the end
|
StarcoderdataPython
|
1799242
|
<reponame>yyxiao/boardroom
#!/usr/bin/env python3.4
# -*- coding: utf-8 -*-
"""
__author__ = cuizc
__mtime__ = 2016-08-09
"""
import transaction
import logging
from sqlalchemy.orm import aliased
from ..models.model import SysOrg, SysUser, SysUserOrg, HasPad, SysUserRole
from ..common.dateutils import date_now
from ..common.paginator import Paginator
logger = logging.getLogger(__name__)
def find_branch(dbs, user_org_id=None, org_type=None):
"""
    Get the list of organizations.
    :param dbs:
    :param user_org_id:
    :param org_type: 0 = company, 1 = department
:return:
"""
branches = []
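    # Walk the org tree downward from user_org_id (or the root, id=1) with a
    # recursive CTE; every reachable row contributes an {org_id, org_name} entry.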
sql = 'WITH RECURSIVE r AS ( SELECT * FROM brms.sys_org '
if user_org_id:
sql += ' WHERE id = %s' % user_org_id
else:
sql += ' WHERE id = 1'
sql += ' union ALL SELECT sys_org.* FROM brms.sys_org, r WHERE sys_org.parent_id = r.id '
if org_type:
sql += ' and sys_org.org_type = \'' + org_type + '\''
sql += ') SELECT id,org_name,parent_id FROM r ORDER BY id'
curs = dbs.execute(sql)
for rec in curs:
branch = {}
branch['org_id'] = rec[0]
branch['org_name'] = rec[1]
branches.append(branch)
return branches
def find_branch_json(dbs, user_org_id=None, org_type=None):
"""
    Get the tree of unassigned organizations.
    :param dbs:
    :param user_org_id:
    :param org_type: 0 = company, 1 = department
:return:
"""
branches = []
sql = 'WITH RECURSIVE r AS ( SELECT * FROM brms.sys_org '
if user_org_id:
sql += ' WHERE id = %s' % user_org_id
else:
sql += ' WHERE id = 1'
sql += ' union ALL SELECT sys_org.* FROM brms.sys_org, r WHERE sys_org.parent_id = r.id '
if org_type:
sql += ' and sys_org.org_type = \'' + org_type + '\''
sql += ') SELECT id,org_name,parent_id FROM r ORDER BY id'
curs = dbs.execute(sql)
for rec in curs:
branch = {}
branch['id'] = rec[0]
branch['name'] = rec[1]
branch['pId'] = rec[2]
if rec[2] == 0:
branch['open'] = True
branches.append(branch)
return branches
def find_branch_json_check(dbs, user_id, user_now=None):
"""
    Get the organization tree.
:param dbs:
:param user_id:
:param user_now:
:return:
"""
branches = []
orgs = dbs.query(SysOrg.id, SysOrg.org_name, SysOrg.parent_id).filter(SysOrg.org_type == '0').all()
    # organizations that the currently logged-in user is allowed to assign
user_orgs = dbs.query(SysUserOrg.org_id).filter(SysUserOrg.user_id == user_now).all()
user_org_list = []
for rec in user_orgs:
user_org_list.append(rec[0])
user_tuple = tuple(user_org_list)
    # organizations already assigned to the currently selected user
curs = dbs.query(SysUserOrg.org_id).filter(SysUserOrg.user_id == user_id).all()
for rec in orgs:
branch = {}
branch['id'] = rec[0]
branch['name'] = rec[1]
branch['pId'] = rec[2]
if rec[2] == 0:
branch['open'] = True
if rec[0] in user_tuple:
branch['doCheck'] = True
else:
branch['doCheck'] = False
branch['name'] += '(不可选)'
for org in curs:
if rec[0] == org[0]:
branch['checked'] = True
branches.append(branch)
return branches
def find_branch_json_4booking(dbs, user_id, user_org_id, tree=True):
"""
    Get the organization tree.
:param dbs:
:param user_id:
:param user_org_id:
:param tree:
:return:
"""
user_parent_org_id = find_parent_org(dbs, user_org_id)
if user_org_id != user_parent_org_id:
user_org_id = user_parent_org_id
user_orgs = dbs.query(SysUserOrg.org_id)\
.outerjoin(SysOrg, SysOrg.id == SysUserOrg.org_id)\
.filter(SysUserOrg.user_id == user_id, SysOrg.org_type == '0').all()
orgs_ids = [i.org_id for i in user_orgs]
user_orgs = dbs.query(SysOrg.id, SysOrg.org_name, SysOrg.parent_id).filter(SysOrg.id.in_(orgs_ids)).all()
org_dict = {}
for org in user_orgs:
branch = dict()
branch['id'] = org[0]
branch['name'] = org[1]
branch['pId'] = org[2]
branch['doCheck'] = True
if org[2] == 0:
branch['open'] = True
if org[0] == user_org_id:
branch['checked'] = True
org_dict[org[0]] = branch
if tree:
for org_id in orgs_ids:
find_parents(dbs, org_dict[org_id]['pId'], org_dict, is_open=(org_id == user_org_id))
return [v for k, v in org_dict.items()]
def find_parents(dbs, parent_id, org_dict, is_open=False):
"""
    Find the parent organization and add it to the dict.
:param dbs:
:param parent_id:
:param org_dict:
:param is_open:
:return:
"""
if parent_id == 0 or parent_id in org_dict.keys():
return
org = dbs.query(SysOrg.id, SysOrg.org_name, SysOrg.parent_id).filter(SysOrg.id == parent_id).first()
branch = dict()
branch['id'] = org[0]
branch['name'] = org[1] + '(不可选)'
branch['pId'] = org[2]
branch['chkDisabled'] = True
branch['open'] = is_open
org_dict[parent_id] = branch
if org[2] == 0:
return
find_parents(dbs, org[2], org_dict, is_open)
return
def find_orgs(dbs, org_name=None, parent_id=None, address=None, org_id=None, page_no=1, show_child=True):
"""
    Query the list of orgs.
:param dbs:
:param org_name:
:param parent_id:
:param address:
:param org_id:
:param page_no:
:param show_child:
:return:
"""
sysorg1 = aliased(SysOrg)
orgs = dbs.query(SysOrg.id,
SysOrg.org_name,
SysOrg.org_type,
sysorg1.org_name,
SysOrg.org_manager,
SysOrg.phone,
SysOrg.address,
SysOrg.state,
SysUser.user_name,
SysOrg.create_time) \
.outerjoin(SysUser, SysUser.id == SysOrg.create_user) \
.outerjoin(sysorg1, SysOrg.parent_id == sysorg1.id)
if org_id:
if show_child:
tmp = find_branch_json(dbs, org_id)
child_org = list(map((lambda x: x['id']), tmp))
orgs = orgs.filter(SysOrg.id.in_(child_org))
else:
orgs = orgs.filter(SysOrg.id == org_id)
if org_name:
orgs = orgs.filter(SysOrg.org_name.like('%' + org_name + '%'))
if parent_id:
orgs = orgs.filter(SysOrg.parent_id == parent_id)
if address:
orgs = orgs.filter(SysOrg.address.like('%' + address + '%'))
orgs = orgs.order_by(SysOrg.create_time.desc())
results, paginator = Paginator(orgs, page_no).to_dict()
lists = []
for obj in results:
obj_id = obj[0] if obj[0] else ''
org_name = obj[1] if obj[1] else ''
org_type = obj[2] if obj[2] else ''
parent_name = obj[3] if obj[3] else ''
org_manager = obj[4] if obj[4] else ''
phone = obj[5] if obj[5] else ''
address = obj[6] if obj[6] else ''
state = obj[7] if obj[7] else ''
user_name = obj[8] if obj[8] else ''
create_time = obj[9] if obj[9] else ''
temp_dict = {
'id': obj_id,
'org_name': org_name,
'org_type': org_type,
'parent_name': parent_name,
'org_manager': org_manager,
'phone': phone,
'address': address,
'state': state,
'user_name': user_name,
'create_time': create_time
}
lists.append(temp_dict)
return lists, paginator
def find_org(dbs, org_id):
"""
:param dbs:
:param org_id:
:return:
"""
(orgs, paginator) = find_orgs(dbs, org_id=org_id)
if len(orgs) >= 1:
return orgs[0]
return None
def find_org_by_id(dbs, org_id):
"""
:param dbs:
:param org_id:
:return:
"""
org = dbs.query(SysOrg).filter(SysOrg.id == org_id).first()
if org:
return org
else:
return None
def check_org_name(dbs, org_name, parent_id):
"""
    Check whether the organization name is already taken.
:param dbs:
:param org_name:
:param parent_id
:return:
"""
if not org_name:
return "机构名称不能为空"
org = dbs.query(SysOrg).filter(SysOrg.org_name == org_name, SysOrg.parent_id == parent_id).first()
return "机构名称重复" if org else ""
def add(dbs, org):
"""
    Add an organization.
:param dbs:
:param org:
:return:
"""
try:
dbs.add(org)
dbs.flush()
sys_user_org = SysUserOrg(user_id=org.create_user, org_id=org.id, create_user=org.create_user,
create_time=date_now())
dbs.merge(sys_user_org)
sys_user_org = SysUserOrg(user_id=1, org_id=org.id, create_user=org.create_user,
create_time=date_now())
dbs.merge(sys_user_org)
return ''
except Exception as e:
logger.error(e)
return '添加机构失败,请重试!'
def update(dbs, org):
"""
    Update organization information.
:param dbs:
:param org:
:return:
"""
try:
with transaction.manager:
dbs.merge(org)
dbs.flush()
return ''
except Exception as e:
logger.error(e)
return '更新机构信息失败,请重试!'
def delete(dbs, org_id):
"""
    Delete an organization, and also delete its users, pads, the users' organization grants, the users' role grants, and other users' grants on this organization.
:param dbs:
:param org_id:
:return:
"""
try:
with transaction.manager as tm:
children = dbs.query(SysOrg).filter(SysOrg.parent_id == org_id).all()
if children:
tm.abort()
return '请先删除此机构的子机构!'
dbs.query(HasPad).filter(HasPad.org_id == org_id).delete()
dbs.query(SysUserOrg).filter(SysUserOrg.org_id == org_id).delete()
users = dbs.query(SysUser).filter(SysUser.org_id == org_id).all()
if users:
for user in users:
dbs.query(SysUserOrg).filter(SysUserOrg.user_id == user.id).delete()
dbs.query(SysUserRole).filter(SysUserRole.user_id == user.id).delete()
dbs.delete(user)
dbs.query(SysOrg).filter(SysOrg.id == org_id).delete()
dbs.flush()
return ''
except Exception as e:
logger.error(e)
return '删除机构失败,请重试!'
def find_org_ids(dbs, user_org_id):
"""
    Get the ids of the current user's organization and its sub-organizations.
:param dbs:
:param user_org_id:
:return:
"""
    branches = []  # ids of the current user's organization and its sub-organizations
sql = 'WITH RECURSIVE r AS ( SELECT * FROM brms.sys_org '
if user_org_id:
sql += ' WHERE id = %s' % user_org_id
else:
sql += ' WHERE id = 1'
sql += ' union ALL SELECT sys_org.* FROM brms.sys_org, r WHERE sys_org.parent_id = r.id ) ' \
'SELECT id,org_name,parent_id FROM r ORDER BY id'
orgs = dbs.execute(sql)
for rec in orgs:
branches.append(rec[0])
return branches
def find_org_by_user(dbs, user_id):
"""
:param dbs:
:param user_id:
:return:
"""
    branches = []  # ids of the organizations assigned to this user
user_orgs = dbs.query(SysUserOrg.org_id).filter(SysUserOrg.user_id == user_id).all()
for rec in user_orgs:
branches.append(rec[0])
return branches
def find_parent_org(dbs, org_id):
org = dbs.query(SysOrg).filter(SysOrg.id == org_id).first()
if org.org_type == '0':
return org_id
else:
return find_parent_org(dbs, org.parent_id)
|
StarcoderdataPython
|
3314368
|
# coding=utf-8
from dbget import get_relate, get_parent
__author__ = 'GaoJie'
dim_relative_map = {}
class Relative(object):
@staticmethod
def get_os():
return get_relate('osName', 'b_base_os', where='osVersion="0"')
@staticmethod
def get_categorys():
return get_relate('zhName', 'b_base_media_category', where='status=1')
@staticmethod
def get_devicetype():
return {'Phone': 1, 'Pad': 2}
@staticmethod
def get_appname():
return get_relate('zhName', 'b_base_media', where='status=1')
@staticmethod
def get_carriername():
return {'China Mobile': 43, 'China Telecom': 45, 'UNICOM': 47}
@staticmethod
def get_devicemodel():
## todo
return get_relate('modelName', 'b_base_device')
@staticmethod
def get_province():
return get_relate('zhName', 'b_base_geo', where='level=1')
@staticmethod
def get_city():
return get_relate('zhName', 'b_base_geo', where='level=2 or firstcity = 1')
@staticmethod
def get_citys():
"""
获取所有层级
"""
return get_relate('zhName', 'b_base_geo')
@classmethod
def mapping_value(cls, dim, value):
global dim_relative_map
if dim not in dim_relative_map:
relative = getattr(cls, 'get_%s' % dim.lower())
relative_map = relative()
dim_relative_map[dim] = relative_map
else:
relative_map = dim_relative_map[dim]
return relative_map[value] if value in relative_map else value
dim_father_map = {}
class Parent(object):
@classmethod
def get_citys(cls):
map_dict = get_parent('id', 'b_base_geo')
return cls._filter(map_dict)
@classmethod
def get_carriername(cls):
map_dict1 = get_parent('id', 'b_base_operator', parent_field='categoryId')
map_dict2 = get_parent('id', 'b_base_operator_category', parent_field='parentId')
return cls._filter(map_dict1, map_dict2)
@classmethod
def get_devicemodel(cls):
map_dict = get_parent('id', 'b_base_device')
return cls._filter(map_dict)
@classmethod
def get_os(cls):
map_dict = get_parent('id', 'b_base_os')
return cls._filter(map_dict)
@staticmethod
def _filter(*args):
"""
        Resolve all ancestor parents for every child.
"""
        # Merge all passed mappings (dict(*args) would raise for more than one positional dict).
        map_dict = {}
        for arg in args:
            map_dict.update(arg)
all_list = {}
for key, value in map_dict.items():
            if value == 0:
continue
key = str(key)
if key not in all_list:
all_list[key] = []
parent = value
while parent and parent in map_dict:
all_list[key].append(str(parent))
parent = map_dict[parent]
return all_list
@classmethod
def mapping_value(cls, dim, value):
global dim_father_map
if dim not in dim_father_map:
relative = getattr(cls, 'get_%s' % dim.lower())
father_map = relative()
dim_father_map[dim] = father_map
else:
father_map = dim_father_map[dim]
return father_map[value] if value in father_map else []
|
StarcoderdataPython
|
3383419
|
<reponame>Wilson194/Angry-tux<gh_stars>0
from angrytux.model.game_objects.missile_states.Collided import Collided
from angrytux.model.game_objects.missile_states.MissileState import MissileState
from angrytux.model.game_objects.missile_states.OutOfGame import OutOfGame
from angrytux.model.game_objects.missile_states.Flying import Flying
from .GameObject import GameObject
from .Position import Position
from .missile_strategies.MissileStrategy import MissileStrategy
class Missile(GameObject):
"""
Class for handling one missile object.
"""
def __init__(self, position: Position, speed: float, movement_angle: float, strategy: MissileStrategy = None):
"""
Create new missile
:param position: starting position of missile
:param speed: starting speed of missile
:param movement_angle: Starting movement angle of missile
:param strategy: moving strategy of missile
"""
super().__init__(position)
self.__speed = speed
self.__movement_angle = movement_angle
self.__strategy = strategy
self.__position = position
self.__state = Flying()
@property
def collision_distance(self):
"""
        A missile has a collision distance of 1, matching the collision distance of obstacles and enemies.
:return: 1
"""
return 1
def move(self, gravity: float, collidable_objects: list) -> int:
"""
        Move the missile by one tick of the clock.
        :param gravity: current value of gravity
        :param collidable_objects: list of all collidable objects in the game
        :return: number of points scored if something is hit
"""
points = 0
self.__strategy.move(self, gravity)
out = self.__position.out_of_window()
if out:
self.__state = OutOfGame()
for obj in collidable_objects:
if self.has_collided_with(obj):
self.__state = Collided()
obj.state.hit()
points += obj.points
return points
def accept(self, visitor) -> None:
"""
Accept function for visitor pattern
:param visitor: visitor object
"""
visitor.visit(self)
@property
def state(self) -> MissileState:
"""
Get current state of missile
:return: current missile state
"""
return self.__state
@property
def movement_angle(self) -> float:
"""
Get current movement angle of missile
:return: angle of missile
"""
return self.__movement_angle
@movement_angle.setter
def movement_angle(self, angle: float) -> None:
"""
Set new movement angle of missile
:param angle: new angle of missile
"""
self.__movement_angle = angle
@property
def speed(self) -> float:
"""
Get current speed of missile
:return: current speed of missile
"""
return self.__speed
@speed.setter
def speed(self, speed: float) -> None:
"""
Set new speed for missile
:param speed: new speed of missile
"""
self.__speed = speed
def __str__(self):
return 'Missile'
|
StarcoderdataPython
|
180672
|
<filename>csrv/model/actions/gain_a_credit.py<gh_stars>0
"""Base actions for the players to take."""
from csrv.model.actions import action
from csrv.model import cost
from csrv.model import errors
from csrv.model import events
from csrv.model import game_object
from csrv.model import parameters
class GainACredit(action.Action):
DESCRIPTION = '[click]: Gain 1[cred] (one credit)'
COST_CLASS = cost.BasicActionCost
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
action.Action.resolve(
self,
ignore_clicks=ignore_clicks,
ignore_all_costs=ignore_all_costs)
self.player.credits.gain(1)
self.game.log('The %s gains 1[cred]' % self.player, None)
self.trigger_event(events.GainACredit(self.game, self.player))
|
StarcoderdataPython
|
3266349
|
"""A management command to apply mailbox operations."""
import logging
from optparse import make_option
import os
from django.core.management.base import BaseCommand
from param_tools import tools as param_tools
from modoboa.lib.sysutils import exec_cmd
from modoboa.lib.exceptions import InternalError
from ...app_settings import load_admin_settings
from ...models import MailboxOperation
class OperationError(Exception):
"""Custom exception."""
pass
class Command(BaseCommand):
"""Command definition"""
help = "Handles rename and delete operations on mailboxes"
option_list = BaseCommand.option_list + (
make_option(
"--pidfile", default="/tmp/handle_mailbox_operations.pid",
help="Path to the file that will contain the PID of this process"
),
)
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("modoboa.admin")
def rename_mailbox(self, operation):
if not os.path.exists(operation.argument):
return
new_mail_home = operation.mailbox.mail_home
dirname = os.path.dirname(new_mail_home)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except os.error as e:
raise OperationError(str(e))
code, output = exec_cmd(
"mv %s %s" % (operation.argument, new_mail_home)
)
if code:
raise OperationError(output)
def delete_mailbox(self, operation):
if not os.path.exists(operation.argument):
return
code, output = exec_cmd(
"rm -r %s" % operation.argument
)
if code:
raise OperationError(output)
def check_pidfile(self, path):
"""Check if this command is already running
:param str path: path to the file containing the PID
:return: a boolean, True means we can go further
"""
if os.path.exists(path):
with open(path) as fp:
pid = fp.read().strip()
code, output = exec_cmd(
"grep handle_mailbox_operations /proc/%s/cmdline" % pid
)
if not code:
return False
with open(path, 'w') as fp:
            fp.write("%s\n" % os.getpid())
return True
def handle(self, *args, **options):
"""Command entry point."""
load_admin_settings()
if not param_tools.get_global_parameter("handle_mailboxes"):
return
if not self.check_pidfile(options["pidfile"]):
return
for ope in MailboxOperation.objects.all():
try:
f = getattr(self, "%s_mailbox" % ope.type)
except AttributeError:
continue
try:
f(ope)
except (OperationError, InternalError) as e:
self.logger.critical("%s failed (reason: %s)",
ope, str(e).encode("utf-8"))
else:
self.logger.info("%s succeed", ope)
ope.delete()
os.unlink(options["pidfile"])
|
StarcoderdataPython
|
3203366
|
import common.utils
|
StarcoderdataPython
|
57276
|
# test_files.py
import unittest2 as unittest
from graphviz.files import File, Source
class TestBase(unittest.TestCase):
def setUp(self):
self.file = File()
def test_format(self):
with self.assertRaisesRegexp(ValueError, 'format'):
self.file.format = 'spam'
def test_engine(self):
with self.assertRaisesRegexp(ValueError, 'engine'):
self.file.engine = 'spam'
def test_encoding(self):
with self.assertRaisesRegexp(LookupError, 'encoding'):
self.file.encoding = 'spam'
class TestFile(unittest.TestCase):
def test_init(self):
f = File('name', 'dir', 'PNG', 'NEATO', 'latin1')
self.assertEqual(f.filename, 'name')
self.assertEqual(f.format, 'png')
self.assertEqual(f.engine, 'neato')
self.assertEqual(f.encoding, 'latin1')
class TestNoent(unittest.TestCase):
def setUp(self):
import graphviz.files
graphviz.files.ENGINES.add('spam')
self.file = File('spam.gv', 'test-output', engine='spam')
self.file.source = 'spam'
def tearDown(self):
import graphviz.files
graphviz.files.ENGINES.discard('spam')
def test_pipe(self):
with self.assertRaisesRegexp(RuntimeError, 'failed to execute'):
self.file.pipe()
def test_render(self):
with self.assertRaisesRegexp(RuntimeError, 'failed to execute'):
self.file.render()
class TestSource(unittest.TestCase):
def test_init(self):
source = 'graph { hello -> world }'
s = Source(source)
self.assertEqual(s.source, source)
|
StarcoderdataPython
|
3280485
|
import unittest
from pymath.wallpaper import wallpaper
# todo: failing tests
@unittest.skip("failing tests")
class MyTestCase(unittest.TestCase):
def test_2(self):
self.assertEqual(wallpaper(6.3, 4.5, 3.29), "sixteen")
def test_3(self):
self.assertEqual(wallpaper(7.8, 2.9, 3.29), "sixteen")
def test_4(self):
self.assertEqual(wallpaper(6.3, 5.8, 3.13), "seventeen")
def test_5(self):
self.assertEqual(wallpaper(6.1, 6.7, 2.81), "sixteen")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1686503
|
import time
import unittest.mock
from datetime import datetime, timezone
from http import HTTPStatus
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
import mlrun.api.crud
import mlrun.api.schemas
import mlrun.errors
import mlrun.runtimes.constants
from mlrun.api.db.sqldb.models import Run
from mlrun.api.utils.singletons.db import get_db
from mlrun.config import config
def test_run_with_nan_in_body(db: Session, client: TestClient) -> None:
"""
    This test wouldn't pass if we were using FastAPI's default JSONResponse, which uses json.dumps to serialize JSON.
    It passes only because we changed the default to fastapi.responses.ORJSONResponse, which uses orjson.dumps
    and does handle float("NaN").
"""
run_with_nan_float = {
"metadata": {"name": "run-name"},
"status": {"artifacts": [{"preview": [[0.0, float("Nan"), 1.3]]}]},
}
uid = "some-uid"
project = "some-project"
mlrun.api.crud.Runs().store_run(db, run_with_nan_float, uid, project=project)
resp = client.get(f"run/{project}/{uid}")
assert resp.status_code == HTTPStatus.OK.value
def test_abort_run(db: Session, client: TestClient) -> None:
project = "some-project"
run_in_progress = {
"metadata": {
"name": "run-name-1",
"labels": {"kind": mlrun.runtimes.RuntimeKinds.job},
},
"status": {"state": mlrun.runtimes.constants.RunStates.running},
}
run_in_progress_uid = "in-progress-uid"
run_completed = {
"metadata": {
"name": "run-name-2",
"labels": {"kind": mlrun.runtimes.RuntimeKinds.job},
},
"status": {"state": mlrun.runtimes.constants.RunStates.completed},
}
run_completed_uid = "completed-uid"
run_aborted = {
"metadata": {
"name": "run-name-3",
"labels": {"kind": mlrun.runtimes.RuntimeKinds.job},
},
"status": {"state": mlrun.runtimes.constants.RunStates.aborted},
}
run_aborted_uid = "aborted-uid"
run_dask = {
"metadata": {
"name": "run-name-4",
"labels": {"kind": mlrun.runtimes.RuntimeKinds.dask},
},
"status": {"state": mlrun.runtimes.constants.RunStates.running},
}
run_dask_uid = "dask-uid"
for run, run_uid in [
(run_in_progress, run_in_progress_uid),
(run_completed, run_completed_uid),
(run_aborted, run_aborted_uid),
(run_dask, run_dask_uid),
]:
mlrun.api.crud.Runs().store_run(db, run, run_uid, project=project)
mlrun.api.crud.RuntimeResources().delete_runtime_resources = unittest.mock.Mock()
abort_body = {"status.state": mlrun.runtimes.constants.RunStates.aborted}
# completed is terminal state - should fail
response = client.patch(f"run/{project}/{run_completed_uid}", json=abort_body)
assert response.status_code == HTTPStatus.CONFLICT.value
# aborted is terminal state - should fail
response = client.patch(f"run/{project}/{run_aborted_uid}", json=abort_body)
assert response.status_code == HTTPStatus.CONFLICT.value
# dask kind not abortable - should fail
response = client.patch(f"run/{project}/{run_dask_uid}", json=abort_body)
assert response.status_code == HTTPStatus.BAD_REQUEST.value
# running is ok - should succeed
response = client.patch(f"run/{project}/{run_in_progress_uid}", json=abort_body)
assert response.status_code == HTTPStatus.OK.value
mlrun.api.crud.RuntimeResources().delete_runtime_resources.assert_called_once()
def test_list_runs_times_filters(db: Session, client: TestClient) -> None:
run_1_start_time = datetime.now(timezone.utc)
time.sleep(0.1)
run_1_update_time = datetime.now(timezone.utc)
run_1_name = "run_1_name"
run_1_uid = "run_1_uid"
run_1 = {
"metadata": {"name": run_1_name, "uid": run_1_uid},
}
run = Run(
name=run_1_name,
uid=run_1_uid,
project=config.default_project,
iteration=0,
start_time=run_1_start_time,
updated=run_1_update_time,
)
run.struct = run_1
get_db()._upsert(db, run, ignore=True)
between_run_1_and_2 = datetime.now(timezone.utc)
time.sleep(0.1)
run_2_start_time = datetime.now(timezone.utc)
time.sleep(0.1)
run_2_update_time = datetime.now(timezone.utc)
run_2_uid = "run_2_uid"
run_2_name = "run_2_name"
run_2 = {
"metadata": {"name": run_2_name, "uid": run_2_uid},
}
run = Run(
name=run_2_name,
uid=run_2_uid,
project=config.default_project,
iteration=0,
start_time=run_2_start_time,
updated=run_2_update_time,
)
run.struct = run_2
get_db()._upsert(db, run, ignore=True)
# all start time range
assert_time_range_request(client, [run_1_uid, run_2_uid])
assert_time_range_request(
client,
[run_1_uid, run_2_uid],
start_time_from=run_1_start_time.isoformat(),
start_time_to=run_2_update_time.isoformat(),
)
assert_time_range_request(
client,
[run_1_uid, run_2_uid],
start_time_from=run_1_start_time.isoformat(),
)
# all last update time range
assert_time_range_request(
client,
[run_1_uid, run_2_uid],
last_update_time_from=run_1_update_time,
last_update_time_to=run_2_update_time,
)
assert_time_range_request(
client,
[run_1_uid, run_2_uid],
last_update_time_from=run_1_update_time,
)
assert_time_range_request(
client,
[run_1_uid, run_2_uid],
last_update_time_to=run_2_update_time,
)
# catch only first
assert_time_range_request(
client,
[run_1_uid],
start_time_from=run_1_start_time,
start_time_to=between_run_1_and_2,
)
assert_time_range_request(
client,
[run_1_uid],
start_time_to=between_run_1_and_2,
)
assert_time_range_request(
client,
[run_1_uid],
last_update_time_from=run_1_update_time,
last_update_time_to=run_2_start_time,
)
    # catch only the second run
assert_time_range_request(
client,
[run_2_uid],
start_time_from=run_2_start_time,
start_time_to=run_2_update_time,
)
assert_time_range_request(
client,
[run_2_uid],
last_update_time_from=run_2_start_time,
)
def test_list_runs_partition_by(db: Session, client: TestClient) -> None:
# Create runs
projects = ["run-project-1", "run-project-2", "run-project-3"]
run_names = ["run-name-1", "run-name-2", "run-name-3"]
for project in projects:
for name in run_names:
for suffix in ["first", "second", "third"]:
uid = f"{name}-uid-{suffix}"
for iteration in range(3):
run = {
"metadata": {
"name": name,
"uid": uid,
"project": project,
"iter": iteration,
},
}
mlrun.api.crud.Runs().store_run(db, run, uid, iteration, project)
# basic list, all projects, all iterations so 3 projects * 3 names * 3 uids * 3 iterations = 81
runs = _list_and_assert_objects(
client,
{"project": "*"},
81,
)
# basic list, specific project, only iteration 0, so 3 names * 3 uids = 9
runs = _list_and_assert_objects(
client,
{"project": projects[0], "iter": False},
9,
)
    # partitioned list, specific project, 1 row per partition by default, so 3 names * 1 row = 3
runs = _list_and_assert_objects(
client,
{
"project": projects[0],
"partition-by": mlrun.api.schemas.RunPartitionByField.name,
"partition-sort-by": mlrun.api.schemas.SortField.created,
"partition-order": mlrun.api.schemas.OrderType.asc,
},
3,
)
# sorted by ascending created so only the first ones created
for run in runs:
assert "first" in run["metadata"]["uid"]
    # partitioned list, specific project, 1 row per partition by default, so 3 names * 1 row = 3
runs = _list_and_assert_objects(
client,
{
"project": projects[0],
"partition-by": mlrun.api.schemas.RunPartitionByField.name,
"partition-sort-by": mlrun.api.schemas.SortField.updated,
"partition-order": mlrun.api.schemas.OrderType.desc,
},
3,
)
# sorted by descending updated so only the third ones created
for run in runs:
assert "third" in run["metadata"]["uid"]
    # partitioned list, specific project, 5 rows per partition, so 3 names * 5 rows = 15
runs = _list_and_assert_objects(
client,
{
"project": projects[0],
"partition-by": mlrun.api.schemas.RunPartitionByField.name,
"partition-sort-by": mlrun.api.schemas.SortField.updated,
"partition-order": mlrun.api.schemas.OrderType.desc,
"rows-per-partition": 5,
},
15,
)
# Some negative testing - no sort by field
response = client.get("/api/runs?partition-by=name")
assert response.status_code == HTTPStatus.BAD_REQUEST.value
# An invalid partition-by field - will be failed by fastapi due to schema validation.
response = client.get("/api/runs?partition-by=key&partition-sort-by=name")
assert response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY.value
def _list_and_assert_objects(client: TestClient, params, expected_number_of_runs: int):
response = client.get("/api/runs", params=params)
assert response.status_code == HTTPStatus.OK.value, response.text
runs = response.json()["runs"]
assert len(runs) == expected_number_of_runs
return runs
def assert_time_range_request(client: TestClient, expected_run_uids: list, **filters):
resp = client.get("runs", params=filters)
assert resp.status_code == HTTPStatus.OK.value
runs = resp.json()["runs"]
assert len(runs) == len(expected_run_uids)
for run in runs:
assert run["metadata"]["uid"] in expected_run_uids
|
StarcoderdataPython
|
1648148
|
from nknsdk.wallet import Wallet
# Create a new wallet
wallet = Wallet.new_wallet('pswd')
# Get wallet's json string
print(wallet.to_json())
# Get wallet's address
print(wallet.address)
# Load wallet from a wallet json string
wallet_from_json = Wallet.load_json_wallet(wallet.to_json(), 'pswd')
# Get wallet's json to dict
print(wallet_from_json.to_dict())
# Restore wallet from a seed
wallet_from_seed = Wallet.restore_wallet_by_seed(wallet.seed, 'aaa')
# Verify whether an address is valid
print(Wallet.verify_address(wallet.address))
# Verify password of the wallet
print(wallet.verify_wallet_password('<PASSWORD>'))
# Get balance of this wallet
print(wallet.get_balance())
# Get balance of address
print(Wallet.get_balance_by_addr(wallet.address))
# Get nonce for next transaction of this wallet
print(wallet.get_nonce())
# Get nonce for next transaction of address
print(Wallet.get_nonce_by_addr(wallet.address))
# Get wallet address of a name
print(Wallet.get_address_by_name('somename'))
# Transfer token to some address
print(wallet.transfer_to(wallet.address, 1, fee=0.00000001))
|
StarcoderdataPython
|
3289346
|
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.contrib.auth import login as auth_login
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.views.decorators.cache import never_cache
from django_yubico.forms import LoginForm, PasswordForm
# Ask for the user password after the token
YUBIKEY_USE_PASSWORD = getattr(settings, 'YUBICO_USE_PASSWORD', True)
# Name of the session key which stores user id
YUBIKEY_SESSION_USER_ID = getattr(settings, 'YUBICO_SESSION_USER_ID',
'yubicodjango_user_id')
# Name of the session key which stores the name of the backend user used to log
# in.
YUBIKEY_SESSION_AUTH_BACKEND = getattr(settings, 'YUBICO_SESSION_AUTH_BACKEND',
'yubicodjango_auth_backend')
# Name of the session key which stores attempt counter
YUBIKEY_SESSION_ATTEMPT_COUNTER = getattr(settings,
'YUBIKEY_SESSION_ATTEMPT_COUNTER',
'yubicodjango_counter')
# Maximum number of allowed password attempts
YUBIKEY_PASSWORD_ATTEMPTS = getattr(settings, 'YUBICO_PASSWORD_ATTEMPTS', 3)
# Django Yubico session keys
SESSION_KEYS = [YUBIKEY_SESSION_USER_ID, YUBIKEY_SESSION_AUTH_BACKEND,
YUBIKEY_SESSION_ATTEMPT_COUNTER]
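# Two-step flow: the login view validates the YubiKey OTP and, when
# YUBIKEY_USE_PASSWORD is enabled, stores the user id, auth backend and an attempt
# counter in the session; the password view then reads those keys, asks for the
# account password and completes auth_login().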
@never_cache
def login(request, template_name='django_yubico/login.html',
redirect_field_name=REDIRECT_FIELD_NAME):
"""
Displays the login form and handles the login action.
"""
redirect_to = request.REQUEST.get(redirect_field_name,
settings.LOGIN_REDIRECT_URL)
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
user = form.user
if YUBIKEY_USE_PASSWORD:
# Dual factor authentication is enabled, user still needs to
# enter his password
request.session[YUBIKEY_SESSION_USER_ID] = user.pk
request.session[YUBIKEY_SESSION_AUTH_BACKEND] = user.backend
request.session[YUBIKEY_SESSION_ATTEMPT_COUNTER] = 1
return HttpResponseRedirect(reverse('yubico_django_password'))
else:
auth_login(request=request, user=user)
return HttpResponseRedirect(redirect_to)
else:
form = LoginForm()
dictionary = {'form': form, redirect_field_name: redirect_to}
return render_to_response(template_name, dictionary,
context_instance=RequestContext(request))
@never_cache
def password(request, template_name='django_yubico/password.html',
redirect_field_name=REDIRECT_FIELD_NAME):
"""
Displays the password form and handles the login action.
"""
redirect_to = request.REQUEST.get(redirect_field_name,
settings.LOGIN_REDIRECT_URL)
for key in SESSION_KEYS:
# Make sure all the required session keys are present
value = request.session.get(key, None)
if value is None:
return HttpResponseRedirect(reverse('yubico_django_login'))
user_id = request.session[YUBIKEY_SESSION_USER_ID]
auth_backend = request.session[YUBIKEY_SESSION_AUTH_BACKEND]
user = User.objects.get(pk=user_id)
user.backend = auth_backend
if request.method == 'POST':
form = PasswordForm(request.POST, user=user)
if form.is_valid():
auth_login(request=request, user=user)
reset_user_session(session=request.session)
return HttpResponseRedirect(redirect_to)
else:
# Limit the number of password attempts per token
request.session[YUBIKEY_SESSION_ATTEMPT_COUNTER] += 1
if request.session[YUBIKEY_SESSION_ATTEMPT_COUNTER] > \
YUBIKEY_PASSWORD_ATTEMPTS:
                # Maximum number of attempts has been reached. Require the user to
# start from scratch.
reset_user_session(session=request.session)
return HttpResponseRedirect(reverse('yubico_django_login'))
else:
form = PasswordForm(user=user)
dictionary = {'form': form, redirect_field_name: redirect_to}
return render_to_response(template_name, dictionary,
context_instance=RequestContext(request))
def reset_user_session(session):
"""
Remove all the Django Yubico related keys from the provided session.
"""
for key in SESSION_KEYS:
try:
del session[key]
except KeyError:
pass
|
StarcoderdataPython
|
3337377
|
import requests
import os
list_of_tin = ['192.168.1.29', '192.168.1.218', '192.168.1.219']
print('Home Lab Status:')
def g_status(tin_list):
wrong_code = []
for ip in tin_list:
url = 'http://' + ip + '/rest/v1/system/status'
get_status_response = requests.get(url, timeout=2)
g_status_dict = get_status_response.json()
print("HOSTNAME: {} \t IMAGE: {} \t SERIAL_NUMBER: {}".format(g_status_dict['name'],
g_status_dict['firmware_version'],
g_status_dict['serial_number']))
if '16.03' not in g_status_dict['firmware_version']:
wrong_code.append(g_status_dict['name'])
for x in wrong_code:
print("Warning! Wrong code running on {}".format(x))
    if wrong_code:  # note: 'is not []' always evaluates True (identity, not equality); truthiness is the intended check
f = open(os.path.join("wrong_code.txt"), "w")
f.write("Attention humanoid! The following switches are NON-COMPLIANT:\n")
for x in wrong_code:
f.write(x)
f.close()
def main():
g_status(list_of_tin)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1663296
|
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Code starts here
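# 'path' (the location of the loan-data CSV) is assumed to be predefined by the exercise environment.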
path
data = pd.read_csv(path, sep = ',', delimiter = None)
loan_status = data['Loan_Status'].value_counts()
loan_status.plot(kind = 'bar')
plt.show()
# --------------
#Code starts here
property_and_loan = data.groupby(['Property_Area', 'Loan_Status']).size().unstack()
property_and_loan.plot(kind = 'bar', stacked = False)
plt.xlabel("Property Area")
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
# --------------
#Code starts here
education_and_loan = data[['Education', 'Loan_Status']]
education_and_loan = education_and_loan.groupby(['Education', 'Loan_Status']).size().unstack()
education_and_loan.plot(kind = 'bar', stacked = True)
plt.xlabel('Education Status')
plt.ylabel('Loan Status')
plt.xticks(rotation = 45)
# --------------
#Code starts here
graduate = data[data['Education'] == 'Graduate']
not_graduate = data[data['Education'] == 'Not Graduate']
graduate['LoanAmount'].plot(kind = 'density', label = 'Graduate')
not_graduate['LoanAmount'].plot(kind = 'density', label = 'Not Graduate')
#Code ends here
#For automatic legend display
plt.legend()
# --------------
#Code starts here
fig, (ax_1, ax_2, ax_3) = plt.subplots(3, 1, figsize= (20, 40))
data.plot.scatter(x ='ApplicantIncome' , y='LoanAmount', ax = ax_1)
ax_1.set_title('Applicant Income')
data.plot.scatter(x ='CoapplicantIncome' , y='LoanAmount', ax = ax_2)
ax_2.set_title('Coapplicant Income')
data['ApplicantIncome'] = data['ApplicantIncome'].fillna(0)
data['CoapplicantIncome'] = data['CoapplicantIncome'].fillna(0)
data['TotalIncome'] = data['ApplicantIncome'] + data['CoapplicantIncome']
data.plot.scatter(x ='TotalIncome' , y='LoanAmount', ax = ax_3)
ax_3.set_title('Total Income')
|
StarcoderdataPython
|
110976
|
import pyautogui
from time import sleep
accept=None
f= None
r= None
c=None
l=None
lockin=None
test=None
champ= input("Which champ:")
secondary=input("Which secondary:")
secondary=secondary.lower()
secondary=secondary.strip()
champ=champ.lower()
champ=champ.strip()
ban= input("Which ban:")
ban=ban.lower()
ban=ban.strip()
mode=input("Draft(D) or Blind (B)")
mode=mode.lower()
mode=mode.strip()
if champ=="blitzcrank":
champ="blitz"
img=champ+".png"
secondary=secondary+".png"
ban=ban+".png"
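# Each name above maps to a .png screenshot that pyautogui template-matches on screen; the confidence argument requires the opencv-python package.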
def draft():
global ban
global b
b= None
banThis=None
while b is None:
b=pyautogui.locateCenterOnScreen('bans.png',grayscale=False,confidence=0.5)
if b:
try:
print(ban)
test=pyautogui.locateCenterOnScreen(ban,grayscale=False,confidence=0.5)
pyautogui.moveTo(test[0],test[1])
pyautogui.click()
sleep(2)
while banThis is None:
banThis=pyautogui.locateCenterOnScreen("ban.png",grayscale=False,confidence=0.5)
pyautogui.moveTo(banThis[0],banThis[1])
pyautogui.click()
except:
pyautogui.scroll(-125)
while True:
while lockin is None:
# This detects the accept button and clicks it
#TBI: Constant checking of the button in case a match is dropped due to someone not accepting/dodging
"""accept=pyautogui.locateCenterOnScreen('button.png', grayscale=False,confidence=0.5)
if accept:
pyautogui.moveTo(accept[0],accept[1])
pyautogui.click()
else:
continue
print("Not found")"""
if mode == 'd':
draft()
# This bit of code detects the champion chosen by the user and click
"""
while c is None:
while l is None:
# This bit of code detects the "Choose A Champion" banner
l=pyautogui.locateCenterOnScreen("choose.png",grayscale=False,confidence=0.5)
if l:
try:
print(img+" was chosen")
# This bit of code detects the champion chosen by the user , moves the cursor to it and clicks it
c=pyautogui.locateCenterOnScreen(img,grayscale=False,confidence=0.5)
pyautogui.moveTo(c[0],c[1])
pyautogui.click()
c=1
if lockin is None:
# Lock in button clicking
lockin=pyautogui.locateCenterOnScreen("lock-in.png",grayscale=False,confidence=0.5)
pyautogui.moveTo(lockin[0],lockin[1])
pyautogui.click()
except:
# If the champion isn't found, scroll down
pyautogui.scroll(-115)
"""
print(c)
|
StarcoderdataPython
|
3391090
|
<filename>NJ_trees_run.py<gh_stars>0
from os import system
import multiprocessing
def python_run(command):
print 'Running: ' + command
system(command)
print 'Finished with: ' + command
run_file = 'terminal_run_clusters_probable.txt'
# parse the file
txt = open(run_file, 'r')
txt_data = txt.readlines()
txt.close()
python_commands = [l[:-4] for l in txt_data if 'python NJ_tree_' in l]
# print python_commands
print 'To be run:', len(python_commands)
# run selected commands
n_parallel = 8
pool = multiprocessing.Pool(n_parallel)
pool.map(python_run, python_commands)
pool.close()
|
StarcoderdataPython
|
122504
|
# Copyright [2019] [FORTH-ICS]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import argparse
import struct
import sys
import pyh3lib
def ExpiresAt(h3):
"""
Delete all the objects that have the ExpiresAt attribute
and the time that is specified in the ExpiresAt has come.
:param now: the time, now
:type now: float
:returns: nothing
"""
now = time.clock_gettime(time.CLOCK_REALTIME)
# list all the buckets
for h3_bucket in h3.list_buckets():
done = False
offset = 0
# list all the objects that have the ExpiresAt attribute
while not done:
h3_objects = h3.list_objects_with_metadata(h3_bucket, "ExpiresAt", offset)
for h3_object in h3_objects:
# the h3_object contains the object's name
expire_at = h3.read_object_metadata(h3_bucket, h3_object, "ExpiresAt")
if expire_at != b'':
# try to parse
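                    # ExpiresAt metadata is expected to hold an 8-byte packed double (epoch seconds).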
try:
h3_object_remove_timestamp = struct.unpack('d', expire_at)
except struct.error:
continue
# Check if we must remove the object
if (now >= h3_object_remove_timestamp[0]) :
h3.delete_object(h3_bucket, h3_object)
done = h3_objects.done
offset = h3_objects.nextOffset
def main(cmd=None):
parser = argparse.ArgumentParser(description='ExpiresAt Controller')
parser.add_argument('--storage', required=True, help=f'H3 storage URI')
args = parser.parse_args(cmd)
config_path = args.storage
if config_path:
h3 = pyh3lib.H3(config_path)
ExpiresAt(h3)
else:
parser.print_help(sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3224639
|
<filename>boatsandjoy_api/availability/requests.py
from dataclasses import dataclass
from datetime import date
@dataclass
class GetDayAvailabilityRequest:
date: date
apply_resident_discount: bool
@dataclass
class GetMonthAvailabilityRequest:
month: int
year: int
|
StarcoderdataPython
|
22517
|
import re
import urllib
import numbers
from clayful.models import register_models
from clayful.requester import request
from clayful.exception import ClayfulException
class Clayful:
base_url = 'https://api.clayful.io'
default_headers = {
'Accept-Encoding': 'gzip',
'User-Agent': 'clayful-python',
'Clayful-SDK': 'clayful-python'
}
plugins = {
'request': request
}
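    # Event hooks: callbacks registered via Clayful.on('request'/'response', fn) run before each API call and after each response or error.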
listeners = {
'request': [],
'response': []
}
@staticmethod
def options_to_headers(o = {}):
headers = {}
if 'language' in o:
headers['Accept-Language'] = o['language']
if 'currency' in o:
headers['Accept-Currency'] = o['currency']
if 'time_zone' in o:
headers['Accept-Time-Zone'] = o['time_zone']
if 'client' in o:
headers['Authorization'] = 'Bearer ' + o['client']
if 'customer' in o:
headers['Authorization-Customer'] = o['customer']
if 'reCAPTCHA' in o:
headers['reCAPTCHA-Response'] = o['reCAPTCHA'];
if 'debug_language' in o:
headers['Accept-Debug-Language'] = o['debug_language']
if 'headers' in o:
headers.update(o['headers'])
return headers
@staticmethod
def get_endpoint(path):
return Clayful.base_url + path
@staticmethod
def normalize_query_values(query = {}):
copied = query.copy()
for key in copied:
if isinstance(copied[key], bool):
copied[key] = 'true' if copied[key] == True else 'false'
if isinstance(copied[key], numbers.Number):
copied[key] = str(copied[key])
return copied
@staticmethod
def extract_request_arguments(options):
result = {
'http_method': options['http_method'],
'request_url': options['path'],
'payload': None,
'query': {},
'headers': {},
'meta': {}
}
rest = options['args'][len(options['params']):]
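        # 'rest' holds the positional arguments left after the URL path parameters: an optional payload (POST/PUT only), then an options dict carrying query/headers/meta.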
for i, key in enumerate(options['params']):
result['request_url'] = result['request_url'].replace('{' + key + '}', str(options['args'][i]))
if (options['http_method'] == 'POST' or options['http_method'] == 'PUT') and (options.get('without_payload', False) == False):
result['payload'] = (rest[0:1] or (None,))[0]
rest = rest[1:]
query_headers = (rest[0:1] or ({},))[0]
result['query'] = Clayful.normalize_query_values(query_headers.get('query', {}))
result['headers'] = Clayful.options_to_headers(query_headers)
result['meta'] = query_headers.get('meta', {})
return result
@staticmethod
def call_api(options):
extracted = Clayful.extract_request_arguments(options)
extracted.update({
'request_url': Clayful.get_endpoint(extracted['request_url']),
'model_name': options['model_name'],
'method_name': options['method_name'],
'uses_form_data': options.get('uses_form_data', False),
'error': None,
'response': None,
})
default_headers = Clayful.default_headers.copy()
# Extend default headers with header options
default_headers.update(extracted['headers'])
extracted['headers'] = default_headers
Clayful.trigger('request', extracted)
try:
response = Clayful.plugins['request'](extracted)
extracted['response'] = response
Clayful.trigger('response', extracted)
return response
except ClayfulException as e:
extracted['error'] = e
Clayful.trigger('response', extracted)
raise
@staticmethod
def config(options = {}):
headers = Clayful.options_to_headers(options)
Clayful.default_headers.update(headers)
@staticmethod
def install(scope, plugin):
if scope in Clayful.plugins:
Clayful.plugins[scope] = plugin
@staticmethod
def on(event_name, callback):
listeners = Clayful.listeners.get(event_name, None)
if listeners is None:
return
listeners.append(callback)
@staticmethod
def off(event_name, callback):
listeners = Clayful.listeners.get(event_name, None)
if (listeners is None) or (not callback in listeners):
return
listeners.remove(callback)
@staticmethod
def trigger(event_name, data):
listeners = Clayful.listeners.get(event_name, None)
if listeners is None:
return
for listener in listeners:
listener(data)
@staticmethod
def format_image_url(base_url, options = {}):
query = []
normalized = Clayful.normalize_query_values(options)
for key in normalized:
query.append(key + '=' + normalized.get(key, ''))
query = '&'.join(query)
if bool(query):
query = '?' + query
return base_url + query
@staticmethod
def format_number(number, currency = {}):
if not isinstance(number, numbers.Number):
return ''
precision = currency.get('precision', None)
delimiter = currency.get('delimiter', {})
thousands = delimiter.get('thousands', '')
decimal = delimiter.get('decimal', '.')
if isinstance(precision, numbers.Number):
n = 10 ** precision
number = round(number * n) / n
# To deal with 0.0 case..
if precision == 0:
number = int(number)
parts = str(number).split('.')
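        # Group the integer part into thousands: reverse, chunk into runs of up to 3 digits, join with the delimiter, reverse back.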
a = thousands.join(re.findall('.{1,3}', parts[0][::-1]))[::-1]
b = parts[1] if len(parts) > 1 else ''
if isinstance(precision, numbers.Number):
diff = precision - len(b)
diff = 0 if diff < 0 else diff
b += '0' * diff
decimal = decimal if bool(b) else ''
return decimal.join([a, b])
@staticmethod
def format_price(number, currency = {}):
formatted_number = Clayful.format_number(number, currency)
if not bool(formatted_number):
return ''
symbol = currency.get('symbol', '')
format = currency.get('format', '{price}')
return format.replace('{symbol}', symbol).replace('{price}', formatted_number)
# Register models
register_models(Clayful)
|
StarcoderdataPython
|
30373
|
from django.conf import settings
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser, \
PermissionsMixin
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from issues.models import Proposal, ProposalVote, ProposalVoteValue, \
ProposalStatus
from meetings.models import MeetingParticipant
from users.default_roles import DefaultGroups
import datetime
import logging
import random
import string
CODE_LENGTH = 48
logger = logging.getLogger(__name__)
class OCUserManager(BaseUserManager):
@classmethod
def normalize_email(cls, email):
return email.lower()
def get_by_natural_key(self, username):
return self.get(email__iexact=username)
    def create_user(self, email, display_name=None, password=None, **kwargs):  # default password was redacted upstream; None matches Django convention
"""
Creates and saves a User with the given email, display name and
password.
"""
if not email:
raise ValueError('Users must have an email address')
if not display_name:
display_name = email
user = self.model(
email=OCUserManager.normalize_email(email),
display_name=display_name,
**kwargs
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, display_name, password):
"""
Creates and saves a superuser with the given email, display name and
password.
"""
user = self.create_user(email,
password=password,
display_name=display_name
)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class OCUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(_('email address'), max_length=255, unique=True,
db_index=True,
)
display_name = models.CharField(_("Your name"), max_length=200)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = OCUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['display_name']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def __unicode__(self):
return self.display_name
def get_full_name(self):
# The user is identified by their email address
return self.display_name
def get_short_name(self):
# The user is identified by their email address
return self.display_name
def get_default_group(self, community):
try:
return self.memberships.get(community=community).default_group_name
except Membership.DoesNotExist:
return ""
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
class MembershipManager(models.Manager):
def board(self):
return self.get_query_set().exclude(
default_group_name=DefaultGroups.MEMBER)
def none_board(self):
return self.get_query_set().filter(
default_group_name=DefaultGroups.MEMBER)
class Membership(models.Model):
community = models.ForeignKey('communities.Community', verbose_name=_("Community"),
related_name='memberships')
user = models.ForeignKey(OCUser, verbose_name=_("User"),
related_name='memberships')
default_group_name = models.CharField(_('Group'), max_length=50,
choices=DefaultGroups.CHOICES)
created_at = models.DateTimeField(auto_now_add=True,
verbose_name=_("Created at"))
invited_by = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_("Invited by"),
related_name="members_invited", null=True,
blank=True)
    in_position_since = models.DateField(default=datetime.date.today,  # pass the callable so "today" is evaluated per save, not at import time
verbose_name=_("In position since"))
objects = MembershipManager()
class Meta:
unique_together = (("community", "user"),)
verbose_name = _("Community Member")
verbose_name_plural = _("Community Members")
def __unicode__(self):
return "%s: %s (%s)" % (self.community.name, self.user.display_name,
self.get_default_group_name_display())
@models.permalink
def get_absolute_url(self):
return "member_profile", (self.community.id, self.id)
def get_permissions(self):
return DefaultGroups.permissions[self.default_group_name]
def total_meetings(self):
""" In the future we'll check since joined to community or rejoined """
return self.community.meetings.filter(held_at__gte=self.in_position_since).count()
def meetings_participation(self):
""" In the future we'll check since joined to community or rejoined """
return MeetingParticipant.objects.filter(user=self.user, is_absent=False,
meeting__community=self.community,
meeting__held_at__gte=self.in_position_since).count()
def meetings_participation_percantage(self):
""" In the future we'll check since joined to community or rejoined """
return round((float(self.meetings_participation()) / float(self.total_meetings())) * 100.0)
def member_open_tasks(self, user=None, community=None):
return Proposal.objects.object_access_control(
user=user, community=community).filter(status=ProposalStatus.ACCEPTED, assigned_to_user=self.user, active=True, task_completed=False).exclude(due_by__lte=datetime.date.today())
def member_close_tasks(self, user=None, community=None):
""" Need to create a field to determine closed tasks """
return Proposal.objects.object_access_control(
user=user, community=community).filter(status=ProposalStatus.ACCEPTED, assigned_to_user=self.user, active=True, task_completed=True)
def member_late_tasks(self, user=None, community=None):
return Proposal.objects.object_access_control(
user=user, community=community).filter(status=ProposalStatus.ACCEPTED, assigned_to_user=self.user, due_by__lte=datetime.date.today(), active=True, task_completed=False)
def member_votes_dict(self):
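        # Group this member's registered board votes (since taking the position) by stance (pro/neutral/con) and by issue, and return per-stance totals.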
res = {'pro': {}, 'neut': {}, 'con': {}}
pro_count = 0
con_count = 0
neut_count = 0
votes = self.user.board_votes.select_related('proposal') \
.filter(proposal__issue__community_id=self.community_id,
proposal__register_board_votes=True,
proposal__active=True,
proposal__decided_at_meeting__held_at__gte=self.in_position_since) \
.exclude(proposal__status=ProposalStatus.IN_DISCUSSION).order_by('-proposal__issue__created_at', 'proposal__id')
for v in votes:
if not v.proposal.register_board_votes:
continue
if v.value == ProposalVoteValue.NEUTRAL:
key = 'neut'
neut_count += 1
elif v.value == ProposalVoteValue.PRO:
key = 'pro'
pro_count += 1
elif v.value == ProposalVoteValue.CON:
key = 'con'
con_count += 1
issue_key = v.proposal.issue
p_list = res[key].setdefault(issue_key, [])
p_list.append(v.proposal)
res['pro_count'] = pro_count
res['con_count'] = con_count
res['neut_count'] = neut_count
return res
def _user_board_votes(self):
return self.user.board_votes.select_related('proposal').filter(proposal__issue__community_id=self.community_id,
proposal__active=True,
proposal__register_board_votes=True,
proposal__decided_at_meeting__held_at__gte=self.in_position_since)
def member_proposal_pro_votes_accepted(self):
return self._user_board_votes().filter(value=ProposalVoteValue.PRO,
proposal__status=ProposalStatus.ACCEPTED)
def member_proposal_con_votes_rejected(self):
return self._user_board_votes().filter(value=ProposalVoteValue.CON,
proposal__status=ProposalStatus.REJECTED)
def member_proposal_nut_votes_accepted(self):
return self._user_board_votes().filter(value=ProposalVoteValue.NEUTRAL,
proposal__status=ProposalStatus.ACCEPTED)
CODE_CHARS = string.lowercase + string.digits
def create_code(length=CODE_LENGTH):
"""
Creates a random code of lowercase letters and numbers
"""
return "".join(random.choice(CODE_CHARS) for _x in xrange(length))
class EmailStatus(object):
PENDING = 0
SENT = 1
FAILED = 2
choices = (
(PENDING, _('Pending')),
(SENT, _('Sent')),
(FAILED, _('Failed')),
)
class Invitation(models.Model):
community = models.ForeignKey('communities.Community',
verbose_name=_("Community"),
related_name='invitations')
created_at = models.DateTimeField(auto_now_add=True, verbose_name=_("Created at"))
created_by = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_("Created by"),
related_name="invitations_created")
name = models.CharField(_("Name"), max_length=200, null=True, blank=True)
email = models.EmailField(_("Email"))
message = models.TextField(_("Message"), null=True, blank=True)
code = models.CharField(max_length=CODE_LENGTH, default=create_code)
user = models.ForeignKey(OCUser, verbose_name=_("User"),
related_name='invitations', null=True, blank=True)
default_group_name = models.CharField(_('Group'), max_length=50,
choices=DefaultGroups.CHOICES)
status = models.PositiveIntegerField(_("Status"),
choices=EmailStatus.choices, default=EmailStatus.PENDING)
times_sent = models.PositiveIntegerField(_("Times Sent"), default=0)
error_count = models.PositiveIntegerField(_("Error count"), default=0)
last_sent_at = models.DateTimeField(_("Sent at"), null=True, blank=True)
class Meta:
unique_together = (("community", "email"),)
verbose_name = _("Invitation")
verbose_name_plural = _("Invitations")
DEFAULT_MESSAGE = _("The system will allow you to take part in the decision making process of %s. "
"Once you've joined, you'll be able to see the topics for the agenda in the upcoming meeting, decisions at previous meetings, and in the near future you'll be able to discuss and influence them.")
def __unicode__(self):
return "%s: %s (%s)" % (self.community.name, self.email,
self.get_default_group_name_display())
@models.permalink
def get_absolute_url(self):
return "accept_invitation", (self.code,)
def send(self, sender, recipient_name='', base_url=None):
if not base_url:
base_url = settings.HOST_URL
subject = _("Invitation to %s") % self.community.name
d = {
'base_url': base_url,
'object': self,
'recipient_name': recipient_name,
}
message = render_to_string("emails/invitation.txt", d)
recipient_list = [self.email]
from_email = "%s <%s>" % (self.community.name, settings.FROM_EMAIL)
self.last_sent_at = timezone.now()
try:
send_mail(subject, message, from_email, recipient_list)
self.times_sent += 1
self.status = EmailStatus.SENT
self.save()
return True
except:
logger.error("Invitation email sending failed", exc_info=True)
self.error_count += 1
self.status = EmailStatus.FAILED
self.save()
return False
|
StarcoderdataPython
|
1741936
|
<filename>__main__.py<gh_stars>1-10
# tree --dirsfirst --noreport -I 'Dataset*|wandb*|__pycache__|__init__.py|logs|SampleImages|List.md' > List.md
from Data import explore, process, prepare
from torch.utils.data import DataLoader
from torchvision.utils import save_image, make_grid
from torch.utils.tensorboard import SummaryWriter
from common.utils import create_dir, check_dir_exists, initialize_weights
import Config
from Execute.train import train
import torch.nn as nn
from torch import optim
from Models.generator import Generator
from Models.discriminator import Discriminator
from rich import pretty
import sys
# import os
pretty.install()
import wandb
wandb.init(project="pix2pix", entity="rohitkuk")
"""
ToDos:
- Data Prepare : Done
- Data Explore : Done
- Data Process : Done
- Data Augmentations : Done
- Generator : Done
- Disriminator : Done
- Training : Done
- Requirments.txt : Done
- TensorBoard Integration : Done
- WandB Integration : Done
- ReadME.md : WIP
- Logging : TBD (Try Implementing Rich Library)
- Argument Parsing : TBD
- Packaging : TBD
- Frontend or UI : TBD
- Optimization : Continous
- Test
- Unit : TBD
- Integration : TBD
- System : TBD
- UAT : TBD
"""
def main(dataset_name, Data_Path):
print("Preparing Dataset")
prepare.main( Data_Path, expectedFileName=False, unzip_path= "Dataset", keep_cache=False)
dataset = process.Pix2PixDatasets(dataset_name, reverse=True)
    loader = DataLoader(dataset, batch_size=Config.BATCH_SIZE, shuffle=True, num_workers=Config.NUM_WORKERS if Config.NUM_WORKERS else 0)
BCE = nn.BCEWithLogitsLoss()
l1_loss = nn.L1Loss()
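    # Pix2Pix objective: adversarial BCE loss plus an L1 reconstruction term (presumably weighted by a lambda from Config inside the training loop).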
disc = Discriminator(Config.IMG_CHANNELS, Config.FEATURES).to(Config.DEVICE)
gen = Generator(Config.IMG_CHANNELS, Config.FEATURES).to(Config.DEVICE)
initialize_weights(disc), initialize_weights(gen)
disc_optim = optim.Adam(disc.parameters(), lr = Config.LEARNING_RATE, betas=(0.5, 0.999))
gen_optim = optim.Adam(gen.parameters(), lr = Config.LEARNING_RATE, betas=(0.5, 0.999))
# Tensorboard Implementation
writer_real = SummaryWriter(f"logs/real")
writer_fake = SummaryWriter(f"logs/fake")
wandb.watch(gen)
wandb.watch(disc)
# training
disc.train()
gen.train()
step = 0
print("Start Training")
for epoch in range(Config.EPOCHS):
step = train(disc, gen, BCE, disc_optim, gen_optim, l1_loss,epoch,loader, Config , make_grid, wandb, writer_real, writer_fake, step)
if __name__ == "__main__":
main(dataset_name = sys.argv[1], Data_Path="vikramtiwari/pix2pix-dataset")
# explore.show_grid(GRIDSIZE=1, ROWS=1, COLS=1)
# for x, y in loader:
# create_dir("SampleImages") if not check_dir_exists('SampleImages') else None
# print("here")
# print(x.shape)
# save_image(x, "SampleImages/x.png")
# save_image(y, "SampleImages/y.png")
# import sys
# sys.exit()
|
StarcoderdataPython
|
4825603
|
import json
import os
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.http import JsonResponse
from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.conf import settings
from django.core.files.storage import default_storage
from .forms import AnswerForm, ProfileForm
from .models import (AllTags, AnsweraQuestion, AskaQuestion, Comments, Reply,
hidden, report, tags, Profile, Roles, QuestionAccessControl)
from applications.globals.models import ExtraInfo
from django.contrib.auth.models import User
from django.core.paginator import Paginator
import math
PAGE_SIZE = 4
# Create your views here.
@login_required
def feeds(request):
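    # Main feed: paginated question list with optional keyword search, new-question posting, and per-user favourite-tag management.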
query = AskaQuestion.objects.order_by('-uploaded_at')
    paginator = Paginator(query, PAGE_SIZE)  # Show PAGE_SIZE questions per page.
total_page = math.ceil(query.count()/PAGE_SIZE)
if request.GET.get("page_number") :
current_page = int(request.GET.get("page_number"))
else:
current_page = 1
previous_page = current_page - 1
next_page = current_page + 1
keyword = ""
# query = paginator.page(current_page)
if request.GET.get("search") and request.GET.get('keyword') :
print("searching")
q = request.GET.get('keyword')
questions = AskaQuestion.objects.all()
result = questions.filter(Q(subject__icontains=q) | Q(description__icontains=q)).order_by('-uploaded_at')
query = result
paginator = Paginator(query, PAGE_SIZE)
keyword = q.split(" ")
keyword = "+".join(keyword)
total_page = math.ceil(query.count()/PAGE_SIZE)
if request.method == 'POST':
if request.POST.get('add_qus') :
print("Post a Question request received")
question = AskaQuestion.objects.create(user=request.user)
question.subject = request.POST.get('subject')
question.description = request.POST.get('content')
if request.FILES :
question.file = request.FILES['file']
tag = request.POST.get('Add_Tag')
tag = tag[8:]
ques_tag = []
result = []
ques_tag = [int(c) for c in tag.split(",")]
for i in range(0, len(ques_tag)):
result = AllTags.objects.get(id=ques_tag[i])
question.select_tag.add(result)
if request.POST.get('anonymous'):
question.anonymous_ask = True;
else:
question.anonymous_ask = False;
question.save()
role_check = Roles.objects.filter(user=request.user)
if len(role_check) > 0 and request.POST.get("from_admin"):
access = QuestionAccessControl.objects.create(question=question, canVote=True, canAnswer=True, canComment = True, posted_by = role_check[0])
if request.POST.get("RestrictVote"):
access.canVote = False
if request.POST.get("RestrictAnswer"):
access.canAnswer = False
if request.POST.get("RestrictComment"):
access.canComment = False
access.save()
return redirect("/feeds/admin")
query = AskaQuestion.objects.order_by('-uploaded_at')
if request.POST.get('search'):
q = request.POST.get('keyword')
questions = AskaQuestion.objects.all()
result = questions.filter(Q(subject__icontains=q) | Q(description__icontains=q)).order_by('-uploaded_at')
query = result
paginator = Paginator(query, PAGE_SIZE)
# adding user's favourite tags
if request.POST.get("add_tag"):
fav_tag=request.POST.get('tag') # returning string
a = []
fav_tag = fav_tag[4:]
a= [int(c) for c in fav_tag.split(",")] # listing queery objects
print(a)
for i in range(0, len(a)):
temp = AllTags.objects.get(pk=a[i])
new = tags.objects.create(user=request.user,my_subtag=temp)
new.my_tag = temp.tag
print(AllTags.objects.get(pk=a[i]))
new.save()
return redirect("/feeds")
all_tags = AllTags.objects.values('tag').distinct()
askqus_subtags = AllTags.objects.all()
user_tags = tags.objects.values("my_tag").distinct().filter(Q(user__username=request.user.username))
u_tags = tags.objects.all().filter(Q(user__username=request.user.username))
a_tags = tags.objects.values('my_subtag').filter(Q(user__username=request.user.username))
# print(tags.objects.all().filter(Q(my_tag__icontains='CSE')))
ques = []
try:
query = paginator.page(current_page)
except:
query = []
hid = hidden.objects.all()
for q in query:
isliked = 0
isdisliked = 0
hidd = 0
isSpecial = 0
profi = Profile.objects.all().filter(user=q.user)
if(q.likes.all().filter(username=request.user.username).count()==1):
isliked = 1
if(hid.all().filter(user=request.user, question = q).count()==1):
hidd = 1
if(q.dislikes.all().filter(username=request.user.username).count()==1):
isdisliked = 1
access_check = QuestionAccessControl.objects.filter(question=q)
if len(access_check)>0:
isSpecial = 1
temp = {
'access' : access_check,
'isSpecial' : isSpecial,
'profile':profi,
'ques' : q,
'isliked':isliked,
'hidd' : hidd,
'disliked': isdisliked,
'votes':q.total_likes() - q.total_dislikes(),
}
ques.append(temp)
add_tag_list = AllTags.objects.all()
add_tag_list = add_tag_list.exclude(pk__in=a_tags)
role_data = Roles.objects.all()
context ={
'role' : role_data,
'hidden' : hid,
'form_answer': AnswerForm(),
'Tags': user_tags,
'questions': ques,
'username': request.user.username,
'subtags': askqus_subtags,
'add_tag_list' : add_tag_list,
'pages' : {
'current_page' : current_page,
'total_page' : total_page,
'previous_page' : previous_page,
'next_page' : next_page,
},
"keyword": keyword,
'a': u_tags.filter(Q(my_tag__icontains='CSE')),
'b' : u_tags.filter(Q(my_tag__icontains='ECE')),
'c' : u_tags.filter(Q(my_tag__icontains='Mechanical')),
'd' : u_tags.filter(Q(my_tag__icontains='Technical-Clubs')),
'e' : u_tags.filter(Q(my_tag__icontains='Cultural-Clubs')),
'f' : u_tags.filter(Q(my_tag__icontains='Sports-Clubs')),
'g' : u_tags.filter(Q(my_tag__icontains='Business-and-Career')),
'h' : u_tags.filter(Q(my_tag__icontains='Entertainment')),
'i' : u_tags.filter(Q(my_tag__icontains='IIITDMJ-Campus')),
'j' : u_tags.filter(Q(my_tag__icontains='Jabalpur-city')),
'k' : u_tags.filter(Q(my_tag__icontains='IIITDMJ-Rules-and-Regulations')),
'l' : u_tags.filter(Q(my_tag__icontains='Academics')),
'm' : u_tags.filter(Q(my_tag__icontains='IIITDMJ')),
'n' : u_tags.filter(Q(my_tag__icontains='Life-Relationship-and-Self')),
'o' : u_tags.filter(Q(my_tag__icontains='Technology-and-Education')),
'p' : u_tags.filter(Q(my_tag__icontains='Programmes')),
'q' : u_tags.filter(Q(my_tag__icontains='Others')),
'r' : u_tags.filter(Q(my_tag__icontains='Design')),
}
return render(request, 'feeds/feeds_main.html', context)
def Request(request):
question = get_object_or_404(AskaQuestion, id=request.POST.get('id'))
print('Python')
question.is_requested = False
if question.requests.filter(id=request.user.id).exists():
question.requests.remove(request.user)
question.is_requested = False
question.save()
else:
question.requests.add(request.user)
question.is_requested = True
question.save()
print(question.total_requests())
context ={
'question' : question,
'question.is_requested' : question.is_requested,
'question.total_requests' : question.total_requests(),
}
if request.is_ajax():
html = render_to_string('feeds/question_request_count.html', context, request=request)
return JsonResponse({'form': html})
# Ajax called for comments to saved and display them
def Comment_Text(request):
if request.method == 'POST':
print('Ajax called')
question = get_object_or_404(AskaQuestion, id=request.POST.get('id'))
comment = Comments.objects.create(user=request.user,question=question)
comment.comment_text = request.POST.get('comment_box')
comment.save()
print(comment.id)
msg = request.POST.get('comment_box', None)
print('saved')
context = {
'question': question,
'comment': comment,
'msg': msg,
}
# obj = json.dumps(context)
# comment = Comments.objects.order_by('-commented_at')
# return HttpResponse(obj, content_type='application/json')
if request.is_ajax():
html = render_to_string('feeds/comment_text.html', context, request=request)
return JsonResponse({'form': html})
def Reply_Text(request):
if request.method == 'POST':
print('Ajax called')
question = get_object_or_404(AskaQuestion, id=request.POST.get('ques_id'))
print(request.POST.get('ques_id'))
comment = get_object_or_404(Comments, id=request.POST.get('id'))
reply = Reply.objects.create(user=request.user, comment=comment)
reply.content = request.POST.get('comment_box')
reply.save()
print(comment.id)
msg = request.POST.get('comment_box', None)
print('saved')
context = {
'question': question,
'comment': comment,
'reply': reply,
'msg': msg,
}
# obj = json.dumps(context)
# comment = Comments.objects.order_by('-commented_at')
# return HttpResponse(obj, content_type='application/json')
if request.is_ajax():
html = render_to_string('feeds/comment_text.html', context, request=request)
return JsonResponse({'form': html})
@login_required
def LikeComment(request):
# question = get_object_or_404(AskaQuestion, id=request.POST.get('id'))
comment = Comments.objects.get(id=request.POST.get('id'))
# comment.question = question
print('Liking comment')
# print(comment.likes_comment.filter(id=request.user.id).exists())
print(comment.is_liked)
if comment.is_liked:
comment.is_liked = False
comment.likes_comment.remove(request.user)
comment.save()
else:
comment.is_liked = True
comment.likes_comment.add(request.user)
print(comment.total_likes_comment())
comment.save()
context ={
'comment' : comment,
'comment.is_liked' : comment.is_liked,
# 'comment.likes': comment.like,
'comment.total_likes_comment' : comment.total_likes_comment(),
}
if request.is_ajax():
html = render_to_string('feeds/like_section_comment.html', context, request=request)
return JsonResponse({'form': html})
def delete_comment(request):
if request.method == 'POST':
print("deleting comment")
comment_id = request.POST.get("comment_id")
comment = Comments.objects.filter(pk=comment_id)
comment.delete()
print(comment)
return JsonResponse({"done":1})
def delete_answer(request):
if request.method == 'POST':
print("deleting answer")
answer_id = request.POST.get("answer_id")
print(answer_id)
answer = AnsweraQuestion.objects.filter(pk=answer_id)
answer.delete()
# print(answer)
return JsonResponse({"done":1})
def delete_post(request, id):
if request.method == 'POST' and request.POST.get("delete"):
ques = AskaQuestion.objects.filter(pk=id)[0]
if ques.file:
pth = os.path.join(settings.BASE_DIR, '..')
default_storage.delete(pth+ques.file.url)
ques.delete()
return redirect ('/feeds/')
def hide_post(request, id):
if request.method == 'POST' and request.POST.get("hide"):
ques = AskaQuestion.objects.filter(pk=id)[0]
print(ques)
hid = hidden(user = request.user, question = ques);
hid.save()
print(hid,"sid")
return redirect ('/feeds/')
def unhide_post(request, id):
if request.method == 'POST' and request.POST.get("unhide"):
ques = AskaQuestion.objects.filter(pk=id)[0]
print(ques)
        hid = hidden.objects.filter(user=request.user, question=ques)  # unhide only this question, not every hidden post
hid.delete()
return redirect ('/feeds/')
def update_post(request, id):
redirect_to = "/feeds"
if request.method == 'POST' and request.POST.get("update"):
print(request.POST.get('anonymous_update'))
question= AskaQuestion.objects.get(pk=id)
question.subject = request.POST.get('subject')
question.description = request.POST.get('description')
tag = request.POST.get('Add_Tag')
tag = tag[8:]
ques_tag = []
result = []
ques_tag = [int(c) for c in tag.split(",")]
question.select_tag.clear()
for i in range(0, len(ques_tag)):
result = AllTags.objects.get(id=ques_tag[i])
question.select_tag.add(result)
if request.POST.get('anonymous_update')==None :
question.user= request.user
question.anonymous_ask=False
else :
question.anonymous_ask=True
if request.POST.get("isSpecial"):
access = QuestionAccessControl.objects.filter(question = question)[0]
print(access)
if request.POST.get("RestrictVote"):
access.canVote = False
else:
access.canVote = True
if request.POST.get("RestrictAnswer"):
access.canAnswer = False
else:
access.canAnswer = True
if request.POST.get("RestrictComment"):
access.canComment = False
else:
access.canComment = True
access.save()
if request.POST.get("from_url"):
redirect_to = request.POST.get("from_url")
question.save()
return redirect (redirect_to)
@login_required
def TagsBasedView(request, string):
print('Tag based View')
questions = AskaQuestion.objects.order_by('-uploaded_at')
result = questions.filter(Q(select_tag__subtag__icontains=string))
    paginator = Paginator(result, PAGE_SIZE)  # Show PAGE_SIZE questions per page.
total_page = math.ceil(result.count()/PAGE_SIZE)
if request.GET.get("page_number") :
current_page = int(request.GET.get("page_number"))
else:
current_page = 1
previous_page = current_page - 1
next_page = current_page + 1
# result = paginator.page(current_page)
user_tags = tags.objects.values("my_tag").distinct().filter(Q(user__username=request.user.username))
u_tags = tags.objects.all().filter(Q(user__username=request.user.username))
a_tags = tags.objects.values('my_subtag').filter(Q(user__username=request.user.username))
add_tag_list = AllTags.objects.all()
add_tag_list = add_tag_list.exclude(pk__in=a_tags)
askqus_subtags = AllTags.objects.all()
ques = []
result = paginator.page(current_page)
hid = hidden.objects.all()
for q in result:
isliked = 0
isdisliked = 0
hidd = 0
isSpecial = 0
profi = Profile.objects.all().filter(user=q.user)
if(q.likes.all().filter(username=request.user.username).count()==1):
isliked = 1
if(hid.all().filter(user=request.user, question = q).count()==1):
hidd = 1
if(q.dislikes.all().filter(username=request.user.username).count()==1):
isdisliked = 1
access_check = QuestionAccessControl.objects.filter(question=q)
if len(access_check)>0:
isSpecial = 1
temp = {
'access' : access_check,
'isSpecial' : isSpecial,
'profile':profi,
'ques' : q,
'isliked':isliked,
'hidd' : hidd,
'disliked': isdisliked,
'votes':q.total_likes() - q.total_dislikes(),
}
ques.append(temp)
role_data = Roles.objects.all()
context = {
"role":role_data,
'form_answer': AnswerForm(),
'Tags': user_tags,
'questions': ques,
'username': request.user.username,
'subtags': askqus_subtags,
'add_tag_list' : add_tag_list,
'pages' : {
'current_page' : current_page,
'total_page' : total_page,
'previous_page' : previous_page,
'next_page' : next_page,
},
'a': u_tags.filter(Q(my_tag__icontains='CSE')),
'b' : u_tags.filter(Q(my_tag__icontains='ECE')),
'c' : u_tags.filter(Q(my_tag__icontains='Mechanical')),
'd' : u_tags.filter(Q(my_tag__icontains='Technical-Clubs')),
'e' : u_tags.filter(Q(my_tag__icontains='Cultural-Clubs')),
'f' : u_tags.filter(Q(my_tag__icontains='Sports-Clubs')),
'g' : u_tags.filter(Q(my_tag__icontains='Business-and-Career')),
'h' : u_tags.filter(Q(my_tag__icontains='Entertainment')),
'i' : u_tags.filter(Q(my_tag__icontains='IIITDMJ-Campus')),
'j' : u_tags.filter(Q(my_tag__icontains='Jabalpur-city')),
'k' : u_tags.filter(Q(my_tag__icontains='IIITDMJ-Rules-and-Regulations')),
'l' : u_tags.filter(Q(my_tag__icontains='Academics')),
'm' : u_tags.filter(Q(my_tag__icontains='IIITDMJ')),
'n' : u_tags.filter(Q(my_tag__icontains='Life-Relationship-and-Self')),
'o' : u_tags.filter(Q(my_tag__icontains='Technology-and-Education')),
'p' : u_tags.filter(Q(my_tag__icontains='Programmes')),
'q' : u_tags.filter(Q(my_tag__icontains='Others')),
'r' : u_tags.filter(Q(my_tag__icontains='Design')),
}
return render(request, 'feeds/feeds_main.html', context)
def RemoveTag(request):
if request.method == 'POST':
print(request.POST.get('id'))
userTags = tags.objects.all().filter(Q(user=request.user))
tagto_delete = AllTags.objects.all().filter(Q(subtag=request.POST.get('id')))
userTags.filter(Q(my_subtag__in=tagto_delete)).delete()
return JsonResponse({"done":"1"})
else:
return JsonResponse({"done":"0"})
def ParticularQuestion(request, id):
result = AskaQuestion.objects.get(id=id)
a_tags = tags.objects.values('my_subtag').filter(Q(user__username=request.user.username))
all_tags_list = AllTags.objects.all()
all_tags_list= all_tags_list.exclude(pk__in=a_tags)
all_tags = AllTags.objects.values('tag').distinct()
u_tags = tags.objects.all().filter(Q(user__username=request.user.username))
askqus_subtags = AllTags.objects.all()
profile = Profile.objects.all().filter(user=result.user)
isliked = 0
isdisliked = 0
user_tags = tags.objects.values("my_tag").distinct().filter(Q(user__username=request.user.username))
if(result.likes.all().filter(username=request.user.username).count()==1):
isliked = 1
if(result.dislikes.all().filter(username=request.user.username).count()==1):
isdisliked = 1
a_tags = tags.objects.values('my_subtag').filter(Q(user__username=request.user.username))
add_tag_list = AllTags.objects.all()
add_tag_list = add_tag_list.exclude(pk__in=a_tags)
isSpecial = 0
access_check = QuestionAccessControl.objects.filter(question=result)
if len(access_check)>0:
isSpecial = 1
if request.method == 'POST':
if request.POST.get("answer_button"):
print('Particular Question')
form_answer = AnswerForm(request.POST)
if form_answer.is_valid():
instance = form_answer.save(commit=False)
instance.question = result
instance.user = request.user
instance.save()
role_data = Roles.objects.all()
context = {
'access' : access_check,
'isSpecial' : isSpecial,
'role' : role_data,
'isliked':isliked,
'disliked': isdisliked,
'votes':result.total_likes() - result.total_dislikes(),
'form_answer': AnswerForm(),
'instance': instance,
'question': result,
'Tags': user_tags,
'subtags': askqus_subtags,
'add_tag_list' : add_tag_list,
'profile' : profile,
'a': u_tags.filter(Q(my_tag__icontains='CSE')),
'b' : u_tags.filter(Q(my_tag__icontains='ECE')),
'c' : u_tags.filter(Q(my_tag__icontains='Mechanical')),
'd' : u_tags.filter(Q(my_tag__icontains='Technical-Clubs')),
'e' : u_tags.filter(Q(my_tag__icontains='Cultural-Clubs')),
'f' : u_tags.filter(Q(my_tag__icontains='Sports-Clubs')),
'g' : u_tags.filter(Q(my_tag__icontains='Business-and-Career')),
'h' : u_tags.filter(Q(my_tag__icontains='Entertainment')),
'i' : u_tags.filter(Q(my_tag__icontains='IIITDMJ-Campus')),
'j' : u_tags.filter(Q(my_tag__icontains='Jabalpur-city')),
'k' : u_tags.filter(Q(my_tag__icontains='IIITDMJ-Rules-and-Regulations')),
'l' : u_tags.filter(Q(my_tag__icontains='Academics')),
'm' : u_tags.filter(Q(my_tag__icontains='IIITDMJ')),
'n' : u_tags.filter(Q(my_tag__icontains='Life-Relationship-and-Self')),
'o' : u_tags.filter(Q(my_tag__icontains='Technology-and-Education')),
'p' : u_tags.filter(Q(my_tag__icontains='Programmes')),
'q' : u_tags.filter(Q(my_tag__icontains='Others')),
'r' : u_tags.filter(Q(my_tag__icontains='Design')),
}
return render(request, 'feeds/single_question.html', context)
else:
form = AnswerForm()
# instance = AnsweraQuestion.objects.get(question__id=id)
# print(instance.content)
role_data = Roles.objects.all()
context = {
'access' : access_check,
'isSpecial' : isSpecial,
"role" : role_data,
'isliked':isliked,
'disliked': isdisliked,
'votes':result.total_likes() - result.total_dislikes(),
'question': result,
'form_answer': form,
'question': result,
'Tags': user_tags,
'add_tag_list' : add_tag_list,
'profile' : profile,
'subtags': askqus_subtags,
'a': u_tags.filter(Q(my_tag__icontains='CSE')),
'b' : u_tags.filter(Q(my_tag__icontains='ECE')),
'c' : u_tags.filter(Q(my_tag__icontains='Mechanical')),
'd' : u_tags.filter(Q(my_tag__icontains='Technical-Clubs')),
'e' : u_tags.filter(Q(my_tag__icontains='Cultural-Clubs')),
'f' : u_tags.filter(Q(my_tag__icontains='Sports-Clubs')),
'g' : u_tags.filter(Q(my_tag__icontains='Business-and-Career')),
'h' : u_tags.filter(Q(my_tag__icontains='Entertainment')),
'i' : u_tags.filter(Q(my_tag__icontains='IIITDMJ-Campus')),
'j' : u_tags.filter(Q(my_tag__icontains='Jabalpur-city')),
'k' : u_tags.filter(Q(my_tag__icontains='IIITDMJ-Rules-and-Regulations')),
'l' : u_tags.filter(Q(my_tag__icontains='Academics')),
'm' : u_tags.filter(Q(my_tag__icontains='IIITDMJ')),
'n' : u_tags.filter(Q(my_tag__icontains='Life-Relationship-and-Self')),
'o' : u_tags.filter(Q(my_tag__icontains='Technology-and-Education')),
'p' : u_tags.filter(Q(my_tag__icontains='Programmes')),
'q' : u_tags.filter(Q(my_tag__icontains='Others')),
'r' : u_tags.filter(Q(my_tag__icontains='Design')),
}
return render(request, 'feeds/single_question.html', context)
@login_required
def profile(request, string):
if request.method == "POST":
profile = Profile.objects.all().filter(user=request.user)
Pr = None
if len(profile) == 0:
Pr = Profile(user = request.user)
else:
Pr = profile[0]
if request.POST.get("bio"):
if request.POST.get("bio") != "":
Pr.bio = request.POST.get("bio")
if request.FILES:
if Pr.profile_picture :
pth = os.path.join(settings.BASE_DIR, '..')
default_storage.delete(pth+Pr.profile_picture.url)
Pr.profile_picture = request.FILES["profile_img"]
Pr.save()
print("Profile Loading ......")
try:
usr = User.objects.get(username=string)
except:
return redirect("/feeds")
profile = Profile.objects.all().filter(user=usr)
ques = AskaQuestion.objects.all().filter(user=usr)
ans = AnsweraQuestion.objects.all().filter(user=usr)
extra = ExtraInfo.objects.all().filter(user=usr)
tags = set()
top_ques = ""
top_ans = ans
for q in ques:
if top_ques == "":
top_ques = q;
for t in q.select_tag.all():
tags.add(t)
prf = ""
ext = ""
no_img = True
if len(profile) == 0:
prf = Profile(user =usr )
prf.save()
else:
prf = profile[0]
if prf.profile_picture :
pth = os.path.join(settings.BASE_DIR, '..')
if os.path.exists(pth+prf.profile_picture.url):
no_img=False
else :
            no_img = True
if len(extra) == 0:
ext = ""
else:
ext = extra[0]
hid = hidden.objects.all().filter(user = request.user)
context = {
'profile': prf,
# 'profile_image' : profile[0].profile_picture,
'question_asked' : len(ques),
'answer_given' : len(ans),
'last_login' : usr.last_login,
'extra' : ext,
'hidden_ques' : hid,
'tags' : tags,
'top_ques' : ques,
'top_ques_len' : len(ques),
'top_ans' : ans,
'top_ans_len' : len(ans),
'no_img' : no_img
}
return render(request, 'feeds/profile.html',context)
def printques(a):
print(a.can_delete)
print(a.can_update)
print(a.user)
print(a.subject)
print(a.description)
print(a.select_tag)
print(a.file)
print(a.uploaded_at)
print(a.likes)
print(a.requests)
#dislikes = models.ManyToManyField(User, related_name='dislikes', blank=True)
print(a.is_liked)
print(a.is_requested)
print(a.request)
print(a.anonymous_ask)
print(a.total_likes)
print(a.total_dislikes)
def upvoteQuestion(request,id):
question = AskaQuestion.objects.get(id=request.POST.get('id'))
print('upvoting question')
print("-------------likes--------------")
print(question.likes.all())
print("-------------dislikes--------------")
print(question.dislikes.all())
question.dislikes.remove(request.user)
isupvoted = question.likes.all().filter(username=request.user.username).count()
if request.is_ajax() and isupvoted == 0:
question.likes.add(request.user)
return JsonResponse({'done': "1",'votes':question.total_likes() - question.total_dislikes(),})
else:
return JsonResponse({"done":"0",'votes':question.total_likes() - question.total_dislikes(),})
def downvoteQuestion(request,id):
question = AskaQuestion.objects.get(id=request.POST.get('id'))
print('upvoting question')
print("-------------likes--------------")
print(question.likes.all())
print("-------------dislikes--------------")
print(question.dislikes.all())
question.likes.remove(request.user)
isdownvoted = question.dislikes.all().filter(username=request.user.username).count()
if request.is_ajax() and isdownvoted == 0:
question.dislikes.add(request.user)
return JsonResponse({'done': "1",'votes':question.total_likes() - question.total_dislikes(),})
else:
return JsonResponse({"done":"0",'votes':question.total_likes() - question.total_dislikes(),})
def upvoteAnswer(request,id):
answer = AnsweraQuestion.objects.get(id=request.POST.get('id'))
print('upvoting answer')
print("-------------likes--------------")
print(answer.likes.all())
print("-------------dislikes--------------")
print(answer.dislikes.all())
answer.dislikes.remove(request.user)
isupvoted = answer.likes.all().filter(username=request.user.username).count()
if request.is_ajax() and isupvoted == 0:
answer.likes.add(request.user)
return JsonResponse({'done': "1",'votes':answer.total_likes() - answer.total_dislikes(),})
else:
return JsonResponse({"done":"0",'votes':answer.total_likes() - answer.total_dislikes(),})
def downvoteAnswer(request,id):
answer = AnsweraQuestion.objects.get(id=request.POST.get('id'))
print('upvoting answer')
print("-------------likes--------------")
print(answer.likes.all())
print("-------------dislikes--------------")
print(answer.dislikes.all())
answer.likes.remove(request.user)
isdownvoted = answer.dislikes.all().filter(username=request.user.username).count()
if request.is_ajax() and isdownvoted == 0:
answer.dislikes.add(request.user)
return JsonResponse({'done': "1",'votes':answer.total_likes() - answer.total_dislikes(),})
else:
return JsonResponse({"done":"0",'votes':answer.total_likes() - answer.total_dislikes(),})
def update_answer(request):
try:
ques_id = request.POST.get("ques_id")
answer_id = request.POST.get("answer_id")
question = AskaQuestion.objects.get(pk=ques_id)
answer = AnsweraQuestion.objects.get(pk=answer_id)
new_answer = request.POST.get("comment_box")
answer.content = new_answer
answer.save()
if request.is_ajax():
return JsonResponse({'success': 1})
except:
if request.is_ajax():
            return JsonResponse({'success': 0})
def update_comment(request):
try:
ques_id = request.POST.get("ques_id")
comment_id = request.POST.get("comment_id")
question = AskaQuestion.objects.get(pk=ques_id)
comment = Comments.objects.get(pk=comment_id)
new_comment = request.POST.get("comment_box")
print(new_comment)
comment.comment_text = new_comment
comment.save()
if request.is_ajax():
return JsonResponse({'success': 1})
except:
if request.is_ajax():
            return JsonResponse({'success': 0})
def get_page_info(current_page, query):
    paginator = Paginator(query, PAGE_SIZE)  # Show PAGE_SIZE questions per page.
    total_page = math.ceil(query.count()/PAGE_SIZE)
    if not current_page:
        current_page = 1
    previous_page = current_page - 1
    next_page = current_page + 1
    query = paginator.page(current_page)
return {
'total_page' : total_page,
'previous_page' : previous_page,
'next_page' : next_page,
}
@login_required
def admin(request):
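    # Role administration: assign a role to a user, unassign/reassign it, or transfer it to a different user.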
error = {
"user":"",
"role" : ""
}
success = {
"user":"",
}
if request.method == 'POST' and request.POST.get("addrole"):
print(request.POST.get("addrole"))
user = request.POST.get("user")
role = request.POST.get("role")
try:
user_check = User.objects.get(username=user)
print(user_check)
role_check = Roles.objects.filter(user=user_check)
if(len(role_check)==0):
role_check_role = Roles.objects.filter(role__iexact=role)
if(len(role_check_role)==0):
role = Roles.objects.create(user=user_check, role=role)
success["user"] = "Role added."
else:
error["role"] = "This role is assigned to different person."
else:
error["user"] = "User already assigned a role."
except User.DoesNotExist:
error["user"] = "User Does not exist."
if request.method == 'POST' and request.POST.get("unassignrole"):
if request.POST.get("unassignrole_value"):
try:
role_unassign = Roles.objects.get(role = request.POST.get("unassignrole_value"))
role_unassign.active = False
role_unassign.save()
success["update"] = "Role Unassigned."
except :
error["update"] = "Incorrect Username provided."
if request.method == 'POST' and request.POST.get("reassignrole"):
if request.POST.get("reassignrole_value"):
try:
role_unassign = Roles.objects.get(role = request.POST.get("reassignrole_value"))
role_unassign.active = True
role_unassign.save()
success["updatere"] = "Role Reassigned."
except :
error["updatere"] = "Error occurred."
if request.method == 'POST' and request.POST.get("unassignrole_update"):
try:
role_unassign = Roles.objects.get(role = request.POST.get("unassignrole_value"))
user_check = User.objects.get(username=request.POST.get("unassignrole_update"))
role_unassign.user = user_check
role_unassign.save()
success["update"] = "Role Reassigned."
except :
error["updateerror"] = "Incorrect Username provided."
role_data = Roles.objects.all()
role_user = ""
askqus_subtags = AllTags.objects.all()
isAdmin = False
administrativeRole = False
try:
admin = User.objects.get(username = "siddharth")
if admin == request.user:
isAdmin = True
except:
isAdmin = False
try:
admin_role = Roles.objects.filter(user = request.user)
if len(admin_role) >0 :
if admin_role[0].active == True:
role_user = admin_role[0].role
administrativeRole = True
else:
role_user = ""
except:
administrativeRole = False
print(role_user)
context = {
"role_user" : role_user,
"administrativeRole" : administrativeRole,
"isAdmin" : isAdmin,
'form_answer': AnswerForm(),
"error" : error,
"success" : success,
"role" : role_data,
'subtags': askqus_subtags,
}
return render(request, 'feeds/admin.html', context)
@login_required
def administrativeView(request, string):
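    # Feed restricted to questions posted under the given administrative role, newest first.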
print('administrative View')
# questions = AskaQuestion.objects.order_by('-uploaded_at')
role_user = Roles.objects.filter(role=string)
    try:
        role_user = role_user[0]
    except:
        return redirect("/feeds")
# print(QuestionAccessControl.objects.select_related('posted_by').filter(posted_by=role_user))
result = QuestionAccessControl.objects.select_related('posted_by').filter(posted_by=role_user).order_by('-created_at')
    paginator = Paginator(result, PAGE_SIZE)  # Show PAGE_SIZE items per page.
total_page = math.ceil(result.count()/PAGE_SIZE)
if request.GET.get("page_number") :
current_page = int(request.GET.get("page_number"))
else:
current_page = 1
previous_page = current_page - 1
next_page = current_page + 1
# result = paginator.page(current_page)
user_tags = tags.objects.values("my_tag").distinct().filter(Q(user__username=request.user.username))
u_tags = tags.objects.all().filter(Q(user__username=request.user.username))
a_tags = tags.objects.values('my_subtag').filter(Q(user__username=request.user.username))
add_tag_list = AllTags.objects.all()
add_tag_list = add_tag_list.exclude(pk__in=a_tags)
askqus_subtags = AllTags.objects.all()
ques = []
result = paginator.page(current_page)
hid = hidden.objects.all()
for q in result:
isliked = 0
isdisliked = 0
hidd = 0
isSpecial = 0
profi = Profile.objects.all().filter(user=q.question.user)
if(q.question.likes.all().filter(username=request.user.username).count()==1):
isliked = 1
if(hid.all().filter(user=request.user, question = q.question).count()==1):
hidd = 1
if(q.question.dislikes.all().filter(username=request.user.username).count()==1):
isdisliked = 1
access_check = q
isSpecial = 1
temp = {
'access' : access_check,
'isSpecial' : isSpecial,
'profile':profi,
'ques' : q.question,
'isliked':isliked,
'hidd' : hidd,
'disliked': isdisliked,
'votes':q.question.total_likes() - q.question.total_dislikes(),
}
ques.append(temp)
role_data = Roles.objects.all()
context = {
"role":role_data,
'form_answer': AnswerForm(),
'Tags': user_tags,
'questions': ques,
'username': request.user.username,
'subtags': askqus_subtags,
'add_tag_list' : add_tag_list,
'pages' : {
'current_page' : current_page,
'total_page' : total_page,
'previous_page' : previous_page,
'next_page' : next_page,
},
'a': u_tags.filter(Q(my_tag__icontains='CSE')),
'b' : u_tags.filter(Q(my_tag__icontains='ECE')),
'c' : u_tags.filter(Q(my_tag__icontains='Mechanical')),
'd' : u_tags.filter(Q(my_tag__icontains='Technical-Clubs')),
'e' : u_tags.filter(Q(my_tag__icontains='Cultural-Clubs')),
'f' : u_tags.filter(Q(my_tag__icontains='Sports-Clubs')),
'g' : u_tags.filter(Q(my_tag__icontains='Business-and-Career')),
'h' : u_tags.filter(Q(my_tag__icontains='Entertainment')),
'i' : u_tags.filter(Q(my_tag__icontains='IIITDMJ-Campus')),
'j' : u_tags.filter(Q(my_tag__icontains='Jabalpur-city')),
'k' : u_tags.filter(Q(my_tag__icontains='IIITDMJ-Rules-and-Regulations')),
'l' : u_tags.filter(Q(my_tag__icontains='Academics')),
'm' : u_tags.filter(Q(my_tag__icontains='IIITDMJ')),
'n' : u_tags.filter(Q(my_tag__icontains='Life-Relationship-and-Self')),
'o' : u_tags.filter(Q(my_tag__icontains='Technology-and-Education')),
'p' : u_tags.filter(Q(my_tag__icontains='Programmes')),
'q' : u_tags.filter(Q(my_tag__icontains='Others')),
'r' : u_tags.filter(Q(my_tag__icontains='Design')),
}
return render(request, 'feeds/feeds_main.html', context)
|
StarcoderdataPython
|
3282126
|
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia_lib.common import constants as lib_consts
##############################################################################
# Constants common to the provider drivers moved to
# octavia_lib.common.constants
# These are deprecated, to be removed in the 'U' release
##############################################################################
# 'loadbalancers'
LOADBALANCERS = lib_consts.LOADBALANCERS
# 'listeners'
LISTENERS = lib_consts.LISTENERS
# 'pools'
POOLS = lib_consts.POOLS
# HEALTHMONITORS = 'healthmonitors'
HEALTHMONITORS = lib_consts.HEALTHMONITORS
# 'members'
MEMBERS = lib_consts.MEMBERS
# 'l7policies'
L7POLICIES = lib_consts.L7POLICIES
# 'l7rules'
L7RULES = lib_consts.L7RULES
# 'PING'
HEALTH_MONITOR_PING = lib_consts.HEALTH_MONITOR_PING
# 'TCP'
HEALTH_MONITOR_TCP = lib_consts.HEALTH_MONITOR_TCP
# 'HTTP'
HEALTH_MONITOR_HTTP = lib_consts.HEALTH_MONITOR_HTTP
# 'HTTPS'
HEALTH_MONITOR_HTTPS = lib_consts.HEALTH_MONITOR_HTTPS
# 'TLS-HELLO'
HEALTH_MONITOR_TLS_HELLO = lib_consts.HEALTH_MONITOR_TLS_HELLO
# 'UDP-CONNECT'
HEALTH_MONITOR_UDP_CONNECT = lib_consts.HEALTH_MONITOR_UDP_CONNECT
SUPPORTED_HEALTH_MONITOR_TYPES = lib_consts.SUPPORTED_HEALTH_MONITOR_TYPES
# 'GET'
HEALTH_MONITOR_HTTP_METHOD_GET = lib_consts.HEALTH_MONITOR_HTTP_METHOD_GET
# 'HEAD'
HEALTH_MONITOR_HTTP_METHOD_HEAD = lib_consts.HEALTH_MONITOR_HTTP_METHOD_HEAD
# 'POST'
HEALTH_MONITOR_HTTP_METHOD_POST = lib_consts.HEALTH_MONITOR_HTTP_METHOD_POST
# 'PUT'
HEALTH_MONITOR_HTTP_METHOD_PUT = lib_consts.HEALTH_MONITOR_HTTP_METHOD_PUT
# 'DELETE'
HEALTH_MONITOR_HTTP_METHOD_DELETE = (
lib_consts.HEALTH_MONITOR_HTTP_METHOD_DELETE)
# 'TRACE'
HEALTH_MONITOR_HTTP_METHOD_TRACE = lib_consts.HEALTH_MONITOR_HTTP_METHOD_TRACE
# 'OPTIONS'
HEALTH_MONITOR_HTTP_METHOD_OPTIONS = (
lib_consts.HEALTH_MONITOR_HTTP_METHOD_OPTIONS)
# 'CONNECT'
HEALTH_MONITOR_HTTP_METHOD_CONNECT = (
lib_consts.HEALTH_MONITOR_HTTP_METHOD_CONNECT)
# 'PATCH'
HEALTH_MONITOR_HTTP_METHOD_PATCH = lib_consts.HEALTH_MONITOR_HTTP_METHOD_PATCH
SUPPORTED_HEALTH_MONITOR_HTTP_METHODS = (
lib_consts.SUPPORTED_HEALTH_MONITOR_HTTP_METHODS)
# 'REJECT'
L7POLICY_ACTION_REJECT = lib_consts.L7POLICY_ACTION_REJECT
# 'REDIRECT_TO_URL'
L7POLICY_ACTION_REDIRECT_TO_URL = lib_consts.L7POLICY_ACTION_REDIRECT_TO_URL
# 'REDIRECT_TO_POOL'
L7POLICY_ACTION_REDIRECT_TO_POOL = lib_consts.L7POLICY_ACTION_REDIRECT_TO_POOL
# 'REDIRECT_PREFIX'
L7POLICY_ACTION_REDIRECT_PREFIX = lib_consts.L7POLICY_ACTION_REDIRECT_PREFIX
SUPPORTED_L7POLICY_ACTIONS = lib_consts.SUPPORTED_L7POLICY_ACTIONS
# 'REGEX'
L7RULE_COMPARE_TYPE_REGEX = lib_consts.L7RULE_COMPARE_TYPE_REGEX
# 'STARTS_WITH'
L7RULE_COMPARE_TYPE_STARTS_WITH = lib_consts.L7RULE_COMPARE_TYPE_STARTS_WITH
# 'ENDS_WITH'
L7RULE_COMPARE_TYPE_ENDS_WITH = lib_consts.L7RULE_COMPARE_TYPE_ENDS_WITH
# 'CONTAINS'
L7RULE_COMPARE_TYPE_CONTAINS = lib_consts.L7RULE_COMPARE_TYPE_CONTAINS
# 'EQUAL_TO'
L7RULE_COMPARE_TYPE_EQUAL_TO = lib_consts.L7RULE_COMPARE_TYPE_EQUAL_TO
SUPPORTED_L7RULE_COMPARE_TYPES = lib_consts.SUPPORTED_L7RULE_COMPARE_TYPES
# 'HOST_NAME'
L7RULE_TYPE_HOST_NAME = lib_consts.L7RULE_TYPE_HOST_NAME
# 'PATH'
L7RULE_TYPE_PATH = lib_consts.L7RULE_TYPE_PATH
# 'FILE_TYPE'
L7RULE_TYPE_FILE_TYPE = lib_consts.L7RULE_TYPE_FILE_TYPE
# 'HEADER'
L7RULE_TYPE_HEADER = lib_consts.L7RULE_TYPE_HEADER
# 'COOKIE'
L7RULE_TYPE_COOKIE = lib_consts.L7RULE_TYPE_COOKIE
# 'SSL_CONN_HAS_CERT'
L7RULE_TYPE_SSL_CONN_HAS_CERT = lib_consts.L7RULE_TYPE_SSL_CONN_HAS_CERT
# 'SSL_VERIFY_RESULT'
L7RULE_TYPE_SSL_VERIFY_RESULT = lib_consts.L7RULE_TYPE_SSL_VERIFY_RESULT
# 'SSL_DN_FIELD'
L7RULE_TYPE_SSL_DN_FIELD = lib_consts.L7RULE_TYPE_SSL_DN_FIELD
SUPPORTED_L7RULE_TYPES = lib_consts.SUPPORTED_L7RULE_TYPES
# 'ROUND_ROBIN'
LB_ALGORITHM_ROUND_ROBIN = lib_consts.LB_ALGORITHM_ROUND_ROBIN
# 'LEAST_CONNECTIONS'
LB_ALGORITHM_LEAST_CONNECTIONS = lib_consts.LB_ALGORITHM_LEAST_CONNECTIONS
# 'SOURCE_IP'
LB_ALGORITHM_SOURCE_IP = lib_consts.LB_ALGORITHM_SOURCE_IP
SUPPORTED_LB_ALGORITHMS = lib_consts.SUPPORTED_LB_ALGORITHMS
# 'operating_status'
OPERATING_STATUS = lib_consts.OPERATING_STATUS
# 'ONLINE'
ONLINE = lib_consts.ONLINE
# 'OFFLINE'
OFFLINE = lib_consts.OFFLINE
# 'DEGRADED'
DEGRADED = lib_consts.DEGRADED
# 'ERROR'
ERROR = lib_consts.ERROR
# 'DRAINING'
DRAINING = lib_consts.DRAINING
# 'NO_MONITOR'
NO_MONITOR = lib_consts.NO_MONITOR
# 'operating_status'
SUPPORTED_OPERATING_STATUSES = lib_consts.SUPPORTED_OPERATING_STATUSES
# 'TCP'
PROTOCOL_TCP = lib_consts.PROTOCOL_TCP
# 'UDP'
PROTOCOL_UDP = lib_consts.PROTOCOL_UDP
# 'HTTP'
PROTOCOL_HTTP = lib_consts.PROTOCOL_HTTP
# 'HTTPS'
PROTOCOL_HTTPS = lib_consts.PROTOCOL_HTTPS
# 'TERMINATED_HTTPS'
PROTOCOL_TERMINATED_HTTPS = lib_consts.PROTOCOL_TERMINATED_HTTPS
# 'PROXY'
PROTOCOL_PROXY = lib_consts.PROTOCOL_PROXY
SUPPORTED_PROTOCOLS = lib_consts.SUPPORTED_PROTOCOLS
# 'PROMETHEUS'
PROTOCOL_PROMETHEUS = lib_consts.PROTOCOL_PROMETHEUS
# 'provisioning_status'
PROVISIONING_STATUS = lib_consts.PROVISIONING_STATUS
# Amphora has been allocated to a load balancer 'ALLOCATED'
AMPHORA_ALLOCATED = lib_consts.AMPHORA_ALLOCATED
# Amphora is being built 'BOOTING'
AMPHORA_BOOTING = lib_consts.AMPHORA_BOOTING
# Amphora is ready to be allocated to a load balancer 'READY'
AMPHORA_READY = lib_consts.AMPHORA_READY
# 'ACTIVE'
ACTIVE = lib_consts.ACTIVE
# 'PENDING_DELETE'
PENDING_DELETE = lib_consts.PENDING_DELETE
# 'PENDING_UPDATE'
PENDING_UPDATE = lib_consts.PENDING_UPDATE
# 'PENDING_CREATE'
PENDING_CREATE = lib_consts.PENDING_CREATE
# 'DELETED'
DELETED = lib_consts.DELETED
SUPPORTED_PROVISIONING_STATUSES = lib_consts.SUPPORTED_PROVISIONING_STATUSES
# 'SOURCE_IP'
SESSION_PERSISTENCE_SOURCE_IP = lib_consts.SESSION_PERSISTENCE_SOURCE_IP
# 'HTTP_COOKIE'
SESSION_PERSISTENCE_HTTP_COOKIE = lib_consts.SESSION_PERSISTENCE_HTTP_COOKIE
# 'APP_COOKIE'
SESSION_PERSISTENCE_APP_COOKIE = lib_consts.SESSION_PERSISTENCE_APP_COOKIE
SUPPORTED_SP_TYPES = lib_consts.SUPPORTED_SP_TYPES
# List of HTTP headers which are supported for insertion
SUPPORTED_HTTP_HEADERS = lib_consts.SUPPORTED_HTTP_HEADERS
# List of SSL headers for client certificate
SUPPORTED_SSL_HEADERS = lib_consts.SUPPORTED_SSL_HEADERS
###############################################################################
HEALTH_MONITOR_DEFAULT_EXPECTED_CODES = '200'
HEALTH_MONITOR_HTTP_DEFAULT_METHOD = lib_consts.HEALTH_MONITOR_HTTP_METHOD_GET
HEALTH_MONITOR_DEFAULT_URL_PATH = '/'
TYPE = 'type'
URL_PATH = 'url_path'
HTTP_METHOD = 'http_method'
HTTP_VERSION = 'http_version'
EXPECTED_CODES = 'expected_codes'
DELAY = 'delay'
TIMEOUT = 'timeout'
MAX_RETRIES = 'max_retries'
MAX_RETRIES_DOWN = 'max_retries_down'
RISE_THRESHOLD = 'rise_threshold'
DOMAIN_NAME = 'domain_name'
UPDATE_STATS = 'UPDATE_STATS'
UPDATE_HEALTH = 'UPDATE_HEALTH'
VALID_LISTENER_POOL_PROTOCOL_MAP = {
PROTOCOL_TCP: [PROTOCOL_HTTP, PROTOCOL_HTTPS,
PROTOCOL_PROXY, lib_consts.PROTOCOL_PROXYV2, PROTOCOL_TCP],
PROTOCOL_HTTP: [PROTOCOL_HTTP, PROTOCOL_PROXY,
lib_consts.PROTOCOL_PROXYV2],
PROTOCOL_HTTPS: [PROTOCOL_HTTPS, PROTOCOL_PROXY,
lib_consts.PROTOCOL_PROXYV2, PROTOCOL_TCP],
PROTOCOL_TERMINATED_HTTPS: [PROTOCOL_HTTP, PROTOCOL_PROXY,
lib_consts.PROTOCOL_PROXYV2],
PROTOCOL_UDP: [PROTOCOL_UDP],
lib_consts.PROTOCOL_SCTP: [lib_consts.PROTOCOL_SCTP],
lib_consts.PROTOCOL_PROMETHEUS: []}
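# Illustrative check (not part of the upstream constants): API validation can use
# the map above roughly as
#   pool_protocol in VALID_LISTENER_POOL_PROTOCOL_MAP.get(listener_protocol, [])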
# API Integer Ranges
MIN_PORT_NUMBER = 1
MAX_PORT_NUMBER = 65535
DEFAULT_CONNECTION_LIMIT = -1
MIN_CONNECTION_LIMIT = -1
DEFAULT_WEIGHT = 1
MIN_WEIGHT = 0
MAX_WEIGHT = 256
DEFAULT_MAX_RETRIES_DOWN = 3
MIN_HM_RETRIES = 1
MAX_HM_RETRIES = 10
# 24 days in milliseconds: 24 (days) * 24 (h) * 60 (min) * 60 (s) * 1000 (ms)
MAX_TIMEOUT = 24 * 24 * 60 * 60 * 1000
MIN_TIMEOUT = 0
DEFAULT_TIMEOUT_CLIENT_DATA = 50000
DEFAULT_TIMEOUT_MEMBER_CONNECT = 5000
DEFAULT_TIMEOUT_MEMBER_DATA = 50000
DEFAULT_TIMEOUT_TCP_INSPECT = 0
MUTABLE_STATUSES = (lib_consts.ACTIVE,)
DELETABLE_STATUSES = (lib_consts.ACTIVE, lib_consts.ERROR)
FAILOVERABLE_STATUSES = (lib_consts.ACTIVE, lib_consts.ERROR)
# Note: The database Amphora table has a foreign key constraint against
# the provisioning_status table
SUPPORTED_AMPHORA_STATUSES = (
lib_consts.AMPHORA_ALLOCATED, lib_consts.AMPHORA_BOOTING, lib_consts.ERROR,
lib_consts.AMPHORA_READY, lib_consts.DELETED, lib_consts.PENDING_CREATE,
lib_consts.PENDING_DELETE)
AMPHORA_VM = 'VM'
SUPPORTED_AMPHORA_TYPES = (AMPHORA_VM,)
DISTINGUISHED_NAME_FIELD_REGEX = lib_consts.DISTINGUISHED_NAME_FIELD_REGEX
# For redirect, only codes 301, 302, 303, 307 and 308 are supported.
SUPPORTED_L7POLICY_REDIRECT_HTTP_CODES = [301, 302, 303, 307, 308]
SUPPORTED_HTTP_VERSIONS = [1.0, 1.1]
MIN_POLICY_POSITION = 1
# Largest a 32-bit integer can be, which is a limitation
# here if you're using MySQL, as most probably are. This just needs
# to be larger than any existing rule position numbers which will
# definitely be the case with 2147483647
MAX_POLICY_POSITION = 2147483647
# Testing showed haproxy config failed to parse after more than
# 53 rules per policy
MAX_L7RULES_PER_L7POLICY = 50
# See RFCs 2616, 2965, 6265, 7230: Should match characters valid in a
# http header or cookie name.
HTTP_HEADER_NAME_REGEX = r'\A[a-zA-Z0-9!#$%&\'*+-.^_`|~]+\Z'
# See RFCs 2616, 2965, 6265: Should match characters valid in a cookie value.
HTTP_COOKIE_VALUE_REGEX = r'\A[a-zA-Z0-9!#$%&\'()*+-./:<=>?@[\]^_`{|}~]+\Z'
# See RFC 7230: Should match characters valid in a header value.
HTTP_HEADER_VALUE_REGEX = (r'\A[a-zA-Z0-9'
r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]+\Z')
# Also in RFC 7230: Should match characters valid in a header value
# when quoted with double quotes.
HTTP_QUOTED_HEADER_VALUE_REGEX = (r'\A"[a-zA-Z0-9 \t'
r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]*"\Z')
DOMAIN_NAME_REGEX = (
r'^(?=.{1,253}\.?$)(?:(?!-|[^.]+_)[A-Za-z0-9-_]{1,63}(?<!-)(?:\.|$))+$')
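# Illustrative matches for the patterns above (not part of the upstream constants):
#   re.match(HTTP_HEADER_NAME_REGEX, 'X-Forwarded-For')   # matches
#   re.match(HTTP_HEADER_NAME_REGEX, 'Bad Header')        # None (space not allowed)
#   re.match(DOMAIN_NAME_REGEX, 'www.example.org')        # matches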
# TaskFlow
SUPPORTED_TASKFLOW_ENGINE_TYPES = ['serial', 'parallel']
# Task/Flow constants
ACTIVE_CONNECTIONS = 'active_connections'
ADD_NICS = 'add_nics'
ADDED_PORTS = 'added_ports'
ADMIN_STATE_UP = 'admin_state_up'
ALLOWED_ADDRESS_PAIRS = 'allowed_address_pairs'
AMP_DATA = 'amp_data'
AMP_VRRP_INT = 'amp_vrrp_int'
AMPHORA = 'amphora'
AMPHORA_DICT = 'amphora_dict'
AMPHORA_ID = 'amphora_id'
AMPHORA_INDEX = 'amphora_index'
AMPHORA_NETWORK_CONFIG = 'amphora_network_config'
AMPHORAE = 'amphorae'
AMPHORAE_NETWORK_CONFIG = 'amphorae_network_config'
AMPS_DATA = 'amps_data'
ANTI_AFFINITY = 'anti-affinity'
ATTEMPT_NUMBER = 'attempt_number'
BASE_PORT = 'base_port'
BYTES_IN = 'bytes_in'
BYTES_OUT = 'bytes_out'
CACHED_ZONE = 'cached_zone'
CA_TLS_CERTIFICATE_ID = 'ca_tls_certificate_id'
CIDR = 'cidr'
CLIENT_CA_TLS_CERTIFICATE_ID = 'client_ca_tls_certificate_id'
CLIENT_CRL_CONTAINER_ID = 'client_crl_container_id'
COMPUTE_ID = 'compute_id'
COMPUTE_OBJ = 'compute_obj'
COMPUTE_ZONE = 'compute_zone'
CONN_MAX_RETRIES = 'conn_max_retries'
CONN_RETRY_INTERVAL = 'conn_retry_interval'
CREATED_AT = 'created_at'
CRL_CONTAINER_ID = 'crl_container_id'
DEFAULT_TLS_CONTAINER_DATA = 'default_tls_container_data'
DELETE_NICS = 'delete_nics'
DELTA = 'delta'
DELTAS = 'deltas'
DESCRIPTION = 'description'
DEVICE_OWNER = 'device_owner'
ENABLED = 'enabled'
FAILED_AMP_VRRP_PORT_ID = 'failed_amp_vrrp_port_id'
FAILED_AMPHORA = 'failed_amphora'
FAILOVER_AMPHORA = 'failover_amphora'
FAILOVER_AMPHORA_ID = 'failover_amphora_id'
FIELDS = 'fields'
FIXED_IPS = 'fixed_ips'
FLAVOR_ID = 'flavor_id'
HA_IP = 'ha_ip'
HA_PORT_ID = 'ha_port_id'
HEALTH_MON = 'health_mon'
HEALTH_MONITOR = 'health_monitor'
HEALTH_MONITOR_ID = 'health_monitor_id'
HEALTHMONITOR_ID = 'healthmonitor_id'
HEALTH_MONITOR_UPDATES = 'health_monitor_updates'
ID = 'id'
IMAGE_ID = 'image_id'
IP_ADDRESS = 'ip_address'
IPV6_ICMP = 'ipv6-icmp'
LB_NETWORK_IP = 'lb_network_ip'
L7POLICY = 'l7policy'
L7POLICY_ID = 'l7policy_id'
L7POLICY_UPDATES = 'l7policy_updates'
L7RULE = 'l7rule'
L7RULE_ID = 'l7rule_id'
L7RULE_UPDATES = 'l7rule_updates'
LISTENER = 'listener'
LISTENER_ID = 'listener_id'
LISTENER_UPDATES = 'listener_updates'
LOADBALANCER = 'loadbalancer'
LOADBALANCER_ID = 'loadbalancer_id'
LOAD_BALANCER_ID = 'load_balancer_id'
LOAD_BALANCER_UPDATES = 'load_balancer_updates'
MANAGEMENT_NETWORK = 'management_network'
MEMBER = 'member'
MEMBER_ID = 'member_id'
MEMBER_PORTS = 'member_ports'
MEMBER_UPDATES = 'member_updates'
MESSAGE = 'message'
NAME = 'name'
NETWORK = 'network'
NETWORK_ID = 'network_id'
NICS = 'nics'
OBJECT = 'object'
ORIGINAL_HEALTH_MONITOR = 'original_health_monitor'
ORIGINAL_L7POLICY = 'original_l7policy'
ORIGINAL_L7RULE = 'original_l7rule'
ORIGINAL_LISTENER = 'original_listener'
ORIGINAL_LOADBALANCER = 'original_load_balancer'
ORIGINAL_MEMBER = 'original_member'
ORIGINAL_POOL = 'original_pool'
PASSIVE_FAILURE = 'passive_failure'
PEER_PORT = 'peer_port'
POOL = 'pool'
POOL_CHILD_COUNT = 'pool_child_count'
POOL_ID = 'pool_id'
POOL_UPDATES = 'pool_updates'
PORT = 'port'
PORT_ID = 'port_id'
PORTS = 'ports'
PROJECT_ID = 'project_id'
PROVIDER = 'provider'
PROVIDER_NAME = 'provider_name'
QOS_POLICY_ID = 'qos_policy_id'
REDIRECT_POOL = 'redirect_pool'
REQ_CONN_TIMEOUT = 'req_conn_timeout'
REQ_READ_TIMEOUT = 'req_read_timeout'
REQUEST_ERRORS = 'request_errors'
ROLE = 'role'
SECURITY_GROUPS = 'security_groups'
SECURITY_GROUP_RULES = 'security_group_rules'
SERVER_GROUP_ID = 'server_group_id'
SERVER_PEM = 'server_pem'
SNI_CONTAINER_DATA = 'sni_container_data'
SNI_CONTAINERS = 'sni_containers'
SOFT_ANTI_AFFINITY = 'soft-anti-affinity'
STATUS = 'status'
STATUS_CODE = 'status_code'
SUBNET = 'subnet'
SUBNET_ID = 'subnet_id'
TAGS = 'tags'
TENANT_ID = 'tenant_id'
TIMEOUT_DICT = 'timeout_dict'
TLS_CERTIFICATE_ID = 'tls_certificate_id'
TLS_CONTAINER_ID = 'tls_container_id'
TOPOLOGY = 'topology'
TOTAL_CONNECTIONS = 'total_connections'
UPDATED_AT = 'updated_at'
UPDATE_DICT = 'update_dict'
VALID_VIP_NETWORKS = 'valid_vip_networks'
VIP = 'vip'
VIP_ADDRESS = 'vip_address'
VIP_NETWORK = 'vip_network'
VIP_PORT_ID = 'vip_port_id'
VIP_QOS_POLICY_ID = 'vip_qos_policy_id'
VIP_SG_ID = 'vip_sg_id'
VIP_SUBNET = 'vip_subnet'
VIP_SUBNET_ID = 'vip_subnet_id'
VRRP_ID = 'vrrp_id'
VRRP_IP = 'vrrp_ip'
VRRP_GROUP = 'vrrp_group'
VRRP_PORT = 'vrrp_port'
VRRP_PORT_ID = 'vrrp_port_id'
VRRP_PRIORITY = 'vrrp_priority'
# Taskflow flow and task names
CERT_ROTATE_AMPHORA_FLOW = 'octavia-cert-rotate-amphora-flow'
CREATE_AMPHORA_FLOW = 'octavia-create-amphora-flow'
CREATE_AMPHORA_RETRY_SUBFLOW = 'octavia-create-amphora-retry-subflow'
CREATE_AMPHORA_FOR_LB_FLOW = 'octavia-create-amp-for-lb-flow'
CREATE_HEALTH_MONITOR_FLOW = 'octavia-create-health-monitor-flow'
CREATE_LISTENER_FLOW = 'octavia-create-listener_flow'
PRE_CREATE_LOADBALANCER_FLOW = 'octavia-pre-create-loadbalancer-flow'
CREATE_SERVER_GROUP_FLOW = 'octavia-create-server-group-flow'
UPDATE_LB_SERVERGROUPID_FLOW = 'octavia-update-lb-server-group-id-flow'
CREATE_LISTENERS_FLOW = 'octavia-create-all-listeners-flow'
CREATE_LOADBALANCER_FLOW = 'octavia-create-loadbalancer-flow'
CREATE_LOADBALANCER_GRAPH_FLOW = 'octavia-create-loadbalancer-graph-flow'
CREATE_MEMBER_FLOW = 'octavia-create-member-flow'
CREATE_POOL_FLOW = 'octavia-create-pool-flow'
CREATE_L7POLICY_FLOW = 'octavia-create-l7policy-flow'
CREATE_L7RULE_FLOW = 'octavia-create-l7rule-flow'
DELETE_AMPHORA_FLOW = 'octavia-delete-amphora-flow'
DELETE_EXTRA_AMPHORAE_FLOW = 'octavia-delete-extra-amphorae-flow'
DELETE_HEALTH_MONITOR_FLOW = 'octavia-delete-health-monitor-flow'
DELETE_LISTENER_FLOW = 'octavia-delete-listener_flow'
DELETE_LOADBALANCER_FLOW = 'octavia-delete-loadbalancer-flow'
DELETE_MEMBER_FLOW = 'octavia-delete-member-flow'
DELETE_POOL_FLOW = 'octavia-delete-pool-flow'
DELETE_L7POLICY_FLOW = 'octavia-delete-l7policy-flow'
DELETE_L7RULE_FLOW = 'octavia-delete-l7policy-flow'
FAILOVER_AMPHORA_FLOW = 'octavia-failover-amphora-flow'
FAILOVER_LOADBALANCER_FLOW = 'octavia-failover-loadbalancer-flow'
FINALIZE_AMPHORA_FLOW = 'octavia-finalize-amphora-flow'
LOADBALANCER_NETWORKING_SUBFLOW = 'octavia-new-loadbalancer-net-subflow'
UPDATE_HEALTH_MONITOR_FLOW = 'octavia-update-health-monitor-flow'
UPDATE_LISTENER_FLOW = 'octavia-update-listener-flow'
UPDATE_LOADBALANCER_FLOW = 'octavia-update-loadbalancer-flow'
UPDATE_MEMBER_FLOW = 'octavia-update-member-flow'
UPDATE_POOL_FLOW = 'octavia-update-pool-flow'
UPDATE_L7POLICY_FLOW = 'octavia-update-l7policy-flow'
UPDATE_L7RULE_FLOW = 'octavia-update-l7rule-flow'
UPDATE_AMPS_SUBFLOW = 'octavia-update-amps-subflow'
UPDATE_AMPHORA_CONFIG_FLOW = 'octavia-update-amp-config-flow'
POST_MAP_AMP_TO_LB_SUBFLOW = 'octavia-post-map-amp-to-lb-subflow'
CREATE_AMP_FOR_LB_SUBFLOW = 'octavia-create-amp-for-lb-subflow'
CREATE_AMP_FOR_FAILOVER_SUBFLOW = 'octavia-create-amp-for-failover-subflow'
AMP_PLUG_NET_SUBFLOW = 'octavia-plug-net-subflow'
GET_AMPHORA_FOR_LB_SUBFLOW = 'octavia-get-amphora-for-lb-subflow'
POST_LB_AMP_ASSOCIATION_SUBFLOW = (
'octavia-post-loadbalancer-amp_association-subflow')
AMPHORA_LISTENER_START_SUBFLOW = 'amphora-listener-start-subflow'
AMPHORA_LISTENER_RELOAD_SUBFLOW = 'amphora-listener-start-subflow'
MAP_LOADBALANCER_TO_AMPHORA = 'octavia-mapload-balancer-to-amphora'
RELOAD_AMPHORA = 'octavia-reload-amphora'
CREATE_AMPHORA_INDB = 'octavia-create-amphora-indb'
GENERATE_SERVER_PEM = 'octavia-generate-serverpem'
UPDATE_CERT_EXPIRATION = 'octavia-update-cert-expiration'
CERT_COMPUTE_CREATE = 'octavia-cert-compute-create'
COMPUTE_CREATE = 'octavia-compute-create'
UPDATE_AMPHORA_COMPUTEID = 'octavia-update-amphora-computeid'
MARK_AMPHORA_BOOTING_INDB = 'octavia-mark-amphora-booting-indb'
WAIT_FOR_AMPHORA = 'octavia-wait_for_amphora'
COMPUTE_WAIT = 'octavia-compute-wait'
UPDATE_AMPHORA_INFO = 'octavia-update-amphora-info'
AMPHORA_FINALIZE = 'octavia-amphora-finalize'
MARK_AMPHORA_ALLOCATED_INDB = 'octavia-mark-amphora-allocated-indb'
MARK_AMPHORA_READY_INDB = 'octavia-mark-amphora-ready-indb'
MARK_LB_ACTIVE_INDB = 'octavia-mark-lb-active-indb'
MARK_AMP_MASTER_INDB = 'octavia-mark-amp-master-indb'
MARK_AMP_BACKUP_INDB = 'octavia-mark-amp-backup-indb'
MARK_AMP_STANDALONE_INDB = 'octavia-mark-amp-standalone-indb'
GET_VRRP_SUBFLOW = 'octavia-get-vrrp-subflow'
AMP_VRRP_UPDATE = 'octavia-amphora-vrrp-update'
AMP_VRRP_START = 'octavia-amphora-vrrp-start'
AMP_VRRP_STOP = 'octavia-amphora-vrrp-stop'
AMP_UPDATE_VRRP_INTF = 'octavia-amphora-update-vrrp-intf'
CREATE_VRRP_GROUP_FOR_LB = 'octavia-create-vrrp-group-for-lb'
CREATE_VRRP_SECURITY_RULES = 'octavia-create-vrrp-security-rules'
AMP_COMPUTE_CONNECTIVITY_WAIT = 'octavia-amp-compute-connectivity-wait'
AMP_LISTENER_UPDATE = 'octavia-amp-listeners-update'
AMP_LISTENER_START = 'octavia-amp-listeners-start'
PLUG_VIP_AMPHORA = 'octavia-amp-plug-vip'
APPLY_QOS_AMP = 'octavia-amp-apply-qos'
UPDATE_AMPHORA_VIP_DATA = 'ocatvia-amp-update-vip-data'
GET_AMP_NETWORK_CONFIG = 'octavia-amp-get-network-config'
AMP_POST_VIP_PLUG = 'octavia-amp-post-vip-plug'
GENERATE_SERVER_PEM_TASK = 'GenerateServerPEMTask'
AMPHORA_CONFIG_UPDATE_TASK = 'AmphoraConfigUpdateTask'
FIRST_AMP_NETWORK_CONFIGS = 'first-amp-network-configs'
FIRST_AMP_VRRP_INTERFACE = 'first-amp-vrrp_interface'
# Batch Member Update constants
UNORDERED_MEMBER_UPDATES_FLOW = 'octavia-unordered-member-updates-flow'
UNORDERED_MEMBER_ACTIVE_FLOW = 'octavia-unordered-member-active-flow'
UPDATE_ATTRIBUTES_FLOW = 'octavia-update-attributes-flow'
DELETE_MODEL_OBJECT_FLOW = 'octavia-delete-model-object-flow'
BATCH_UPDATE_MEMBERS_FLOW = 'octavia-batch-update-members-flow'
MEMBER_TO_ERROR_ON_REVERT_FLOW = 'octavia-member-to-error-on-revert-flow'
DECREMENT_MEMBER_QUOTA_FLOW = 'octavia-decrement-member-quota-flow'
MARK_MEMBER_ACTIVE_INDB = 'octavia-mark-member-active-indb'
UPDATE_MEMBER_INDB = 'octavia-update-member-indb'
DELETE_MEMBER_INDB = 'octavia-delete-member-indb'
# Task Names
ADMIN_DOWN_PORT = 'admin-down-port'
AMPHORA_POST_VIP_PLUG = 'amphora-post-vip-plug'
AMPHORA_RELOAD_LISTENER = 'amphora-reload-listener'
AMPHORA_TO_ERROR_ON_REVERT = 'amphora-to-error-on-revert'
AMPHORAE_POST_NETWORK_PLUG = 'amphorae-post-network-plug'
ATTACH_PORT = 'attach-port'
CALCULATE_AMPHORA_DELTA = 'calculate-amphora-delta'
CREATE_VIP_BASE_PORT = 'create-vip-base-port'
DELETE_AMPHORA = 'delete-amphora'
DELETE_PORT = 'delete-port'
DISABLE_AMP_HEALTH_MONITORING = 'disable-amphora-health-monitoring'
GET_AMPHORA_NETWORK_CONFIGS_BY_ID = 'get-amphora-network-configs-by-id'
GET_AMPHORAE_FROM_LB = 'get-amphorae-from-lb'
HANDLE_NETWORK_DELTA = 'handle-network-delta'
MARK_AMPHORA_DELETED = 'mark-amphora-deleted'
MARK_AMPHORA_PENDING_DELETE = 'mark-amphora-pending-delete'
MARK_AMPHORA_HEALTH_BUSY = 'mark-amphora-health-busy'
RELOAD_AMP_AFTER_PLUG_VIP = 'reload-amp-after-plug-vip'
RELOAD_LB_AFTER_AMP_ASSOC = 'reload-lb-after-amp-assoc'
RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH = 'reload-lb-after-amp-assoc-full-graph'
RELOAD_LB_AFTER_PLUG_VIP = 'reload-lb-after-plug-vip'
RELOAD_LB_BEFOR_ALLOCATE_VIP = 'reload-lb-before-allocate-vip'
UPDATE_AMP_FAILOVER_DETAILS = 'update-amp-failover-details'
NOVA_1 = '1.1'
NOVA_21 = '2.1'
NOVA_3 = '3'
NOVA_VERSIONS = (NOVA_1, NOVA_21, NOVA_3)
# Auth sections
SERVICE_AUTH = 'service_auth'
RPC_NAMESPACE_CONTROLLER_AGENT = 'controller'
# Build Type Priority
LB_CREATE_FAILOVER_PRIORITY = 20
LB_CREATE_NORMAL_PRIORITY = 40
LB_CREATE_ADMIN_FAILOVER_PRIORITY = 80
BUILD_TYPE_PRIORITY = 'build_type_priority'
# Active standalone roles and topology
TOPOLOGY_SINGLE = 'SINGLE'
TOPOLOGY_ACTIVE_STANDBY = 'ACTIVE_STANDBY'
ROLE_MASTER = 'MASTER'
ROLE_BACKUP = 'BACKUP'
ROLE_STANDALONE = 'STANDALONE'
SUPPORTED_LB_TOPOLOGIES = (TOPOLOGY_ACTIVE_STANDBY, TOPOLOGY_SINGLE)
SUPPORTED_AMPHORA_ROLES = (ROLE_BACKUP, ROLE_MASTER, ROLE_STANDALONE)
TOPOLOGY_STATUS_OK = 'OK'
ROLE_MASTER_PRIORITY = 100
ROLE_BACKUP_PRIORITY = 90
VRRP_AUTH_DEFAULT = 'PASS'
VRRP_AUTH_AH = 'AH'
SUPPORTED_VRRP_AUTH = (VRRP_AUTH_DEFAULT, VRRP_AUTH_AH)
KEEPALIVED_CMD = '/usr/sbin/keepalived '
# The DEFAULT_VRRP_ID value needs to be variable for multi tenant support
# per amphora in the future
DEFAULT_VRRP_ID = 1
VRRP_PROTOCOL_NUM = 112
AUTH_HEADER_PROTOCOL_NUMBER = 51
TEMPLATES = '/templates'
AGENT_API_TEMPLATES = '/templates'
LOGGING_TEMPLATES = '/templates'
AGENT_CONF_TEMPLATE = 'amphora_agent_conf.template'
LOGGING_CONF_TEMPLATE = '10-rsyslog.conf.template'
USER_DATA_CONFIG_DRIVE_TEMPLATE = 'user_data_config_drive.template'
OPEN = 'OPEN'
FULL = 'FULL'
# OPEN = HAProxy listener status nbconn < maxconn
# FULL = HAProxy listener status not nbconn < maxconn
HAPROXY_LISTENER_STATUSES = (OPEN, FULL)
UP = 'UP'
DOWN = 'DOWN'
# UP = HAProxy backend has working or no servers
# DOWN = HAProxy backend has no working servers
HAPROXY_BACKEND_STATUSES = (UP, DOWN)
DRAIN = 'DRAIN'
MAINT = 'MAINT'
NO_CHECK = 'no check'
# DRAIN = member is weight 0 and is in draining mode
# MAINT = member is downed for maintenance? not sure when this happens
# NO_CHECK = no health monitor is enabled
HAPROXY_MEMBER_STATUSES = (UP, DOWN, DRAIN, MAINT, NO_CHECK)
# Default number of concurrent connections in a HAProxy listener.
HAPROXY_DEFAULT_MAXCONN = 50000
# Current maximum number of concurrent connections in HAProxy.
# This is limited by the systemd "LimitNOFILE" and
# the sysctl fs.file-max fs.nr_open settings in the image
HAPROXY_MAX_MAXCONN = 1000000
RESTARTING = 'RESTARTING'
# Quota Constants
QUOTA_UNLIMITED = -1
MIN_QUOTA = QUOTA_UNLIMITED
MAX_QUOTA = 2000000000
API_VERSION = '0.5'
HAPROXY_BASE_PEER_PORT = 1025
KEEPALIVED_JINJA2_UPSTART = 'keepalived.upstart.j2'
KEEPALIVED_JINJA2_SYSTEMD = 'keepalived.systemd.j2'
KEEPALIVED_JINJA2_SYSVINIT = 'keepalived.sysvinit.j2'
CHECK_SCRIPT_CONF = 'keepalived_check_script.conf.j2'
KEEPALIVED_CHECK_SCRIPT = 'keepalived_lvs_check_script.sh.j2'
PLUGGED_INTERFACES = '/var/lib/octavia/plugged_interfaces'
HAPROXY_USER_GROUP_CFG = '/var/lib/octavia/haproxy-default-user-group.conf'
AMPHORA_NAMESPACE = 'amphora-haproxy'
FLOW_DOC_TITLES = {'AmphoraFlows': 'Amphora Flows',
'LoadBalancerFlows': 'Load Balancer Flows',
'ListenerFlows': 'Listener Flows',
'PoolFlows': 'Pool Flows',
'MemberFlows': 'Member Flows',
'HealthMonitorFlows': 'Health Monitor Flows',
'L7PolicyFlows': 'Layer 7 Policy Flows',
'L7RuleFlows': 'Layer 7 Rule Flows'}
NETNS_PRIMARY_INTERFACE = 'eth1'
SYSCTL_CMD = '/sbin/sysctl'
AMP_ACTION_START = 'start'
AMP_ACTION_STOP = 'stop'
AMP_ACTION_RELOAD = 'reload'
AMP_ACTION_RESTART = 'restart'
GLANCE_IMAGE_ACTIVE = 'active'
INIT_SYSTEMD = 'systemd'
INIT_UPSTART = 'upstart'
INIT_SYSVINIT = 'sysvinit'
INIT_UNKOWN = 'unknown'
VALID_INIT_SYSTEMS = (INIT_SYSTEMD, INIT_SYSVINIT, INIT_UPSTART)
INIT_PATH = '/sbin/init'
SYSTEMD_DIR = '/usr/lib/systemd/system'
SYSVINIT_DIR = '/etc/init.d'
UPSTART_DIR = '/etc/init'
INIT_PROC_COMM_PATH = '/proc/1/comm'
KEEPALIVED_SYSTEMD = 'octavia-keepalived.service'
KEEPALIVED_SYSVINIT = 'octavia-keepalived'
KEEPALIVED_UPSTART = 'octavia-keepalived.conf'
KEEPALIVED_SYSTEMD_PREFIX = 'octavia-keepalivedlvs-%s.service'
KEEPALIVED_SYSVINIT_PREFIX = 'octavia-keepalivedlvs-%s'
KEEPALIVED_UPSTART_PREFIX = 'octavia-keepalivedlvs-%s.conf'
# Authentication
KEYSTONE = 'keystone'
NOAUTH = 'noauth'
TESTING = 'testing'
# Amphora distro-specific data
UBUNTU = 'ubuntu'
CENTOS = 'centos'
# Pagination, sorting, filtering values
APPLICATION_JSON = 'application/json'
PAGINATION_HELPER = 'pagination_helper'
ASC = 'asc'
DESC = 'desc'
ALLOWED_SORT_DIR = (ASC, DESC)
DEFAULT_SORT_DIR = ASC
DEFAULT_SORT_KEYS = ['created_at', 'id']
DEFAULT_PAGE_SIZE = 1000
# RBAC
LOADBALANCER_API = 'os_load-balancer_api'
RULE_API_ADMIN = 'rule:load-balancer:admin'
RULE_API_READ = 'rule:load-balancer:read'
RULE_API_READ_GLOBAL = 'rule:load-balancer:read-global'
RULE_API_WRITE = 'rule:load-balancer:write'
RULE_API_READ_QUOTA = 'rule:load-balancer:read-quota'
RULE_API_READ_QUOTA_GLOBAL = 'rule:load-balancer:read-quota-global'
RULE_API_WRITE_QUOTA = 'rule:load-balancer:write-quota'
RBAC_LOADBALANCER = '{}:loadbalancer:'.format(LOADBALANCER_API)
RBAC_LISTENER = '{}:listener:'.format(LOADBALANCER_API)
RBAC_POOL = '{}:pool:'.format(LOADBALANCER_API)
RBAC_MEMBER = '{}:member:'.format(LOADBALANCER_API)
RBAC_HEALTHMONITOR = '{}:healthmonitor:'.format(LOADBALANCER_API)
RBAC_L7POLICY = '{}:l7policy:'.format(LOADBALANCER_API)
RBAC_L7RULE = '{}:l7rule:'.format(LOADBALANCER_API)
RBAC_QUOTA = '{}:quota:'.format(LOADBALANCER_API)
RBAC_AMPHORA = '{}:amphora:'.format(LOADBALANCER_API)
RBAC_PROVIDER = '{}:provider:'.format(LOADBALANCER_API)
RBAC_PROVIDER_FLAVOR = '{}:provider-flavor:'.format(LOADBALANCER_API)
RBAC_PROVIDER_AVAILABILITY_ZONE = '{}:provider-availability-zone:'.format(
LOADBALANCER_API)
RBAC_FLAVOR = '{}:flavor:'.format(LOADBALANCER_API)
RBAC_FLAVOR_PROFILE = '{}:flavor-profile:'.format(LOADBALANCER_API)
RBAC_AVAILABILITY_ZONE = '{}:availability-zone:'.format(LOADBALANCER_API)
RBAC_AVAILABILITY_ZONE_PROFILE = '{}:availability-zone-profile:'.format(
LOADBALANCER_API)
RBAC_POST = 'post'
RBAC_PUT = 'put'
RBAC_PUT_CONFIG = 'put_config'
RBAC_PUT_FAILOVER = 'put_failover'
RBAC_DELETE = 'delete'
RBAC_GET_ONE = 'get_one'
RBAC_GET_ALL = 'get_all'
RBAC_GET_ALL_GLOBAL = 'get_all-global'
RBAC_GET_DEFAULTS = 'get_defaults'
RBAC_GET_STATS = 'get_stats'
RBAC_GET_STATUS = 'get_status'
RBAC_SCOPE_PROJECT = 'project'
RBAC_SCOPE_SYSTEM = 'system'
RBAC_ROLES_DEPRECATED_REASON = (
'The Octavia API now requires the OpenStack default roles and scoped '
'tokens. '
'See https://docs.openstack.org/octavia/latest/configuration/policy.html '
'and https://docs.openstack.org/keystone/latest/contributor/'
'services.html#reusable-default-roles for more information.')
# PROVIDERS
OCTAVIA = 'octavia'
AMPHORAV2 = 'amphorav2'
AMPHORAV1 = 'amphorav1'
# systemctl commands
DISABLE = 'disable'
ENABLE = 'enable'
# systemd amphora netns service prefix
AMP_NETNS_SVC_PREFIX = 'amphora-netns'
# Amphora Feature Compatibility
HTTP_REUSE = 'has_http_reuse'
POOL_ALPN = 'has_pool_alpn'
# TODO(johnsom) convert these to octavia_lib constants
# once octavia is transitioned to use octavia_lib
FLAVOR = 'flavor'
FLAVOR_DATA = 'flavor_data'
AVAILABILITY_ZONE = 'availability_zone'
AVAILABILITY_ZONE_DATA = 'availability_zone_data'
# Flavor metadata
LOADBALANCER_TOPOLOGY = 'loadbalancer_topology'
COMPUTE_FLAVOR = 'compute_flavor'
AMP_IMAGE_TAG = 'amp_image_tag'
# TODO(johnsom) move to octavia_lib
# client certification authorization option
CLIENT_AUTH_NONE = 'NONE'
CLIENT_AUTH_OPTIONAL = 'OPTIONAL'
CLIENT_AUTH_MANDATORY = 'MANDATORY'
SUPPORTED_CLIENT_AUTH_MODES = [CLIENT_AUTH_NONE, CLIENT_AUTH_OPTIONAL,
CLIENT_AUTH_MANDATORY]
TOPIC_AMPHORA_V2 = 'octavia_provisioning_v2'
HAPROXY_HTTP_PROTOCOLS = [lib_consts.PROTOCOL_HTTP,
lib_consts.PROTOCOL_TERMINATED_HTTPS]
LVS_PROTOCOLS = [PROTOCOL_UDP,
lib_consts.PROTOCOL_SCTP]
HAPROXY_BACKEND = 'HAPROXY'
LVS_BACKEND = 'LVS'
# Map each supported protocol to its L4 protocol
L4_PROTOCOL_MAP = {
PROTOCOL_TCP: PROTOCOL_TCP,
PROTOCOL_HTTP: PROTOCOL_TCP,
PROTOCOL_HTTPS: PROTOCOL_TCP,
PROTOCOL_TERMINATED_HTTPS: PROTOCOL_TCP,
PROTOCOL_PROXY: PROTOCOL_TCP,
lib_consts.PROTOCOL_PROXYV2: PROTOCOL_TCP,
PROTOCOL_UDP: PROTOCOL_UDP,
lib_consts.PROTOCOL_SCTP: lib_consts.PROTOCOL_SCTP,
lib_consts.PROTOCOL_PROMETHEUS: lib_consts.PROTOCOL_TCP,
}
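# Illustrative lookup (not part of the upstream constants): a driver can resolve
# the transport protocol of a listener with e.g.
#   L4_PROTOCOL_MAP[PROTOCOL_TERMINATED_HTTPS]   # -> PROTOCOL_TCP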
# Image drivers
SUPPORTED_IMAGE_DRIVERS = ['image_noop_driver',
'image_glance_driver']
# Volume drivers
VOLUME_NOOP_DRIVER = 'volume_noop_driver'
SUPPORTED_VOLUME_DRIVERS = [VOLUME_NOOP_DRIVER,
'volume_cinder_driver']
# Cinder volume driver constants
CINDER_STATUS_AVAILABLE = 'available'
CINDER_STATUS_ERROR = 'error'
CINDER_ACTION_CREATE_VOLUME = 'create volume'
# The nil UUID (used in octavia for deleted references) - RFC 4122
NIL_UUID = '00000000-0000-0000-0000-000000000000'
# OpenSSL cipher strings
CIPHERS_OWASP_SUITE_B = ('TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:'
'TLS_AES_128_GCM_SHA256:DHE-RSA-AES256-GCM-SHA384:'
'DHE-RSA-AES128-GCM-SHA256:'
'ECDHE-RSA-AES256-GCM-SHA384:'
'ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-SHA256:'
'DHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:'
'ECDHE-RSA-AES128-SHA256')
TLS_VERSIONS_OWASP_SUITE_B = [lib_consts.TLS_VERSION_1_2,
lib_consts.TLS_VERSION_1_3]
# All supported TLS versions in ascending order (oldest to newest)
TLS_ALL_VERSIONS = [
lib_consts.SSL_VERSION_3,
lib_consts.TLS_VERSION_1,
lib_consts.TLS_VERSION_1_1,
lib_consts.TLS_VERSION_1_2,
lib_consts.TLS_VERSION_1_3
]
VIP_SECURITY_GROUP_PREFIX = 'lb-'
AMP_BASE_PORT_PREFIX = 'octavia-lb-vrrp-'
OCTAVIA_OWNED = 'octavia_owned'
# Sadly in the LBaaS v2 API, header insertions are on the listener objects
# but they should be on the pool. Dealing with it until v3.
LISTENER_PROTOCOLS_SUPPORTING_HEADER_INSERTION = [PROTOCOL_HTTP,
PROTOCOL_TERMINATED_HTTPS]
SUPPORTED_ALPN_PROTOCOLS = [lib_consts.ALPN_PROTOCOL_HTTP_2,
lib_consts.ALPN_PROTOCOL_HTTP_1_1,
lib_consts.ALPN_PROTOCOL_HTTP_1_0]
AMPHORA_SUPPORTED_ALPN_PROTOCOLS = [lib_consts.ALPN_PROTOCOL_HTTP_2,
lib_consts.ALPN_PROTOCOL_HTTP_1_1,
lib_consts.ALPN_PROTOCOL_HTTP_1_0]
# Amphora interface fields
MTU = 'mtu'
ADDRESSES = 'addresses'
ROUTES = 'routes'
RULES = 'rules'
SCRIPTS = 'scripts'
# pyroute2 fields
STATE = 'state'
FAMILY = 'family'
ADDRESS = 'address'
PREFIXLEN = 'prefixlen'
DHCP = 'dhcp'
IPV6AUTO = 'ipv6auto'
DST = 'dst'
PREFSRC = 'prefsrc'
GATEWAY = 'gateway'
FLAGS = 'flags'
ONLINK = 'onlink'
TABLE = 'table'
SCOPE = 'scope'
SRC = 'src'
SRC_LEN = 'src_len'
IFACE_UP = 'up'
IFACE_DOWN = 'down'
COMMAND = 'command'
# Amphora network directory
AMP_NET_DIR_TEMPLATE = '/etc/octavia/interfaces/'
|
StarcoderdataPython
|
1732786
|
<reponame>milesrout/beatle
from collections import ChainMap
import contextlib
from itertools import chain, count
import typednodes as T
import cstnodes as E
from astpass import DeepAstPass
from is_gen import is_gen
from utils import (Type, Ast, ApeError, ApeSyntaxError, ApeInternalError,
ApeNotImplementedError, ApeTypeError, overloadmethod, unzip)
from kinds import *
from apetypes import *
from environments import Object, Namespace, add_namespace
#⭑: 2b51
#★: 2605
class ApeInferenceError(ApeTypeError):
pass
class ApeUnificationError(ApeTypeError):
pass
def pos_only_params(*args):
return ParamsType.pos_only(
[AnonParamType(a) for a in args])
tY = TypeVariable('y')
tS = TypeVariable('s')
tR = TypeVariable('r')
BASE_TYPE_ENVIRONMENT = {
'generator': Object(AbstractTypeConstructor(TernaryKind)),
'coroutine': Object(AbstractTypeConstructor(TernaryKind)),
#'list': Object(AbstractTypeConstructor(UnaryKind)),
'int': Object(Nullary(Int)),
'float': Object(Nullary(Float)),
'bool': Object(Nullary(Bool)),
'error': Object(Nullary(Error)),
'any': Object(Nullary(Any)),
}
def CoroutineType(y, s, r):
return TypeCall(BASE_TYPE_ENVIRONMENT['coroutine'].value, (y, s, r))
def GeneratorType(y, s, r):
return TypeCall(BASE_TYPE_ENVIRONMENT['generator'].value, (y, s, r))
def IterationResultType(y, s, r):
return DisjunctionType([TaggedType('return', r),
TaggedType('yield', TupleType([y, CoroutineType(y, s, r)]))])
BASE_ENVIRONMENT = {
# Want to add 'next', 'send', 'throw', etc. for generators.
# But we need linear types before we can do that. Then we can say
# iter: Iterable(y,r) ~> r + (y, Iterator(y,r)),
# next: Iterator(y,r) ~> r + (y, Iterator(y,r)),
# start: Generator(y,s,r) ~> r + (y, Coroutine(y,s,r)), and
# send: (s, Coroutine(y,s,r)) ~> r + (y, Coroutine(y,s,r)),
# where ~> is the LinearArrowType, Iterable(y,r) = Generator(y,(),r),
# and Iterator(y,r) = Coroutine(y,(),r).
'iter': Object(TypeScheme(['y', 'r'], FunctionType(
pos_only_params(GeneratorType(tY, Unit, tR)),
IterationResultType(tY, Unit, tR)))),
'next': Object(TypeScheme(['y', 'r'], FunctionType(
pos_only_params(CoroutineType(tY, Unit, tR)),
IterationResultType(tY, Unit, tR)))),
'start': Object(TypeScheme(['y', 's', 'r'], FunctionType(
pos_only_params(GeneratorType(tY, tS, tR)),
IterationResultType(tY, tS, tR)))),
'send': Object(TypeScheme(['y', 's', 'r'], FunctionType(
pos_only_params(tS, CoroutineType(tY, tS, tR)),
IterationResultType(tY, tS, tR)))),
'litstr': Object(TypeScheme([], FunctionType(pos_only_params(String), Expression))),
'stringify': Object(TypeScheme([], FunctionType(pos_only_params(Expression), Expression))),
'gensym': Object(TypeScheme([], FunctionType(ParamsType.unit(), String))),
'something': Object(TypeScheme(['a'], FunctionType(pos_only_params(TypeVariable('a')), MaybeType(TypeVariable('a'))))),
'nothing': Object(TypeScheme(['a'], MaybeType(TypeVariable('a')))),
'print': Object(TypeScheme([], FunctionType(ParamsType.varargs(ListType(String)), Unit))),
'str': Object(TypeScheme(['a'], FunctionType(pos_only_params(TypeVariable('a')), String))),
'set': Object(TypeScheme(['a'], FunctionType(ParamsType.unit(), SetType(TypeVariable('a'))))),
'len': Object(TypeScheme(['a'], FunctionType(
pos_only_params(ListType(TypeVariable('a'))),
Int))),
'cons': Object(TypeScheme(['a'], FunctionType(
pos_only_params(TypeVariable('a'), ListType(TypeVariable('a'))),
ListType(TypeVariable('a'))))),
'RuntimeError': Object(TypeScheme([], FunctionType(pos_only_params(String), Error))),
'AssertionError': Object(TypeScheme([], FunctionType(pos_only_params(String), Error))),
'eval': Object(TypeScheme(['a'], FunctionType(
pos_only_params(Expression),
TypeVariable('a')))),
}
generic_unary_op = TypeScheme(['a'], FunctionType(pos_only_params(TypeVariable('a')), TypeVariable('a')))
generic_binary_op = TypeScheme(['a'], FunctionType(pos_only_params(TypeVariable('a'), TypeVariable('a')), TypeVariable('a')))
UNARY_OPERATORS = {
'plus': generic_unary_op,
'minus': generic_unary_op,
}
BINARY_OPERATORS = {
'plus': generic_binary_op,
'minus': generic_binary_op,
'asterisk': generic_binary_op,
'at': generic_binary_op,
'div': generic_binary_op,
'mod': generic_binary_op,
'truediv': generic_binary_op,
}
class TypeChecker:
def __init__(self):
self.env = Namespace(BASE_ENVIRONMENT)
self.type_env = Namespace(BASE_TYPE_ENVIRONMENT)
self.subst = {}
self.unifiers = []
self.kind_unifiers = []
self.fresh_vars = (f'a{i}' for i in count(1))
self.fresh_kind_vars = (f'k{i}' for i in count(1))
self.asts = []
self.tags = {}
class Types:
"""A collection of the different types an expression can have
vtype: the type of the _V_alue of the expression
rtype: the type that an expression may early _R_eturn while being evaluated
ytype: the type that an expression may _Y_ield while being evaluated
stype: the type that an expression may be _S_ent while being evaluated
ctype: the type of _C_ondition that an expression may throw while being evaluated
"""
def __init__(this, vtype=None, *, rtype=None, ytype=None, stype=None, ctype=None):
this.vtype = vtype or self.fresh()
this.rtype = rtype or self.fresh()
this.ytype = ytype or self.fresh()
this.stype = stype or self.fresh()
this.ctype = ctype or self.fresh()
def __repr__(this):
return f'Types({this.vtype}, {this.rtype}, {this.ytype}, {this.stype}, {this.ctype})'
def apply(this, subst):
vtype = this.vtype.apply(subst) if this.vtype is not None else None
rtype = this.rtype.apply(subst) if this.rtype is not None else None
ytype = this.ytype.apply(subst) if this.ytype is not None else None
stype = this.stype.apply(subst) if this.stype is not None else None
ctype = this.ctype.apply(subst) if this.ctype is not None else None
return Types(vtype=vtype,
rtype=rtype,
ytype=ytype,
stype=stype,
ctype=ctype)
            def also(this, vtype=None, *, rtype=None, ytype=None, stype=None, ctype=None, pos=None):
                new = this.but(vtype=vtype, rtype=rtype, ytype=ytype, stype=stype, ctype=ctype)
                # unify_all expects a position; None keeps existing call sites working.
                self.unify_all(this, new, pos)
                return new
def but(this, vtype=None, *, rtype=None, ytype=None, stype=None, ctype=None):
return Types(vtype=vtype or this.vtype,
rtype=rtype or this.rtype,
ytype=ytype or this.ytype,
stype=stype or this.stype,
ctype=ctype or this.ctype)
@staticmethod
def void(vtype=None, *, rtype=None, ytype=None, stype=None, ctype=None):
return Types(vtype=vtype or Void,
rtype=rtype or Void,
ytype=ytype or Void,
stype=stype or Void,
ctype=ctype or Void)
@staticmethod
def any(vtype=None, *, rtype=None, ytype=None, stype=None, ctype=None):
return Types(vtype=vtype or Any,
rtype=rtype or Any,
ytype=ytype or Any,
stype=stype or Any,
ctype=ctype or Any)
self.Types = Types
######
def clean_vars(self, disallowed):
return (TypeVariable(x)
for x in chain('abcdefghijklmnopqrstuvwxyz', (f'a{i}' for i in count(1)))
if x not in disallowed)
def clean_fresh(self, disallowed):
return next(self.clean_vars(disallowed))
def free_vars_in_env(self):
return set.union(*(ty.value.ftv() for ty in self.env.env.values() if isinstance(ty, Object)), self.type_env.env.keys())
def fresh(self):
return TypeVariable(next(self.fresh_vars))
def fresh_kind(self):
return KindVariable(next(self.fresh_kind_vars))
def instantiate(self, scm):
return scm.t.apply({tvar: self.fresh() for tvar in scm.tvars})
def generalise(self, t):
existing_types = self.free_vars_in_env()
ftv = t.ftv() - existing_types
subst = dict(zip(ftv, [self.fresh() for tv in range(len(ftv))]))
tt = t.apply(subst)
return TypeScheme([subst[tvar].tvar for tvar in ftv], tt)
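    # Sketch of generalisation (comment only): free type variables of t that do
    # not occur anywhere in the environment are renamed to fresh variables and
    # quantified, so an inferred `a -> a` becomes the scheme forall a. a -> a
    # (the usual let-polymorphism step of Hindley-Milner inference).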
def lookup_name(self, name, pos):
scm = self.env.get(name, None)
if scm is None:
raise ApeTypeError(msg=f'Unbound variable: {name}', pos=pos)
return self.instantiate(scm.value)
def lookup_type_con(self, name, pos):
if isinstance(name, E.DottedNameExpression):
if len(name.parts) == 1:
name = name.parts[0]
else:
raise NotImplementedError()
if isinstance(name, E.IdExpression):
name = name.name
con = self.type_env.get(name, None)
if con is None:
raise ApeTypeError(msg=f'Unbound type: {name}', pos=pos)
if isinstance(con.value, Nullary):
return con.value.t, Star
return con.value, con.value.kind
def solve(self):
self.subst = solve(self.subst, self.unifiers)
self.env.update({name: Object(scm.value.apply(self.subst)) for name, scm in self.env.env.items() if isinstance(scm, Object)})
return self.subst
def add_name_ungeneralised(self, name, t):
subst = self.solve()
self.env[name] = Object(TypeScheme([], t.apply(subst)))
def add_type_name(self, name, tc):
"""Add a type constructor (possibly nullary) to the type environment"""
self.type_env[name] = Object(tc)
def add_name(self, name, t):
subst = self.solve()
self.env[name] = Object(self.generalise(t.apply(subst)))
def add_type_namespace(self, name, env):
add_namespace(self.type_env, name, env)
def add_namespace(self, name, env):
add_namespace(self.env, name, env)
#def add_type_namespace(self, name, env):
# environment = self.type_env
# for part in name.parts[:-1]:
# environment[part.name] = Namespace({})
# environment = environment[part.name]
# environment[name.parts[-1].name] = Namespace(env)
#def add_namespace(self, name, env):
# environment = self.env
# for part in name.parts[:-1]:
# environment[part.name] = Namespace({})
# environment = environment[part.name]
# environment[name.parts[-1].name] = Namespace(env)
def print_env(self):
print('self.type_env:')
self.type_env.printout()
print('self.tags:')
for k, v in self.tags.items():
print(k, v)
print('self.env:')
self.env.printout()
def update_with_subst(self, subst):
for ast, env in self.asts:
ast.type.vtype = (ast.type.vtype.apply(subst))
ast.type.rtype = (ast.type.rtype.apply(subst))
ast.type.ytype = (ast.type.ytype.apply(subst))
ast.type.stype = (ast.type.stype.apply(subst))
ast.type.ctype = (ast.type.ctype.apply(subst))
@contextlib.contextmanager
def clean_subenv(self):
with self.type_env.clean_subenv(), self.env.clean_subenv():
yield
@contextlib.contextmanager
def subenv(self):
with self.type_env.subenv(), self.env.subenv():
yield
# @contextlib.contextmanager
# def clean_subenv(self):
# old, oldT = self.env, self.type_env
# self.env = Namespace(ChainMap(BASE_ENVIRONMENT).new_child())
# self.type_env = Namespace(ChainMap(BASE_TYPE_ENVIRONMENT).new_child())
# yield
# self.env, self.type_env = old, oldT
#
# @contextlib.contextmanager
# def subenv(self):
# old, oldT = self.env.env, self.type_env.env
# self.env.env = self.env.env.new_child()
# self.type_env.env = self.type_env.env.new_child()
# yield
# self.env.env, self.type_env.env = old, oldT
#
def unify_others(self, T1, T2, pos):
self.unify(T1.rtype, T2.rtype, pos)
self.unify(T1.ytype, T2.ytype, pos)
self.unify(T1.stype, T2.stype, pos)
self.unify(T1.ctype, T2.ctype, pos)
def unify_all(self, T1, T2, pos):
self.unify(T1.vtype, T2.vtype, pos)
self.unify(T1.rtype, T2.rtype, pos)
self.unify(T1.ytype, T2.ytype, pos)
self.unify(T1.stype, T2.stype, pos)
self.unify(T1.ctype, T2.ctype, pos)
def unify(self, t1, t2, pos):
if isinstance(t1, self.Types):
raise NotImplementedError
if isinstance(t2, self.Types):
raise NotImplementedError
if isinstance(t1, TypeConstructor):
raise NotImplementedError
if isinstance(t2, TypeConstructor):
raise NotImplementedError
if t1 is not Any and t2 is not Any:
self.unifiers.append(Constraint(t1, t2, pos))
def unify_kind(self, k1, k2, pos):
self.kind_unifiers.append(KindConstraint(k1, k2, pos))
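    # Constraint discharge (comment sketch): unify() only records a Constraint;
    # solve() later runs the accumulated unifiers, so constraining e.g.
    # ListType(TypeVariable('a')) against ListType(Int) ends up as the
    # substitution {a: Int} applied to every inferred type.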
######
def infer_error(self, ast):
try:
pos = ast.pos
except Exception:
pos = None
raise ApeInternalError(pos=pos, msg='no overload found for {}'.format(ast.__class__))
######
def parse_toplevel_type(self, ast):
if not isinstance(ast, E.TypeForallExpression):
raise ApeInternalError(pos=ast.pos, msg='Unexpected type expression')
with self.subenv():
names = [tvar.name for tvar in ast.tvars]
for name in names:
self.type_env[name] = Object(Nullary(TypeVariable(name)))
t, k = self.parse_type(ast.expr)
self.unify_kind(k, Star, ast.pos)
return TypeScheme(names, t)
@overloadmethod(error_function=infer_error)
def infer_kind(self):
...
@infer_kind.on(E.TypeNameExpression)
def infer_kind_TypeNameExpression(self, ast):
_, k = self.lookup_type_con(ast.name, ast.pos)
return k
@infer_kind.on(E.TypeTupleExpression)
def infer_kind_TypeTupleExpression(self, ast):
for expr in ast.exprs:
self.unify_kind(self.infer_kind(expr), Star, ast.pos)
return Star
@infer_kind.on(E.TypeFunctionExpression)
def infer_kind_TypeFunctionExpression(self, ast):
self.unify_kind(self.infer_kind(ast.t1), Star, ast.t1.pos)
self.unify_kind(self.infer_kind(ast.t2), Star, ast.t2.pos)
return Star
@infer_kind.on(E.TypeMaybeExpression)
def infer_kind_TypeMaybeExpression(self, ast):
self.unify_kind(self.infer_kind(ast.t), Star, ast.pos)
return Star
@infer_kind.on(E.TypeCallExpression)
def infer_kind_TypeCallExpression(self, ast):
kc = self.infer_kind(ast.atom)
kt = [self.infer_kind(t) for t in ast.args]
kv = self.fresh_kind()
self.unify_kind(kc, ArrowKind(kt, kv), ast.pos)
return kv
@overloadmethod(error_function=infer_error)
def infer_type(self):
...
@infer_type.on(E.TypeNameExpression)
def infer_type_TypeNameExpression(self, ast):
t, _ = self.lookup_type_con(ast.name, ast.pos)
return t
@infer_type.on(E.TypeTupleExpression)
def infer_type_TypeTupleExpression(self, ast):
return TupleType([self.infer_type(expr) for expr in ast.exprs])
@infer_type.on(E.TypeTaggedExpression)
def infer_type_TypeTaggedExpression(self, ast):
if ast.t is None:
t = Unit
else:
t = self.infer_type(ast.t)
#print('TAGGED', ast.tag, 'TYPE', t)
return TaggedType(ast.tag, t)
#if ast.tag in self.tags:
# if self.tags[ast.tag] is not t:
# raise ApeTypeError(pos=ast.pos, msg=f'Cannot redefine tag \'{ast.tag}\': already represents {self.tags[ast.tag]}, not {t}')
#else:
# g = TypeScheme([], t)
# self.tags[ast.tag] = g
#if ast.tag in self.tags:
# if self.tags[ast.tag] is not t:
# raise ApeTypeError(pos=ast.pos, msg=f'Cannot redefine tag \'{ast.tag}\': already represents {self.tags[ast.tag]}, not {t}')
#else:
# g = self.generalise(t)
# print('g', g, 't', t)
# print('g', repr(g), 't', repr(t))
# self.tags[ast.tag] = g
#return TaggedType(ast.tag, t)
@infer_type.on(E.TypeDisjunctionExpression)
def infer_type_TypeDisjunctionExpression(self, ast):
return DisjunctionType([self.infer_type(expr) for expr in ast.exprs])
@infer_type.on(E.TypeFunctionExpression)
def infer_type_TypeFunctionExpression(self, ast):
return FunctionType(pos_only_params(self.infer_type(ast.t1)), self.infer_type(ast.t2))
@infer_type.on(E.TypeMaybeExpression)
def infer_type_TypeMaybeExpression(self, ast):
t = self.infer_type(ast.t)
return MaybeType(t)
@infer_type.on(E.TypeCallExpression)
def infer_type_TypeCallExpression(self, ast):
tc = self.infer_type(ast.atom)
#print('TC', tc, repr(tc))
ts = [self.infer_type(t) for t in ast.args]
return TypeCall(tc, ts)
def parse_type(self, ast):
t, k = self.infer_type(ast), self.infer_kind(ast)
return t, k
######
@overloadmethod(use_as_wrapper=True, error_function=infer_error)
def infer(self, original, ast_and_type):
if isinstance(ast_and_type, Ast):
ast, t = ast_and_type.node, ast_and_type.type
else:
if isinstance(ast_and_type, self.Types):
raise ApeInternalError(
pos=original.pos,
                    msg=f'You forgot to return the typed node as well as the type itself: {ast_and_type} <= {original}')
raise
try:
ast, t = ast_and_type
except TypeError as exc:
raise ApeInternalError(
pos=original.pos,
msg=f'Only returned one of ast and type: {ast_and_type} <= {original}')
ret = Ast(ast, t, original.pos)
self.asts.append((ret, self.env))
return ret, t
@infer.on(E.EmptyListExpression)
def infer_EmptyListExpression(self, ast):
tv = self.fresh()
return T.EmptyList(), self.Types(ListType(tv))
@infer.on(E.EmptySetExpression)
def infer_EmptySetExpression(self, ast):
tk = self.fresh()
return T.EmptySet(), self.Types(SetType(tk))
@infer.on(E.EmptyDictExpression)
def infer_EmptyDictExpression(self, ast):
tk = self.fresh()
tv = self.fresh()
return T.EmptyDict(), self.Types(DictType(tk, tv))
@infer.on(E.EmptyTupleExpression)
def infer_EmptyTupleExpression(self, ast):
return T.EmptyTuple(), self.Types(Unit)
@infer.on(E.SetLiteral)
def infer_SetLiteral(self, ast):
v = self.fresh()
tS = self.Types(SetType(v))
tV = tS.but(v)
exprs = []
for expr in ast.exprs:
if isinstance(expr, E.StarExpr):
es, ts = self.infer(expr.expr)
self.unify_all(tS, ts, expr.pos)
exprs.append(T.Star(es))
else:
ev, tv = self.infer(expr)
self.unify_all(tV, tv, expr.pos)
exprs.append(ev)
return T.SetLit(exprs), tS
@infer.on(E.DictLiteral)
def infer_DictLiteral(self, ast):
k, v = self.fresh(), self.fresh()
tD = self.Types(DictType(k, v))
tK, tV = tD.but(k), tD.but(v)
exprs = []
for expr in ast.exprs:
if isinstance(expr, E.DictPair):
ek, tk = self.infer(expr.key_expr)
self.unify_all(tK, tk, expr.key_expr.pos)
ev, tv = self.infer(expr.value_expr)
self.unify_all(tV, tv, expr.value_expr.pos)
exprs.append(T.DictKV(ek, ev))
elif isinstance(expr, E.StarStarExpr):
ee, te = self.infer(expr.expr)
self.unify_all(tD, te, expr.expr.pos)
exprs.append(T.StarStar(ee))
else:
raise ApeInternalError(pos=ast.pos, msg=f'Unexpected {expr.__class__.__name__} in {ast.__class__.__name__}')
return T.DictLit(exprs), tD
@infer.on(E.ListLiteral)
def infer_ListLiteral(self, ast):
v = self.fresh()
tL = self.Types(ListType(v))
tV = tL.but(v)
exprs = []
for expr in ast.exprs:
if isinstance(expr, E.StarExpr):
el, tl = self.infer(expr.expr)
self.unify_all(tL, tl, expr.pos)
exprs.append(T.Star(el))
else:
ev, tv = self.infer(expr)
self.unify_all(tV, tv, [ast.pos, expr.pos])
exprs.append(ev)
return T.List(exprs), tL
@infer.on(E.TupleLiteral)
def infer_TupleLiteral(self, ast):
ets = [self.infer(expr) for expr in ast.exprs]
es, ts = unzip(ets) if len(ets) > 0 else ([], [])
tt = self.Types(TupleType([t.vtype for t in ts]))
for e, t in ets:
self.unify_others(tt, t, e.pos)
return T.Tuple(es), tt
@infer.on(E.RaiseStatement)
def infer_RaiseStatement(self, ast):
e1, t1 = self.infer(ast.expr)
self.unify(t1.vtype, t1.ctype, ast.expr.pos)
if ast.original is not None:
e2, t2 = self.infer(ast.original)
self.unify_all(t1, t2, ast.original.pos)
else:
e2 = None
return T.Raise(e1, e2), t1.but(vtype=self.fresh())
@infer.on(E.NoneExpression)
def infer_NoneExpression(self, ast):
return T.NoneExpr(), self.Types.any(Unit)
@infer.on(E.StringExpression)
def infer_StringExpression(self, ast):
return T.String(''.join(a.string for a in ast.unparsed)), self.Types(String)
@infer.on(E.FloatExpression)
def infer_FloatExpression(self, ast):
return T.Float(ast.format, ast.value), self.Types(Float)
@infer.on(E.IntExpression)
def infer_IntExpression(self, ast):
return T.Int(ast.base, ast.value), self.Types(Int)
    @infer.on(E.IfElseExpr)
    def infer_IfElseExpr(self, ast):
        # self.infer returns (typed node, Types); unpack both halves.
        e1, t1 = self.infer(ast.cond)
        e2, t2 = self.infer(ast.expr)
        e3, t3 = self.infer(ast.alt)
        self.unify_all(t1, self.Types(Bool), ast.cond.pos)
        self.unify_others(t1, t2, ast.expr.pos)
        self.unify_others(t1, t3, ast.alt.pos)
        self.unify(t2.vtype, t3.vtype, ast.expr.pos)
        # NOTE: T.IfElse is assumed here; use the actual typed-node class from
        # typednodes if it is named differently.
        return T.IfElse(e1, e2, e3), t2
@infer.on(E.TryStatement)
def infer_TryStatement(self, ast):
tt = self.Types()
eb, tb = self.infer(ast.body)
self.unify(tt.rtype, tb.rtype, [ast.body.pos, ast.pos])
self.unify(tt.ytype, tb.ytype, [ast.body.pos, ast.pos])
self.unify(tt.stype, tb.stype, [ast.body.pos, ast.pos])
for exb in ast.excepts:
pass
        raise ApeNotImplementedError(pos=ast.pos, msg='try statements are not yet type checked')
@infer.on(E.WhileStatement)
def infer_WhileStatement(self, ast):
tt = self.Types()
ec, tc = self.infer(ast.cond)
self.unify(tc.vtype, Bool, ast.cond.pos)
self.unify_others(tc, tt, [ast.cond.pos, ast.pos])
eb, tb = self.infer(ast.body)
self.unify(tb.vtype, Unit, ast.pos)
self.unify_others(tb, tt, [ast.body.pos, ast.pos])
if ast.alt is not None:
ea, ta = self.infer(ast.alt)
self.unify(ta.vtype, Unit, ast.alt.pos)
self.unify_others(ta, tt, [ast.alt.pos, ast.pos])
return T.While(ec, eb, ea if ast.alt is not None else None), tt
@infer.on(E.IfElifElseStatement)
def infer_IfElifElseStatement(self, ast):
eic, tic = self.infer(ast.if_branch.cond)
self.unify(tic.vtype, Bool, ast.pos)
eib, tib = self.infer(ast.if_branch.body)
self.unify_others(tic, tib, ast.pos)
elifs = []
for br in ast.elif_branches:
eeic, teic = self.infer(br.cond)
self.unify_all(teic, tic, br.cond.pos)
eeib, teib = self.infer(br.body)
self.unify_all(teib, tib, br.body.pos)
elifs.append(T.ElifBranch(eeic, eeib))
if ast.else_branch is not None:
eeb, teb = self.infer(ast.else_branch.body)
self.unify_all(teb, tib, ast.else_branch.pos)
eelse = T.ElseBranch(eeb)
else:
eelse = None
return T.IfElifElse(T.IfBranch(eic, eib), elifs, eelse), tib
@infer.on(E.UnaryExpression)
def infer_UnaryExpression(self, ast):
e, t = self.infer(ast.expr)
tv = self.fresh()
u1 = FunctionType(ArgsType.create(t.vtype), tv)
u2 = self.instantiate(UNARY_OPERATORS[ast.op])
self.unify(u1, u2, ast.pos)
return T.Unary(ast.op, e), t.but(tv)
@infer.on(E.ArithExpression)
def infer_ArithExpression(self, ast):
e1, t1 = self.infer(ast.left)
e2, t2 = self.infer(ast.right)
self.unify_others(t1, t2, [ast.left.pos, ast.right.pos])
tv = self.fresh()
u1 = FunctionType(ArgsType.create(t1.vtype, t2.vtype), tv)
u2 = self.instantiate(BINARY_OPERATORS[ast.op])
self.unify(u1, u2, [ast.left.pos, ast.right.pos])
return T.Arith(ast.op, e1, e2), t1.but(tv)
@infer.on(E.ReturnStatement)
def infer_ReturnStatement(self, ast):
if ast.expr is None:
return T.Return(None), self.Types.any(Unit, rtype=Unit)
e, t = self.infer(ast.expr)
self.unify(t.vtype, t.rtype, ast.pos)
return T.Return(e), t.but(self.fresh())
@infer.on(E.PassStatement)
def infer_PassStatement(self, ast):
return T.Pass(), self.Types(Unit)
@infer.on(E.IndexExpression)
def infer_IndexExpression(self, ast):
# For now, only support one index and no slices
assert len(ast.indices) == 1
assert type(ast.indices[0]) == E.Index
t = self.Types()
ef, tf = self.infer(ast.atom)
self.unify_others(t, tf, ast.atom.pos)
self.unify(ListType(t.vtype), tf.vtype, ast.atom.pos)
ei, ti = self.infer(ast.indices[0].idx)
self.unify_others(t, ti, ast.indices[0].idx.pos)
self.unify(ti.vtype, Int, ast.indices[0].idx.pos)
return T.Index(ef, [ei]), t
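        # NOTE: everything below this return is unreachable; it looks like an
        # earlier alternative that typed indexing as a call on the indexed
        # value, kept here for reference.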
es, ts = [], []
for index in ast.indices:
ei, ti = self.infer(index)
self.unify_others(tf, ti, ast.pos)
es.append(ei)
ts.append(ti)
tt = ArgsType(HListType([t.vtype for t in ts]), HDictType([]))
tv = self.fresh()
self.unify(tf.vtype, FunctionType(tt, tv), ast.pos)
return T.Call(ef, es), tf.but(tv)
@infer.on(E.CallExpression)
def infer_CallExpression(self, ast):
ef, tf = self.infer(ast.atom)
eps, tps = [], []
eks, tks = [], []
for arg in ast.args:
if isinstance(arg, E.PlainArg):
ea, ta = self.infer(arg.expr)
self.unify_others(tf, ta, arg.expr.pos)
eps.append(ea)
tps.append(ta)
elif isinstance(arg, E.StarArg):
raise ApeNotImplementedError(msg='splat arguments not yet supported', pos=arg.name.pos)
elif isinstance(arg, E.StarStarKwarg):
raise ApeNotImplementedError(msg='keyword splat arguments not yet supported', pos=arg.name.pos)
elif isinstance(arg, E.KeywordArg):
if not isinstance(arg.name, E.IdExpression):
raise ApeSyntaxError(msg=f'Argument keywords must be simple identifiers, not {arg.name.__class__.__name__}', pos=arg.name.pos)
ea, ta = self.infer(arg.expr)
self.unify_others(tf, ta, arg.expr.pos)
eks.append((arg.name.name, ea))
tks.append((arg.name.name, ta))
#raise ApeNotImplementedError(msg='keyword arguments not yet supported', pos=arg.name.pos)
elif isinstance(arg, E.CompForArg):
raise ApeNotImplementedError(msg='comprehension arguments not yet supported', pos=ast.comp.pos)
else:
raise ApeNotImplementedError(msg=f'argument type \'{arg.__class__.__name__}\' not yet supported', pos=ast.pos)
tt = ArgsType(HListType([t.vtype for t in tps]),
HDictType([(k, t.vtype) for k, t in tks]))
tv = self.fresh()
self.unify(tf.vtype, FunctionType(tt, tv), ast.pos)
return T.Call(ef, eps, eks), tf.but(tv)
@infer.on(E.Comparison)
def infer_Comparison(self, ast):
e1, t1 = self.infer(ast.a)
e2, t2 = self.infer(ast.b)
self.unify_all(t1, t2, ast.pos)
return T.Comparison(ast.op, e1, e2), t1.but(Bool)
@infer.on(E.IdExpression)
def infer_IdExpression(self, ast):
return T.Id(ast.name), self.Types(self.lookup_name(ast.name, ast.pos))
@infer.on(E.TrueExpression)
def infer_TrueExpression(self, ast):
return T.Bool(True), self.Types(Bool)
@infer.on(E.FalseExpression)
def infer_FalseExpression(self, ast):
return T.Bool(False), self.Types(Bool)
@infer.on(E.YieldFromExpression)
def infer_YieldFromExpression(self, ast):
t = self.Types()
tc = CoroutineType(self.fresh(), self.fresh(), self.fresh())
self.unify(t.ytype, tc.ts[0], [ast.pos, ast.expr.pos])
self.unify(t.stype, tc.ts[1], [ast.pos, ast.expr.pos])
self.unify(t.vtype, tc.ts[2], [ast.pos, ast.expr.pos])
ef, tf = self.infer(ast.expr)
self.unify_all(tf, t.but(tc), ast.expr.pos)
return T.YieldFrom(ef), t
@infer.on(E.YieldExpression)
def infer_YieldExpression(self, ast):
e, t = self.infer(ast.expr)
self.unify(t.vtype, t.ytype, ast.pos)
return T.Yield(e), t.but(t.stype)
@infer.on(E.TaggedExpression)
def infer_TaggedExpression(self, ast):
# e.g. nil : ∀a.((unit/) -> list[a]), but instantiated
tft = self.Types(self.instantiate(self.tags[ast.tag]))
tv = self.fresh()
tr = self.Types(self.fresh())
# e.g. (a/) -> b
self.unify(tft.vtype, FunctionType(ArgsType.create(tv), tr.vtype), ast.pos)
if ast.expr is None:
ee = None
self.unify(tv, Unit, ast.pos)
else:
ee, te = self.infer(ast.expr)
self.unify_all(tr.but(tv), te, ast.pos)
return T.Tagged(ast.tag, ee), tr
@infer.on(E.DoStatement)
def infer_DoStatement(self, ast):
e, t = self.infer(ast.body)
return T.Do(e), t
@infer.on(E.Statements)
def infer_Statements(self, ast):
t = self.Types()
exprs = []
for i, stmt in enumerate(ast.stmts):
expr, typ = self.infer(stmt)
if i == len(ast.stmts) - 1:
self.unify_all(typ, t, stmt.pos)
else:
self.unify_all(typ, t.but(Unit), stmt.pos)
exprs.append(expr)
return T.Statements(exprs), t
@infer.on(E.LogicalAndExpressions)
def infer_LogicalAndExpressions(self, ast):
t = self.Types(Bool)
es = []
for expr in ast.exprs:
ee, te = self.infer(expr)
self.unify_all(te, t, expr.pos)
es.append(ee)
return T.LogicalAnd(es), t
@infer.on(E.LogicalOrExpressions)
def infer_LogicalOrExpressions(self, ast):
t = self.Types(Bool)
es = []
for expr in ast.exprs:
ee, te = self.infer(expr)
self.unify_all(te, t, expr.pos)
es.append(ee)
return T.LogicalOr(es), t
@infer.on(E.LogicalNotExpression)
def infer_LogicalNotExpression(self, ast):
t = self.Types(Bool)
ee, te = self.infer(ast.expr)
self.unify_all(te, t, ast.pos)
return T.LogicalNot(ee), t
@infer.on(E.Quasiquote)
def infer_Quasiquote(self, ast):
return QuoteTypeInference(self).visit(ast.expr)
# Recall that parameter default values are evaluated at definition time in
# Python. This is also true in Beatle.
#
# Thus:
# - the v-types of the default value expressions must unify with the types
# of the parameters.
# - the other types of the default value expressions must unify with the
# other types of the *function call definition expression*.
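    # A hypothetical illustration of the rule above (not taken from the original
    # code): for a definition such as
    #
    #     def f(x: int = g()): ...
    #
    # the v-type of `g()` must unify with `int` (the parameter's type), while any
    # yield/send/return effects of evaluating `g()` are charged to the enclosing
    # definition site rather than to later calls of `f`.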
def infer_params(self, params):
tdefs = self.Types()
edefs = []
pots = []
pkts = []
kots = []
vargs = Void
kwargs = Void
# always start off appending to the pkts
types = pkts
for p in params:
if isinstance(p, E.Param):
tv = self.fresh()
edef = None
if p.annotation is not None:
tann, kann = self.parse_type(p.annotation)
self.unify(tv, tann, p.annotation.pos)
self.unify_kind(kann, Star, p.annotation.pos)
if p.default is not None:
edef, tdef = self.infer(p.default)
self.unify(tv, tdef.vtype, p.default.pos)
self.unify_others(tdefs, tdef, p.default.pos)
edefs.append(edef)
types.append(ParamType(p.name.name, tv, p.default is not None))
self.add_name_ungeneralised(p.name.name, tv)
elif isinstance(p, E.EndOfPosOnlyParams):
pots, pkts = pkts, pots
types = pkts
elif isinstance(p, E.EndOfPosParams):
types = kots
elif isinstance(p, E.StarVarParams):
vargs = self.fresh()
elif isinstance(p, E.StarStarKwParams):
kwargs = self.fresh()
else:
raise ApeNotImplementedError(msg=f'{p.__class__.__name__} is not supported yet', pos=p.pos)
return ParamsType.make(pots, pkts, kots, vargs, kwargs), edefs, tdefs
def infer_function(self, ast):
with self.subenv():
tt, defaults, tD = self.infer_params(ast.params)
t = self.fresh()
eb, tb = self.infer(ast.body)
self.unify(tb.vtype, t, ast.pos)
self.unify(tb.rtype, t, ast.pos)
if is_gen(ast.body):
y, s = self.fresh(), self.fresh()
self.unify(tb.ytype, y, ast.pos)
self.unify(tb.stype, s, ast.pos)
tr = GeneratorType(y, s, t)
else:
self.unify(tb.ytype, Void, ast.pos)
self.unify(tb.stype, Void, ast.pos)
tr = t
if ast.return_annotation:
tann, kann = self.parse_type(ast.return_annotation)
self.unify(tr, tann, ast.return_annotation.pos)
self.unify_kind(kann, Star, ast.return_annotation.pos)
tf = FunctionType(tt, tr)
return defaults, eb, tf, tD
@infer.on(E.LambdaExpression)
def infer_LambdaExpression(self, ast):
edefaults, ebody, tf, tD = self.infer_function(ast)
return T.Lambda(ast.params, edefaults, ebody), tD.but(tf)
@infer.on(E.FunctionExpression)
def infer_FunctionExpression(self, ast):
edefaults, ebody, tf, tD = self.infer_function(ast)
return T.Function(ast.params, edefaults, ebody), tD.but(tf)
@infer.on(E.FunctionDefinition)
def infer_FunctionDefinition(self, ast):
edefaults, ebody, tf, tD = self.infer_function(ast)
self.add_name(ast.name.name, tf)
return T.FunctionDefinition(ast.name.name, ast.params, edefaults, ebody), tD.but(Unit)
@infer.on(E.Decorated)
def infer_Decorated(self, ast):
raise
@infer.on(E.NamespaceDefinition)
def infer_NamespaceDefinition(self, ast):
nstype = self.Types.void(Unit)
with self.clean_subenv():
eb, tb = self.infer(ast.expr)
self.unify_all(tb, nstype, ast.pos)
child_env = self.env
child_type_env = self.type_env
self.add_namespace(ast.name, child_env.env)
self.add_type_namespace(ast.name, child_type_env.env)
return T.NamespaceDefn(ast.name, ast.key, eb), nstype
@infer.on(E.NamespaceReferenceDefinition)
def infer_NamespaceReferenceDefinition(self, ast):
return T.NamespaceDefn(ast.name, ast.key, T.NoneExpr(ast.pos)), self.Types.void(Unit)
@infer.on(E.ModuleDefinition)
def infer_ModuleDefinition(self, ast):
raise
return T.ModuleDefn(), self.Types(Unit)
@infer.on(E.TypeDefinition)
def infer_TypeDefinition(self, ast):
name = ast.name.name
if len(ast.args) == 0:
self.add_type_name(name, Nullary(self.infer_type(ast.expr)))
else:
tvs = [self.fresh() for a in ast.args]
tvar = TypeVariable(name)
with self.subenv():
for n, tv in zip(ast.args, tvs):
self.type_env[n.name] = Object(Nullary(tv))
self.type_env[name] = Object(Nullary(tvar))
t = self.infer_type(ast.expr)
self.add_type_name(name, ConcreteTypeConstructor(
[Star] * len(ast.args), Star, tvs, MuType(tvar, t)))
if isinstance(t, DisjunctionType) and all(isinstance(s, TaggedType) for s in t.ts):
for s in t.ts:
self.tags[s.tag] = self.generalise(FunctionType(ArgsType.create(s.t), TypeCall(tvar, tvs)))
return T.NoneExpr(), self.Types(Unit)
@infer.on(E.SignatureDefinition)
def infer_SignatureDefinition(self, ast):
with self.subenv():
types = {}
names = {}
for decl in ast.body:
if isinstance(decl, E.TypeDeclaration):
name = decl.name.name
if len(decl.args) == 0:
types[name] = self.type_env[name] = Object(Nullary(self.fresh()))
# HERE
else:
types[name] = self.type_env[name] = Object(AbstractTypeConstructor(ArrowKind([Star for _ in decl.args], Star)))
# HERE
elif isinstance(decl, E.NameDeclaration):
scm = self.parse_toplevel_type(decl.annotation)
name = decl.name.name
names[name] = self.env[name] = Object(scm)
elif isinstance(decl, E.LawDeclaration):
with self.subenv():
self.env.update({name.name: Object(TypeScheme([], self.fresh())) for name in decl.names})
e, t = self.infer(decl.expr)
law_type = self.Types.void(Bool)
self.unify_all(t, law_type, decl.pos)
else:
raise RuntimeError()
self.type_env[ast.name.name] = Object(Nullary(SignatureType(types, names)))
# HERE
return T.SignatureDefn(ast.name.name, self.Types(Unit)), self.Types(Unit)
@infer.on(E.AnnotatedAssignment)
def infer_AnnotatedAssignment(self, ast):
ta, ka = self.parse_type(ast.annotation)
if not isinstance(ast.assignee, E.IdExpression):
raise ApeTypeError(pos=ast.pos, msg='Cannot write a type-annotated assignment for assignees that are not identifiers')
self.unify_kind(ka, Star, ast.annotation.pos)
ee, te = self.infer(ast.expr)
tta = self.Types(ta)
self.unify_all(tta, te, ast.pos)
self.add_name(ast.assignee.name, ta)
return T.Assignment([ast.assignee.name], ee), tta
@infer.on(E.ChainedAssignment)
def infer_ChainedAssignment(self, ast):
assignees = ast.assignees[:-1]
expr = ast.assignees[-1]
ma = MatchAssignee(self)
ee, te = self.infer(expr)
targets = []
assignments = []
for a in assignees:
asgnmts, target, t = ma.match_assignee(a)
assignments.extend(asgnmts)
targets.append(target)
for name, ty in assignments:
self.add_name(name, ty)
return T.Assignment(targets, ee), te.but(Unit)
class MatchAssignee:
def __init__(self, tc):
self.tc = tc
def match_error(self, asgn):
raise ApeNotImplementedError(asgn.pos, f'{asgn.__class__.__name__} assignee matching not yet implemented')
@overloadmethod(error_function=match_error)
def match_assignee(self, asgn):
...
@match_assignee.on(E.EmptyListExpression)
def match_assignee_EmptyListExpression(self, asgn):
return [], T.List([]), self.tc.Types(ListType(self.tc.fresh()))
@match_assignee.on(E.TaggedExpression)
def match_assignee_TaggedExpression(self, asgn):
assignments, targets, types = self.match_assignee(asgn.expr)
return assignments, T.Tagged(asgn.tag, targets), types.but(TaggedType(asgn.tag, types.vtype))
@match_assignee.on(E.TupleLiteral)
def match_assignee_TupleLiteral(self, asgn):
assignments = []
targets = []
types = []
tt = self.tc.Types()
for a in asgn.exprs:
asgn, e, t = self.match_assignee(a)
assignments.extend(asgn)
targets.append(e)
types.append(t.vtype)
self.tc.unify_others(t, tt, a.pos)
return assignments, T.Tuple(targets), tt.but(TupleType(types))
# Commenting this out for now. We need some way to deal with:
# [a, b, c] = [1, 2, 3, 4]
# [a, b, c] = [1, 2]
# [a, b, c] = [1, 'a', 3.0]
# and
# [a, b, *c] = [1, 2, 'h', 'e', 'l', 'l', 'o']
# while maintaining type safety and compatibility with Python. Using an
# HListType is a possibility. Getting this right means we are compatible with
# Perl-style argument passing as well. Whether that is a good or a bad thing is
# dependent upon your point of view.
#
# Don't forget to fix EmptyListExpression matching at the same time
#
# @match_assignee.on(E.ListLiteral)
# def match_assignee_ListLiteral(self, asgn):
# assignments = []
# targets = []
# types = []
# t = self.tc.Types()
# for a in asgn.exprs:
# ea, ta = self.match_assignee(a)
# targets.append(ea)
# types.append(ta)
# self.unify_all(t, ta)
# return assignments, T.List(targets), t.but(ListType(t.vtype))
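    # A hypothetical illustration of the HList idea sketched above (not implemented
    # here): under an HListType, `[a, b, *c] = [1, 2, 'h', 'e', 'l', 'l', 'o']`
    # could assign `a : int`, `b : int` and `c : hlist[str, str, str, str, str]`,
    # whereas a plain ListType would have to reject the mixed element types.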
@match_assignee.on(E.IdExpression)
def match_assignee_IdExpression(self, asgn):
tv = self.tc.fresh()
return [(asgn.name, tv)], T.Id(asgn.name), self.tc.Types(tv)
class QuoteTypeInference(DeepAstPass):
def __init__(self, tc):
self.tc = tc
def override_do_visit_wrapper(self, ast, new):
if ast is new:
return Ast(T.Quote(ast, ()), self.tc.Types(Any), new.pos)
cls, args = new
try:
return Ast(T.Quote(cls, args), self.tc.Types(Any), ast.pos)
except TypeError:
print(cls.__name__)
raise
@overloadmethod(use_as_default=True)
def visit(self, ast):
return self.do_visit(ast)
@visit.on(E.Unquote)
def qtinfer_Unquote(self, ast):
e, t = self.tc.infer(ast.expr)
self.tc.unify_all(t, self.tc.Types(Any), ast.pos)
return e
######
def bind_kind(name, k, pos):
if isinstance(k, KindVariable) and k.name == name:
return {}
if name in k.fkv():
raise ApeUnificationError(msg=f'infinite kind with {name}={k}!', pos=pos)
return {name: k}
def bind(tvar, t, pos):
if isinstance(t, TypeVariable) and t.tvar == tvar:
return {}
if tvar in t.ftv():
raise ApeUnificationError(msg=f'infinite type with {tvar}={t}!', pos=pos)
return {tvar: t}
def unifies_kind(k1, k2, pos):
if k1 == k2:
return {}
if isinstance(k1, KindVariable):
return bind_kind(k1.name, k2, pos)
if isinstance(k2, KindVariable):
return bind_kind(k2.name, k1, pos)
if isinstance(k1, ArrowKind) and isinstance(k2, ArrowKind):
if len(k1.ks) == len(k2.ks):
return unify_many_kinds([k1.k, *k1.ks], [k2.k, *k2.ks], pos)
raise ApeUnificationError(pos=pos, msg=f'Cannot unify {k1} and {k2}')
def unify_constructor(t1, t2, pos):
su1 = unifies_kind(t1.con.kind, ArrowKind([t.kind for t in t1.ts], Star), pos)
s = {x.tvar: t for x, t in zip(t1.con.tvars, t1.ts)}
su2 = unifies(t1.apply(su1).con.expr.apply(s), t2.apply(su1), pos)
return compose_subst(su2, su1)
def unifies(t1, t2, pos):
if isinstance(t1, AbstractTypeConstructor) and isinstance(t2, AbstractTypeConstructor):
return unifies_kind(t1.kind, t2.kind, pos)
if isinstance(t1, TypeCall) and isinstance(t1.con, ConcreteTypeConstructor):
if len(t1.ts) == len(t1.con.tvars):
return unify_constructor(t1, t2, pos)
if isinstance(t2, TypeCall) and isinstance(t2.con, ConcreteTypeConstructor):
if len(t2.ts) == len(t2.con.tvars):
return unify_constructor(t2, t1, pos)
if not isinstance(t1, Type) or not isinstance(t2, Type):
raise ApeUnificationError(pos=pos, msg=f'Can only unify types, not {t1}: {type(t1)} and {t2}: {type(t2)}')
if t1 == t2:
return {}
if t1 is Any and t2 is Any:
raise ApeUnificationError(pos=pos, msg=f'Cannot unify Any and Any')
if t1 is Any or t2 is Any:
return {}
if isinstance(t1, TypeVariable):
return bind(t1.tvar, t2, pos)
if isinstance(t2, TypeVariable):
return bind(t2.tvar, t1, pos)
if isinstance(t1, TupleType) and isinstance(t2, TupleType):
if len(t1.ts) == len(t2.ts):
return unify_many(t1.ts, t2.ts, pos)
if isinstance(t1, DisjunctionType) and isinstance(t2, DisjunctionType):
if len(t1.ts) == len(t2.ts):
return unify_many(t1.ts, t2.ts, pos)
if isinstance(t1, AnonParamType) and isinstance(t2, AnonParamType):
if t1.opt == t2.opt:
return unifies(t1.t, t2.t, pos)
if isinstance(t1, AnonParamType):
return unifies(t1.t, t2, pos)
if isinstance(t2, AnonParamType):
return unifies(t2, t1, pos)
if isinstance(t1, ParamType) and isinstance(t2, ParamType):
if t1.name == t2.name and t1.opt == t2.opt:
return unifies(t1.t, t2.t, pos)
if isinstance(t1, ParamType):
return unifies(t1.t, t2, pos)
if isinstance(t2, ParamType):
return unifies(t2, t1, pos)
if isinstance(t1, HListType) and isinstance(t2, ListType):
return unify_many(t1.ts, [t2.t] * len(t1.ts), pos)
if isinstance(t1, ListType) and isinstance(t2, HListType):
return unifies(t2, t1, pos)
if isinstance(t1, HListType) and isinstance(t2, HListType):
if len(t1.ts) == len(t2.ts):
return unify_many(t1.ts, t2.ts, pos)
if isinstance(t1, HDictType) and isinstance(t2, HDictType):
if len(t1.ts) == len(t2.ts):
# require the same sets of names, and no duplicates
if t1.names == t2.names == list(sorted(set(t1.names))):
return unify_many(t1.types, t2.types, pos)
if isinstance(t1, ParamsType) and isinstance(t2, ParamsType):
return unify_many([t1.pots, t1.pkts, t1.kots, t1.args, t1.kwds],
[t2.pots, t2.pkts, t2.kots, t2.args, t2.kwds], pos)
if isinstance(t1, ArgsType) and isinstance(t2, ParamsType):
return unify_call(t1, t2, pos)
if isinstance(t1, ParamsType) and isinstance(t2, ArgsType):
return unify_call(t2, t1, pos)
if isinstance(t1, ArgsType) and isinstance(t2, ArgsType):
return unify_many([t1.pts, t1.kts], [t2.pts, t2.kts], pos)
if isinstance(t1, MaybeType) and isinstance(t2, MaybeType):
return unifies(t1.t, t2.t, pos)
if isinstance(t1, SetType) and isinstance(t2, SetType):
return unifies(t1.t, t2.t, pos)
if isinstance(t1, ListType) and isinstance(t2, ListType):
return unifies(t1.t, t2.t, pos)
if isinstance(t1, DictType) and isinstance(t2, DictType):
return unify_many([t1.k, t1.v], [t2.k, t2.v], pos)
if isinstance(t1, FunctionType) and isinstance(t2, FunctionType):
return unify_many([t1.p, t1.r], [t2.p, t2.r], pos)
if isinstance(t1, TypeCall) and isinstance(t2, TypeCall):
return unify_many([t1.con, *t1.ts], [t2.con, *t2.ts], pos)
raise ApeUnificationError(pos=pos, msg=f'Cannot unify {t1} and {t2}')
def apply(ts, subst):
return [t.apply(subst) for t in ts]
def unify_call(a, p, pos, seen=set()):
if len(a.pts.ts) != 0 and len(p.pots) != 0:
if len(a.pts.ts) < len(p.pots):
raise ApeUnificationError(msg='Not enough positional arguments', pos=pos)
# if any(t.name in seen for t in p.pots):
# raise ApeUnificationError(msg='Already have an argument for {t.name}', pos=pos)
l = len(p.pots)
su1 = unifies(a.pts[:l], HListType(p.pots), pos)
at = ArgsType(a.pts[l:].apply(su1), a.kts.apply(su1))
pt = ParamsType.make([], apply(p.pkts, su1), apply(p.kots, su1),
p.args.apply(su1), p.kwds.apply(su1))
su2 = unify_call(at, pt, pos, seen)
return compose_subst(su2, su1)
if len(a.pts.ts) != 0 and len(p.pkts) != 0:
l = min(len(a.pts.ts), len(p.pkts))
su1 = unifies(a.pts[:l], HListType(p.pkts[:l]), pos)
at = ArgsType(a.pts[l:].apply(su1), a.kts.apply(su1))
pt = ParamsType.make([], apply(p.pkts[l:], su1), apply(p.kots, su1),
p.args.apply(su1), p.kwds.apply(su1))
new_seen = {t.name for t in p.pkts[:l]}
su2 = unify_call(at, pt, pos, seen | new_seen)
return compose_subst(su2, su1)
if len(a.pts.ts) != 0:
su1 = unifies(a.pts, p.args, pos)
at = ArgsType(HListType([]), a.kts.apply(su1))
pt = ParamsType.make([], [], apply(p.kots, su1), p.args.apply(su1), p.kwds.apply(su1))
su2 = unify_call(at, pt, pos, seen)
return compose_subst(su2, su1)
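    # At this point only keyword arguments can remain; none of the branches below
    # unifies them yet: every remaining case is rejected with an error.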
if len(a.kts) != 0 and len(p.pots) != 0:
raise ApeUnificationError(msg='Not enough positional arguments', pos=pos)
if len(a.kts) != 0 and len(p.pkts) != 0:
raise ApeUnificationError(msg='Not enough positional arguments', pos=pos)
if len(a.kts) != 0 and len(p.kots) != 0:
raise ApeUnificationError(msg='Not enough positional arguments', pos=pos)
if len(a.kts) != 0:
raise ApeUnificationError(msg='Not enough positional arguments', pos=pos)
return {}
#raise ApeNotImplementedError(pos=pos, msg='We need type lists to properly handle f(*[1, 2, 3]) where f: (int, int, int) -> int')
def unify_many_kinds(k1x, k2x, pos):
if len(k1x) == 0 and len(k2x) == 0:
return {}
[k1, *k1s], [k2, *k2s] = k1x, k2x
su1 = unifies_kind(k1, k2, pos)
su2 = unify_many_kinds(
[t.apply(su1) for t in k1s],
[t.apply(su1) for t in k2s], pos)
return compose_subst(su2, su1)
def unify_many(t1x, t2x, pos):
if len(t1x) == 0 and len(t2x) == 0:
return {}
[t1, *t1s], [t2, *t2s] = t1x, t2x
su1 = unifies(t1, t2, pos)
su2 = unify_many(
[t.apply(su1) for t in t1s],
[t.apply(su1) for t in t2s], pos)
return compose_subst(su2, su1)
class KindConstraint:
def __init__(self, k1, k2, pos):
self.k1 = k1
self.k2 = k2
self.pos = pos
def __repr__(self):
uni = f'{self.k1} ~ {self.k2}'
return f'{uni:<25} at {self.pos}'
def apply(self, subst):
return KindConstraint(self.k1.apply(subst), self.k2.apply(subst), self.pos)
def fkv(self):
        return set.union(self.k1.fkv(), self.k2.fkv())
class Constraint:
def __init__(self, t1, t2, pos):
self.t1 = t1
self.t2 = t2
self.pos = pos
def __repr__(self):
uni = f'{self.t1} ~ {self.t2}'
return f'{uni:<25} at {self.pos}'
def apply(self, subst):
return Constraint(self.t1.apply(subst), self.t2.apply(subst), self.pos)
def ftv(self):
return set.union(self.t1.ftv(), self.t2.ftv())
def compose_subst(su1, su2):
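    # Merge the two substitutions; on key clashes the bindings from su1 win.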
return {**su2, **su1}
def solve_kind(su, cs):
su = {**su}
for c in cs:
c = c.apply(su)
su1 = unifies_kind(c.k1, c.k2, c.pos)
su.update(su1)
return su
def solve(su, cs):
su = {**su}
for c in (k.apply(su) for k in cs):
su1 = unifies(c.t1, c.t2, c.pos)
su.update(su1)
return su
######
def test_infer():
try:
unify_call(ArgsType(HListType([Int, Int, Int, Int]), HDictType([])), ParamsType.make([ParamType('a', Int), ParamType('b', Int)], [ParamType('c', Int)], [], TypeVariable('a'), TypeVariable('b')), 0)
except BaseException as e:
print(e.format_with_context(''))
def infer(ast):
try:
i = TypeChecker()
e, t = i.infer(ast)
s = solve({}, i.unifiers)
i.update_with_subst(s)
return e
except ApeTypeError:
raise
except ApeError as exc:
raise ApeTypeError(pos=None, msg='Unexpected error in typechecker') from exc
|
StarcoderdataPython
|
90727
|
<gh_stars>0
"""
*
* Author: <NAME>(coderemite)
* Email: <EMAIL>
*
"""
for s in[*open(0)][1:]:a,b=map(int,s.split());print('YNEOS'[a+b<a*b::2])
exec(int(input())*"n,m=map(int,input().split());print('YNEOS'[n+m<n*m::2]);")
|
StarcoderdataPython
|
1775110
|
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class DeleteDepartmentReq(object):
department_id_type: lark_type.DepartmentIDType = attr.ib(
default=None, metadata={"req_type": "query", "key": "department_id_type"}
    ) # Type of department ID used in this call. Example value: "open_department_id". Possible values: `department_id`: identify a department by its custom department_id, `open_department_id`: identify a department by its open_department_id. Default: `open_department_id`
department_id: str = attr.ib(
default="", metadata={"req_type": "path", "key": "department_id"}
    ) # Department ID; it must be consistent with the department_id_type passed in the query parameters. Example value: "od-4e6ac4d14bcd5071a37a39de902c7141". Max length: `64` characters. Regex check: `^0|[^od][A-Za-z0-9]*`
@attr.s
class DeleteDepartmentResp(object):
pass
def _gen_delete_department_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=DeleteDepartmentResp,
scope="Contact",
api="DeleteDepartment",
method="DELETE",
url="https://open.feishu.cn/open-apis/contact/v3/departments/:department_id",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
)
|
StarcoderdataPython
|
1662238
|
# -*- coding: utf-8 -*-
import unittest
import logging as pylogging
import utils.logging as logging
class UtilsLoggingTests(unittest.TestCase):
"""Documentatoion coming soon.
"""
@classmethod
def setUpClass(cls):
pylogging.disable(pylogging.NOTSET)
@classmethod
def tearDownClass(cls):
pylogging.disable(pylogging.CRITICAL)
def test_with_extras(self):
"""It should append the `extras` dictionary to the logger.
"""
logger = logging.Logger('app.testing', {'user': '12345'})
with self.assertLogs(logger.name, level=pylogging.INFO) as cm:
logger.info('This is the first log.')
logger.info('This is the second log.')
logger.warning('This is the third log.')
self.assertEqual(cm.output, [
'INFO:app.testing:(user: 12345) This is the first log.',
'INFO:app.testing:(user: 12345) This is the second log.',
'WARNING:app.testing:(user: 12345) This is the third log.',
])
def test_with_fields(self):
"""It should append the `extras` dictionary to the logger.
"""
logger = logging.Logger('app.testing', {'user': '12345'})
logger.with_fields(pid=10)
with self.assertLogs(logger.name, level=pylogging.INFO) as cm:
logger.info('This is the first log.')
logger.info('This is the second log.')
self.assertEqual(cm.output, [
'INFO:app.testing:(user: 12345) (pid: 10) This is the first log.',
'INFO:app.testing:(user: 12345) (pid: 10) This is the second log.',
])
|
StarcoderdataPython
|
1635413
|
#coding = utf-8
import cv2
import numpy as np
import sys
if __name__ == '__main__':
image_path ="E:/IntelliJ Projects/Thyroid/Thyroid Maven Webapp/out/artifacts/Thyroid_Maven_Webapp_Web_exploded/Thyroid_images/"+sys.argv[1]
img = cv2.imread(image_path)
image = cv2.medianBlur(img, 5)
cv2.imwrite("E:/IntelliJ Projects/Thyroid/Thyroid Maven Webapp/out/artifacts/Thyroid_Maven_Webapp_Web_exploded/Thyroid_images/quzao%s" % sys.argv[2],image)
|
StarcoderdataPython
|
1619482
|
# Copyright (C) 2010, 2011 <NAME> (<EMAIL>) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.objects.submodule.base import Submodule
from git.objects.submodule.root import RootModule
from git.db.interface import SubmoduleDB
__all__ = ["PureSubmoduleDB"]
class PureSubmoduleDB(SubmoduleDB):
"""Pure python implementation of submodule functionality"""
@property
def submodules(self):
return Submodule.list_items(self)
def submodule(self, name):
try:
return self.submodules[name]
except IndexError:
raise ValueError("Didn't find submodule named %r" % name)
# END exception handling
def create_submodule(self, *args, **kwargs):
return Submodule.add(self, *args, **kwargs)
def iter_submodules(self, *args, **kwargs):
return RootModule(self).traverse(*args, **kwargs)
def submodule_update(self, *args, **kwargs):
return RootModule(self).update(*args, **kwargs)
|
StarcoderdataPython
|
153458
|
<filename>renderer.py
#!/usr/bin/env python3
import weather
import bitcoin
import bitcoin2
from wand.color import Color
from wand.image import Image, COMPOSITE_OPERATORS
from wand.drawing import Drawing
import urllib.request
def update_img():
bitcoins = bitcoin.get_bitcoin()
bitcoingraph = bitcoin2.get_bitcoin()
weath, icon = weather.get_weather()
#weath = "Tiistai. Lumikuuroja. Ylin lämpötila 0ºC. Tuuli ENE, nopeus 10−15 km/h. Lumisateen mahd: 40%."
f = urllib.request.urlopen(icon)
infos = bitcoins+" "+weath
weatherstrip = infos.split(" ")
lines = [""]
#extraimage = Image(filename='test2.png')
extraimage = Image(file=f)
eiw=extraimage.width
eih=extraimage.height
if eiw/eih > 176/264:
iw = 176
ih = 176*eih/eiw
else:
iw = 264*eiw/eih
ih = 264
#extraimage.resize(int(iw),int(ih))
#extraimage.gamma(0.2)
for w in weatherstrip:
if len(w)+len(lines[-1]) < 23:
lines[-1]+=w+" "
else:
lines.append(w+" ")
with Color('white') as bg:
with Drawing() as draw:
with Image(width=176, height=264, background=bg) as img:
img.type = 'bilevel'
img.composite(bitcoingraph,0,0)
img.composite(extraimage,0,100)
draw.font_size = 16 #27
lheight = len(lines)-1
for l in lines:
draw.text(int(0),int(img.height-draw.font_size*(lheight+0.5)),l)
lheight-=1
draw(img)
#img.negate()
img.save(filename='text.png')
#update_img()
|
StarcoderdataPython
|
1621257
|
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import norm
def prepare_input(y, X, end_time):
y0, y1 = y[np.isnan(y[:, 1])], y[~np.isnan(y[:, 1])]
x0, x1 = X[np.isnan(y[:, 1])], X[~np.isnan(y[:, 1])]
diagonal0, diagonal1 = coo_matrix((y0.shape[0], y0.shape[0])), coo_matrix((y1.shape[0], y1.shape[0]))
diagonal0.setdiag(np.ones(y0.shape[0]))
diagonal1.setdiag(np.ones(y1.shape[0]))
mu = get_regularization_parameter(X)
return {'y0': y0, 'y1': y1, 'x0': x0, 'x1': x1, 'end_time': end_time, 'mu': mu,
'diagonal0': diagonal0, 'diagonal1': diagonal1}
def get_regularization_parameter(X):
n = X.shape[0]
return norm(X) ** 2 / n
def hash_all(x, mod):
x_ = np.zeros(mod)
for i in x:
x_[hash(i) % mod] += 1
return x_
def check_input_data(y):
assert (y[:, 0] >= 0.).all()
assert (y[~np.isnan(y[:, 1])][:, 0] <= y[~np.isnan(y[:, 1])][:, 1]).all()
class MultiEncoder:
def __init__(self, encoders):
"""
:param encoders: iterable of encoders with the property:
encoders[i].features is a subset of encoders[i+1].features
"""
self.encoders = encoders
self.dimension = len(encoders)
def dict_vectorizer(self, state):
num_common_feat = len(set(self.encoders[-1].features).intersection(state))
best_level, best_encoder = self.dimension, self.encoders[-1]
for level, encoder in reversed(list(enumerate(self.encoders))):
partial_features = set(encoder.features)
num_common_feat_level = len(partial_features.intersection(state))
if num_common_feat_level < num_common_feat:
break
else:
best_level, best_encoder = level, encoder
return best_level, best_encoder.dict_vectorizer(state)
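# A hypothetical illustration of the subset property documented above (not part
# of the original module): dict_vectorizer picks the smallest encoder that still
# covers as many of the state's features as the largest one does. With encoders
# e1 and e2 where e1.features == {"a"} and e2.features == {"a", "b"}, a state
# {"a": 1, "b": 2} resolves to level 1 and e2, while {"a": 1} resolves to level 0
# and e1.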
class MultiEstimator:
def __init__(self, estimators):
self.estimators = estimators
def predict(self, x_):
level, x = x_
estimator = self.estimators[level]
return estimator.predict(x)
|
StarcoderdataPython
|
90906
|
"""Faça um programa que leia nome e média de um aluno, guardando também a situação em um dicionário.
No final, mostre o conteúdo da estrutura na tela."""
aluno = {}
nome = str(input('Nome: '))
aluno['Nome'] = nome
media = float(input(f'Média de {nome}: '))
aluno['Média'] = media
if media >= 7:
aluno['Situação'] = 'Aprovado(a)'
elif media >= 5:
aluno['Situação'] = 'Em recuperação'
else:
aluno['Situação'] = 'Reprovado(a)'
print('--' * 20)
for k, v in aluno.items():
print(f'{k} é igual a {v}')
|
StarcoderdataPython
|
142714
|
<reponame>ayame-q/PersonalSupplyManager<filename>server/supply/migrations/0001_initial.py<gh_stars>0
# Generated by Django 3.1.6 on 2021-02-06 16:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Connector',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40, verbose_name='名前')),
],
),
migrations.CreateModel(
name='Standard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40, verbose_name='名前')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='supply.standard', verbose_name='親規格')),
],
),
migrations.CreateModel(
name='Supply',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('type', models.CharField(choices=[('C', 'ケーブル'), ('E', '電化製品')], max_length=2, verbose_name='分類')),
('number', models.IntegerField(verbose_name='番号')),
('category', models.CharField(blank=True, max_length=40, null=True, verbose_name='種類')),
('name', models.CharField(blank=True, max_length=40, null=True, verbose_name='名前')),
('manufacturer', models.CharField(blank=True, max_length=40, null=True, verbose_name='メーカー')),
('model', models.CharField(blank=True, max_length=40, null=True, verbose_name='型番')),
('serial_number', models.CharField(blank=True, max_length=200, null=True, verbose_name='シリアルナンバー')),
('length', models.IntegerField(blank=True, null=True, verbose_name='長さ(cm)')),
('bought_at', models.DateField(blank=True, null=True, verbose_name='購入日')),
('position', models.TextField(blank=True, null=True, verbose_name='設置場所')),
('is_power_cable', models.BooleanField(default=False, verbose_name='電源線か')),
('is_signal_cable', models.BooleanField(default=False, verbose_name='信号線か')),
('is_active_cable', models.BooleanField(default=False, verbose_name='アクティブケーブルか')),
('note', models.TextField(blank=True, null=True, verbose_name='備考')),
('created_at', models.DateTimeField(default=django.utils.timezone.localtime, verbose_name='作成日')),
('updated_at', models.DateTimeField(default=django.utils.timezone.localtime, verbose_name='更新日')),
('connected_supplies', models.ManyToManyField(blank=True, related_name='_supply_connected_supplies_+', to='supply.Supply', verbose_name='接続先')),
],
),
migrations.CreateModel(
name='SupplyConnectorRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gender', models.CharField(blank=True, choices=[('None', '未定義'), ('Male', 'オス'), ('Female', 'メス')], max_length=10, null=True, verbose_name='オス・メス')),
('count', models.IntegerField(default=1, verbose_name='個数')),
('connector', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='supply_relations', to='supply.connector', verbose_name='コネクタ')),
('supply', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='connector_relations', to='supply.supply', verbose_name='製品')),
],
),
migrations.AddField(
model_name='supply',
name='connectors',
field=models.ManyToManyField(blank=True, related_name='supplies', through='supply.SupplyConnectorRelation', to='supply.Connector', verbose_name='コネクタ'),
),
migrations.AddField(
model_name='supply',
name='last_user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='最終更新者'),
),
migrations.AddField(
model_name='supply',
name='owner',
field=models.ManyToManyField(blank=True, related_name='supplies', to=settings.AUTH_USER_MODEL, verbose_name='所有者'),
),
migrations.AddField(
model_name='supply',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='supply.supply', verbose_name='本体'),
),
migrations.AddField(
model_name='supply',
name='standard',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='supplies', to='supply.standard', verbose_name='規格'),
),
migrations.AddField(
model_name='connector',
name='standard',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='supply.standard', verbose_name='規格'),
),
]
|
StarcoderdataPython
|
1723996
|
"""
Utils
=====
"""
from .logging import getLogger, log_to_file, log_to_console
from .map2loop import process_map2loop, build_model
from .helper import get_data_axis_aligned_bounding_box, get_data_bounding_box, get_data_bounding_box_map
from .helper import get_dip_vector,get_strike_vector, get_vectors, strike_dip_vector
from .regions import RegionEverywhere, RegionFunction, NegativeRegion, PositiveRegion
from .exceptions import LoopException, LoopImportError, InterpolatorError, LoopTypeError, LoopValueError
|
StarcoderdataPython
|
143802
|
#!/usr/bin/env python3
from contextlib import contextmanager
import pandas as pd
import numpy as np
import random
import torch
import time
import os
import argparse
from scipy import sparse
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
@contextmanager
def timeit(name: str) -> None:
before = time.time()
try:
yield
finally:
duration = time.time() - before
print("%s: %.3f sec." % (name, duration))
parser = argparse.ArgumentParser(description='Linear Regression')
parser.add_argument('csv_file', metavar='csv-file', type=str, nargs=1)
parser.add_argument('target', metavar='target-column', type=str, nargs=1)
parser.add_argument('exclude', metavar='excluded-columns', type=str, nargs='*')
parser.add_argument('-testratio', metavar='ratio', type=float, default=0.5, nargs=None)
parser.add_argument('-epochs', metavar='epochs', type=int, default=1, nargs=None)
parser.add_argument('-batchsize', metavar='batch size', type=int, default=256, nargs=None)
parser.add_argument('-lr', metavar='learning rate', type=float, default=0.001, nargs=None)
parser.add_argument('-decay', metavar='weight decay', type=float, default=0.0, nargs=None)
parser.add_argument('-momentum', metavar='gradient momentum', type=float, default=0.1, nargs=None)
parser.add_argument('-sep', metavar='separator', type=str, default=",", nargs=None)
args = parser.parse_args()
target_col = args.target[0]
with timeit("CSV parsing"):
excluded = set(args.exclude)
df = pd.read_csv(args.csv_file[0], sep=args.sep[0], header=0, na_values=["", " ", "NA", "-"])
numerical = [
col for col, t in df.dtypes.iteritems()
        if t in (np.int64, np.float64) and col not in excluded
]
categorical = [
col for col, t in df.dtypes.iteritems()
        if t == np.object and col not in excluded
]
numerical.remove(target_col)
df[categorical] = df[categorical].astype(str) # required for one-hot
with timeit("set split"):
train_set, test_set = train_test_split(df, shuffle=True, test_size=args.testratio)
with timeit("training+running imputer"):
X_num = train_set[numerical].values # already makes a copy
imputer = SimpleImputer(copy=False)
X_num = imputer.fit_transform(X_num)
with timeit("training+running scaler"):
scaler = StandardScaler(copy=False)
X_num = scaler.fit_transform(X_num)
# with timeit("hash encoding"):
# X_cat = df[categorical]
# hash = HashingEncoder(n_components=32).fit(X_cat)
# X_cat = hash.transform(X_cat)
if len(categorical) > 0:
with timeit("one-hot encoding"):
X_cat = train_set[categorical].values
#cat_imputer = SimpleImputer(copy=False, strategy='most_frequent')
#X_cat = cat_imputer.fit_transform(X_cat)
one_hot = OneHotEncoder(sparse=True, handle_unknown='ignore')
X_cat = one_hot.fit_transform(X_cat)
dim = X_cat.shape[1] + X_num.shape[1]
else:
dim = X_num.shape[1]
print("dimensions:", dim)
y_true = train_set[args.target[0]].values.astype(np.float32)
y_scale = y_true.std()
y_true /= y_scale
regressor = torch.nn.Linear(dim, 1)
torch.nn.init.kaiming_normal_(regressor.weight, nonlinearity='linear')
optimizer = torch.optim.SGD(
regressor.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.decay
)
with timeit("training"):
indices = list(range(len(X_num)))
n_sections = 1 + len(X_num) // args.batchsize
for epoch in range(args.epochs):
print("epoch", epoch)
random.shuffle(indices)
for idx in np.array_split(indices, n_sections):
y_batch = torch.Tensor(y_true[idx])
num = torch.Tensor(X_num[idx, :])
if len(categorical) > 0:
cat = torch.Tensor(X_cat[idx, :].todense())
batch = torch.cat([num, cat], dim=1)
else:
batch = num
optimizer.zero_grad()
y_pred = regressor(batch).squeeze(1)
loss = (y_batch - y_pred).pow(2).sum()
loss.backward()
optimizer.step()
regressor.eval()
with timeit("running imputer on testing data"):
X_num = test_set[numerical].values
X_num = imputer.transform(X_num)
with timeit("running scaler on testing data"):
X_num = scaler.transform(X_num)
if len(categorical) > 0:
with timeit("running one-hot on testing data"):
X_cat = test_set[categorical].values
X_cat = one_hot.transform(X_cat)
with timeit("predicting"):
batch_size = 4096
y = []
for i in range(0, len(X_num), batch_size):
end = min(len(X_num), i+batch_size)
num = torch.Tensor(X_num[i:end, :])
if len(categorical) > 0:
cat = torch.Tensor(X_cat[i:end, :].todense())
batch = torch.cat([num, cat], dim=1)
else:
batch = num
y += regressor(batch).squeeze(1).tolist()
y = np.array(y) * y_scale
y_true = test_set[args.target[0]].values.astype(np.float32)
mae = np.abs(y_true - y).mean()
print("MAE", mae)
ref = np.abs(y_true - y_true.mean()).mean()
print("Baseline", ref)
outdir = "outdir"
# with timeit("writing"):
# batch_size = 1024
# for j, i in enumerate(range(0, len(X_num), batch_size)):
# d = X_cat[i:i+batch_size, :].todense()
# X = np.concatenate([X_num[i:i+batch_size], d], axis=1)
# print("X dim", X.shape[1])
# pd.DataFrame(X).to_csv("%s/output%i.csv" % (outdir, j), index=False)
with timeit("reading again"):
n = 0
for filename in os.listdir(outdir):
df = pd.read_csv(os.path.join(outdir, filename))
n += len(df)
print("number of rows:", n)
|
StarcoderdataPython
|
3266590
|
import torch.nn as nn
import tensorflow as tf
import json
import time
import sys
sys.path.append('.')
import src.vanilla as vanilla
from src.hardware.precompile import precompile_model
from src.hardware.sim_binder import run_csim
def run_layer(array_size, layer_type, in_ch, out_ch, kernel_size, input_size, batch_size):
input_image_dims = [input_size,input_size,in_ch]
input_layer = tf.keras.Input(shape=input_image_dims, batch_size=batch_size, name="input_1")
x = input_layer
if layer_type == "Conv2d":
x = tf.keras.layers.Conv2D(
filters=out_ch,
kernel_size=list(kernel_size),
strides=(1, 1),
padding="same",
activation=None,
use_bias=True,
name="dummy_layer"
)(x)
elif layer_type == "DepthwiseConv2d":
x = tf.keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=(1, 1),
depth_multiplier=1,
padding="same",
activation=None,
use_bias=True,
name="dummy_layer"
)(x)
else:
raise NotImplementedError
keras_model = tf.keras.models.Model(inputs=input_layer, outputs=x)
layers = precompile_model(keras_model, array_size=array_size, partition_size=None)
no_ops = 0
for layer_name in layers:
gemm_op = layers[layer_name]['gemm_op']
if gemm_op is not None:
X = gemm_op['input_size']
W = gemm_op['weight_size']
no_ops += (2 * X[0] * X[1] * W[1])
json_out = {"args":None, "model1":{"order":list(layers.keys()), "layers":layers, "no_repeat":1, "no_ops":no_ops}}
sim_res = run_csim(json_out)
csim_runtime = sim_res['no_cycles'] * 1e-9 * 1e3
throughput = sim_res['no_ops'] / 2 / (csim_runtime/1e3)
peak_throughput = array_size[0] * array_size[1] * 1e9
csim_util = throughput / peak_throughput
return csim_runtime, csim_util
if __name__ == "__main__":
array_size = [128,128]
batch_size = 1
table = {}
start = time.time()
discrete_step = 16
op = vanilla.Conv2d
opname = op.__name__
table[opname] = {}
for input_size in [2,4,8,16,32]:
table[opname][input_size] = {}
for kernel_size in [(1,1), (3,3), (5,5)]:
kernel_size_str = "{}x{}".format(kernel_size[0], kernel_size[1])
table[opname][input_size][kernel_size_str] = {}
for in_ch in range (discrete_step,256+1,discrete_step):
table[opname][input_size][kernel_size_str][in_ch] = {}
for out_ch in range (discrete_step,256+1,discrete_step):
runtime_csim, util_csim = run_layer(array_size, opname, in_ch, out_ch, kernel_size, input_size, batch_size)
print("op:{}\t input_size: {}\t kernel_size: {}\t in_ch:{}\t out_ch:{}\t runtime_csim: {} ms\t util_csim: {}".format(opname, input_size, kernel_size_str, in_ch, out_ch, runtime_csim, util_csim))
table[opname][input_size][kernel_size_str][in_ch][out_ch] = {"runtime_csim": runtime_csim, "util_csim": util_csim}
for input_size in [1]:
table[opname][input_size] = {}
for kernel_size in [(1,1)]:
kernel_size_str = "{}x{}".format(kernel_size[0], kernel_size[1])
table[opname][input_size][kernel_size_str] = {}
for in_ch in range (discrete_step, 512+1,discrete_step):
table[opname][input_size][kernel_size_str][in_ch] = {}
for out_ch in range (discrete_step, 512+1,discrete_step):
runtime_csim, util_csim = run_layer(array_size, opname, in_ch, out_ch, kernel_size, input_size, batch_size)
print("op:{}\t input_size: {}\t kernel_size: {}\t in_ch:{}\t out_ch:{}\t runtime_csim: {} ms\t util_csim: {}".format(opname, input_size, kernel_size_str, in_ch, out_ch, runtime_csim, util_csim))
table[opname][input_size][kernel_size_str][in_ch][out_ch] = {"runtime_csim": runtime_csim, "util_csim": util_csim}
op = vanilla.DepthwiseConv2d
opname = op.__name__
table[opname] = {}
for input_size in [2,4,8,16,32]:
table[opname][input_size] = {}
for kernel_size in [(3,3), (5,5)]:
kernel_size_str = "{}x{}".format(kernel_size[0], kernel_size[1])
table[opname][input_size][kernel_size_str] = {}
for in_ch in range (discrete_step,256+1,discrete_step):
table[opname][input_size][kernel_size_str][in_ch] = {}
out_ch = in_ch
runtime_csim, util_csim = run_layer(array_size, opname, in_ch, out_ch, kernel_size, input_size, batch_size)
print("op:{}\t input_size: {}\t kernel_size: {}\t in_ch:{}\t out_ch:{}\t runtime_csim: {} ms\t util_csim: {}".format(opname, input_size, kernel_size_str, in_ch, out_ch, runtime_csim, util_csim))
table[opname][input_size][kernel_size_str][in_ch][out_ch] = {"runtime_csim": runtime_csim, "util_csim": util_csim}
with open("src/hardware/lookup.json", "w") as outfile:
json.dump(table, outfile)
print("Completed in: {} s".format(time.time() - start))
|
StarcoderdataPython
|
3231854
|
"""
Neo-RTD theme for Sphinx documentation generator.
Based on the color combination of the original sphinx_rtd_theme, but updated with better readability.
"""
import os
__version__ = '1.0'
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return cur_dir
|
StarcoderdataPython
|
1621088
|
#! /usr/bin/python3
import json
with open('grades.json', 'rb') as f:
data = json.load(f)
ids = [course['content']['achievementDto']['cpCourseLibDto']['id'] for course in data['resource']]
with open('ids.txt', 'w') as ids_list:
ids_list.write('\n'.join((str(i) for i in ids)))
|
StarcoderdataPython
|
81801
|
<reponame>domwillcode/home-assistant<filename>homeassistant/components/hp_ilo/__init__.py<gh_stars>1000+
"""The HP Integrated Lights-Out (iLO) component."""
|
StarcoderdataPython
|
178423
|
<filename>Day 4 Rock Paper Scissor.py
import random
rock='''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper='''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissor='''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
hand=[rock,paper,scissor]
while True:
try:
player=int(input("Please choose your hand, 0=rock, 1=paper, 2=scissor\n"))
except:
print("Please input number only")
continue
if player>2 or player<0:
print("Wrong Number, Please try again")
continue
computer=random.randint(0,2)
a=hand[(player)]
b=hand[computer]
print(f"Player chose {a}, Computer chose {b}")
    # Decide the winner; rock (0) beats scissor (2), otherwise the higher index wins.
    if player==computer:
        print("Draw")
    elif player==2 and computer==0:
        print("Computer wins")
    elif player==0 and computer==2:
        print("Player wins")
    elif player>computer:
        print('Player wins')
    else:
        print('Computer wins')
if player>2:
print("Wrong Number")
asd=input("Do you want to try again?\n").lower()
print(asd)
if asd== 'yes':
continue
else:
print("Thank you for playing")
break
|
StarcoderdataPython
|
167975
|
<gh_stars>0
# --------------
import pandas as pd
from sklearn.model_selection import train_test_split
#path - Path of file
# Code starts here
df = pd.read_csv(path)
# print(df.head())
X = df.iloc[:,1:-1]
y = df['Churn']
# print(X.head())
# print(y.head())
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=.3,random_state=0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
X_train['TotalCharges'] = X_train['TotalCharges'].replace(' ',np.NaN)
# print(X_train['TotalCharges'])
X_test['TotalCharges'] = X_test['TotalCharges'].replace(' ',np.NaN)
# print(X_test['TotalCharges'])
X_train['TotalCharges'] = X_train['TotalCharges'].astype('float64')
# print(dtype(X_train['TotalCharges']))
print(X_train.info())
X_test['TotalCharges'] = X_test['TotalCharges'].astype('float64')
X_train['TotalCharges'] = X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean())
# print(X_test['TotalCharges'])
X_test['TotalCharges'] = X_test['TotalCharges'].fillna(X_test['TotalCharges'].mean())
# print(X_train.isnull().sum(),X_test.isnull().sum())
cat_cols = X_train.select_dtypes(include='object').columns
# print(cat_cols)
le = LabelEncoder()
for col in cat_cols:
    X_train[col] = le.fit_transform(X_train[col])
    X_test[col] = le.transform(X_test[col])
y_train = y_train.replace({'No':0,'Yes':1})
y_test = y_test.replace({'No':0,'Yes':1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# Code starts here
# print(X_train.head(),'\n',X_test.head(),'\n',y_train.head(),'\n',y_test.head())
ada_model = AdaBoostClassifier(random_state=0)
ada_model.fit(X_train,y_train)
y_pred = ada_model.predict(X_test)
ada_score = accuracy_score(y_test,y_pred)
print(ada_score)
ada_cm = confusion_matrix(y_test,y_pred)
print(ada_cm)
ada_cr = classification_report(y_test,y_pred)
print(ada_cr)
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
#Parameter list
parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],
'max_depth':range(1,3)}
# Code starts here
xgb_model = XGBClassifier(random_state=0)
xgb_model.fit(X_train,y_train)
y_pred = xgb_model.predict(X_test)
xgb_score = accuracy_score(y_test,y_pred)
print(xgb_score)
xgb_cm = confusion_matrix(y_test,y_pred)
print(xgb_cm)
xgb_cr = classification_report(y_test,y_pred)
print(xgb_cr)
clf_model = GridSearchCV(estimator=xgb_model,param_grid=parameters)
clf_model.fit(X_train,y_train)
y_pred = clf_model.predict(X_test)
clf_score = accuracy_score(y_test,y_pred)
print(clf_score)
clf_cm = confusion_matrix(y_test,y_pred)
print(clf_cm)
clf_cr = classification_report(y_test,y_pred)
print(clf_cr)
# print(clf_model.bes)
|
StarcoderdataPython
|
3218065
|
<reponame>daVinciCEB/Basic-Python-Package
import unittest
from context import core
class ExampleTest(unittest.TestCase):
"""An example test in unittest fashion."""
def setUp(self):
pass
def test_will_pass(self):
self.assertEqual(1, 1)
def test_will_not_pass(self):
self.assertEqual(1, 1)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3378497
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.interface import AbstractFrontendValidator
from rqalpha.const import ORDER_TYPE
from rqalpha.utils.logger import user_system_log
from rqalpha.utils.i18n import gettext as _
class PriceValidator(AbstractFrontendValidator):
def __init__(self, env):
self._env = env
def can_submit_order(self, account, order):
if order.type != ORDER_TYPE.LIMIT:
return True
# FIXME: it may be better to round price in data source
limit_up = round(self._env.price_board.get_limit_up(order.order_book_id), 4)
if order.price > limit_up:
reason = _(
"Order Creation Failed: limit order price {limit_price} is higher than limit up {limit_up}."
).format(
limit_price=order.price,
limit_up=limit_up
)
user_system_log.warn(reason)
return False
limit_down = round(self._env.price_board.get_limit_down(order.order_book_id), 4)
if order.price < limit_down:
reason = _(
"Order Creation Failed: limit order price {limit_price} is lower than limit down {limit_down}."
).format(
limit_price=order.price,
limit_down=limit_down
)
user_system_log.warn(reason)
return False
return True
def can_cancel_order(self, account, order):
return True
|
StarcoderdataPython
|
1790354
|
<gh_stars>0
import sys
from unittest import skip
from django.core.management.commands import test
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_by_path
class Command(test.Command):
def handle(self, *args, **kwargs):
        # Look for the previous app in INSTALLED_APPS that defines a
        # test command (e.g. for South support).
apps = settings.INSTALLED_APPS[:]
previous_apps = reversed(apps[:apps.index('djangae')])
CommandClass = test.Command
for app in previous_apps:
try:
CommandClass = import_by_path('{}.management.commands.test.Command'.format(app))
break
except ImproperlyConfigured:
pass
if settings.DATABASES['default']['ENGINE'] == 'djangae.db.backends.appengine':
_monkey_patch_unsupported_tests()
CommandClass().handle(*args, **kwargs)
def _monkey_patch_unsupported_tests():
unsupported_tests = []
if 'django.contrib.auth' in settings.INSTALLED_APPS:
import django
if django.VERSION[:2] == (1, 5):
unsupported_tests.extend([
# These auth tests override the AUTH_USER_MODEL setting, which then uses M2M joins
'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_custom_perms',
'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_get_all_superuser_permissions',
'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_has_no_object_perm',
'django.contrib.auth.tests.auth_backends.CustomPermissionsUserModelBackendTest.test_has_perm',
'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_custom_perms',
'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_has_perm',
'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_get_all_superuser_permissions',
'django.contrib.auth.tests.auth_backends.ExtensionUserModelBackendTest.test_has_no_object_perm'
])
for unsupported_test in unsupported_tests:
module_path, klass_name, method_name = unsupported_test.rsplit(".", 2)
__import__(module_path, klass_name)
module = sys.modules[module_path]
if hasattr(module, klass_name):
klass = getattr(module, klass_name)
method = getattr(klass, method_name)
setattr(klass, method_name, skip("Not supported by Djangae")(method))
|
StarcoderdataPython
|
37658
|
import importlib
import pytest
import yaml
import appmap._implementation
from appmap._implementation.env import Env
from appmap._implementation.recording import Recorder
def _data_dir(pytestconfig):
return pytestconfig.rootpath / 'appmap' / 'test' / 'data'
@pytest.fixture(name='data_dir')
def fixture_data_dir(pytestconfig):
return _data_dir(pytestconfig)
@pytest.fixture(name='with_data_dir')
def fixture_with_data_dir(data_dir, monkeypatch):
monkeypatch.syspath_prepend(data_dir)
return data_dir
@pytest.fixture
def events():
rec = Recorder()
rec.clear()
rec.enabled = True
yield rec.events
rec.enabled = False
rec.clear()
@pytest.hookimpl
def pytest_runtest_setup(item):
mark = item.get_closest_marker('appmap_enabled')
env = {}
if mark:
appmap_yml = mark.kwargs.get('config', 'appmap.yml')
d = _data_dir(item.config)
config = d / appmap_yml
Env.current.set('APPMAP_CONFIG', config)
env = {'APPMAP': 'true', 'APPMAP_CONFIG': config}
appmap._implementation.initialize(env=env) # pylint: disable=protected-access
# Some tests want yaml instrumented, others don't.
# Reload it to make sure it's instrumented, or not, as set in appmap.yml.
importlib.reload(yaml)
|
StarcoderdataPython
|
3371868
|
<reponame>aaguasca/gammapy<gh_stars>0
import astropy.units as u
from .core import IRF
__all__ = [
"RadMax2D",
]
class RadMax2D(IRF):
"""2D Rad Max table.
    This is not directly an IRF component but is needed as additional information
for point-like IRF components when an energy or field of view
dependent directional cut has been applied.
Data format specification: :ref:`gadf:rad_max_2d`
Parameters
----------
energy_axis : `MapAxis`
Reconstructed energy axis
offset_axis : `MapAxis`
Field of view offset axis.
data : `~astropy.units.Quantity`
Applied directional cut
meta : dict
Meta data
"""
tag = "rad_max_2d"
required_axes = ["energy", "offset"]
default_unit = u.deg
@classmethod
def from_irf(cls, irf):
'''
Create a RadMax2D instance from another IRF component.
This reads the RAD_MAX metadata keyword from the irf and creates
a RadMax2D with a single bin in energy and offset using the
ranges from the input irf.
Parameters
----------
irf: `~gammapy.irf.EffectiveAreaTable2D` or `~gammapy.irf.EnergyDispersion2D`
IRF instance from which to read the RAD_MAX and limit information
Returns
-------
rad_max: `RadMax2D`
`RadMax2D` object with a single bin corresponding to the fixed
RAD_MAX cut.
Notes
-----
This assumes the true energy axis limits are also valid for the
reco energy limits.
'''
if not irf.is_pointlike:
raise ValueError('RadMax2D.from_irf is only valid for point-like irfs')
if 'RAD_MAX' not in irf.meta:
raise ValueError('irf does not contain RAD_MAX keyword')
rad_max_value = irf.meta["RAD_MAX"]
if not isinstance(rad_max_value, float):
raise ValueError('RAD_MAX must be a float')
energy_axis = irf.axes["energy_true"].copy(name="energy").squash()
offset_axis = irf.axes["offset"].squash()
return cls(
data=u.Quantity([[rad_max_value]], u.deg),
axes=[energy_axis, offset_axis],
)
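# --- Editor's illustration (not in the gammapy source): a hedged usage sketch of
# --- `RadMax2D.from_irf`; the file name below is a placeholder for any point-like
# --- effective area file whose header carries a scalar RAD_MAX keyword.
if __name__ == "__main__":
    from gammapy.irf import EffectiveAreaTable2D

    aeff = EffectiveAreaTable2D.read("aeff_pointlike.fits.gz", hdu="EFFECTIVE AREA")
    rad_max = RadMax2D.from_irf(aeff)  # one energy bin x one offset bin holding the RAD_MAX cut
    print(rad_max)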
|
StarcoderdataPython
|
3246924
|
# -*- coding: utf-8 -*-
"""
State machine interface. This is a base class for implementing state machines.
"""
from copy import deepcopy
from signalslot import Signal
from threading import Event
from .asyncexc import AsynchronousException
class NotReadyError(Exception):
"""
Exception raised when an attempt is made to retrieve the result of an
operation before it is ready.
"""
pass
class HaystackOperation(object):
"""
A core state machine object. This implements the basic interface presented
for all operations in pyhaystack.
"""
def __init__(self, result_copy=True, result_deepcopy=True):
"""
Initialisation. This should be overridden by subclasses to accept and
validate the inputs presented for the operation, raising an appropriate
Exception subclass if the inputs are found to be invalid.
These should be stored here by the initialisation function as private
variables in suitably sanitised form. The core state machine object
shall then be created and stored before the object is returned to the
caller.
"""
# Event object to represent when this operation is "done"
self._done_evt = Event()
# Signal emitted when the operation is "done"
self.done_sig = Signal(name='done', threadsafe=True)
# Result returned by operation
self._result = None
self._result_copy = result_copy
self._result_deepcopy = result_deepcopy
def go(self):
"""
Start processing the operation. This is called by the caller (so after
all __init__ functions have executed) in order to begin the asynchronous
operation.
"""
# This needs to be implemented in the subclass.
raise NotImplementedError("To be implemented in subclass %s" \
% self.__class__.__name__)
def wait(self, timeout=None):
"""
Wait for an operation to finish. This should *NOT* be called in the
same thread as the thread executing the operation as this will
deadlock.
"""
self._done_evt.wait(timeout)
@property
def state(self):
"""
Return the current state machine's state.
"""
return self._state_machine.current
@property
def is_done(self):
"""
Return true if the operation is complete.
"""
return self._state_machine.is_finished()
@property
def is_failed(self):
"""
Return true if the result is an Exception.
"""
return isinstance(self._result, AsynchronousException)
@property
def result(self):
"""
Return the result of the operation or raise its exception.
Raises NotReadyError if not ready.
"""
if not self.is_done:
raise NotReadyError()
if self.is_failed:
self._result.reraise()
if not self._result_copy:
# Return the original instance (do not copy)
return self._result
elif self._result_deepcopy:
# Return a deep copy
return deepcopy(self._result)
else:
# Return a shallow copy
return self._result.copy()
def __repr__(self):
"""
Return a representation of this object's state.
"""
if self.is_failed:
return '<%s failed>' % self.__class__.__name__
elif self.is_done:
return '<%s done: %s>' % (self.__class__.__name__, self._result)
else:
return '<%s %s>' % (self.__class__.__name__, self.state)
def _done(self, result):
"""
Return the result of the operation to any listeners.
"""
self._result = result
self._done_evt.set()
self.done_sig.emit(operation=self)
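# --- Editor's illustration (not part of pyhaystack): a minimal subclass sketch of the
# --- contract described in the docstrings above -- validate inputs in __init__, implement
# --- go(), and report the outcome through _done(), wrapping failures in AsynchronousException.
# --- A real subclass would also create the state-machine object that the `state`, `is_done`
# --- and __repr__ members expect in self._state_machine.
class _EchoOperation(HaystackOperation):
    """Toy operation that immediately completes with the value it was given."""

    def __init__(self, value):
        super(_EchoOperation, self).__init__()
        self._value = value  # sanitised input stored privately, as the base docstring asks

    def go(self):
        try:
            self._done(self._value)                 # success: hand the result to listeners
        except Exception:
            self._done(AsynchronousException())     # failure: capture and report the exception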
|
StarcoderdataPython
|
3215851
|
<filename>test/wasm-js/testcfg.py
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from testrunner.local import testsuite
from testrunner.objects import testcase
ANY_JS = ".any.js"
WPT_ROOT = "/wasm/jsapi/"
META_SCRIPT_REGEXP = re.compile(r"META:\s*script=(.*)")
META_TIMEOUT_REGEXP = re.compile(r"META:\s*timeout=(.*)")
proposal_flags = [{
'name': 'js-types',
'flags': ['--experimental-wasm-type-reflection',
'--wasm-staging']
},
{
'name': 'tail-call',
'flags': ['--experimental-wasm-tail-call',
'--wasm-staging']
},
{
'name': 'simd',
'flags': ['--experimental-wasm-simd',
'--wasm-staging']
},
{
'name': 'memory64',
'flags': ['--experimental-wasm-memory64',
'--wasm-staging']
},
]
class TestLoader(testsuite.JSTestLoader):
@property
def extensions(self):
return [ANY_JS]
class TestSuite(testsuite.TestSuite):
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
self.mjsunit_js = os.path.join(os.path.dirname(self.root), "mjsunit",
"mjsunit.js")
self.test_root = os.path.join(self.root, "tests")
self._test_loader.test_root = self.test_root
def _test_loader_class(self):
return TestLoader
def _test_class(self):
return TestCase
def get_proposal_path_identifier(proposal):
return os.sep.join(['proposals', proposal['name']])
class TestCase(testcase.D8TestCase):
def _get_timeout_param(self):
source = self.get_source()
timeout_params = META_TIMEOUT_REGEXP.findall(source)
if not timeout_params:
return None
if timeout_params[0] in ["long"]:
return timeout_params[0]
else:
print("unknown timeout param %s in %s%s"
% (timeout_params[0], self.path, ANY_JS))
return None
def _get_files_params(self):
files = [self.suite.mjsunit_js,
os.path.join(self.suite.root, "third_party", "testharness.js"),
os.path.join(self.suite.root, "testharness-additions.js"),
os.path.join(self.suite.root, "report.js")]
source = self.get_source()
current_dir = os.path.dirname(self._get_source_path())
for script in META_SCRIPT_REGEXP.findall(source):
if script.startswith(WPT_ROOT):
# Matched an absolute path, strip the root and replace it with our
# local root.
found = False
for proposal in proposal_flags:
if get_proposal_path_identifier(proposal) in current_dir:
found = True
script = os.path.join(self.suite.test_root,
os.sep.join(['proposals', proposal['name']]),
script[len(WPT_ROOT):])
if not found:
script = os.path.join(self.suite.test_root, script[len(WPT_ROOT):])
elif not script.startswith("/"):
# Matched a relative path, prepend this test's directory.
script = os.path.join(current_dir, script)
else:
raise Exception("Unexpected absolute path for script: \"%s\"" % script);
files.append(script)
files.extend([self._get_source_path(),
os.path.join(self.suite.root, "after.js")])
return files
def _get_source_flags(self):
for proposal in proposal_flags:
if get_proposal_path_identifier(proposal) in self.path:
return proposal['flags']
return ['--wasm-staging']
def _get_source_path(self):
# All tests are named `path/name.any.js`
return os.path.join(self.suite.test_root, self.path + ANY_JS)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
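# --- Editor's note (illustrative, not part of this file): a hypothetical WPT test such as
# --- tests/proposals/simd/constructor.any.js whose source starts with
# ---   // META: script=/wasm/jsapi/wasm-module-builder.js
# --- is run with mjsunit.js, testharness.js, testharness-additions.js and report.js, plus the
# --- proposal's local copy of the referenced script, and picks up the flags
# --- ['--experimental-wasm-simd', '--wasm-staging'] from proposal_flags above.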
|
StarcoderdataPython
|
3386405
|
<filename>src/spacel/security/acm.py
import logging
from tldextract import extract
logger = logging.getLogger('spacel.security.acm')
class AcmCertificates(object):
def __init__(self, clients):
self._clients = clients
def get_certificate(self, region, hostname):
logger.debug('Looking up certificate for "%s".', hostname)
host_wildcards = self._get_wildcards(hostname)
logger.debug('Resolved wildcards: %s', host_wildcards)
acm = self._clients.acm(region)
wildcard_cert = None
wildcard_count = 100
# Iterate certificates:
for acm_cert in self._get_certificates(acm):
cert_domain = acm_cert['DomainName']
cert_arn = acm_cert['CertificateArn']
# Stop search if exact match is found:
if cert_domain == hostname:
logger.debug('Found exact match for "%s": %s"', hostname,
cert_arn)
return cert_arn
if cert_domain in host_wildcards:
star_count = cert_domain.count('*')
if star_count < wildcard_count:
wildcard_count = star_count
wildcard_cert = cert_arn
if wildcard_cert:
logger.debug('Found wildcard match for "%s": %s (%s)', hostname,
wildcard_cert, wildcard_count)
return wildcard_cert
@staticmethod
def _get_wildcards(hostname):
extracted = extract('http://%s' % hostname)
if not extracted.subdomain:
return []
common_domain = [extracted.domain, extracted.suffix]
wildcards = []
# For each subdomain component:
split_subdomain = extracted.subdomain.split('.')
for subdomain_index in range(len(split_subdomain)):
# Replace with wildcard, then concat to remaining bits:
wildcard_parts = ['*'] * (subdomain_index + 1) \
+ split_subdomain[subdomain_index + 1:]
wildcard_parts += common_domain
wildcards.append('.'.join(wildcard_parts))
return wildcards
@staticmethod
def _get_certificates(acm):
certificate_pages = (acm.get_paginator('list_certificates')
.paginate(CertificateStatuses=['ISSUED']))
for certificate_page in certificate_pages:
for certificate in certificate_page['CertificateSummaryList']:
yield certificate
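# --- Editor's illustration (not part of the module): how the wildcard expansion above
# --- behaves for a nested subdomain; get_certificate() then prefers the candidate with
# --- the fewest '*' labels. The exact output assumes tldextract's standard public-suffix data.
if __name__ == "__main__":
    wildcards = AcmCertificates._get_wildcards('api.eu.example.com')
    assert wildcards == ['*.eu.example.com', '*.*.example.com']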
|
StarcoderdataPython
|
146476
|
<reponame>alaasalman/aussieshopper
from django.conf.urls import url, include
from rest_framework import routers
from api import views
app_name = 'api'
router = routers.SimpleRouter()
router.register(r'stats', views.StatsViewSet)
urlpatterns = [
url(r'bot/handle/',
views.HandleChatMessage.as_view(),
name='api-handle-message'),
url(r'^', include(router.urls))
]
|
StarcoderdataPython
|
3351868
|
<filename>_exclude/build-README.py
import glob, os
import sys
import urllib
import shutil
from pathlib import Path
def build_lectures_md(lectures_md_file_name):
weekly_lectures = []
lectures_path = '../lectures'
weekly_lectures = [lecture for lecture in os.listdir(lectures_path) \
if lecture.startswith('Week') and not lecture.endswith('.md')]
weekly_lectures.sort()
lectures_md = []
lectures_md.append("## Lectures\n")
lectures_md.append("\n")
lectures_md.append("Lectures will contain a mixture of content form this site and others.\n")
lectures_md.append("\n")
for lecture in weekly_lectures:
print('***** ', lecture)
(week, content) = tuple(lecture.split(' - '))
lectures_md.append(f'1. [{week}](lectures/lectures.md) - {content}\n')
with open(lectures_md_file_name, 'w+') as f:
f.write(''.join(lectures_md))
if __name__ == "__main__":
os.chdir(os.path.dirname(sys.argv[0]))
md = []
title = "Data Focused Python"
md.append("---\n")
md.append("layout: default\n")
md.append(f"title: {title}\n")
md.append("nav_order: 1\n")
md.append("permalink: /\n")
md.append("---\n")
md.append("\n")
lectures_md_file_name = '02-lectures.md'
build_lectures_md(lectures_md_file_name)
files = [
'01-data-focused-python.md',
lectures_md_file_name,
'03-quizzes.md',
'04-assignments.md'
]
for file in files:
with open(file, 'r') as f:
md.extend(f.readlines())
md.append("\n")
with open('../README.md', 'w+') as f:
f.write(''.join(md))
|
StarcoderdataPython
|
3369434
|
<filename>summarizer/analysis/plot_user.py
import sys, os.path as path
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
import numpy as np
import argparse
import os
import matplotlib as mpl
mpl.use('pgf')  # the backend must be selected before pyplot is imported
import matplotlib.pyplot as plt
from summarizer.utils.reader import read_csv
def figsize(scale):
fig_width_pt = 455.24408 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt*inches_per_pt*scale # width in inches
fig_height = fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
return fig_size
def savefig(filename):
plt.savefig( "figs/" + '{}.pgf'.format(filename))
plt.savefig("figs/" + '{}.pdf'.format(filename))
class Plot(object):
def load_data(self, filename, cluster_id, info_per_user):
self.cluster = cluster_id
self.rows = read_csv(filename)
self.info_num = info_per_user
def get_scores(self, rouge_type='R1_score'):
ub_scores = self.rows[0]
self.users = len(ub_scores[1:]) // self.info_num  # integer user count
y = [[] for user in range(self.users)]
for iteration in range(0,len(self.rows)):
row = self.rows[iteration]
for user in range(self.users):
if rouge_type == 'R1_score':
val = row[1+user*self.info_num]
if val != "":
y[user].append(float(val))
if rouge_type == 'R2_score':
val = row[2+user*self.info_num]
if val != "":
y[user].append(float(val))
return y
def plot_scores(self, labels, scores, filename):
self.users = 2
f, axis = plt.subplots(2, sharex=True, sharey=False, figsize=(4, 6))
colors = ['g','b','r', '#8E4585']
linestyles = ['->', '-o', '-', '-x']
iterations= 8
for i in range(self.users):
for index, score in enumerate(scores):
y = score
if index == 0:
axis[i].plot(range(len(y[i][1:])), len(y[i][1:]) *[y[i][0]], 'k--', label = 'Upper bound', linewidth=2)
#axis[i].plot(range(len(y[i][1:])), len(y[i][1:]) *[y[i][0]], 'k--', label = 'Upper-bound')
if i>0:
axis[i].plot(range(len(y[i][1:])), y[i][1:], linestyles[index], color=colors[index], label='%s' % labels[index], linewidth=2)
else:
axis[i].plot(range(len(y[i][1:])), y[i][1:], linestyles[index], color=colors[index], label='%s' % labels[index], linewidth=2)
axis[i].set_title('User:%s' % str(i+1))
axis[i].set_xticks(np.arange(0, iterations, 1))
axis[i].set_xticklabels(np.arange(0, iterations, 1))
axis[i].set_ylabel('ROUGE 2', fontsize=13)
axis[i].grid(True)
plt.legend(loc="best", fontsize=9)
plt.xlabel("\# Iterations", fontsize=15)
plt.yscale("linear")
plt.xlim(0,iterations)
plt.tight_layout()
savefig(filename)
def get_args():
''' This function parses and return arguments passed in'''
parser = argparse.ArgumentParser(description='Users Aggregator')
parser.add_argument('-d', '--data_set', type= str, help='Dataset Name', required=True)
parser.add_argument('-l', '--summary_len', type= str, help='Summary Size', required=False)
args = parser.parse_args()
data_set = args.data_set
size = args.summary_len
return data_set, size
if __name__ == '__main__':
data_set, size = get_args()
methods = ['active_learning2', 'active_learning','ilp_feedback', 'accept_reject']
labels = ['Active+','Active', 'Joint', 'Accept']
plotter = Plot()
pgf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
"text.usetex": True, # use LaTeX to write all text
"font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 10, # LaTeX default is 10pt font.
"text.fontsize": 10,
"legend.fontsize": 10, # Make the legend/label fonts a little smaller
"xtick.labelsize": 12,
"ytick.labelsize": 12,
"figure.figsize": figsize(0.85), # default fig size of 0.9 textwidth
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
]
}
mpl.rcParams.update(pgf_with_latex)
if size!= None:
data_path = '../data/scores/%s/%s_%s_%s' % (data_set, methods[2], data_set, size)
else:
data_path = '../data/scores/%s/%s_%s' % (data_set, methods[2], data_set)
score_type = 'R2_score'
for fileid in os.listdir(data_path):
scores = []
cluster_id = fileid[:-3]
print(cluster_id)
for method in methods:
if size!= None:
filename = '../data/scores/%s/%s_%s_%s/%s' % (data_set, method, data_set, size, fileid)
else:
filename = '../data/scores/%s/%s_%s/%s' % (data_set, method, data_set, fileid)
plotter.load_data(filename, cluster_id, 6)
scores.append(plotter.get_scores(score_type))
filename = "users_%s_%s" % (data_set, fileid)
plotter.plot_scores(labels, scores, filename)
|
StarcoderdataPython
|
3203165
|
<filename>dls_pmaccontrol/CSstatus.py
#!/bin/env dls-python2.6
# -*- coding: utf-8 -*-
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from formCSStatus import Ui_formCSStatus
class CSStatusForm(QDialog, Ui_formCSStatus):
def __init__(self, parent):
QDialog.__init__(self,parent)
self.setupUi(self)
self.csSpin.valueChanged.connect(self.changeCS)
self.feedSpin.valueChanged.connect(self.setFeed)
self.greenLedOn = parent.greenLedOn
self.greenLedOff = parent.greenLedOff
self.redLedOn = parent.redLedOn
self.redLedOff = parent.redLedOff
self._feed = 100
ledGroupLayout = self.ledGroup.layout()
ledGroupLayout.setAlignment(Qt.AlignTop)
self.lstLeds = []
self.lstLabels = []
self.lstLabelTexts = []
self.lstTooltips = []
# Extracted from manual
# First Word Returned (X:$002040, X:$0020C0, etc.)
# Bit 23
self.lstLabelTexts.append("Z-CS Used in Feedrate Calculations")
self.lstTooltips.append("""Used in Feedrate Calculations: This bit is 1 if this CS is used in the vector feedrate
calculations for F-based moves in the coordinate system; it is 0 if this CS is not used. See the FRAX
command.""")
# Bit 22
self.lstLabelTexts.append("Z-CS Incremental Mode")
self.lstTooltips.append("""Incremental Mode: This bit is 1 if this CS is in incremental mode -- moves specified by
distance from the last programmed point. It is 0 if this CS is in absolute mode -- moves specified by end
position, not distance. See the INC and ABS commands.""")
# Bit 21
self.lstLabelTexts.append("Y-CS Used in Feedrate Calculations")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 20
self.lstLabelTexts.append("Y-CS Incremental Mode")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 19
self.lstLabelTexts.append("X-CS Used in Feedrate Calculations")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 18
self.lstLabelTexts.append("X-CS Incremental Mode")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 17
self.lstLabelTexts.append("W-CS Used in Feedrate Calculations")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 16
self.lstLabelTexts.append("W-CS Incremental Mode")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 15
self.lstLabelTexts.append("V-CS Used in Feedrate Calculations")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 14
self.lstLabelTexts.append("V-CS Incremental Mode")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 13
self.lstLabelTexts.append("U-CS Used in Feedrate Calculations")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 12
self.lstLabelTexts.append("U-CS Incremental Mode")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 11
self.lstLabelTexts.append("C-CS Used in Feedrate Calculations")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 10
self.lstLabelTexts.append("C-CS Incremental Mode")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 9
self.lstLabelTexts.append("B-CS Used in Feedrate Calculations")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 8
self.lstLabelTexts.append("B-CS Incremental Mode")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 7
self.lstLabelTexts.append("A-CS Used in Feedrate Calculations")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 6
self.lstLabelTexts.append("A-CS Incremental Mode")
self.lstTooltips.append(self.lstTooltips[-2])
# Bit 5
self.lstLabelTexts.append("Radius Vector Incremental Mode")
self.lstTooltips.append("""Radius Vector Incremental Mode: This bit is 1 if circle move radius vectors are specified
incrementally (i.e. from the move start point to the arc center). It is 0 if circle move radius vectors are
specified absolutely (i.e. from the XYZ origin to the arc center). See the INC (R) and ABS (R)
commands.""")
# Bit 4
self.lstLabelTexts.append("Continuous Motion Request")
self.lstTooltips.append("""Continuous Motion Request: This bit is 1 if the coordinate system has requested of it a
continuous set of moves (e.g. with an R command). It is 0 if this is not the case (e.g. not running
program, Isx92=1, or running under an S command).""")
# Bit 3
self.lstLabelTexts.append("Move-Specified-by-Time Mode")
self.lstTooltips.append("""Move-Specified-by-Time Mode: This bit is 1 if programmed moves in this coordinate system are
currently specified by time (TM or TA), and the move speed is derived. It is 0 if programmed moves in
this coordinate system are currently specified by feedrate (speed; F) and the move time is derived.""")
# Bit 2
self.lstLabelTexts.append("Continuous Motion Mode")
self.lstTooltips.append("""Continuous Motion Mode: This bit is 1 if the coordinate system is in a sequence of moves that it
is blending together without stops in between. It is 0 if it is not currently in such a sequence, for whatever
reason.""")
# Bit 1
self.lstLabelTexts.append("Single-Step Mode")
self.lstTooltips.append("""Single-Step Mode: This bit is 1 if the motion program currently executing in this coordinate
system has been told to step one move or block of moves, or if it has been given a Q (Quit) command. It
is 0 if the motion program is executing a program by a R (run) command, or if it is not executing a motion
program at all.""")
# Bit 0
self.lstLabelTexts.append("Running Program")
self.lstTooltips.append("""Running Program: This bit is 1 if the coordinate system is currently executing a motion
program. It is 0 if the C.S. is not currently executing a motion program. Note that it becomes 0 as soon
as it has calculated the last move and reached the final RETURN statement in the program, even if the
motors are still executing the last move or two that have been calculated. Compare to the motor Running
Program status bit.""")
# Second Word Returned (Y:$00203F, Y:$0020BF, etc.)
# Bit 23
self.lstLabelTexts.append("Lookahead in Progress")
self.lstTooltips.append("""Lookahead in Progress: This bit is 1 when the coordinate system is actively computing and/or
executing a move sequence using the multi-block lookahead function. It is 0 otherwise.""")
# Bit 22
self.lstLabelTexts.append("Run-Time Error")
self.lstTooltips.append("""Run-Time Error: This bit is 1 when the coordinate system has stopped a motion program due to
an error encountered while executing the program (e.g. jump to non-existent label, insufficient calculation
time, etc.) It is 0 otherwise. The run-time error code word (Y:$002x14) shows the cause of a run-time
error.""")
# Bit 21
self.lstLabelTexts.append("Move In Stack")
self.lstTooltips.append("""Move In Stack: (For internal use)""")
# Bit 20
self.lstLabelTexts.append("Amplifier Fault Error")
self.lstTooltips.append("""Amplifier Fault Error: This bit is 1 when any motor in the coordinate system has been killed due
to receiving an amplifier fault signal. It is 0 at other times, changing from 1 to 0 when the offending
motor is re-enabled.""")
# Bit 19
self.lstLabelTexts.append("Fatal Following Error")
self.lstTooltips.append("""Fatal Following Error: This bit is 1 when any motor in the coordinate system has been killed
due to exceeding its fatal following error limit (Ixx11). It is 0 at other times. The change from 1 to 0
occurs when the offending motor is re-enabled.""")
# Bit 18
self.lstLabelTexts.append("Warning Following Error")
self.lstTooltips.append("""Warning Following Error: This bit is 1 when any motor in the coordinate system has exceeded
its warning following error limit (Ixx12). It stays at 1 if a motor has been killed due to fatal following
error limit. It is 0 at all other times. The change from 1 to 0 occurs when the offending motor's following
error is reduced to under the limit, or if killed on fatal following error as well, when it is re-enabled.""")
# Bit 17
self.lstLabelTexts.append("In Position")
self.lstTooltips.append("""In Position: This bit is 1 when all motors in the coordinate system are in position. Five
conditions must apply for all of these motors for this to be true:, the loops must be closed, the desired
velocity must be zero for all motors, the coordinate system cannot be in any timed move (even zero
distance) or DWELL, all motors must have a following error smaller than their respective Ixx28 in-position
bands, and the above conditions must have been satisfied for (Ixx88+1) consecutive scans.""")
# Bit 16
self.lstLabelTexts.append("Rotary Buffer Request")
self.lstTooltips.append("""Rotary Buffer Request: This bit is 1 when a rotary buffer exists for the coordinate system and
enough program lines have been sent to it so that the buffer contains at least I17 lines ahead of what has
been calculated. Once this bit has been set to 1 it will not be set to 0 until there are less than I16 program
lines ahead of what has been calculated. The PR command may be used to find the current number of
program lines ahead of what has been calculated.""")
# Bit 15
self.lstLabelTexts.append("Delayed Calculation Flag")
self.lstTooltips.append("""Delayed Calculation Flag: (for internal use)""")
# Bit 14
self.lstLabelTexts.append("End of Block Stop")
self.lstTooltips.append("""End of Block Stop: This bit is 1 when a motion program running in the currently addressed
Coordinate System is stopped using the ' / ' command from a segmented move (Linear or Circular mode
with Isx13 > 0).""")
# Bit 13
self.lstLabelTexts.append("Synchronous M-variable One-Shot")
self.lstTooltips.append("""Synchronous M-variable One-Shot: (for internal use)""")
# Bit 12
self.lstLabelTexts.append("Dwell Move Buffered")
self.lstTooltips.append("""Dwell Move Buffered: (for internal use)""")
# Bit 11
self.lstLabelTexts.append("Cutter Comp Outside Corner")
self.lstTooltips.append("""Cutter Comp Outside Corner: This bit is 1 when the coordinate system is executing an added
outside corner move with cutter compensation on. It is 0 otherwise.""")
# Bit 10
self.lstLabelTexts.append("Cutter Comp Move Stop Request")
self.lstTooltips.append("""Cutter Comp Move Stop Request: This bit is 1 when the coordinate system is executing moves
with cutter compensation enabled, and has been asked to stop move execution. This is primarily for
internal use.""")
# Bit 9
self.lstLabelTexts.append("Cutter Comp Move Buffered")
self.lstTooltips.append("""Cutter Comp Move Buffered: This bit is 1 when the coordinate system is executing moves with
cutter compensation enabled, and the next move has been calculated and buffered. This is primarily for
internal use.""")
# Bit 8
self.lstLabelTexts.append("Pre-jog Move Flag")
self.lstTooltips.append("""Pre-jog Move Flag: This bit is 1 when any motor in the coordinate system is executing a jog
move to "pre-jog" position (J= command). It is 0 otherwise.""")
# Bit 7
self.lstLabelTexts.append("Segmented Move in Progress")
self.lstTooltips.append("""Segmented Move in Progress: This bit is 1 when the coordinate system is executing motion
program moves in segmentation mode (Isx13>0). It is 0 otherwise. This is primarily for internal use.""")
# Bit 6
self.lstLabelTexts.append("Segmented Move Acceleration")
self.lstTooltips.append("""Segmented Move Acceleration: This bit is 1 when the coordinate system is executing motion
program moves in segmentation mode (Isx13>0) and accelerating from a stop. It is 0 otherwise. This is
primarily for internal use.""")
# Bit 5
self.lstLabelTexts.append("Segmented Move Stop Request")
self.lstTooltips.append("""Segmented Move Stop Request: This bit is 1 when the coordinate system is executing motion
program move in segmentation mode (Isx13>0) and it is decelerating to a stop. It is 0 otherwise. This is
primarily for internal use.""")
# Bit 4
self.lstLabelTexts.append("PVT/SPLINE Move Mode")
self.lstTooltips.append("""PVT/SPLINE Move Mode: This bit is 1 if this coordinate system is in either PVT move mode or
SPLINE move mode. (If bit 0 of this word is 0, this means PVT mode; if bit 0 is 1, this means SPLINE
mode.) This bit is 0 if the coordinate system is in a different move mode (LINEAR, CIRCLE, or
RAPID). See the table below.""")
# Bit 3
self.lstLabelTexts.append("2D Cutter Comp Left/3D Cutter Comp On")
self.lstTooltips.append("""2D Cutter Comp Left/3D Cutter Comp On: With bit 2 equal to 1, this bit is 1 if the coordinate
system has 2D cutter compensation on, compensating to the left when looking in the direction of motion.
It is 0 if 2D compensation is to the right. With bit 2 equal to 0, this bit is 1 if the coordinate system has
3D cutter compensation on. It is 0 if no cutter compensation is on.""")
# Bit 2
self.lstLabelTexts.append("2D Cutter Comp On")
self.lstTooltips.append("""2D Cutter Comp On: This bit is 1 if the coordinate system has 2D cutter compensation on. It is
0 if 2D cutter compensation is off (but 3D cutter compensation may be on if bit 3 is 1).""")
# Bit 1
self.lstLabelTexts.append("CCW Circle\Rapid Mode")
self.lstTooltips.append("""CCW Circle\Rapid Mode: When bit 0 is 1 and bit 4 is 0, this bit is set to 0 if the coordinate
system is in CIRCLE1 (clockwise arc) move mode and 1 if the coordinate system is in CIRCLE2
(counterclockwise arc) move mode. If both bits 0 and 4 are 0, this bit is set to 1 if the coordinate system
is in RAPID move mode. Otherwise this bit is 0. See the table below.""")
# Bit 0
self.lstLabelTexts.append("CIRCLE/SPLINE Move Mode")
self.lstTooltips.append("""CIRCLE/SPLINE Move Mode: This bit is 1 if the coordinate system is in either CIRCLE or
SPLINE move mode. (If bit 4 of this word is 0, this means CIRCLE mode; if bit 4 is 1, this means
SPLINE mode.) This bit is 0 if the coordinate system is in a different move mode (LINEAR, PVT, or
RAPID.). See the table below.""")
# Third Word Returned (Y:$002040, Y:$0020C0, etc.)
# Bit 23
self.lstLabelTexts.append("Lookahead Buffer Wrap")
self.lstTooltips.append("""Lookahead Buffer Wrap: This bit is 1 when the lookahead buffer for the coordinate system is
active and has wrapped around since the beginning of the current continuous motion sequence, meaning
that retrace back to the beginning of the sequence is no longer possible. It is 0 otherwise.""")
# Bit 22
self.lstLabelTexts.append("Lookahead Lookback Active")
self.lstTooltips.append("""Lookahead Lookback Active: (For internal use)""")
# Bit 21
self.lstLabelTexts.append("Lookahead Buffer End")
self.lstTooltips.append("""Lookahead Buffer End: (For internal use)""")
# Bit 20
self.lstLabelTexts.append("Lookahead Synchronous M-variable")
self.lstTooltips.append("""Lookahead Synchronous M-variable: (For internal use)""")
# Bit 19
self.lstLabelTexts.append("Lookahead Synchronous M-variable Overflow")
self.lstTooltips.append("""Lookahead Synchronous M-variable Overflow: This bit is 1 if the program has attempted to put
more synchronous M-variable assignments into the lookahead buffer than the buffer has room for. If this
bit is set, one or more synchronous M-variable assignments have failed to execute or will fail to execute.""")
# Bit 18
self.lstLabelTexts.append("Lookahead Buffer Direction")
self.lstTooltips.append("""Lookahead Buffer Direction: This bit is 1 if the lookahead buffer is executing in the reverse
direction, or has executed a quick stop from the reverse direction. It is 0 if the lookahead buffer is
executing in the forward direction, has executed a quick stop for the forward direction, or is not executing.""")
# Bit 17
self.lstLabelTexts.append("Lookahead Buffer Stop")
self.lstTooltips.append("""Lookahead Buffer Stop: This bit is 1 if the lookahead buffer execution is stopping due to a
quick-stop command or request. It is 0 otherwise.""")
# Bit 16
self.lstLabelTexts.append("Lookahead Buffer Change")
self.lstTooltips.append("""Lookahead Buffer Change: This bit is 1 if the lookahead buffer is currently changing state
between forward and reverse direction, or between executing and stopped. It is 0 otherwise.
Fifteenth character returned:""")
# Bit 15
self.lstLabelTexts.append("Lookahead Buffer Last Segment")
self.lstTooltips.append("""Lookahead Buffer Last Segment: This bit is 1 if the lookahead buffer is currently executing the
last segment before the end of a sequence. It is 0 otherwise.""")
# Bit 14
self.lstLabelTexts.append("Lookahead Buffer Recalculate")
self.lstTooltips.append("""Lookahead Buffer Recalculate: This bit is 1 if the lookahead buffer is recalculating segments
already in the buffer due to a change in the state of the buffer. It is 0 otherwise.""")
# Bit 13
self.lstLabelTexts.append("Lookahead Buffer Flush")
self.lstTooltips.append("""Lookahead Buffer Flush: This bit is 1 if the lookahead buffer is executing segments but not
adding any new segments. It is 0 otherwise.""")
# Bit 12
self.lstLabelTexts.append("Lookahead Buffer Last Move")
self.lstTooltips.append("""Lookahead Buffer Last Move: This bit is 1 if the last programmed move in the buffer has
reached speed. It is 0 otherwise.""")
# Bit 11
self.lstLabelTexts.append("Lookahead Buffer Single-Segment Request")
self.lstTooltips.append("""Lookahead Buffer Single-Segment Request: This bit can be set to 1 by the user as part of a
request to change the state of the lookahead buffer. It should be set to 1 to request the buffer to move
only a single segment from a stopped state (in either direction). It should be set to 0 otherwise. Turbo
PMAC leaves this bit in the state of the last request, even after the request has been processed.""")
# Bit 10
self.lstLabelTexts.append("Lookahead Buffer Change Request")
self.lstTooltips.append("""Lookahead Buffer Change Request: This bit can be set to 1 by the user to request a change in
the state of the lookahead buffer. It remains at 1 until the Turbo PMAC processes the change, at which
time Turbo PMAC changes it to 0.""")
# Bit 9
self.lstLabelTexts.append("Lookahead Buffer Movement Request")
self.lstTooltips.append("""Lookahead Buffer Movement Request: This bit can be set by the user as part of a request to
change the state of the lookahead buffer. It should be set to 1 to request the buffer to operate (in either the
forward or reverse direction); it should be set to 0 to request the buffer to execute a quick stop. Turbo
PMAC leaves this bit in the state of the last request, even after the request has been processed.""")
# Bit 8
self.lstLabelTexts.append("Lookahead Buffer Direction Request")
self.lstTooltips.append("""Lookahead Buffer Direction Request: This bit can be set by the user as part of a request to
change the state of the lookahead buffer. It should be set to 1 to request operation in the reverse direction;
it should be set to 0 to request operation in the forward direction. Its state does not matter in a request to
execute a quick stop. Turbo PMAC leaves this bit in the state of the last request, even after the request
has been processed.""")
# Bit 7
self.lstLabelTexts.append("Reserved for future use""")
self.lstTooltips.append("""Reserved for future use""")
# Bit 6
self.lstLabelTexts.append("Reserved for future use""")
self.lstTooltips.append("""Reserved for future use""")
# Bit 5
self.lstLabelTexts.append("Reserved for future use""")
self.lstTooltips.append("""Reserved for future use""")
# Bit 4
self.lstLabelTexts.append("Reserved for future use""")
self.lstTooltips.append("""Reserved for future use""")
# Bit 3
self.lstLabelTexts.append("Radius Error")
self.lstTooltips.append("""Radius Error: This bit is 1 when a motion program has been stopped because it was asked to do
an arc move whose distance was more than twice the radius (by an amount greater than Ixx96).""")
# Bit 2
self.lstLabelTexts.append("Program Resume Error")
self.lstTooltips.append("""Program Resume Error: This bit is 1 when the user has tried to resume program operation after
a feed-hold or quick-stop, but one or more of the motors in the coordinate system are not at the location of
the feed-hold or quick-stop. It is 0 otherwise.""")
# Bit 1
self.lstLabelTexts.append("Desired Position Limit Stop")
self.lstTooltips.append("""Desired Position Limit Stop: This bit is 1 if the motion program in the coordinate system has
stopped due to the desired position of a motor exceeding a limit.""")
# Bit 0
self.lstLabelTexts.append("In-Program PMATCH")
self.lstTooltips.append("""In-Program PMATCH: This bit is 1 if Turbo PMAC is executing the PMATCH function
automatically, as at the end of a move-until-trigger. It is 0 otherwise. This bit is primarily for internal use.""")
# Here are all the labels for the CSStatus bits defined.
self.lstLabelTexts.reverse()
self.lstTooltips.reverse()
for word in range(1, 4):
for bit in range(0, 24):
i = 24*(word-1) + bit
self.lstLeds.append( QLabel( self.ledGroup ))
self.lstLabels.append( QLabel("Word%s Bit%s" %(word+1,bit), self.ledGroup ))
ledGroupLayout.addWidget( self.lstLeds[i], bit, word*2)
ledGroupLayout.addWidget( self.lstLabels[i], bit, word*2 + 1)
self.lstLeds[i].setPixmap( self.greenLedOff )
self.lstLabels[i].setText( self.lstLabelTexts[i] )
self.lstLabels[i].setToolTip(self.lstTooltips[i])
def changeCS(self, CS):
self.parent().commsThread.CSNum = CS
self.ledGroup.setTitle("CS "+str(CS))
def updateFeed(self, feed):
self._feed = feed
if not self.feedSpin.hasFocus():
self.feedSpin.setValue(feed)
def setFeed(self, feed):
if feed != self._feed:
self.parent().pmac.sendCommand("&%d%%%d" % (self.parent().commsThread.CSNum, feed))
def updateStatus(self, CSStatusHexWord):
#print "update CSStatus: dec = " + str(CSStatusHexWord)
for bit in range(0, 72):
bitMask = (1 << bit)
if bool(CSStatusHexWord & bitMask):
self.lstLeds[bit].setPixmap(self.greenLedOn)
else:
self.lstLeds[bit].setPixmap(self.greenLedOff)
if __name__ == "__main__":
a = QApplication(sys.argv)
QObject.connect(a,SIGNAL("lastWindowClosed()"),a,SLOT("quit()"))
w = CSStatusForm(None)
# setMainWidget() and exec_loop() are Qt3 APIs; PyQt4 uses show() and exec_()
w.show()
a.exec_()
## \file
# \section License
# Author: <NAME>, Copyright 2011
#
# 'dls_pmaccontrol' is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# 'dls_pmaccontrol' is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with 'dls_pmaccontrol'. If not, see http://www.gnu.org/licenses/.
|
StarcoderdataPython
|
1755472
|
<gh_stars>0
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.db.models import Count
from colleges.models import SignificantMajors, College, Blog
def index(request):
return render(request, 'index.html', {})
def schools_list(request):
colleges = College.objects.order_by('name')
context = {'colleges': colleges}
return render(request, 'colleges.html', context)
def resources(request):
colleges = College.objects.order_by('name')
context = {'colleges': colleges}
return render(request, 'resources.html', context)
def faq(request):
colleges = College.objects.order_by('name')
context = {'colleges': colleges}
return render(request, 'faq.html', context)
def checklist(request):
colleges = College.objects.order_by('name')
context = {'colleges': colleges}
return render(request, 'checklist.html', context)
def ambassadors(request):
colleges = College.objects.order_by('name')
context = {'colleges': colleges}
return render(request, 'ambassadors.html', context)
def schools(request):
colleges = College.objects.order_by('name')
context = {'colleges': colleges}
return render(request, 'schools.html', context)
def college_comparison(request):
return render(request, 'colleges/college_comparison.html', {})
# search_bar = SearchBar(request, ['college'])  # unreachable after the return above; SearchBar is not imported
def college_blog(request):
return render (request, 'colleges/college_blog.html', {})
def home(request):
return render(request, 'home.html', {})
|
StarcoderdataPython
|
4807924
|
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import losses
def create_embeddings_matrix(vectorizer, embeddings_path, embedding_dim=100, mask_zero=True):
embeddings_index = {}
with open(embeddings_path) as f:
for line in f:
word, coefs = line.split(maxsplit=1)
coefs = np.fromstring(coefs, "f", sep=" ")
embeddings_index[word] = coefs
voc = vectorizer.get_vocabulary()
word_index = dict(zip(voc, range(len(voc))))
num_tokens = len(voc) + 2
hits = 0
embedding_matrix = np.zeros((num_tokens, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
hits += 1
print("Converted %d words from %d" % (hits, len(voc)))
return layers.Embedding(
num_tokens,
embedding_dim,
embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
trainable=False,
mask_zero=mask_zero
)
def create_model_lstm(embedding_layer, num_labels=3):
text_input = layers.Input(shape=(None,), name='text')
txt = embedding_layer(text_input)
txt = layers.Bidirectional(tf.keras.layers.LSTM(64, recurrent_dropout=0.5, dropout=0.5))(txt)
x = layers.Dropout(0.25)(txt)
out = layers.Dense(num_labels, activation='softmax')(x)
return tf.keras.Model(inputs=[text_input], outputs=[out])
def create_model_gru(embedding_layer, num_labels=3):
text_input = layers.Input(shape=(None,), name='text')
txt = embedding_layer(text_input)
txt = tf.keras.layers.GRU(128)(txt)
# txt = layers.Bidirectional(tf.keras.layers.GRU(64, recurrent_dropout=0.5, dropout=0.5))(txt)
series_input = layers.Input(shape=(None, num_labels), name='series')
series = layers.GRU(64, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)(series_input)
series = layers.GRU(64)(series)
series = layers.Reshape([-1])(series)
x = layers.concatenate([txt, series])
# txt = layers.Dropout(0.25)(x)
x = layers.Dense(64)(x)
x = layers.Dropout(0.2)(x)
out = layers.Dense(num_labels, activation='softmax')(x)
return tf.keras.Model(inputs=[text_input, series_input], outputs=[out])
def create_model_lstm_big(embedding_layer, num_labels=3):
text_input = layers.Input(shape=(None,), name='text')
txt = embedding_layer(text_input)
txt = layers.Bidirectional(tf.keras.layers.LSTM(64, recurrent_dropout=0.5, dropout=0.5))(txt)
txt = layers.Dense(32)(txt)
series_input = layers.Input(shape=(None,num_labels), name='series')
series = layers.LSTM(64, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)(series_input)
series = layers.LSTM(64, dropout=0.2, recurrent_dropout=0.2)(series)
series = layers.Dense(32)(series)
series = layers.Reshape([-1])(series)
x = layers.concatenate([txt, series])
x = layers.Dropout(0.25)(x)  # apply dropout to the concatenated text + series features
out = layers.Dense(num_labels, activation='softmax')(x)
return tf.keras.Model(inputs=[text_input, series_input], outputs=[out])
def build_model(embeddings_layer, model_fn, categories=3, optimizer='adam',
loss='categorical_crossentropy', metrics=[tf.keras.metrics.CategoricalAccuracy()]):
model = model_fn(embeddings_layer, categories)
model.summary()
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
return model
def early_stopping(min_delta=1e-3, patience=3, monitor='val_categorical_accuracy'):
return tf.keras.callbacks.EarlyStopping(
monitor=monitor,
min_delta=min_delta,
patience=patience,
verbose=1,
restore_best_weights=True
)
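# --- Editor's illustration (not part of the module): a hedged end-to-end sketch of how these
# --- helpers appear intended to fit together. The vectorizer setup, the GloVe file name and
# --- the `texts`/`labels` arguments are assumptions, not code from the original project.
def _example_training_run(texts, labels):
    """texts: list of training strings; labels: one-hot array of shape (n_samples, 3)."""
    vectorizer = layers.TextVectorization(output_mode='int')
    vectorizer.adapt(tf.data.Dataset.from_tensor_slices(texts).batch(128))

    embedding_layer = create_embeddings_matrix(vectorizer, 'glove.6B.100d.txt')  # illustrative path
    model = build_model(embedding_layer, create_model_lstm, categories=3)

    x = vectorizer(tf.constant(texts))  # token ids, padded per batch; mask_zero handles the padding
    return model.fit(x, labels, validation_split=0.1, epochs=20,
                     callbacks=[early_stopping()])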
|
StarcoderdataPython
|