max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
daily.py | apiforfun/theforexapi | 2 | 12786551 | <gh_stars>1-10
from sys import displayhook
import xml.etree.ElementTree as ET
import urllib.request
import xmltodict
import json
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client['theforexapi']
collection = db['currency']
currencies = ['USD', 'JPY', 'BGN', 'CYP', 'CZK', 'DKK', 'EEK', 'GBP', 'HUF', 'LTL', 'LVL', 'MTL', 'PLN', 'ROL', 'RON', 'SEK', 'SIT', 'SKK', 'CHF', 'ISK', 'NOK', 'HRK', 'RUB', 'TRL', 'TRY', 'AUD', 'BRL', 'CAD', 'CNY', 'HKD', 'IDR', 'ILS', 'INR', 'KRW', 'MXN', 'MYR', 'NZD', 'PHP', 'SGD', 'THB', 'ZAR']
url = 'https://www.ecb.europa.eu/home/html/rss.en.html'
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')
urls = []
for link in soup.find_all('a'):
if link.get('href',None) :
if '/rss/fxref' in link.get('href') and 'eek' not in link.get('href'):
urls.append('https://www.ecb.europa.eu'+link.get('href'))
record_data = {}
for url in urls:
response = urllib.request.urlopen(url).read()
data = xmltodict.parse(response)
data = dict(data)
for item in data['rdf:RDF']['item'] :
item = dict(item)
statstics = dict(item['cb:statistics'])
exchangeRate= dict(statstics['cb:exchangeRate'])
cbValue = dict(exchangeRate['cb:value'])
date = item['dc:date'].split("T")[0]
cbBase = dict(exchangeRate['cb:baseCurrency'])
if record_data.get(date,None) is None:
record_data[date] = {
'date': date,
'base': cbBase['#text'],
'rates': {}
}
record_data[date]['rates'][exchangeRate['cb:targetCurrency']] = float(cbValue['#text'])
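# Shape of record_data built by the loop above (rate values are illustrative only):
# {'2021-01-04': {'date': '2021-01-04', 'base': 'EUR',
#                 'rates': {'USD': 1.2271, 'JPY': 126.62, ...}}}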
def calculate_new_base(new_base, new_row):
new_curr = {
'date': new_row['date'],
'base': new_base,
}
new_curr['rates']={}
new_curr['rates']['EUR']=1/float(new_row['rates'][new_base])
for x in currencies:
if x in new_row['rates'].keys() and x != new_base:
new_curr['rates'][x] = float(new_row['rates'][x])/float(new_row['rates'][new_base])
collection.insert_one(new_curr)
print(new_curr)
print('new base:', new_base)
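# Worked example of calculate_new_base (illustrative numbers, not real ECB data):
# given a EUR-based row with rates {'USD': 1.10, 'JPY': 155.0} and new_base='USD',
# the USD-based record gets EUR = 1/1.10 ≈ 0.909 and JPY = 155.0/1.10 ≈ 140.9.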
for record in record_data.values():
# if record with the date and base is not found in db: insert that record and all the corresponding base values for the record.
if not collection.find_one({'date': record['date'], 'base': record['base']}):
print('not found')
collection.insert_one(record)
for k in currencies:
if k not in ('EUR',) and k in record['rates'].keys():
calculate_new_base(k, record)
else:
print('found')
print(record)
print(record['date'], record['base']) | 2.59375 | 3 |
ads/helpers.py | sinisaos/starlette-piccolo-rental | 3 | 12786552 | <filename>ads/helpers.py
from .tables import Ad, Review
def get_ads():
a = Ad
qs = a.select(
a.id,
a.slug,
a.title,
a.content,
a.created,
a.view,
a.room,
a.visitor,
a.price,
a.city,
a.address,
a.ad_user.username,
a.ad_user.id,
)
return qs
def get_reviews():
r = Review
qs = r.select(
r.id,
r.content,
r.created,
r.review_grade,
r.review_user.username,
r.ad.id,
)
return qs
def get_search_ads(q):
a = Ad
qs = a.select(
a.id,
a.slug,
a.title,
a.content,
a.created,
a.view,
a.room,
a.visitor,
a.price,
a.city,
a.ad_user.username,
).where(
(
(a.title.ilike("%" + q + "%"))
| (a.content.ilike("%" + q + "%"))
| (a.city.ilike("%" + q + "%"))
| (a.ad_user.username.ilike("%" + q + "%"))
)
)
return qs
def count_search_ads(q):
a = Ad
qs = a.count().where(
(
(a.title.ilike("%" + q + "%"))
| (a.content.ilike("%" + q + "%"))
| (a.city.ilike("%" + q + "%"))
| (a.ad_user.username.ilike("%" + q + "%"))
)
)
return qs
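# Minimal usage sketch (not part of this module; assumes Piccolo's async .run() and a
# Starlette JSONResponse in the calling endpoint):
#
#     async def ad_list(request):
#         ads = await get_ads().run()   # executes the select, returns row dicts
#         return JSONResponse(ads)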
| 2.5 | 2 |
backend/models/location.py | DavidLee0216/SWOOSH | 0 | 12786553 | <gh_stars>0
from sqlalchemy import Column, String, DateTime
from config.DBindex import Base
class LaunchLocations(Base):
__tablename__ = "launch_location"
observate_location = Column(String(100))
location = Column(String(100), primary_key=True, unique=True)
country = Column(String(50))
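# Minimal usage sketch (hypothetical values and session wiring, not part of this module):
#
#     loc = LaunchLocations(observate_location="pad camera",
#                           location="Kennedy Space Center", country="USA")
#     session.add(loc)
#     session.commit()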
| 2.171875 | 2 |
0801-0850/0804-UniqueMorseCodeWords/UniqueMorseCodeWords.py | Sun-Zhen/leetcode | 3 | 12786554 | # -*- coding:utf-8 -*-
"""
@author: Alden
@email: <EMAIL>
@date: 2018/3/29
@version: 1.0.0.0
"""
class Solution(object):
def uniqueMorseRepresentations(self, words):
"""
:type words: List[str]
:rtype: int
"""
tmp_list = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---", "-.-",
".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-", "..-", "...-",
".--", "-..-", "-.--", "--.."]
tmp_dict = dict()
for i in range(len(tmp_list)):
tmp_dict[chr(97 + i)] = tmp_list[i]
words_list = list()
for word in words:
tmp_str = ""
for l in word:
tmp_str += tmp_dict[l]
words_list.append(tmp_str)
return len(set(words_list))
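# Worked example: for words = ["gin", "zen", "gig", "msg"], "gin" and "zen" both map to
# "--...-." and "gig" and "msg" both map to "--...--.", so the method returns 2.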
if __name__ == "__main__":
s = Solution()
print(s.uniqueMorseRepresentations(["gin", "zen", "gig", "msg"]))  # -> 2
| 3.421875 | 3 |
backend/poolguybackend/api/migrations/0001_initial.py | dotchetter/poolguy | 0 | 12786555 | <reponame>dotchetter/poolguy<gh_stars>0
# Generated by Django 3.2.3 on 2021-05-23 12:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Device',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('device_id', models.CharField(default=uuid.UUID('efd053f8-3bc5-4851-b7d2-4bacd4e9667e'), max_length=255)),
('given_name', models.TextField(default='Poolguy Device', max_length=255)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='DeviceMessage',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('temperature_value', models.FloatField(default=0.0)),
('battery_level', models.IntegerField(default=0)),
('unit', models.CharField(choices=[('C', 'Celcius'), ('F', 'Fahrenheit')], default='F', max_length=255)),
('device', models.ManyToManyField(to='api.Device')),
],
),
]
| 1.921875 | 2 |
netta/model.py | zhangdafu12/web | 0 | 12786556 | # -*- encoding:utf8 -*-
# author: Shulei
# e-mail: <EMAIL>
# time: 2019/4/2 11:56
import random
import re
from _datetime import datetime
from collections import Counter
import jieba
import operator
import pymysql
import requests
from flask import Flask, json
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.dialects.mysql import LONGTEXT
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string '
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql+pymysql://root:[email protected]:3306/netta"
db = SQLAlchemy(app)
# db = SQLAlchemy()
# Association table linking news and users
middle_mylike = db.Table("middle_mylike",
db.Column("user_id", db.Integer, db.ForeignKey("users.user_id")),
db.Column("news_id", db.Integer, db.ForeignKey("news.news_id"))
)
# User table
class User(db.Model):
__tablename__ = 'users'
user_id = db.Column(db.Integer, primary_key=True)  # primary key
account = db.Column(db.String(32), unique=True, nullable=False)  # indexed (unique), must not be null
passwd = db.Column(db.String(32), nullable=False)
nick_name = db.Column(db.String(32), unique=True)
true_name = db.Column(db.String(32))
sex = db.Column(db.String(32))
head_pic = db.Column(db.String(255))
birthday = db.Column(db.String(32))
province = db.Column(db.String(32))
country = db.Column(db.String(32))
city = db.Column(db.String(32))
description = db.Column(db.String(255))  # personal description text
is_administrator = db.Column(db.Boolean, default=False,
nullable=False)  # admin or regular user; defaults to regular, True means administrator (whether email verification should grant admin is still open)
news = db.relationship("News",
secondary=middle_mylike,
backref=db.backref("users", lazy='dynamic'),
lazy='dynamic'
)
comments = db.relationship("Comment", backref="users", lazy="dynamic") # 评论的外链接
give_likes = db.relationship("GiveLike", backref="users", lazy="dynamic") # 点赞的外链接
notifyreminds = db.relationship("NotifyRemind", backref="users", lazy="dynamic") # 消息推送的外链接
def user_2_json(obj):
return {'user_id': obj.user_id,
'account': obj.account,
'passwd': <PASSWORD>,
'nick_name': obj.nick_name if obj.nick_name else '',
'true_name': obj.true_name,
'sex': obj.sex,
'head_pic': obj.head_pic,
"province": obj.province,
"city": obj.city,
"country": obj.country,
"description": obj.description if obj.description else ""}
class Interest(db.Model):
__tablename__ = 'interest'
id = db.Column(db.Integer, primary_key=True)  # primary key
news_id = db.Column(db.Integer, db.ForeignKey("news.news_id"), index=True)
interest = db.Column(db.String(64))
class UserInterest(db.Model):
__tablename__ = 'user_interest'
id = db.Column(db.Integer, primary_key=True)  # primary key
interest = db.Column(db.String(64), index=True, )
user_id = db.Column(db.Integer, db.ForeignKey("users.user_id"))
class NewsDetail(db.Model):
__tablename__ = 'news_detail'
id = db.Column(db.Integer, primary_key=True)  # primary key
relation = db.Column(LONGTEXT)
words = db.Column(LONGTEXT)
weight = db.Column(LONGTEXT)
event = db.Column(LONGTEXT)
news_id = db.Column(db.Integer, db.ForeignKey("news.news_id"), index=True)
# News table
class News(db.Model):
__tablename__ = 'news'
news_id = db.Column(db.Integer, primary_key=True)  # primary key
title = db.Column(db.Text)
content = db.Column(db.Text)
news_time = db.Column(db.DateTime)
author = db.Column(db.String(64))
watch_num = db.Column(db.INTEGER)
comment_num = db.Column(db.INTEGER)
like_num = db.Column(db.INTEGER)
img = db.Column(db.String(255))
url = db.Column(db.Text)
comments = db.relationship("Comment", backref="news", lazy="dynamic")
interest = db.relationship("Interest", backref="news", lazy="dynamic")
# interest_id = db.Column(db.Integer, db.ForeignKey('interest.id'))
words = db.Column(db.Text)
def news_2_json(obj):
return {'news_id': obj.news_id,
'title': obj.title,
'content': obj.content,
'news_time': obj.news_time if obj.news_time else '',
'author': obj.author,
'watch_num': obj.watch_num,
'comment_num': obj.comment_num,
"like_num": obj.like_num,
"img": obj.img,
"url": obj.url}
# A search result is a collection of individual news records
# news >>> result: one-to-many relationship
# user >>> result: one-to-many relationship
class Search_result(db.Model):
__tablename__ = 'search_result'
id = db.Column(db.Integer, primary_key=True)  # primary key
keyword = db.Column(db.String(255))
maj_event = db.Column(db.Text)
people_list = db.Column(db.Text)
relation1 = db.Column(db.Text)
relation2 = db.Column(db.Text)
content = db.Column(db.Text)
create_at = db.Column(db.DateTime, default=datetime.now)
class RecordSearch(db.Model):
__tablename__ = "record_search"
id = db.Column(db.Integer, primary_key=True)
search_result_id = db.Column(db.Integer, db.ForeignKey("search_result.id"))
user_id = db.Column(db.Integer, db.ForeignKey("users.user_id"))
status = db.Column(db.Boolean, default=0)
create_at = db.Column(db.DateTime, default=datetime.now)
# Comment model. Comments and replies are kept in separate tables, so this table only holds comments made directly on a news item and no target-user field is needed
class Comment(db.Model):
__tablename__ = "comments"
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
agree_num = db.Column(db.Integer, default=0)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey("users.user_id"))
news_id = db.Column(db.Integer, db.ForeignKey("news.news_id"))
replies = db.relationship("CommentReply", backref="comments", lazy="dynamic")
# Reply table
class CommentReply(db.Model):
__tablename__ = "replies"
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
timestamp = db.Column(db.DateTime, index=True, default=datetime.now)  # pass the callable so it is evaluated at insert time
from_uid = db.Column(db.Integer, db.ForeignKey("users.user_id"))
to_uid = db.Column(db.Integer, db.ForeignKey("users.user_id"))
comment_id = db.Column(db.Integer, db.ForeignKey("comments.id"))
agree_num = db.Column(db.Integer, default=0)
#
reply_id = db.Column(db.Integer)  # id of the reply being replied to (when reply_type is 'reply'), or of the comment being replied to, i.e. the first reply attached to a comment (when reply_type is 'comment')
reply_type = db.Column(db.String(64))  # type of target: either 'comment' or 'reply'
# Like table for comments and replies
class GiveLike(db.Model):
__tablename__ = "givelike"
id = db.Column(db.Integer, primary_key=True)
con_rep_id = db.Column(db.Integer)  # id of the liked comment or reply; records which content the liking user acted on
user_id = db.Column(db.Integer, db.ForeignKey("users.user_id"))  # a user can have many likes (one-to-many)
like_type = db.Column(db.String(64))  # what was liked: 'comment' or 'reply'
# Message notification system
# Resource notifications (push messages for comments, replies and likes on content the user has published)
class NotifyRemind(db.Model):
__tablename__ = "notify_remind"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
recipientID = db.Column(db.Integer, db.ForeignKey("users.user_id"))  # id of the receiving user; one user can have many notifications
kind = db.Column(db.String(64))  # reply, like or delete
createdAt = db.Column(db.DateTime, default=datetime.utcnow)  # when the notification was created
status = db.Column(db.Boolean, default=False)  # read/unread status of this notification; defaults to unread
from_user_name = db.Column(db.String(64))
content = db.Column(db.Text)
news_id = db.Column(db.Integer)
def __repr__(self):
return f"<NotifyRemind id={self.id} kind={self.kind}>"
def search_news(num, key_word=''):
db2 = pymysql.connect("192.168.3.84", "root", "1<PASSWORD>", "netta", charset='utf8')
cursor = db2.cursor()
if key_word:
sql = f"SELECT * From news where words like \'%{key_word}%\' limit {num}"
else:
sql = f"SELECT * From news limit {num}"
print(sql)
cursor.execute(sql)
data = cursor.fetchall()
cursor.close()
db2.close()
return data
def filter_emoji(desstr, restr=''):
# strip emoji from the text
content = desstr
try:
co = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
co = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
try:
result = co.sub(restr, desstr)
except:
result = content
return result
def deal_word(word):
word = word.replace(' ', '').replace('\n', '').replace('\r\t', '')
return word
def weighted_xigama_sorting(inters_wd_set, founded_info_li):
weight_xigam_id_tp_li = []
for ev_tp in founded_info_li:
weight_xigam = 0
ev_dic = ev_tp[1][0]
for ev_wd in inters_wd_set:
try:
weight_xigam += ev_dic[ev_wd][0] * ev_dic[ev_wd][1]
except Exception as te:
continue
weight_xigam_id_tp_li.append((ev_tp[0], weight_xigam))
weight_xigam_id_tp_li = sorted(weight_xigam_id_tp_li, key=lambda x: x[1], reverse=True)
return weight_xigam_id_tp_li
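# Illustrative scoring example (made-up numbers): with inters_wd_set = {'w1', 'w2'} and a
# candidate page whose weight dict is {'w1': (3, 0.5), 'w2': (2, 0.25)}, its score is
# 3*0.5 + 2*0.25 = 2.0; pages are then returned sorted by this score, highest first.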
def get_recommand(id):
# key = key.split(',')
detail = NewsDetail.query.filter(NewsDetail.news_id == id).first()
try:
words = eval(detail.words)
except:
return []
result = []
ids = []
for i in words:
que = Interest.query.filter(Interest.interest == i).all()
# print(result)
for i in que:
ids.append(i.news_id)
# print(i.news_id)
c_id = Counter(ids)
ided = sorted(c_id.items(), key=operator.itemgetter(1), reverse=True)
fin_id = []
if len(ided) > 20:
for i in ided[0:10]:
fin_id.append(i[0])
else:
for i in ided:
fin_id.append(i[0])
words_detail = []  # high-frequency word weights of the related pages
for i in fin_id:
w = NewsDetail.query.filter(NewsDetail.news_id == i).first()
if w:
words_detail.append((w.news_id, eval(w.weight)))
# a = News.query.get(i)
# result.append((a.news_id,a.title,a.content,a.news_time,a.author,a.watch_num,a.comment_num,a.like_num,a.img,a.url,a.words))
# print(a.title)
sorted_url_li = weighted_xigama_sorting(words, words_detail)
max_urls_num = 5  # maximum number of recommendations
if len(sorted_url_li) > 5:  # keep only the top entries
sorted_url_li = sorted_url_li[0:max_urls_num]
# collect the page results
for ev_tp in sorted_url_li:
a = News.query.get(ev_tp[0])
result.append((a.news_id, a.title, a.content, a.news_time, a.author, a.watch_num, a.comment_num, a.like_num,
a.img, a.url, a.words))
# print()
return result
def get_news_by_word(word):
ids = []
result = []
que = Interest.query.filter(Interest.interest == word).all()
# print(result)
for i in que:
ids.append(i.news_id)
# print(i.news_id)
c_id = Counter(ids)
ided = sorted(c_id.items(), key=operator.itemgetter(1), reverse=True)
fin_id = []
if len(ided) > 20:
for i in ided[0:10]:
fin_id.append(i[0])
else:
for i in ided:
fin_id.append(i[0])
for i in fin_id:
a = News.query.get(i)
result.append((a.news_id, a.title, a.content, a.news_time, a.author, a.watch_num, a.comment_num, a.like_num,
a.img, a.url, a.words))
return result
def get_bottom_left(id):
result = []
a = UserInterest.query.filter(UserInterest.user_id == id).all()
print(a)
for i in a:
print(i.interest)
if i.interest:
result += get_news_by_word(i.interest)
random.shuffle(result)
return random.sample(result, 20)
def get_news():
return get_bottom_left(1)
def add_interset(user_id, words):
a = UserInterest.query.filter(UserInterest.user_id == user_id).all()
interests = []
for i in a:
interests.append(i.interest)
for word in words.split(','):
if word not in interests:
inter = UserInterest(interest=word, user_id=user_id)
db.session.add(inter)
db.session.commit()
def add_news_detail(id, relation, words, event, weight):
# news = News.query.get(id)
# relation = deal(news.content)
detail = NewsDetail(news_id=id, relation=relation, words=words, event=event, weight=weight)
db.session.add(detail)
db.session.commit()
if __name__ == '__main__':
# a = search_news(num=7)
# print(a)
# db.drop_all()
db.create_all()
# for i in range(2013,10000):
# try:
# news = News.query.get(i)
# except:
# continue
# if news:
# if news.news_time:
# if not type(news.news_time) == str:
# web_time = news.news_time.strftime("%Y%m%d%H%M%S")
# else:
# web_time = ''
# data = [(news.url,news.title,web_time,news.content,news.title,'')]
# info = {"content":str(data)}
# r = requests.post("http://192.168.3.134:9988/", data=info)
# response = json.loads(r.text)
# print(response)
# try:
# if eval(response['relL']):
# relation = [i for i in eval(response['relL'])[0] if i]
# else:
# relation = []
# except:
# relation = []
# words = []
# for j in eval(response['reuslt_dict_tuple']):
# if type(j)==dict:
# for key,value in j.items():
# words.append(key)
# elif type(j)==list:
# words += j
# add_news_detail(i,str(relation),str(words),response['mergedEvt_li'],response['reuslt_dict_tuple'])
# for x in words:
# print(x)
# interest = Interest(interest=x, news_id=i)
# db.session.add(interest)
# db.session.commit()
# db.create_all()
# app.run()
# file = r'D:\Junjie_Space\git\soloTaskCapsulation\test_search2\data\-7755379329499152025\data.txt'
# with open(file,'r',encoding='utf8') as f:
# data = eval(f.read())
# print(data)
# result = []
# url = set()
# for i in data:
# if not i[0] in url:
# result.append(i)
# for i in result:
# news = News(title=i[1],url=i[0],content=filter_emoji(i[2]),words='猪疫情')
# db.session.add(news)
# db.session.flush()
# # print the primary key of the newly inserted row
# news_id = news.news_id
# # print(news_id)
# interest = Interest(interest='猪疫情',news_id=news_id)
# db.session.add(interest)
# # interest = Interest(interest='虐待',news_id=news_id)
# # db.session.add(interest)
# # interest = Interest(interest='腐败',news_id=news_id)
# # db.session.add(interest)
#
# db.session.commit()
# app.run()
# a = get_recommand('华为')
# print(a)
# words = 'aaa,aaa'
# words = words.split(',')
# print(words)
# interest = Interest(interest='台湾')
# db.session.add(interest)
# for i in a:
# print(i)
# news = News(title=i['title'], author='a', img=i['img'], url=i['news_url'],
# words=','.join(i['key_word']))
# db.session.add(news)
#
# db.session.commit()
# app.run()
# a = get_bottom_left(1)
# print(a)
| 2.25 | 2 |
secret-handshake/secret_handshake.py | amalshehu/exercism-python | 2 | 12786557 | # File : secret_handshake.py
# Purpose : Write a program that will take a decimal number, and
# convert it to the sequence of events for a secret handshake.
# Programmer : <NAME>
# Course : Exercism
# Date : Monday 3 October 2016, 12:50 AM
actions = {1: 'wink',
2: 'double blink',
4: 'close your eyes',
8: 'jump'
}
rev_action = dict(zip(actions.values(), actions.keys()))
def handshake(num):
if type(num) == str:
try:
num = int(num, 2)
except ValueError:
return []
if num <= 0:
return []
secret = [actions[2**i] for i in range(4) if num & 2**i]
if num & 2**4:
secret = secret[::-1]
return secret
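# Worked example: handshake(9) -> 9 == 0b01001, so bits 1 (wink) and 8 (jump) are set and
# the result is ['wink', 'jump']; adding 16 (0b10000) would reverse the order.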
def code(actions):
num = 0
encoded = [0]
back = False
for item in actions:
if item in rev_action:
action_code = rev_action[item]
if action_code <= 8:
num += action_code
if action_code < max(encoded):
back = True
encoded.append(action_code)
else:
return '0'
if back:
num += 16
binary = str(bin(num))[2:]
return binary
print (handshake(9))
print (code(['wink', 'double blink', 'jump']))
| 3.953125 | 4 |
test.py | csev/tsugi-python-test | 0 | 12786558 | import pymysql
import random
from urllib.parse import urlparse
import urllib
import databaseconfig as CFG
import post as POST
import util as U
inp = input('Test Java, Node, PHP, pGphp, or pYthon? ')
if inp.lower().startswith('j') :
url = 'http://localhost:8080/tsugi-servlet/hello'
elif inp.lower().startswith('n') :
url = 'http://localhost:3000/lti'
elif inp.lower().startswith('y') :
url = 'http://localhost:8000/tsugi/default/launch'
elif inp.lower().startswith('g') :
url = 'http://localhost:8888/pg-tsugi/mod/attend/index.php'
else :
# This does not work with all tools - use map.
url = 'http://localhost:8888/tsugi/mod/attend/index.php'
print('Test URL:',url)
user1 = 'unittest:user:'+str(random.random())
user2 = 'unittest:user:'+str(random.random())
context1 = 'unittest:context:'+str(random.random())
context2 = 'unittest:context:'+str(random.random())
link1 = 'unittest:link:'+str(random.random())
link2 = 'unittest:link:'+str(random.random())
link3 = 'unittest:link:'+str(random.random())
conn = pymysql.connect(host=CFG.host,
port=CFG.port,
user=CFG.user,
password=<PASSWORD>,
db=CFG.db,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
# Clean up old unit test users and contexts
U.cleanunit(conn, cursor)
post = {}
post.update(POST.core)
post.update(POST.inst)
post['resource_link_id'] = link1
post['context_id'] = context1
post['user_id'] = user1
print('Sending a launch with a bad secret... ',end='')
CFG.oauth_secret = 'bad_news'
r = U.launch(CFG,url,post, 302)
redirect = r.headers['Location']
up = urlparse(redirect)
qu = urllib.parse.parse_qs(up.query)
print (qu['lti_errormsg'][0])
# print (qu['detail'][0])
print('Loading secret for',CFG.oauth_consumer_key,'from the database')
sql = 'SELECT secret FROM lti_key WHERE key_key = %s'
cursor.execute(sql, (CFG.oauth_consumer_key, ))
result = cursor.fetchone()
if ( result == None ) :
print('Unable to load secret for key',CFG.oauth_consumer_key)
U.abort()
conn.commit()
CFG.oauth_secret = result['secret']
header = {'Content-Type' : 'application/x-www-form-urlencoded'}
print('Sending a launch with a good secret... ',end='')
r = U.launch(CFG,url,post)
U.verifyDb(conn,post)
print('Sending minimal launch to check DB persistence... ',end='')
post = {}
post.update(POST.core)
post['resource_link_id'] = link1
post['context_id'] = context1
post['user_id'] = user1
post['roles'] = 'Instructor'
r = U.launch(CFG,url,post)
U.verifyDb(conn,post)
print('Changing context_title... ',end='')
post['context_title'] = 'Now for something completely dfferent';
r = U.launch(CFG,url,post)
U.verifyDb(conn,post)
print('Changing lis_person_contact_email_primary... ',end='')
post['lis_person_contact_email_primary'] = '<EMAIL>';
r = U.launch(CFG,url,post)
U.verifyDb(conn,post)
print('Changing user_image... ',end='')
post['user_image'] = 'http://www.dr-chuck.com/csev.jpg';
r = U.launch(CFG,url,post)
U.verifyDb(conn,post)
print('Changing user_image again... ',end='')
post['user_image'] = 'http://www.dr-chuck.com/csev_old.jpg';
r = U.launch(CFG,url,post)
U.verifyDb(conn,post)
print('Changing user_locale... ',end='')
post['launch_presentation_locale'] = 'pt-BR';
r = U.launch(CFG,url,post)
U.verifyDb(conn,post)
print('Changing user_locale (Again)... ',end='')
post['launch_presentation_locale'] = 'pt-PT';
r = U.launch(CFG,url,post)
U.verifyDb(conn,post)
services = ['ext_memberships_id', 'ext_memberships_url', 'lineitems_url', 'memberships_url']
for service in services:
for i in range(2):
x = 'http://example.com/' + service + '#' + str(i)
print('Changing',service,'to',x,'...',end='')
if service in post : del post[service]
if 'custom_'+service in post : del post['custom_'+service]
if i == 1 and not service.startswith('ext_') :
post['custom_'+service] = x
else:
post[service] = x
r = U.launch(CFG,url,post)
U.verifyDb(conn,post)
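# Note on the loop above: on the second pass each non-ext_ service parameter is sent with a
# custom_ prefix instead of its plain name, so both launch-parameter styles get verified.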
| 2.546875 | 3 |
configlighthouse.py | ntfshard/build-status-semaphore | 0 | 12786559 | #!python3
# config-lighthouse.py
user='user'
password='<PASSWORD>' # token from https://HOSTNAME/user/USERNAME/configure `API Token`
comport='/dev/ttyUSB0'
| 1.390625 | 1 |
src/test_eorzea_time.py | Indanaiya/ffoverlay | 0 | 12786560 | <filename>src/test_eorzea_time.py
import unittest
import eorzea_time
from eorzea_time import getEorzeaTime, getEorzeaTimeDecimal, timeUntilInEorzea
from unittest import mock
class EorzeaTimeTests(unittest.TestCase):
def test_type_of_getEorzeaTimeDecimal(self):
self.assertEqual(type(getEorzeaTimeDecimal()), type((0.0,0.0)))
def test_type_of_getEorzeaTime(self):
self.assertEqual(type(getEorzeaTime()), type((0,0)))
def test_result_of_getEorzeaTime(self):
self.assertEqual(int(getEorzeaTimeDecimal()[0]), getEorzeaTime()[0])#Test hours
self.assertEqual(int(getEorzeaTimeDecimal()[1]), getEorzeaTime()[1])#Test minutes
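# Background for the next test: an Eorzea day passes in 70 real-world minutes, so one
# Eorzea hour is 175 s and half a day is 35 min = 2100 s (the values asserted below).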
def test_timeUntilInEorzea(self):
with mock.patch.object(eorzea_time, 'getEorzeaTimeDecimal') as m:
m.return_value = (12,0)
#Testing midnight
self.assertEqual(timeUntilInEorzea(0), 2100)#Half a day is 35min = 2100s
#Testing 0 hours
self.assertEqual(timeUntilInEorzea(12), 0)
#Testing tomorrow morning
self.assertEqual(timeUntilInEorzea(3), 2625)
#Testing later today
self.assertEqual(timeUntilInEorzea(15), 525)
with self.assertRaises(ValueError):
timeUntilInEorzea(-1)
with self.assertRaises(ValueError):
timeUntilInEorzea(25) | 2.859375 | 3 |
PythonWallet/post_balance_address.py | IOTAplus/SMART-ENERGY-CONTROLL | 0 | 12786561 | <filename>PythonWallet/post_balance_address.py
'''
In this example we generate 1 address with a security level of 2 (default)
for a given seed. This is the first available, unused address for this seed.
'''
from iota import Iota
import pprint
import time
import json
import requests
# This is a demonstration seed, always generate your own seed!
my_seed = 'EDFCUGAMUKFUTSNERKXBVFTMRPGQRLFMYOHHSVCYDTZRJWULKHKRTGCEUMPD9NPFGWFTRTKLQSQRWZDMY'
# This is a demonstration URL, replace it with yours. It is the one for keepy; the same one works for the TTGO / ESP32!
url = "http://192.168.1.200:3002/messages"
# This node should work but you can use your own or another one. Here you find more: https://thetangle.org/nodes
node = 'https://nodes.thetangle.org:443'
api = Iota(
adapter= node,
seed = my_seed)
#print('\nThe balance for your seed:\n')
#pprint.pprint(api.get_account_data())
# We want the first address for this seed (index 0), make sure it hasn't been spent from before!
# Script actually runs until you load up your address
success = False
# Gather addresses, balance and bundles
# response['balance'] is an integer!
while success == False:
try:
addresses = api.get_new_addresses(index=0, count=1, security_level=2, checksum=True)
#this contains the last unused, safe address of your SEED.
address = str(addresses['addresses'][0])
print('\nLast unused address: %s' % address)
response = api.get_balances(addresses=[address])
#this contains the pure balance of your SEED as a string.
balance= str(response["balances"]).replace("[","").replace("]","").replace("'","").replace("'","")
print('Your balance:')
pprint.pprint(balance)
#http post
#This is the message which will be sent to keepy. The address and the balance get filled in automatically
message = {"iot2tangle":[{"sensor": "Wallet","data":[{"Address":address},{"Balance":balance}]}],"device": "Raspi-HTTP","timestamp": "1601653408"}
print("Sending following to url: "+url)
print(message)
#this is the JSON response of the POST Request
httpResponseCode = requests.post(url,json=message)
pprint.pprint(httpResponseCode)
except:
#This happens when the program failed or Ctrl+C was pressed.
time.sleep(3) #Press Ctrl+C again to exit the program
print("It did not work. Trying again.")
| 2.890625 | 3 |
internal/handlers/andorra.py | fillingthemoon/cartogram-web | 0 | 12786562 | <filename>internal/handlers/andorra.py
import settings
import handlers.base_handler
import csv
class CartogramHandler(handlers.base_handler.BaseCartogramHandler):
def get_name(self):
return "Andorra"
def get_gen_file(self):
return "{}/and_processedmap.json".format(settings.CARTOGRAM_DATA_DIR)
def validate_values(self, values):
if len(values) != 7:
return False
for v in values:
if type(v) != float:
return False
return True
def gen_area_data(self, values):
return """1 {} Andorra la Vella
2 {} Canillo
3 {} Encamp
4 {} Escaldes-Engordany
5 {} La Massana
6 {} Ordino
7 {} <NAME>""".format(*values)
def expect_geojson_output(self):
return True
def csv_to_area_string_and_colors(self, csvfile):
return self.order_by_example(csv.reader(csvfile), "Parish", 0, 1, 2, 3, ["<NAME> Vella","Canillo","Encamp","Escaldes-Engordany","La Massana","Ordino","<NAME>"], [0.0 for i in range(0,7)], {"Andorra la Vella":"1","Canillo":"2","Encamp":"3","Escaldes-Engordany":"4","La Massana":"5","Ordino":"6","<NAME>":"7"})
| 2.609375 | 3 |
tests/test_token_revocation.py | yaal-fr/canaille | 3 | 12786563 | <reponame>yaal-fr/canaille
from . import client_credentials
def test_token_revocation(testclient, user, client, token, slapd_connection):
assert not token.oauthRevokationDate
res = testclient.post(
"/oauth/revoke",
params=dict(token=token.oauthAccessToken,),
headers={"Authorization": f"Basic {client_credentials(client)}"},
status=200,
)
assert {} == res.json
token.reload(slapd_connection)
assert token.oauthRevokationDate
def test_token_invalid(testclient, client):
res = testclient.post(
"/oauth/revoke",
params=dict(token="invalid"),
headers={"Authorization": f"Basic {client_credentials(client)}"},
status=200,
)
assert {} == res.json
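# Note: RFC 7009 has the revocation endpoint answer 200 with an empty body even for
# unknown tokens, which is why both tests expect an empty JSON response.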
| 2.125 | 2 |
teelib/network/msg_packer.py | edg-l/teelib | 0 | 12786564 | <filename>teelib/network/msg_packer.py
from .packer import Packer
from .constants import OFFSET_UUID
from teelib.uuid.util import get_uuid
class MsgPacker(Packer):
def __init__(self, packet_type: int):
super().__init__()
if packet_type < OFFSET_UUID:
self.add_int(packet_type)
else:
self.add_int(0) # NETMSG_EX, NETMSGTYPE_EX
self.add_raw(get_uuid(packet_type))
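# Minimal usage sketch (the type id below is hypothetical):
#
#     packer = MsgPacker(1)   # ordinary type id, written directly
#     packer.add_int(42)      # payload fields follow the header
#
# Type ids at or above OFFSET_UUID are instead written as 0 (NETMSG_EX) plus their UUID,
# as handled in __init__ above.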
| 2.359375 | 2 |
blocks/templatetags/blocks_admin.py | kimus/django-blocks | 3 | 12786565 | from django import template
from django.conf import settings
register = template.Library()
@register.assignment_tag
def get_language_byindex(index):
lang = ('', '')
try:
lang = settings.LANGUAGES[index]
except KeyError:
pass
except IndexError:
pass
return lang
| 2 | 2 |
src/bootstrap_run.py | AminJavari/ROSE | 0 | 12786566 |
from collections import namedtuple
from src import bootstrap
import settings
import const
if __name__ == "__main__":
argsClass = namedtuple('argsClass', 'build predict')
buildClass = namedtuple('argsClass', 'input directed sample method dimension windowsize walklen nbofwalks embedtype classificationfunc optimizeclassifier '
'temp_dir temp_id logfile train_ratio verbose keep_dropout use_cuda epoch_num batch_size task force')
print(const.SLASHDOT_GRAPH)
build = buildClass(input=settings.config[const.SLASHDOT_GRAPH],
directed=True, sample=["degree", 120], method="3type",
dimension=10, windowsize=3, walklen=50, nbofwalks=20, embedtype="py", classificationfunc= "MLP", optimizeclassifier= True,
temp_dir=settings.config[const.TEMP_DIR],
temp_id="slash-full", train_ratio=0.8, verbose=True, logfile = "log.txt", keep_dropout = 0.8, use_cuda=False, epoch_num=10,
batch_size = 512, task = 'link',
#force=['model'])
force=[ 'sample', 'preprocess', 'postprocess', 'model'])
# args = argsClass(build=build, predict=None)
# bootstrap.main(args)
print("----------------------------")
# build = build._replace(method="attention")
args = argsClass(build=build, predict=None)
bootstrap.main(args)
print("----------------------------")
| 2.328125 | 2 |
Spanners/Treeify.py | eddo888/Spanners | 0 | 12786567 | <reponame>eddo888/Spanners<gh_stars>0
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
import io, sys, os, json, xmltodict, yaml
from collections import OrderedDict as OD
from collections import deque
from asciitree import LeftAligned
from asciitree.drawing import BoxStyle, BOX_LIGHT, BOX_BLANK
from io import StringIO, IOBase
from Baubles.Colours import Colours
from Perdy.pretty import prettyPrintLn, Style
from Perdy.parser import doParse
from Argumental.Argue import Argue
args = Argue()
@args.command(single=True)
class Treeify(object):
@args.property(short='c', flag=True, help='output in colour')
def colour(self): return False
@args.property(short='a', flag=True, help='ascii instead of boxes')
def ascii(self): return False
_oneof = OD([(x, 'input as %s' % x) for x in ['json', 'xml', 'yaml']])
@args.property(oneof=_oneof, short=True, flag=True, default=list(_oneof.keys())[0])
def format(self): return
def __init__(self, colour=False, ascii=False):
if colour: self.colour = True
if ascii: self.ascii = True
self.fundamentals = [str, int, float, bool]
self.collections = [list, dict, OD]
self.colours = Colours(colour=self.colour)
def treeFix(self, node):
if not node:
return dict()
if type(node) in self.fundamentals:
return {''.join([self.colours.Purple, str(node), self.colours.Off]): dict()}
if type(node) is list:
new = OD()
for n in range(len(node)):
key = ''.join(['[', self.colours.Teal, str(n), self.colours.Off,']'])
new[key] = self.treeFix(node[n])
return new
if type(node) in [dict, OD]:
for key in list(node.keys()):
tipe = type(node[key])
value = self.treeFix(node[key])
del node[key]
if len(key) and key[0] in ['@', '#']:
node[''.join([self.colours.Green, key, self.colours.Off])] = value
else:
if tipe in self.fundamentals:
parts = [self.colours.Green]
else:
parts = [self.colours.Teal]
parts += [key, self.colours.Off]
if self.format == 'xml':
parts = ['<'] + parts + ['>']
node[''.join(parts)] = value
return node
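# Illustrative example (assuming colour is off so the ANSI codes are empty, default 'json'
# format): treeFix({'a': {'b': 1}}) returns {'a': {'b': {'1': {}}}}, i.e. every leaf is
# wrapped as a childless key so asciitree's LeftAligned can render the whole structure.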
def process(self, input, output=sys.stdout):
if type(input) in self.collections:
o = input
elif isinstance(input, IOBase) or isinstance(input, StringIO):
input = input.read()
if type(input) in [str]:
if self.format == 'xml':
o = xmltodict.parse(input)
elif self.format == 'yaml':
o = yaml.safe_load(input)
else: # == 'json'
o = json.loads(input)
if self.ascii:
tr = LeftAligned()
else:
tr = LeftAligned(draw=BoxStyle(
label_space=0,
gfx=BOX_LIGHT,
horiz_len=1
))
output.write(tr(self.treeFix(o)))
@args.operation
@args.parameter(name='files', short='f', nargs='*', metavar='file')
@args.parameter(name='output', short='o')
def bark(self, files=[], output=None):
_output = sys.stdout
if output:
_output = open(output(), 'w')
if len(files) == 0:
self.process(sys.stdin, _output)
else:
for file in files:
with open(file) as _input:
self.process(_input, _output)
if output:
_output.close()
return
@args.operation
def test(self):
h = '\n' + '_' * 47
j = {
'one': {
'one_one': {
'one_one': [{
'#text': '_1_1_1'
}, {
'#text': '_1_1_2'
}]
},
'one_two': {
'@attr': '_1_2',
'#text': '_1_2_1'
}
}
}
print(h)
prettyPrintLn(j)
print(h)
f = '../test/treeify.json'
with open(f,'w') as output:
json.dump(j, output)
self.bark([f])
print(h)
#self.ascii = True
self.colour = True
self.process(StringIO(json.dumps(j)), sys.stdout)
print(h)
x = xmltodict.unparse(j)
doParse(StringIO(str(x)), sys.stdout, colour=True)
print(h)
self.format = 'xml'
self.process(StringIO(str(x)), sys.stdout)
print(h)
sio = StringIO()
prettyPrintLn(j, output=sio, style=Style.YAML, colour=False)
y = sio.getvalue()
sio.close()
#print y
y = y.replace('#text', '"#text"')
y = y.replace('@attr', '"@attr"')
#print y
prettyPrintLn(j, output=sys.stdout, style=Style.YAML, colour=True)
print(h)
self.format = 'yaml'
self.process(StringIO(y), sys.stdout)
return
if __name__ == '__main__': args.execute()
| 2.375 | 2 |
hamiltonian_chain/hamiltonian_chain_solution.py | bzliu94/algorithms | 0 | 12786568 | # 2015-12-14
# solves hamiltonian chain enumeration problem
# usage: python hamiltonian_chain_solution.py W H
# where W is grid width and H is grid height
# takes O(2 ^ L * L ^ 2) time
# involves memoizing using a surface key
# inspired by <NAME>
# algorithm comes from a paper by <NAME>
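# Illustrative invocation, following the usage line above: "python hamiltonian_chain_solution.py 3 3"
# enumerates chains on a 3 x 3 grid; the grid is swept row by row and partial results are
# memoized per surface key, which is where the O(2 ^ L * L ^ 2) bound above comes from.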
import math
from collections import defaultdict
import random
class Grid:
def __init__(self, W, H):
self.W = W
self.H = H
def getWidth(self):
return self.W
def getHeight(self):
return self.H
def idToLocation(id_value, eff_W, eff_H):
t = id_value
row = getRow(t, eff_W, eff_H)
col = getCol(t, eff_W, eff_H)
location = (row, col)
return location
def getRow(t, W, H):
result = int(math.floor(t / W))
return result
def getCol(t, W, H):
return t % W
def getTime(row, col, W, H):
t = row * W + col
return t
def getPriorRowAndColumn(row, col, W, H):
t = getTime(row, col, W, H)
next_t = t - 1
next_row = getRow(next_t, W, H)
next_col = getCol(next_t, W, H)
if next_t == -1:
return None
else:
return (next_row, next_col)
def getNextRowAndColumn(row, col, W, H):
t = getTime(row, col, W, H)
next_t = t + 1
next_row = getRow(next_t, W, H)
next_col = getCol(next_t, W, H)
return (next_row, next_col)
class FullGrid(Grid):
def __init__(self, W, H):
Grid.__init__(self, W, H)
vertex_rows = []
for i in xrange(H + 1):
vertex_row = []
for j in xrange(W + 1):
vertex = None
vertex_row.append(vertex)
vertex_rows.append(vertex_row)
self.vertex_rows = vertex_rows
self.id_to_vertex_dict = {}
self.location_to_incident_path_far_node_id = defaultdict(lambda: [])
self.num_completed_chains = 0
def addVertex(self, id_value, row1, col1, path_end_id_value, base_num_connections, non_base_num_connections, is_sentinel):
vertex = Vertex(id_value, row1, col1, [path_end_id_value], base_num_connections, non_base_num_connections, is_sentinel)
(self.vertex_rows)[row1][col1] = vertex
(self.id_to_vertex_dict)[id_value] = vertex
location1 = (row1, col1)
location2 = idToLocation(path_end_id_value, self.getWidth() + 1, self.getHeight() + 1)
(self.location_to_incident_path_far_node_id)[location1].append(path_end_id_value)
(self.location_to_incident_path_far_node_id)[location2].append(id_value)
return vertex
def getVertex(self, row, col):
return (self.vertex_rows)[row][col]
def getVertexUsingIDValue(self, id_value):
return (self.id_to_vertex_dict)[id_value]
def getVertexRow(self, row):
return (self.vertex_rows)[row]
def getPathEndNode(self, row, col):
vertex = self.getVertex(row, col)
path_end_id_value = vertex.getPathEndIDValue()
path_end_node = self.getVertexUsingIDValue(path_end_id_value)
return path_end_node
def setPathEnd(self, row1, col1, row2, col2):
vertex1 = self.getVertex(row1, col1)
vertex2 = self.getVertex(row2, col2)
id_value1 = vertex1.getIDValue()
id_value2 = vertex2.getIDValue()
old_partner_id = vertex1.getPathEndIDValue()
old_partner_location = idToLocation(old_partner_id, self.getWidth() + 1, self.getHeight() + 1)
path_end_id_value = vertex2.getIDValue()
vertex1.setPathEndIDValue(path_end_id_value)
location1 = (row1, col1)
location2 = (row2, col2)
(self.location_to_incident_path_far_node_id)[old_partner_location].remove(id_value1)
(self.location_to_incident_path_far_node_id)[location1].remove(old_partner_id)
(self.location_to_incident_path_far_node_id)[location2].append(id_value1)
(self.location_to_incident_path_far_node_id)[location1].append(id_value2)
def setNumConnections(self, row, col, val):
vertex = self.getVertex(row, col)
vertex.setNumConnections(val)
def getNumConnections(self, row, col):
vertex = self.getVertex(row, col)
result = vertex.getNumConnections()
return result
@staticmethod
def formKey(grid):
vertex_rows = grid.vertex_rows
W = grid.getWidth()
H = grid.getHeight()
vertices = []
for vertex_row in vertex_rows:
vertices += vertex_row
vertex_keys = [Vertex.formKey(x) for x in vertices]
keys = [W, H] + vertex_keys
result = tuple(keys)
return result
@staticmethod
def formFromKey(key):
W = key[0]
H = key[1]
vertex_keys = key[2 : ]
grid = FullGrid(W, H)
for vertex_key in vertex_keys:
vertex = Vertex.formFromKey(vertex_key)
id_value = vertex.getIDValue()
row1 = vertex.getRow()
col1 = vertex.getCol()
path_end_id_value = vertex.getPathEndIDValue()
base_num_connections = vertex.getBaseNumConnections()
non_base_num_connections = vertex.getNonBaseNumConnections()
is_sentinel = vertex.getIsSentinel()
grid.addVertex(id_value, row1, col1, path_end_id_value, base_num_connections, non_base_num_connections, is_sentinel)
return grid
class Surface(Grid):
def __init__(self, W, H, curr_row_index):
Grid.__init__(self, W, H)
self.curr_row_index = curr_row_index
vertex_rows = defaultdict(lambda: defaultdict(lambda: None))
self.vertex_rows = vertex_rows
self.id_to_vertex_dict = {}
self.location_to_horizontal_cut_edge_path_key_list_dict = defaultdict(lambda: [])
self.location_to_vertical_cut_edge_path_key_list_dict = defaultdict(lambda: [])
self.num_completed_chains = 0
def getNumCompletedChains(self):
return self.num_completed_chains
def setNumCompletedChains(self, val):
self.num_completed_chains = val
def getHorizontalCutEdgeExists(self, location):
matching_path_keys = self.getHorizontalCutEdgePathKeys(location)
num_matching_path_keys = len(matching_path_keys)
return num_matching_path_keys > 0
def getVerticalCutEdgeExists(self, location):
matching_path_keys = self.getVerticalCutEdgePathKeys(location)
num_matching_path_keys = len(matching_path_keys)
return num_matching_path_keys > 0
def getHorizontalCutEdgePathKeys(self, location):
matching_path_keys = (self.location_to_horizontal_cut_edge_path_key_list_dict)[location]
return matching_path_keys[ : ]
def getVerticalCutEdgePathKeys(self, location):
matching_path_keys = (self.location_to_vertical_cut_edge_path_key_list_dict)[location]
return matching_path_keys[ : ]
def _addHorizontalCutEdgePathKey(self, location1, location2):
path_key = Surface.getPathKey(location1, location2)
(self.location_to_horizontal_cut_edge_path_key_list_dict)[location2].append(path_key)
def _addVerticalCutEdgePathKey(self, location1, location2):
path_key = Surface.getPathKey(location1, location2)
(self.location_to_vertical_cut_edge_path_key_list_dict)[location2].append(path_key)
def _removeHorizontalCutEdgePathKey(self, location1, location2):
path_key = Surface.getPathKey(location1, location2)
(self.location_to_horizontal_cut_edge_path_key_list_dict)[location2].remove(path_key)
def _removeVerticalCutEdgePathKey(self, location1, location2):
path_key = Surface.getPathKey(location1, location2)
(self.location_to_vertical_cut_edge_path_key_list_dict)[location2].remove(path_key)
def _idempotentRemoveCutEdgePathKey(self, location1, location2):
path_key = Surface.getPathKey(location1, location2)
if path_key in self.getHorizontalCutEdgePathKeys(location2):
self._removeHorizontalCutEdgePathKey(location1, location2)
return True
elif path_key in self.getVerticalCutEdgePathKeys(location2):
self._removeVerticalCutEdgePathKey(location1, location2)
return False
else:
return None
def addVertex(self, id_value, row1, col1, path_end_id_values, base_num_connections, non_base_num_connections, is_sentinel):
vertex = Vertex(id_value, row1, col1, path_end_id_values[ : ], base_num_connections, non_base_num_connections, is_sentinel)
(self.vertex_rows)[row1][col1] = vertex
(self.id_to_vertex_dict)[id_value] = vertex
return vertex
def getVertex(self, row, col):
return (self.vertex_rows)[row][col]
def getVertexUsingIDValue(self, id_value):
return (self.id_to_vertex_dict)[id_value]
def getPathEndNodes(self, row, col):
vertex = self.getVertex(row, col)
path_end_id_values = vertex.getPathEndIDValues()
path_end_nodes = [self.getVertexUsingIDValue(x) for x in path_end_id_values]
return path_end_nodes
@staticmethod
def getPathKey(location1, location2):
if location1 <= location2:
return (location1, location2)
elif location1 > location2:
return (location2, location1)
def setPathEnd(self, row1, col1, row2, col2, do_override_non_trivial):
vertex1 = self.getVertex(row1, col1)
vertex2 = self.getVertex(row2, col2)
id_value1 = vertex1.getIDValue()
id_value2 = vertex2.getIDValue()
path_end_id_value = vertex1.getIDValue()
if do_override_non_trivial == True:
vertex2.setPathEndIDValues([path_end_id_value])
else:
vertex2.addPathEndIDValue(path_end_id_value)
location1 = (row1, col1)
location2 = (row2, col2)
def idempotentRemovePathEnd(self, row1, col1, id_value):
vertex1 = self.getVertex(row1, col1)
id_value1 = vertex1.getIDValue()
path_end_id_values = vertex1.getPathEndIDValues()
next_path_end_id_values = path_end_id_values[ : ]
if id_value in next_path_end_id_values:
next_path_end_id_values.remove(id_value)
vertex1.setPathEndIDValues(next_path_end_id_values)
def setNumConnections(self, row, col, val):
vertex = self.getVertex(row, col)
vertex.setNumConnections(val)
def getNumConnections(self, row, col):
vertex = self.getVertex(row, col)
result = vertex.getNumConnections()
return result
def getCurrRowIndex(self):
return self.curr_row_index
def setCurrRowIndex(self, curr_row_index):
self.curr_row_index = curr_row_index
def _getLocationToHorizontalCutEdgePathKeyListDict(self):
return self.location_to_horizontal_cut_edge_path_key_list_dict
def _getLocationToVerticalCutEdgePathKeyListDict(self):
return self.location_to_horizontal_cut_edge_path_key_list_dict
@staticmethod
def formKeyOriginal(grid):
vertex_rows = grid.vertex_rows
W = grid.getWidth()
H = grid.getHeight()
curr_row_index = grid.getCurrRowIndex()
k = grid.getNumCompletedChains()
lthcepkld = grid._getLocationToHorizontalCutEdgePathKeyListDict()
ltvcepkld = grid._getLocationToVerticalCutEdgePathKeyListDict()
lthcepkld_components = []
ltvcepkld_components = []
for item in lthcepkld.items():
location, path_key_list = item
next_items = [(location, x) for x in path_key_list]
lthcepkld_components += next_items
lthcepkld_components.sort()
for item in ltvcepkld.items():
location, path_key_list = item
next_items = [(location, x) for x in path_key_list]
ltvcepkld_components += next_items
ltvcepkld_components.sort()
next_lthcepkld_components = tuple(lthcepkld_components)
next_ltvcepkld_components = tuple(ltvcepkld_components)
vertices = []
for vertex_row in vertex_rows.values():
vertices += vertex_row.values()
vertex_keys = [Vertex.formKey(x) for x in vertices]
keys = [W, H, curr_row_index, next_lthcepkld_components, next_ltvcepkld_components, k] + vertex_keys
result = tuple(keys)
return result
@staticmethod
def formKey(grid, row, col):
return Surface.formKeyNextPreMerge(grid, row, col)
@staticmethod
def formKeyNextPreMerge(grid, row, col):
W = grid.getWidth()
H = grid.getHeight()
k = grid.getNumCompletedChains()
num_vertical_components = W + 1
vertical_components = []
horizontal_component = None
id_value = 1
path_key_to_id_dict = {}
for curr_col in xrange(num_vertical_components):
far_vertex_location = None
if curr_col <= col:
far_vertex_location = (row + 1, curr_col)
else:
far_vertex_location = (row, curr_col)
curr_id_value = None
if grid.getVerticalCutEdgeExists(far_vertex_location) == True:
path_keys = grid.getVerticalCutEdgePathKeys(far_vertex_location)
path_key = path_keys[0]
if path_key in path_key_to_id_dict:
curr_id_value = path_key_to_id_dict[path_key]
else:
curr_id_value = id_value
path_key_to_id_dict[path_key] = curr_id_value
id_value += 1
else:
curr_id_value = 0
vertical_components.append(curr_id_value)
far_vertex_location = (row, col + 1)
curr_id_value = None
if grid.getHorizontalCutEdgeExists(far_vertex_location) == True:
path_keys = grid.getHorizontalCutEdgePathKeys(far_vertex_location)
path_key = path_keys[0]
if path_key in path_key_to_id_dict:
curr_id_value = path_key_to_id_dict[path_key]
else:
curr_id_value = id_value
path_key_to_id_dict[path_key] = curr_id_value
id_value += 1
else:
curr_id_value = 0
horizontal_component = curr_id_value
components = vertical_components + [horizontal_component, k]
key = tuple(components)
return key
@staticmethod
def formKeyNextPostMerge(grid, row, col):
W = grid.getWidth()
H = grid.getHeight()
k = grid.getNumCompletedChains()
num_vertical_components = W + 1
vertical_components = []
horizontal_component = None
id_value = 1
path_key_to_id_dict = {}
for curr_col in xrange(num_vertical_components):
far_vertex_location = None
if curr_col <= col:
far_vertex_location = (row + 1, curr_col)
else:
far_vertex_location = (row, curr_col)
curr_id_value = None
if grid.getVerticalCutEdgeExists(far_vertex_location) == True:
path_keys = grid.getVerticalCutEdgePathKeys(far_vertex_location)
path_key = path_keys[0]
if path_key in path_key_to_id_dict:
curr_id_value = path_key_to_id_dict[path_key]
else:
curr_id_value = id_value
path_key_to_id_dict[path_key] = curr_id_value
id_value += 1
else:
curr_id_value = 0
vertical_components.append(curr_id_value)
far_vertex_location = (row, col + 1)
curr_id_value = None
if grid.getHorizontalCutEdgeExists(far_vertex_location) == True:
path_keys = grid.getHorizontalCutEdgePathKeys(far_vertex_location)
path_key = path_keys[0]
if path_key in path_key_to_id_dict:
curr_id_value = path_key_to_id_dict[path_key]
else:
curr_id_value = id_value
path_key_to_id_dict[path_key] = curr_id_value
id_value += 1
else:
curr_id_value = 0
horizontal_component = curr_id_value
components = vertical_components + [horizontal_component, k]
key = tuple(components)
return key
@staticmethod
def formFromKeyOld(key):
W = key[0]
H = key[1]
curr_row_index = key[2]
next_lthcepkld_components = list(key[3])
next_ltvcepkld_components = list(key[4])
k = list(key[5])
vertex_keys = key[6 : ]
grid = Surface(W, H, curr_row_index)
grid.setNumCompletedChains(k)
for item in next_lthcepkld_components:
location, path_key = item
l1, l2 = path_key
location1 = location
location2 = l1 if l2 == location else l2
grid._addHorizontalCutEdgePathKey(location1, location2)
for item in next_ltvcepkld_components:
location, path_key = item
l1, l2 = path_key
location1 = location
location2 = l1 if l2 == location else l2
grid._addVerticalCutEdgePathKey(location1, location2)
for vertex_key in vertex_keys:
vertex = Vertex.formFromKey(vertex_key)
id_value = vertex.getIDValue()
row1 = vertex.getRow()
col1 = vertex.getCol()
path_end_id_values = vertex.getPathEndIDValues()
base_num_connections = vertex.getBaseNumConnections()
non_base_num_connections = vertex.getNonBaseNumConnections()
is_sentinel = vertex.getIsSentinel()
grid.addVertex(id_value, row1, col1, path_end_id_values, base_num_connections, non_base_num_connections, is_sentinel)
return grid
def getVertices(self):
vertex_rows = self.vertex_rows
H = self.getHeight()
curr_row_index = self.getCurrRowIndex()
vertices = []
for i in xrange(max(curr_row_index - 1, 0), curr_row_index + 2):
vertex_row = vertex_rows[i]
next_vertex_row = vertex_row.values()
vertices += next_vertex_row
next_vertices = set(vertices)
for vertex in vertices:
location = vertex.getLocation()
row, col = location
path_ends = self.getPathEndNodes(row, col)
next_vertices |= set(path_ends)
next_next_vertices = list(next_vertices)
return next_next_vertices
def clone(self):
W = self.getWidth()
H = self.getHeight()
curr_row_index = self.getCurrRowIndex()
k = self.getNumCompletedChains()
surface = Surface(W, H, curr_row_index)
surface.setNumCompletedChains(k)
lthcepkld = defaultdict(lambda: [])
for item in self.location_to_horizontal_cut_edge_path_key_list_dict.items():
location, path_key_list = item
if len(path_key_list) != 0:
lthcepkld[location] = path_key_list[ : ]
ltvcepkld = defaultdict(lambda: [])
for item in self.location_to_vertical_cut_edge_path_key_list_dict.items():
location, path_key_list = item
if len(path_key_list) != 0:
ltvcepkld[location] = path_key_list[ : ]
surface.location_to_horizontal_cut_edge_path_key_list_dict = lthcepkld
surface.location_to_vertical_cut_edge_path_key_list_dict = ltvcepkld
vertices = self.getVertices()
for vertex in vertices:
id_value = vertex.getIDValue()
path_end_id_values = vertex.getPathEndIDValues()
base_num_connections = vertex.getBaseNumConnections()
non_base_num_connections = vertex.getNonBaseNumConnections()
location = vertex.getLocation()
is_sentinel = vertex.getIsSentinel()
row, col = location
surface.addVertex(id_value, row, col, path_end_id_values, base_num_connections, non_base_num_connections, is_sentinel)
return surface
def _advanceOneRow(self, reference_full_grid):
curr_row_index = self.getCurrRowIndex()
next_row_index = curr_row_index + 1
prev_row_index = curr_row_index - 1
self.setCurrRowIndex(curr_row_index + 1)
have_prev_row = (curr_row_index - 1) >= 0
next_next_row = reference_full_grid.getVertexRow(curr_row_index + 2)
next_next_row_safe = [x.clone() for x in next_next_row]
safe_vertices = self.getVertices() + next_next_row_safe
safe_vertices_set = set(safe_vertices)
lthcepkld = self.location_to_horizontal_cut_edge_path_key_list_dict
ltvcepkld = self.location_to_vertical_cut_edge_path_key_list_dict
if have_prev_row == True:
vertices = (self.vertex_rows)[prev_row_index].values()
for i in xrange(len(vertices)):
vertex = vertices[i]
id_value = vertex.getIDValue()
location = vertex.getLocation()
if vertex not in safe_vertices_set:
(self.vertex_rows)[prev_row_index].pop(i)
(self.id_to_vertex_dict).pop(id_value)
if location in lthcepkld:
lthcepkld.pop(location)
if location in ltvcepkld:
ltvcepkld.pop(location)
if len((self.vertex_rows)[prev_row_index]) == 0:
(self.vertex_rows).pop(prev_row_index)
for vertex in next_next_row_safe:
id_value = vertex.getIDValue()
path_end_id_values = vertex.getPathEndIDValues()
base_num_connections = vertex.getBaseNumConnections()
non_base_num_connections = vertex.getNonBaseNumConnections()
location = vertex.getLocation()
is_sentinel = vertex.getIsSentinel()
row, col = location
self.addVertex(id_value, row, col, path_end_id_values, base_num_connections, non_base_num_connections, is_sentinel)
class Vertex:
def __init__(self, id_value, row, col, path_end_id_values, base_num_connections, non_base_num_connections, is_sentinel):
self.id_value = id_value
self.path_end_id_values = path_end_id_values
self.base_num_connections = base_num_connections
self.non_base_num_connections = non_base_num_connections
self.row = row
self.col = col
self.is_sentinel = is_sentinel
def getIDValue(self):
return self.id_value
def getRow(self):
return self.row
def getCol(self):
return self.col
def getNumConnections(self):
return self.getBaseNumConnections() + self.getNonBaseNumConnections()
def setNumConnections(self, val):
base_num_connections = self.getBaseNumConnections()
non_base_num_connections = val - base_num_connections
self.setNonBaseNumConnections(non_base_num_connections)
def getNonBaseNumConnections(self):
return self.non_base_num_connections
def setNonBaseNumConnections(self, val):
self.non_base_num_connections = val
def getBaseNumConnections(self):
return self.base_num_connections
def setBaseNumConnections(self, val):
self.base_num_connections = val
def getLocation(self):
return (self.row, self.col)
def toLocationString(self):
return str(self.getLocation())
def toString(self):
node1 = self
node2 = self.getPathEnd()
node_str1 = node1.toLocationString()
node_str2 = node2.toLocationString()
result = "(" + node_str1 + ", " + node_str2 + ")"
return result
def getPathEndIDValues(self):
return self.path_end_id_values
def addPathEndIDValue(self, path_end_id_value):
curr_id_value = self.getIDValue()
all_trivial = True
for id_value in self.path_end_id_values:
if id_value != curr_id_value:
all_trivial = False
break
if all_trivial == True:
self.path_end_id_values = [path_end_id_value]
else:
self.path_end_id_values.append(path_end_id_value)
def setPathEndIDValues(self, path_end_id_values):
next_path_end_id_values = path_end_id_values[ : ]
self.path_end_id_values = next_path_end_id_values
@staticmethod
def formKey(vertex):
id_value = vertex.getIDValue()
path_end_id_values = vertex.getPathEndIDValues()
base_num_connections = vertex.base_num_connections
non_base_num_connections = vertex.non_base_num_connections
location = vertex.getLocation()
is_sentinel = vertex.getIsSentinel()
components = [id_value, location, path_end_id_values, base_num_connections, non_base_num_connections, is_sentinel]
next_components = tuple(components)
return next_components
@staticmethod
def formFromKey(key):
id_value, location, path_end_id_values, base_num_connections, non_base_num_connections, is_sentinel = key
row1, col1 = location
vertex = Vertex(id_value, row1, col1, path_end_id_values, base_num_connections, non_base_num_connections, is_sentinel)
return vertex
def clone(self):
key = Vertex.formKey(self)
vertex = Vertex.formFromKey(key)
return vertex
def getIsSentinel(self):
return self.is_sentinel
def setIsSentinel(self, is_sentinel):
self.is_sentinel = is_sentinel
class Connection:
def __init__(self):
self.short_connected = None
self.long_connected = None
self.room = None
self.neighbor = None
self.room_partner = None
self.neighbor_partner = None
def connectShort(self, location1, location2, full_grid, is_for_horizontal_cut_edge, is_for_second_to_last_cell, prev_k, do_override_non_trivial_head, do_override_non_trivial_base):
row1, col1 = location1
row2, col2 = location2
vertex_a = full_grid.getVertex(row1, col1)
vertex_b = full_grid.getVertex(row2, col2)
room = vertex_a
neighbor = vertex_b
short_connected = False
num_connections1 = full_grid.getNumConnections(room.getRow(), room.getCol())
num_connections2 = full_grid.getNumConnections(neighbor.getRow(), neighbor.getCol())
room_partner = None
neighbor_partner = None
created_cycle_for_last_cell = False
if num_connections1 != 2 and num_connections2 != 2:
safe_to_continue = False
if is_for_second_to_last_cell == True and room in full_grid.getPathEndNodes(neighbor.getRow(), neighbor.getCol()) and prev_k == 0:
safe_to_continue = True
created_cycle_for_last_cell = True
if room not in full_grid.getPathEndNodes(neighbor.getRow(), neighbor.getCol()):
safe_to_continue = True
if safe_to_continue == True:
nodes1 = full_grid.getPathEndNodes(room.getRow(), room.getCol())
nodes2 = full_grid.getPathEndNodes(neighbor.getRow(), neighbor.getCol())
matches1 = full_grid.getPathEndNodes(nodes1[0].getRow(), nodes1[0].getCol())
matches2 = full_grid.getPathEndNodes(nodes2[0].getRow(), nodes2[0].getCol())
room_partner = None
if len(nodes1) > 1:
room_partner = nodes1[0] if nodes1[0] in matches1 else nodes1[1]
else:
room_partner = nodes1[0]
neighbor_partner = None
if len(nodes2) > 1:
neighbor_partner = nodes2[0] if nodes2[0] in matches2 else nodes2[1]
else:
neighbor_partner = nodes2[0]
assert(room in full_grid.getPathEndNodes(room_partner.getRow(), room_partner.getCol()))
assert(neighbor in full_grid.getPathEndNodes(neighbor_partner.getRow(), neighbor_partner.getCol()))
short_connected = True
full_grid.setPathEnd(room_partner.getRow(), room_partner.getCol(), neighbor.getRow(), neighbor.getCol(), do_override_non_trivial_head)
full_grid.setPathEnd(neighbor.getRow(), neighbor.getCol(), room_partner.getRow(), room_partner.getCol(), do_override_non_trivial_base)
full_grid.idempotentRemovePathEnd(room.getRow(), room.getCol(), room_partner.getIDValue())
full_grid.idempotentRemovePathEnd(room_partner.getRow(), room_partner.getCol(), room.getIDValue())
full_grid.setNumConnections(room.getRow(), room.getCol(), room.getNumConnections() + 1)
full_grid.setNumConnections(neighbor.getRow(), neighbor.getCol(), neighbor.getNumConnections() + 1)
was_horizontal1 = full_grid._idempotentRemoveCutEdgePathKey(room.getLocation(), room_partner.getLocation())
full_grid._idempotentRemoveCutEdgePathKey(room_partner.getLocation(), room.getLocation())
if was_horizontal1 == True or (was_horizontal1 == None and is_for_horizontal_cut_edge == True):
full_grid._addHorizontalCutEdgePathKey(neighbor.getLocation(), room_partner.getLocation())
elif was_horizontal1 == False or (was_horizontal1 == None and is_for_horizontal_cut_edge == False):
full_grid._addVerticalCutEdgePathKey(neighbor.getLocation(), room_partner.getLocation())
if is_for_horizontal_cut_edge == True:
full_grid._addHorizontalCutEdgePathKey(room_partner.getLocation(), neighbor.getLocation())
elif is_for_horizontal_cut_edge == False:
full_grid._addVerticalCutEdgePathKey(room_partner.getLocation(), neighbor.getLocation())
self.short_connected = short_connected
self.long_connected = False
self.room = room
self.neighbor = neighbor
self.room_partner = room_partner
self.neighbor_partner = neighbor_partner
return created_cycle_for_last_cell
def connectLong(self, location1, location2, full_grid, is_for_horizontal_cut_edge, is_for_second_to_last_cell, prev_k):
assert(location1 == location2)
row1, col1 = location1
row2, col2 = location2
vertex_a = full_grid.getVertex(row1, col1)
vertex_b = full_grid.getVertex(row2, col2)
room = vertex_a
neighbor = vertex_b
merge_connected = False
num_connections1 = full_grid.getNumConnections(room.getRow(), room.getCol())
num_connections2 = full_grid.getNumConnections(neighbor.getRow(), neighbor.getCol())
room_partner = None
neighbor_partner = None
short_connected = True
candidate_nodes = full_grid.getPathEndNodes(room.getRow(), room.getCol())
room_partner = candidate_nodes[0]
neighbor_partner = candidate_nodes[1]
long_connected = True
full_grid.setPathEnd(room_partner.getRow(), room_partner.getCol(), neighbor_partner.getRow(), neighbor_partner.getCol(), True)
full_grid.setPathEnd(neighbor_partner.getRow(), neighbor_partner.getCol(), room_partner.getRow(), room_partner.getCol(), True)
was_horizontal1 = None
was_horizontal2 = None
if short_connected == False:
raise Exception()
elif short_connected == True:
was_horizontal1 = full_grid._idempotentRemoveCutEdgePathKey(neighbor.getLocation(), room_partner.getLocation())
full_grid._idempotentRemoveCutEdgePathKey(room_partner.getLocation(), neighbor.getLocation())
if short_connected == False:
raise Exception()
elif short_connected == True:
was_horizontal2 = full_grid._idempotentRemoveCutEdgePathKey(neighbor.getLocation(), neighbor_partner.getLocation())
full_grid._idempotentRemoveCutEdgePathKey(neighbor_partner.getLocation(), neighbor.getLocation())
if was_horizontal1 == True or (was_horizontal1 == None and is_for_horizontal_cut_edge == True):
full_grid._addHorizontalCutEdgePathKey(neighbor_partner.getLocation(), room_partner.getLocation())
elif was_horizontal1 == False or (was_horizontal1 == None and is_for_horizontal_cut_edge == False):
full_grid._addVerticalCutEdgePathKey(neighbor_partner.getLocation(), room_partner.getLocation())
if was_horizontal2 == True or (was_horizontal2 == None and is_for_horizontal_cut_edge == True):
full_grid._addHorizontalCutEdgePathKey(room_partner.getLocation(), neighbor_partner.getLocation())
elif was_horizontal2 == False or (was_horizontal2 == None and is_for_horizontal_cut_edge == False):
full_grid._addVerticalCutEdgePathKey(room_partner.getLocation(), neighbor_partner.getLocation())
self.long_connected = long_connected
self.room = room
self.neighbor = neighbor
self.room_partner = room_partner
self.neighbor_partner = neighbor_partner
def successfullyConnected(self):
return self.short_connected
@staticmethod
def formKey(connection):
connected = connection.connected
room = connection.room
neighbor = connection.neighbor
room_partner = connection.room_partner
neighbor_partner = connection.neighbor_partner
components = [connected, room.getLocation(), neighbor.getLocation(), room_partner.getLocation(), neighbor_partner.getLocation()]
next_components = tuple(components)
return next_components
@staticmethod
def formFromKey(key, location_to_vertex_dict):
connected, location1, location2, location3, location4 = key
room = location_to_vertex_dict[location1]
neighbor = location_to_vertex_dict[location2]
room_partner = location_to_vertex_dict[location3]
neighbor_partner = location_to_vertex_dict[location4]
connection = Connection()
connection.connected = connected
connection.room = room
connection.neighbor = neighbor
connection.room_partner = room_partner
connection.neighbor_partner = neighbor_partner
return connection
class SolutionCounter:
def __init__(self, count = 0):
self.count = count
def getCount(self):
return self.count
def setCount(self, count):
self.count = count
def increment(self):
self.count += 1
def incrementBy(self, val):
self.count += val
def getKeyWithOrderChanged(key, val):
num_components = len(key)
key_list = list(key)
leading_components = key_list[ : num_components - 1]
order_component = key_list[num_components - 1]
next_key_list = leading_components + [val]
next_key = tuple(next_key_list)
return next_key
def getKeyWithOrderChangedToNegativeOne(key):
return getKeyWithOrderChanged(key, -1)
def keyHasNegativeOneK(key):
num_components = len(key)
key_list = list(key)
order_component = key_list[num_components - 1]
result = order_component == -1
return result
def getKeyWithOrderIncremented(key):
num_components = len(key)
key_list = list(key)
order_component = key_list[num_components - 1]
return getKeyWithOrderChanged(key, order_component + 1)
def getOrderForKey(key):
num_components = len(key)
key_list = list(key)
order_component = key_list[num_components - 1]
return order_component
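# Example with a width-2 surface key (W + 3 = 5 components, the last one being
# the running order value k):
#   key = (0, 0, 0, 0, 3)
#   getOrderForKey(key)                      -> 3
#   getKeyWithOrderIncremented(key)          -> (0, 0, 0, 0, 4)
#   getKeyWithOrderChangedToNegativeOne(key) -> (0, 0, 0, 0, -1)
#   keyHasNegativeOneK((0, 0, 0, 0, -1))     -> True
# solve() below sweeps the grid cell by cell in a broken-profile style dynamic
# program, mapping each reachable surface key to the number of ways of
# producing it and branching on whether the current cell already has 0, 1 or 2
# connections.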
def solve(full_grid, grid):
W = grid.getWidth()
H = grid.getHeight()
curr_grid_key_to_count_dict = defaultdict(lambda: 0)
curr_grid_key_to_surface_dict = {}
initial_surface = grid
initial_key = tuple([0] * (W + 3))
curr_grid_key_to_count_dict[initial_key] = 1
curr_grid_key_to_surface_dict[initial_key] = initial_surface
next_grid_key_to_count_dict = defaultdict(lambda: 0)
next_grid_key_to_surface_dict = {}
for row in xrange(H):
for col in xrange(W):
grid_key_count_pairs = curr_grid_key_to_count_dict.items()
is_for_second_to_last_cell = row == H - 1 and col == W - 2
for pair in grid_key_count_pairs:
grid_key, count = pair
surface = curr_grid_key_to_surface_dict[grid_key]
curr_k = getOrderForKey(grid_key)
next_row, next_col = getNextRowAndColumn(row, col, W, H)
if keyHasNegativeOneK(grid_key) == True:
next_surface_key = tuple([0] * (W + 3))
next_surface_key = getKeyWithOrderChangedToNegativeOne(next_surface_key)
next_grid_key_to_count_dict[next_surface_key] += count
next_grid_key_to_surface_dict[next_surface_key] = surface
continue
if row > 0 and col == 0:
surface._advanceOneRow(full_grid)
vertex = surface.getVertex(row, col)
vertex_right = surface.getVertex(row + 1, col)
vertex_down = surface.getVertex(row, col + 1)
num_connections = surface.getNumConnections(row, col)
intermediate_surface_key = None
if (row == 0 and col == 0):
intermediate_surface_key = tuple([0] * (W + 3))
else:
prev_row, prev_col = getPriorRowAndColumn(row, col, W, H)
intermediate_surface_key = Surface.formKeyNextPreMerge(surface, prev_row, prev_col)
if num_connections == 2:
surface1 = surface
left_match = intermediate_surface_key[col]
top_match = intermediate_surface_key[W + 1]
key_id_values = list(intermediate_surface_key[ : W + 2])
if ((left_match != 0 and key_id_values.count(left_match) == 1) and (top_match != 0 and key_id_values.count(top_match) == 1)):
surface1.setNumCompletedChains(surface1.getNumCompletedChains() + 1)
c1 = Connection()
c1.connectLong(vertex.getLocation(), vertex.getLocation(), surface1, False, is_for_second_to_last_cell, curr_k)
next_surface_key = Surface.formKeyNextPostMerge(surface1, row, col)
next_grid_key_to_count_dict[next_surface_key] += count
next_grid_key_to_surface_dict[next_surface_key] = surface1
continue
if num_connections == 0:
surface1 = surface
surface2 = surface.clone()
surface3 = surface.clone()
adjacent_vertical_match = intermediate_surface_key[col + 1]
key_id_values = list(intermediate_surface_key[ : W + 2])
c1 = Connection()
c2 = Connection()
c1.connectShort(vertex.getLocation(), vertex_right.getLocation(), surface1, False, is_for_second_to_last_cell, curr_k, True, True)
c2.connectShort(vertex.getLocation(), vertex_down.getLocation(), surface1, True, is_for_second_to_last_cell, curr_k, False, False)
if c1.successfullyConnected() and c2.successfullyConnected():
next_surface_key = Surface.formKeyNextPreMerge(surface1, row, col)
next_grid_key_to_count_dict[next_surface_key] += count
next_grid_key_to_surface_dict[next_surface_key] = surface1
left_match = intermediate_surface_key[col]
top_match = intermediate_surface_key[W + 1]
key_id_values = list(intermediate_surface_key[ : W + 2])
c1 = Connection()
c1.connectShort(vertex.getLocation(), vertex_right.getLocation(), surface2, False, is_for_second_to_last_cell, curr_k, True, True)
if c1.successfullyConnected():
next_surface_key = Surface.formKeyNextPreMerge(surface2, row, col)
next_grid_key_to_count_dict[next_surface_key] += count
next_grid_key_to_surface_dict[next_surface_key] = surface2
adjacent_vertical_match1 = intermediate_surface_key[col + 1]
key_id_values = list(intermediate_surface_key[ : W + 2])
c1 = Connection()
ccflc = c1.connectShort(vertex.getLocation(), vertex_down.getLocation(), surface3, True, is_for_second_to_last_cell, curr_k, False, True)
if c1.successfullyConnected():
next_surface_key = Surface.formKeyNextPreMerge(surface3, row, col)
if ccflc == True:
next_surface_key = getKeyWithOrderChangedToNegativeOne(tuple([0] * (W + 3)))
next_grid_key_to_count_dict[next_surface_key] += count
next_grid_key_to_surface_dict[next_surface_key] = surface3
continue
elif num_connections == 1:
surface1 = surface
surface2 = surface.clone()
surface3 = surface.clone()
left_match = intermediate_surface_key[col]
top_match = intermediate_surface_key[W + 1]
key_id_values = list(intermediate_surface_key[ : W + 2])
if (left_match != 0 and key_id_values.count(left_match) == 1) or (top_match != 0 and key_id_values.count(top_match) == 1):
surface1.setNumCompletedChains(surface1.getNumCompletedChains() + 1)
next_surface_key = Surface.formKeyNextPreMerge(surface1, row, col)
next_grid_key_to_count_dict[next_surface_key] += count
next_grid_key_to_surface_dict[next_surface_key] = surface1
left_match = intermediate_surface_key[col]
top_match = intermediate_surface_key[W + 1]
key_id_values = list(intermediate_surface_key[ : W + 2])
c1 = Connection()
c1.connectShort(vertex.getLocation(), vertex_right.getLocation(), surface2, False, is_for_second_to_last_cell, curr_k, True, True)
if c1.successfullyConnected() == True:
next_surface_key2 = Surface.formKeyNextPreMerge(surface2, row, col)
next_grid_key_to_count_dict[next_surface_key2] += count
next_grid_key_to_surface_dict[next_surface_key2] = surface2
left_match = intermediate_surface_key[col]
top_match = intermediate_surface_key[W + 1]
adjacent_vertical_match = intermediate_surface_key[col + 1]
key_id_values = list(intermediate_surface_key[ : W + 2])
c2 = Connection()
ccflc = c2.connectShort(vertex.getLocation(), vertex_down.getLocation(), surface3, True, is_for_second_to_last_cell, curr_k, False, True)
if c2.successfullyConnected() == True:
next_surface_key3 = Surface.formKeyNextPreMerge(surface3, row, col)
if ccflc == True:
next_surface_key3 = getKeyWithOrderChangedToNegativeOne(tuple([0] * (W + 3)))
next_grid_key_to_count_dict[next_surface_key3] += count
next_grid_key_to_surface_dict[next_surface_key3] = surface3
curr_grid_key_to_count_dict = next_grid_key_to_count_dict
next_grid_key_to_count_dict = defaultdict(lambda: 0)
curr_grid_key_to_surface_dict = next_grid_key_to_surface_dict
next_grid_key_to_surface_dict = {}
result_dict = {}
for key_count_pair in curr_grid_key_to_count_dict.items():
key, count = key_count_pair
next_key = key
result_dict[next_key] = count
return result_dict
def drawGrid(grid, W, H):
str_grid = []
for i in xrange(H):
row = grid[i]
str_row = []
for j in xrange(W):
            vertex = row[j]
            vertex_str = vertex.toString()
str_row.append(vertex_str)
str_grid.append(str_row)
for row in str_grid:
print row
import sys
import string
args = sys.argv
file_name = args[0]
raw_W = string.atoi(args[1])
raw_H = string.atoi(args[2])
W = min([raw_W, raw_H])
H = max([raw_W, raw_H])
print "width and height:", W, H
rows = []
for i in xrange(H):
row = [0] * W
rows.append(row)
full_grid = FullGrid(W, H)
grid2 = Surface(W, H, 0)
id_value = 0
for i in xrange(H + 1):
vertex_row = []
for j in xrange(W + 1):
kind = 1
base_num_connections = 0
if (i < H and j < W):
kind = rows[i][j]
if kind == 0:
base_num_connections = 0
elif kind == 1:
base_num_connections = 2
elif kind == 2:
base_num_connections = 1
elif kind == 3:
base_num_connections = 1
vertex = grid2.addVertex(id_value, i, j, [id_value], base_num_connections, 0, False)
full_grid.addVertex(id_value, i, j, id_value, base_num_connections, 0, False)
id_value += 1
result_dict = solve(full_grid, grid2)
scores = result_dict.items()
next_scores = [(getOrderForKey(x[0]), x[1]) for x in scores]
next_next_scores = [(x[0], x[1]) if x[0] != -1 else (x[0] + 1, x[1]) for x in next_scores]
next_next_scores.sort(key = lambda x: x[0])
for score_pair in next_next_scores:
score, count = score_pair
print "order " + str(score) + " count is " + str(count)
| 3.140625 | 3 |
recommender/export.py | google/article-recommender | 8 | 12786569 | <gh_stars>1-10
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exports user's history to a CSV file."""
from __future__ import division
import csv
from datetime import datetime
from datetime import timedelta
import os
import pickle
import webapp2
import cloudstorage as gcs
from mapreduce import mapreduce_pipeline
from mapreduce import mapper_pipeline
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from recommender import config
from recommender import models
from recommender import pipelines
# Keyed by user id.
class ExportRatingsResult(ndb.Model):
in_progress = ndb.BooleanProperty()
date = ndb.DateTimeProperty(auto_now=True)
# The key used to make the download url non-guessable.
download_key = ndb.StringProperty()
filename = ndb.StringProperty()
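# The export runs as a MapReduce job: the map stage emits one pickled
# (url, rating, date, category, title) record per rating, keyed by user id;
# the reduce stage writes the user's CSV to Cloud Storage, records an
# ExportRatingsResult with a random download key, and schedules its cleanup.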
def ExportRatingsMap(rating):
title = models.GetPageInfo(rating.url).title
category_name = ''
if rating.category:
category = rating.category.get()
if category:
category_name = category.name
yield [
rating.user_id,
pickle.dumps(
[rating.url, rating.rating, rating.date, category_name, title])
]
HEADER_NAMES = ['date', 'url', 'rating', 'category', 'title']
def GetExportStatus(user_id):
# Have to disable memcache because it returns the cached value with
# in_progress = True.
result = ndb.Key(ExportRatingsResult, user_id).get(
use_cache=False, use_memcache=False)
if not result:
return None
return {
'in_progress': result.in_progress,
'download_key': result.download_key,
'generated_date': result.date,
}
def WriteLatestExportResult(user_id, key, output):
result = ndb.Key(ExportRatingsResult, user_id).get()
if not result:
return
if result.download_key != key:
return
with gcs.open(result.filename) as fp:
output.write(fp.read())
_EXPORT_RESULT_TTL = timedelta(days=2)
def ExportRatingsReduce(user_id, values):
filename = '/' + '/'.join([config.GetBucketName(), 'export', str(user_id)])
write_retry_params = gcs.RetryParams(backoff_factor=1.1)
output = gcs.open(
filename, 'w', content_type='text/csv', retry_params=write_retry_params)
writer = csv.writer(output, doublequote=False, escapechar='\\')
writer.writerow(HEADER_NAMES)
for value in values:
url, rating, date, category_name, title = pickle.loads(value)
date_string = date.strftime('%Y-%m-%d-%H%M%S')
writer.writerow([
date_string,
unicode(url).encode('utf-8'),
str(rating),
unicode(category_name).encode('utf-8'),
unicode(title).encode('utf-8')
])
output.close()
ExportRatingsResult(
key=ndb.Key(ExportRatingsResult, user_id),
in_progress=False,
filename=filename,
date=datetime.now(),
download_key=os.urandom(32).encode('hex')).put()
# Clean up the history dump after two days so that we don't have old
# recommendations around (in case the user deletes their previous
# recommendations).
deferred.defer(
_CleanUpOldExportResult,
user_id,
datetime.now(),
_countdown=_EXPORT_RESULT_TTL.total_seconds())
def _CleanUpOldExportResult(user_id, date):
result = ndb.Key(ExportRatingsResult, user_id).get()
if not result:
return
if result.date > date:
return
gcs.delete(result.filename)
result.key.delete()
def CreateExportRatingsPipeline(user_id):
ExportRatingsResult(
key=ndb.Key(ExportRatingsResult, user_id), in_progress=True).put()
return mapreduce_pipeline.MapreducePipeline(
'export-ratings',
pipelines.FullName(ExportRatingsMap),
pipelines.FullName(ExportRatingsReduce),
'mapreduce.input_readers.DatastoreInputReader',
mapper_params={
'entity_kind': pipelines.FullName(models.PageRating),
'filters': [('user_id', '=', user_id)]
},
shards=pipelines.DEFAULT_SHARDS)
def CleanUpOldExportsMap(export_result):
if datetime.now() > export_result.date + _EXPORT_RESULT_TTL:
gcs.delete(export_result.filename)
export_result.key.delete()
class CleanUpOldExportsPipeline(pipelines.SelfCleaningPipeline):
def run(self):
yield mapper_pipeline.MapperPipeline(
'clean_up_old_exports',
pipelines.FullName(CleanUpOldExportsMap),
'mapreduce.input_readers.DatastoreInputReader',
params={'entity_kind': pipelines.FullName(ExportRatingsResult)},
shards=pipelines.DEFAULT_SHARDS)
# This pipeline is a secondary mechanism to clean up exported ratings in case
# the primary mechanism that uses deferred.defer(_CleanUpOldExportResult) fails.
class CleanUpOldExportsHandler(webapp2.RequestHandler):
def get(self):
CleanUpOldExportsPipeline().start()
application = webapp2.WSGIApplication([
('/admin/cron/clean_up_old_exports', CleanUpOldExportsHandler),
])
| 2.203125 | 2 |
pluto/coms/client/protos/account_state_pb2.py | chalant/pluto | 0 | 12786570 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: contrib/coms/client/protos/account_state.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from protos import protocol_pb2 as contrib_dot_coms_dot_protos_dot_protocol__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='contrib/coms/client/protos/account_state.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n.contrib/coms/client/protos/account_state.proto\x1a\"contrib/coms/protos/protocol.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xe8\x01\n\x0c\x41\x63\x63ountState\x12\x1d\n\tportfolio\x18\x01 \x01(\x0b\x32\n.Portfolio\x12\x19\n\x07\x61\x63\x63ount\x18\x02 \x01(\x0b\x32\x08.Account\x12\x33\n\x0flast_checkpoint\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x16\n\x06orders\x18\x04 \x03(\x0b\x32\x06.Order\x12\x31\n\rfirst_session\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1e\n\rdaily_returns\x18\x06 \x03(\x0b\x32\x07.Return\"F\n\x06Return\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05value\x18\x02 \x01(\x02\x62\x06proto3')
,
dependencies=[contrib_dot_coms_dot_protos_dot_protocol__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_ACCOUNTSTATE = _descriptor.Descriptor(
name='AccountState',
full_name='AccountState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='portfolio', full_name='AccountState.portfolio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='account', full_name='AccountState.account', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='last_checkpoint', full_name='AccountState.last_checkpoint', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='orders', full_name='AccountState.orders', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='first_session', full_name='AccountState.first_session', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='daily_returns', full_name='AccountState.daily_returns', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=120,
serialized_end=352,
)
_RETURN = _descriptor.Descriptor(
name='Return',
full_name='Return',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='Return.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Return.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=354,
serialized_end=424,
)
_ACCOUNTSTATE.fields_by_name['portfolio'].message_type = contrib_dot_coms_dot_protos_dot_protocol__pb2._PORTFOLIO
_ACCOUNTSTATE.fields_by_name['account'].message_type = contrib_dot_coms_dot_protos_dot_protocol__pb2._ACCOUNT
_ACCOUNTSTATE.fields_by_name['last_checkpoint'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ACCOUNTSTATE.fields_by_name['orders'].message_type = contrib_dot_coms_dot_protos_dot_protocol__pb2._ORDER
_ACCOUNTSTATE.fields_by_name['first_session'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ACCOUNTSTATE.fields_by_name['daily_returns'].message_type = _RETURN
_RETURN.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name['AccountState'] = _ACCOUNTSTATE
DESCRIPTOR.message_types_by_name['Return'] = _RETURN
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AccountState = _reflection.GeneratedProtocolMessageType('AccountState', (_message.Message,), dict(
DESCRIPTOR = _ACCOUNTSTATE,
__module__ = 'contrib.coms.client.protos.account_state_pb2'
# @@protoc_insertion_point(class_scope:AccountState)
))
_sym_db.RegisterMessage(AccountState)
Return = _reflection.GeneratedProtocolMessageType('Return', (_message.Message,), dict(
DESCRIPTOR = _RETURN,
__module__ = 'contrib.coms.client.protos.account_state_pb2'
# @@protoc_insertion_point(class_scope:Return)
))
_sym_db.RegisterMessage(Return)
# @@protoc_insertion_point(module_scope)
| 0.953125 | 1 |
src/main.py | AndreaRoss96/gym-cricket-robot | 0 | 12786571 | <filename>src/main.py
import os
import numpy as np
import argparse
import matplotlib.pyplot as plt
import pywavefront as pw
from copy import deepcopy
import torch
from numpy.lib.polynomial import RankWarning
import time
from utils.util import get_output_folder
from gym_cricket.envs.cricket_env import CricketEnv
from ddpg import DDPG
from neural_network.actor_nn import Actor
from utils.OUNoise import OUNoise
from utils.auxiliaryFuncs import init_nn
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='PyTorch on TORCS with Multi-modal')
# environment arguments
parser.add_argument('--mode', default='train', type=str, help='support option: train/test')
parser.add_argument('--env', default='Cricket-v0', type=str, help='open-ai gym environment')
parser.add_argument('--num_episodes', default=100000, type=int, help='total training episodes')
parser.add_argument('--step_episode', default=400, type=int, help='simulation steps per episode')
parser.add_argument('--early_stop', default=100, type=int, help='change episode after [early_stop] steps with a non-growing reward')
parser.add_argument('--cricket', default='basic_cricket', type=str, help='[hebi_cricket, basic_cricket] - cricket urdf model you want to load')
parser.add_argument('--terrain', default='flat', type=str, help='name of the terrain you want to load')
# reward function
parser.add_argument('--w_X', default=0.5, type=float, help='weight X to compute difference between the robot and the optimal position. Used in the reward function')
parser.add_argument('--w_Y', default=0.5, type=float, help='weight Y to compute difference between the robot and the optimal position. Used in the reward function')
parser.add_argument('--w_Z', default=0.5, type=float, help='weight Z to compute difference between the robot and the optimal position. Used in the reward function')
parser.add_argument('--w_theta', default=0.5, type=float, help='weight theta to compute difference between the robot and the optimal position. Used in the reward function')
parser.add_argument('--w_sigma', default=0.5, type=float, help='weight sigma to compute difference between the robot and the optimal position. Used in the reward function')
parser.add_argument('--disct_factor', default=0.99, type=float, help='discount factor for learnin in the reward function')
parser.add_argument('--w_joints', default=1.5, type=float, help='weight to punish bad joints behaviours in the reward function')
# neural networks
parser.add_argument('--hidden1', default=400, type=int, help='hidden num of first fully connect layer')
parser.add_argument('--hidden2', default=300, type=int, help='hidden num of second fully connect layer')
parser.add_argument('--hidden3', default=150, type=int, help='hidden num of third fully connect layer')
parser.add_argument('--hidden4', default=0, type=int, help='hidden num of fourth fully connect layer')
parser.add_argument('--hidden5', default=0, type=int, help='hidden num of fifth fully connect layer')
parser.add_argument('--conv_hidden1', default=0, type=int, help='hidden num of first convolutional layer')
parser.add_argument('--conv_hidden2', default=0, type=int, help='hidden num of second convolutional layer')
parser.add_argument('--conv_hidden3', default=0, type=int, help='hidden num of third convolutional layer')
parser.add_argument('--conv_hidden4', default=0, type=int, help='hidden num of fourth convolutional layer')
parser.add_argument('--conv_hidden5', default=0, type=int, help='hidden num of fifth convolutional layer')
parser.add_argument('--kernel_size1', default=1, type=int, help='num of first kernel for cnn')
parser.add_argument('--kernel_size2', default=0, type=int, help='num of second kernel for cnn')
parser.add_argument('--kernel_size3', default=0, type=int, help='num of third kernel for cnn')
parser.add_argument('--kernel_size4', default=0, type=int, help='num of fourth kernel for cnn')
# ddpg arguments
parser.add_argument('--bsize', default=64, type=int, help='minibatch size')
parser.add_argument('--rate', default=0.001, type=float, help='learning rate')
parser.add_argument('--prate', default=0.0001, type=float, help='policy net learning rate (only for DDPG)')
parser.add_argument('--warmup', default=250, type=int, help='time without training but only filling the replay memory')
parser.add_argument('--discount', default=0.99, type=float, help='')
parser.add_argument('--rmsize', default=6000000, type=int, help='memory size')
parser.add_argument('--window_length', default=1, type=int, help='')
parser.add_argument('--tau', default=0.001, type=float, help='moving average for target network')
parser.add_argument('--ou_theta', default=0.0001, type=float, help='noise theta')
parser.add_argument('--ou_sigma', default=0.0002, type=float, help='noise sigma')
parser.add_argument('--ou_mu', default=0.0, type=float, help='noise mu')
# TODO
parser.add_argument('--validate_episodes', default=20, type=int, help='how many episode to perform during validate experiment')
parser.add_argument('--max_episode_length', default=500, type=int, help='')
parser.add_argument('--validate_steps', default=2000, type=int, help='how many steps to perform a validate experiment')
parser.add_argument('--output', default='output', type=str, help='')
parser.add_argument('--debug', dest='debug', action='store_true')
parser.add_argument('--init_w', default=0.003, type=float, help='')
parser.add_argument('--train_iter', default=200000,type=int, help='train iters each timestep')
parser.add_argument('--epsilon', default=50000,type=int, help='linear decay of exploration policy')
parser.add_argument('--seed', default=-1, type=int, help='')
parser.add_argument('--resume', default='default',type=str, help='Resuming model path for testing')
# parsing argument
args = parser.parse_args()
args.output = get_output_folder(args.output, args.env)
if args.resume == 'default':
# args.resume = 'output/{}-run6'.format(args.env)
args.resume = 'output/{}-run0'.format(args.env)
env = CricketEnv(
#plane_path='src/gym_cricket/assests/terrains/' + args.terrain + '/' + args.terrain + '.urdf',
cricket_model = args.cricket)
noise = OUNoise(env.action_space)
num_episodes = args.num_episodes
step_per_episode = args.step_episode
rewards = []
avg_rewards = []
## Set the final Goal @TODO read this from a file
wheels = [0.0] * 8
limbs = [0.0, -np.pi/2, np.pi, -np.pi/2, 0.0, np.pi/2,\
np.pi, np.pi/2, 0.0,-np.pi/2, np.pi, -np.pi/2, 0.0,\
np.pi/2, np.pi, np.pi/2, 0.0, 0.0]
goals = np.concatenate([wheels,limbs])
env.set_goal(joint_position=goals)
_, limb_joints, _ = env.cricket.get_joint_ids()
num_limb_joints = len(limb_joints)
env.set_reward_values(
w_joints = np.full((num_limb_joints,), args.w_joints),
disc_factor = 0.5,
        w_X=args.w_X, w_Y=args.w_Y, w_Z=args.w_Z,
        w_theta=args.w_theta, w_sigma=args.w_sigma)
f_name = os.path.join(os.path.dirname(__file__), 'gym_cricket/assests/terrains/' + args.terrain + '/' + args.terrain + '.obj')
scene = pw.Wavefront(f_name)
terrain = np.array(scene.vertices)
terrain = np.reshape(terrain, (4,3,1,1,1))
# terrain = torch.FloatTensor(terrain)
## Initialize neural networks
# hidden layers for fully connected neural network (robot)
hidden_layers = [args.hidden1,args.hidden2,args.hidden3,args.hidden4,args.hidden5]
    hidden_layers = [layers for layers in hidden_layers if layers != 0]
# convolutional layers for convolutional neural network (terrain)
conv_hidden_layers = [args.conv_hidden1,args.conv_hidden2,args.conv_hidden3,args.conv_hidden4,args.conv_hidden5]
    conv_hidden_layers = [layers for layers in conv_hidden_layers if layers != 0]
# kernel sizes for convolutional neural network (terrain)
kernel_sizes = [args.kernel_size1, args.kernel_size2, args.kernel_size3, args.kernel_size4]
    kernel_sizes = [layers for layers in kernel_sizes if layers != 0]
actor, critic, actor_target, critic_target = init_nn(
env, terrain,
hidden_layers = hidden_layers,
conv_layers= conv_hidden_layers,
kernel_sizes=kernel_sizes)
# Initialize DDPG
ddpg = DDPG(env, actor, critic, actor_target, critic_target, terrain,args)
# output
output = 'weights_out0'
output = get_output_folder(output, 'cricket-v0')
# file = open("action_out.txt", "w")
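    # Main DDPG training loop: each episode resets the environment and the
    # exploration noise, rolls out up to step_per_episode actions while storing
    # transitions via ddpg.observe(), only starts policy updates after the
    # warmup number of episodes, and checkpoints the weights periodically.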
for episode in range(num_episodes):
state = env.reset()
ddpg.reset(state) # new
noise.reset() # delete
episode_reward = 0
for step in range(step_per_episode):
action = ddpg.select_action(state) #.get_action(state) # invoke the actor nn to generate an action (compute forward)
# file.write(f'Action {action}\n\n')
reward, new_state, done, info = env.step(action)
new_state = deepcopy(new_state)
ddpg.observe(reward,new_state,done)
state = new_state
episode_reward += reward
if done :
print('!'*80)
break
if episode > args.warmup:
ddpg.update_policy()
if episode % int(num_episodes/3) == 0:
ddpg.save_model(output)
rewards.append(episode_reward)
print('_'*40)
print(f'episode no: {episode}')
print(f'episode reward: {episode_reward}')
n = 10
print(f'last {n} episode reward: {rewards[-n:]}')
print('_'*40)
print()
avg_rewards.append(np.mean(rewards[-10:]))
# file.close()
ddpg.save_model(output) # add read/load directory for the measures of the goal and then use it as a output
    file_out = open(os.path.join(os.path.dirname(__file__), 'out_rew.txt'), 'w')
for reward in rewards:
file_out.write(str(reward) + '\n')
file_out.close()
plt.plot(rewards)
plt.plot(avg_rewards)
plt.plot()
plt.xlabel('Episode')
plt.ylabel('Reward')
plt.show()
| 2.15625 | 2 |
projects/ephys_passive_opto.py | int-brain-lab/project_extraction | 0 | 12786572 | <gh_stars>0
from collections import OrderedDict
import numpy as np
import one.alf.io as alfio
from ibllib.io.extractors import ephys_fpga
from ibllib.dsp.utils import sync_timestamps
from ibllib.plots import squares, vertical_lines
from ibllib.pipes import tasks
from ibllib.pipes.ephys_preprocessing import (
EphysRegisterRaw, EphysPulses, RawEphysQC, EphysAudio, EphysMtscomp, EphysVideoCompress, EphysVideoSyncQc,
EphysCellsQc, EphysDLC, SpikeSorting)
LASER_PULSE_DURATION_SECS = .5
LASER_PROBABILITY = .8
DISPLAY = False
class EphysPassiveOptoTrials(tasks.Task):
cpu = 1
io_charge = 90
level = 1
signature = {
'input_files': [
('_iblrig_taskSettings.raw.json', 'raw_behavior_data', True),
('_spikeglx_sync.times.npy', 'raw_ephys_data', True),
('_spikeglx_sync.polarities.npy', 'raw_ephys_data', True),
('_spikeglx_sync.channels.npy', 'raw_ephys_data', True),
('*.nidq.wiring.json', 'raw_ephys_data', False),
('*.nidq.meta', 'raw_ephys_data', False),
],
'output_files': [
('_ibl_trials.laserIntervals.npy', 'alf', True),
('_ibl_trials.laserProbability.npy', 'alf', True),
('_ibl_trials.intervals.npy', 'alf', True),
('_ibl_wheel.timestamps.npy', 'alf', True),
('_ibl_wheel.position.npy', 'alf', True),
('_ibl_wheelMoves.intervals.npy', 'alf', True),
('_ibl_wheelMoves.peakAmplitude.npy', 'alf', True),
]
}
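    # Aligns Bpod trial-start fronts with the laser TTL channel from the main
    # probe sync, builds trials.intervals / laserIntervals / laserProbability
    # together with the wheel traces, and saves them as ALF npy files in alf/.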
def _run(self):
sync, sync_map = ephys_fpga.get_main_probe_sync(self.session_path)
bpod = ephys_fpga.get_sync_fronts(sync, sync_map['bpod'])
laser_ttl = ephys_fpga.get_sync_fronts(sync, sync_map['laser_ttl'])
t_bpod = bpod['times'][bpod['polarities'] == 1]
t_laser = laser_ttl['times'][laser_ttl['polarities'] == 1]
_, _, ibpod, ilaser = sync_timestamps(t_bpod, t_laser, return_indices=True)
if DISPLAY:
for ch in np.arange(3):
ch0 = ephys_fpga.get_sync_fronts(sync, 16 + ch)
squares(ch0['times'], ch0['polarities'], yrange=[-.5 + ch, .5 + ch])
vertical_lines(t_bpod[ibpod], ymax=4)
trial_starts = t_bpod
trial_starts[ibpod] = t_laser[ilaser]
ntrials = trial_starts.size
# create the trials dictionary
trials = {}
trials['laserIntervals'] = np.zeros((ntrials, 2)) * np.nan
trials['laserIntervals'][ibpod, 0] = t_laser[ilaser]
trials['laserIntervals'][ibpod, 1] = t_laser[ilaser] + LASER_PULSE_DURATION_SECS
trials['intervals'] = np.zeros((ntrials, 2)) * np.nan
trials['intervals'][:, 0] = trial_starts
trials['intervals'][:, 1] = np.r_[trial_starts[1:], np.nan]
trials['laserProbability'] = trial_starts * 0 + LASER_PROBABILITY
# creates the wheel object
wheel, moves = ephys_fpga.get_wheel_positions(sync=sync, chmap=sync_map)
# save objects
alf_path = self.session_path.joinpath('alf')
alf_path.mkdir(parents=True, exist_ok=True)
out_files = []
out_files += alfio.save_object_npy(alf_path, object='trials', namespace='ibl', dico=trials)
out_files += alfio.save_object_npy(alf_path, object='wheel', namespace='ibl', dico=wheel)
out_files += alfio.save_object_npy(alf_path, object='wheelMoves', namespace='ibl', dico=moves)
return out_files
class EphysPassiveOptoPipeline(tasks.Pipeline):
label = __name__
def __init__(self, session_path=None, **kwargs):
super(EphysPassiveOptoPipeline, self).__init__(session_path, **kwargs)
tasks = OrderedDict()
self.session_path = session_path
# level 0
tasks["EphysRegisterRaw"] = EphysRegisterRaw(self.session_path)
tasks["EphysPulses"] = EphysPulses(self.session_path)
tasks["EphysRawQC"] = RawEphysQC(self.session_path)
tasks["EphysAudio"] = EphysAudio(self.session_path)
tasks["EphysMtscomp"] = EphysMtscomp(self.session_path)
tasks['EphysVideoCompress'] = EphysVideoCompress(self.session_path)
# level 1
tasks["SpikeSorting"] = SpikeSorting(
self.session_path, parents=[tasks["EphysMtscomp"], tasks["EphysPulses"]])
tasks["EphysPassiveOptoTrials"] = EphysPassiveOptoTrials(self.session_path, parents=[tasks["EphysPulses"]])
# level 2
tasks["EphysVideoSyncQc"] = EphysVideoSyncQc(
self.session_path, parents=[tasks["EphysVideoCompress"], tasks["EphysPulses"], tasks["EphysPassiveOptoTrials"]])
tasks["EphysCellsQc"] = EphysCellsQc(self.session_path, parents=[tasks["SpikeSorting"]])
tasks["EphysDLC"] = EphysDLC(self.session_path, parents=[tasks["EphysVideoCompress"]])
self.tasks = tasks
__pipeline__ = EphysPassiveOptoPipeline
| 2.046875 | 2 |
gan_model_data/train.py | Monnoroch/generative | 1 | 12786573 | import argparse
import sys
import tensorflow as tf
from gan_model_data import model
from common.experiment import Experiment, load_checkpoint
from common.training_loop import TrainingLoopParams, training_loop
def print_graph(session, model, step, nn_generator):
"""
A helper function for printing key training characteristics.
"""
if nn_generator:
real, fake = session.run([model.average_probability_real, model.average_probability_fake])
print("Saved model with step %d; real = %f, fake = %f" % (step, real, fake))
else:
real, fake, mean, stddev = session.run([model.average_probability_real, model.average_probability_fake, model.mean, model.stddev])
print("Saved model with step %d; real = %f, fake = %f, mean = %f, stddev = %f" % (step, real, fake, mean, stddev))
def train(session, global_step, model_ops, args, hparams):
print_graph(session, model_ops, global_step, hparams.nn_generator)
    # First, run the leading half of the discriminator training steps (at least one).
for _ in range(max(int(args.discriminator_steps/2), 1)):
session.run(model_ops.discriminator_train)
    # Then run the generator training steps, followed by the remaining discriminator steps.
for _ in range(args.generator_steps):
session.run(model_ops.generator_train)
for _ in range(int(args.discriminator_steps/2)):
session.run(model_ops.discriminator_train)
def main(args):
"""
The main function to train the model.
"""
parser = argparse.ArgumentParser(description="Train the gan-normal model.")
parser.add_argument("--batch_size", type=int, default=32, help="The size of the minibatch")
parser.add_argument("--d_learning_rate", type=float, default=0.01, help="The discriminator learning rate")
parser.add_argument("--g_learning_rate", type=float, default=0.02, help="The generator learning rate")
parser.add_argument("--d_l2_reg", type=float, default=0.0005, help="The discriminator L2 regularization parameter")
parser.add_argument("--g_l2_reg", type=float, default=0., help="The generator L2 regularization parameter")
parser.add_argument("--input_mean", type=float, default=[], help="The mean of the input dataset", action="append")
parser.add_argument("--input_stddev", type=float, default=[], help="The standard deviation of the input dataset", action="append")
parser.add_argument("--dropout", type=float, default=0.5, help="The dropout rate to use in the descriminator")
parser.add_argument("--discriminator_steps", type=int, default=1, help="The number of steps to train the descriminator on each iteration")
parser.add_argument("--generator_steps", type=int, default=1, help="The number of steps to train the generator on each iteration")
parser.add_argument("--nn_generator", default=False, action="store_true", help="Whether to use a neural network as a generator")
parser.add_argument("--generator_features", default=[], action="append", type=int, help="The number of features in generators hidden layers")
parser.add_argument("--discriminator_features", default=[], action="append", type=int, help="The number of features in discriminators hidden layers")
Experiment.add_arguments(parser)
TrainingLoopParams.add_arguments(parser)
args = parser.parse_args(args)
# Default input mean and stddev.
if not args.input_mean:
args.input_mean.append(15.)
if not args.input_stddev:
args.input_stddev.append(7.)
if len(args.input_mean) != len(args.input_stddev):
print("There must be the same number of input means and standard deviations.")
sys.exit(1)
experiment = Experiment.from_args(args)
hparams = experiment.load_hparams(model.ModelParams, args)
# Create the model.
model_ops = model.GanNormalModel(hparams, model.DatasetParams(args), model.TrainingParams(args, training=True))
training_loop(TrainingLoopParams(args), experiment, model_ops.summaries,
lambda session, global_step: train(session, global_step, model_ops, args, hparams),
checkpoint=load_checkpoint(args))
if __name__ == "__main__":
main(sys.argv[1:])
| 2.640625 | 3 |
element-frame-based/OCR/eval.py | dymbe/ad-versarial | 43 | 12786574 | <gh_stars>10-100
import os
import cv2
from OCR.tf_tesseract.my_vgsl_model import MyVGSLImageModel, ctc_decode
from OCR.tf_tesseract.read_params import read_tesseract_params
from OCR.ocr_utils import *
from OCR.l2_attack import init
from tensorflow import app
from tensorflow.python.platform import flags
from timeit import default_timer as timer
flags.DEFINE_string('image', "", 'image to load')
flags.DEFINE_integer('target_height', 0, 'Resize image to this height')
flags.DEFINE_string('target', "adchoices", 'text target')
flags.DEFINE_integer('use_gpu', -1, 'GPU id (>=0) or cpu (-1)')
flags.DEFINE_bool('timeit', False, 'time the execution')
FLAGS = flags.FLAGS
if FLAGS.use_gpu >= 0:
os.environ['CUDA_VISIBLE_DEVICES'] = "{}".format(FLAGS.use_gpu)
else:
os.environ['CUDA_VISIBLE_DEVICES'] = ""
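# eval() loads the input image, builds a graph that strips alpha, preprocesses
# and resizes it to the model height, runs the Tesseract VGSL recogniser, then
# CTC-decodes the logits (greedy and beam search) and prints each hypothesis
# with its Levenshtein distance to the --target string. With --timeit it only
# benchmarks the forward pass on randomly sized blank images instead.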
def eval():
use_gpu = FLAGS.use_gpu >= 0
char_map = read_all_chars()
params = read_tesseract_params(use_gpu=use_gpu)
model = MyVGSLImageModel(use_gpu=use_gpu)
img = cv2.imread(FLAGS.image, -1)
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
h, w, ch = img.shape
config = tf.ConfigProto(log_device_placement=False,
allow_soft_placement=False)
with tf.Graph().as_default(), tf.Session(config=config) as sess:
img_var = tf.placeholder(dtype=tf.float32, shape=(None, None, ch))
height_var = tf.placeholder(dtype=tf.int64, shape=[1], name='height')
width_var = tf.placeholder(dtype=tf.int64, shape=[1], name='width')
size_var = tf.placeholder(dtype=tf.int32, shape=[2], name='size')
img_preproc = img_var
if ch == 4:
img_preproc = remove_alpha(img_var)
size_mul = get_size_mul(h, w, target_height=FLAGS.target_height)
img_preproc = preprocess_tf(img_preproc, height_var[0], width_var[0])
img_large = tf.image.resize_images(img_preproc, size_mul*size_var,
method=tf.image.ResizeMethod.BILINEAR)
img_large = tf.image.rgb_to_grayscale(img_large)
logits, _ = model(img_large, size_mul*height_var, size_mul*width_var)
text_output = ctc_decode(logits, model.ctc_width)
text_output2 = ctc_decode(logits, model.ctc_width, beam=True)
init_ops = init(params, use_gpu=use_gpu, skip=0)
sess.run(init_ops)
if FLAGS.timeit:
t1 = timer()
n = 100
for i in range(n):
h = np.random.randint(low=40, high=80)
w = np.random.randint(low=150, high=200)
img = np.zeros(shape=(h, w, ch), dtype=np.float32)
sess.run(text_output, feed_dict={img_var: img,
size_var: [h, w],
height_var: [h],
width_var: [w]})
t2 = timer()
print("time for {} images: {:.3f} s".format(n, t2 - t1))
else:
logits_np, output, output2 = sess.run(
[logits, text_output, text_output2],
feed_dict={img_var: img,
size_var: [h, w],
height_var: [h],
width_var: [w]})
s1 = decode(output, char_map)[0]
s2 = decode(output2, char_map)[0]
labels = np.argmax(logits_np, axis=-1)
print(decode(labels, char_map, sparse=False))
dist1 = levenshtein(s1.lower(), FLAGS.target.lower())
dist2 = levenshtein(s2.lower(), FLAGS.target.lower())
print(s1, dist1)
print(s2, dist2)
def main(argv):
del argv
eval()
if __name__ == '__main__':
app.run()
| 2.34375 | 2 |
hknweb/studentservices/forms.py | Boomaa23/hknweb | 0 | 12786575 | <reponame>Boomaa23/hknweb
import datetime
from django import forms
from hknweb.studentservices.models import DepTour, Resume, ReviewSession
class DocumentForm(forms.ModelForm):
class Meta:
model = Resume
fields = ("name", "document", "notes", "email")
class ReviewSessionForm(forms.ModelForm):
start_time = forms.DateTimeField(input_formats=("%m/%d/%Y %I:%M %p",))
end_time = forms.DateTimeField(input_formats=("%m/%d/%Y %I:%M %p",))
class Meta:
model = ReviewSession
fields = ("name", "slug", "location", "description", "start_time", "end_time")
help_texts = {
"start_time": "mm/dd/yyyy hh:mm, 24-hour time",
"end_time": "mm/dd/yyyy hh:mm, 24-hour time",
"slug": "e.g. <semester>-<name>",
}
widgets = {
"slug": forms.TextInput(attrs={"placeholder": "e.g. <semester>-<name>"}),
}
labels = {
"slug": "URL-friendly name",
}
class ReviewSessionUpdateForm(forms.ModelForm):
start_time = forms.DateTimeField(input_formats=("%m/%d/%Y %I:%M %p",))
end_time = forms.DateTimeField(input_formats=("%m/%d/%Y %I:%M %p",))
class Meta:
model = ReviewSession
fields = ["name", "slug", "start_time", "end_time", "location", "description"]
labels = {
"slug": "URL-friendly name",
}
class TourRequest(forms.ModelForm):
datetime = forms.DateTimeField(
help_text="MM/DD/YYYY hh:mm AM/PM",
input_formats=("%m/%d/%Y %I:%M %p",),
label="Desired Date and Time",
)
confirm_email = forms.EmailField(max_length=100)
class Meta:
model = DepTour
fields = ["name", "datetime", "email", "confirm_email", "phone", "comments"]
    def clean_datetime(self):
        value = self.cleaned_data["datetime"]
        if value < datetime.datetime.now():
            raise forms.ValidationError("The requested date and time cannot be in the past!")
        return value
def clean_confirm_email(self):
email = self.cleaned_data["email"]
confirm_email = self.cleaned_data["confirm_email"]
if email and confirm_email:
if email != confirm_email:
raise forms.ValidationError("Emails do not match.")
return confirm_email
| 2.1875 | 2 |
settings.py | DiogoKramel/SailPy | 3 | 12786576 | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/assets/'
# Extra places to collect and find static files
# STATICFILES_DIRS = (os.path.join(BASE_DIR, '/assets/')) | 2.09375 | 2 |
tests/pruebas_funcionales/login.py | Javier-Alonso29/conalep | 2 | 12786577 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.chrome.options import Options
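# Configure Chrome to run headless at a fixed 600x600 window, excluding the
# default enable-automation and ignore-certificate-errors command-line switches.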
options = Options()
extset = ['enable-automation', 'ignore-certificate-errors']
options.add_argument("--window-size=600,600")
options.add_argument("--headless")
options.add_experimental_option("excludeSwitches", extset)
driver = webdriver.Chrome(options=options)
# driver = webdriver.Chrome()
driver.implicitly_wait(5)
driver.get('http://homestead.test')
driver.find_element_by_id('email').send_keys('<EMAIL>')
driver.find_element_by_id('password').send_keys('<PASSWORD>' + Keys.ENTER)
time.sleep(0.5) | 2.25 | 2 |
crud/models.py | TownOneWheel/townonewheel | 0 | 12786578 | <reponame>TownOneWheel/townonewheel
from django.db import models
from django.db.models.fields import NullBooleanField
from django.contrib.auth.models import User, update_last_login
from behavior import BaseField
class Cat(BaseField):
catname = models.CharField(max_length=64)
gender = models.CharField(max_length=20, null=True, blank=True)
color = models.CharField(max_length=20, null=True, blank=True)
neutering = models.CharField(max_length=10, null=True, blank=True)
    friendly = models.IntegerField(default=0)
location = models.TextField()
location_lat = models.FloatField(default=37.54490018658278)
location_lon = models.FloatField(default=127.05685028171477)
upload_user = models.ForeignKey(User, on_delete=models.SET_NULL, related_name='upload', null=True, blank=True)
# cat_like = models.TextField()
class CatImage(models.Model):
cat = models.ForeignKey(Cat, on_delete=models.SET_NULL, related_name='image', null=True, blank=True)
url = models.TextField(null=True, blank=True)
class Comment(BaseField):
cat = models.ForeignKey(Cat, on_delete=models.SET_NULL, related_name='cat', null=True, blank=True)
user = models.ForeignKey(User, on_delete=models.SET_NULL, related_name='writer', null=True, blank=True)
content = models.TextField() | 2.1875 | 2 |
online/online_detection/hmm_online_endpose_detection.py | birlrobotics/bnpy | 3 | 12786579 | <gh_stars>1-10
#!/usr/bin/env python
import sys
import os
import pandas as pd
import numpy as np
from hmmlearn.hmm import *
from sklearn.externals import joblib
import ipdb
from math import (
log,
exp
)
from sklearn.preprocessing import (
scale,
normalize
)
#######-----ros module----##########
import rospy
from std_msgs.msg import (
Empty,
Header
)
from baxter_core_msgs.msg import EndpointState
from sensor_msgs.msg import JointState
from geometry_msgs.msg import WrenchStamped
from birl_sim_examples.msg import (
Tag_MultiModal,
Hmm_Log
)
from birl_sim_examples.srv import (
State_Switch,
State_SwitchResponse
)
import threading
mylock = threading.RLock()
data_arr = np.array([0])
hmm_previous_state =0
hmm_state = 0
data_index = 0
df = pd.DataFrame()
header = Header()
success_path = "/home/ben/ML_data/REAL_BAXTER_PICK_N_PLACE_6_1/success"
model_save_path = "/home/ben/ML_data/REAL_BAXTER_PICK_N_PLACE_6_1/model/endpoint_pose"
figure_save_path = "/home/ben/ML_data/REAL_BAXTER_PICK_N_PLACE_6_1/figure/endpoint_pose"
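# ROSThread subscribes to /tag_multimodal and, under mylock, appends the
# end-effector pose (position + orientation) and skill tag of each message to a
# global DataFrame, restarting the buffer whenever the skill tag changes and
# exposing the rows of the current skill through data_arr / data_index.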
class ROSThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def callback_multimodal(self,data):
mylock.acquire()
global hmm_state
global data_arr
global data_index
global df
global header
global hmm_previous_state
hmm_state = data.tag
if not hmm_state==hmm_previous_state:
df = pd.DataFrame()
header = data.wrench_stamped.header
df_append_data = {'.endpoint_state.pose.position.x':[data.endpoint_state.pose.position.x],
'.endpoint_state.pose.position.y':[data.endpoint_state.pose.position.y],
'.endpoint_state.pose.position.z':[data.endpoint_state.pose.position.z],
'.endpoint_state.pose.orientation.x':[data.endpoint_state.pose.orientation.x],
'.endpoint_state.pose.orientation.y':[data.endpoint_state.pose.orientation.y],
'.endpoint_state.pose.orientation.z':[data.endpoint_state.pose.orientation.z],
'.endpoint_state.pose.orientation.w':[data.endpoint_state.pose.orientation.w],
'.tag':[data.tag]}
df_append = pd.DataFrame(df_append_data, columns=['.endpoint_state.pose.position.x',
'.endpoint_state.pose.position.y',
'.endpoint_state.pose.position.z',
'.endpoint_state.pose.orientation.x',
'.endpoint_state.pose.orientation.y',
'.endpoint_state.pose.orientation.z',
'.endpoint_state.pose.orientation.w',
'.tag'])
df = df.append(df_append, ignore_index = True)
df = df.fillna(method='ffill')
data_arr = df.values[df.values[:,-1] ==hmm_state]
data_arr = data_arr[:,:-1]
data_index = data_arr.shape[0]
hmm_previous_state = hmm_state
mylock.release()
def run(self):
# set up Subscribers
rospy.Subscriber("/tag_multimodal", Tag_MultiModal, self.callback_multimodal)
print "Topic /tag_multimodal publish rate: 100 hz"
print "Topic /robot/limb/right/endpoint_state publish rate: 100hz"
print "Topic /robot/joint_states publish rate: 120hz"
print "Topic /wrench/filter publish rate: 200hz"
while not rospy.is_shutdown():
rospy.spin()
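# HMMThread loads one pre-trained HMM per skill state together with its
# expected log-likelihood and threshold curves, and at 50 Hz publishes a
# Hmm_Log message on /hmm_online_result reporting the current observation
# log-likelihood, the expected value and the threshold, flagging whether the
# current log-likelihood is above the threshold.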
class HMMThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
#n_state = 10
#n_iteraton = 100
#covariance_type_string = 'diag'
#preprocessing_scaling = False
#preprocessing_normalize = False
#data_feature = 6
self.model_1 = joblib.load(model_save_path+"/multisequence_model/model_s1.pkl")
self.model_2 = joblib.load(model_save_path+"/multisequence_model/model_s2.pkl")
self.model_3 = joblib.load(model_save_path+"/multisequence_model/model_s3.pkl")
self.model_4 = joblib.load(model_save_path+"/multisequence_model/model_s4.pkl")
self.expected_log_1 = joblib.load(model_save_path+'/multisequence_model/expected_log.pkl')[0]
self.expected_log_2 = joblib.load(model_save_path+'/multisequence_model/expected_log.pkl')[1]
self.expected_log_3 = joblib.load(model_save_path+'/multisequence_model/expected_log.pkl')[2]
self.expected_log_4 = joblib.load(model_save_path+'/multisequence_model/expected_log.pkl')[3]
self.threshold_1 = joblib.load(model_save_path+'/multisequence_model/threshold.pkl')[0]
self.threshold_2 = joblib.load(model_save_path+'/multisequence_model/threshold.pkl')[1]
self.threshold_3 = joblib.load(model_save_path+'/multisequence_model/threshold.pkl')[2]
self.threshold_4 = joblib.load(model_save_path+'/multisequence_model/threshold.pkl')[3]
def run(self):
#ipdb.set_trace()
global data_arr
global hmm_state
global data_index
global header
hmm_log = Hmm_Log()
publishing_rate = 50
r = rospy.Rate(publishing_rate)
pub = rospy.Publisher("/hmm_online_result", Hmm_Log, queue_size=10)
while not rospy.is_shutdown():
if hmm_state == 1:
try:
hmm_log.expected_log.data = self.expected_log_1[data_index-1]
hmm_log.threshold.data = self.threshold_1[data_index-1]
hmm_log.current_log.data = self.model_1.score(data_arr)
if (hmm_log.current_log.data-hmm_log.threshold.data)>=0:
hmm_log.event_flag =1
else:
hmm_log.event_flag=0
print "%d"%(data_index)
except:
rospy.logerr("the data shape is %d ",data_index)
elif hmm_state == 2:
try:
hmm_log.expected_log.data = self.expected_log_2[data_index-1]
hmm_log.threshold.data = self.threshold_2[data_index-1]
hmm_log.current_log.data = self.model_2.score(data_arr)
if (hmm_log.current_log.data-hmm_log.threshold.data)>=0:
hmm_log.event_flag =1
else:
hmm_log.event_flag=0
print "%d"%(data_index)
except:
rospy.logerr("the data shape is %d",data_index)
elif hmm_state == 3:
try:
hmm_log.expected_log.data = self.expected_log_3[data_index-1]
hmm_log.threshold.data = self.threshold_3[data_index-1]
hmm_log.current_log.data = self.model_3.score(data_arr)
if (hmm_log.current_log.data-hmm_log.threshold.data)>=0:
hmm_log.event_flag =1
else:
hmm_log.event_flag=0
print "%d"%(data_index)
except:
rospy.logerr("the data shape is %d",data_index)
elif hmm_state == 4:
try:
hmm_log.expected_log.data = self.expected_log_4[data_index-1]
hmm_log.threshold.data = self.threshold_4[data_index-1]
hmm_log.current_log.data = self.model_4.score(data_arr)
if (hmm_log.current_log.data-hmm_log.threshold.data)>=0:
hmm_log.event_flag =1
else:
hmm_log.event_flag=0
except:
rospy.logerr("the data shape is %d",data_index)
hmm_log.header = header
pub.publish(hmm_log)
r.sleep()
return 0
def main():
rospy.init_node("hmm_online_parser", anonymous=True)
thread1 = ROSThread()
thread2 = HMMThread()
thread1.setDaemon(True)
thread2.setDaemon(True)
thread1.start()
thread2.start()
while not rospy.is_shutdown():
rospy.spin()
return 0
if __name__ == '__main__':
sys.exit(main())
| 1.976563 | 2 |
test_day07/test_ex13.py | anxodio/aoc2021 | 0 | 12786580 | <filename>test_day07/test_ex13.py
from pathlib import Path
from typing import List
from statistics import median
def get_minimum_alignement_fuel(positions: List[int]) -> int:
best_position = int(median(positions))
return sum(abs(pos - best_position) for pos in positions)
def test_get_minimum_alignement_fuel():
assert get_minimum_alignement_fuel([16, 1, 2, 0, 4, 2, 7, 1, 2, 14]) == 37
if __name__ == "__main__":
with open((Path(__file__).parent / "input.txt")) as f:
raw_lines = [line.rstrip("\n") for line in f]
positions = [int(position) for position in raw_lines[0].split(",")]
print(get_minimum_alignement_fuel(positions))
| 3.515625 | 4 |
namecheapapi/api/whoisguard.py | porfel/namecheapapi | 23 | 12786581 | from namecheapapi.api.session import Session
class WhoisguardAPI:
def __init__(self, session: Session) -> None:
self.session = session
def change_email_address(self):
pass
def enable(self):
pass
def disable(self):
pass
def unallot(self):
pass
def discard(self):
pass
def allot(self):
pass
def get_list(self):
pass
def renew(self):
pass
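
# Note: the methods above are intentionally left as stubs. A filled-in method
# would delegate to the shared `self.session` object (e.g. issuing the matching
# namecheap.whoisguard API command); the exact Session interface is not shown
# here, so no concrete implementation is assumed.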
| 1.882813 | 2 |
utilities/data_cleaning.py | Araualla/Cell_health | 0 | 12786582 | from utilities.constants import TREAT, CONC
from utilities.counts import count_cells_per_well, normalise_count_cells
# labels for concentration of treatments in the experiment
number2conc = {2: '0 ug/mL',
3: '0.137 ug/mL',
4: '0.412 ug/mL',
5: '1.235 ug/mL',
6: '3.704 ug/mL',
7: '11.11 ug/mL',
8: '33.33 ug/mL',
9: '100 ug/mL',
10: '300ug/mL'}
# labels for the nanoparticle treatments in the experiment
row2np = {'A': 'Si-F8BT',
'B': 'Si-CNPPV',
'C': 'Si-P3',
'D': 'Si-P4',
'E': 'PP-F8BT',
'F': 'PP-CNPPV',
'G': 'PP-P3',
'H': 'PP-P4'}
# labels for the control treatments in the experiment
controls = {'A': 'FCCP Control',
'B': 'FCCP Control',
'C': 'Triton-X',
'D': 'Triton-X',
'E': 'H2O',
'F': 'H2O',
'G': 'DMSO',
'H': 'DMSO'}
def clean_data(data):
"""Clean a csv file"""
# removing Weighted_Relative_Moment_Inertia
# high frequency of nan
    data = data.drop(columns=['Weighted_Relative_Moment_Inertia'], errors='ignore')
data.columns = [format_column_name(x) for x in data.columns]
data = label_data(data)
data = normalise_data(data)
count = count_cells_per_well(data)
normalised_counts = normalise_count_cells(data, count)
return data, count, normalised_counts
def label_data(data):
""" Takes one dataframe and applies the correct labels to each row"""
# some rows miss these two features, which are fundamental. **EXTERMINATE**
drop = data[['Area Nuc', 'Area Cell']].isnull().sum(axis=1) != 0
drop = data.index.values[drop]
data = data.drop(index=drop)
data[CONC] = data.apply(lambda x: number2conc.get(x['Number'], 'control'), axis=1)
data.head()
data[TREAT] = data.apply(lambda x: row2np.get(x['Row'], 'control'), axis=1)
data.head()
for key in controls:
data.loc[(data[CONC] == 'control') & (data['Row'] == key), TREAT] = controls[key]
data = data.drop(columns=['Number', 'Count Nuc'])
return data
def format_column_name(string):
"""Automatically reformats feature names into something more machine-readable."""
string = ' '.join(string.strip().split())
string = (string
.replace('_', ' ')
.replace('[', '')
.title()
.replace('- Um', '')
)
# if ('Feret' in string or 'Perimeter' in string) and '(μm)' not in string:
# string += ' (μm)'
if 'Mempernuc' in string:
string = string.replace('Mempernuc', 'Mem Per Nuc')
if 'Mitoint' in string:
string = string.replace('Mitoint', 'Mito Int ')
string = string.title()
if 'dxa' in string or 'Dxa' in string:
string = string.replace('dxa', ' DxA')
string = string.replace('Dxa', ' DxA')
if 'Wmoi' in string:
string = string.replace('Wmoi', 'WMOI')
if 'Conc' in string:
string = string.replace('Conc', 'Concentration')
return string
def format_dataframe_columns(df):
df.columns = [format_column_name(colname) for colname in df.columns]
return df
def normalise_data(data):
"""Z-scores all numeric data."""
# select only numeric data
numeric = data._get_numeric_data()
# apply transformation
numeric = numeric - numeric.mean()
numeric = numeric / numeric.std()
# mind that we don't have the classes column in this dataframe!
# put class information back in
numeric[CONC] = data[CONC].tolist()
numeric[TREAT] = data[TREAT].tolist()
return numeric
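

# Minimal end-to-end sketch; the CSV path and its per-cell feature columns
# (e.g. 'Area Nuc', 'Area Cell', 'Row', 'Number') are assumptions about the
# exported plate data, not part of this module.
if __name__ == "__main__":
    import pandas as pd

    raw = pd.read_csv("plate_export.csv")
    cleaned, counts, normalised_counts = clean_data(raw)
    print(cleaned.head())
    print(normalised_counts)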
| 2.4375 | 2 |
plc_io/core_libraries/mqtt_current_monitor_interface_py3.py | bopopescu/docker_images_a | 2 | 12786583 | import paho.mqtt.client as mqtt
import ssl
from redis_support_py3.graph_query_support_py3 import Query_Support
from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
import time
import msgpack
class MQTT_Current_Monitor_Publish(object):
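    """Publish control/query requests to a remote MQTT current monitor.

    The constructor looks up the site's MQTT_DEVICES_DATA package in the redis
    graph and builds a job-queue client for its MQTT_PUBLISH_QUEUE; each helper
    method below pushes a request dict whose topic is prefixed with topic_prefix.
    """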
def __init__(self,redis_site,topic_prefix,qs ) :
self.topic_prefix = topic_prefix
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_terminal( query_list,
relationship = "PACKAGE", property_mask={"name":"MQTT_DEVICES_DATA"} )
package_sets, package_sources = qs.match_list(query_list)
package = package_sources[0]
generate_handlers = Generate_Handlers(package,qs)
data_structures = package["data_structures"]
self.job_queue_client = generate_handlers.construct_job_queue_client(data_structures["MQTT_PUBLISH_QUEUE"])
def read_current_limit(self):
request = {}
request["topic"] = "INPUT/MQTT_CURRENT/GET_LIMIT_CURRENTS"
self.send_request(request)
def read_max_currents(self):
request = {}
request["topic"] = "INPUT/MQTT_CURRENT/GET_MAX_CURRENTS"
self.send_request(request)
def clear_max_currents(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/CLEAR_MAX_CURRENTS"
self.send_request(request)
def read_current(self):
request = {}
request["topic"] = "INPUT/MQTT_CURRENT/READ_CURRENT"
self.send_request(request)
def enable_equipment_relay(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/ENABLE_EQUIPMENT_RELAY"
self.send_request(request)
def enable_irrigation_relay(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/ENABLE_IRRIGATION_RELAY"
self.send_request(request)
def disable_equipment_relay(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/DISABLE_EQUIPMENT_RELAY"
self.send_request(request)
    def disable_irrigation_relay(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/DISABLE_IRRIGATION_RELAY"
self.send_request(request)
def read_relay_states(self):
request = {}
request["topic"] = "OUTPUT/MQTT_CURRENT/READ_RELAY_STATES"
self.send_request(request)
def send_request(self,msg_dict):
msg_dict["tx_topic"] =self.topic_prefix +msg_dict["topic"]
#print("msg_dict",msg_dict)
self.job_queue_client.push(msg_dict)
if __name__ == "__main__":
import datetime
import time
import string
import urllib.request
import math
import redis
import base64
import json
import os
import copy
#import load_files_py3
from redis_support_py3.graph_query_support_py3 import Query_Support
import datetime
from py_cf_new_py3.chain_flow_py3 import CF_Base_Interpreter
#
#
# Read Boot File
# expand json file
#
file_handle = open("system_data_files/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site = json.loads(data)
    # The constructor also needs a Query_Support instance; building it from the
    # redis site config is an assumption based on the import above.
    qs = Query_Support(redis_site)
    x = MQTT_Current_Monitor_Publish(redis_site, "/REMOTES/CURRENT_MONITOR_1/", qs)
while(1):
time.sleep(5)
x.read_max_currents()
time.sleep(5)
x.clear_max_currents()
time.sleep(5)
x.read_relay_states()
| 2.15625 | 2 |
tests/testlibraries/parametrizer/file_path_with_resource_factories.py | yukihiko-shinoda/fixture-file-handler | 0 | 12786584 | <gh_stars>0
"""This module implements factory for file path with file."""
from abc import abstractmethod
from pathlib import Path
from typing import Generic, Type, TypeVar
from fixturefilehandler.file_paths import RelativeDeployFilePath, RelativeVacateFilePath
from tests.testlibraries.parametrizer.file_states import MultipleFilePathState, ThreeFilePathState, TwoFilePathState
PATH_TARGET = Path("test.txt")
PATH_BACKUP = Path("test.txt.bak")
PATH_RESOURCE = Path("test.txt.dist")
TypeVarTwoFilesState = TypeVar("TypeVarTwoFilesState", bound=TwoFilePathState)
class AbstractFilePathWithResourceFactory(Generic[TypeVarTwoFilesState]):
"""This class implements abstract factory."""
@staticmethod
@abstractmethod
def create(tmp_path, file_state: TypeVarTwoFilesState):
"""This class creates files and returns file path"""
class VacateFilePathWithFileFactory(AbstractFilePathWithResourceFactory):
"""This class implements factory for vacate file path."""
@staticmethod
def create(tmp_path, file_state: TwoFilePathState):
file_path = RelativeVacateFilePath(PATH_TARGET, PATH_BACKUP, tmp_path)
file_state.expect_target.create_file(file_path)
file_state.expect_backup.create_file(file_path)
return file_path
class DeployFilePathWithFileFactory(AbstractFilePathWithResourceFactory):
"""This class implements factory for deploy file path."""
@staticmethod
def create(tmp_path, file_state: ThreeFilePathState):
file_path = RelativeDeployFilePath(PATH_TARGET, PATH_BACKUP, PATH_RESOURCE, tmp_path)
file_state.expect_target.create_file(file_path)
file_state.expect_backup.create_file(file_path)
file_state.expect_resource.create_file(file_path)
return file_path
class VacateFilePathWithDirectoryFactory(AbstractFilePathWithResourceFactory):
"""This class implements factory for vacate file path."""
@staticmethod
def create(tmp_path, file_state: TwoFilePathState):
file_path = RelativeVacateFilePath(PATH_TARGET, PATH_BACKUP, tmp_path)
file_state.expect_target.create_directory(file_path)
file_state.expect_backup.create_directory(file_path)
return file_path
class DeployFilePathWithDirectoryFactory(AbstractFilePathWithResourceFactory):
"""This class implements factory for deploy file path."""
@staticmethod
def create(tmp_path, file_state: ThreeFilePathState):
file_path = RelativeDeployFilePath(PATH_TARGET, PATH_BACKUP, PATH_RESOURCE, tmp_path)
file_state.expect_target.create_directory(file_path)
file_state.expect_backup.create_directory(file_path)
file_state.expect_resource.create_directory(file_path)
return file_path
class FilePathWithFileFactory:
"""This class implements factory for file path and file."""
@classmethod
def create(cls, tmp_path, multi_file_state):
"""This class creates files and returns file path"""
factory = cls._create(multi_file_state)
return factory.create(tmp_path, multi_file_state)
@classmethod
def _create(cls, multi_file_state: MultipleFilePathState) -> Type[AbstractFilePathWithResourceFactory]:
if isinstance(multi_file_state, TwoFilePathState):
return VacateFilePathWithFileFactory
if isinstance(multi_file_state, ThreeFilePathState):
return DeployFilePathWithFileFactory
raise ValueError()
class FilePathWithDirectoryFactory:
"""This class implements factory for file path and file."""
@classmethod
def create(cls, tmp_path, multi_file_state):
"""This class creates files and returns file path"""
factory = cls._create(multi_file_state)
return factory.create(tmp_path, multi_file_state)
@classmethod
def _create(cls, multi_file_state: MultipleFilePathState) -> Type[AbstractFilePathWithResourceFactory]:
if isinstance(multi_file_state, TwoFilePathState):
return VacateFilePathWithDirectoryFactory
if isinstance(multi_file_state, ThreeFilePathState):
return DeployFilePathWithDirectoryFactory
raise ValueError()
| 2.65625 | 3 |
courses/models.py | Cent-Luc/University_Portal | 0 | 12786585 | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Course(models.Model):
title = models.CharField(max_length=200)
code = models.SlugField(max_length=200, unique=True)
summary = models.TextField(blank=True)
class Meta:
ordering = ['title']
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("courses_admin_list")
class Unit(models.Model):
course = models.ForeignKey(Course,
related_name='courses',
on_delete=models.CASCADE)
title = models.CharField(max_length=200)
code = models.SlugField(max_length=200, unique=True)
overview = models.TextField()
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering =['created']
def __str__(self):
return self.title
class Module(models.Model):
unit = models.ForeignKey(Unit,
related_name='modules',
on_delete=models.CASCADE)
title = models.CharField(max_length=200)
description = models.TextField(blank=True)
def __str__(self):
return self.title
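

# Example usage (e.g. in a Django shell or test), based on the fields above:
#
#     course = Course.objects.create(title="Algorithms", code="algorithms")
#     unit = Unit.objects.create(course=course, title="Sorting", code="sorting",
#                                overview="Comparison sorts")
#     course.courses.all()   # reverse accessor is `courses` per related_name above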
| 2.203125 | 2 |
tools/versioncmp/examples/static_web/w_vordir_deterministic.py | dtip/magics | 0 | 12786586 | # (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
from MetPy import *
from MagPy import *
import sys
print 'plotting:'
arg_dict = {}
for i in sys.argv[1:]:
print i
arg_name,arg_value = string.split(i,"=")
arg_list = string.split(arg_value,",")
arg_dict[arg_name] = arg_list
print
grib_files = ["vorticity.grib","divergence.grib"]
# --- vorticity has specific areas ...
list_areas=[[-30,-30,30,100],[-30,90,30,-140],[-30,-150,30,-20]]
w700 = FieldSet("wind.grib")
windex = FieldIndex(w700,"level","step")
for grib_file in grib_files:
vor700_fieldset = FieldSet(grib_file) * 100000
vor700_index = FieldIndex(vor700_fieldset,"step")
coast = Coastlines(libfile='magpylib.cfg',libname='coastlines',map_coastline_land_shade="off",)
contour_vodiv_neg = Contour(libfile='magpylib.cfg',libname="vodiv_neg",)
contour_vodiv_pos = Contour(libfile='magpylib.cfg',libname="vodiv_pos",)
w700_wind = Wind(libfile='magpylib.cfg',libname="wind700rhdiv",)
layout = Layout(orientation="landscape",format="a4",layout=SimpleLayout(1,1),plot_coastlines="last",)
box = LegendBox(legend_box_x_position=104,legend_box_y_position=0,
legend_box_x_length=6,legend_box_y_length=95,
legend_display_type = 'continuous',legend_title="on",legend_title_text="10**-5 s-1",
legend_text_maximum_height=1,legend_text_quality="medium",)
for area in list_areas:
geography = CornerArea(projection='cylindrical',area=area,)
for step in arg_dict["step"]:
iw700 = windex.constrained_access(wanted=2,level = 700,step=step)
vor700 = vor700_index.constrained_access(wanted=1,step=step)
s = SubLayout(
coastlines = coast,
plot_coastlines = "both",
geography = geography,
layout = AbsoluteLayout([
[3,1,85,95],
]),
page_id_line = "off",
page_id_line_system_plot = "off",
page_id_line_date_plot = "off",
page_id_line_errors_plot = "off",
page_id_line_logo_plot = "off",
page_id_line_user_text = str(arg_dict["text"][0]),
)
print "plotting:",area," step:",step
title = FieldAutoTitle(vor700,text = [None,"${titleParameterName} / v-velocity"])
layout.plot(s(box,FieldInput(vor700),contour_vodiv_neg,contour_vodiv_pos,UVWindFieldInput(iw700[0],iw700[1]),w700_wind,title))
| 1.960938 | 2 |
tests/factories.py | omni-digital/omni-wagtail-library | 2 | 12786587 | <gh_stars>1-10
# -*- coding:utf8 -*-
from __future__ import unicode_literals
from factory import Sequence
from wagtail_factories import PageFactory
from wagtail_library.models import LibraryIndex, LibraryDetail
class LibraryIndexFactory(PageFactory):
title = Sequence("Library index {}".format)
body = Sequence("Library index {} body.".format)
class Meta(object):
"""Factory properties."""
model = LibraryIndex
class LibraryDetailFactory(PageFactory):
title = Sequence("Library detail {}".format)
body = Sequence("Library detail {} body.".format)
class Meta(object):
"""Factory properties."""
model = LibraryDetail
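

# Illustrative use in a test (assumes a Wagtail page tree is available, e.g. a
# `home_page` fixture, and that wagtail_factories' `parent=` kwarg is used):
#
#     index = LibraryIndexFactory(parent=home_page)
#     detail = LibraryDetailFactory(parent=index)
#     assert detail.get_parent().specific == index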
| 1.898438 | 2 |
ocr/form_recognizer.py | PrynsTag/oneBarangay | 0 | 12786588 | """Recognize and extract forms."""
import os
from statistics import fmean
from azure.ai.formrecognizer.aio import FormRecognizerClient, FormTrainingClient
from azure.core.credentials import AzureKeyCredential
class RecognizeCustomFormsSampleAsync:
"""Class to recognize forms in async mode."""
async def recognize_custom_forms(self, custom_model_id, filename):
"""Extract text from custom form.
Args:
custom_model_id: The trained custom model id.
filename: The filename of the document that will be scanned.
Returns:
The header for the table and the extracted text.
"""
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id)
async with FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
# Make sure your form's type is included in the
# list of form types the custom model can recognize
form_url = (
f"https://storage.googleapis.com/"
f"{os.getenv('GS_MEDIA_BUCKET_NAME')}/"
f"{filename}"
)
poller = await form_recognizer_client.begin_recognize_custom_forms_from_url(
model_id=model_id, form_url=form_url, include_field_elements=True
)
forms = await poller.result()
table = []
header = {}
for _, form in enumerate(forms):
row = {}
for idx, (name, field) in enumerate(form.fields.items()):
if idx >= 3:
for value in field.value:
for i, val in value.to_dict()["value"].items():
data = val["value_data"]
# Condition for "No Data"
if data:
words = data["field_elements"]
# Condition for multiple word result
if len(words) > 1:
word_list = [word["text"] for word in words]
confidence_list = [word["confidence"] for word in words]
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": " ".join(word_list),
"confidence": round(fmean(confidence_list), 3),
}
else:
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": words[0]["text"],
"confidence": words[0]["confidence"],
}
else:
slug_name = (
val["name"]
.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
)
row[slug_name] = {
"text": data,
"confidence": data,
}
if i == "REMARKS":
table.append(row)
row = {}
else:
slug_name = (
name.lower().replace(" ", "_").replace("(", "").replace(")", "")
)
header[slug_name] = {
"text": field.value,
"confidence": field.confidence,
}
return header, table
async def form_recognizer_runner(filename):
"""Runner for the form recognizer.
Args:
filename: The filename of the document to be scanned
Returns:
The form header and the table scanned.
"""
sample = RecognizeCustomFormsSampleAsync()
model_id = None
if os.getenv("CONTAINER_SAS_URL"):
endpoint = os.getenv("AZURE_FORM_RECOGNIZER_ENDPOINT")
key = os.getenv("AZURE_FORM_RECOGNIZER_KEY")
if not endpoint or not key:
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
model = await (
await form_training_client.begin_training(
os.getenv("CONTAINER_SAS_URL"), use_training_labels=True
)
).result()
model_id = model.model_id
return await sample.recognize_custom_forms(model_id, filename)
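

# Minimal usage sketch; assumes the AZURE_FORM_RECOGNIZER_* and
# GS_MEDIA_BUCKET_NAME environment variables are set and that "scan.jpg"
# (an illustrative name) exists in the bucket.
if __name__ == "__main__":
    import asyncio

    header, table = asyncio.run(form_recognizer_runner("scan.jpg"))
    print(header)
    print(table)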
| 2.671875 | 3 |
src/yews/transforms/base.py | Lchuang/yews | 6 | 12786589 | <reponame>Lchuang/yews<gh_stars>1-10
def is_transform(obj):
"""Verfy if a object is a ``transform-like`` object.
Args:
obj: Object to be determined.
Returns:
bool: True for ``transform-like`` object, false otherwise.
"""
return hasattr(obj, '__call__')
class BaseTransform(object):
"""An abstract class representing a Transform.
All other transform should subclass it. All subclasses should override
``__call__`` which performs the transform.
Note:
A transform-like object has ``__call__`` implmented. Typical
transform-like objects include python functions and methods.
"""
def __call__(self, data):
raise NotImplementedError
def __repr__(self):
head = self.__class__.__name__
content = [f"{key} = {val}" for key, val in self.__dict__.items()]
body = ", ".join(content)
return f"{head}({body})"
class Compose(BaseTransform):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.ZeroMean(),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, wav):
for t in self.transforms:
wav = t(wav)
return wav
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
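

# Example only (not part of the package API): a minimal BaseTransform subclass
# showing the contract -- implement __call__ and it composes with Compose,
# e.g. Compose([_ScaleExample(0.5), _ScaleExample(2.0)]).
class _ScaleExample(BaseTransform):
    """Multiply the input by a constant factor."""

    def __init__(self, factor=2.0):
        self.factor = factor

    def __call__(self, data):
        return data * self.factor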
| 3.40625 | 3 |
recipes/timit_v2/local/timit-norm-trans.py | RobinAlgayres/beer | 46 | 12786590 | <filename>recipes/timit_v2/local/timit-norm-trans.py
'''Normalize the TIMIT transcription by mapping the set of phones
to a smaller subset.
'''
import argparse
import sys
import logging
logging.basicConfig(format='%(levelname)s: %(message)s')
def run():
parser = argparse.ArgumentParser(description=__doc__)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--map-60-48', action='store_true')
group.add_argument('--map-48-39', action='store_true')
parser.add_argument('phonemap', help='the 60-48-39 mapping')
args = parser.parse_args()
# Load the phone map.
map_60to48 = {}
map_48to39 = {}
to_remove = []
with open(args.phonemap, 'r') as fid:
for line in fid:
phones = line.strip().split()
try:
map_60to48[phones[0]] = phones[1]
map_48to39[phones[1]] = phones[2]
except IndexError:
to_remove.append(phones[0])
                # If there is no mapping for a phone other than "q",
                # print a warning message.
if not phones[0] == 'q':
msg = 'No mapping for the phone "{}". It will be ' \
'removed from the transcription.'
logging.warning(msg.format(phones[0]))
# Select the requested mapping from the command line arguments.
if args.map_60_48:
mapping = map_60to48
else:
mapping = map_48to39
# Normalize the transcription
for line in sys.stdin:
tokens = line.strip().split()
uttid = tokens[0]
utt_trans = tokens[1:]
# Remove the phones that have no mapping from the
# original transcription.
utt_trans = [phone for phone in utt_trans
if phone not in to_remove]
new_utt_trans = map(lambda x: mapping[x], utt_trans)
print(uttid, ' '.join(new_utt_trans))
if __name__ == '__main__':
run()
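
# Example invocation (file names are illustrative); the normalized transcription
# is written to stdout:
#   cat train.60.trans | python timit-norm-trans.py --map-60-48 phones.60-48-39.map > train.48.trans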
| 2.703125 | 3 |
modules/launch_module.py | BigFlubba/Reco-PC-Server | 1 | 12786591 | # Module: launch
# Description: Launches a custom shortcut in the shortcuts directory
# Usage: !launch [shortcut]
# Dependencies: os, time, glob
import os, configs,time
from lib.helpers import checkfolder
from lib.reco_embeds import recoEmbeds as rm
from glob import glob
async def launch(ctx,client, shortcut=None):
p=configs.BOT_PREFIX
fileOpened=False
checkfolder()
if configs.operating_sys == "Windows":
if shortcut!="":
if shortcut.isnumeric():
msg=await rm.msg(ctx,f"**Opening File No: {shortcut}**",color=rm.color('colorforWaitingMsg'))
elif shortcut=="list":
msg=await ctx.send("> Gathering files from **Shortcut Folder**.")
else:
msg=await rm.msg(ctx,f"Searching **{shortcut.capitalize()}**",color=rm.color('colorforWaitingMsg'))
elif shortcut=="":
await rm.msg(ctx,f'''**Help - {p}launch**
Using launch command you can easily open any application or file which are available in your Reco's **Shortcut folder**.
**Commands:**
```{p}launch list
{p}launch open
{p}launch File_Number
{p}launch File_Name```
**🎬 YouTube**
**[How to use {p}launch in {client.user.name}?](https://youtu.be/-b-7-8oK1tI)**''')
return
shortcutFolderPath=configs.RECO_PATH+"/shortcuts/*"
files = glob(shortcutFolderPath)
print(len(files))
print(files)
time.sleep(1)
if len(files)!=0:
folderExtensions=set([f".{e.split('.')[-1]}" for e in files])
folderFileNames=[f"{f.split(chr(92))[-1]}" for f in files]
print(folderExtensions)
else:
await msg.delete()
await rm.msg(ctx,f"**Shortcut Folder is Empty!**\n\n**Path**: {shortcutFolderPath}",rm.color('colorforError'))
return
if shortcut=="list":
await msg.delete()
filenames=f"Files Count: **{len(files)}** \n\n"+"\n".join([f"**{n}** - **{f.split(chr(92))[-1].replace('_',f'{chr(92)}_')}**" for n,f in enumerate(files)])
await rm.extendableMsg(ctx,filenames)
elif shortcut.isnumeric():
if int(shortcut)<len(files):
await rm.editMsg(ctx,msg,f"**Opening {files[int(shortcut)].split(chr(92))[-1]}**...")
os.startfile(files[int(shortcut)])
else:
await rm.editMsg(ctx,msg,f"**❌ Invalid File Number!**\n\nTry:\n**{p}launch list**",color=rm.color('colorforError'))
elif shortcut!="":
if shortcut!=None:
for e in folderExtensions:
if (os.path.isfile("shortcuts/" + shortcut + e)):
await rm.editMsg(ctx,msg,f'**Opening {shortcut.capitalize() }{e}**...')
os.startfile("shortcuts\\" + shortcut + e)
fileOpened=True
break
elif shortcut.__contains__("."):
if (os.path.isfile("shortcuts/" + shortcut)):
await rm.editMsg(ctx,msg,f'**Opening {shortcut.capitalize()}**...')
os.startfile("shortcuts\\" + shortcut)
fileOpened=True
break
if not fileOpened:
for f in folderFileNames:
file=f.lower()
print("File Finder: ",shortcut,"->",file)
if file.__contains__(shortcut.lower()):
index= folderFileNames.index(f)
await rm.editMsg(ctx,msg,f'**Opening {files[index].split(chr(92))[-1]}**...')
os.startfile(files[index])
fileOpened=True
break
if not fileOpened:
await rm.editMsg(ctx,msg,"**No such file in your shortcuts folder.**",color=rm.color('colorforError'))
else:
await ctx.send("Module not yet supported on Linux and macOS") | 2.890625 | 3 |
pre_poetry/enum_annotator.py | noelmcloughlin/linkml-model-enrichment | 6 | 12786592 | #!/usr/bin/env python3
from __future__ import print_function
import json
import sys
import urllib.error
import urllib.parse
import urllib.request
from strsimpy.cosine import Cosine
import yaml
import re
import pandas as pds
import requests
import click
import logging
import click_log
import random
logger = logging.getLogger(__name__)
click_log.basic_config(logger)
pds.set_option('display.expand_frame_repr', False)
global inferred_model, ecg, opg, rrg, qfg, mdg, omg
ecg = None
failures = []
cols2display = ['enum_class', 'orig_enum', 'query', 'obo_id', 'pref_lab',
'name', 'cosine_dist', 'dist_ok', 'type', 'scope', 'rank']
success_frame = pds.DataFrame(columns=cols2display)
# MIN CHARACTERS FOR SEARCH NOT BEING ENFORCED
# TODO write mapped terms back in as meanings
# give option for overwriting?
# TODO all user to specify enum classes to process
# when verbose, stderr gets status and debugging info
# stdout gets the modified model as yaml and should be redirected to a file
# OLS dataframe structure not identical to previous BP dataframes:
# different columns
# BP shows one best row
# OLS lists up to N best
# not filtering out small queries in OLS approach yet
# (OLS approach?) neither handling nor optimizing for repeat values
# not merging results back into model yet
# examples of previously challenging mappings
# # bicarbonate
# # term_iri = 'https://www.ebi.ac.uk/ols/api/ontologies/chebi/terms/http%253A%252F%252Fpurl.obolibrary.org%252Fobo%252FCHEBI_32139'
# # fungus
# # term_iri = 'https://www.ebi.ac.uk/ols/api/ontologies/ncbitaxon/terms/http%253A%252F%252Fpurl.obolibrary.org%252Fobo%252FNCBITaxon_33169'
# # sars-cov-2
# # term_iri = 'https://www.ebi.ac.uk/ols/api/ontologies/ncbitaxon/terms/http%253A%252F%252Fpurl.obolibrary.org%252Fobo%252FNCBITaxon_2697049'
# # <NAME> T7
# # # http://purl.obolibrary.org/obo/NCBITaxon_10760
# # term_iri = 'https://www.ebi.ac.uk/ols/api/ontologies/ncbitaxon/terms/http%253A%252F%252Fpurl.obolibrary.org%252Fobo%252FNCBITaxon_10760'
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# TODO add filter based on min_search_chars_param?
# no longer requiring a minimum search length
def one_enum_to_ols_frame_list(permitteds, one_enum_param):
global failures
global success_frame
per_enum_frame = pds.DataFrame(columns=cols2display)
for orig_enum in permitteds:
temp = one_enum_param + ": " + orig_enum
logger.info(temp)
# tidied_enum = re.sub(r'[_,.\-;@#?!&$ ]+', ' ', orig_enum)
if ecg is not None:
tidied_enum = re.sub(r'[' + ecg + ']+', ' ', orig_enum)
ontologies_phrase = ''
if len(opg) > 1:
ontologies_phrase = 'ontology=' + opg.lower()
qf_phrase = ''
if len(qfg) > 1:
qf_phrase = 'queryFields=' + qfg.lower()
# requiring local loses EROs annotations of SV40
# 'local=true' + '&' + \
request_string = 'http://www.ebi.ac.uk/ols/api/search?q=' + \
urllib.parse.quote(tidied_enum) + '&' + \
'type=class' + '&' + \
'exact=false' + '&' + \
ontologies_phrase + "&" + \
'rows=' + str(rrg) + '&' + \
qf_phrase
logger.debug(request_string)
response_param = requests.get(request_string)
ols_string_search_res_j = response_param.json()
ols_string_search_res_frame = pds.DataFrame(ols_string_search_res_j['response']['docs'])
ols_string_search_res_frame.insert(0, "query", tidied_enum)
# did the string search get any result rows?
r, c = ols_string_search_res_frame.shape
if r == 0:
no_search_res_dict = {'description': '', 'id': orig_enum, 'iri': '', 'is_defining_ontology': '',
'label': '', 'obo_id': '', 'ontology_name': '', 'ontology_prefix': '',
'short_form': '', 'type': ''}
no_search_res_frame = pds.DataFrame([no_search_res_dict])
ols_string_search_res_frame = ols_string_search_res_frame.append(no_search_res_frame)
failures.append(orig_enum)
ols_string_search_res_frame['query'] = orig_enum
inner_cosine_obj = Cosine(1)
annotations_frame = pds.DataFrame(columns=['name', 'obo_id', 'scope', 'type', 'xrefs'])
for ols_string_search_res_row in ols_string_search_res_frame.itertuples(index=False):
once = urllib.parse.quote(ols_string_search_res_row.iri, safe='')
twice = urllib.parse.quote(once, safe='')
# build url from base
term_retr_base = 'http://www.ebi.ac.uk/ols/api/ontologies/'
term_retr_assembled = term_retr_base + ols_string_search_res_row.ontology_name + '/terms/' + twice
term_details = requests.get(term_retr_assembled)
term_json = term_details.json()
has_label = 'label' in set(term_json.keys())
if has_label:
logger.debug(term_retr_assembled)
temp = term_json['label']
logger.debug(temp)
label_frame = pds.DataFrame([[term_json['label'], 'label', 'label', '']],
columns=['name', 'scope', 'type', 'xrefs'])
label_frame['obo_id'] = term_json['obo_id']
label_frame['pref_lab'] = term_json['label']
annotations_frame = annotations_frame.append(label_frame, ignore_index=True)
# also get other properties?
has_synonyms = 'obo_synonym' in set(term_json.keys())
if has_synonyms:
obo_syn_json = term_json['obo_synonym']
obo_syn_frame = pds.DataFrame(obo_syn_json)
obo_syn_frame['obo_id'] = term_json['obo_id']
obo_syn_frame['pref_lab'] = term_json['label']
annotations_frame = annotations_frame.append(obo_syn_frame, ignore_index=True)
# # don't process every kind of annotation, like genetic code
# has_annotations = 'annotation' in set(term_json.keys())
# if has_annotations:
# obo_ano_json = term_json['annotation']
# for anokey in obo_ano_json.keys():
# for keyval in obo_ano_json[anokey]:
# new_row = {'name': keyval,
# 'obo_id': term_json['obo_id'],
# 'scope': anokey,
# 'type': 'annotation',
# 'xrefs': '',
# 'pref_lab': term_json['label']}
# annotations_frame = annotations_frame.append(new_row, ignore_index=True)
annotations_row_count = len(annotations_frame.index)
if annotations_row_count == 0:
logger.warning('NO ANNOTATIONS')
manual_row = pds.Series(['', '', '', '', '', ''])
row_df = pds.DataFrame([manual_row], columns=['name', 'obo_id', 'scope', 'type', 'xrefs', 'pref_lab'])
annotations_frame = pds.concat([row_df, annotations_frame], ignore_index=True)
failures.append(orig_enum)
annotations_frame['enum_class'] = one_enum_param
annotations_frame['query'] = tidied_enum
annotations_frame['orig_enum'] = orig_enum
# check whether anny of the annotation on any of the hits have an
# acceptable cosine string distance
annotations_frame['name'] = annotations_frame['name'].fillna('')
annotations_frame['cosine_dist'] = \
annotations_frame.apply(lambda row: inner_cosine_obj.distance(tidied_enum.strip().lower(),
row['name'].strip().lower()),
axis=1)
annotations_frame = annotations_frame.sort_values('cosine_dist')
annotations_frame['dist_ok'] = annotations_frame['cosine_dist'] <= mdg
annotations_frame['rank'] = list(range(1, len(annotations_frame.index)+1))
# annotations_frame = annotations_frame[
# ['enum_class', 'orig_enum', 'query', 'name', 'cosine_dist', 'dist_ok',
# 'obo_id', 'pref_lab', 'type', 'scope']]
annotations_frame = annotations_frame[cols2display]
# do something with xrefs?
logger.debug(annotations_frame)
# get best acceptable row
acceptable_cosine = annotations_frame[annotations_frame['cosine_dist'] <= mdg]
acceptable_row_count = len(acceptable_cosine.index)
if acceptable_row_count > 0:
best_acceptable = acceptable_cosine.iloc[0]
success_frame = success_frame.append(best_acceptable)
# check if permitted value already has a meaning
meaning_search = list(inferred_model['enums'][one_enum_param]['permissible_values'][orig_enum].keys())
if 'meaning' in meaning_search:
has_meaning = True
else:
has_meaning = False
meaningless = not has_meaning
if meaningless or omg:
# insert meaning
inferred_model['enums'][one_enum_param]['permissible_values'][orig_enum]['meaning'] = best_acceptable[
'obo_id']
inferred_model['enums'][one_enum_param]['permissible_values'][orig_enum]['description'] = \
best_acceptable['pref_lab']
else:
temp = 'NO ACCEPTABLE MAPPINGS FOR ' + one_enum_param + " " + orig_enum
logger.warning(temp)
# sort and make unique
failures.append(orig_enum)
per_enum_frame = per_enum_frame.append(annotations_frame)
# I think there will be one success frame for each enum
success_frame = success_frame[cols2display]
success_frame = success_frame[list(annotations_frame.columns)]
logger.info(success_frame)
return per_enum_frame
def all_enums_to_ols(inferred_model_param, the_enums_param):
multi_enum_frame = pds.DataFrame(columns=cols2display)
for one_enum in the_enums_param:
permitteds = get_one_enum_class(inferred_model_param, one_enum)
one_enum_class_list = one_enum_to_ols_frame_list(permitteds, one_enum)
multi_enum_frame = multi_enum_frame.append(one_enum_class_list)
return multi_enum_frame
def get_one_enum_class(inferred_model_param, enum_class_param):
inferred_enums = inferred_model_param['enums'][enum_class_param]['permissible_values']
inferred_keys = list(inferred_enums.keys())
inferred_keys.sort(key=str.casefold)
return inferred_keys
def get_enum_list(inferred_model_param):
inner_enums = list(inferred_model_param['enums'].keys())
return inner_enums
def case_fold_list_sort(input_list):
output_list = input_list
output_list.sort(key=str.casefold)
return output_list
def read_yaml_model(modelfile_param):
with open(modelfile_param) as file:
inner_inferred_model = yaml.load(file, Loader=yaml.FullLoader)
return inner_inferred_model
# don't forget type field on options ???
# synbio example (without redirection of yaml stdout):
# ./linkml_model_enrichment/mixs_qd_bp_or_ols.py \
# --modelfile target/Ontology_example_20210317_P2B1_allmods_categorytype_different_scores_per_mod-1.yaml \
# --ontoprefix NCBItaxon,SO \
# --enum_list species_enum,host_organism_enum,category_enum,type_enum,type_long_enum \
# --verbose
@click.command()
@click_log.simple_verbosity_option(logger)
@click.option('--modelfile', '-f',
help='Path to a YAML linkml file containing enumerated values.',
required=True,
type=click.Path(exists=True),
)
@click.option('--tabular_outputfile', '-t',
default='mappings_log.tsv',
help='A tsv dump of all search results will be written to this file.',
show_default=True,
type=click.Path()
)
@click.option('--ontoprefix', '-p',
default='NCBITaxon,SO,ENVO,PATO,GO,OBI',
help='comma-separated list of (abbreviated) ontologies to search over.',
show_default=True
)
@click.option('--enum_list', '-e',
default='',
help='Comma-separated list of enums to search with. Defaults to all enums.',
show_default=False
)
# the choice and order of the query_fields has a big impact on what terms are returned
# overwrite the model's description with preferred term?
# OLS defaults are {label, synonym, description, short_form, obo_id, annotations, logical_description, iri}
@click.option('--query_fields', '-q',
default='',
help="Comma-separated list of term properties to include in string similarity calculation. " +
"Defaults to label,synonym,description,short_form,obo_id,annotations,logical_description,iri.",
show_default=False
)
# replaced_chars impacts returned fields too
# 'SARS-CoV-2' fails if the hyphens are escaped or ???
@click.option('--replaced_chars', '-c',
default='\.\_\- ',
help='Characters to replace with whitespace.',
show_default=True
)
@click.option('--min_search_chars', '-n',
default=2,
help='TEMPORARILY DISABLED. Queries with fewer characters will not be submitted in the search.',
show_default=True
)
@click.option('--row_req', '-r',
default=5,
help='Requested number of search results.',
show_default=True
)
@click.option('--maxdist', '-x',
default=0.05,
help="Maximum string distance between query and best matching term's best matching property.",
show_default=True
)
@click.option('--overwite_meaning', '-m',
help="Should existing enum meanings and descriptions be overwritten?",
is_flag=True
)
@click.option('--search_engine', '-s',
default='OLS',
help="BioPortal option has been temporarily disabled.",
show_default=True
)
def clickmain(modelfile, tabular_outputfile, ontoprefix, enum_list, query_fields, replaced_chars, min_search_chars,
row_req, maxdist, overwite_meaning, search_engine):
"""Uses web-based ontology lookup tools to map the permitted values of enums from linkml files to CURIES.
Optionally overwrites the meaning with a CURIE and the description with a preferred label.
Writes the resulting YAML to STDOUT."""
global failures, inferred_model, ecg, opg, rrg, qfg, mdg, omg
inferred_model = read_yaml_model(modelfile)
ecg = replaced_chars
opg = ontoprefix
rrg = row_req
qfg = query_fields
mdg = maxdist
omg = overwite_meaning
requested_enums = enum_list.split(",")
sorted_requested = case_fold_list_sort(requested_enums)
    available_enums = get_enum_list(inferred_model)
    sorted_available = case_fold_list_sort(available_enums)
    logger.info(sorted_available)
    if len(enum_list) == 0 or len(enum_list[0]) == 0:
        settled_enums = sorted_available
    else:
        settled_enums = sorted_requested
if search_engine == 'OLS':
all_ols_results = all_enums_to_ols(inferred_model, settled_enums)
logger.info("MAPPING FAILURES")
logger.info(list(set(failures)))
all_ols_results.to_csv(tabular_outputfile, sep='\t')
yaml.safe_dump(inferred_model, sys.stdout, default_flow_style=False)
elif search_engine == 'BioPortal':
logger.warning('BioPortal search temporarily disabled')
return
else:
logger.warning('No valid search engine specified')
if __name__ == '__main__':
clickmain(auto_envvar_prefix='ENUMENRICH')
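
# Example invocation (file names are illustrative); the enriched YAML goes to
# stdout and the per-term search log to the --tabular_outputfile TSV:
#   python enum_annotator.py -f model.yaml -p NCBITaxon,SO -e species_enum -m > enriched.yaml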
| 2.1875 | 2 |
surrortg/devices/udp/udp_switch.py | SurrogateInc/surrortg-sdk | 21 | 12786593 | import asyncio
import logging
import struct
from surrortg.inputs import Switch
from . import UdpInput
class UdpSwitch(Switch, UdpInput):
"""Class for udp-controlled switch.
:param cmd: udp byte that identifies the control id
:type cmd: int
:param multiplier: multiplier of the value, defaults to 1.0
:type multiplier: float, optional
:param repeat_commands: defines if commands should be repeated,
defaults to False
:type repeat_commands: bool, optional
"""
def __init__(self, cmd, repeat_commands=False):
super().__init__()
self.cmd = cmd
self.value_off = 0
self.value_on = 1
self.should_repeat = repeat_commands
self.current_val = self.value_off
self.repeat_task = None
async def on(self, seat):
self._handle_command(self.value_on, seat)
async def off(self, seat):
self._handle_command(self.value_off, seat)
def _handle_command(self, val, seat):
self._send_command(val, seat)
if self.should_repeat:
self.current_val = val
if self.repeat_task is not None:
self.repeat_task.cancel()
self.repeat_task = asyncio.create_task(
self._repeat_command(10, 0.2, seat)
)
def _send_command(self, val, seat):
"""Sends a udp command to the endpoint of the seat
:param val: switch position value, 0 or 1
:type val: int
:param seat: Robot seat
:type seat: int
"""
assert val == 0 or val == 1
if seat not in self.endpoints:
logging.warning(
f"Endpoint not found for seat {seat}, not sending command."
)
return
endpoint = self.endpoints[seat]
logging.debug(
f"Running udp switch {self.cmd} of seat {seat} with value {val}"
)
if not endpoint.closed:
try:
endpoint.send(struct.pack("BB", self.cmd, val))
except OSError as e:
logging.warning(
f"Failed to send value {val} to seat {seat} "
f"command {self.cmd}: {e}"
)
else:
logging.debug(
f"Did not send value {val} to seat {seat} "
f"command {self.cmd}, was closed"
)
async def _repeat_command(self, num_sends, interval, seat):
"""Calls _send_command on repeat a specific number of times
:param num_sends: number of times _send_command is called
:type num_sends: int
:param interval: number of seconds between command sends
:type interval: float
:param seat: Robot seat
:type seat: int
"""
for _ in range(num_sends):
await asyncio.sleep(interval)
self._send_command(self.current_val, seat)
| 2.78125 | 3 |
src/convert.py | vinid223/gtkoutkeeptomd | 0 | 12786594 | import argparse
import errno
import json
import logging
import os
import textwrap
from os import walk
def load_json_file(path: str):
f = open(path, "r")
data = f.read()
f.close()
return json.loads(data)
def save_markdown_file(path: str, data):
f = open(path, "w")
f.writelines(data)
f.close()
def convert_list_content(list_content):
data = ""
for item in list_content:
checked = "x" if item["isChecked"] else " "
text = item["text"]
data = data + f"- [{checked}] {text}\n"
return data
def convert_to_markdown(json_data, note_name):
archived = json_data["isArchived"]
data = f"# {note_name}\n\n"
if "listContent" in json_data:
data = data + convert_list_content(json_data["listContent"])
if "textContent" in json_data:
data = data + json_data["textContent"]
return archived, data
def set_path_to_file_names(dir, filenames):
new_files = []
for file in filenames:
new_files.append(os.path.join(dir, file))
return new_files
def get_folder_files(path, recursive):
dirpath, dirnames, filenames = next(walk(path), (None, None, []))
filenames = set_path_to_file_names(path, filenames)
if recursive and dirnames:
for dir in dirnames:
filenames = filenames + get_folder_files(os.path.join(path, dir), recursive)
return filenames
def convert_file(
input,
output=None,
archived=False,
archivedoutput=None,
from_folder=False,
force_file=False,
):
print(f"\n\nConverting file {input}")
file_name, extension = os.path.splitext(os.path.basename(input))
if extension != ".json" and not force_file:
print(
"Skipping file, not json format. Use flag --force to force the file to be used. WARNING: This script may throw an error."
)
return
json_data = load_json_file(input)
note_name = json_data["title"] if json_data["title"] else file_name
note_archived, markdown = convert_to_markdown(json_data, note_name)
print(f"Archived: {note_archived}")
print(f"Note name: {note_name}")
print(f"File name: {file_name}")
if from_folder:
archive = "archived" if archived and note_archived else ""
        archive = archivedoutput if archivedoutput and archived and note_archived else archive
output_file = os.path.join(output, archive, f"{note_name}.md")
else:
output_file = output if output else f"{note_name}.md"
print(f"Outputing file to {output_file}")
save_markdown_file(output_file, markdown)
def convert_folder(
path,
recursive=False,
output=None,
archived=False,
archivedoutput=None,
force_file=False,
):
filenames = get_folder_files(path, recursive)
for file in filenames:
try:
convert_file(file, output, archived, archivedoutput, True, force_file)
except Exception as e:
print(f"Error converting file: {file}")
logging.error(e)
description_lines = [
"Convert Google Takeout Keep files to Markdown",
"",
"\tconvert.py --input some_exported_file.json --output converted.md",
"",
"\tconvert.py --input /path/to/input --output /path/to/output -r",
]
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(
"""\
Convert Google Takeout Keep files to Markdown.
----------------------------------------------
convert.py -i some_exported_file.json --o converted.md
convert.py -i /path/to/input -o /path/to/output -r -a
"""
),
)
parser.add_argument(
"-i",
"--input",
dest="input",
type=str,
required=True,
help="Path to input file or directory.",
)
parser.add_argument(
"-o", "--output", dest="output", type=str, help="Path to output file or directory."
)
parser.add_argument(
"-r",
"--recursive",
dest="recursive",
action="store_true",
help="Directory only. Enable recursive convertion for directories. Not used for individual files. The subdirectories structures will be lost in the output folder.",
)
parser.add_argument(
"-a",
"--archived",
dest="archived",
action="store_true",
help='Directory only. Separate archived notes to a separate directory. Default directory "archived"',
)
parser.add_argument(
"-f",
"--force",
dest="force_file",
action="store_true",
help="Force the file to be read if the extension is not .json. This may break the conversion.",
)
parser.add_argument(
"--archivedoutput",
dest="archivedoutput",
type=str,
help="Path to archived output directory.",
)
if __name__ == "__main__":
args = parser.parse_args()
if os.path.isdir(args.input):
if args.output:
try:
os.mkdir(args.output)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
if args.archived:
archive_path = (
os.path.join(args.output, args.archivedoutput)
if args.archivedoutput
else os.path.join(args.output, "archived")
)
try:
print(f"Making dir {archive_path}")
os.mkdir(archive_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
convert_folder(
args.input,
args.recursive,
args.output,
args.archived,
args.archivedoutput,
args.force_file,
)
elif os.path.isfile(args.input):
if args.recursive:
print("Recursive flag will be ignored. Input not a folder.")
convert_file(args.input, args.output)
else:
parser.error("The input parameter is not a folder or usable file")
| 2.953125 | 3 |
redpanda/orm.py | amancevice/redpanda | 24 | 12786595 | """
Custom ORM behavior.
"""
import pandas
import sqlalchemy.orm
from redpanda import dialects
class Query(sqlalchemy.orm.Query):
"""
RedPanda SQLAlchemy Query.
Adds the frame() method to queries.
"""
def __init__(self, entities, session=None, read_sql=None):
super(Query, self).__init__(entities, session)
if read_sql is None:
try:
entity_zero, *_ = entities
read_sql = entity_zero.__read_sql__
except (AttributeError, TypeError, ValueError):
read_sql = {}
self._read_sql = read_sql
def frame(self, **read_sql):
"""
Return RedPanda pandas.DataFrame instance.
"""
# Get conecion
conn = self.session.connection()
# Get SQL+params from engine
sql, params = dialects.statement_and_params(conn.engine, self)
# Get read_sql arguments
read_sql = {**self._read_sql, **{'params': params}, **read_sql}
# Read SQL into DataFrame
dataframe = pandas.read_sql(str(sql), conn.engine, **read_sql)
if read_sql.get('columns') is not None:
dataframe = dataframe[read_sql['columns']]
return dataframe
class Session(sqlalchemy.orm.Session):
"""
RedPanda SQLAlchemy Session.
Adds add_dataframe() method to session.
"""
def add_dataframe(self, cls, dataframe, parse_index=False):
"""
Return a generator for SQLAlchemy models from a pandas.DataFrame.
:param class cls: Target model for DataFrame
:param pandas.DataFrame dataframe: pandas.DataFrame to parse
:param boolean parse_index: parse the index as a model attr
:returns iter: Generator of SQLAlchemy objects.
"""
for idx, row in dataframe.iterrows():
attrs = row.dropna().to_dict()
if parse_index is True:
if dataframe.index.name is None:
raise ValueError('Cannot parse unnamed index')
attrs[dataframe.index.name] = idx
self.add(cls(**attrs))
def sessionmaker(class_=Session, query_cls=Query, **kwargs):
"""
Override of sqlalchemy.orm.sessionmaker to use RedPanda Session/Query.
"""
return sqlalchemy.orm.sessionmaker(
class_=class_, query_cls=query_cls, **kwargs)
def within(self, index):
"""
Like between() but takes a pandas index object.
:param pandas.Index index: pandas index
:returns self: result of between() with start/end as the ends of the index.
"""
try:
start = index.min().start_time
end = index.max().end_time
except AttributeError:
start = index.min()
end = index.max()
return self.between(start, end)
sqlalchemy.orm.attributes.InstrumentedAttribute.within = within
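

# Usage sketch (engine and model names are illustrative):
#
#     engine = sqlalchemy.create_engine("sqlite://")
#     Session = sessionmaker(bind=engine)
#     session = Session()
#     df = session.query(Widget).filter(Widget.kind == "sprocket").frame()
#     session.add_dataframe(Widget, df, parse_index=False)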
| 2.875 | 3 |
Unit5/HomeWorks/p1.py | yuhao1998/PythonStudy | 0 | 12786596 | '''
Arbitrary cumulative product
Description
Based on the programming template, fill in code to compute the product of an arbitrary number of input values.
Note: you only need to add one or more lines of code where ... is marked.
'''
def cmul(a, *b):
m = a
for i in b:
m *= i
return m
print(eval("cmul({})".format(input())))
'''
Two things to note about this program:
1. How to define a function with an unlimited number of arguments: in cmul, b collects every input parameter other than a;
2. How to call a function from a string: combining "cmul()" with eval() provides a lot of flexibility.
''' | 3.6875 | 4 |
tests/apitest/test_user_api.py | Eternity-labs/eternity-backend-server | 0 | 12786597 | <filename>tests/apitest/test_user_api.py
from . import ApiTestCase
class TestUserApi(ApiTestCase):
def test_accountinfo(self):
payload = self.client.get("/user/accountinfo/0x123124")
AccountId = payload["AccountId"]
assert AccountId | 2.3125 | 2 |
client.py | ucipass/sio | 0 | 12786598 | <gh_stars>0
from socketIO_client_nexus import SocketIO, LoggingNamespace
import time
host = "127.0.0.1"
port = 8080
def sioCallback(*args):
print('socket.io reply', args, "on:", time.strftime('%X'))
socketIO = SocketIO(host, port, LoggingNamespace)
while True:
socketIO.emit('echo', {'xxx': 'yyy'}, sioCallback)
socketIO.wait_for_callbacks(seconds=1)
time.sleep(1)
| 2.8125 | 3 |
frappe/tests/test_webform.py | oryxsolutions/frappe | 0 | 12786599 | <gh_stars>0
import unittest
import frappe
from frappe.utils import set_request
from frappe.website.serve import get_response
from frappe.www.list import get_list_context
class TestWebform(unittest.TestCase):
def test_webform_publish_functionality(self):
edit_profile = frappe.get_doc("Web Form", "edit-profile")
# publish webform
edit_profile.published = True
edit_profile.save()
set_request(method="GET", path="update-profile")
response = get_response()
self.assertEqual(response.status_code, 200)
# un-publish webform
edit_profile.published = False
edit_profile.save()
response = get_response()
self.assertEqual(response.status_code, 404)
def test_get_context_hook_of_webform(self):
create_custom_doctype()
create_webform()
# check context for apps without any hook
context_list = get_list_context("", "Custom Doctype", "test-webform")
self.assertFalse(context_list)
# create a hook to get webform_context
set_webform_hook(
"webform_list_context",
"frappe.www._test._test_webform.webform_list_context",
)
# check context for apps with hook
context_list = get_list_context("", "Custom Doctype", "test-webform")
self.assertTrue(context_list)
def create_custom_doctype():
frappe.get_doc(
{
"doctype": "DocType",
"name": "Custom Doctype",
"module": "Core",
"custom": 1,
"fields": [{"label": "Title", "fieldname": "title", "fieldtype": "Data"}],
}
).insert(ignore_if_duplicate=True)
def create_webform():
frappe.get_doc(
{
"doctype": "Web Form",
"module": "Core",
"title": "Test Webform",
"route": "test-webform",
"doc_type": "Custom Doctype",
"web_form_fields": [
{
"doctype": "Web Form Field",
"fieldname": "title",
"fieldtype": "Data",
"label": "Title",
}
],
}
).insert(ignore_if_duplicate=True)
def set_webform_hook(key, value):
from frappe import hooks
# reset hooks
for hook in "webform_list_context":
if hasattr(hooks, hook):
delattr(hooks, hook)
setattr(hooks, key, value)
frappe.cache().delete_key("app_hooks")
| 2.046875 | 2 |
hw2/Duelling.py | suyash622/Random | 0 | 12786600 | import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np
import argparse
import random
import gym
import sys
from collections import deque
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model
from keras.utils import plot_model
env = gym.make('MountainCar-v0')
state_space=env.observation_space.shape[0]
action_s=env.action_space.n
#Hyperparameters
learning_rate=0.001
episodes=1000000
epsilon_start=0.5
epsilon_end=0.05
#decay=(epsilon_start-epsilon_end)/100000
decay = 0.9
batch_size=32
max_steps=200
gamma=1.0
hidden_layer=50
class QNetwork():
def __init__(self,learning_rate,action_space,input_dim):
# self.model= Sequential()
# self.model.add(Dense(units=30,activation='relu',input_dim=state_space,kernel_initializer='he_uniform'))
# self.model.add(Dense(units=30,activation='relu',kernel_initializer='he_uniform'))
# self.model.add(Dense(units=30,activation='relu',kernel_initializer='he_uniform'))
# self.model.add(Dense(units=action_s,activation='linear',kernel_initializer='he_uniform'))
self.input = Input(shape=(input_dim,))
self.x=Dense(hidden_layer,activation='relu')(self.input)
# self.x=keras.layers.BatchNormalization(axis=-1)(self.x)
self.x=Dense(hidden_layer,activation='relu')(self.x)
# self.x=keras.layers.BatchNormalization(axis=-1)(self.x)
self.x=Dense(hidden_layer,activation='relu')(self.x)
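        # Duelling aggregation assembled below with Lambda layers:
        #   Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))
        # Both the value and advantage streams branch off the shared hidden stack above.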
self.value= Dense(1,activation='linear',name='value')(self.x)
self.value1=self.value
self.advantage = Dense(action_s,activation='linear',name='advantage')(self.x)
self.advantage_mean = keras.layers.Lambda(lambda x:K.mean(x,axis=-1,keepdims=True))(self.advantage)
self.advantage_mean1 = self.advantage_mean
# self.value=keras.layers.RepeatVector(2)
# print('Value',self.value.shape)
# self.value = keras.layers.Lambda(lambda x:K.equal(x,axis=-1,keepdims=True))(self.value)
i=1
while(i<action_s):
self.value=keras.layers.Lambda(lambda x:K.concatenate(x, axis=-1))([self.value,self.value1])
self.advantage_mean=keras.layers.Lambda(lambda x:K.concatenate(x,axis=-1))([self.advantage_mean1,self.advantage_mean])
i+=1
# print('Adv',self.keras.backend.identity.shape)
# self.advantage_mean=keras.layers.Lambda(lambda x:K.identity(x))(self.advantage_mean)
# print('Val1',self.value1.shape)
self.advantage_subtract_mean = keras.layers.Subtract()([self.advantage,self.advantage_mean])
# print('Adv su',self.advantage_mean.shape)
self.added = keras.layers.Add()([self.advantage_subtract_mean,self.value])
# print("Added",self.added.shape)
# equivalent to added = keras.layers.add([x1, x2])
# self.out = Dense(action_s,activation='linear')(self.added)
# print("out",self.out.shape)
self.optimizer=keras.optimizers.Adam(lr=learning_rate)
self.model = Model(inputs=self.input, outputs=self.added)
self.model.compile(loss='mse',optimizer=self.optimizer)
plot_model(self.model, to_file='Duelling2.png')
def save_model_weights(self, fname):
self.model.save_weights(fname)
    def load_model(self, model_file):
        # Keras models have no .load() method; load a saved model via keras.models.load_model
        self.model = keras.models.load_model(model_file)
def load_model_weights(self,fname):
self.model.load_weights(fname)
class DQN_Agent():
# In this class, we will implement functions to do the following.
# (1) Create an instance of the Q Network class.
# (2) Create a function that constructs a policy from the Q values predicted by the Q Network.
# (a) Epsilon Greedy Policy.
# (b) Greedy Policy.
# (3) Create a function to train the Q Network, by interacting with the environment.
# (4) Create a function to test the Q Network's performance on the environment.
# (5) Create a function for Experience Replay.
def __init__(self, environment_name, render=False):
self.env = environment_name
self.net=QNetwork(learning_rate,action_s,state_space)
self.prev_net=QNetwork(learning_rate,action_s,state_space)
self.prev_net.model.set_weights(self.net.model.get_weights())
self.q_values=np.zeros([batch_size,action_s])
self.memory=Replay_Memory()
self.burn_in_memory()
def epsilon_greedy_policy(self, q_values,epsilon):
if (epsilon>np.random.random()):
action=random.randrange(action_s)
else:
action=np.argmax(q_values[0])
return action
def greedy_policy(self, q_values):
action=np.argmax(q_values)
return action
def train(self):
# In this function, we will train our network.
# If training without experience replay_memory, then you will interact with the environment
# in this function, while also updating your network parameters.
# If you are using a replay memory, you should interact with environment here, and store these
# transitions to memory, while also updating your model.
epsilon = epsilon_start
for i in range(1000000):
state = env.reset()
state=np.reshape(state,[1,state_space])
total_reward=0
step=0
while step<max_steps:
env.render()
step+=1
q_values = self.net.model.predict(state)
action=self.epsilon_greedy_policy(q_values,epsilon)
new_state,reward,done, _ = env.step(action)
new_state=np.reshape(new_state,[1,state_space])
self.memory.append([state,action,reward,done,new_state])
minibatch=self.memory.sample_batch()
batch_states=np.zeros((batch_size,state_space))
batch_next_states=np.zeros((batch_size,state_space))
t_int=0
for batch_state, batch_action, batch_reward, batch_done, batch_new_state in minibatch:
batch_states[t_int]=batch_state
batch_next_states[t_int]=batch_new_state
t_int+=1
batch_q_values=self.net.model.predict(batch_states)
batch_prev_q_values=self.prev_net.model.predict(batch_next_states)
t_int=0
for batch_state, batch_action, batch_reward, batch_done, batch_new_state in minibatch:
if batch_done:
temp=0
else:
temp=gamma*(np.amax(batch_prev_q_values[t_int]))
batch_q_values[t_int][batch_action] = batch_reward+temp
t_int+=1
self.net.model.fit(batch_states,batch_q_values,batch_size=batch_size,epochs=1,verbose=0)
epsilon*=decay
if epsilon<epsilon_end:
epsilon = epsilon_end
total_reward+=reward
state=new_state
if done:
break
self.prev_net.model.set_weights(self.net.model.get_weights())
print(i,total_reward)
def test(self, model_file=None):
# Evaluate the performance of your agent over 100 episodes, by calculating cummulative rewards for the 100 episodes.
# Here you need to interact with the environment, irrespective of whether you are using a memory.
pass
def burn_in_memory(self):
state = env.reset()
state=np.reshape(state,[1,state_space])
for i in range(self.memory.burn_in):
action=random.randrange(action_s)
new_state, reward, done, _ = env.step(action)
new_state=np.reshape(new_state,[1,state_space])
self.memory.append([state,action,reward,done,new_state])
state=new_state
if done:
state=env.reset()
state=np.reshape(state,[1,state_space])
class Replay_Memory():
def __init__(self, memory_size=10000, burn_in=5000):
self.transitions =[]
self.memory_size=memory_size
self.burn_in = burn_in
def sample_batch(self, batch_size=32):
return random.sample(self.transitions,batch_size)
def append(self, transition):
if(len(self.transitions)<self.memory_size):
self.transitions.append(transition)
else:
idx=random.randint(1,self.memory_size-1)
# print(idx)
del self.transitions[idx]
self.transitions.append(transition)
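
# Editor's note: Replay_Memory.append evicts a uniformly random old transition once
# the buffer is full (randint starts at 1, so index 0 is never removed), rather than
# the strict FIFO eviction a collections.deque(maxlen=memory_size) would give.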
def parse_arguments():
parser = argparse.ArgumentParser(description='Linear Q network parser')
parser.add_argument('--env',dest='env',type=str)
parser.add_argument('--render',dest='render',type=int,default=0)
parser.add_argument('--train',dest='train',type=int,default=1)
parser.add_argument('--model',dest='model_file',type=str)
return parser.parse_args()
def main(args):
args = parse_arguments()
environment_name = args.env
# Setting the session to allow growth, so it doesn't allocate all GPU memory.
gpu_ops = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_ops)
sess = tf.Session(config=config)
# Setting this as the default tensorflow session.
keras.backend.tensorflow_backend.set_session(sess)
agent=DQN_Agent(environment_name)
# print(agent)
DQN_Agent.train(agent)
# You want to create an instance of the DQN_Agent class here, and then train / test it.
if __name__ == '__main__':
main(sys.argv)
| 2.484375 | 2 |
web/api_rest/mini_facebook/python_users_relationships_service_api_llano/controllers/PersonsApi.py | CALlanoR/virtual_environments | 0 | 12786601 | <gh_stars>0
from flask import Blueprint, request, current_app
from services.PersonsService import PersonsService
from flask import jsonify
persons_api = Blueprint('persons_api', __name__)
persons_service = PersonsService()
@persons_api.route('/persons/', methods=['POST'])
def add_person():
try:
_json = request.json
_id = _json['id']
_name = _json['name']
_email = _json['email']
_login = _json['login']
_password = _json['password']
# validate the received values
if _name and request.method == 'POST':
persons_service.add_person(int(_id), _name, _email, _login, _password)
return 'person with id: ' +_id +' inserted'
else:
return not_found()
except Exception as e:
print(e)
@persons_api.route('/persons', methods=['GET'])
def get_all_persons():
try:
        current_app.logger.info("in /persons")
rows = persons_service.get_all_persons()
resp = jsonify(rows)
resp.status_code = 200
return resp
except Exception as e:
print(e)
@persons_api.route('/persons/<int:personId>/friends', methods=['GET'])
def get_friends(personId):
try:
row = persons_service.get_friends(personId)
resp = jsonify(row)
resp.status_code = 200
return resp
except Exception as e:
print(e)
@persons_api.route('/persons/<string:name>/byName', methods=['GET'])
def get_person_by_name(name):
try:
row = persons_service.get_person_by_name(name)
resp = jsonify(row)
resp.status_code = 200
return resp
except Exception as e:
print(e)
@persons_api.route('/persons/<int:personId>/mayYouKnow', methods=['GET'])
def get_friends_from_my_friends(personId):
try:
row = persons_service.get_friends_from_my_friends(personId)
resp = jsonify(row)
resp.status_code = 200
return resp
except Exception as e:
print(e)
@persons_api.route('/persons/person1/<int:personId1>/person2/<int:personId2>', methods=['POST'])
def add_new_relationship(personId1, personId2):
try:
if personId1 and personId2:
persons_service.add_new_relationship(personId1, personId2)
return str(personId1)+' and '+str(personId2)+' are friends now.'
else:
return not_found()
except Exception as e:
print(e)
@persons_api.route('/persons/delete/person1/<int:personId1>/person2/<int:personId2>', methods=['POST'])
def delete_relationship(personId1, personId2):
try:
if personId1 and personId2:
persons_service.delete_relationship(personId1, personId2)
return str(personId1)+' and '+str(personId2)+' are not longer friends.'
else:
return not_found()
except Exception as e:
print(e)
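
# Routes exposed by this blueprint (editor's summary; the host and URL prefix depend
# on how the application registers persons_api):
#   POST /persons/                                    add a person (JSON body)
#   GET  /persons                                     list all persons
#   GET  /persons/<id>/friends                        friends of a person
#   GET  /persons/<name>/byName                       look a person up by name
#   GET  /persons/<id>/mayYouKnow                     friends of friends
#   POST /persons/person1/<id1>/person2/<id2>         create a friendship
#   POST /persons/delete/person1/<id1>/person2/<id2>  remove a friendship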
@persons_api.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found: ' + request.url,
}
resp = jsonify(message)
resp.status_code = 404
return resp | 2.890625 | 3 |
data/train/python/d961160e69c3b9c624baed9fdc6dfac21f4188e3urls.py | harshp8l/deep-learning-lang-detection | 84 | 12786602 | <filename>data/train/python/d961160e69c3b9c624baed9fdc6dfac21f4188e3urls.py<gh_stars>10-100
from django.conf.urls import patterns, include, url
from django.contrib import admin
from api import *
from tastypie.api import Api
v1_api = Api(api_name='v1')
v1_api.register(AddressResource())
v1_api.register(PersonResource())
v1_api.register(FormOfLegResource())
v1_api.register(TypeOfSocFormResource())
v1_api.register(SocialFormationResource())
v1_api.register(FiliaResource())
'''
v1_api.register(AdresaResource())
v1_api.register(AdresaResource())
v1_api.register(ArbitrazhnijResource())
v1_api.register(BorzhnikResource())
v1_api.register(KreditorResource())
v1_api.register(VimogiResource())
'''
urlpatterns = patterns('',
url(r'^api/', include(v1_api.urls)),
url(r'^admin/', include(admin.site.urls)),
)
| 1.578125 | 2 |
.ipynb_checkpoints/pyKinectProjectilePrediction-checkpoint.py | PMcGloin/pyKinectProjectilePrediction | 0 | 12786603 | from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import ctypes
import _ctypes
import pygame
import sys
import numpy as np
import cv2
#if sys.hexversion >= 0x03000000:
# import _thread as thread
#else:
# import thread
class DepthRuntime(object):
def __init__(self):
pygame.init()
# Used to manage how fast the screen updates
self._clock = pygame.time.Clock()
# Loop until the user clicks the close button.
self._done = False
# Used to manage how fast the screen updates
self._clock = pygame.time.Clock()
# Kinect runtime object, we want only color and body frames
self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)
# back buffer surface for getting Kinect depth frames, 8bit grey, width and height equal to the Kinect depth frame size
self._frame_surface = pygame.Surface((self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), 0, 24)
# here we will store skeleton data
self._bodies = None
# Set the width and height of the screen [width, height]
self._infoObject = pygame.display.Info()
self._screen = pygame.display.set_mode((self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
pygame.display.set_caption("Kinect for Windows v2 Depth")
#def background_subtraction(self, current_frame, previous_frame):
# previousFrame = [0] * 217088
# return frame
def draw_depth_frame(self, frame, target_surface):
if frame is None: # some usb hub do not provide the infrared image. it works with Kinect studio though
return
target_surface.lock()
f8=np.uint8(frame.clip(1,4000)/16.)
frame8bit=np.dstack((f8,f8,f8))
address = self._kinect.surface_as_array(target_surface.get_buffer())
ctypes.memmove(address, frame8bit.ctypes.data, frame8bit.size)
del address
target_surface.unlock()
def run(self):
# -------- Main Program Loop -----------
frame = [0] * 217088
frames = [frame] * 5
fgbg = cv2.createBackgroundSubtractorKNN()
# fgbg = cv2.createBackgroundSubtractorMOG2()
# print (len(previousFrames))
# print(previousFrames)
while not self._done:
# --- Main event loop
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
self._done = True # Flag that we are done so we exit this loop
elif event.type == pygame.VIDEORESIZE: # window resized
self._screen = pygame.display.set_mode(event.dict['size'], pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
# --- Getting frames and drawing
if self._kinect.has_new_depth_frame():
frame = self._kinect.get_last_depth_frame()
fgmask = fgbg.apply(frame)
# flattenMask = []
# for item in fgmask:
# flattenMask.append(item)
flattenMask = [value for element in fgmask for value in element]
# print (type(flattenMask[0]))
flattenMask = np.array(flattenMask)
# flattenMask = np.array(fgmask)
# flattenMask = flattenMask / 255
# print ("flattenMask\n",flattenMask)
frameMask = []
# frameMask = np.array(frameMask)
for val in np.nditer(flattenMask):
# i = 0
if val == 255:
frameMask.append(1)
# val = 1
else:
frameMask.append(0)
# val = 0
# i += 1
frameMask = np.array(frameMask)
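                # Editor's note (sketch, not in the original): the per-pixel loop above
                # can be vectorized, e.g. frameMask = (flattenMask == 255).astype(frame.dtype),
                # which avoids iterating over ~217k depth pixels per frame in Python.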
# np.set_printoptions(threshold=sys.maxsize)
# print("frame\n",frame)
# print ("flattenMask\n",flattenMask)
# print ("frameMask\n",frameMask)
outputFrame = np.multiply(frame, frameMask)
# frames.append(outputFrame)
# frames.pop(0)
# outputFrame2 = []
# cv2.fastNlMeansDenoisingMulti(frames, 4, 4, outputFrame2)
# outputFrame2 = cv2.fastNlMeansDenoising(outputFrame)
# outputFrame = np.multiply(frame, fgmask)
# cv2.imshow('frame',fgmask)
self.draw_depth_frame(outputFrame, self._frame_surface)
# k = cv2.waitKey(30) & 0xff
# if k == 27:
# break
# frames.append(frame)
# frames.pop(0)
# outputFrame = np.subtract(frames[0], frames[1])
# self.draw_depth_frame(outputFrame, self._frame_surface)
#self.draw_depth_frame(frame, self._frame_surface)
#frame = np.average(np.array([frame, previousFrame]), axis=0)
#np.set_printoptions(threshold=sys.maxsize)
#print(outputFrame)
#print(frame.size)
# outputFrame = (np.array(previousFrames[0]) + np.array(previousFrames[1]) + np.array(previousFrames[2]) + np.array(previousFrames[3]) + np.array(previousFrames[4])) / 5
# self.draw_depth_frame(outputFrame.astype(int), self._frame_surface)
# frame2 = cv.fastNlMeansDenoisingMulti(previousFrames, 2 , 3)
frame = None
outputFrame = None
self._screen.blit(self._frame_surface, (0,0))
pygame.display.update()
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 60 frames per second
self._clock.tick(60)
# Close our Kinect sensor, close the window and quit.
self._kinect.close()
pygame.quit()
__main__ = "Kinect v2 Depth"
game =DepthRuntime();
game.run(); | 2.609375 | 3 |
nablapps/meeting_records/admin.py | Amund211/nablaweb | 17 | 12786604 | <filename>nablapps/meeting_records/admin.py
"""
Admin interface for meeting record app
"""
from django.contrib import admin
from nablapps.core.admin import ChangedByMixin
from .models import MeetingRecord
@admin.register(MeetingRecord)
class MeetingRecordAdmin(ChangedByMixin, admin.ModelAdmin):
"""Admin interface for MeetingRecord model"""
fields = ("title", "slug", "description", "pub_date", "file")
prepopulated_fields = {"slug": ("title",)}
| 1.875 | 2 |
project/controllers/pilotConsoleController.py | MattiaPeiretti/TVG | 0 | 12786605 | # Libs
import flask
# Modules
from project.visionGrabber.device import Device
def get_vision_feed():
return flask.Response(generate_frame_from_view(Device()), mimetype='multipart/x-mixed-replace; boundary=frame')
def generate_frame_from_view(camera):
while True:
#get camera frame
frame = camera.get_frame()
yield(b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(frame) + b'\r\n') | 2.53125 | 3 |
CombinedList/main.py | rishidevc/stkovrflw | 0 | 12786606 | <reponame>rishidevc/stkovrflw
# https://stackoverflow.com/questions/51165779/combine-2-lists-of-pairs#51165779
def get_combined_users(list1, list2):
usernames = set()
combined = []
    for user in sorted(list2 + list1, key=lambda user: user[0]):  # stable sort: for a duplicate username the list2 entry comes first and wins; list1 + list2 would reverse that precedence
if not user[0] in usernames:
usernames.add(user[0])
combined.append(user)
return combined
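
# Editor's sketch (not part of the original answer): the same list2-wins precedence
# can be had with a dict merge instead of sort-and-scan, at the cost of losing the
# alphabetical ordering produced by get_combined_users above.
def get_combined_users_dict_sketch(list1, list2):
    merged = dict(list1)         # start from list1 ...
    merged.update(dict(list2))   # ... and let list2 overwrite duplicate usernames
    return list(merged.items())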
if __name__ == "__main__":
dyndns = [('user1', 'dyndns1'), ('user2', 'dyddns2'), ('user3', 'dyndns3'), ('user4', 'dyddns4')]
ip = [('user1', '1.1.1.1'), ('user2', '192.168.3.11'), ('user4', '172.16.58.3')]
combined = get_combined_users(dyndns, ip)
print(combined)
# >> options.colBy = 5;
# >> options.rowBy = 3;
# >> obj = LatexTableFromMCode('magic(20)', options)
# ...
# ...
# ...
# >> obj.compileLatex();
# ...
# ...
# ...
# >> obj.options
# ans =
# struct with fields:
# latexFileName: 'Latex-Table-03-Jul-2018-19-23-23.tex'
# rowBy: 3
# colBy: 5
# alignment: 'c'
# tablePos: 'htbp'
# colNames: ''
# fillBlankWith: ''
# colFontStyle: ''
# >> | 2.578125 | 3 |
tests/test_ghoclient.py | fccoelho/ghoclient | 1 | 12786607 | #!/usr/bin/env python
"""Tests for `ghoclient` package."""
import unittest
from click.testing import CliRunner
import ghoclient
from ghoclient import cli
from ghoclient import Index
import pandas as pd
from whoosh.searching import Hit
class TestGhoclient(unittest.TestCase):
"""Tests for `ghoclient` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""Test something."""
def test_command_line_interface(self):
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
# assert 'ghoclient.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
class TestGHO(unittest.TestCase):
def test_get_countries_as_df(self):
GC = ghoclient.ghoclient.GHOSession()
df = GC.get_countries()
self.assertIsInstance(df, pd.DataFrame)
def test_get_dimensions_as_df(self):
GC = ghoclient.ghoclient.GHOSession()
df = GC.get_dimensions()
self.assertIsInstance(df, pd.DataFrame)
        self.assertEqual(len(df.columns), 3)
def test_get_data(self):
GC = ghoclient.ghoclient.GHOSession()
df = GC.fetch_data_from_codes(code='WHS3_522')
class Test_Index(unittest.TestCase):
def test_build_index(self):
ghoclient.index.build_index(None)
assert ghoclient.index.ix is not None
def test_search(self):
res = ghoclient.index.search('tuberculosis')
        self.assertGreater(len(res), 0)  # res[0] is accessed below, so an empty result should fail here
self.assertIsInstance(res[0], dict)
self.assertIn('code', res[0]) | 2.421875 | 2 |
src/ptide/main.py | ptphp/PyLib | 1 | 12786608 | <reponame>ptphp/PyLib
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from PySide import QtCore, QtGui,QtWebKit
from ptpy.pyside.webkit.webview import WebView
from ptpy.dir.tree import listFiles
from ptpy.file.main import getContent
from ptpy.offline.main import download
import json
PREVIEW_URL = "http://dev.game110.cn"
class Editor(QtCore.QObject):
def __init__(self,parent = None):
super(Editor,self).__init__(parent)
self.htmlSrc = ""
@QtCore.Slot(result=str)
def getHtmlSrc(self):
return self.htmlSrc
@QtCore.Slot(str,result=str)
def getFiles(self,path):
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
files = listFiles(path)
QtGui.QApplication.restoreOverrideCursor()
return json.dumps(files)
@QtCore.Slot(str,str,result=str)
def saveContent(self,filename,content):
fi = QtCore.QFile(filename)
if not fi.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "Dock Widgets",
"Cannot write file %s:\n%s." % (filename, file.errorString()))
return
out = QtCore.QTextStream(fi)
out.setCodec("UTF-8")
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
out << content
QtGui.QApplication.restoreOverrideCursor()
return ""
@QtCore.Slot(str,result=str)
def getContent(self,path):
return getContent(path)
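
# Editor's note: MainWindow registers an Editor instance with
# addToJavaScriptWindowObject("editor", ...), so the JavaScript loaded from
# ui/index.html can call the @QtCore.Slot methods above directly, e.g.
# editor.getFiles(path), editor.getContent(path) and
# editor.saveContent(filename, content).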
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.createActions()
self.createMenus()
self.setupWebView()
#self.createToolBars()
self.setWindowTitle("Pt IDE")
sb = self.createStatusbar()
self.setStatusBar(sb)
def save(self):
filename, filtr = QtGui.QFileDialog.getSaveFileName(self,
"Choose a file name", '.', "HTML (*.html *.htm)")
if not filename:
return
fi = QtCore.QFile(filename)
if not fi.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, "Dock Widgets",
"Cannot write file %s:\n%s." % (filename, file.errorString()))
return
out = QtCore.QTextStream(fi)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
out << self.textEdit.toHtml()
QtGui.QApplication.restoreOverrideCursor()
self.statusBar().showMessage("Saved '%s'" % filename, 2000)
def about(self):
QtGui.QMessageBox.about(self, "About PtIde",
"The <b>PtIde</b> Vervsion 1.0")
def font(self):
font, ok = QtGui.QFontDialog.getFont()
#print font
if ok:
self.webview.setFont(font)
self.textEdit.setFont(font)
def createActions(self):
self.quitAct = QtGui.QAction("&Quit", self, shortcut="Ctrl+Q",
statusTip="Quit the application", triggered=self.close)
self.fontAct = QtGui.QAction("&Font", self,
statusTip="Set Font",
triggered=self.font)
self.aboutAct = QtGui.QAction("&About", self,
statusTip="Show the application's About box",
triggered=self.about)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.fontAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.quitAct)
self.viewMenu = self.menuBar().addMenu("&View")
self.menuBar().addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
def createToolBars(self):
#self.fileToolBar = self.addToolBar("File")
#self.fileToolBar.addAction(self.printAct)
self.locationEdit = QtGui.QLineEdit(self)
self.locationEdit.setSizePolicy(QtGui.QSizePolicy.Expanding,
self.locationEdit.sizePolicy().verticalPolicy())
self.locationEdit.returnPressed.connect(self.changeLocation)
#self.WebViewBar = self.addToolBar("WebView Bar")
self.WebViewBar.addAction(self.webview.pageAction(QtWebKit.QWebPage.Back))
self.WebViewBar.addAction(self.webview.pageAction(QtWebKit.QWebPage.Forward))
self.WebViewBar.addAction(self.webview.pageAction(QtWebKit.QWebPage.Reload))
self.WebViewBar.addAction(self.webview.pageAction(QtWebKit.QWebPage.Stop))
homveact = QtGui.QAction(self)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("images/home.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
homveact.setIcon(icon)
self.WebViewBar.addAction(homveact)
homveact.triggered.connect(self.loadPage)
self.WebViewBar.addWidget(self.locationEdit)
#def createStatusBar(self):
# self.statusBar().showMessage("Ready")
def createStatusbar(self):
sb = self.statusBar()
sb.progress = QtGui.QProgressBar()
sb.progress.setMaximumHeight(13)
sb.addPermanentWidget(sb.progress)
return sb
def consolePanel(self):
dock = QtGui.QDockWidget("Console", self)
dock.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea | QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea)
self.consoleView = QtWebKit.QWebView()
self.consoleView.load("ui/console.html")
dock.setWidget(self.consoleView)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, dock)
self.viewMenu.addAction(dock.toggleViewAction())
def previewPanel(self):
dock = QtGui.QDockWidget("Preview", self)
dock.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea | QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea)
self.webview = WebView(self)
self.connect(self.webview.page().networkAccessManager(),QtCore.SIGNAL("replyStart(QString)"), self.replyStart)
self.connect(self.webview.page().networkAccessManager(),QtCore.SIGNAL("replyFinish(QString)"), self.replyFinish)
#self.loadPage()
self.webview.loadStarted.connect(self.loadStart)
self.webview.titleChanged.connect(self.adjustTitle)
self.webview.loadProgress.connect(self.setProgress)
self.webview.loadFinished.connect(self.adjustLocation)
self.webview.linkClicked.connect(self.linkclick)
self.webview.page().javaScriptConsoleMessage = self.consolePrint
self.locationEdit = QtGui.QLineEdit(self)
self.locationEdit.setSizePolicy(QtGui.QSizePolicy.Expanding,
self.locationEdit.sizePolicy().verticalPolicy())
self.locationEdit.returnPressed.connect(self.changeLocation)
widget = QtGui.QWidget()
layout = QtGui.QVBoxLayout()
layout.addWidget(self.locationEdit)
layout.addWidget(self.webview)
layout.setSpacing(0)
layout.setContentsMargins(0,0,0,0)
widget.setLayout(layout)
dock.setWidget(widget)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, dock)
self.viewMenu.addAction(dock.toggleViewAction())
self.inspector = inspector = QtWebKit.QWebInspector()
inspector.setPage(self.webview.page())
QtGui.QShortcut(QtGui.QKeySequence('F5'), self,self.refreshPrev)
def setupWebView(self):
self.editorView = QtWebKit.QWebView()
self.editorView.load("ui/index.html")
self.editorView.page().mainFrame().javaScriptWindowObjectCleared.connect(self.addEditorJsObj)
self.previewPanel()
self.consolePanel()
self.setCentralWidget(self.editorView)
def consolePrint(self,msg,line,id):
#print msg,line,id
#print json.dumps(msg)
self.consoleView.page().mainFrame().evaluateJavaScript("$.console.addConPanel('%s','%s','%s')" % (msg,str(line),id))
def refreshPrev(self):
url = self.webview.url().toString()
if url == '':
url = PREVIEW_URL
self.webview.load(url)
def addEditorJsObj(self):
self.editor = editor = Editor()
self.editorView.page().mainFrame().addToJavaScriptWindowObject("editor",editor)
def setHtmlSrc(self,html):
self.editor.htmlSrc = html
self.editorView.page().mainFrame().evaluateJavaScript("setHtmlSrc()")
def replyStart(self,url):
#self.editorView.page().mainFrame().evaluateJavaScript(js)
self.consoleView.page().mainFrame().evaluateJavaScript("$.console.addPanel('%s')" % (url))
#print "start:++++>",url
def replyFinish(self,url):
print "finish:===>",url
if self.webview.page().networkAccessManager().cache().data(url):
download(url,self.webview.page().networkAccessManager().cache().data(url).readAll())
#print self.webview.page().networkAccessManager().cache().data(url).readAll()
#print self.webview.page().networkAccessManager().cache().metaData(url).rawHeaders()
js = "reply('%s')" % url
#self.editorView.page().mainFrame().evaluateJavaScript(js)
def loadPage(self):
self.webview.load("http://www.baidu.com")
def loadStart(self):
pass#self.setHtmlSrc("")
#self.editorView.page().mainFrame().evaluateJavaScript('clearRequest()')
def linkclick(self,url):
pass#print url
def changeLocation(self):
url = QtCore.QUrl.fromUserInput(self.locationEdit.text())
self.locationEdit.setText(url.toString())
self.webview.load(QtCore.QUrl(url))
self.webview.setFocus()
def adjustTitle(self):
self.statusBar().showMessage(self.webview.title())
#self.setWindowTitle(self.webview.title())
def adjustLocation(self):
self.locationEdit.setText(self.webview.url().toString())
#self.setHtmlSrc(self.webview.page().mainFrame().toHtml())
def setProgress(self, progress):
self.statusBar().progress.setValue(progress)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
mainWin = MainWindow()
mainWin.showMaximized()
sys.exit(app.exec_())
| 2.109375 | 2 |
pos_tagger/trained_model.py | ashwoolford/BNLTK | 14 | 12786609 | <reponame>ashwoolford/BNLTK
# Bangla Natural Language Toolkit: Parts of Speech Tagger
#
# Copyright (C) 2019 BNLTK Project
# Author: <NAME> <<EMAIL>>
from keras.models import load_model
from string import punctuation
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import LabelEncoder
import platform
import getpass
import os
import sys
import logging
logging.getLogger('tensorflow').disabled = True
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class Loader:
texts = ''
sentences = []
model = ''
model_path = None
tagged_data_path = None
def __init__(self):
self.texts = ''
self.sentences = []
self.model = None
self.model_path = None
self.tagged_data_path = None
def path_generator(self):
isFiles_exist = True
if platform.system() == 'Windows':
self.model_path = "C:\\Users\\"+getpass.getuser()+"\\bnltk_data\\pos_data\\keras_mlp_bangla.h5"
self.tagged_data_path = "C:\\Users\\"+getpass.getuser()+"\\bnltk_data\\pos_data\\bn_tagged_mod.txt"
else:
self.model_path = "/Users/"+getpass.getuser()+"/bnltk_data/pos_data/keras_mlp_bangla.h5"
self.tagged_data_path = "/Users/"+getpass.getuser()+"/bnltk_data/pos_data/bn_tagged_mod.txt"
def load_keras_model(self):
self.path_generator()
self.model = load_model(self.model_path)
self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
self.load_corpus()
self.data_manipulator()
def load_corpus(self):
#file = '/Users/ashrafhossain/bnltk_data/pos_data/bn_tagged_mod.txt'
self.texts = open(self.tagged_data_path, encoding="utf8").readlines()
def tuple_maker(self, line):
line = line.split(' ')
sentence = []
for x in line:
if x == '':
continue
else:
x = x.split('\\')
tup = []
for y in x:
tup.append(y);
sentence.append(tuple(tup))
return sentence
def data_manipulator(self):
for i in self.texts:
self.sentences.append(self.tuple_maker(i))
class BanglaPosTagger:
sentences = []
mod_elements = []
model = ''
dict_vectorizer = None
label_encoder = None
def __init__(self):
self.sentences = []
self.mod_elements = []
self.model = ''
self.dict_vectorizer = DictVectorizer(sparse=False)
self.label_encoder = LabelEncoder()
def load(self):
loader_ = Loader()
loader_.load_keras_model()
self.model = loader_.model
self.sentences = loader_.sentences
#print(self.sentences[0])
#print(self.mod_elements)
train_test_cutoff = int(.80 * len(self.sentences))
training_sentences = self.sentences[:train_test_cutoff]
testing_sentences = self.sentences[train_test_cutoff:]
train_val_cutoff = int(.25 * len(training_sentences))
validation_sentences = training_sentences[:train_val_cutoff]
training_sentences = training_sentences[train_val_cutoff:]
X_train, y_train = self.transform_to_dataset(training_sentences)
X_test, y_test = self.transform_to_dataset(testing_sentences)
X_val, y_val = self.transform_to_dataset(validation_sentences)
#dict_vectorizer = DictVectorizer(sparse=False)
self.dict_vectorizer.fit(X_train + X_test + X_val)
self.label_encoder.fit(y_train + y_test + y_val)
def bn_pos_tag(self, input):
self.load()
self.bn_tokenizer(input)
t_list = self.training_transform_to_dataset([self.mod_elements])
t_list = self.dict_vectorizer.transform(t_list)
#print(t_list)
predictions = self.model.predict(t_list)
list_ = []
for x in range(0, len(predictions)):
list_.append(np.argmax(predictions[x]))
#label_encoder = LabelEncoder()
labels = self.label_encoder.inverse_transform(list_)
result = []
for i in range(0, len(labels)):
tup = []
tup.append(self.mod_elements[i])
tup.append(labels[i])
result.append(tuple(tup))
return result
def bn_tokenizer(self, input_):
words = input_.split(' ')
words = [x.strip(' ') for x in words]
words = [i for i in words if i]
dict_ = {}
dict_['।'] = True
for p in punctuation:
dict_[p] = True
for n in words:
if dict_.get(n[-1]):
self.mod_elements.append(n[:-1])
self.mod_elements.append(n[-1])
else:
self.mod_elements.append(n)
self.mod_elements = [i for i in self.mod_elements if i]
def add_basic_features(self, sentence_terms, index):
#print(sentence_terms[index])
""" Compute some very basic word features.
:param sentence_terms: [w1, w2, ...]
:type sentence_terms: list
:param index: the index of the word
:type index: int
:return: dict containing features
:rtype: dict
"""
term = sentence_terms[index]
return {
'nb_terms': len(sentence_terms),
'term': term,
'is_first': index == 0,
'is_last': index == len(sentence_terms) - 1,
'prefix-1': term[0],
'prefix-2': term[:2],
'prefix-3': term[:3],
'suffix-1': term[-1],
'suffix-2': term[-2:],
'suffix-3': term[-3:],
'prev_word': '' if index == 0 else sentence_terms[index - 1],
'next_word': '' if index == len(sentence_terms) - 1 else sentence_terms[index + 1]
}
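    # Example (editor's note, illustrative only): for sentence_terms
    # ['আমার', 'সোনার', 'বাংলা'] and index 1, the dict above describes 'সোনার' through
    # its 1/2/3-character prefixes and suffixes plus prev_word='আমার' and
    # next_word='বাংলা'.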
def training_transform_to_dataset(self, tagged_sentences):
"""
Split tagged sentences to X and y datasets and append some basic features.
:param tagged_sentences: a list of POS tagged sentences
:param tagged_sentences: list of list of tuples (term_i, tag_i)
:return:
"""
X = []
#print(len(tagged_sentences))
for pos_tags in tagged_sentences:
#print(pos_tags)
for index in range(len(pos_tags)):
# Add basic NLP features for each sentence term
X.append(self.add_basic_features(pos_tags, index))
return X
def untag(self, tagged_sentence):
"""
Remove the tag for each tagged term.
:param tagged_sentence: a POS tagged sentence
:type tagged_sentence: list
:return: a list of tags
:rtype: list of strings
"""
return [w for w, _ in tagged_sentence]
def transform_to_dataset(self, tagged_sentences):
"""
Split tagged sentences to X and y datasets and append some basic features.
:param tagged_sentences: a list of POS tagged sentences
:param tagged_sentences: list of list of tuples (term_i, tag_i)
:return:
"""
X, y = [], []
for pos_tags in tagged_sentences:
for index, (term, class_) in enumerate(pos_tags):
# Add basic NLP features for each sentence term
X.append(self.add_basic_features(self.untag(pos_tags), index))
y.append(class_)
return X, y
'''
t = BanglaPosTagger()
t.load()
print(t.bn_pos_tag(' আমার সোনার বাংলা । আমি তোমায় ভালোবাসি । '))
'''
| 2.578125 | 3 |
query_strategies/core_set.py | HUTTON9453/Active-DA | 0 | 12786610 | import numpy as np
from .strategy import Strategy
from sklearn.neighbors import NearestNeighbors
import pickle
from datetime import datetime
class CoreSet(Strategy):
def __init__(self, X, Y, idxs_lb, net, handler, args, tor=1e-4):
super(CoreSet, self).__init__(X, Y, idxs_lb, net, handler, args)
self.tor = tor
def query(self, n):
lb_flag = self.idxs_lb.copy()
embedding = self.get_embedding(self.X, self.Y)
embedding = embedding.numpy()
print('calculate distance matrix')
t_start = datetime.now()
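        # The lines below use the Gram-matrix identity
        #   ||e_i - e_j||^2 = ||e_i||^2 + ||e_j||^2 - 2 * <e_i, e_j>
        # to turn the embedding dot products into a full pairwise distance matrix.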
dist_mat = np.matmul(embedding, embedding.transpose())
sq = np.array(dist_mat.diagonal()).reshape(len(self.X), 1)
dist_mat *= -2
dist_mat += sq
dist_mat += sq.transpose()
dist_mat = np.sqrt(dist_mat)
print(datetime.now() - t_start)
print('calculate greedy solution')
t_start = datetime.now()
mat = dist_mat[~lb_flag, :][:, lb_flag]
for i in range(n):
if i%10 == 0:
print('greedy solution {}/{}'.format(i, n))
mat_min = mat.min(axis=1)
q_idx_ = mat_min.argmax()
q_idx = np.arange(self.n_pool)[~lb_flag][q_idx_]
lb_flag[q_idx] = True
mat = np.delete(mat, q_idx_, 0)
mat = np.append(mat, dist_mat[~lb_flag, q_idx][:, None], axis=1)
print(datetime.now() - t_start)
opt = mat.min(axis=1).max()
bound_u = opt
bound_l = opt/2.0
delta = opt
xx, yy = np.where(dist_mat <= opt)
dd = dist_mat[xx, yy]
lb_flag_ = self.idxs_lb.copy()
subset = np.where(lb_flag_==True)[0].tolist()
SEED = 5
pickle.dump((xx.tolist(), yy.tolist(), dd.tolist(), subset, float(opt), n, self.n_pool), open('mip{}.pkl'.format(SEED), 'wb'), 2)
import ipdb
ipdb.set_trace()
# solving MIP
# download Gurobi software from http://www.gurobi.com/
# sh {GUROBI_HOME}/linux64/bin/gurobi.sh < core_set_sovle_solve.py
sols = pickle.load(open('sols{}.pkl'.format(SEED), 'rb'))
if sols is None:
q_idxs = lb_flag
else:
lb_flag_[sols] = True
q_idxs = lb_flag_
print('sum q_idxs = {}'.format(q_idxs.sum()))
return np.arange(self.n_pool)[(self.idxs_lb ^ q_idxs)]
| 2.5 | 2 |
CheckIO/Elementary/15_Common_Words.py | marshallhumble/Project_Euler | 3 | 12786611 | <reponame>marshallhumble/Project_Euler<filename>CheckIO/Elementary/15_Common_Words.py<gh_stars>1-10
#!/usr/bin/env python
"""
Let's continue examining words. You are given two string with words separated by commas. Try to find what is common
between these strings. The words are not repeated in the same string.
Your function should find all of the words that appear in both strings. The result must be represented as a string of
words separated by commas in alphabetic order.
Input: Two arguments as strings.
Output: The common words as a string.
Precondition:
Each string contains no more than 10 words.
All words separated by commas.
All words consist of lowercase latin letters.
"""
def checkio(first, second):
return ','.join(sorted(list(set(first.split(',')) & set(second.split(',')))))
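
# How checkio works: split each input on commas, intersect the two word sets,
# then sort alphabetically and re-join with commas (sorted() accepts any
# iterable, so the intermediate list() call is optional).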
# These "asserts" using only for self-checking and not necessary for auto-testing
def test_function():
assert checkio("hello,world", "hello,earth") == "hello", "Hello"
assert checkio("one,two,three", "four,five,six") == "", "Too different"
assert checkio("one,two,three", "four,five,one,two,six,three") == "one,three,two", "1 2 3"
if __name__ == '__main__':
test_function() | 4.03125 | 4 |
msax/optimize.py | nagyatka/msax | 2 | 12786612 | <reponame>nagyatka/msax<gh_stars>1-10
from abc import ABC, abstractmethod
from functools import partial
from multiprocessing.pool import Pool
import numpy as np
import cma
import pyswarms
from msax.error import sax_error
def sax_objective_fun(params, x_source, m_size, l_1, use_inf=False):
a = int(np.round(params[0]))
w = int(np.round(params[1]))
return np.mean([sax_error(x=x, a=a, w=w, memory_limit=m_size, l_1=l_1, use_inf=use_inf) for x in x_source])
def optimize(objective_func, x_source, m_size, l_1=1, mode='cma', **kwargs):
    """
    Optimize the SAX parameters (a, w) with the chosen solver.

    Available modes: cma, bipop-cma, local-pso, global-pso

    :param objective_func: objective to minimize, called as objective_func(params, x_source, m_size, l_1, ...)
    :param x_source: collection of input series forwarded to the objective
    :param m_size: memory limit forwarded to the objective
    :param l_1: value forwarded to the objective as l_1 (default 1)
    :param mode: solver to use, one of the modes listed above
    :param kwargs: solver-specific options (e.g. x0/sigma0/popsize for CMA, n_particles/c1/c2/w/iters for PSO)
    :return: an OptimizationResult
    """
if mode == 'cma' or mode == 'bipop-cma':
x0 = kwargs.pop('x0')
sigma0 = kwargs.pop('sigma0')
popsize = kwargs.pop('popsize')
seed = kwargs.pop('seed', None)
verbose = kwargs.pop('verbose', True)
if verbose:
verbose = 3
else:
verbose = -1
return CMAOptimizationResult(
mode,
cma.fmin(objective_func,
x0=x0,
sigma0=sigma0,
args=(x_source, m_size, l_1),
bipop=True if mode=='bipop-cma' else False,
options={'popsize': popsize, 'seed': seed, 'verbose': verbose}))
elif mode == 'local-pso' or mode == 'global-pso':
n_particles = kwargs.pop('n_particles')
bounds = ([3.0, 2.0], [np.inf, np.inf])
options = {
'c1': kwargs.pop('c1'),
'c2': kwargs.pop('c2'),
'w': kwargs.pop('w')
}
iters = kwargs.pop('iters')
min_a, max_a, min_w, max_w = 3.0, 500.0, 2.0, 500.0
all_a = np.arange(min_a, max_a, max_a / n_particles)
all_w = np.arange(min_w, max_w, max_w / n_particles)
init_pos = np.array([all_a, all_w]).transpose()
def pso_function_wrapper(particle_coords, **params):
"""
Wrapper function for the objective function because the pso implementation passes all particles in one
list instead of passing them one by one.
:param objective_func:
:param particle_coords:
:param kwargs:
:return:
"""
obj_func_wrapper = partial(objective_func, **params)
with Pool() as p:
return np.array(p.map(obj_func_wrapper, particle_coords, chunksize=3))
# This implementation is slower (+5-10% time)
#res = [objective_func(particle_coord, **params) for particle_coord in particle_coords]
#return np.array(res)
if mode == 'global-pso':
optimizer = pyswarms.single.GlobalBestPSO(
n_particles=n_particles,
dimensions=2,
options=options,
init_pos=init_pos,
bounds=bounds)
cost, pos = optimizer.optimize(
pso_function_wrapper,
iters=iters,
fast=True,
x_source=x_source,
m_size=m_size,
use_inf=True,
l_1=l_1)
return PSOOptimizationResult(mode, cost, pos, iters, optimizer.cost_history)
else:
options['k'] = kwargs.pop('k')
options['p'] = kwargs.pop('p')
optimizer = pyswarms.single.LocalBestPSO(
n_particles=n_particles,
dimensions=2,
options=options,
init_pos=init_pos,
bounds=bounds)
cost, pos = optimizer.optimize(
pso_function_wrapper,
iters=iters,
fast=True,
x_source=x_source,
m_size=m_size,
use_inf=True,
l_1=l_1)
return PSOOptimizationResult(mode, cost, pos, iters, optimizer.cost_history)
else:
raise RuntimeError('Unknown optimization mode')
class OptimizationResult(ABC):
@property
@abstractmethod
def optimizer_name(self):
pass
@property
@abstractmethod
def w(self):
pass
@property
@abstractmethod
def a(self):
pass
@property
@abstractmethod
def cost(self):
pass
@property
@abstractmethod
def iters(self):
pass
@property
@abstractmethod
def history(self):
pass
def __str__(self):
return "OptimizationResult ({}): w={}, a={}, (value/cost: {}, #iterations: {})".format(
self.optimizer_name,
self.w,
self.a,
self.cost,
self.iters)
def __repr__(self):
return self.__str__()
class CMAOptimizationResult(OptimizationResult):
def __init__(self, name, cma_result):
self.name = name
self.cma_result = cma_result
self.hist = cma_result[-1].load().f[:,-1].copy()
@property
def optimizer_name(self):
return self.name
@property
def w(self):
return int(np.round(self.cma_result[0][1]))
@property
def a(self):
return int(np.round(self.cma_result[0][0]))
@property
def cost(self):
return self.cma_result[1]
@property
def iters(self):
return self.cma_result[4]
@property
def history(self):
return self.hist
class PSOOptimizationResult(OptimizationResult):
def __init__(self, name, cost, pos, iters, hist):
self.name = name
self.pso_cost = cost
self.pos = pos
self.iter_no = iters
self.hist = hist
@property
def optimizer_name(self):
return self.name
@property
def w(self):
return np.round(self.pos[1])
@property
def a(self):
return np.round(self.pos[0])
@property
def cost(self):
return self.pso_cost
@property
def iters(self):
return self.iter_no
@property
def history(self):
return self.hist
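
# Illustrative call patterns (editor's note; the numeric values are placeholders,
# not recommended defaults):
#   optimize(sax_objective_fun, x_source, m_size, mode='cma',
#            x0=[8, 16], sigma0=2.0, popsize=12, seed=0, verbose=False)
#   optimize(sax_objective_fun, x_source, m_size, mode='global-pso',
#            n_particles=20, c1=0.5, c2=0.3, w=0.9, iters=50)
# 'local-pso' additionally requires the k and p options, mirroring the kwargs
# popped inside optimize().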
| 2.15625 | 2 |
tests/test_supplier_image_upload.py | MartyDiaz/IT_Automation_Project | 0 | 12786613 | import os.path
import pytest
from unittest import mock
from it_automation.supplier_image_upload import post_images
@pytest.mark.parametrize(
"_input, expected",
[(201, "Success"), (400, "POST error status=400")]
)
@mock.patch("it_automation.run.requests.post")
def test_post_images(mock_requests_post, _input, expected):
mock_requests_post.return_value = mock.Mock(**{"status_code": _input})
test_url = 'test_url'
test_image_directory = os.path.expanduser('~') + '/Documents' \
'/google_class' \
'/project_8' \
'/tests' \
'/images'
if _input != 201:
with pytest.raises(Exception, match=expected):
post_images(test_url, test_image_directory)
else:
post_images(test_url, test_image_directory)
mock_requests_post.assert_called() | 2.421875 | 2 |
pybatfish/client/commands.py | li-ch/pybatfish | 1 | 12786614 | <filename>pybatfish/client/commands.py
# coding=utf-8
# Copyright 2018 The Batfish Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains Batfish client commands that query the Batfish service."""
from __future__ import absolute_import, print_function
from imp import new_module
import json
import logging
import os
import sys
import tempfile
from typing import Any, Dict, List, Optional, Union # noqa: F401
from warnings import warn
from deprecated import deprecated
from requests import HTTPError
from pybatfish.client.consts import CoordConsts, WorkStatusCode
from pybatfish.datamodel import answer
from pybatfish.datamodel.answer.base import get_answer_text
from pybatfish.datamodel.answer.table import TableAnswerElement
from pybatfish.datamodel.assertion import Assertion, AssertionType
from pybatfish.datamodel.referencelibrary import NodeRoleDimension, \
NodeRolesData, ReferenceBook, ReferenceLibrary
from pybatfish.exception import BatfishException
from pybatfish.util import (get_uuid, validate_name, zip_dir)
from . import resthelper, restv2helper, workhelper
from .options import Options
from .session import Session
from .workhelper import (_get_data_get_question_templates, get_work_status,
kill_work)
warn(
"Pybatfish public API is being updated, note that API names and parameters will soon change.")
# TODO: normally libraries don't configure logging in code
_bfDebug = True
bf_logger = logging.getLogger("pybatfish.client")
bf_session = Session(bf_logger)
if _bfDebug:
bf_logger.setLevel(logging.INFO)
bf_logger.addHandler(logging.StreamHandler())
else:
bf_logger.addHandler(logging.NullHandler())
__all__ = ['bf_add_analysis',
'bf_add_node_role_dimension',
'bf_add_reference_book',
'bf_auto_complete',
'bf_configure_question',
'bf_create_check',
'bf_delete_analysis',
'bf_delete_container',
'bf_delete_network',
'bf_delete_snapshot',
'bf_delete_testrig',
'bf_extract_answer_list',
'bf_extract_answer_summary',
'bf_generate_dataplane',
'bf_get_analysis_answers',
'bf_get_answer',
'bf_get_info',
'bf_get_node_role_dimension',
'bf_get_node_roles',
'bf_get_reference_book',
'bf_get_reference_library',
'bf_get_work_status',
'bf_init_analysis',
'bf_init_container',
'bf_init_snapshot',
'bf_init_testrig',
'bf_kill_work',
'bf_list_analyses',
'bf_list_containers',
'bf_list_networks',
'bf_list_incomplete_works',
'bf_list_questions',
'bf_list_snapshots',
'bf_list_testrigs',
'bf_logger',
'bf_print_answer',
'bf_run_analysis',
'bf_session',
'bf_set_container',
'bf_set_network',
'bf_set_snapshot',
'bf_set_testrig',
'bf_str_answer',
'bf_sync_snapshots_sync_now',
'bf_sync_snapshots_update_settings',
'bf_sync_testrigs_sync_now',
'bf_sync_testrigs_update_settings']
def bf_add_analysis(analysisName, questionDirectory):
return _bf_init_or_add_analysis(analysisName, questionDirectory, False)
def bf_add_node_role_dimension(dimension):
# type: (NodeRoleDimension) -> None
"""
Adds another role dimension to the active network.
Individual roles within the dimension must have a valid (java) regex.
The node list within those roles, if present, is ignored by the server.
:param dimension: The NodeRoleDimension object for the dimension to add
:type dimension: :class:`pybatfish.datamodel.referencelibrary.NodeRoleDimension`
"""
if dimension.type == "AUTO":
raise ValueError("Cannot add a dimension of type AUTO")
restv2helper.add_node_role_dimension(bf_session, dimension)
def bf_add_reference_book(book):
# type: (ReferenceBook) -> None
"""
Adds another reference book to the active network.
:param book: The ReferenceBook object to add
:type book: :class:`pybatfish.datamodel.referencelibrary.ReferenceBook`
"""
restv2helper.add_reference_book(bf_session, book)
def _bf_answer_obj(question_str, parameters_str, question_name,
background, snapshot, reference_snapshot):
# type: (str, str, str, bool, str, Optional[str]) -> Union[str, Dict]
json.loads(parameters_str) # a syntactic check for parametersStr
if not question_name:
question_name = Options.default_question_prefix + "_" + get_uuid()
# Upload the question
json_data = workhelper.get_data_upload_question(bf_session, question_name,
question_str,
parameters_str)
resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_UPLOAD_QUESTION, json_data)
# Answer the question
work_item = workhelper.get_workitem_answer(bf_session, question_name,
snapshot, reference_snapshot)
answer_dict = workhelper.execute(work_item, bf_session, background)
if background:
return work_item.id
return answer.from_string(answer_dict["answer"])
def bf_auto_complete(completionType, query, maxSuggestions=None):
"""Auto complete the partial query based on its type."""
jsonData = workhelper.get_data_auto_complete(bf_session, completionType,
query, maxSuggestions)
response = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_AUTO_COMPLETE,
jsonData)
if CoordConsts.SVC_KEY_SUGGESTIONS in response:
return response[CoordConsts.SVC_KEY_SUGGESTIONS]
else:
bf_logger.error("Unexpected response: " + str(response))
return None
def bf_configure_question(inQuestion, exceptions=None, assertion=None):
"""
Get a new question template by adding the supplied exceptions and assertions.
:param inQuestion: The question to use as a starting point
:type inQuestion: :class:`pybatfish.question.question.QuestionBase`
:param exceptions: Exceptions to add to the template.
- `None` means keep the existing set.
- `[]` means wipe out the existing set
:param assertion: Assertion to add to the template.
- `None` means keep the original one.
- empty string means wipe out the existing set
:return: The changed template. If both exceptions and assertion are `None`,
you may still not get back the original
template but get a "flattened" version where the parameter values have
been inlined.
"""
jsonData = workhelper.get_data_configure_question_template(bf_session,
inQuestion,
exceptions,
assertion)
response = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_CONFIGURE_QUESTION_TEMPLATE,
jsonData)
if CoordConsts.SVC_KEY_QUESTION in response:
return response[CoordConsts.SVC_KEY_QUESTION]
else:
bf_logger.error("Unexpected response: " + str(response))
return None
def bf_create_check(inQuestion, snapshot=None, reference_snapshot=None):
"""
Turn a question into a check.
1) Adds answers on the current base (and delta if differential) testrig as exceptions.
2) Asserts that the new count of answers is zero.
If the original question had exceptions or assertions, they will be overridden.
:param inQuestion: The question to use as a starting point
:type inQuestion: :class:`pybatfish.question.question.QuestionBase`
:return: The modified template with exceptions and assertions added.
"""
snapshot = bf_session.get_snapshot(snapshot)
if reference_snapshot is None and inQuestion.getDifferential():
raise ValueError(
"reference_snapshot argument is required to create a differential check")
# override exceptions before asking the question so we get all the answers
inQuestionWithoutExceptions = bf_configure_question(inQuestion,
exceptions=[])
inAnswer = _bf_answer_obj(inQuestionWithoutExceptions, snapshot=snapshot,
reference_snapshot=reference_snapshot).dict()
exceptions = bf_extract_answer_list(inAnswer)
assertion = Assertion(AssertionType.COUNT_EQUALS, 0)
outQuestion = bf_configure_question(inQuestionWithoutExceptions,
exceptions=exceptions,
assertion=assertion)
return outQuestion
def bf_delete_analysis(analysisName):
jsonData = workhelper.get_data_delete_analysis(bf_session, analysisName)
jsonResponse = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_DEL_ANALYSIS,
jsonData)
return jsonResponse
@deprecated("Deprecated in favor of bf_delete_network(name)")
def bf_delete_container(containerName):
"""
Delete container by name.
.. deprecated:: In favor of :py:func:`bf_delete_network`
"""
bf_delete_network(containerName)
def bf_delete_network(name):
# type: (str) -> None
"""
Delete network by name.
:param name: name of the network to delete
:type name: string
"""
if name is None:
raise ValueError('Network to be deleted must be supplied')
jsonData = workhelper.get_data_delete_network(bf_session, name)
resthelper.get_json_response(bf_session, CoordConsts.SVC_RSC_DEL_NETWORK,
jsonData)
def bf_delete_snapshot(name):
# type: (str) -> None
"""
Delete named snapshot from current network.
:param name: name of the snapshot to delete
:type name: string
"""
_check_network()
if name is None:
raise ValueError('Snapshot to be deleted must be supplied')
json_data = workhelper.get_data_delete_snapshot(bf_session, name)
resthelper.get_json_response(bf_session, CoordConsts.SVC_RSC_DEL_SNAPSHOT,
json_data)
@deprecated("Deprecated in favor of bf_delete_snapshot(name)")
def bf_delete_testrig(testrigName):
"""
Delete named testrig from current network.
:param testrigName: name of the testrig to delete
:type testrigName: string
.. deprecated:: In favor of :py:func:`bf_delete_snapshot`
"""
bf_delete_snapshot(testrigName)
def bf_extract_answer_list(answerJson, includeKeys=None):
if "question" not in answerJson:
bf_logger.error("question not found in answerJson")
return None
if "status" not in answerJson or answerJson["status"] != "SUCCESS":
bf_logger.error("question was not answered successfully")
return None
question = answerJson["question"]
if "JsonPathQuestion" not in question["class"]:
bf_logger.error("exception creation only works to jsonpath questions")
return None
if "answerElements" not in answerJson or "results" not in \
answerJson["answerElements"][0]:
bf_logger.error(
"unexpected packaging of answer: answerElements does not exist of is not (non-empty) list")
return None
    '''
    JsonPath questions/answers are flexible: they allow multiple answerElements and
    multiple path queries. Following usage in templates, we pick the first
    answerElement and the response for the first query. When the answer has no
    results, the "result" field is missing.
    '''
result = answerJson["answerElements"][0]["results"]["0"].get("result", {})
return [val for key, val in result.items() if
includeKeys is None or key in includeKeys]
def bf_extract_answer_summary(answerJson):
"""Get the answer for a previously asked question."""
if "status" not in answerJson or answerJson["status"] != "SUCCESS":
bf_logger.error("question was not answered successfully")
return None
if "summary" not in answerJson:
bf_logger.error("summary not found in the answer")
return None
return answerJson["summary"]
def _bf_generate_dataplane(snapshot):
# type: (str) -> Dict[str, str]
workItem = workhelper.get_workitem_generate_dataplane(bf_session, snapshot)
answerDict = workhelper.execute(workItem, bf_session)
return answerDict
def bf_generate_dataplane(snapshot=None):
# type: (Optional[str]) -> str
"""Generates the data plane for the supplied snapshot. If no snapshot argument is given, uses the last snapshot initialized."""
snapshot = bf_session.get_snapshot(snapshot)
answerDict = _bf_generate_dataplane(snapshot)
answer = answerDict["answer"]
return answer
def bf_get_analysis_answers(analysisName, snapshot=None,
reference_snapshot=None):
# type: (str, str, Optional[str]) -> Any
"""Get the answers for a previously asked analysis."""
snapshot = bf_session.get_snapshot(snapshot)
jsonData = workhelper.get_data_get_analysis_answers(bf_session,
analysisName, snapshot,
reference_snapshot)
jsonResponse = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_GET_ANALYSIS_ANSWERS,
jsonData)
answersDict = json.loads(jsonResponse['answers'])
return answersDict
def bf_get_answer(questionName, snapshot, reference_snapshot=None):
# type: (str, str, Optional[str]) -> Any
"""
Get the answer for a previously asked question.
:param questionName: the unique identifier of the previously asked question
:param snapshot: the snapshot the question is run on
:param reference_snapshot: if present, the snapshot against which the answer
was computed differentially.
"""
jsonData = workhelper.get_data_get_answer(bf_session, questionName,
snapshot, reference_snapshot)
response = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_GET_ANSWER,
jsonData)
answerJson = json.loads(response["answer"])
return answerJson
def bf_get_info():
jsonResponse = resthelper.get_json_response(bf_session, '', useHttpGet=True)
return jsonResponse
def bf_get_node_role_dimension(dimension):
# type: (str) -> NodeRoleDimension
"""Returns the set of node roles for the active network."""
return NodeRoleDimension(
**restv2helper.get_node_role_dimension(bf_session, dimension))
def bf_get_node_roles():
# type: () -> NodeRolesData
"""Returns the set of node roles for the active network."""
return NodeRolesData(**restv2helper.get_node_roles(bf_session))
def bf_get_reference_book(book_name):
# type: (str) -> ReferenceBook
"""Returns the reference book with the specified for the active network."""
return ReferenceBook(
**restv2helper.get_reference_book(bf_session, book_name))
def bf_get_reference_library():
# type: () -> ReferenceLibrary
"""Returns the reference library for the active network."""
return ReferenceLibrary(**restv2helper.get_reference_library(bf_session))
def bf_get_work_status(wItemId):
return get_work_status(wItemId, bf_session)
def _bf_init_or_add_analysis(analysisName, questionDirectory, newAnalysis):
from pybatfish.question.question import load_dir_questions
_check_network()
module_name = 'pybatfish.util.anonymous_module'
module = new_module(module_name)
sys.modules[module_name] = module
q_names = load_dir_questions(questionDirectory, moduleName=module_name)
questions = [(qname, getattr(module, qname)) for qname in q_names]
analysis = dict()
for o in questions:
question_name = o[0]
question_class = o[1]
question = question_class().dict()
analysis[question_name] = question
analysis_str = json.dumps(analysis, indent=2, sort_keys=True)
with tempfile.NamedTemporaryFile() as tempFile:
analysis_filename = tempFile.name
with open(analysis_filename, 'w') as analysisFile:
analysisFile.write(analysis_str)
analysisFile.flush()
json_data = workhelper.get_data_configure_analysis(
bf_session, newAnalysis, analysisName, analysis_filename, None)
json_response = resthelper.get_json_response(
bf_session, CoordConsts.SVC_RSC_CONFIGURE_ANALYSIS, json_data)
return json_response
def bf_init_analysis(analysisName, questionDirectory):
return _bf_init_or_add_analysis(analysisName, questionDirectory, True)
@deprecated("Deprecated in favor of bf_set_network(name, prefix)")
def bf_init_container(containerName=None,
containerPrefix=Options.default_network_prefix):
"""
Initialize a new container.
.. deprecated:: In favor of :py:func:`bf_set_network`
"""
bf_set_network(containerName, containerPrefix)
def bf_init_snapshot(upload, name=None, overwrite=False, background=False):
# type: (str, Optional[str], bool, bool) -> Union[str, Dict[str, str]]
"""Initialize a new snapshot.
:param upload: snapshot to upload
:type upload: zip file or directory
:param name: name of the snapshot to initialize
:type name: string
:param overwrite: whether or not to overwrite an existing snapshot with the
same name
:type overwrite: bool
:param background: whether or not to run the task in the background
:type background: bool
:return: name of initialized snapshot, or JSON dictionary of task status if background=True
:rtype: Union[str, Dict]
"""
if bf_session.network is None:
bf_set_network()
if name is None:
name = Options.default_snapshot_prefix + get_uuid()
validate_name(name)
if name in bf_list_snapshots():
if overwrite:
bf_delete_snapshot(name)
else:
raise ValueError(
'A snapshot named ''{}'' already exists in network ''{}'''.format(
name, bf_session.network))
file_to_send = upload
if os.path.isdir(upload):
temp_zip_file = tempfile.NamedTemporaryFile()
zip_dir(upload, temp_zip_file)
file_to_send = temp_zip_file.name
json_data = workhelper.get_data_upload_snapshot(bf_session, name,
file_to_send)
resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_UPLOAD_SNAPSHOT,
json_data)
work_item = workhelper.get_workitem_parse(bf_session, name)
answer_dict = workhelper.execute(work_item, bf_session,
background=background)
if background:
bf_session.baseSnapshot = name
return answer_dict
status = WorkStatusCode(answer_dict["status"])
if status != WorkStatusCode.TERMINATEDNORMALLY:
raise BatfishException(
'Initializing snapshot {ss} failed with status {status}: {msg}'.format(
ss=name,
status=status,
msg=answer_dict['answer']))
else:
bf_session.baseSnapshot = name
bf_logger.info("Default snapshot is now set to %s",
bf_session.baseSnapshot)
return bf_session.baseSnapshot
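# Illustrative usage, assuming a reachable Batfish coordinator; the network name
# and snapshot directory below are placeholders:
#   bf_set_network("example_net")
#   bf_init_snapshot("/path/to/snapshot_dir", name="snap1", overwrite=True)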
@deprecated(
"Deprecated in favor of bf_init_snapshot(upload, delta, name, background)")
def bf_init_testrig(dirOrZipfile, testrigName=None,
background=False):
"""
Initialize a new testrig.
.. deprecated:: In favor of :py:func:`bf_init_snapshot`
"""
return bf_init_snapshot(upload=dirOrZipfile, name=testrigName,
background=background)
def bf_kill_work(wItemId):
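    """Attempt to kill the work item with the given ID."""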
return kill_work(bf_session, wItemId)
def bf_list_analyses():
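    """List the analyses configured for the current network."""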
_check_network()
jsonData = workhelper.get_data_list_analyses(bf_session)
jsonResponse = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_LIST_ANALYSES,
jsonData)
answer = jsonResponse['analysislist']
return answer
@deprecated("Deprecated in favor of bf_list_networks()")
def bf_list_containers():
"""
List containers the session's API key can access.
.. deprecated:: In favor of :py:func:`bf_list_networks`
"""
return bf_list_networks()
def bf_list_networks():
# type: () -> List[str]
"""
List networks the session's API key can access.
:return: a list of network names
"""
json_data = workhelper.get_data_list_networks(bf_session)
json_response = resthelper.get_json_response(
bf_session, CoordConsts.SVC_RSC_LIST_NETWORKS, json_data)
return list(map(str, json_response['networklist']))
def bf_list_incomplete_works():
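    """Return the JSON response listing work items that have not yet completed."""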
jsonData = workhelper.get_data_list_incomplete_work(bf_session)
jsonResponse = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_LIST_INCOMPLETE_WORK,
jsonData)
return jsonResponse
def bf_list_questions():
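    """List the questions stored for the current network."""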
_check_network()
jsonData = workhelper.get_data_list_questions(bf_session)
jsonResponse = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_LIST_QUESTIONS,
jsonData)
answer = jsonResponse['questionlist']
return answer
def bf_list_snapshots(verbose=False):
# type: (bool) -> Union[List[str], Dict]
"""
List snapshots for the current network.
:param verbose: If true, return the full output of Batfish, including
snapshot metadata.
:return: a list of snapshot names or the full json response containing
snapshots and metadata (if `verbose=True`)
"""
json_data = workhelper.get_data_list_snapshots(bf_session,
bf_session.network)
json_response = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_LIST_SNAPSHOTS,
json_data)
if verbose:
return json_response
return [s['testrigname'] for s in json_response['snapshotlist']]
@deprecated("Deprecated in favor of bf_list_snapshots()")
def bf_list_testrigs(currentContainerOnly=True):
"""
List testrigs.
.. deprecated:: In favor of :py:func:`bf_list_snapshots`
"""
container_name = None
if currentContainerOnly:
_check_network()
container_name = bf_session.network
json_data = workhelper.get_data_list_testrigs(bf_session, container_name)
json_response = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_LIST_TESTRIGS,
json_data)
return json_response
def bf_str_answer(answer_json):
"""Convert the Json answer to a string."""
try:
if "answerElements" in answer_json and "metadata" in \
answer_json["answerElements"][0]:
table_answer = TableAnswerElement(answer_json)
return table_answer.table_data.to_string()
else:
return get_answer_text(answer_json)
except Exception as error:
return "Error getting answer text: {}\n Original Json:\n {}".format(
error, json.dumps(answer_json, indent=2))
def bf_print_answer(answer_json):
# type: (Dict) -> None
"""Print the given answer JSON to console."""
print(bf_str_answer(answer_json))
def _bf_get_question_templates():
jsonData = _get_data_get_question_templates(bf_session)
jsonResponse = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_GET_QUESTION_TEMPLATES,
jsonData)
return jsonResponse[CoordConsts.SVC_KEY_QUESTION_LIST]
def bf_run_analysis(analysisName, snapshot, reference_snapshot=None):
# type: (str, str, Optional[str]) -> str
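    """Run the named analysis on a snapshot (optionally against a reference snapshot) and return its answer."""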
workItem = workhelper.get_workitem_run_analysis(bf_session, analysisName,
snapshot,
reference_snapshot)
workAnswer = workhelper.execute(workItem, bf_session)
# status = workAnswer["status"]
answer = workAnswer["answer"]
return answer
@deprecated("Deprecated in favor of bf_set_network(name)")
def bf_set_container(containerName):
"""
Set the current container by name.
.. deprecated:: In favor of :py:func:`bf_set_network`
"""
bf_set_network(containerName)
def bf_set_network(name=None, prefix=Options.default_network_prefix):
# type: (str, str) -> str
"""
Configure the network used for analysis.
:param name: name of the network to set. If `None`, a name will be generated using prefix.
:type name: string
:param prefix: prefix to prepend to auto-generated network names if name is empty
    :type prefix: string
:return: The name of the configured network, if configured successfully.
:rtype: string
:raises BatfishException: if configuration fails
"""
if name is None:
name = prefix + get_uuid()
validate_name(name, "network")
try:
net = restv2helper.get_network(bf_session, name)
bf_session.network = str(net['name'])
return bf_session.network
except HTTPError as e:
if e.response.status_code != 404:
raise BatfishException('Unknown error accessing network', e)
json_data = workhelper.get_data_init_network(bf_session, name)
json_response = resthelper.get_json_response(
bf_session, CoordConsts.SVC_RSC_INIT_NETWORK, json_data)
network_name = json_response.get(CoordConsts.SVC_KEY_NETWORK_NAME)
if network_name is None:
raise BatfishException(
"Network initialization failed. Server response: {}".format(
json_response))
bf_session.network = str(network_name)
return bf_session.network
def bf_set_snapshot(name=None, index=None):
# type: (Optional[str], Optional[int]) -> str
"""
Set the current snapshot by name or index.
:param name: name of the snapshot to set as the current snapshot
:type name: string
    :param index: set the current snapshot to the `index`th most recent snapshot
:type index: int
:return: the name of the successfully set snapshot
:rtype: str
"""
if name is None and index is None:
raise ValueError('One of name and index must be set')
if name is not None and index is not None:
raise ValueError('Only one of name and index can be set')
snapshots = bf_list_snapshots()
# Index specified, simply give the ith snapshot
if index is not None:
if not (-len(snapshots) <= index < len(snapshots)):
raise IndexError(
"Server has only {} snapshots: {}".format(
len(snapshots), snapshots))
bf_session.baseSnapshot = snapshots[index]
# Name specified, make sure it exists.
else:
assert name is not None # type-hint to Python
if name not in snapshots:
raise ValueError(
'No snapshot named ''{}'' was found in network ''{}'': {}'.format(
name, bf_session.network, snapshots))
bf_session.baseSnapshot = name
bf_logger.info("Default snapshot is now set to %s", bf_session.baseSnapshot)
return bf_session.baseSnapshot
@deprecated("Deprecated in favor of bf_set_snapshot(name)")
def bf_set_testrig(testrigName):
"""
Set the current testrig and environment by name.
.. deprecated:: In favor of :py:func:`bf_set_snapshot`
"""
bf_set_snapshot(testrigName)
def bf_sync_snapshots_sync_now(plugin, force=False):
"""
Synchronize snapshots with specified plugin.
:param plugin: name of the plugin to sync snapshots with
:type plugin: string
:param force: whether or not to overwrite any conflicts
:type force: bool
:return: json response containing result of snapshot sync from Batfish service
:rtype: dict
"""
json_data = workhelper.get_data_sync_snapshots_sync_now(bf_session, plugin,
force)
json_response = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_SYNC_SNAPSHOTS_SYNC_NOW,
json_data)
return json_response
@deprecated(
"Deprecated in favor of bf_sync_snapshots_sync_now(plugin_id, force)")
def bf_sync_testrigs_sync_now(pluginId, force=False):
"""
Synchronize snapshots with specified plugin.
.. deprecated:: In favor of :py:func:`bf_sync_snapshots_sync_now`
"""
return bf_sync_snapshots_sync_now(pluginId, force)
def bf_sync_snapshots_update_settings(plugin, settings):
"""
Update snapshot sync settings for the specified plugin.
:param plugin: name of the plugin to update
:type plugin: string
:param settings: settings to update
:type settings: dict
:return: json response containing result of settings update from Batfish service
:rtype: dict
"""
json_data = workhelper.get_data_sync_snapshots_update_settings(bf_session,
plugin,
settings)
json_response = resthelper.get_json_response(bf_session,
CoordConsts.SVC_RSC_SYNC_SNAPSHOTS_UPDATE_SETTINGS,
json_data)
return json_response
@deprecated(
"Deprecated in favor of bf_sync_snapshots_update_settings(plugin_id, settings)")
def bf_sync_testrigs_update_settings(pluginId, settingsDict):
"""
Synchronize snapshots with specified plugin.
.. deprecated:: In favor of :py:func:`bf_sync_snapshots_update_settings`
"""
return bf_sync_snapshots_update_settings(pluginId, settingsDict)
def _check_network():
"""Check if current network is set."""
if bf_session.network is None:
raise BatfishException("Network is not set")
| 1.96875 | 2 |
components/collector/src/source_collectors/sonarqube/complex_units.py | kargaranamir/quality-time | 33 | 12786615 | """SonarQube complex units collector."""
from .violations import SonarQubeViolationsWithPercentageScale
class SonarQubeComplexUnits(SonarQubeViolationsWithPercentageScale):
"""SonarQube complex methods/functions collector."""
rules_configuration = "complex_unit_rules"
total_metric = "functions"
| 1.539063 | 2 |
lab/to_str.py | cleac/bool_to_algeb | 0 | 12786616 | from .const import ITERABLE_TYPES
from .exceptions import OperatorNotFoundError
TRANSLATIONS = {
'and': lambda x, y: '{} and {}'.format(x, y),
'or': lambda x, y: '{} or {}'.format(x, y),
'not': lambda x: 'not {}'.format(x),
'*': lambda x, y: '{} * {}'.format(x, y),
'/': lambda x, y: '{} / {}'.format(x, y),
'+': lambda x, y: '{} + {}'.format(x, y),
'-': lambda x, y: '{} - {}'.format(x, y),
}
def to_str(argument):
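    """Render a nested prefix-notation expression as an infix string.
    Illustrative example (assuming ITERABLE_TYPES covers lists):
    to_str(['and', 'x', ['or', 'y', 'z']]) -> 'x and (y or z)'
    """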
operator, *args = argument
if operator not in TRANSLATIONS:
raise OperatorNotFoundError(operator)
for pos, arg in enumerate(args):
arg_type = type(arg)
if arg_type in ITERABLE_TYPES:
args[pos] = '({})'.format(to_str(arg))
return TRANSLATIONS[operator](*args)
| 2.875 | 3 |
ontology/neural_network/sherlock/listify_length.py | ehbeam/neuro-knowledge-engine | 15 | 12786617 | #!/usr/bin/python
import os, math
import pandas as pd
import numpy as np
np.random.seed(42)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
torch.manual_seed(42)
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterSampler
def doc_mean_thres(df):
doc_mean = df.mean()
df_bin = 1.0 * (df.values > doc_mean.values)
df_bin = pd.DataFrame(df_bin, columns=df.columns, index=df.index)
return df_bin
def load_doc_term_matrix(version=190325, binarize=True):
dtm = pd.read_csv("../../../data/text/dtm_{}.csv.gz".format(version), compression="gzip", index_col=0)
if binarize:
dtm = doc_mean_thres(dtm)
return dtm
def load_coordinates():
atlas_labels = pd.read_csv("../../../data/brain/labels.csv")
activations = pd.read_csv("../../../data/brain/coordinates.csv", index_col=0)
activations = activations[atlas_labels["PREPROCESSED"]]
return activations
def load_raw_domains(k):
list_file = "../../lists/lists_k{:02d}.csv".format(k)
lists = pd.read_csv(list_file, index_col=None)
circuit_file = "../../circuits/circuits_k{:02d}.csv".format(k)
circuits = pd.read_csv(circuit_file, index_col=None)
return lists, circuits
def numpy2torch(data):
inputs, labels = data
inputs = Variable(torch.from_numpy(inputs.T).float())
labels = Variable(torch.from_numpy(labels.T).float())
return inputs, labels
def reset_weights(m):
if isinstance(m, nn.Linear):
m.reset_parameters()
class Net(nn.Module):
def __init__(self, n_input=0, n_output=0, n_hid=100, p_dropout=0.5):
super(Net, self).__init__()
self.fc1 = nn.Linear(n_input, n_hid)
self.bn1 = nn.BatchNorm1d(n_hid)
self.dropout1 = nn.Dropout(p=p_dropout)
self.fc2 = nn.Linear(n_hid, n_hid)
self.bn2 = nn.BatchNorm1d(n_hid)
self.dropout2 = nn.Dropout(p=p_dropout)
self.fc3 = nn.Linear(n_hid, n_hid)
self.bn3 = nn.BatchNorm1d(n_hid)
self.dropout3 = nn.Dropout(p=p_dropout)
self.fc4 = nn.Linear(n_hid, n_hid)
self.bn4 = nn.BatchNorm1d(n_hid)
self.dropout4 = nn.Dropout(p=p_dropout)
self.fc5 = nn.Linear(n_hid, n_hid)
self.bn5 = nn.BatchNorm1d(n_hid)
self.dropout5 = nn.Dropout(p=p_dropout)
self.fc6 = nn.Linear(n_hid, n_hid)
self.bn6 = nn.BatchNorm1d(n_hid)
self.dropout6 = nn.Dropout(p=p_dropout)
self.fc7 = nn.Linear(n_hid, n_hid)
self.bn7 = nn.BatchNorm1d(n_hid)
self.dropout7 = nn.Dropout(p=p_dropout)
self.fc8 = nn.Linear(n_hid, n_output)
# Xavier initialization for weights
for fc in [self.fc1, self.fc2, self.fc3, self.fc4,
self.fc5, self.fc6, self.fc7, self.fc8]:
nn.init.xavier_uniform_(fc.weight)
def forward(self, x):
x = self.dropout1(F.relu(self.bn1(self.fc1(x))))
x = self.dropout2(F.relu(self.bn2(self.fc2(x))))
x = self.dropout3(F.relu(self.bn3(self.fc3(x))))
x = self.dropout4(F.relu(self.bn4(self.fc4(x))))
x = self.dropout5(F.relu(self.bn5(self.fc5(x))))
x = self.dropout6(F.relu(self.bn6(self.fc6(x))))
x = self.dropout7(F.relu(self.bn7(self.fc7(x))))
x = torch.sigmoid(self.fc8(x))
return x
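# Net above is an eight-layer fully connected classifier with batch
# normalization, dropout, and a sigmoid multi-label output head.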
def optimize_hyperparameters(param_list, train_set, val_set, n_epochs=100):
criterion = F.binary_cross_entropy
inputs_val, labels_val = numpy2torch(val_set[0])
op_idx, op_params, op_score_val, op_state_dict, op_loss = 0, 0, 0, 0, 0
for params in param_list:
print("-" * 75)
print(" ".join(["{} {:6.5f}".format(k.upper(), v) for k, v in params.items()]))
print("-" * 75 + "\n")
# Initialize variables for this set of parameters
n_input = train_set[0][0].shape[0]
n_output = train_set[0][1].shape[0]
net = Net(n_input=n_input, n_output=n_output,
n_hid=params["n_hid"], p_dropout=params["p_dropout"])
optimizer = optim.Adam(net.parameters(),
lr=params["lr"], weight_decay=params["weight_decay"])
net.apply(reset_weights)
running_loss = []
# Loop over the dataset multiple times
for epoch in range(n_epochs):
for data in train_set:
# Get the inputs
inputs, labels = numpy2torch(data)
# Zero the parameter gradients
optimizer.zero_grad()
# Forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# Update the running loss
running_loss += [loss.item()]
if epoch % (n_epochs/5) == (n_epochs/5) - 1:
print(" Epoch {:3d}\tLoss {:6.6f}".format(epoch + 1, running_loss[-1] / 100))
# Evaluate on the validation set
with torch.no_grad():
preds_val = net.eval()(inputs_val).float()
score_val = roc_auc_score(labels_val, preds_val, average="macro")
print("\n Validation Set ROC-AUC {:6.4f}\n".format(score_val))
# Update outputs if this model is the best so far
if score_val > op_score_val:
print(" Best so far!\n")
op_score_val = score_val
op_state_dict = net.state_dict()
op_params = params
op_loss = running_loss
return op_score_val
def load_mini_batches(X, Y, split, mini_batch_size=64, seed=0, reshape_labels=False):
np.random.seed(seed)
m = len(split) # Number of training examples
mini_batches = []
# Split the data
X = X.loc[split].T.values
Y = Y.loc[split].T.values
# Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation]
if reshape_labels:
shuffled_Y = shuffled_Y.reshape((1,m))
# Partition (shuffled_X, shuffled_Y), except the end case
	num_complete_minibatches = math.floor(m / mini_batch_size) # Number of mini-batches of size mini_batch_size in your partitioning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[:, k * mini_batch_size : (k+1) * mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k+1) * mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handle the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[:, -(m % mini_batch_size):]
mini_batch_Y = shuffled_Y[:, -(m % mini_batch_size):]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
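# Illustrative entry point (k is the number of domains; the value 6 below is a
# placeholder): optimize_list_len(6) picks, per domain, the word-list length
# that maximizes the mean of the forward and reverse ROC-AUC on validation.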
def optimize_list_len(k):
# Load the data splits
splits = {}
for split in ["train", "validation"]:
splits[split] = [int(pmid.strip()) for pmid in open("../../../data/splits/{}.txt".format(split), "r").readlines()]
act_bin = load_coordinates()
dtm_bin = load_doc_term_matrix(version=190325, binarize=True)
lists, circuits = load_raw_domains(k)
# Specify the hyperparameters for the randomized grid search
param_grid = {"lr": [0.001],
"weight_decay": [0.001],
"n_hid": [100],
"p_dropout": [0.1]}
param_list = list(ParameterSampler(param_grid, n_iter=1, random_state=42))
batch_size = 1024
n_epochs = 100
list_lens = range(5, 26)
op_lists = pd.DataFrame()
for circuit in range(1, k+1):
print("-" * 100)
print("Fitting models for domain {:02d}".format(circuit))
forward_scores, reverse_scores = [], []
structures = circuits.loc[circuits["CLUSTER"] == circuit, "STRUCTURE"]
for list_len in list_lens:
print("-" * 85)
print("Fitting models for lists of length {:02d}".format(list_len))
words = lists.loc[lists["CLUSTER"] == circuit, "TOKEN"][:list_len]
# Optimize forward inference classifier
train_set_f = load_mini_batches(dtm_bin[words], act_bin[structures], splits["train"], mini_batch_size=batch_size, seed=42)
val_set_f = load_mini_batches(dtm_bin[words], act_bin[structures], splits["validation"], mini_batch_size=len(splits["validation"]), seed=42)
try:
op_val_f = optimize_hyperparameters(param_list, train_set_f, val_set_f, n_epochs=n_epochs)
except:
op_val_f = 0.0
forward_scores.append(op_val_f)
# Optimize reverse inference classifier
train_set_r = load_mini_batches(act_bin[structures], dtm_bin[words], splits["train"], mini_batch_size=batch_size, seed=42)
val_set_r = load_mini_batches(act_bin[structures], dtm_bin[words], splits["validation"], mini_batch_size=len(splits["validation"]), seed=42)
try:
op_val_r = optimize_hyperparameters(param_list, train_set_r, val_set_r, n_epochs=n_epochs)
except:
op_val_r = 0.0
reverse_scores.append(op_val_r)
scores = [(forward_scores[i] + reverse_scores[i])/2.0 for i in range(len(forward_scores))]
print("-" * 85)
print("Mean ROC-AUC scores: {}".format(scores))
op_len = list_lens[scores.index(max(scores))]
print("-" * 100)
print("\tCircuit {:02d} has {:02d} words".format(circuit, op_len))
op_df = lists.loc[lists["CLUSTER"] == circuit][:op_len]
op_df["ROC_AUC"] = max(scores)
op_lists = op_lists.append(op_df)
op_lists.to_csv("../../lists/lists_k{:02d}_oplen_nn.csv".format(k), index=None)
| 2.234375 | 2 |
bg2feed/parser.py | knikolla/bg2feed | 0 | 12786618 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
import json
import os
from urllib import parse
import time
import bs4
import requests
from flask import request
from selenium import webdriver
class GlobeParser(object):
def __init__(self):
print('Initializing...')
self.driver_options = webdriver.ChromeOptions()
self.driver_options.add_argument('headless')
driver = webdriver.Chrome(options=self.driver_options)
self.login(driver)
self.cookies = driver.get_cookies()
driver.close()
self.session = requests.session()
for cookie in self.cookies:
c = requests.cookies.create_cookie(
domain=cookie['domain'], name=cookie['name'], value=cookie['value']
)
self.session.cookies.set_cookie(c)
print('Logged in! Ready.')
def get_driver(self) -> webdriver.Chrome:
driver = webdriver.Chrome(options=self.driver_options)
driver.get('https://www.bostonglobe.com')
for cookie in self.cookies:
if 'expiry' in cookie:
del(cookie['expiry'])
driver.add_cookie(cookie)
return driver
@staticmethod
def login(driver):
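        """Log in to bostonglobe.com using credentials taken from environment variables."""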
driver.get('https://pages.bostonglobe.com/login/')
email_field = driver.find_element_by_name('email')
email_field.send_keys(os.environ['BOSTONGLOBE_USER'])
pass_field = driver.find_element_by_name('password')
pass_field.send_keys(os.environ['<PASSWORD>BE_<PASSWORD>'])
submit = driver.find_element_by_xpath('/html/body/div/div/section/form/input')
submit.click()
time.sleep(10)
@staticmethod
def replace_url(url):
if 'bostonglobe.com' not in url:
# These links are served by www3 and start with /
url = 'https://www3.bostonglobe.com%s' % url
original_encoded = parse.quote(url)
return '%s/proxy/%s' % (request.url_root, original_encoded)
@staticmethod
def restore_url(url):
url = url.replace('%s/proxy/' % request.url_root, '')
return parse.unquote(url)
@staticmethod
def parse_title(soup) -> str:
return soup.title.text.replace(' - The Boston Globe', '')
@staticmethod
def fix_image_url(url: str):
# Images hosted in this domain are (so far) prepended
# by a resizer script. Go straight to the source.
index = url.find('arc-anglerfish')
if index > -1:
url = url[index:]
if url.startswith('//'):
url = 'https:%s' % url
if not url.startswith('https://'):
url = 'https://%s' % url
return url
@staticmethod
def parse_metadata(soup) -> dict:
# TODO(knikolla): There are still cases where author doesn't show up.
try:
metadata = json.loads(soup.find('script', type='application/ld+json').text)
except AttributeError:
return {'author': '<EMAIL>'}
try:
authors = metadata['author']['name']
if isinstance(authors, list):
authors = ', '.join(authors)
metadata['author'] = authors
except KeyError:
metadata['author'] = '<EMAIL>'
return metadata
@classmethod
def parse_images(cls, soup) -> list:
images = []
query = soup.find_all('img', 'width_full')
for image in query:
images.append({'src': cls.fix_image_url(image['data-src']),
'alt': image['alt']})
query = soup.find_all('img', 'lead-media__media')
for image in query:
images.append({'src': cls.fix_image_url(image['src']),
'alt': image['alt']})
return images
@staticmethod
def parse_article_from_script(soup) -> list:
scripts = soup.find_all('script')
messy_json = None
for script in scripts:
if 'Fusion.globalContent' in script.text:
messy_json = script.text
if not messy_json:
print('Error finding article data!')
return ['Error loading article.']
start = messy_json.find('{"_id":')
messy_json = messy_json[start:]
end = messy_json.find(';Fusion.globalContentConfig')
script = messy_json[:end]
inside = False
clean_json = ''
for i, char in enumerate(script):
if char == '<':
inside = True
if char == '>':
inside = False
if inside and char == '"':
char = '\"' # Unescaped characters prevent json loading
clean_json = clean_json + char
article = json.loads(clean_json)
return [
x['content'] for x in article['content_elements'] if x['type'] == 'text'
]
@property
def today_url(self):
now = datetime.datetime.now()
today = now.strftime('%Y/%m/%d')
return 'https://www3.bostonglobe.com/todayspaper/%s' % today
def find_top_stories(self):
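        """Scrape today's paper page and return its top stories (title, proxied URL, summary, optional image)."""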
html = self.session.get(self.today_url).text
soup = bs4.BeautifulSoup(html, 'html5lib')
# Top Stories
top = soup.find('div', 'stories-top')
top = top.find_all('div', 'story')
top_stories = []
for story in top:
processed = {
'title': story.find('h2').text,
'url': self.replace_url(story.find('a')['href']),
'summary': ''.join([p.text for p in story.find_all('p')])
}
image = story.find('img')
if image:
processed['image'] = self.fix_image_url(image['src'])
top_stories.append(processed)
return top_stories
def find_section(self, key):
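        """Return stories from the today's-paper section whose heading contains the given key."""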
html = self.session.get(self.today_url).text
soup = bs4.BeautifulSoup(html, 'html5lib')
sections = soup.find_all('div', 'tod-paper-section')
found = None
for section in sections:
title = section.find('h2').find('a').text
if key in title.lower():
found = section
break
if not found:
return
stories = []
parsed = section.find_all('a')[1:]
for story in parsed:
try:
stories.append({'title': story.find('h3').text,
'url': self.replace_url(story['href'])})
except AttributeError:
# Because of course, in some the A is inside the H3
continue
parsed = section.find_all('h3')[1:]
for story in parsed:
try:
stories.append({'title': story.text,
'url': self.replace_url(story.find('a')['href'])})
except (AttributeError, TypeError):
# Because of course, in some the A is inside the H3
continue
return stories
def get_section(self, section):
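        """Return the top stories listed on the given www3.bostonglobe.com news section page."""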
html = self.session.get('https://www3.bostonglobe.com/news/%s' % section).text
soup = bs4.BeautifulSoup(html, 'html5lib')
section = soup.find_all('div', 'stories-top')[0]
stories = []
parsed = section.find_all('div', 'story')
for story in parsed:
a = story.find('a')
stories.append({'title': a.text,
'url': self.replace_url(a['href'])})
return stories
@functools.lru_cache(maxsize=128)
def get_article_selenium(self, url):
driver = self.get_driver()
driver.get(url)
soup = bs4.BeautifulSoup(driver.page_source, 'html5lib')
article = soup.find('div', 'article-content')
driver.close()
return {
'title': self.parse_title(soup),
'paragraphs': [p.text for p in article.find_all('p')],
'images': self.parse_images(soup),
'metadata': self.parse_metadata(soup),
}
@functools.lru_cache(maxsize=128)
def get_article(self, url):
url = self.restore_url(url)
r = self.session.get(url)
if r.status_code == 404:
# Some Javascript shit is happening here, use Selenium.
return self.get_article_selenium(url)
soup = bs4.BeautifulSoup(r.text, 'html5lib')
return {
'title': self.parse_title(soup),
'paragraphs': self.parse_article_from_script(soup),
'metadata': self.parse_metadata(soup),
'images': self.parse_images(soup),
}
| 2.25 | 2 |
connect4game.py | kkanodia7/Connect-4 | 0 | 12786619 | <reponame>kkanodia7/Connect-4
# Created by <NAME> on Feb 2, 2019
import random
import sys
players = {1: "+", -1: "x"} # One player is +, other is x
funcs = {1: max, -1: min}
# Board spaces' weights for AI
move_matrix = [0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 10, 20, 30, 20, 10, 0, 0,
0, 10, 20, 30, 40, 30, 20, 10, 0,
0, 20, 30, 40, 50, 40, 30, 20, 0,
0, 20, 30, 40, 50, 40, 30, 20, 0,
0, 10, 20, 30, 40, 30, 20, 10, 0,
0, 0, 10, 20, 30, 20, 10, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0]
# Prints the board in nice square format
def pretty_board(board):
print(board[10:17]+"\n"+board[19:26]+"\n"+board[28:35]+"\n"+board[37:44]+"\n"+board[46:53]+"\n"+board[55:62]+"\n")
# Returns the empty starting board
def start_board():
return "?"*8 + "??......."*6 + "?"*10
# Returns all possible moves (all columns that are not filled)
def get_valid_moves(board):
cols = []
for c in range(10, 17):
if board[c] == ".":
cols.append(c)
return cols
# Places given player's token in given column, returns new board
def make_move(board, player, col):
index = col
while board[index+9] == ".":
index += 9
return board[:index] + players[player] + board[index+1:], index
# Returns True if a given player's move in a given space resulted in a victory for that player
def goal_test(board, player, index):
dirs = [1, 8, 9, 10]
for nd in dirs:
d = nd
temp = index
line = 1
for i in range(5):
temp += d
line += 1
if board[temp] != players[player]:
line -= 1
if d > 0:
d *= -1
temp = index
else:
break
if line >= 4:
return True
return False
# Uses MiniMax algorithm based on weight matrix, to pre-set depth, to determine best possible move for AI
def minimax(board, player, depth):
cols = get_valid_moves(board)
if len(cols) == 0:
return 0, -5, -5
if depth == 0:
score = 0
for m in range(11, 61):
if board[m] == "+":
score += move_matrix[m]
elif board[m] == "x":
score -= move_matrix[m]
return score, -5, -5
moves = []
for c in cols:
nm, index = make_move(board, player, c)
if goal_test(nm, player, index):
moves.append((100000 * player, index, nm))
else:
count = minimax(nm, -player, depth-1)[0]
moves.append((count, index, nm))
return funcs[player](moves)
# Takes in either RANDOM or PLAYER and plays it against AI
def game(opponent):
board = start_board()
print("1234567")
pretty_board(board)
print()
while True:
if len(get_valid_moves(board)) == 0:
print("No winner!")
break
if opponent == "RANDOM": # Random vs. AI
col = random.choice(get_valid_moves(board))
board, index = make_move(board, 1, col)
print("Random chose column", index % 9)
print("1234567")
pretty_board(board)
print()
if goal_test(board, 1, index):
print("Random Wins!")
break
elif opponent == "PLAYER": # Player vs. AI
col = int(input("Which column (1 - 7)? "))
board, index = make_move(board, 1, col)
print("You chose column", index % 9)
print("1234567")
pretty_board(board)
print()
if goal_test(board, 1, index):
print("You Win!")
break
v, index, board = minimax(board, -1, 5) # AI depth set at 5
print("AI chose column", index % 9)
print("1234567")
pretty_board(board)
print()
if goal_test(board, -1, index):
print("AI Wins!")
break
if __name__ == "__main__":
mode = "0"
while mode != "1" and mode != "2" and mode != "3":
mode = input("1) Player vs. AI 2) Random vs. AI 3) Quit\nEnter option number: ")
if mode == "1":
game("PLAYER")
elif mode == "2":
game("RANDOM")
# Potential Future Improvements:
# - Select depth (difficulty) of AI before starting a game
# - At least 1 second delay between moves for better visibility
# - Make pretty-board potentially look nicer by adding spaces between each column
# - AB-pruning and other such optimizations to increase speed of AI
# - Improve or train weight matrix to make AI smarter
# - More heuristics for AI, besides just win-condition and weight matrix (favor consecutive pieces?)
# - Possible option to increase number of consecutive pieces required for victory from 4
# - Ability to play AI against AI, setting separate difficulties for both
# - Ability to play player against player, player against random, etc.
# - Select board size before a game, rather than a fixed 7x6 board
# - Add a graphical interface to game instead of just displaying in terminal
| 3.46875 | 3 |
functions/instance_scheduler/state_service_test.py | lmaczulajtys/gcp-instance-scheduler | 0 | 12786620 | from datetime import datetime
from config.period import Period
from config.schedule import Schedule
from config.scheduler_config import SchedulerConfig
from schedulers.state_service import StateService, State
config = SchedulerConfig(
periods={
"period1": Period(
name="period1",
begin_time="9:00",
end_time="13:00",
weekdays=[0, 1, 2, 3, 4],
),
"period2": Period(
name="period2",
begin_time="15:00",
end_time="16:00",
weekdays=[0, 1, 2, 3, 4],
),
"period3": Period(
name="period3", end_time="21:00", weekdays=[0, 1, 2, 3, 4, 5, 6]
),
},
schedules={
"schedule1": Schedule(
name="schedule1", periods_names=["period1", "period2", "period3"]
)
},
schedule_tag_name="schedule",
timezone="Europe/Warsaw",
)
service = StateService(config=config)
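# The fixture above defines two weekday working-hours periods plus a daily
# 21:00 stop in the Europe/Warsaw timezone; the tests below derive the desired
# instance state from the last recorded start/stop timestamps.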
def test_automatic_schedules_businessday():
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 08:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 09:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.RUNNING
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 10:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.RUNNING
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 13:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 13:30"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-02 18:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 15:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-02 13:00"),
)
== State.RUNNING
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 15:30"),
last_start=datetime.fromisoformat("2021-03-02 15:00"),
last_stop=datetime.fromisoformat("2021-03-02 13:00"),
)
== State.RUNNING
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 16:00"),
last_start=datetime.fromisoformat("2021-03-02 15:00"),
last_stop=datetime.fromisoformat("2021-03-02 13:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 16:30"),
last_start=datetime.fromisoformat("2021-03-02 15:00"),
last_stop=datetime.fromisoformat("2021-03-02 16:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 21:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-02 18:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 22:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-02 21:00"),
)
== State.STOPPED
)
def test_automatic_schedules_manual_start():
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 08:00"),
last_start=datetime.fromisoformat("2021-03-02 07:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 09:00"),
last_start=datetime.fromisoformat("2021-03-02 07:00"),
last_stop=datetime.fromisoformat("2021-03-01 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 09:00"),
last_start=datetime.fromisoformat("2021-03-02 07:00"),
last_stop=datetime.fromisoformat("2021-03-02 08:00"),
)
== State.RUNNING
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 19:00"),
last_start=datetime.fromisoformat("2021-03-02 20:00"),
last_stop=datetime.fromisoformat("2021-03-02 18:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 23:00"),
last_start=datetime.fromisoformat("2021-03-02 22:00"),
last_stop=datetime.fromisoformat("2021-03-02 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-07 22:00"),
last_start=datetime.fromisoformat("2021-03-07 17:00"),
last_stop=datetime.fromisoformat("2021-03-07 23:10"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-07 22:00"),
last_start=datetime.fromisoformat("2021-03-07 17:00"),
last_stop=datetime.fromisoformat("2021-03-07 01:10"),
)
== State.STOPPED
)
def test_automatic_schedules_manual_stop():
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-02 11:00"),
last_start=datetime.fromisoformat("2021-03-02 09:00"),
last_stop=datetime.fromisoformat("2021-03-02 10:00"),
)
== State.UNKNOWN
)
def test_automatic_schedules_weekend():
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 08:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 09:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 10:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 13:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 13:30"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.UNKNOWN
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 21:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-05 21:00"),
)
== State.STOPPED
)
assert (
service.get_desired_state(
schedule_name="schedule1",
current_datetime=datetime.fromisoformat("2021-03-06 22:00"),
last_start=datetime.fromisoformat("2021-03-06 09:00"),
last_stop=datetime.fromisoformat("2021-03-06 21:00"),
)
== State.STOPPED
)
| 2.5625 | 3 |
ckanext/example_theme/v14_more_custom_css/plugin.py | okfde/ckankrzn | 2,805 | 12786621 | <gh_stars>1000+
../v13_custom_css/plugin.py | 1.101563 | 1 |
Problems/Logistic function/task.py | gabrielizalo/jetbrains-academy-python-credit-calculator | 0 | 12786622 | <gh_stars>0
import math
my_int = int(input())
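# Logistic (sigmoid) function: e^x / (e^x + 1), equivalently 1 / (1 + e^-x)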
sigmoid = math.exp(my_int) / (math.exp(my_int) + 1)
print(round(sigmoid, 2))
| 2.875 | 3 |
examples/docStrings/metric_reduce_npy.py | mathieulagrange/doce | 1 | 12786623 | import explanes as el
import numpy as np
import pandas as pd
np.random.seed(0)
experiment = el.experiment.Experiment()
experiment.project.name = 'example'
experiment.path.output = '/tmp/'+experiment.project.name+'/'
experiment.factor.f1 = [1, 2]
experiment.factor.f2 = [1, 2, 3]
experiment.metric.m1 = ['mean', 'std']
experiment.metric.m2 = ['min', 'argmin']
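# process() below generates two synthetic metrics per setting and saves them as .npy files under experiment.path.output.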
def process(setting, experiment):
metric1 = setting.f1+setting.f2+np.random.randn(100)
metric2 = setting.f1*setting.f2*np.random.randn(100)
np.save(experiment.path.output+setting.id()+'_m1.npy', metric1)
np.save(experiment.path.output+setting.id()+'_m2.npy', metric2)
experiment.setPath()
experiment.do([], process, progress=False)
(settingDescription, columnHeader, constantSettingDescription, nbColumnFactor) = experiment.metric.reduce(experiment.factor.mask([1]), experiment.path.output, verbose=True)
df = pd.DataFrame(settingDescription, columns=columnHeader)
df[columnHeader[nbColumnFactor:]] = df[columnHeader[nbColumnFactor:]].round(decimals=2)
print(constantSettingDescription)
print(df)
| 2.5625 | 3 |
pybinding/greens.py | lise1020/pybinding | 159 | 12786624 | """Green's function computation and related methods
Deprecated: use the chebyshev module instead
"""
import warnings
from . import chebyshev
from .support.deprecated import LoudDeprecationWarning
__all__ = ['Greens', 'kpm', 'kpm_cuda']
Greens = chebyshev.KPM
def kpm(*args, **kwargs):
warnings.warn("Use pb.kpm() instead", LoudDeprecationWarning, stacklevel=2)
return chebyshev.kpm(*args, **kwargs)
def kpm_cuda(*args, **kwargs):
warnings.warn("Use pb.kpm_cuda() instead", LoudDeprecationWarning, stacklevel=2)
return chebyshev.kpm_cuda(*args, **kwargs)
| 2.375 | 2 |
src/debugpy/_vendored/pydevd/tests_python/resources/_debugger_case_source_mapping_and_reference.py | r3m0t/debugpy | 695 | 12786625 | def full_function():
# Note that this function is not called, it's there just to make the mapping explicit.
a = 1 # map to cEll1, line 2
b = 2 # map to cEll1, line 3
c = 3 # map to cEll2, line 2
d = 4 # map to cEll2, line 3
def create_code():
cell1_code = compile(''' # line 1
a = 1 # line 2
b = 2 # line 3
''', '<cEll1>', 'exec')
cell2_code = compile('''# line 1
c = 3 # line 2
d = 4 # line 3
''', '<cEll2>', 'exec')
# Set up the source in linecache. Python doesn't have a public API for
# this, so we have to hack around it, similar to what IPython does.
import linecache
import time
code = ''' # line 1
a = 1 # line 2
b = 2 # line 3
'''
linecache.cache['<cEll1>'] = (
len(code),
time.time(),
[line + '\n' for line in code.splitlines()],
'<cEll1>',
)
code = '''# line 1
c = 3 # line 2
d = 4 # line 3
'''
linecache.cache['<cEll2>'] = (
len(code),
time.time(),
[line + '\n' for line in code.splitlines()],
'<cEll2>',
)
return {'cEll1': cell1_code, 'cEll2': cell2_code}
if __name__ == '__main__':
code = create_code()
exec(code['cEll1'])
exec(code['cEll1'])
exec(code['cEll2'])
exec(code['cEll2'])
print('TEST SUCEEDED')
| 2.9375 | 3 |
hope-note-module/hope-python-2.7-note/Chapter1.py | Hope6537/hope-battlepack | 5 | 12786626 | <reponame>Hope6537/hope-battlepack
# encoding:utf-8
# !/usr/bin/env python
# Python language syntax basics
__author__ = 'Hope6537'
print "Hi,My name is %s , I am %d years old " % ("hope6537", 20)
programLanguages = ["java", "c#", "c++"];
programLanguages.append("python")
programLanguages.insert(1, "javascript")
programLanguages.pop()
programLanguages[2] = "c"
print programLanguages
print programLanguages[0]
print "please input your number"
age = int(raw_input())
if age >= 20:
print "yes old man", age
else:
print "yes teenager", age
names = ['Michael', 'Bob', 'Tracy']
for name in names:
print name
sum = 0
for x in range(101):
sum = sum + x
print sum
sum = 0
n = 1
while n <= 100:
sum = sum + n
n = n + 1
print sum
d = {'Michael': 95, 'Bob': 75, 'Tracy': 85}
print d['Michael']
print 'Thomas' in d
print 1 > 2 and 2 < 3
# Parameters must be defined in this order: required, default, variable (varargs), then keyword arguments.
# Required parameters
def my_abs(x):
if x >= 0:
return x, x
else:
return -x, x
value, origin = my_abs(-12)
print value
print origin
# Variable (positional) arguments
def calc(*numbers):
sum = 0
for n in numbers:
sum = sum + n * n
return sum
print(calc(1, 2, 3, 4, 5))
# Default parameters
def enroll(name, gender, age=6, city='Beijing'):
print 'name:', name
print 'gender:', gender
print 'age:', age
print 'city:', city
enroll('Sarah', 'F')
enroll('Bob', 'M', 7)
enroll('Adam', 'M', city='Tianjin')
# Keyword arguments, collected into a dict
def person(name, age, **kw):
print 'name:', name, 'age:', age, 'other:', kw
person('Michael', 30);
person('Bob', 35, city='Beijing')
person('Adam', 45, gender='M', job='Engineer')
kw = {'city': 'Beijing', 'job': 'Engineer'}
person('Jack', 24, **kw)
| 3.53125 | 4 |
dmwmclient/datasvc.py | FernandoGarzon/dmwmclient | 1 | 12786627 | import httpx
import pandas
from .util import format_dates
BLOCKARRIVE_BASISCODE = {
-6: "no_source",
-5: "no_link",
-4: "auto_suspend",
-3: "no_download_link",
-2: "manual_suspend",
-1: "block_open",
0: "routed",
1: "queue_full",
2: "rerouting",
}
class DataSvc:
"""PhEDEx datasvc REST API
Full documentation at https://cmsweb.cern.ch/phedex/datasvc/doc
"""
defaults = {
# PhEDEx datasvc base URL with trailing slash
"datasvc_base": "https://cmsweb.cern.ch/phedex/datasvc/",
# Options: prod, dev, debug
"phedex_instance": "prod",
}
def __init__(self, client, datasvc_base=None, phedex_instance=None):
if datasvc_base is None:
datasvc_base = DataSvc.defaults["datasvc_base"]
if phedex_instance is None:
phedex_instance = DataSvc.defaults["phedex_instance"]
self.client = client
self.baseurl = httpx.URL(datasvc_base)
self.jsonurl = self.baseurl.join("json/%s/" % phedex_instance)
self.xmlurl = self.baseurl.join("xml/%s/" % phedex_instance)
async def jsonmethod(self, method, **params):
return await self.client.getjson(url=self.jsonurl.join(method), params=params)
async def blockreplicas(self, **params):
"""Get block replicas as a pandas dataframe
Parameters
----------
block block name, can be multiple (*)
dataset dataset name, can be multiple (*)
node node name, can be multiple (*)
se storage element name, can be multiple (*)
update_since unix timestamp, only return replicas whose record was
updated since this time
create_since unix timestamp, only return replicas whose record was
created since this time. When no "dataset", "block"
or "node" are given, create_since is default to 24 hours ago
complete y or n, whether or not to require complete or incomplete
blocks. Open blocks cannot be complete. Default is to
return either.
dist_complete y or n, "distributed complete". If y, then returns
only block replicas for which at least one node has
all files in the block. If n, then returns block
replicas for which no node has all the files in the
block. Open blocks cannot be dist_complete. Default is
to return either kind of block replica.
subscribed y or n, filter for subscription. default is to return either.
custodial y or n. filter for custodial responsibility. default is
to return either.
group group name. default is to return replicas for any group.
show_dataset y or n, default n. If y, show dataset information with
the blocks; if n, only show blocks
"""
resjson = await self.jsonmethod("blockreplicas", **params)
df = pandas.json_normalize(
resjson["phedex"]["block"],
record_path="replica",
record_prefix="replica.",
meta=["bytes", "files", "name", "id", "is_open"],
)
format_dates(df, ["replica.time_create", "replica.time_update"])
return df
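    # Illustrative usage (the dataset name is a placeholder):
    #     datasvc = DataSvc(client)
    #     df = await datasvc.blockreplicas(dataset="/Primary/Processed/TIER", complete="y")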
async def nodes(self, **params):
"""Returns a simple dump of phedex nodes.
Parameters
----------
node PhEDex node names to filter on, can be multiple (*)
noempty filter out nodes which do not host any data
"""
resjson = await self.jsonmethod("nodes", **params)
df = pandas.json_normalize(
resjson["phedex"],
record_path="node",
record_prefix="node.",
)
return df
async def data(self, human_readable=None, **params):
"""Shows data which is registered (injected) to phedex
Parameters
----------
dataset dataset name to output data for (wildcard support)
block block name to output data for (wildcard support)
file file name to output data for (wildcard support)
        level display level, 'file' or 'block'. when level=block
                 no file details would be shown. Default is 'file'.
        create_since when level = 'block', return data of which blocks were created since this time;
                 when level = 'file', return data of which files were created since this time.
                 when no parameters are given, default create_since is set to one day ago
"""
if type(human_readable) is not bool and human_readable is not None:
raise Exception("Wrong human_readable parameter type")
resjson = await self.jsonmethod("data", **params)
out = []
for _instance in resjson["phedex"]["dbs"]:
for _dataset in _instance["dataset"]:
for _block in _dataset["block"]:
for _file in _block["file"]:
out.append(
{
"Dataset": _dataset["name"],
"Is_dataset_open": _dataset["is_open"],
"block_Name": _block["name"],
"Block_size_(GB)": _block["bytes"] / 1000000000.0,
"Time_block_was_created": _block["time_create"],
"File_name": _file["lfn"],
"File_checksum": _file["checksum"],
"File_size": _file["size"] / 1000000000.0,
"Time_file_was_created": _file["time_create"],
}
)
df = pandas.json_normalize(out)
format_dates(df, ["Time_file_was_created", "Time_block_was_created"])
if human_readable:
mapping = {
"Is_dataset_open": "Is dataset open",
"block_Name": "Block Name",
"Block_size_(GB)": "Block size (GB)",
"Time_block_was_created": "Time Block Was Created",
"File_name": "File Name",
"File_checksum": "File Checksum",
"File_size": "File Size (GB)",
"Time_file_was_created": "Time File Was Created",
}
df2 = df.rename(columns=mapping)
return df2
else:
return df
async def errorlog(self, human_readable=None, **params):
"""Return detailed transfer error information, including logs of the transfer and validation commands.
Note that phedex only stores the last 100 errors per link, so more errors may have occurred then indicated by this API
call.
Parameters
----------
Required inputs: at least one of the followings: from, to, block, lfn
optional inputs: (as filters) from, to, dataset, block, lfn
from name of the source node, could be multiple
to name of the destination node, could be multiple
block block name
dataset dataset name
lfn logical file name
"""
if type(human_readable) is not bool and human_readable is not None:
raise Exception("Wrong human_readable parameter type")
resjson = await self.jsonmethod("errorlog", **params)
out = []
for _instance in resjson["phedex"]["link"]:
for _block in _instance["block"]:
for _file in _block["file"]:
for _transfer_error in _file["transfer_error"]:
out.append(
{
"Link": _instance["from"] + " to " + _instance["to"],
"LFN": _file["name"],
"file_Checksum": _file["checksum"],
"file_size_(GB)": _file["size"] / 1000000000.0,
"Block_name": _block["name"],
"Error_log": str(_transfer_error["detail_log"]["$t"]),
"From_PFN": _transfer_error["from_pfn"],
"To_PFN": _transfer_error["to_pfn"],
"Time": _transfer_error["time_done"],
}
)
df = pandas.json_normalize(out)
format_dates(df, ["Time"])
if human_readable:
mapping = {
"From_PFN": "From PFN",
"To_PFN": "To PFN",
"Error_log": "Error Log",
"Block_Name": "Block Name",
"Block_size_(GB)": "Block size (GB)",
"file_checksum": "File Checksum",
"file_size_(GB)": "File Size (GB)",
}
df2 = df.rename(columns=mapping)
return df2
else:
return df
async def blockarrive(self, human_readable=None, **params):
"""Return estimated time of arrival for blocks currently subscribed for transfer. If the estimated time of arrival (ETA)
cannot be calculated, or the block will never arrive, a reason for the missing estimate is provided.
Parameters
----------
id block id
block block name, could be multiple, could have wildcard
dataset dataset name, could be multiple, could have wildcard
to_node destination node, could be multiple, could have wildcard
priority priority, could be multiple
update_since updated since this time
basis technique used for the ETA calculation, or reason it's missing.
arrive_before only show blocks that are expected to arrive before this time.
arrive_after only show blocks that are expected to arrive after this time.
"""
if type(human_readable) is not bool and human_readable is not None:
raise Exception("Wrong human_readable parameter type")
resjson = await self.jsonmethod("blockarrive", **params)
out = []
for _block in resjson["phedex"]["block"]:
for _destination in _block["destination"]:
out.append(
{
"Block_Name": _block["name"],
"Destination": _destination["name"],
"Time_Arrive": _destination["time_arrive"],
"Time_update": _destination["time_update"],
"Number_of_files": _destination["files"],
"Block_size_(GB)": _destination["bytes"] / 1000000000.0,
"Basis_code": BLOCKARRIVE_BASISCODE.get(
_destination["basis"], "No code specified"
),
}
)
df = pandas.json_normalize(out)
format_dates(df, ["Time_Arrive", "Time_update"])
if human_readable:
mapping = {
"Block_Name": "Block Name",
"Block_size_(GB)": "Block size (GB)",
"Time_Arrive": "Time Arrive",
"Time_update": "Time Update",
"Number_of_files": "Number Of Files",
"Basis_code": "Basis Code",
}
df2 = df.rename(columns=mapping)
return df2
else:
return df
async def filereplicas(self, human_readable=None, **params):
"""Serves the file replicas known to phedex.
Parameters
----------
block block name, with '*' wildcards, can be multiple (*). required when no lfn is specified. Block names must
follow the syntax /X/Y/Z#, i.e. have three /'s and a '#'. Anything else is rejected.
        dataset dataset name. Syntax: /X/Y/Z, all three /'s obligatory. Wildcards are allowed.
node node name, can be multiple (*)
se storage element name, can be multiple (*)
update_since unix timestamp, only return replicas updated since this
time
create_since unix timestamp, only return replicas created since this
time
complete y or n. if y, return only file replicas from complete block
replicas. if n only return file replicas from incomplete block
replicas. default is to return either.
dist_complete y or n. if y, return only file replicas from blocks
where all file replicas are available at some node. if
n, return only file replicas from blocks which have
file replicas not available at any node. default is
to return either.
subscribed y or n, filter for subscription. default is to return either.
custodial y or n. filter for custodial responsibility. default is
to return either.
group group name. default is to return replicas for any group.
lfn logical file name
"""
if type(human_readable) is not bool and human_readable is not None:
raise Exception("Wrong human_readable parameter type")
resjson = await self.jsonmethod("filereplicas", **params)
out = []
for _block in resjson["phedex"]["block"]:
for _file in _block["file"]:
for _replica in _file["replica"]:
out.append(
{
"Block_name": _block["name"],
"Files": _block["files"],
"Block_size_(GB)": _block["bytes"] / 1000000000.0,
"lfn": _file["name"],
"Checksum": _file["checksum"],
"File_created_on": _file["time_create"],
"File_replica_at": _replica["node"],
"File_subcribed": _replica["subscribed"],
"Custodial": _replica["custodial"],
"Group": _replica["group"],
"File_in_node_since": _replica["time_create"],
}
)
df = pandas.json_normalize(out)
format_dates(df, ["File_created_on", "File_in_node_since"])
if human_readable is True:
mapping = {
"Block_name": "Block Name",
"Block_size_(GB)": "Block size (GB)",
"File_created_on": "File Created On",
"File_replica_at": "File Replica At",
"File_subcribed": "File Subcribed",
"File_in_node_since": "File In Node Since",
}
df2 = df.rename(columns=mapping)
return df2
else:
return df
async def agentlogs(self, human_readable=None, **params):
"""Show messages from the agents.
Parameters
----------
required inputs: at least one of the optional inputs
optional inputs: (as filters) user, host, pid, agent, update_since
node name of the node
user user name who owns agent processes
host hostname where agent runs
agent name of the agent
pid process id of agent
        update_since lower bound of time to show log messages. Default last 24 h.
"""
if type(human_readable) is not bool and human_readable is not None:
raise Exception("Wrong human_readable parameter type")
resjson = await self.jsonmethod("agentlogs", **params)
out = []
for _agent in resjson["phedex"]["agent"]:
for _node in _agent["node"]:
node = _node["name"]
for _log in _agent["log"]:
out.append(
{
"Agent": _agent["name"],
"Host": _agent["host"],
"PID": _agent["pid"],
"Node": node,
"User": _agent["user"],
"Reason": _log["reason"],
"Time": _log["time"],
"state_dir": _log["state_dir"],
"working_dir": _log["working_dir"],
"Message": str(_log["message"]["$t"]),
}
)
df = pandas.json_normalize(out)
format_dates(df, ["Time"])
if human_readable is True:
mapping = {
"state_dir": "State Directory",
"working_dir": "Working Directory",
}
df2 = df.rename(columns=mapping)
return df2
else:
return df
async def missingfiles(self, human_readable=None, **params):
"""Show files which are missing from blocks at a node.
Parameters
----------
block block name (wildcards) (*)
lfn logical file name (*)
node node name (wildcards)
se storage element.
subscribed y or n. whether the block is subscribed to the node or not
default is null (either)
custodial y or n. filter for custodial responsibility,
default is to return either
group group name
default is to return missing blocks for any group.
(*) either block or lfn is required
"""
resjson = await self.jsonmethod("missingfiles", **params)
out = []
if human_readable is not None and type(human_readable) is not bool:
print("Wrong human_readable parameter type")
df = pandas.json_normalize(out)
return df
elif human_readable is None or human_readable is False:
for _block in resjson["phedex"]["block"]:
for _file in _block["file"]:
for _missing in _file["missing"]:
out.append(
{
"block_name": _block["name"],
"file_name": _file["name"],
"checksum": _file["checksum"],
"size": _file["bytes"],
"created": _file["time_create"],
"origin_node": _file["origin_node"],
"missing_from": _missing["node_name"],
"disk": _missing["se"],
"custodial": _missing["custodial"],
"subscribed": _missing["subscribed"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["created"])
elif human_readable is True:
for _block in resjson["phedex"]["block"]:
for _file in _block["file"]:
for _missing in _file["missing"]:
out.append(
{
"Block Name": _block["name"],
"File Name": _file["name"],
"checksum": _file["checksum"],
"Size of file": _file["bytes"],
"Time created": _file["time_create"],
"Origin Node": _file["origin_node"],
"Missing from": _missing["node_name"],
"Disk": _missing["se"],
"Custodial?": _missing["custodial"],
"Subscribed?": _missing["subscribed"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["Time created"])
async def agents(self, human_readable=None, **params):
"""Serves information about running (or at least recently running) phedex agents.
Parameters
----------
required inputs: none
optional inputs: (as filters) node, se, agent
node node name, could be multiple
se storage element name, could be multiple
agent agent name, could be multiple
version phedex version
update_since updated since this time
detail 'y' or 'n', default 'n'. show "code" information at file level *
"""
resjson = await self.jsonmethod("agents", **params)
out = []
if human_readable is not None and type(human_readable) is not bool:
print("Wrong human_readable parameter type")
df = pandas.json_normalize(out)
return df
elif human_readable is None or human_readable is False:
for _node in resjson["phedex"]["node"]:
for _agent in _node["agent"]:
out.append(
{
"Node": _node["node"],
"Host": _node["host"],
"Agent_name": _node["name"],
"Agent_label": _agent["label"],
"Time_update": _agent["time_update"],
"state_dir": _agent["state_dir"],
"version": _agent["version"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["Time_update"])
elif human_readable is True:
for _node in resjson["phedex"]["node"]:
for _agent in _node["agent"]:
out.append(
{
"Node": _node["node"],
"Host": _node["host"],
"Agent name": _node["name"],
"Agent label": _agent["label"],
"Time update": _agent["time_update"],
"Directory": _agent["state_dir"],
"Version": _agent["version"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["Time update"])
async def blocklatency(self, human_readable=None, **params):
"""Show authentication state and abilities
Parameters
----------
ability authorization ability. If passed then the nodes (from TMDB)
that the user is allowed to use "ability" for are returned.
require_cert if passed then the call will die if the user is not
authenticated by certificate
require_passwd if passed then the call will die if the user is not
authenticated by password
"""
resjson = await self.jsonmethod("blocklatency", **params)
out = []
if human_readable is not None and type(human_readable) is not bool:
print("Wrong human_readable parameter type")
df = pandas.json_normalize(out)
return df
elif human_readable is None or human_readable is False:
for _block in resjson["phedex"]["block"]:
for _destination in _block["destination"]:
for _latency in _destination["latency"]:
out.append(
{
"Block": _block["name"],
"Block_ID": _block["id"],
"Dataset": _block["dataset"],
"Size": _block["bytes"],
"Time_create": _block["time_create"],
"Number_of_files": _block["files"],
"Time_update": _block["time_update"],
"Destination": _destination["name"],
"custodial": _latency["is_custodial"],
"last_suspend": _latency["last_suspend"],
"last_replica": _latency["last_replica"],
"time_subscription": _latency["time_subscription"],
"block_closed": _latency["block_close"],
"latency": _latency["latency"],
}
)
df = pandas.json_normalize(out)
return format_dates(
df,
[
"Time_update",
"last_suspend",
"last_replica",
"time_subscription",
"block_closed",
"Time_create",
],
)
elif human_readable is True:
for _block in resjson["phedex"]["block"]:
for _destination in _block["destination"]:
for _latency in _destination["latency"]:
out.append(
{
"Block": _block["name"],
"Block ID": _block["id"],
"Dataset": _block["dataset"],
"Size": _block["bytes"],
"Time Create": _block["time_create"],
"Number of files": _block["files"],
"Time Update": _block["time_update"],
"Destination": _destination["name"],
"custodial": _latency["is_custodial"],
"Last Suspend": _latency["last_suspend"],
"Last Replica": _latency["last_replica"],
"Time Subscription": _latency["time_subscription"],
"Block Closed": _latency["block_close"],
"Latency": _latency["latency"],
}
)
df = pandas.json_normalize(out)
return format_dates(
df,
[
"Time Update",
"Last Suspend",
"Last Replica",
"Time Subscription",
"Block Closed",
"Time Create",
],
)
async def requestlist(self, human_readable=None, **params):
"""Serve as a simple request search and cache-able catalog of requests to save within a client,
which may then use the request ID to obtain further details using TransferRequests or DeletionRequests.
Parameters
----------
request * request id
type request type, 'xfer' (default) or 'delete'
approval approval state, 'approved', 'disapproved', 'mixed', or 'pending'
requested_by * requestor's name
node * name of the destination node
(show requests in which this node is involved)
decision decision at the node, 'approved', 'disapproved' or 'pending'
group * user group
create_since created since this time
create_until created until this time
decide_since decided since this time
decide_until decided until this time
dataset * dataset is part of request, or a block from this dataset
block * block is part of request, or part of a dataset in request
decided_by * name of person who approved the request
* could be multiple and/or with wildcard
** when both 'block' and 'dataset' are present, they form a logical disjunction (ie. or)
"""
resjson = await self.jsonmethod("requestlist", **params)
out = []
        if human_readable is not None and type(human_readable) is not bool:
            raise Exception("Wrong human_readable parameter type")
elif human_readable is None or human_readable is False:
for _request in resjson["phedex"]["request"]:
for _node in _request["node"]:
out.append(
{
"request_id": _request["id"],
"time_created": _request["time_create"],
"requested_by": _request["requested_by"],
"approval": _request["approval"],
"node": _node["name"],
"time_decided": _node["time_decided"],
"decided_by": _node["decided_by"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["time_created", "time_decided"])
else:
for _request in resjson["phedex"]["request"]:
for _node in _request["node"]:
out.append(
{
"Request ID": _request["id"],
"Time Created": _request["time_create"],
"Requested by": _request["requested_by"],
"Approval": _request["approval"],
"Node": _node["name"],
"Time decided": _node["time_decided"],
"Decided by": _node["decided_by"],
}
)
df = pandas.json_normalize(out)
return format_dates(df, ["Time Created", "Time decided"])
async def blockreplicasummary(self, human_readable=None, **params):
"""Show authentication state and abilities
Parameters
----------
ability authorization ability. If passed then the nodes (from TMDB)
that the user is allowed to use "ability" for are returned.
require_cert if passed then the call will die if the user is not
authenticated by certificate
require_passwd if passed then the call will die if the user is not
authenticated by password
"""
resjson = await self.jsonmethod("blockreplicasummary", **params)
out = []
if human_readable is not None and type(human_readable) is not bool:
print("Wrong human_readable parameter type")
df = pandas.json_normalize(out)
return df
else:
for _block in resjson["phedex"]["block"]:
for _replica in _block["replica"]:
out.append(
{
"Block": _block["name"],
"Node": _replica["node"],
"Complete": _replica["complete"],
}
)
df = pandas.json_normalize(out)
return df
| 2.609375 | 3 |
_unittests/ut_sphinxext/test_mathdef_extension.py | Pandinosaurus/pyquickhelper | 18 | 12786628 | <filename>_unittests/ut_sphinxext/test_mathdef_extension.py<gh_stars>10-100
"""
@brief test log(time=4s)
@author <NAME>
"""
import sys
import os
import unittest
from docutils.parsers.rst import directives
from pyquickhelper.loghelper.flog import fLOG
from pyquickhelper.pycode import get_temp_folder
from pyquickhelper.helpgen import rst2html
from pyquickhelper.sphinxext import MathDef, MathDefList
from pyquickhelper.sphinxext.sphinx_mathdef_extension import mathdef_node, visit_mathdef_node, depart_mathdef_node
class TestMathDefExtension(unittest.TestCase):
def test_post_parse_sn_todoext(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
directives.register_directive("mathdef", MathDef)
directives.register_directive("mathdeflist", MathDefList)
def test_mathdef(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
from docutils import nodes as skip_
content = """
test a directive
================
before
.. mathdef::
:title: first def
:tag: definition
:lid: label1
this code should appear___
after
""".replace(" ", "")
if sys.version_info[0] >= 3:
content = content.replace('u"', '"')
tives = [("mathdef", MathDef, mathdef_node,
visit_mathdef_node, depart_mathdef_node)]
html = rst2html(content, # fLOG=fLOG,
writer="custom", keep_warnings=True,
directives=tives, extlinks={'issue': ('http://%s', '_issue_')})
temp = get_temp_folder(__file__, "temp_mathdef", clean=False)
with open(os.path.join(temp, "test_mathdef.html"), "w", encoding="utf8") as f:
f.write(html)
t1 = "this code should appear"
if t1 not in html:
raise Exception(html)
t1 = "after"
if t1 not in html:
raise Exception(html)
t1 = "first def"
if t1 not in html:
raise Exception(html)
def test_mathdeflist(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
from docutils import nodes as skip_
content = """
test a directive
================
before
.. mathdef::
:title: first def2
:tag: Theoreme
this code should appear___
middle
.. mathdeflist::
:tag: definition
after
""".replace(" ", "")
if sys.version_info[0] >= 3:
content = content.replace('u"', '"')
tives = [("mathdef", MathDef, mathdef_node,
visit_mathdef_node, depart_mathdef_node)]
html = rst2html(content, # fLOG=fLOG,
writer="custom", keep_warnings=True,
directives=tives)
temp = get_temp_folder(__file__, "temp_mathdef", clean=False)
with open(os.path.join(temp, "test_mathdeflist.html"), "w", encoding="utf8") as f:
f.write(html)
t1 = "this code should appear"
if t1 not in html:
raise Exception(html)
t1 = "after"
if t1 not in html:
raise Exception(html)
t1 = "first def2"
if t1 not in html:
raise Exception(html)
def test_mathdeflist_contents(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
from docutils import nodes as skip_
content = """
test a directive
================
before
.. mathdef::
:title: first def2
:tag: Theoreme
this code should appear___
middle
.. mathdeflist::
:tag: definition
:contents:
after
""".replace(" ", "")
if sys.version_info[0] >= 3:
content = content.replace('u"', '"')
tives = [("mathdef", MathDef, mathdef_node,
visit_mathdef_node, depart_mathdef_node)]
html = rst2html(content, # fLOG=fLOG,
writer="custom", keep_warnings=True,
directives=tives)
temp = get_temp_folder(__file__, "temp_mathdef", clean=False)
with open(os.path.join(temp, "test_mathdeflist_contents.html"), "w", encoding="utf8") as f:
f.write(html)
t1 = "this code should appear"
if t1 not in html:
raise Exception(html)
t1 = "after"
if t1 not in html:
raise Exception(html)
t1 = "first def2"
if t1 not in html:
raise Exception(html)
def test_mathdeflist_contents_body_sphinx(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
from docutils import nodes as skip_
content = """
test a directive
================
before
.. mathdef::
:title: first def2
:tag: Theoreme
this code should appear___
middle
.. mathdeflist::
:tag: definition
:contents:
middle2
.. mathdeflist::
:tag: Theoreme
:contents:
after
""".replace(" ", "")
if sys.version_info[0] >= 3:
content = content.replace('u"', '"')
tives = [("mathdef", MathDef, mathdef_node,
visit_mathdef_node, depart_mathdef_node)]
html = rst2html(content, # fLOG=fLOG,
writer="custom", keep_warnings=True,
directives=tives, layout="sphinx")
body = rst2html(content, # fLOG=fLOG,
writer="custom", keep_warnings=True,
directives=tives, layout="sphinx_body")
if "<body>" in body:
raise Exception(body)
if "</body>" in body:
raise Exception(body)
temp = get_temp_folder(__file__, "temp_mathdef", clean=False)
with open(os.path.join(temp, "test_mathdeflist_contents_sphinx.html"), "w", encoding="utf8") as f:
f.write(html)
# not yet ready
if "alabaster" in html:
raise Exception(html)
t1 = "this code should appear"
if t1 not in body:
raise Exception(body)
t1 = "after"
if t1 not in body:
raise Exception(body)
t1 = "first def2"
if t1 not in body:
raise Exception(body)
t1 = 'class="reference internal"'
if t1 not in body:
raise Exception(body)
if __name__ == "__main__":
unittest.main()
| 2.234375 | 2 |
src/utilities/paths.py | ab3llini/BlindLess | 1 | 12786629 | <reponame>ab3llini/BlindLess
import os
import re
def __robust_respath_search():
"""
Resolve the path for resources from anywhere in the code.
:return: The real path of the resources
"""
curpath = os.path.realpath(__file__)
basepath = curpath
while os.path.split(basepath)[1] != 'src':
newpath = os.path.split(basepath)[0]
if newpath == basepath:
print("ERROR: unable to find source from path " + curpath)
break
basepath = os.path.split(basepath)[0]
return os.path.join(os.path.split(basepath)[0], "resources")
# ######### RESOURCES DIRECTORIES DEFINITION ###########
RESPATH = __robust_respath_search()
MODELS_FOLDER = 'models'
DATA_FOLDER = 'data'
def resources_path(*paths):
"""
Very base function for resources path management.
Return the complete path from resources given a sequence of directories
eventually terminated by a file, and makes all necessary subdirectories
:param paths: a sequence of paths to be joined starting from the base of resources
:return: the complete path from resources (all necessary directories are created)
"""
p = os.path.join(RESPATH, *paths)
if os.path.splitext(p)[1] != '':
basep = os.path.split(p)[0]
else:
basep = p
os.makedirs(basep, exist_ok=True)
return p
# ############################## BASE DIRECTORY-RELATIVE PATHS ###############
def models_path(*paths):
"""
Builds the path starting where all model data should be.
:param paths: sequence of directories to be joined after the standard base.
:return: The path relative to this standard folder
"""
return resources_path(MODELS_FOLDER, *paths)
def data_path(*paths):
return resources_path(DATA_FOLDER, *paths)
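# Illustrative calls (paths are placeholders; missing directories are created on demand):
#   models_path("bert", "weights.h5")    -> <RESPATH>/models/bert/weights.h5
#   data_path("captions", "train.json")  -> <RESPATH>/data/captions/train.json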
if __name__ == '__main__':
print(resources_path('models', 'bert', 'runs'))
| 2.859375 | 3 |
tests/create_golden_values.py | jond01/dicom-numpy | 89 | 12786630 | """
Generate a golden NPZ file from a dicom ZIP archive.
"""
import argparse
import numpy as np
from dicom_numpy.zip_archive import combined_series_from_zip
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', help='Output golden NPZ file', required=False)
parser.add_argument('input', help="Input DICOM zip archive")
return parser.parse_args()
def generate_golden_values(input_zip, output_path='golden_values'):
"""
Generate a golden NPZ file for a given DICOM zip archive.
"""
voxels, ijk_to_xyz = combined_series_from_zip(input_zip)
np.savez_compressed(output_path, voxels=voxels, ijk_to_xyz=ijk_to_xyz)
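# Illustrative invocations (file names are placeholders, not part of the original script):
#   python create_golden_values.py -o golden_values.npz dicom_series.zip
# or, from Python:
#   generate_golden_values("dicom_series.zip", "golden_values")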
if __name__ == '__main__':
args = parse_args()
if args.output:
generate_golden_values(args.input, args.output)
else:
generate_golden_values(args.input)
| 3.046875 | 3 |
test/test_2_garage_compact_parking.py | jlarkin21/parking-garage-python | 0 | 12786631 | <reponame>jlarkin21/parking-garage-python
from typing import List
from garage.garage import Garage
from garage.parking_level import ParkingLevel
from garage.parking_space import ParkingSpace
from garage.vehicle import Vehicle
from garage.vehicle_type import VehicleType
from test.utils import TestHelpers
def test_standard_cars_are_rejected_from_compact_parking_space():
parking_space_a = ParkingSpace(compact=True)
parking_space_b = ParkingSpace(compact=True)
parking_space_c = ParkingSpace(compact=True)
parking_space_d = ParkingSpace(compact=True)
parking_space_e = ParkingSpace(compact=True)
parking_space_f = ParkingSpace(compact=True)
parking_level_1 = ParkingLevel(spaces=[parking_space_a, parking_space_b])
parking_level_2 = ParkingLevel(spaces=[parking_space_c, parking_space_d])
parking_level_3 = ParkingLevel(spaces=[parking_space_e, parking_space_f])
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_2 = Vehicle(vehicle_type=VehicleType.Car)
vehicle_3 = Vehicle(vehicle_type=VehicleType.Car)
vehicle_4 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_5 = Vehicle(vehicle_type=VehicleType.Car)
vehicle_6 = Vehicle(vehicle_type=VehicleType.Car)
expected_vehicles_rejected: List[Vehicle] = [
vehicle_2,
vehicle_3,
vehicle_5,
vehicle_6,
]
actual_vehicles_rejected = garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6]
)
TestHelpers.assert_expected_vehicles_are_rejected(
actual=actual_vehicles_rejected, expected=expected_vehicles_rejected
)
def test_trucks_are_rejected_from_compact_parking_space():
parking_space_a = ParkingSpace(compact=True)
parking_space_b = ParkingSpace(compact=True)
parking_space_c = ParkingSpace(compact=True)
parking_space_d = ParkingSpace(compact=True)
parking_space_e = ParkingSpace(compact=True)
parking_space_f = ParkingSpace(compact=True)
parking_level_1 = ParkingLevel(spaces=[parking_space_a, parking_space_b])
parking_level_2 = ParkingLevel(spaces=[parking_space_c, parking_space_d])
parking_level_3 = ParkingLevel(spaces=[parking_space_e, parking_space_f])
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_2 = Vehicle(vehicle_type=VehicleType.Truck)
vehicle_3 = Vehicle(vehicle_type=VehicleType.Truck)
vehicle_4 = Vehicle(vehicle_type=VehicleType.Truck)
vehicle_5 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_6 = Vehicle(vehicle_type=VehicleType.Truck)
expected_vehicles_rejected: List[Vehicle] = [
vehicle_2,
vehicle_3,
vehicle_4,
vehicle_6,
]
actual_vehicles_rejected = garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6]
)
TestHelpers.assert_expected_vehicles_are_rejected(
actual=actual_vehicles_rejected, expected=expected_vehicles_rejected
)
def test_compact_vehicles_are_prioritized_into_compact_parking_space():
parking_space_a = ParkingSpace(compact=True)
parking_space_b = ParkingSpace()
parking_space_c = ParkingSpace()
parking_space_d = ParkingSpace(compact=True)
parking_space_e = ParkingSpace()
parking_space_f = ParkingSpace()
parking_level_1 = ParkingLevel(spaces=[parking_space_a, parking_space_b])
parking_level_2 = ParkingLevel(spaces=[parking_space_c, parking_space_d])
parking_level_3 = ParkingLevel(spaces=[parking_space_e, parking_space_f])
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle(vehicle_type=VehicleType.Car)
vehicle_2 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_3 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_4 = Vehicle(vehicle_type=VehicleType.Truck)
vehicle_5 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_6 = Vehicle(vehicle_type=VehicleType.Car)
expected_vehicles_on_level_1: List[Vehicle] = [vehicle_2, vehicle_1]
expected_vehicles_on_level_2: List[Vehicle] = [vehicle_4, vehicle_3]
expected_vehicles_on_level_3: List[Vehicle] = [vehicle_5, vehicle_6]
garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6]
)
TestHelpers.assert_expected_vehicles_on_levels(
levels=garage.levels,
expected_vehicles=[
expected_vehicles_on_level_1,
expected_vehicles_on_level_2,
expected_vehicles_on_level_3,
],
)
| 3.34375 | 3 |
Start.py | OmarGSharaf/Multithreaded-socket-server | 0 | 12786632 | <gh_stars>0
import sys, os, signal
from subprocess import Popen
if __name__ == "__main__" :
for i in range (0,5):
Popen(['python', 'Client.py'], stdin=None, stdout=None, stderr=None, close_fds=True)
| 2.234375 | 2 |
setup.py | Bridgeconn/mt2414 | 10 | 12786633 | <filename>setup.py
from setuptools import setup
setup(
name="mt2414",
description="MT2414",
version="0.1.0",
install_requires=[
"nltk",
"polib",
"Flask",
"Flask-RESTful",
"PyJWT",
"Flask-Cors",
"requests",
"psycopg2",
"scrypt",
"gunicorn",
"pyexcel",
"pyotp",
"pyexcel-xlsx",
"xlrd"
],
)
| 1.171875 | 1 |
normalization.py | kuredatan/taxocluster | 0 | 12786634 | <filename>normalization.py
#Centering and reduction (i.e. standardization) of a list of values
import numpy as np
from misc import inf
#Hypothesis of uniform probability for the occurrence of any bacteria whatever the clinic data may be (which is a strong hypothesis...)
def expectList(vList):
n = len(vList)
if not n:
print "\n/!\ ERROR: Empty list."
raise ValueError
exp = 0
for i in range(n):
if vList[i]:
exp += vList[i]/n
return exp
def standardDeviationList(vList):
vProductList = [x*x for x in vList if x]
expProd = expectList(vProductList)
exp = expectList(vList)
expS = exp*exp
return np.sqrt(expProd-expS),exp
def normalizeList(valueList):
stDeviation,exp = standardDeviationList(valueList)
if not stDeviation:
print "\n/!\ ERROR: Math problem (Division by zero)."
raise ValueError
normList = []
for value in valueList:
if value:
normList.append((value-exp)/stDeviation)
return normList
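#Worked example (a sketch, not part of the original module): for [1.0, 2.0, 3.0]
#the mean is 2.0 and the standard deviation is sqrt(E[X^2] - E[X]^2) = sqrt(14/3 - 4) ~= 0.816,
#so normalizeList([1.0, 2.0, 3.0]) ~= [-1.22, 0.0, 1.22].
#Note that zero values are skipped by the `if value:` / `if vList[i]:` tests above.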
| 3.3125 | 3 |
ruco/clicker.py | nizig/ruco | 10 | 12786635 | """
clicker - rapid command-line user interface development
- Provides convenient syntax and semantics for constructing command-line
interfaces definitions, and tools to speed up development of command-line
applications.
- Define all commands, options, and arguments accepted by an application using
a straight-forward syntax in yaml or json.
- For simple applications, an argument parser is easily instantiated for a
CLI definition, and callbacks for commands/options/arguments are
automatically mapped to Python functions implemented by the user.
(See the main function of this script for an example of this idiom.)
- For complex applications, skeleton Python source code can be generated for
command/option/argument handlers from a CLI definition in yaml/json, which
can then be implemented incrementally by the user.
- The command-line interface definition semantics allow 'inheritance', that
is, deriving a new CLI definition from an existing one, which could be useful
for complex applications with many commands that are more similar than
different.
- Last but far from least, clicker is built using the (outstanding, fantastic,
amazing, where-would-I-be-without-it) Click toolkit:
http://click.pocoo.org/
<NAME> <<EMAIL>>
"""
from __future__ import print_function
import click
import collections
import copy
import json
import sys
import traceback
import yaml
try:
import IPython
pp = IPython.lib.pretty.pprint
def debug():
traceback.print_exc()
IPython.embed()
except ImportError:
pp = print
import pdb
def debug():
traceback.print_exc()
pdb.pm()
def popkey(d, key, default=None):
if key in d:
r = d[key]
del d[key]
return r
return default
def merge(old, new):
def shift(k):
if k in new:
old[k] = new[k]
shift("name")
shift("help")
shift("options")
shift("arguments")
if "commands" in new:
if "commands" not in old:
old["commands"] = new["commands"]
else:
for new_command in new["commands"]:
try:
old_command = [
x for x in old["commands"]
if x["name"] == new_command["name"]
][0]
old["commands"].remove(old_command)
except IndexError:
pass
old["commands"].append(new_command)
if "groups" in new:
if "groups" not in old:
old["groups"] = new["groups"]
else:
for new_group in new["groups"]:
try:
old_group = [
x for x in old["groups"]
if x["name"] == new_group["name"]
][0]
merge(old_group, new_group)
except IndexError:
old["groups"].append(new_group)
return old
def stub(
data, fd=sys.stdout, groups=False, get_cb=None, tab=" ", indent=0,
imports=True
):
def tabs(): return indent * tab
    def push():
        nonlocal indent
        indent += 1
    def pop():
        nonlocal indent
        indent -= 1
def p(s): fd.write(s)
    paths = []
if get_cb is None:
get_cb = lambda p: "_".join(p)
    def build_options(o):
        # minimal placeholder: emit an empty parameter list in generated stubs
        return ""
def print_command(c):
paths.append(c["name"])
p(tabs() + "def %s(%s):\n" % (get_cb(path), build_options(c)))
push()
p(tabs() + "pass\n\n")
pop()
paths.pop()
def print_commands(g):
for c in g.get("commands", ()):
print_command(c)
def print_group(g):
paths.append(g["name"])
if groups:
p(tabs() + "def %s(%s):\n" % (get_cb(path), build_options(g)))
push()
p(tabs() + "pass\n\n")
pop()
print_commands(g)
paths.pop()
def print_groups(g):
for gg in g.get("groups", ()):
print_group(gg)
if imports:
p(tabs() + "import click\n\n")
p(tabs() + "get_context = click.get_current_context\n")
p(tabs() + "get_obj = lambda: get_context().obj\n\n")
print_group(data)
def build(
data, env=None, get_cb=None, require_commands=True, require_groups=False
):
path = []
if get_cb is None:
def get_cb(p, r):
n = "_".join(p)
f = (env or globals()).get(n)
if not f and r:
raise KeyError("Required callback not found in globals(): %s" % n)
return f
def build_argument(a):
a = copy.copy(a)
name = popkey(a, "name")
a["type"] = eval(a.get("type", "None"), {"click": click})
a["default"] = eval(a.get("default", "None"))
a["nargs"] = eval(a.get("nargs", "None"))
return click.Argument([name], **a)
def build_arguments(c):
return [build_argument(x) for x in c.get("arguments", ())]
def build_option(o):
o = copy.copy(o)
name = popkey(o, "name").split(" ")
o["type"] = eval(o.get("type", "None"), {"click": click})
o["default"] = eval(o.get("default", "None"))
for n in name:
if n.startswith("--"):
break
else:
n = None
if n:
o["envvar"] = "%s_%s" % (
"_".join(path).upper(),
n[2:].replace("-", "_").upper()
)
return click.Option(name, **o)
def build_options(o):
return [build_option(x) for x in o.get("options", ())]
def build_command(c, require_cb=require_commands, cls=click.Command):
c = copy.copy(c)
path.append(c["name"])
try:
c["callback"] = get_cb(path, require_cb)
c["params"] = build_options(c)
c["params"].extend(build_arguments(c))
popkey(c, "options")
popkey(c, "arguments")
popkey(c, "commands")
name = popkey(c, "name")
return cls(name, **c)
finally:
path.pop()
def build_commands(g):
return [build_command(x) for x in g.get("commands", ())]
def build_group(g):
group = build_command(g, require_cb=require_groups, cls=click.Group)
try:
path.append(g["name"])
for subgroup in build_groups(g):
group.add_command(subgroup, name=subgroup.name)
for command in build_commands(g):
group.add_command(command)
return group
finally:
path.pop()
def build_groups(g):
return [build_group(x) for x in g.get("groups", ())]
if len(data.get("groups", ())) == 0 and len(data.get("commands", ())) == 0:
rv = build_command(data)
else:
rv = build_group(data)
return rv
#return build_group(data)
def _setup_yaml():
def representer(dumper, data):
return dumper.represent_dict(data.items())
def constructor(loader, node):
return collections.OrderedDict(loader.construct_pairs(node))
tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
yaml.add_representer(collections.OrderedDict, representer)
yaml.add_constructor(tag, constructor)
JSON = "json"
YAML = "yaml"
def loads(s, type=YAML, data={}):
"Load from string."
if type == JSON:
new_data = json.loads(s, object_pairs_hook=collections.OrderedDict)
elif type == YAML:
_setup_yaml()
new_data = yaml.load(s, Loader=yaml.loader.BaseLoader)
else:
raise ValueError("Invalid type: %s" % type)
return merge(data, new_data)
def loadf(f, type=None, data={}):
"Load from file."
if type is None:
        if f.lower().endswith("json"):
            type = JSON
        elif f.lower()[-4:] in (".yml", "yaml"):
type = YAML
else:
raise ValueError("Can't determine file type: %s" % f)
with open(f) as fd:
return loads(fd.read(), type=type, data=data)
def loadmf(files, type=None, data={}):
"Load from many files."
for f in files:
        loadf(f, type=type, data=data)
return data
def loadfd(fd, type=YAML, data={}):
"Load from file descriptor."
raise NotImplementedError()
def loadmfd(fds, type=YAML, data={}):
"Load from many file descriptors."
raise NotImplementedError()
class Cli:
def __init__(self):
self.data = {}
self.cli = None
def loads(self, s, type=YAML):
loads(s, type=type, data=self.data)
def loadf(self, file, type=None):
loadf(file, type=type, data=self.data)
def loadmf(self, files, type=None):
loadmf(files, data=self.data)
def loadfd(self, fd, type=YAML):
loadfd(fd, type=type, data=self.data)
def loadmfd(self, fds, type=YAML):
loadmfd(fds, type=type, data=self.data)
def build(self, *args, **kwargs):
self.cli = build(self.data, *args, **kwargs)
def run(self, *args, **kwargs):
self.build(*args, **kwargs)
self.cli()
def clear(self):
self.__init__()
_yaml = """
name: clicker
help: Do things with clicker CLI definitions
commands:
- name: merge
help: Merge multiple definition files into one
options:
- name: -o --output
help: Output file, default -
type: click.File('wb')
default: '"-"'
- name: -f --format
help: Output format, default yaml
type: click.Choice(["json", "yaml"])
default: '"yaml"'
arguments:
- name: files
nargs: -1
required: yes
- name: stub
help: Generate Python stubs from defininition files
options:
- name: -o --output
help: Output file, default -
type: click.File("wb")
default: '"-"'
- name: -g --groups
help: Generate group callbacks
is_flag: yes
- name: --no-imports
help: Don't generate imports
is_flag: yes
- name: -t --tab
help: Tab string, default '" "'
default: '" "'
- name: -c --click-stubs
help: Generate Click stubs
is_flag: yes
arguments:
- name: files
nargs: -1
required: yes
"""
def clicker_merge(output, format, files):
d = loadmf(files)
if format == YAML:
output.write(yaml.dump(d))
elif format == JSON:
output.write(json.dumps(d))
def clicker_stub(output, groups, no_imports, tab, click_stubs, files):
d = loadmf(files)
    stub(d, fd=output, groups=groups, imports=(not no_imports), tab=tab)
def main():
cli = Cli()
cli.loads(_yaml)
cli.run(require_groups=True)
if __name__ == "__main__":
main()
| 2.78125 | 3 |
reservoirpy/nodes/concat.py | ariwanski/reservoirpy | 1 | 12786636 | <gh_stars>1-10
# Author: <NAME> at 08/07/2021 <<EMAIL>>
# Licence: MIT License
# Copyright: <NAME> (2018) <<EMAIL>>
from typing import Sequence
import numpy as np
from ..node import Node
from ..utils.validation import check_node_io
def concat_forward(concat: Node, data):
axis = concat.axis
if not isinstance(data, np.ndarray):
if len(data) > 1:
return np.concatenate(data, axis=axis)
else:
return np.asarray(data)
else:
return data
def concat_initialize(concat: Node, x=None, **kwargs):
if x is not None:
if isinstance(x, np.ndarray):
concat.set_input_dim(x.shape[1])
concat.set_output_dim(x.shape[1])
elif isinstance(x, Sequence):
result = concat_forward(concat, x)
concat.set_input_dim(tuple([u.shape[1] for u in x]))
if result.shape[0] > 1:
concat.set_output_dim(result.shape)
else:
concat.set_output_dim(result.shape[1])
class Concat(Node):
def __init__(self, axis=1, name=None):
super(Concat, self).__init__(
hypers={"axis": axis},
forward=concat_forward,
initializer=concat_initialize,
name=name,
)
def _check_io(self, X, *args, io_type="input", **kwargs):
if io_type == "input":
if isinstance(X, np.ndarray):
return check_node_io(self, X, *args, io_type=io_type, **kwargs)
elif isinstance(X, Sequence):
checked_X = []
for i in range(len(X)):
input_dim = None
if self.is_initialized:
input_dim = self.input_dim[i]
checked_X.append(check_node_io(self, X[i], input_dim, **kwargs))
return checked_X
else:
return check_node_io(self, X, *args, io_type=io_type, **kwargs)
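# Illustrative behaviour (a sketch, not part of the original module): with two
# (1, 3) inputs, concat_forward joins them along `axis` into a single (1, 6) array:
#   c = Concat(axis=1)
#   concat_forward(c, [np.ones((1, 3)), np.zeros((1, 3))]).shape   # -> (1, 6)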
| 2.578125 | 3 |
allennlp/semparse/domain_languages/common/__init__.py | schmmd/allennlp | 17 | 12786637 | <filename>allennlp/semparse/domain_languages/common/__init__.py
from allennlp.semparse.domain_languages.common.date import Date
| 1.210938 | 1 |
entities.py | nav/rbac-abac | 1 | 12786638 | <reponame>nav/rbac-abac<filename>entities.py
import abc
import typing
from dataclasses import dataclass
@dataclass(frozen=True)
class Subject(abc.ABC):
identity: str
@dataclass(frozen=True)
class Role(Subject):
@property
def urn(self):
return f"role:{self.identity}"
@dataclass(frozen=True)
class User(Subject):
@property
def urn(self):
return f"user:{self.identity}"
@dataclass(frozen=True)
class Resource:
name: str
owner_urn: typing.Optional[str] = None
approver_urn: typing.Optional[str] = None
identity: typing.Optional[str] = None
@property
def urn(self):
if self.identity:
return f"resource:{self.name}:{self.identity}"
return f"resource:{self.name}"
@dataclass(frozen=True)
class Action:
name: str
@property
def urn(self):
return f"action:{self.name}"
# Instances
user_role = Role(identity="user")
approver_role = Role(identity="approver")
manager_role = Role(identity="manager")
admin_role = Role(identity="admin")
alice_user = User(identity="alice")
bob_user = User(identity="bob")
charlie_user = User(identity="charlie")
doug_user = User(identity="doug")
eli_user = User(identity="eli")
frank_user = User(identity="frank")
gary_user = User(identity="gary")
order_resource = Resource(name="order")
settings_resource = Resource(name="settings")
user_settings_resource = Resource(name="settings", identity="user")
finance_settings_resource = Resource(name="settings", identity="finance")
read_action = Action(name="read")
write_action = Action(name="write")
change_action = Action(name="change")
approve_action = Action(name="approve") # approve action is an arbitrary domain action
manage_action = Action(name="manage") # manage action is an arbitrary domain action
| 2.5625 | 3 |
AgeOfBarbarians/etl.py | jymsq/bigdata_analyse | 1 | 12786639 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/12/30 14:40
# @Author : way
# @Site :
# @Describe: data processing
import os
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
############################################# Merge data files ##########################################################
# Keep only the columns used in the analysis; there are far too many fields, and dropping the unused ones saves a great deal of memory and speeds things up
dir = r"C:\Users\Administrator\Desktop\AgeOfBarbarians"
data_list = []
for path in os.listdir(dir):
path = os.path.join(dir, path)
data = pd.read_csv(path)
data = data[
['user_id', 'register_time', 'pvp_battle_count', 'pvp_lanch_count', 'pvp_win_count', 'pve_battle_count',
'pve_lanch_count', 'pve_win_count', 'avg_online_minutes', 'pay_price', 'pay_count']
]
data_list.append(data)
data = pd.concat(data_list)
############################################# Output processing ##########################################################
# No duplicate values
# print(data[data.duplicated()])
# No missing values
# print(data.isnull().sum())
############################################# Save data ##########################################################
# Save the cleaned data to MySQL
engine = create_engine('mysql://root:[email protected]:3306/test?charset=utf8')
data.to_sql('age_of_barbarians', con=engine, index=False, if_exists='append')
| 2.296875 | 2 |
src/nefelibata/builders/index.py | betodealmeida/nefelibata | 22 | 12786640 | import logging
from typing import Optional
from jinja2 import Environment
from jinja2 import FileSystemLoader
from nefelibata import __version__
from nefelibata.builders import Builder
from nefelibata.builders import Scope
from nefelibata.builders.utils import hash_n
from nefelibata.builders.utils import random_color
from nefelibata.post import get_posts
_logger = logging.getLogger(__name__)
class IndexBuilder(Builder):
scopes = [Scope.SITE]
def process_site(self, force: bool = True) -> None:
"""Generate index and archives."""
_logger.info("Creating index")
env = Environment(
loader=FileSystemLoader(
str(self.root / "templates" / self.config["theme"]),
),
)
template = env.get_template("index.html")
posts = get_posts(self.root)
posts.sort(key=lambda x: x.date, reverse=True)
show = self.config.get("posts-to-show", 10)
# first page; these will be updated
page = 1
name: Optional[str] = "index.html"
previous: Optional[str] = None
while name:
page_posts, posts = posts[:show], posts[show:]
# link to next page
next = f"archive{page}.html" if posts else None
html = template.render(
__version__=__version__,
config=self.config,
language=self.config["language"],
posts=page_posts,
breadcrumbs=[("Recent Posts", None)],
previous=previous,
next=next,
hash_n=hash_n,
random_color=random_color,
)
file_path = self.root / "build" / name
with open(file_path, "w") as fp:
fp.write(html)
page += 1
previous, name = name, next
| 2.203125 | 2 |
tests/test_mixed.py | rentbrella/janus | 0 | 12786641 | <filename>tests/test_mixed.py<gh_stars>0
import asyncio
import contextlib
import sys
import threading
import pytest
import janus
class TestMixedMode:
@pytest.mark.skipif(
sys.version_info < (3, 7),
reason="forbidding implicit loop creation works on "
"Python 3.7 or higher only",
)
def test_ctor_noloop(self):
with pytest.raises(RuntimeError):
janus.Queue()
@pytest.mark.asyncio
async def test_maxsize(self):
q = janus.Queue(5)
assert 5 == q.maxsize
@pytest.mark.asyncio
async def test_maxsize_named_param(self):
q = janus.Queue(maxsize=7)
assert 7 == q.maxsize
@pytest.mark.asyncio
async def test_maxsize_default(self):
q = janus.Queue()
assert 0 == q.maxsize
@pytest.mark.asyncio
async def test_unfinished(self):
q = janus.Queue()
assert q.sync_q.unfinished_tasks == 0
assert q.async_q.unfinished_tasks == 0
q.sync_q.put(1)
assert q.sync_q.unfinished_tasks == 1
assert q.async_q.unfinished_tasks == 1
q.sync_q.get()
assert q.sync_q.unfinished_tasks == 1
assert q.async_q.unfinished_tasks == 1
q.sync_q.task_done()
assert q.sync_q.unfinished_tasks == 0
assert q.async_q.unfinished_tasks == 0
q.close()
await q.wait_closed()
@pytest.mark.asyncio
async def test_sync_put_async_get(self):
loop = janus.current_loop()
q = janus.Queue()
def threaded():
for i in range(5):
q.sync_q.put(i)
async def go():
f = loop.run_in_executor(None, threaded)
for i in range(5):
val = await q.async_q.get()
assert val == i
assert q.async_q.empty()
await f
for i in range(3):
await go()
q.close()
await q.wait_closed()
@pytest.mark.asyncio
async def test_sync_put_async_join(self):
loop = janus.current_loop()
q = janus.Queue()
for i in range(5):
q.sync_q.put(i)
async def do_work():
await asyncio.sleep(1)
while True:
await q.async_q.get()
q.async_q.task_done()
task = loop.create_task(do_work())
async def wait_for_empty_queue():
await q.async_q.join()
task.cancel()
await wait_for_empty_queue()
q.close()
await q.wait_closed()
@pytest.mark.asyncio
async def test_async_put_sync_get(self):
loop = janus.current_loop()
q = janus.Queue()
def threaded():
for i in range(5):
val = q.sync_q.get()
assert val == i
async def go():
f = loop.run_in_executor(None, threaded)
for i in range(5):
await q.async_q.put(i)
await f
assert q.async_q.empty()
for i in range(3):
await go()
q.close()
await q.wait_closed()
@pytest.mark.asyncio
async def test_sync_join_async_done(self):
loop = janus.current_loop()
q = janus.Queue()
def threaded():
for i in range(5):
q.sync_q.put(i)
q.sync_q.join()
async def go():
f = loop.run_in_executor(None, threaded)
for i in range(5):
val = await q.async_q.get()
assert val == i
q.async_q.task_done()
assert q.async_q.empty()
await f
for i in range(3):
await go()
q.close()
await q.wait_closed()
@pytest.mark.asyncio
async def test_async_join_async_done(self):
loop = janus.current_loop()
q = janus.Queue()
def threaded():
for i in range(5):
val = q.sync_q.get()
assert val == i
q.sync_q.task_done()
async def go():
f = loop.run_in_executor(None, threaded)
for i in range(5):
await q.async_q.put(i)
await q.async_q.join()
await f
assert q.async_q.empty()
for i in range(3):
await go()
q.close()
await q.wait_closed()
@pytest.mark.asyncio
async def test_wait_without_closing(self):
q = janus.Queue()
with pytest.raises(RuntimeError):
await q.wait_closed()
q.close()
await q.wait_closed()
@pytest.mark.asyncio
async def test_modifying_forbidden_after_closing(self):
q = janus.Queue()
q.close()
with pytest.raises(RuntimeError):
q.sync_q.put(5)
with pytest.raises(RuntimeError):
q.sync_q.get()
with pytest.raises(RuntimeError):
q.sync_q.task_done()
with pytest.raises(RuntimeError):
await q.async_q.put(5)
with pytest.raises(RuntimeError):
q.async_q.put_nowait(5)
with pytest.raises(RuntimeError):
q.async_q.get_nowait()
with pytest.raises(RuntimeError):
await q.sync_q.task_done()
await q.wait_closed()
@pytest.mark.asyncio
async def test_double_closing(self):
q = janus.Queue()
q.close()
q.close()
await q.wait_closed()
@pytest.mark.asyncio
async def test_closed(self):
q = janus.Queue()
assert not q.closed
assert not q.async_q.closed
assert not q.sync_q.closed
q.close()
assert q.closed
assert q.async_q.closed
assert q.sync_q.closed
@pytest.mark.asyncio
async def test_async_join_after_closing(self):
q = janus.Queue()
q.close()
with pytest.raises(RuntimeError), contextlib.suppress(asyncio.TimeoutError):
await asyncio.wait_for(q.async_q.join(), timeout=0.1)
await q.wait_closed()
@pytest.mark.asyncio
async def test_close_after_async_join(self):
q = janus.Queue()
q.sync_q.put(1)
task = asyncio.ensure_future(q.async_q.join())
await asyncio.sleep(0.1) # ensure tasks are blocking
q.close()
with pytest.raises(RuntimeError), contextlib.suppress(asyncio.TimeoutError):
await asyncio.wait_for(task, timeout=0.1)
await q.wait_closed()
@pytest.mark.asyncio
async def test_sync_join_after_closing(self):
q = janus.Queue()
q.sync_q.put(1)
q.close()
loop = asyncio.get_event_loop()
fut = asyncio.Future()
def sync_join():
try:
q.sync_q.join()
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
thr = threading.Thread(target=sync_join, daemon=True)
thr.start()
with pytest.raises(RuntimeError), contextlib.suppress(asyncio.TimeoutError):
await asyncio.wait_for(fut, timeout=0.1)
await q.wait_closed()
@pytest.mark.asyncio
async def test_close_after_sync_join(self):
q = janus.Queue()
q.sync_q.put(1)
loop = asyncio.get_event_loop()
fut = asyncio.Future()
def sync_join():
try:
q.sync_q.join()
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
thr = threading.Thread(target=sync_join, daemon=True)
thr.start()
thr.join(0.1) # ensure tasks are blocking
q.close()
with pytest.raises(RuntimeError), contextlib.suppress(asyncio.TimeoutError):
await asyncio.wait_for(fut, timeout=0.1)
await q.wait_closed()
| 2.171875 | 2 |
MainOperatorExmaple.py | ZnoKunG/PythonProject | 0 | 12786642 | <reponame>ZnoKunG/PythonProject
money = 150
incomePerDay = 200
costPerday = 175
result = money + 30 * incomePerDay - 30 * costPerday
print(result) | 3.078125 | 3 |
scripts/publish_to_a_topic.py | kscottz/owi_arm | 0 | 12786643 | #!/usr/bin/env python
# THIS SHEBANG IS REALLY REALLY IMPORTANT
import rospy
import time
from std_msgs.msg import Int16MultiArray
if __name__ == '__main__':
try:
rospy.init_node('simple_publisher')
# Tell ros we are publishing to the robot topic
pub = rospy.Publisher('/robot', Int16MultiArray, queue_size=0)
# Setup our message
out = Int16MultiArray()
val = 20
# generate the message data
for j in range(0,4):
# set the joint angles
out.data = [0,50,50,50,int(val)]
# send the message
pub.publish(out)
# do some book keeping
val += 10
rospy.logwarn("Sent a message: {0}".format(val))
time.sleep(1)
except rospy.ROSInterruptException:
rospy.logwarn('ERROR!!!')
| 2.5 | 2 |
scripts/hello.py | SabrinaMB/pacote_python | 0 | 12786644 | <filename>scripts/hello.py
#!/usr/bin/env python3
from dev_aberto import hello
import gettext
gettext.install('pacote_python', localedir='locale')
if __name__ == '__main__':
date, name = hello()
print(_('Último commit feito em:'), date, _(' por'), name)
| 2.046875 | 2 |
server/mausam.py | HackBots1111/flask-server-bot | 0 | 12786645 | <gh_stars>0
from weather import Weather, Unit
def result(query):
weather = Weather(unit= Unit.CELSIUS)
location = weather.lookup_by_location(query)
condition = location.condition
return condition.text
| 2.75 | 3 |
code/mutual_information.py | Rockysed/PSC_classification | 0 | 12786646 | <filename>code/mutual_information.py
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 13 10:27:51 2018
@author: 754672
"""
#import libraries
import h5py
import numpy as np
from sklearn.feature_selection import mutual_info_classif
#import csdb data
new_file = h5py.File("../data/csdb_blabeled_reinhold_features/csdb_reinhold_features_correct_btd_complete.h5", "r")
#load features
btd_complete_scaled = new_file["btd_complete_scaled"][:]
#load labels
labels = new_file['labels'][:]
#close file
new_file.close()
#mutual information classifier
#init mi
feature_scores = mutual_info_classif(btd_complete_scaled, labels)
#indeces
features_scores_mi_ind = np.argpartition(feature_scores, -10)[-10:]
#retrieve feature score
feature_scores_important = feature_scores[features_scores_mi_ind] | 2.3125 | 2 |
rotv_apps/partners/admin.py | ivellios/django-rotv-apps | 1 | 12786647 | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Partner, MediaPatron, MediaPatronage, NormalMediaPatronage, Colaborator
def activate_event(modeladmin, request, queryset):
for event in queryset.iterator():
event.active = True
event.save()
activate_event.short_description = u'Oznacz wybrane wydarzenia jako aktywne'
class MediaPatronageAdmin(admin.ModelAdmin):
list_display = ['name', 'city', 'spot',
'start', 'end', 'active', 'activated',
'contact_email',
'created', 'modified']
actions = [activate_event, ]
class NormalMediaPatronageAdmin(admin.ModelAdmin):
list_display = ['name', 'start', 'end', 'active']
admin.site.register(MediaPatronage, MediaPatronageAdmin)
admin.site.register(NormalMediaPatronage, NormalMediaPatronageAdmin)
admin.site.register(Partner)
admin.site.register(MediaPatron)
admin.site.register(Colaborator)
| 2.125 | 2 |
manage.py | francismuk/blog | 0 | 12786648 | from app import create_app, db
from flask_script import Manager, Server
# Connect to models
from app.models import User, Category
# Set up migrations
from flask_migrate import Migrate,MigrateCommand
import os
# SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://francis:1234@localhost/blog'
# Creating app instance
# app = create_app('test')
# app = create_app('development')
app = create_app('production')
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://francis:1234@localhost/blogs'
# Create manager instance
manager = Manager(app)
# Create migrate instance
migrate = Migrate(app,db)
manager.add_command('server', Server)
manager.add_command('db',MigrateCommand)
@manager.command
def test():
'''
Run the unit tests
'''
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
return dict(app=app, db=db, Category=Category)
if __name__ == '__main__':
manager.run()
| 2.484375 | 2 |
discord_client.py | rsandrini/random_image_sender | 0 | 12786649 | <reponame>rsandrini/random_image_sender
#!/usr/bin/env python3
import json
from datetime import datetime
from discord.ext import commands
import discord
from get_file import rdm
import os
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
# read our environment variables
with open("./env.json", "r") as env:
ENV = json.load(env)
# set our environment variables
FOLDER_CRITICAL = ENV["folder_critical"]
FOLDER_CRITICAL_HELPER = ENV["folder_critical_helper"]
FOLDER_FAIL = ENV["folder_fail"]
FOLDER_FAIL_HELPER = ENV["folder_fail_helper"]
COMMAND_FAIL = ENV["command_fail"]
COMMAND_CRITICAL = ENV["command_critical"]
COMMAND_FAIL_HELPER = ENV["command_fail_helper"]
COMMAND_CRITICAL_HELPER = ENV["command_critical_helper"]
COMMAND_CHAR = ENV['command_char'] # Command used to activate bot on discord
COLORS = {
"BLACK": "\033[30m",
"RED": "\033[31m",
"GREEN": "\033[32m",
"YELLOW": "\033[33m",
"BLUE": "\033[34m",
"PURPLE": "\033[35m",
"CYAN": "\033[36m",
"GREY": "\033[37m",
"WHITE": "\033[38m",
"NEUTRAL": "\033[00m"
}
SIGN = (
COLORS["RED"] + "/" +
COLORS["YELLOW"] + "!" +
COLORS["RED"] + "\\" +
COLORS["NEUTRAL"] +
" "
)
def DISPLAY_ERROR(error_msg):
print(
"\n" +
SIGN +
" " +
COLORS["RED"] +
error_msg +
COLORS["NEUTRAL"] +
"\n"
)
def log(context):
channel = context.message.channel
author = context.message.author
channel_type = str(channel.type)
name = author.name
discriminator = author.discriminator
nickname = author.display_name
pseudo = (
COLORS["RED"] +
name + "#" + discriminator +
COLORS["NEUTRAL"] +
" (aka. " +
COLORS["BLUE"] +
nickname +
COLORS["NEUTRAL"] +
")"
)
date = "{:04}/{:02}/{:02} {:02}:{:02}:{:02}".format(
datetime.now().year,
datetime.now().month,
datetime.now().day,
datetime.now().hour,
datetime.now().minute,
datetime.now().second
)
date = COLORS["PURPLE"] + date + COLORS["NEUTRAL"]
if channel_type in ["text"]:
guild = channel.guild
server = (
COLORS["GREEN"] +
guild.name +
COLORS["NEUTRAL"]
)
channel = (
COLORS["CYAN"] +
channel.name +
COLORS["NEUTRAL"]
)
where = "on the server {srv} in {chan}".format(
srv=server,
chan=channel
)
elif channel_type in ["private"]:
where = "in " + COLORS["GREEN"] + "direct message" + COLORS["NEUTRAL"]
else:
print(
COLORS["RED"] +
"This isn't a channel we can send images" +
COLORS["NEUTRAL"]
)
print("{psd} ask for an image {where} at {date}".format(
psd=pseudo,
where=where,
date=date
))
# read our discord acces token
with open("secrets.json", "r") as secrets:
DISCORD_TOKEN = json.load(secrets)["discord"]
bot = commands.Bot(
command_prefix=COMMAND_CHAR,
description="Send a random image"
)
# CRITICAL COMMANDS ================
@bot.command(
name=COMMAND_CRITICAL,
description="Send an critical card! Good shit"
)
async def random_critical_image(context):
await send_img(FOLDER_CRITICAL, context)
@bot.command(
name=COMMAND_CRITICAL_HELPER,
description="Send an help for critical command!"
)
async def critical_help_image(context):
await send_img(FOLDER_CRITICAL_HELPER, context)
# FAIL COMMANDS =====================
@bot.command(
name=COMMAND_FAIL,
description="Send an fail card! Oh no..."
)
async def random_fail_image(context):
await send_img(FOLDER_FAIL, context)
@bot.command(
name=COMMAND_FAIL_HELPER,
description="Send an help for critical command!"
)
async def critical_help_image(context):
await send_img(FOLDER_FAIL_HELPER, context)
async def send_img(folder, context):
log(context)
try:
msg_content = {
"file": discord.File(
folder + "/{}".format(rdm(folder))
)
}
except FileNotFoundError:
DISPLAY_ERROR("The folder `{}` was not found".format(folder))
msg_content = {
"content": "The folder with images is missing, sorry..."
}
except ValueError:
DISPLAY_ERROR("The folder `{}` is empty".format(folder))
msg_content = {"content": "The folder with images is totaly empty"}
try:
await context.send(**msg_content)
except:
DISPLAY_ERROR("Somethings went wrong")
msg_content = {"content": "Somethings went wrongs, sorry.\n┬─┬ ︵ /(.□. \)"}
await context.send(**msg_content)
@bot.command()
async def test(ctx, arg):
await ctx.send(arg)
@bot.event
async def on_ready():
print(
COLORS["YELLOW"] +
"I'm logged in as {name} !\n".format(name=bot.user.name) +
COLORS["NEUTRAL"]
)
bot.run(DISCORD_TOKEN)
| 2.28125 | 2 |
server/apps/api/serializers.py | htmercury/GLselector | 0 | 12786650 | <filename>server/apps/api/serializers.py
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.ModelSerializer):
"""A user serializer to aid in authentication and authorization."""
class Meta:
"""Map this serializer to the default django user model."""
model = User
fields = ('id', 'username', 'password')
class FaceSerializer(serializers.ModelSerializer):
"""Serializer to map the Face Model instance into JSON format."""
class Meta:
model = Face
fields = ('id', 'user', 'shape', 'chin_angle', 'mofa_ratio', 'hlmo_angle')
read_only_fields = ("created_at", "updated_at") | 2.65625 | 3 |