blob_id
large_string | language
large_string | repo_name
large_string | path
large_string | src_encoding
large_string | length_bytes
int64 | score
float64 | int_score
int64 | detected_licenses
large list | license_type
large_string | text
string | download_success
bool |
|---|---|---|---|---|---|---|---|---|---|---|---|
b8604dd0374921ac5c8591f7b6e8de0ddbe84279
|
Python
|
juanjoneri/Bazaar
|
/Interview/Practice/CTCI/Trees-Graphs/pond_sizes.py
|
UTF-8
| 1,706
| 4
| 4
|
[] |
no_license
|
"""
You have an integer matrix representing a plot of land, where the values at that
location represents the height above the sea level. Avalue of zero indicates water.
A pond is a region of water connected horizontally, or diagonally. The size is
the number of connected water cells. Find the sizes of all ponds.
"""
class Map():
    """A rectangular grid of height values.

    Water cells (value 0) that touch horizontally, vertically or
    diagonally form a "pond"; this class finds the size of every such
    connected cluster of equal-valued cells.
    """

    def __init__(self, land):
        self.land = land
        self.nb_rows = len(land)
        self.nb_cols = len(land[0])
        self.visited = set()  # cells already assigned to a cluster

    def neighbors_of(self, row, col):
        """Yield every in-bounds coordinate of the 3x3 box around (row, col).

        The center cell itself is yielded too; callers rely on the
        visited set to skip it.
        """
        for d_row in (-1, 0, 1):
            for d_col in (-1, 0, 1):
                r, c = row + d_row, col + d_col
                if 0 <= r < self.nb_rows and 0 <= c < self.nb_cols:
                    yield r, c

    def cluster_size(self, row, col):
        """Depth-first count of cells connected to (row, col) with its value."""
        self.visited.add((row, col))
        value = self.land[row][col]
        total = 1
        for r, c in self.neighbors_of(row, col):
            if (r, c) not in self.visited and self.land[r][c] == value:
                total += self.cluster_size(r, c)
        return total

    def all_clusters(self, symbol):
        """Yield the size of each distinct cluster of `symbol`-valued cells."""
        for r in range(self.nb_rows):
            for c in range(self.nb_cols):
                if self.land[r][c] == symbol and (r, c) not in self.visited:
                    yield self.cluster_size(r, c)
def pond_sizes(land, symbol=0):
    """Print and return the sizes of all `symbol`-valued ponds in `land`.

    The original only printed the list, making the result unusable
    programmatically; it now also returns it (backward-compatible — the
    print side effect is preserved).
    """
    sizes = list(Map(land).all_clusters(symbol))
    print(sizes)
    return sizes
def main():
    """Demo: report the pond sizes of a fixed 4x4 plot of land."""
    sample_land = [
        [0, 2, 1, 0],
        [0, 1, 0, 1],
        [1, 1, 0, 1],
        [0, 1, 0, 1],
    ]
    pond_sizes(sample_land)


if __name__ == '__main__':
    main()
| true
|
c1839f2c5089234ed493301d937e7a0538c2517c
|
Python
|
paigexx/email_validation
|
/flask_app/models/user.py
|
UTF-8
| 1,748
| 2.9375
| 3
|
[] |
no_license
|
from flask_app.config.mysqlconnection import connectToMySQL
import re
from flask import flash
email_regex = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class User:
    """Model for a row of the `users` table, plus form-validation helpers."""

    def __init__(self, data):
        # Bug fix: attribute was `self.usernme` (typo).
        # NOTE(review): this reads data["username"] while the INSERT below
        # binds %(user_name)s — confirm which key the controller sends.
        self.username = data["username"]
        self.email = data["email"]
        self.created_at = data["created_at"]
        self.updated_at = data["updated_at"]

    @classmethod
    def get_users(cls):
        """Return every row of the users table."""
        query = "SELECT * FROM users;"
        return connectToMySQL("email_validation").query_db(query)

    @classmethod
    def get_user(cls, data):
        """Return the single user whose id matches data['user_id']."""
        query = "SELECT * FROM users WHERE users.id = %(user_id)s;"
        results = connectToMySQL("email_validation").query_db(query, data)
        return results[0]

    @classmethod
    def create_user(cls, data):
        """Insert a new user; expects 'user_name' and 'email' keys in data."""
        query = "INSERT INTO users (user_name, email, created_at, updated_at) VALUES (%(user_name)s, %(email)s, NOW(), NOW());"
        return connectToMySQL("email_validation").query_db(query, data)

    @classmethod
    def delete_user(cls, data):
        """Delete the user whose id matches data['user_id']."""
        query = "DELETE FROM users WHERE users.id = %(user_id)s"
        return connectToMySQL("email_validation").query_db(query, data)

    @staticmethod
    def validate_data(data):
        """Flash a message for each invalid field; return True when all pass.

        Fixes: 'betweeen' typo in the flash text, and `data[...][0]` (which
        raised IndexError on an empty string) replaced by startswith(' ').
        NOTE(review): the message promises 6-8 characters but only a
        minimum of 6 is enforced — confirm whether a maximum is intended.
        """
        is_valid = True
        if not email_regex.match(data["email"]):
            flash("Please enter a valid email")
            is_valid = False
        if len(data["user_name"]) < 6:
            flash("Username must be between 6-8 characters.")
            is_valid = False
        if data["user_name"].startswith(" ") or data["email"].startswith(" "):
            flash("Please don't add a space before your username.")
            is_valid = False
        return is_valid
| true
|
fc28df1e1fcf0fd1a74956d89a6771c51424cd2c
|
Python
|
Ekupka1/Python-Code
|
/EstPiOfDartsThrown.py
|
UTF-8
| 1,459
| 4.375
| 4
|
[] |
no_license
|
# Programming Assignment 1, Circle "Area"
# Ethan Kupka
# Sept 27th, 2019
# Monte Carlo estimate of pi: throw random darts at the square
# [-1, 1] x [-1, 1] and count how many land inside the unit circle.
import random
import math
dart = int(input("How many darts are you throwing?"))
numdarts = dart
insideCount = 0
for i in range(numdarts):
    # Random point with each coordinate uniform in [-1, 1].
    randx = 2 * random.random() - 1
    randy = 2 * random.random() - 1
    x = randx
    y = randy
    freddistance = x**2 + y**2  # squared distance from the origin
    # Inside the unit circle iff x^2 + y^2 <= 1.
    if freddistance <= 1:
        insideCount = insideCount + 1
# inside/total approximates (circle area)/(square area) = pi/4.
pi = (insideCount / numdarts) * 4
print("Estimated Area of the circle is:", pi)
print("This is the actual area is", math.pi)
#Dimension Names: C2D) S1 R2
#Equation: X^2 + y^2 < 1
#EC: A = pi*r^2
#Program intentions: Estimating the area in the circle
#Monte Carlo Method, pi is not given.
# Result 1) Darts(10000000)Estimated Area of the circle is: 3.1420512 - This is the actual area is 3.141592653589793
# The estimated and the correct answer are very close, the more darts thrown the more accurate it will be.
# Result 2) Darts(100)Estimated Area of the circle is: 3.48 - This is the actual area is 3.141592653589793
# The estimated and the correct answer are wildly off, the less darts thrown the less accurate it will be.
# Result 3) Darts(5000)Estimated Area of the circle is: 3.1688 - This is the actual area is 3.141592653589793
# The estimated and the correct answer are becoming closer, the more darts thrown closer they get.
| true
|
c9afb62504e2cfb40a2841eb8b08cf9354cbdc2b
|
Python
|
zihuilee/test
|
/hashlib_test.py
|
UTF-8
| 123
| 2.5625
| 3
|
[] |
no_license
|
import hashlib

# Hash a fixed salt followed by '1234' and print the hex digest;
# update() extends the digest exactly as if the bytes were concatenated.
md5 = hashlib.md5(b'c8b388b4459e13f978d7c846f4')
md5.update(b'1234')
print(md5.hexdigest())
| true
|
d9b99c3a539b5d586d8430602190e7e567f2356e
|
Python
|
RickyL-2000/python-learning
|
/business/方方/作业5/p1.py
|
UTF-8
| 605
| 3.078125
| 3
|
[] |
no_license
|
"""
方方:
2.作业2: 鸡兔同笼问题
已知在同一个笼子里有n只正常的鸡和兔,鸡和兔的总脚数为m只,其中n和m由用户输入。问鸡和兔各有几只?如果无解也请说明。要求利用列表解析式来编程实现。
提示,两个分支的条件语句格式为:
if condition :
branch1
else:
branch2
编写代码,保存成.py格式,作为作业附件上传
下面给出了参考的运行示例:
"""
all_num = input("请输入鸡和兔共几只: ")
all_feet_num = input("请输入鸡和兔共几只脚: ")
chicken_num = 0
rabbit_num = 0
| true
|
361c4c13c2fa394ed7e3063b35a182c3acd96311
|
Python
|
wjj800712/python-11
|
/chengxiangzheng/week5/Q2.py
|
UTF-8
| 495
| 3.765625
| 4
|
[] |
no_license
|
# Randomly return goods such that each item's probability of being drawn
# is proportional to its stock level.
import random

# Stock levels: 100 socks, 200 shoes, 300 slippers, 400 necklaces.
wazi = ['wazi'] * 100
xiezi = ['xiezi'] * 200
tuoxie = ['tuoxie'] * 300
xianglian = ['xianglian'] * 400
s1 = wazi + xiezi + tuoxie + xianglian
# Sampling 100 items without replacement from the pooled stock gives each
# product a draw frequency proportional to its share of the pool.
s2 = random.sample(s1, 100)
#print(s1)
#print(s2)
# count() tallies how often each product appears in the sample.
for goods in ('wazi', 'xiezi', 'tuoxie'):
    print(s2.count(goods), end=" ")
print(s2.count('xianglian'))
| true
|
2b882c5e3d81d3952a079ecf8b796fc36997851b
|
Python
|
NigrumAquila/py_checkio
|
/electronic_station/digits_multiplications.py
|
UTF-8
| 151
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
import functools


def checkio(number: int) -> int:
    """Return the product of the non-zero digits of `number`.

    The initializer 1 makes the reduce safe when every digit is zero
    (e.g. checkio(0)), where the original raised TypeError on the
    empty sequence; existing results are unchanged since 1 is the
    multiplicative identity.
    """
    digits = [int(char) for char in str(number) if char != '0']
    return functools.reduce(lambda acc, d: acc * d, digits, 1)
| true
|
04cfd4dfcd2b5fe1c3b69258dce16488f428d4d7
|
Python
|
mukundajmera/competitiveprogramming
|
/Sorting/Count possible triangles.py
|
UTF-8
| 1,387
| 3.640625
| 4
|
[] |
no_license
|
# User function Template for python3
class Solution:
    def findNumberOfTriangles(ob, arr, n):
        """Count the triples of arr that can form a triangle.

        Sorts the sides, then for each pair (i, j) advances a third
        pointer k as far as the triangle inequality holds; every index in
        (j, k) closes a valid triangle with i and j. O(n^2) overall.
        """
        arr.sort()
        total = 0
        for i in range(n - 2):
            k = i + 2
            for j in range(i + 1, n):
                # Push k while the two smaller sides still exceed arr[k];
                # k never moves backwards across the inner loop.
                while k < n and arr[k] < arr[i] + arr[j]:
                    k += 1
                total += k - j - 1
        return total
# def findNumberOfTriangles(ob, arr, n):
# #code here
# #sort array first
# arr.sort()
# posibilites = 0
# for i in range(n-2):
# #two pointer approch
# a = i+1
# b = n-1
# while(b>=i+1):
# if arr[a]+arr[i] > arr[b]:
# # print(b,a)
# posibilites = posibilites + (b-a)
# b -= 1
# a = i + 1
# else:
# a += 1
# if a == b:
# a = i + 1
# b -= 1
# return posibilites
# {
# Driver Code Starts
# Initial Template for Python 3
if __name__ == '__main__':
    # One test case per line-pair: the count n, then n side lengths.
    for _ in range(int(input())):
        n = int(input())
        sides = list(map(int, input().strip().split()))
        print(Solution().findNumberOfTriangles(sides, n))
# } Driver Code Ends
| true
|
27d232e757d53f5c12a484c267c4817325449d14
|
Python
|
DipanwitaManna16/Image_Processing
|
/app1_basics.py
|
UTF-8
| 1,525
| 3.15625
| 3
|
[] |
no_license
|
import cv2
import numpy as np
# Basic OpenCV drawing/IO demo: load a photo, then draw primitives on a blank canvas.
# NOTE(review): "\W" in this literal is not a valid escape sequence
# (DeprecationWarning in Python 3) — a raw string or forward slashes
# would be safer; verify the image actually loads (imread returns None on failure).
img = cv2.imread("Images\WhatsApp Image 2020-09-06 at 10.46.51 AM.jpeg")
print(type(img))   # numpy.ndarray on success, NoneType on a bad path
print(img.shape)   # (height, width, channels)
#----------------------------gray_image---------------------
#img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#---------------------------Resize the image-----------------
#img_resize = cv2.resize(img,(256,256))
#---------------------------Flip the image-------------------
# 1->vertical | 0-> horizontal | -1->combined effect
#img_flip = cv2.flip(img , -1)
#--------------------------Cropping---------------------------
#img_crop = img[100:300 , 200:500]
#------------------------Saving an image----------------------
#cv2.imwrite('fruits_small.png',img_crop)
#-----------------------BlackImage---------------------------
# 512x512 all-zeros array -> black 3-channel canvas.
img1 = np.zeros((512,512,3))
#----------------------Creating a Rectangle------------------
cv2.rectangle(img1 , pt1=(100,100), pt2=(200,300), color=(255,0,0) , thickness=3)
#----------------------Creating a Circle----------------------
# thickness=-1 draws a filled circle.
cv2.circle(img1 , center=(200,400) , radius=50 , color=(0,0,255), thickness=-1)
#------------------------Line----------------------------------
cv2.line(img1 ,pt1=(0,0) , pt2=(512,512) , thickness=2 , color=(0,255,0))
#-----------------------FontText-------------------------------
cv2.putText(img1 , text='Hi', org=(400,400) ,fontScale=4 , color=(0,255,255) ,
            thickness=2 , lineType=cv2.LINE_AA , fontFace=cv2.FONT_ITALIC)
# Show the canvas until any key is pressed.
cv2.imshow("window",img1)
cv2.waitKey(0)
| true
|
2d9c12e09ff90491664a3c8d3d90a9344e80deb5
|
Python
|
AzureAD/microsoft-authentication-library-for-python
|
/sample/migrate_rt.py
|
UTF-8
| 2,851
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
"""
The configuration file would look like this:
{
"authority": "https://login.microsoftonline.com/organizations",
"client_id": "your_client_id",
"scope": ["User.ReadBasic.All"],
// You can find the other permission names from this document
// https://docs.microsoft.com/en-us/graph/permissions-reference
}
You can then run this sample with a JSON configuration file:
python sample.py parameters.json
"""
import sys # For simplicity, we'll read config file from 1st CLI param sys.argv[1]
import json
import logging
import msal
# Optional logging
# logging.basicConfig(level=logging.DEBUG) # Enable DEBUG log for entire script
# logging.getLogger("msal").setLevel(logging.INFO) # Optionally disable MSAL DEBUG logs
def get_preexisting_rt_and_their_scopes_from_elsewhere():
    """Stand-in for harvesting refresh tokens from a pre-MSAL app.

    In a real migration you would pull RTs from wherever the previous
    app persisted them — e.g. an ADAL-powered app whose v1 resources
    must first be converted to v2 scopes (appending "/.default" to the
    resource usually works), or any other OAuth client's token store.

    Returns:
        list of (refresh_token, scopes) pairs to be redeemed by MSAL.
    """
    sample_tokens = [
        ("old_rt_1", ["scope1", "scope2"]),
        ("old_rt_2", ["scope3", "scope4"]),
    ]
    return sample_tokens
# We will migrate all the old RTs into a new app powered by MSAL.
# NOTE(review): the file handle from open() is never closed — harmless in a
# one-shot script, but a `with` block would be tidier.
config = json.load(open(sys.argv[1]))
app = msal.PublicClientApplication(
    config["client_id"], authority=config["authority"],
    # token_cache=... # Default cache is in memory only.
    # You can learn how to use SerializableTokenCache from
    # https://msal-python.readthedocs.io/en/latest/#msal.SerializableTokenCache
    )
# Migration strategy: redeem every old RT in one loop; each successful
# redemption stores fresh tokens in the app's cache automatically.
for old_rt, scopes in get_preexisting_rt_and_their_scopes_from_elsewhere():
    result = app.acquire_token_by_refresh_token(old_rt, scopes)
    if "error" in result:
        print("Discarding unsuccessful RT. Error: ", json.dumps(result, indent=2))
print("Migration completed")
# From now on, those successfully-migrated RTs are saved inside MSAL's cache,
# and becomes available in normal MSAL coding pattern, which is NOT part of migration.
# You can refer to:
# https://github.com/AzureAD/microsoft-authentication-library-for-python/blob/1.2.0/sample/device_flow_sample.py#L42-L60
| true
|
45f439afdfc56af018f733ecbbd66f7590b3a297
|
Python
|
huchangchun/spider
|
/01-spider/practice/01-wangyi-music.py
|
UTF-8
| 9,215
| 3.046875
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
"""
1.抓取所有的分类的id,然后拼接出对应的分类的链接
2.访问分类的链接,抓取所有歌单(专辑)的详细页面的链接
3.访问详细页面的链接,抓取所有歌曲的详细页面的链接
4.抓取歌曲的信息(歌名,歌手名,分类信息),存储到文本csv或者txt等)或数据库里
5.将歌曲名传递给download_music实现,下载对应音乐文件(这个操作可以只下载一首)
"""
"""
分析:
1、入口:http://music.163.com/discover/playlist/
2、每页每个播放列表入口:"baseurl+ //div[@class='u-cover u-cover-1']/a/@href"
3、每个列表中的歌曲://ul[@class='f-hide']/li/a/@href"
4、获取下一页入口:"//div[@class='u-page']/a[last()]/@href"
判断是否还有下一页://div[@class='u-page']/a[last()]/@class")[0] == zbtn znxt
http://music.163.com/song/media/outer/url?id={}.mp3
"""
import requests
from bs4 import BeautifulSoup
from lxml import etree
import time
# Fetch a page and parse it into an lxml element tree.
def get_html_by_etree(url, headers):
    """Download `url` with the given headers and return the parsed HTML tree."""
    response = requests.get(url, headers=headers)
    return etree.HTML(response.content.decode('utf-8'))
# 1. Generator over playlist pages: yields the playlist detail-page links.
def get_play_list(playlisturl, headers, baseurl):
    """Yield, page by page, the list of playlist detail URLs.

    Follows the "next page" link until its CSS class stops being
    'zbtn znxt' (the enabled next-button class on music.163.com).
    """
    selector = get_html_by_etree(playlisturl, headers)
    num = 0
    page = 1
    next_page_url = playlisturl
    while True:
        print("这是第{0}页 Url:{1} ".format(page, next_page_url))
        play_list = [baseurl + i for i in selector.xpath("//div[@class='u-cover u-cover-1']/a/@href")]
        num += len(play_list)
        yield play_list
        time.sleep(0.2)  # throttle requests to avoid being blocked
        next_page_text = selector.xpath("//div[@class='u-page']/a[last()]/@class")[0]
        # The last pager link is the "next" button while pages remain.
        if next_page_text == 'zbtn znxt':
            page += 1
            next_page_url = baseurl + selector.xpath("//div[@class='u-page']/a[last()]/@href")[0]
            selector = get_html_by_etree(next_page_url, headers)
            time.sleep(0.2)
        else:
            break;
    print("总共{0}个playlist ".format(num))
def getsongslist(playlisturl, headers, baseurl):
    """Generator: for every playlist found, yield its song detail-page URLs."""
    num = 0
    for playlists in get_play_list(playlisturl, headers, baseurl):
        for playlist in playlists:
            print("playlist", playlist)
            time.sleep(0.2)  # throttle requests
            selector = get_html_by_etree(playlist, headers)
            songurls = selector.xpath("//ul[@class='f-hide']/li/a/@href")
            songurls = [baseurl + i for i in songurls]
            # print("songurls",songurls)
            num += len(songurls)
            yield songurls
    print("总共{0}首歌 ".format(num))
def savesongurl(playlisturl, headers, baseurl):
    """Crawl every song page and persist each song's info via save_data."""
    num = 0
    for songlists in getsongslist(playlisturl, headers, baseurl):
        for songurl in songlists:
            time.sleep(0.2)  # throttle requests
            num += 1
            print("获取第{0}首歌信息".format(num))
            songinfo = get_songs_info(songurl, headers)
            save_data(songinfo)
# def get_songs_list(playlisturls,headers,baseurl):
# songs_list = []
# num = 0
# for listurl in playlisturls:
# num +=1
# # print("第{0}歌曲列表 ".format(num))
# selector = get_html_by_etree(listurl, headers)
# songurls = selector.xpath("//ul[@class='f-hide']/li/a/@href")
# songs_list += [baseurl + i for i in songurls]
# # for url in songurls:
# # if url not in songs_list:
# # songs_list += url
# print(len(songs_list))
# print("总共{0}首歌 ".format(len(songs_list)))
# for i in songs_list:
# with open('songsurl.txt', 'a+', encoding='utf-8') as outputfile:
# outputfile.write(i + '\n')
# return songs_list
#1.获取所有页面的播放列表入口
# def get_play_list(playlisturl,headers,baseurl):
# selector = get_html_by_etree(playlisturl,headers)
# play_list =[]
# page = 1
# next_page_url = playlisturl
# while True:
# print("这是第{0}页 Url:{1} ".format(page,next_page_url))
# play_list +=[baseurl + i for i in selector.xpath("//div[@class='u-cover u-cover-1']/a/@href")]
# next_page_text = selector.xpath("//div[@class='u-page']/a[last()]/@class")[0]
# #判断是否还有下一页
# if next_page_text =='zbtn znxt':
# page += 1
# next_page_url = baseurl + selector.xpath("//div[@class='u-page']/a[last()]/@href")[0]
# selector = get_html_by_etree(next_page_url,headers)
# else:
# break;
# print("总共{0}个playlist ".format(len(play_list)))
# # print(play_list)
# return play_list
#2.获取每页的歌曲
# def get_songs_list(playlisturls,headers,baseurl):
# songs_list = []
# num = 0
# for listurl in playlisturls:
# num +=1
# # print("第{0}歌曲列表 ".format(num))
# selector = get_html_by_etree(listurl, headers)
# songurls = selector.xpath("//ul[@class='f-hide']/li/a/@href")
# songs_list += [baseurl + i for i in songurls]
# # for url in songurls:
# # if url not in songs_list:
# # songs_list += url
# print(len(songs_list))
# print("总共{0}首歌 ".format(len(songs_list)))
# for i in songs_list:
# with open('songsurl.txt', 'a+', encoding='utf-8') as outputfile:
# outputfile.write(i + '\n')
# return songs_list
# 3. Scrape one song detail page into a dict of fields.
def get_songs_info(songurl, headers):
    """Return {songname, musician, cnt_comment_count, songurl} for one song page.

    NOTE(review): the xpath()[0] calls raise IndexError if the page layout
    changes or the request is rejected — verify before a long crawl.
    """
    songs_info = {}
    time.sleep(0.2)  # throttle requests
    selector = get_html_by_etree(songurl, headers)
    songs_info['songname'] = selector.xpath("//em[@class='f-ff2']/text()")[0]
    songs_info['musician'] = selector.xpath("//p[@class='des s-fc4']/span/@title")[0]
    songs_info['cnt_comment_count'] = selector.xpath("string(//span[@class='sub s-fc3']/span)")
    songs_info['songurl'] = songurl
    return songs_info
# Persist one song's info as a comma-separated line in musics.txt.
first_time = True
def save_data(data):
    """Append `data`'s values to musics.txt as one CSV-ish line.

    On the first call of the process, a header row built from the dict's
    keys is written first; the module-level `first_time` flag tracks that.

    Improvements: the file is now opened once per call instead of twice,
    and the unidiomatic `== True` comparison is gone. Output bytes are
    unchanged.
    """
    global first_time
    with open('musics.txt', 'a+', encoding='utf-8') as outputfile:
        if first_time:
            first_time = False
            title = ','.join([str(i) for i in data.keys()])
            outputfile.write(title + '\n')
        content = ','.join([str(i) for i in data.values()])
        outputfile.write(content + '\n')
if __name__ == '__main__':
    baseUrl = 'http://music.163.com'
    playListUrl = 'http://music.163.com/discover/playlist'
    # Browser-like headers; the site rejects requests without a Referer/UA.
    headers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip,deflate,sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Host': 'music.163.com',
        'Referer': 'http://music.163.com/search/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'
    }
    # Crawl every playlist page, then every song page, saving song info
    # to musics.txt as it goes.
    savesongurl(playListUrl, headers, baseUrl)
# url2 = "http://music.163.com/playlist?id=2151717076"
#
# url2 = "http://music.163.com/playlist?id=2158379247"
# re = requests.get(url,headers=headers).content.decode('UTF-8')
# html = etree.HTML(re)
# print(html.xpath("//div[@class='u-cover u-cover-1']/a/@href"))
# print(html.xpath("//div[@class='u-page']/a[last()]/@href")[0])
# print(html.xpath("//div[@class='u-page']/a[last()]/@class")[0])
# re2 = requests.get(url2,headers=headers).content.decode('UTF-8')
# html2 = etree.HTML(re2)
# with open('wangyi.html', 'a+', encoding='utf-8') as outputfile:
# outputfile.write(re2 + '\n')
# print(html.xpath("//div[@class='ttc']/span/a/@href"))
# print(html.xpath("//div[@class='ttc']/span/a/b/@title"))
# print(html.xpath("//div[@class='hd']/span/@data-res-id"))
# print(html2.xpath("//ul[@class='f-hide']/li/a/@href"))
# url3="http://music.163.com/song?id=544056874"
# re3 = requests.get(url3,headers=headers).content.decode('utf-8')
# html2 = etree.HTML(re3)
# print(re3)
# # print(html2.xpath("//p[@class='des s-fc4]/span/@title"))
# # with open('song.html', 'a+', encoding='utf-8') as outputfile:
# # outputfile.write(re3 + '\n')
# # < div
# # class ="tit" >
# # < em
# # class ="f-ff2" > Double Down < / em >
# # < / div >
# # < / div >
# # < p class ="des s-fc4" > 歌手: < span title="Dave Thomas Junior" > < a class ="s-fc7" href="/artist?id=32233" > Dave Thomas Junior < / a > < / span > < / p >
# # < p
# # class ="des s-fc4" > 所属专辑: < a href="/album?id=37763783" class ="s-fc7" > Double Down < / a > < / p >
# # < div
# # class ="m-info" >
# #songname
# print(html2.xpath("//em[@class='f-ff2']/text()")[0])
#musician
# print(html2.xpath("//p[@class='des s-fc4']/span/@title")[0])
# #span id="cnt_comment_count"
# print(html2.xpath("string(//span[@class='sub s-fc3']/span)"))
| true
|
e310762d0e05b9ecc078827a00b6389ea87e10d9
|
Python
|
JakobKallestad/Python-Kattis
|
/src/VideoSpeedup.py
|
UTF-8
| 328
| 3.0625
| 3
|
[] |
no_license
|
# Kattis "Video Speedup": the playback speed rises by p percent at each
# given timestamp; compute how much original-speed content is consumed.
n, p, k = map(int, input().split())  # n speedup points, p percent step, k total minutes
p /= 100  # percent -> fractional speed increase per speedup point
speedups = list(map(int, input().split()))
speedups.append(k)  # treat the end of the video as the final segment boundary
original_time = 0
prev_time = 0
current_increase = 1  # playback speed factor, starts at 1x
for inc in speedups:
    duration = inc - prev_time  # real minutes watched in this segment
    # Content covered = watched duration times the current speed factor.
    original_time += duration*current_increase
    current_increase += p
    prev_time = inc
print(original_time)
| true
|
641ba6e7707e7127ca943bd5b14f3c2d37c8a44b
|
Python
|
Why-are-we-doing-this/SuperSmashBrars-Discord-
|
/People/emily.py
|
UTF-8
| 938
| 2.71875
| 3
|
[] |
no_license
|
import random
from Character import Character
import discord
from discord.ext import commands
class Emily(Character):
    """Playable character "Emily" for the Discord Smash-style game."""

    def __init__(self, **kwargs):
        # Base stats; srec is presumably the special's resource cost — TODO confirm
        # against the Character base class.
        super().__init__("Emily", title="2-Dimensional", hp=1300, attack=220, dodge=20, crit=20, defense=20,
                         gender=1, critValue=2, srec = 6)

    async def startpassive(self):
        # Passive: +50% own attack when the opponent's gender flag is 0.
        if self.enemy.gender == 0:
            self.modifiers['attack']['selfmult'] = 1.5

    async def special(self):
        # Special: triple own attack for the round, force the enemy's
        # dodge flag on (semantics of doesdodge defined in the engine —
        # TODO confirm), announce it, and spend the resource cost.
        self.modifiers['attack']['selfmult'] *= 3
        self.enemy.doesdodge = 1
        await self.chan.send(f"{self.u.user.name}'s Emily throws a snowflake tantrum")
        self.resource -= self.srec

    async def reset(self):
        # Restore full HP, then let the base class reset shared state.
        self.hp = 1300
        await super().reset()

    async def endround(self):
        # Undo the special's attack multiplier at round end.
        # NOTE(review): the hp/10 line leaves Emily at 10% HP after a
        # special round — looks like a deliberate drawback, but confirm.
        if self.isSpecial:
            self.modifiers['attack']['selfmult'] /= 3
            self.hp = self.hp/10
        await super().endround()
| true
|
b08c181de465201b6540a67eeae3a37c1292b695
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02612/s529001506.py
|
UTF-8
| 85
| 3.265625
| 3
|
[] |
no_license
|
# Print how much must be added to n to reach the next multiple of 1000
# (0 when n is already a multiple).
n = int(input())
remainder = n % 1000
print(1000 - remainder if remainder else 0)
| true
|
69bda0bd5a57dc292d7f8006ffe42b2017564867
|
Python
|
Maelstrom6/MachineLearning3
|
/Regression/Simple Linear Regression/Simple Linear Regression.py
|
UTF-8
| 1,159
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
from builtins import print
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# Import the data set (expects Salary_Data.csv in the working directory;
# presumably years-of-experience in column 0 and salary in column 1 — verify)
dataset = pd.read_csv("Salary_Data.csv")
X = dataset.iloc[:, :-1].values  # feature matrix: every column but the last
y = dataset.iloc[:, 1].values    # target vector: the second column
# Split the data set (fixed random_state for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1 / 3, random_state=0)
# Don't need feature scaling — LinearRegression handles a single raw feature fine
# Fitting simple linear regression to the training set
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the test set results
y_pred = regressor.predict(X_test)
# Visualise the training set results: observations vs the fitted line
plt.scatter(X_train, y_train, color="red")
plt.plot(X_train, regressor.predict(X_train), color="blue")
plt.title("Salary vs Experience (Training set)")
plt.xlabel("Years of Experience")
plt.ylabel("Salary")
plt.show()
# Visualise the test set results (same fitted line, held-out points)
plt.scatter(X_test, y_test, color="red")
plt.plot(X_test, regressor.predict(X_test), color="blue")
plt.title("Salary vs Experience (Test set)")
plt.xlabel("Years of Experience")
plt.ylabel("Salary")
plt.show()
| true
|
21f8998c1c0149ffaf54444c6b46310205a10a51
|
Python
|
antogeo/scikit-learn
|
/examples/model_selection/plot_roc_crossval.py
|
UTF-8
| 4,236
| 2.984375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.model_selection.cross_val_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc.py`,
"""
import os
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
from sklearn import cross_validation
#%matplotlib
# #############################################################################
# Data IO and generation
# Import some data to play with
# NOTE(review): DataTable/TestSet are only defined for these two hostnames —
# on any other machine the code below raises NameError; add an else branch.
# Also `sklearn.cross_validation` and `scipy.interp` are long-deprecated
# (removed in modern sklearn/scipy) — confirm the pinned versions.
if os.uname()[1]== 'antogeo-XPS':
    DataTable = np.genfromtxt('/home/antogeo/Dropbox/Lizette_yorgos/train_allFeat.csv',delimiter=',',dtype=None)[1:]
    TestSet = np.genfromtxt('/home/antogeo/Dropbox/Lizette_yorgos/test_allFeat.csv',delimiter=',',dtype=None)[1:]
elif os.uname()[1]== 'coma_meth':
    DataTable = np.genfromtxt('/home/coma_meth/Dropbox/Lizette_yorgos/train_allFeat.csv',delimiter=',',dtype=None)[1:]
    TestSet = np.genfromtxt('/home/coma_meth/Dropbox/Lizette_yorgos/test_allFeat.csv',delimiter=',',dtype=None)[1:]
# Columns 1,2,3,5 become the feature matrix; column 0 ('1'/'0') the labels.
X, y = (DataTable[:,[1,2,3,5]]).astype(np.float), (DataTable[:,0]=='1')
n_samples, n_features = X.shape
# random_state = np.random.RandomState(0)
# X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# #############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = cross_validation.ShuffleSplit(53, n_iter=500, test_size=.25, random_state=0)
classifier = svm.SVC(C=1, gamma=.1, probability=True)
tprs = []   # per-split TPR curves interpolated onto mean_fpr
aucs = []   # per-split AUC values
mean_fpr = np.linspace(0, 1, 100)
i = 0
for train, test in cv:
    probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area under the curve for this split
    fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
    tprs.append(interp(mean_fpr, fpr, tpr))
    tprs[-1][0] = 0.0  # pin the curve to the origin
    roc_auc = auc(fpr, tpr)
    aucs.append(roc_auc)
    # plt.plot(fpr, tpr, lw=1, alpha=0.3,
    #          label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
    i += 1
# Diagonal = chance-level classifier
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
         label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0  # pin the mean curve to (1, 1)
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
         label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
         lw=2, alpha=.8)
# Shade +/- one standard deviation around the mean curve, clipped to [0, 1].
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                 label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| true
|
fc215557c19142926c4a899d365230ed9d270aaa
|
Python
|
dielhennr/datavis_project
|
/data_exploration.py
|
UTF-8
| 4,217
| 2.96875
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import matplotlib.cbook as cbook
import pandas as pd
def top_consumers_2014():
    """Bar chart of states whose 2014 commercial LPG consumption exceeds 4000.

    Reads energy.csv directly; assumes column 9 holds the LPG consumption
    and the second-to-last column the state name.
    """
    ifile = open('energy.csv', 'r')
    states = {}  # state name -> 2014 consumption, for states over the threshold
    # Skip ahead to the 2014 rows.
    # NOTE(review): 2755 is a hard-coded offset tied to this exact file —
    # verify it still lands on the first 2014 row.
    for i in range(2755):
        ifile.readline()
    for line in ifile:
        line = line.split(',')
        state = line[-2]
        petrol_consumption = int(float(line[9]))
        if petrol_consumption > 4000:
            states[state] = petrol_consumption
    fig, ax = plt.subplots()
    y_pos = np.arange(len(states))
    x_pos = states.values()
    ax.barh(y_pos, x_pos, align='center',
            color='blue')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(states)
    ax.invert_yaxis()  # highest consumer at the top
    ax.set_xlabel('Consumption\nUnits = 1 Billion British Thermal Units ~ 1055.06*billion Joules')
    ax.set_title('States with highest commercial liquid petroleum consumption in 2014')
    plt.show()
    ifile.close()
def cali_consumption():
    """Histograms of yearly commercial LPG consumption: CA vs NC.

    Bug fixed: the legend labels were swapped relative to the data —
    `cali_years` (green) was labelled "North Carolina" and `nc_years`
    (red) was labelled "California". Labels now match their series.
    """
    fig, ax = plt.subplots()
    ifile = open('energy.csv', 'r')
    cali_years = []  # one consumption figure per year, California
    nc_years = []    # one consumption figure per year, North Carolina
    ifile.readline()  # skip the CSV header row
    for line in ifile:
        line = line.split(",")
        if line[-2] == "California":
            petrol_consumption = int(float(line[9]))
            cali_years.append(petrol_consumption)
        elif line[-2] == "North Carolina":
            petrol_consumption = int(float(line[9]))
            nc_years.append(petrol_consumption)
    plt.hist(cali_years, bins=10, histtype="step", fill=True, color="green", label="California")
    plt.hist(nc_years, bins=10, histtype="step", fill=False, color="red", label="North\nCarolina")
    plt.legend()
    ax.set_xlabel('Commercial Consumption in Billion BTU')
    ax.set_ylabel('Years of this much consumption')
    ax.set_title("Commercial liquid petroleum consumption from 1960 - 2014")
    plt.show()
    ifile.close()
def cali_line():
    """Line chart of yearly commercial LPG consumption, CA vs NC, 1960-2014."""
    fig, ax = plt.subplots()
    ifile = open('energy.csv', 'r')
    cali_years = {}   # year -> consumption, California
    nocar_years = {}  # year -> consumption, North Carolina
    ifile.readline()  # skip the CSV header row
    for line in ifile:
        line = line.split(",")
        if line[-2] == "California":
            petrol_consumption = int(float(line[9]))
            cayear = int(line[-1])
            cali_years[cayear] = petrol_consumption
        elif line[-2] == "North Carolina":
            petrol_consumption = int(float(line[9]))
            ncyear = int(line[-1])
            nocar_years[ncyear] = petrol_consumption
    cali = plt.plot(list(cali_years.keys()), list(cali_years.values()), "o-", label="CA", color = "g")
    ncar = plt.plot(list(nocar_years.keys()), list(nocar_years.values()), "o-", label="NC", color = "m")
    plt.legend()
    ax.set_xlabel('Year')
    ax.set_ylabel('Commercial Consumption in Billion BTU')
    ax.set_title("Commercial liquid petroleum consumption in California and North Carolina\nFrom 1960 - 2014")
    plt.show()
    ifile.close()
np.random.seed(0)  # NOTE(review): seeds NumPy's RNG but nothing below uses randomness — likely leftover
def us_totals():
    """Plot yearly US commercial wood/coal/LPG consumption from energy.csv.

    NOTE(review): despite the name, `averages` holds yearly *sums* —
    `counter` is incremented but never used to divide. Also the `else`
    branch stores the previous year's totals before folding in the
    current line, and the final year is never flushed into `averages`.
    Confirm these are acceptable before relying on the chart.
    """
    ifile = open('energy.csv', 'r')
    fig, ax = plt.subplots()
    ifile.readline()  # skip the CSV header row
    averages = {}     # year -> [wood, coal, lpg] totals
    previous = 1960
    wood_avg =coal_avg = lpg_avg = counter = 0
    for line in ifile:
        line = line.split(",")
        year = int(line[-1])
        if year == previous:
            counter += 1
            wood_avg += int(float(line[5]))
            coal_avg += int(float(line[6]))
            lpg_avg += int(float(line[9]))
        else:
            # Year changed: store the accumulated totals and reset.
            averages[int(previous)] = [wood_avg,coal_avg,lpg_avg]
            previous = year
            wood_avg = coal_avg = lpg_avg = counter = 0
    averages_list = list(averages.values())
    years_list = list(averages.keys())
    wood_avg_list = []
    coal_avg_list = []
    lpg_avg_list = []
    natgas_avg_list = []  # NOTE(review): populated nowhere — dead variable
    for i in averages_list:
        wood_avg_list.append(i[0])
        coal_avg_list.append(i[1])
        lpg_avg_list.append(i[2])
    plt.plot(years_list, wood_avg_list, label = "Wood")
    plt.plot(years_list, coal_avg_list, label= "Coal", linestyle=":", linewidth = 3.)
    plt.plot(years_list, lpg_avg_list, label="LPG", linestyle = "--")
    ax.set_xlabel("Year")
    ax.set_ylabel("Energy consumed")
    ax.set_title("Wood, Coal, and LPG consumption in commercial sector\nUnits in billion BTU")
    plt.legend()
    plt.show()
    ifile.close()
def box_plot():
    """Box plot of the last 50 rows (2014, one per state) of commercial LPG use."""
    ifile = pd.read_csv("energy.csv")
    ifile = ifile.tail(50)
    ifile.plot(kind="box", y='Consumption.Commercial.Liquefied Petroleum Gases', title="Commercial LPG Consumption in US during 2014\nEvery peice of data represents a state's consumption in Billion BTU")
    plt.show()
# Render every chart in sequence when the module is executed.
top_consumers_2014()
cali_consumption()
cali_line()
us_totals()
box_plot()
| true
|
5d30d42b0d01da78dbd3efb4c7bee7e3590dd5cb
|
Python
|
etwit/LearnPy
|
/Day3/ex13.py
|
UTF-8
| 813
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/env python
#_*_coding:utf-8_*_
'''
Python 2 exercises: functions, map, filter, lambda and reduce.

Created on 2017-07-24
@author: Ethan Wong
'''
def function(arg):
    # Print the argument (Python 2 print statement).
    print arg
function('alxe')
# The function could also be invoked via apply():
#print apply(function('aaa'))
def foo(arg):
    return arg + 100
li = [11,22,33]
temp = []
for item in li:
    temp.append(foo(item))
print temp
# map applies the same operation to every element, replacing the loop above.
temp = map(lambda arg:arg+100, li)
print temp
# filter keeps only the elements for which the predicate returns True.
def foo1(arg):
    if arg<22:
        return True
    else:
        return False
print filter(foo1,li)
# Same filtering written as a lambda.
print filter(lambda x:x<22,li)
# reduce folds the list with a two-argument function (here: running product).
print reduce(lambda x,y:x*y,li)
| true
|
bb5aa3eabc830d69e1583236b848d286556fa4d1
|
Python
|
ankitaggarwal011/s-index
|
/code/reach.py
|
UTF-8
| 541
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
def dp(n, cutoff):
    """Allocate the shared memo table used by reach().

    Must be called once (node count *n*, depth limit *cutoff*) before the
    first reach() call.  The table lives on the function object as dp.memo;
    depth indices run 0..cutoff inclusive, hence cutoff + 1 columns (the
    original allocated only `cutoff`, an off-by-one at h == cutoff).
    """
    dp.memo = [[None] * (cutoff + 1) for _ in range(n)]


def reach(p, h, adj, visited, cutoff):
    """Score the subgraph reachable from node *p* entered at depth *h*.

    Returns (reach_score, max_depth, node_count), or None when *p* was
    already visited or the depth limit is exceeded.

    Fixes over the original draft:
    - `if p is in visited` was a syntax error -> `if p in visited`
    - `2 ^ h` is bitwise XOR; the depth weight intended is `2 ** h`
    - the recursive call passed dp.memo positionally, shifting every
      argument; it now matches this function's own signature
    - the bare module-level `dp()` call (a TypeError) was dropped; callers
      initialize the memo explicitly with dp(n, cutoff)
    """
    if p in visited:
        return None
    if h > cutoff:
        return None
    visited.add(p)
    if dp.memo[p][h] is not None:
        return dp.memo[p][h]
    rp = len(adj[p]) * (2 ** h)   # direct fan-out, weighted by depth
    np = 1                        # nodes counted in this subtree
    hp = h                        # deepest depth reached
    for q in adj[p]:
        reach_tuple = reach(q, h + 1, adj, visited, cutoff)
        if reach_tuple is None:
            continue
        rq, hq, nq = reach_tuple
        rp = rp + rq
        # NOTE(review): original used max(hp, hp + hq), which always grows;
        # tracking the deepest child depth (hq) looks like the intent.
        hp = max(hp, hq)
        np = np + nq
    dp.memo[p][h] = (rp, hp, np)
    return (rp, hp, np)
| true
|
d78033f4ed657062c0e9f0a8b4c3f17510ee45eb
|
Python
|
ayesha-omarali/cs170proj
|
/solutions.py
|
UTF-8
| 1,880
| 3.1875
| 3
|
[] |
no_license
|
# rough pseudo code because it's late and I don't
# really quite know python well enough to write this correctly
import sys
# NOTE(review): this file is self-described pseudo-code.  Several names
# (instance_validator, SCCMaker, bellman_ford, topological_sort,
# all_orderings, efficient_cycle_analysis*, sol_file_object) are never
# defined here, and the final line is not valid Python, so the module
# does not parse or run as-is.
class solutions:
    def __init__(self, instances, sol_file_object):
        #given zip file of instances
        self.instances = instances
        #no clue if this is right
        for instance in self.instances:
            self.instance = instance
            #or maybe self.instance = analyze_instance(instance) idk?
            self.instance_name = self.instance.readline() #so i can write to corresponding line.
    #https://docs.python.org/2/library/stdtypes.html#str.format
    # -- string format crap that i pulled crap from
shit_to_write = []
def read_instances(instances):
    #validate instances
    scc_lst = []
    for instance in instances:
        instance_validator()
        analyze_instance(instance)
def analyze_instance(instance):
    scc_lst = SCCMaker(instance)
    for scc in scc_lst:
        #organize crap within SCCs
        scc = bellman_ford(topological_sort(scc))
        scc = all_orderings(scc)
        ###this is definitely wrong because I don't follow Apollo's code that great
        ### but essentially implement eff cycle anal such that
        ### as we iterate through each SCC in the instance, we end up with the best path of each SCC
        ### then after compare the best of each SCC in it's own list with probs brute force that Apollo wrote
        ## THEN we save that shit to the shit_to_write list, so we can make an easy writing shit function output.
        loose_approx = efficient_cycle_analysis_49(scc, instance)
        tight_approx = efficient_cycle_analysis(loose_approx, instance) #i have no idea if this is done properly...
        shit_to_write.append([instance.instance_name, tight_approx])
def write_sol(shit_to_write):
    sol = open(sol_file_object)
    for shit in shit_to_write:
        # NOTE(review): `shit[1]'\n'` below is a syntax error (missing `+`).
        int(sol.writeline(shit[1]'\n').split()[0]) #need to figure out how to ensure solution i is on the ith line
| true
|
4810d4e925a53396fffbe42a5ee5dae88e6cfc7e
|
Python
|
liupy525/LeetCode-OJ
|
/36_Valid-Sudoku.py
|
UTF-8
| 1,611
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/local/env python
# -*- coding: utf-8 -*-
'''
Determine if a Sudoku is valid, according to: Sudoku Puzzles - The Rules.
The Sudoku board could be partially filled, where empty cells are filled with the character '.'.
A partially filled sudoku which is valid.
'''
class Solution:
    # @param board, a 9x9 2D array (rows may be strings or lists of chars)
    # @return a boolean
    def isValidSudoku(self, board):
        """Validate a (possibly partial) 9x9 Sudoku board.

        The board is valid when no digit repeats within any row, column, or
        3x3 sub-box; '.' marks an empty cell and is ignored.  Replaces the
        original three separate passes (with their redundant list resets)
        with a single pass over the grid.
        """
        rows = [set() for _ in range(9)]
        cols = [set() for _ in range(9)]
        boxes = [set() for _ in range(9)]
        for i in range(9):
            for j in range(9):
                cell = board[i][j]
                if cell == '.':
                    continue
                # Index of the 3x3 box containing (i, j).
                box = (i // 3) * 3 + (j // 3)
                if cell in rows[i] or cell in cols[j] or cell in boxes[box]:
                    return False
                rows[i].add(cell)
                cols[j].add(cell)
                boxes[box].add(cell)
        return True
#s = ["..4...63.",".........","5......9.","...56....","4.3.....1","...7.....","...5.....",".........","........."]
#s = [".87654321","2........","3........","4........","5........","6........","7........","8........","9........"]
s = ["....5..1.",".4.3.....",".....3..1","8......2.","..2.7....",".15......",".....2...",".2.9.....","..4......"]
# Ad-hoc manual check (Python 2 `print` statement).
t = Solution()
print t.isValidSudoku(s)
| true
|
eb493c4794cc70300013b44d7af4e755411bdf06
|
Python
|
UIUCLibrary/DCC_MigrationTools
|
/MigrationTools/Finders.py
|
UTF-8
| 2,143
| 3.046875
| 3
|
[] |
no_license
|
import functools
import os
import pickle
import re
def check_valid_path(func):
    """Decorator: raise FileNotFoundError when the second positional argument
    is not an existing filesystem path.

    Used on functions/methods whose path is args[1] (the first argument after
    `self`, or the second argument of a free function).  functools.wraps
    preserves the wrapped function's name and docstring, which the original
    decorator discarded.
    """
    @functools.wraps(func)
    def call_func(*args, **kwargs):
        path = args[1]
        if not os.path.exists(path):
            raise FileNotFoundError("Path \"{}\" not found.".format(path))
        return func(*args, **kwargs)
    return call_func
@check_valid_path
def find_file_locally(filename, path):
    """Yield the full path of every file named *filename* under *path*."""
    for dirpath, _dirnames, filenames in os.walk(path):
        for entry in filenames:
            if entry == filename:
                yield os.path.join(dirpath, entry)
class PickeledFinder:
    """Walks a directory tree once and persists the listing via pickle."""

    def __init__(self, cache_file=None):
        self._tree = None
        self._cached_file = cache_file
        # Eagerly load a previously saved tree when a cache file is given.
        if cache_file is not None:
            self.load(cache_file)

    @check_valid_path
    def map_path(self, path):
        """Record the complete os.walk() listing of *path*."""
        self._tree = list(os.walk(path))

    def walk(self):
        """Return the cached walk data; raises when nothing was mapped/loaded."""
        if self._tree is None:
            raise Exception("No data loaded")
        return self._tree

    def save(self, filename):
        """Pickle the cached tree to *filename*."""
        with open(filename, "wb") as f:
            pickle.dump(self._tree, f)

    def load(self, filename):
        """Restore a previously pickled tree from *filename*."""
        with open(filename, "rb") as f:
            self._tree = pickle.load(f)
class CachedFinder:
    """Caches one os.walk() of a directory and answers queries against it."""

    @check_valid_path
    def __init__(self, path):
        self._tree = list(os.walk(path))

    def find_file(self, filename, case_insensitive=False):
        """Return full paths of files whose name equals *filename*.

        With case_insensitive=True the comparison ignores case.
        """
        if case_insensitive:
            wanted = filename.lower()
            matches = lambda name: name.lower() == wanted
        else:
            matches = lambda name: name == filename
        hits = []
        for dirpath, _dirs, names in self._tree:
            hits.extend(os.path.join(dirpath, n) for n in names if matches(n))
        return hits

    def regex_matches(self, regex):
        """Return full paths of files whose name matches *regex*.

        Matching is case-insensitive and anchored at the start of the name.
        """
        pattern = re.compile(regex, re.IGNORECASE)
        hits = []
        for dirpath, _dirs, names in self._tree:
            hits.extend(os.path.join(dirpath, n) for n in names if pattern.match(n))
        return hits
| true
|
0357e731dc07d7d4ca63986da3e623c53d1835b2
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_206/730.py
|
UTF-8
| 1,119
| 3.5625
| 4
|
[] |
no_license
|
from decimal import *
# Set the working precision on the active decimal context.  The original
# `getcontext.prec = (10)` only attached an attribute to the getcontext
# *function object* and left the real context at its default precision.
getcontext().prec = 10
class horse:
    """A rival horse: its starting position on the track and its speed."""

    def __init__(self, startPos, speed):
        self.speed = speed
        self.startPos = startPos
def GetMaxSpeed(D, horses):
    """Fastest constant speed that covers distance D without passing any horse.

    Each horse fixes its own finishing time; since we may not overtake, our
    best plan is to finish exactly when the slowest horse does, i.e. take the
    minimum candidate speed.  Decimal keeps the 1e-6 output precision.
    Returns None for an empty horse list.
    """
    best = None
    for rival in horses:
        remaining = D - rival.startPos
        finish_time = Decimal(remaining) / Decimal(rival.speed)
        candidate = Decimal(D) / finish_time
        if best is None or candidate < best:
            best = candidate
    return best
# Command-line driver (Python 2: raw_input).  Input format per case:
# "D N" on one line, then N lines of "startPos speed".
numCases=int(raw_input())
for case in range(numCases):
    line1=list(raw_input().split(" "))
    distance=int(line1[0])
    numHorses=int(line1[1])
    horses=[]
    for h in range(numHorses):
        horseLine=raw_input().split(" ")
        thisHorse=horse(int(horseLine[0]),int(horseLine[1]))
        horses.append(thisHorse)
    maxSpeed=GetMaxSpeed(distance,horses)
    print("Case #"+str(case+1)+": "+str(maxSpeed))
| true
|
008eebf9c395e9f5d909c40b175d11bfff13ee39
|
Python
|
andresberejnoi/CPU_simulation
|
/CPU16/asm_parser_v0.4.py
|
UTF-8
| 9,766
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#Parse assembly code and generate the appropriate machine language for the program
import sys
#===============================================================================
# Opcode lookup: assembly mnemonic -> 32-bit machine-code bit pattern.
DIC_COMMANDS = {#First, deal with normal instructions
                'load': 0x44000000,
                'loadi': 0x48000000,
                'save': 0x4c000000,
                'incr': 0x50000000,
                'output':0x70000000,
                'jump': 0x74000000,
                'halt': 0x7c000000,
                #conditional branching instructions
                'jgt': 0x84000000,
                #Here are operation codes for the ALU.
                #This arrangement is temporary
                'and': 0x00000000,
                'or' : 0x04000000,
                'add': 0x08000000,
                'sub': 0x18000000,
                'slt': 0x1c000000}
#===============================================================================
# NOTE(review): RAM_OFFSET is not referenced anywhere in this file — possibly
# consumed by companion tooling; confirm before removing.
RAM_OFFSET = 32
def clean_up_empties(array):
    """Return a copy of *array* without '' / None / [] entries."""
    return [entry for entry in array if entry not in ('', None, [])]
def replace_jump_labels(labels_and_loc, code_line):
    """Replace every jump-label token in *code_line* with its '@<PC>' address.

    labels_and_loc : iterable of (label, program_counter) pairs
    code_line      : list of tokens for one instruction, modified in place

    The original version kept dead variables (new_code, item_index) and
    relied on repeated list.index() calls, which only worked by accident
    for duplicate labels; enumerate() replaces each occurrence directly.
    """
    for label, pc in labels_and_loc:
        for idx, token in enumerate(code_line):
            if token == label:
                code_line[idx] = '@' + str(pc)
def assingment_preprocessor(lines):
    """Resolve 'name = value' assignment lines and substitute *name* later.

    Assignment lines are consumed (not emitted); every other line is returned
    with assigned names replaced by their values.

    Returns
    -------
    list of str : the processed lines, assignments removed.
    """
    print("#------------------Assignment Preprocessor ")
    symbol = '='      # marks an assignment line
    bindings = {}     # name -> replacement text
    processed = []
    for line in lines:
        if symbol in line:
            # Remember the binding; this line is dropped from the output.
            parts = line.split(symbol)
            name = parts[0].strip()
            value = parts[1].strip()
            bindings[name] = value
            print("Assingment saved: {0} -> {1}".format(name, value))
            print(bindings)
        else:
            # Substitute any bound name occurring as a whole token.
            words = line.split()
            for pos, word in enumerate(words):
                if word in bindings:
                    replacement = bindings[word]
                    words[pos] = replacement
                    print("Replaced '{0}' with '{1}'...".format(word, replacement))
            processed.append(" ".join(words))
    print("#------------------Assignment Preprocessor Completed")
    return processed
def main():
    """Assemble the file named in sys.argv[1] into hex machine code.

    Optional sys.argv[2] names the output file; otherwise the input name
    with a .hex suffix is used.  One 32-bit instruction is emitted per
    line, and a human-readable listing is printed to the terminal.
    """
    source_code_file = sys.argv[1] #file containing assembly code for the CPU I am simulating
    #---------------------------------------------------------------------------
    # Open input file and remove comments
    with open(source_code_file,'r') as f:
        split_lines = []
        for line in f:
            split = []
            for word in line.rstrip().split():
                if word.startswith('#'):
                    break
                if word!='':
                    split.append(word)
            #clean_line = " ".join(split)
            split_lines.append(split)
    split_lines = clean_up_empties(split_lines)
    #---------------------------------------------------------------------------
    # Turn lines into strings again:
    idx = 0
    for split_line in split_lines:
        split_lines[idx] = " ".join(split_line)
        idx += 1
    #---------------------------------------------------------------------------
    #run the preprocessor for assignments
    split_lines = assingment_preprocessor(split_lines)
    #---------------------------------------------------------------------------
    # Turn the lines back into lists
    idx = 0
    for line in split_lines:
        split_lines[idx] = line.split()
        idx += 1
    #---------------------------------------------------------------------------
    #Here we should convert instructions to numbers
    #First dealing with the case that the line has only one word or element
    #this will most likely mean that we have a label, if it ends with ':'. i.e. 'LOOP:'
    #Also, write the values to a file:
    try:
        output_file = sys.argv[2]
    except IndexError:
        output_file = source_code_file.split('.')[0] + ".hex"
    # First pass: record each label's program counter, drop label lines.
    program_counter = 0
    labels = []
    new_lines = []
    for line in split_lines:
        if line[0].endswith(":"):
            #Create appropriate code for when we have a label to jump back to
            labels.append((line[0][:-1],program_counter))
            continue
        #replace_jump_labels(line)
        new_lines.append(line)
        program_counter += 1
    #Save hex values into file
    with open(output_file, 'w') as handler:
        #Print a header for terminal output:
        template = "{0:15} | {1:10}"
        print(template.format("Assembly","Machine Code"))
        print("-"*31)
        #Now do the preprocessor step to put jump address into code
        for line in new_lines:
            replace_jump_labels(labels,line)
            #print(line)
            #print("")
            hex_command = 0
            if len(line)<3: #if the command has only two words, then the first 6 bits are for instruction and the rest are an address
                #Now we create the hex value command or machine code for the given instruction
                for word in line:
                    word = word.lower()
                    if word.startswith('@'): #if we have an address, we don't need to look up the value
                        hex_val = int(word[1:])
                    else:
                        try:
                            hex_val = int(word) #this clause is important for when dealing with inmediate values
                        except ValueError:
                            hex_val = DIC_COMMANDS[word] #if the word cannot be cast into an integer, then it must be a word
                    #User bitwise OR to accumulate commands per line into a single instruction
                    hex_command = hex_command | hex_val
            else: #this clause deals with instructions that specify more than one register
                word_index = 0
                for word in line:
                    word = word.lower()
                    if word.startswith('@'):
                        #calculate value and shift: register fields land at bits 21/16/0
                        shift_ammount = 0
                        if word_index == 1:
                            shift_ammount = 21
                        elif word_index == 2:
                            shift_ammount = 16
                        elif word_index == 3:
                            shift_ammount = 0 #this elif is not necessary, but it makes the code's intent clearer and easier to follow
                        hex_val = int(word[1:])
                        hex_val = hex_val << shift_ammount
                    else:
                        try:
                            hex_val = int(word) #this clause is important for when dealing with inmediate values
                        except ValueError:
                            hex_val = DIC_COMMANDS[word] #if the word cannot be cast into an integer, then it must be a word
                    hex_command = hex_command | hex_val
                    word_index += 1
            #Write binary code to file (but as decimal)
            hex_str = "0x{:08X}".format(hex_command) #hex version of the number
            #print(line,hex_str)
            handler.write(hex_str+'\n')
            #Print human readable output to terminal:
            #template = "{0:-<15}-> {1:10}" #this was defined above. I am repeating it here because of clarity (this script is getting messy)
            template = "{0:<15}--> {1:10}"
            print(template.format(" ".join(line),hex_str))
#print(new_lines)
if __name__=='__main__':
    # Assemble the file named on the command line.
    main()
| true
|
229ab288d5f39bb850efa1ab8dd330d8174324d0
|
Python
|
vlasovskikh/obraz
|
/test/data/plugins/src/_plugins/plugin1.py
|
UTF-8
| 977
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
from __future__ import unicode_literals
import csv
import os
import obraz
@obraz.loader
def load_capitals(path, config):
    """Load a CSV of (capital, country) rows into site data.

    Only handles the file named by config['test1']['capitals_filename']
    (default 'capitals.csv'); returns None for any other path so other
    loaders get a chance.
    """
    settings = config.get("test1", {})
    target = settings.get("capitals_filename", "capitals.csv")
    if path != target:
        return None
    with open(os.path.join(config["source"], path), "r") as fd:
        mapping = {country: capital for capital, country in csv.reader(fd)}
    return {
        "test1": {
            "capitals": mapping,
        }
    }
@obraz.processor
def process_size(site):
    """Annotate each page with the length of its rendered content."""
    # NOTE(review): the {} default would iterate keys if "pages" were a dict;
    # presumably it is a list of page dicts — confirm against obraz.
    for page in site.get("pages", {}):
        page["size"] = len(page["content"])
@obraz.generator
def generate_capitals_count_file(site):
    """Write the number of loaded capitals to <destination>/capitals_count.txt."""
    target = os.path.join(site["destination"], "capitals_count.txt")
    with open(target, "wb") as fd:
        capitals = site.get("test1", {}).get("capitals", {})
        fd.write("{0}\n".format(len(capitals)).encode("UTF-8"))
| true
|
167c3cca99444587c5c61454830e01ccc52a61d3
|
Python
|
daveb-dev/glimslib
|
/glimslib/simulation_helpers/math_reaction_diffusion.py
|
UTF-8
| 135
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
# -- logistic growth
def compute_growth_logistic(conc, prolif_rate, conc_max):
    """Logistic growth term: prolif_rate * conc * (1 - conc / conc_max)."""
    saturation = conc / conc_max
    return prolif_rate * conc * (1 - saturation)
| true
|
a46bf9e6236a197d7c482a638d834112dea45463
|
Python
|
airone-cenerino/atcoder
|
/B問題/ABC116B.py
|
UTF-8
| 192
| 3.234375
| 3
|
[] |
no_license
|
# ABC116 B: iterate the Collatz map from `a` and count terms until a repeat.
a = int(input())
l = [a]            # terms seen so far
b = a              # current term
c = 1              # count of terms (includes the starting value)
while True:
    if b % 2 == 0:
        b = int(b/2)
    else:
        b = 3 * b + 1
    c += 1
    # The first repeated value ends the sequence.
    if b in l:
        break
    l.append(b)
print(c)
| true
|
513f5901b898f4da35aa87a31a7896c6339ab999
|
Python
|
Pravin2796/python-practice-
|
/chapter 3/2.slicing.py
|
UTF-8
| 106
| 3.390625
| 3
|
[] |
no_license
|
#greeting = "good morning,"
name = "pravin"
#c= greeting + name #concatinating two strngs
# Slicing: [:-1] drops the final character, so this prints "pravi".
print(name[:-1])
| true
|
429763f57af8404b0fa4d2220480f2c4b16f151a
|
Python
|
shafiul-haque-johny/MonteCarloArea
|
/Problem2(Ass-02).py
|
UTF-8
| 1,683
| 3.125
| 3
|
[] |
no_license
|
import math
import random
import numpy as np
import matplotlib.pyplot as plt
# Monte Carlo sampling over four sub-regions of a 4x2 frame; points landing
# "inside" each shape are drawn green, others red, and hits are tallied.
# plt.plot([0, 1], [0, 1], color='Blue')
# plt.plot([4, 3], [0, 1], color='Blue')
rectangle = plt.Rectangle((0, 0), 4, 2, fc='none', ec="Blue")
plt.gca().add_patch(rectangle)
plt.xlim(left=-0.2, right=4.2)
plt.ylim(bottom=-0.2, top=2.2)
N = 1000
hit = 0
miss = 0          # NOTE(review): never updated or read below
report_hit = []
report_PI = []
report_C = []
ax = []
ay = []
interval = 1 / 100
"""
for i in range(1, N+1):
    x = (i * interval)
    y = math.sqrt(1 - (x * x))
    ax.append(x)
    ay.append(y)
plt.plot(ax, ay, c='Red')
"""
for i in range(1, N+1):
    # Left triangle: region below y = x for x in [0, 1].
    x = random.uniform(0, 1)
    y = random.uniform(0, 2)
    if y <= x:
        hit += 1
        plt.scatter(x, y, c='Green')
    else:
        plt.scatter(x, y, c='Red')
    # Right triangle: region below y = 4 - x for x in [3, 4].
    x = random.uniform(3, 4)
    y = random.uniform(0, 2)
    if y <= 4 - x:
        hit += 1
        plt.scatter(x, y, c='Green')
    else:
        plt.scatter(x, y, c='Red')
    # Bottom middle strip: always drawn green.
    # NOTE(review): this region never increments `hit` — confirm intended.
    x = random.uniform(1, 3)
    y = random.uniform(0, 1)
    plt.scatter(x, y, c='Green')
    # Half circle of radius 1 centred at (2, 1), sampled in the top strip.
    x = random.uniform(1, 3)
    y = random.uniform(1, 2)
    if np.sqrt((x-2) ** 2 + (y-1) ** 2) < 1:
        hit += 1
        plt.scatter(x, y, c='Green')
        # Milestone snapshots; with N = 1000 only i = 100 and 1000 can fire.
        if i in [100, 1000, 10000]:
            report_PI.append((2 * hit) / i)
        else:
            pass
    else:
        plt.scatter(x, y, c='Red')
    if i in [100, 1000, 10000]:
        report_hit.append(hit)
    else:
        pass
report_C = np.array(report_PI)
report_circle = report_C * 2
print("Half Circle Area:")
print(report_circle)
print("Total Hits:")
print(report_hit)
plt.show()
| true
|
c1fb1389821042cc41c148d726f4a54a3da631c4
|
Python
|
markmuetz/cfg
|
/bin/tex_git_summary.py
|
UTF-8
| 5,486
| 2.734375
| 3
|
[] |
no_license
|
import os
from pathlib import Path
import subprocess as sp
import re
import datetime as dt
import pickle
from collections import defaultdict
import hashlib
import matplotlib
import matplotlib.pyplot as plt
def run(cmd):
    """Run *cmd* through the shell; capture stdout as text, raise on non-zero exit."""
    return sp.run(cmd, check=True, shell=True, stdout=sp.PIPE, stderr=sp.DEVNULL, encoding='utf8')
def _texcount(fn_globs):
    """Invoke `texcount -sum -brief` over the glob patterns; return raw stdout."""
    return run('texcount -sum -brief {}'.format(' '.join(fn_globs))).stdout
def _git_check_on_master():
    """Return a truthy match when `git branch` marks master as current.

    Uses a raw string for the pattern: the original '\\* master' relied on
    Python passing the unknown escape through, which raises a
    DeprecationWarning and becomes an error in future Python versions.
    """
    return re.search(r'\* master', run('git branch').stdout.strip())
def _git_ordered_tags():
    """All tags ordered by commit date, oldest first (trailing '' from split)."""
    return run('git tag --sort=committerdate').stdout.split('\n')
def _git_revisions():
    """All commit hashes reachable from master, reversed to oldest-first."""
    return run('git rev-list master').stdout.split('\n')[::-1]
def _git_checkout(rev):
    """Check out *rev* quietly; raises CalledProcessError on failure."""
    # Don't care about output.
    sp.run('git checkout {}'.format(rev), check=True, stdout=sp.DEVNULL, stderr=sp.DEVNULL, shell=True)
def _git_status():
    """Show `git status` on the terminal (used for diagnosis after a failure)."""
    # Want to show output.
    sp.run('git status', shell=True)
def _git_rev_date():
    """Committer date of the checked-out revision, as a timezone-aware datetime."""
    return parse_git_datetime(run('git show -s --format=%ci').stdout.strip())
def get_tex_summary_info(*fn_globs):
    """Parse `texcount -sum -brief` output into {file_key: word_count}.

    File names are truncated to 4 characters so renamed chapters
    (ch01_introduction vs ch01) collapse onto the same key; the overall
    total lands under 'totl'.  Raises TexCountError when texcount reports
    an error ('!!!' in its output).
    """
    raw = _texcount(fn_globs)
    if re.search('!!!', raw):
        raise TexCountError(raw)
    counts = {}
    for line in raw.split('\n'):
        if not line:
            continue
        parts = line.split(':')
        if len(parts) == 3:
            # Per-file line: "<count>: <...>: <filename>".
            counts[parts[2].strip()[:4]] = int(parts[0])
        elif len(parts) == 2:
            # Summary line: "<count>: <label>".
            counts['totl'] = int(parts[0])
        else:
            raise Exception(parts)
    return counts
def parse_git_datetime(s):
    """Parse git's `%ci` committer date, e.g. '2020-01-02 03:04:05 +0000'."""
    return dt.datetime.strptime(s, '%Y-%m-%d %H:%M:%S %z')
class TexCountError(Exception):
    """Raised when texcount output contains its error marker ('!!!')."""
    pass
class TexGitInfo:
    """Collects per-revision texcount word counts for a git-tracked TeX dir.

    Results are cached per revision under <tex_dir>/.tex_git_info/<hash>/,
    keyed by a hash of the file globs, so repeated runs are fast.
    """

    def __init__(self, tex_dir, fn_globs, use_tags=False):
        self.tex_dir = tex_dir          # pathlib.Path of the TeX working tree
        self.fn_globs = fn_globs        # glob patterns handed to texcount
        self.use_tags = use_tags        # walk tags instead of every commit
        fn_globs_hash = hashlib.sha1(' '.join(self.fn_globs).encode()).hexdigest()
        # NOTE(review): debug print left in — confirm it is wanted.
        print(fn_globs_hash[:10])
        self.cache_dir = tex_dir / '.tex_git_info' / fn_globs_hash[:10]
        if not self.cache_dir.exists():
            self.cache_dir.mkdir(parents=True)

    def run(self):
        """Walk the revision list, collecting word counts (cached per rev).

        Temporarily chdirs into tex_dir and checks out each revision;
        always restores master and the original cwd.
        """
        orig_dir = Path.cwd()
        os.chdir(self.tex_dir)
        self.error = False
        if not _git_check_on_master():
            raise Exception('Not on master')
        try:
            if self.use_tags:
                rev_list = _git_ordered_tags()
            else:
                rev_list = _git_revisions()
            self.dates_for_fn = defaultdict(list)
            self.counts_for_fn = defaultdict(list)
            self.commits_for_fn = defaultdict(list)
            self.all_dates = {}
            counter = 0
            for rev in rev_list:
                if not rev:
                    continue
                rev_cache = self.cache_dir / rev
                if rev_cache.exists():
                    # Cached: reuse the pickled (date, counts) pair.
                    with rev_cache.open('rb') as rc:
                        date, fn_counts = pickle.load(rc)
                else:
                    _git_checkout(rev)
                    date = _git_rev_date()
                    try:
                        fn_counts = get_tex_summary_info(*self.fn_globs)
                    except TexCountError as tce:
                        print(f'ERROR: {rev}')
                        print(f'ERROR: {tce}')
                        # (None, None) is cached so failures are not retried.
                        date, fn_counts = None, None
                    print(f'{rev}: {date}')
                    with rev_cache.open('wb') as rc:
                        pickle.dump((date, fn_counts), rc)
                if date is None and fn_counts is None:
                    continue
                for fn, count in fn_counts.items():
                    self.counts_for_fn[fn].append(count)
                    self.dates_for_fn[fn].append(date)
                    self.commits_for_fn[fn].append(rev)
                if date not in self.all_dates:
                    self.all_dates[date] = counter
                    counter += 1
        except Exception as e:
            print(e)
            _git_status()
            self.error = True
        finally:
            _git_checkout('master')
            os.chdir(orig_dir)

    def plot(self, use_date=True, show=True, display_tex_dir=False):
        """Plot one word-count line per tracked file key (no-op after errors)."""
        if not self.error:
            plt.figure('word_counts')
            plt.title('word counts')
            for fn, counts in self.counts_for_fn.items():
                dates = self.dates_for_fn[fn]
                commits = self.commits_for_fn[fn]
                label = self.tex_dir.parts[-1] + ' / ' + fn if display_tex_dir else fn
                if not self.use_tags:
                    commits = range(len(commits))
                if use_date:
                    plt.plot(dates, counts, label=label)
                else:
                    plt.plot(commits, counts, label=label)
            plt.legend()
            if use_date or self.use_tags:
                plt.xticks(rotation=90)
            plt.tight_layout()
            if show:
                plt.show()
def main(tex_dir, fn_globs, use_tags, use_date):
    """Collect word-count history for *tex_dir*, plot it, and return the collector."""
    tex_git_info = TexGitInfo(tex_dir, fn_globs, use_tags)
    tex_git_info.run()
    tex_git_info.plot(use_date=use_date)
    return tex_git_info
| true
|
3949ecd3930699f1d6010217e679e5510fe3cf9d
|
Python
|
Lawriegan/leetcode
|
/17 Letter Combinations of a Phone Number.py
|
UTF-8
| 628
| 3.328125
| 3
|
[] |
no_license
|
class Solution:
    def letterCombinations(self, digits):
        """Return all letter strings a phone keypad can spell for *digits*.

        :type digits: str
        :rtype: List[str]
        """
        keypad = ['', '', 'abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']
        if not digits:
            return []
        combos = ['']
        for digit in digits:
            letters = keypad[int(digit)]
            # Extend every partial combination by every letter on this key.
            combos = [prefix + ch for prefix in combos for ch in letters]
        return combos
# Quick smoke test when run as a script.
print(Solution().letterCombinations('2'))
| true
|
1da412f6c5343dfa5f83e4d214503a73c903f6a1
|
Python
|
byklo/mit-python
|
/algorithms/sorting/mergeSort.py
|
UTF-8
| 623
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/python
import sys
def merge(a1, a2):
    """Merge two sorted lists into a new sorted list.

    Two-index merge instead of the original repeated `del a[0]`, which
    destroyed the inputs and made each merge quadratic.  Inputs are left
    untouched; `<=` keeps the merge stable.
    """
    merged = []
    i = j = 0
    while i < len(a1) and j < len(a2):
        if a1[i] <= a2[j]:
            merged.append(a1[i])
            i += 1
        else:
            merged.append(a2[j])
            j += 1
    merged.extend(a1[i:])
    merged.extend(a2[j:])
    return merged

def mergeSort(a):
    """Return a sorted copy of *a* via top-down merge sort.

    Fixes over the original: `len(a) / 2` is float division on Python 3
    (now `//`), and an empty list no longer recurses forever (the old base
    case only handled length 1).
    """
    if len(a) <= 1:
        return a
    k = len(a) // 2
    return merge(mergeSort(a[:k]), mergeSort(a[k:]))
# Command-line driver (Python 2 `print` statements).
verbose = sys.argv[1].lower() == "true"   # NOTE(review): parsed but never used
a = [ int(x) for x in sys.argv[2:] ]
print "Unsorted ->", a
sortedA = mergeSort(a)
print "Sorted ->", sortedA
| true
|
f7e74826255430e657e5893f6755005df34bbb1b
|
Python
|
google-code/0xic-zex
|
/Python/BL2SAVEFILE/savefile.py
|
UTF-8
| 22,904
| 2.640625
| 3
|
[] |
no_license
|
#! /usr/bin/env python
import binascii
from bisect import insort
from cStringIO import StringIO
import hashlib
import json
import math
import optparse
import struct
import sys
class BL2Error(Exception): pass  # generic error for malformed Borderlands 2 save data
class ReadBitstream(object):
    """MSB-first bit reader over a byte string (Python 2: str of 8-bit chars)."""

    def __init__(self, s):
        self.s = s      # backing byte string
        self.i = 0      # absolute bit position of the next bit to read

    def read_bit(self):
        """Return the next single bit (0 or 1)."""
        i = self.i
        self.i = i + 1
        byte = ord(self.s[i >> 3])
        bit = byte >> (7 - (i & 7))
        return bit & 1

    def read_bits(self, n):
        """Return the next *n* bits as an unsigned integer (big-endian bit order)."""
        s = self.s
        i = self.i
        end = i + n
        chunk = s[i >> 3: (end + 7) >> 3]
        # Mask off the already-consumed high bits of the first byte.
        value = ord(chunk[0]) &~ (0xff00 >> (i & 7))
        for c in chunk[1: ]:
            value = (value << 8) | ord(c)
        if (end & 7) != 0:
            # Drop the not-yet-consumed low bits of the final byte.
            value = value >> (8 - (end & 7))
        self.i = end
        return value

    def read_byte(self):
        """Return the next 8 bits as one byte value, at any bit alignment."""
        i = self.i
        self.i = i + 8
        byte = ord(self.s[i >> 3])
        if (i & 7) == 0:
            return byte
        byte = (byte << 8) | ord(self.s[(i >> 3) + 1])
        return (byte >> (8 - (i & 7))) & 0xff
class WriteBitstream(object):
    """MSB-first bit writer; call getvalue() for the accumulated byte string."""

    def __init__(self):
        self.s = ""      # finished bytes
        self.byte = 0    # byte currently being filled
        self.i = 7       # index of the next free bit within self.byte

    def write_bit(self, b):
        """Append one bit."""
        i = self.i
        byte = self.byte | (b << i)
        if i == 0:
            # Partial byte is full: flush it.
            self.s += chr(byte)
            self.byte = 0
            self.i = 7
        else:
            self.byte = byte
            self.i = i - 1

    def write_bits(self, b, n):
        """Append the low *n* bits of *b*, most significant bit first."""
        s = self.s
        byte = self.byte
        i = self.i
        while n >= (i + 1):
            # Fill (and flush) the current partial byte.
            shift = n - (i + 1)
            n = n - (i + 1)
            byte = byte | (b >> shift)
            b = b &~ (byte << shift)
            s = s + chr(byte)
            byte = 0
            i = 7
        if n > 0:
            byte = byte | (b << (i + 1 - n))
            i = i - n
        self.s = s
        self.byte = byte
        self.i = i

    def write_byte(self, b):
        """Append 8 bits at any alignment."""
        i = self.i
        if i == 7:
            self.s += chr(b)
        else:
            self.s += chr(self.byte | (b >> (7 - i)))
            self.byte = (b << (i + 1)) & 0xff

    def getvalue(self):
        """Return all bytes written, flushing any partial final byte."""
        if self.i != 7:
            return self.s + chr(self.byte)
        else:
            return self.s
def read_huffman_tree(b):
    """Deserialize a huffman tree: a 1 bit introduces a leaf byte, a 0 bit
    an internal node followed by its left then right subtree.  Nodes are
    (None, payload) tuples; payload is an int for leaves, a pair otherwise."""
    if b.read_bit():
        return (None, b.read_byte())
    return (None, (read_huffman_tree(b), read_huffman_tree(b)))
def write_huffman_tree(node, b):
    """Serialize a huffman tree (exact inverse of read_huffman_tree)."""
    payload = node[1]
    if type(payload) is int:
        b.write_bit(1)
        b.write_byte(payload)
    else:
        b.write_bit(0)
        write_huffman_tree(payload[0], b)
        write_huffman_tree(payload[1], b)
def make_huffman_tree(data):
    """Build a huffman tree from character frequencies in *data*.

    Nodes are [weight, payload] lists; payload is a byte value for leaves
    or a [left, right] pair for internal nodes.
    """
    freq = [0] * 256
    for ch in data:
        freq[ord(ch)] += 1
    nodes = [[count, sym] for (sym, count) in enumerate(freq) if count != 0]
    nodes.sort()
    while len(nodes) > 1:
        # Merge the two lightest nodes, keeping the list sorted.
        left, right = nodes[0], nodes[1]
        del nodes[:2]
        insort(nodes, [left[0] + right[0], [left, right]])
    return nodes[0]
def invert_tree(node, code=0, bits=0):
    """Flatten a huffman tree into a {char: (code, nbits)} encoding table."""
    payload = node[1]
    if type(payload) is int:
        return {chr(payload): (code, bits)}
    table = {}
    table.update(invert_tree(payload[0], code << 1, bits + 1))
    table.update(invert_tree(payload[1], (code << 1) | 1, bits + 1))
    return table
def huffman_decompress(tree, bitstream, size):
    """Decode *size* characters from *bitstream* using huffman *tree*."""
    decoded = []
    while len(decoded) < size:
        node = tree
        while 1:
            # Descend one edge per bit until a leaf (int payload) is hit.
            node = node[1][bitstream.read_bit()]
            if type(node[1]) is int:
                decoded.append(chr(node[1]))
                break
    return "".join(decoded)
def huffman_compress(encoding, data, bitstream):
    """Write each char of *data* using the {char: (code, nbits)} table."""
    for c in data:
        code, nbits = encoding[c]
        bitstream.write_bits(code, nbits)
# Field bit-widths for packed item blobs, indexed as item_sizes[is_weapon].
item_sizes = (
    (8, 17, 20, 11, 7, 7, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16),
    (8, 13, 20, 11, 7, 7, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17)
)
def pack_item_values(is_weapon, values):
    """Pack field values into a byte string per item_sizes[is_weapon].

    Packing is little-endian at the bit level; a None value terminates the
    field list early.  Returns only as many bytes as the packed bits need.
    """
    i = 0
    bytes = [0] * 32
    for value, size in zip(values, item_sizes[is_weapon]):
        if value is None:
            break
        j = i >> 3
        value = value << (i & 7)
        # Spread the shifted value over as many bytes as it spans.
        while value != 0:
            bytes[j] |= value & 0xff
            value = value >> 8
            j = j + 1
        i = i + size
    return "".join(map(chr, bytes[: (i + 7) >> 3]))
def unpack_item_values(is_weapon, data):
    """Unpack a packed item byte string back into a list of field values.

    Fields that run past the end of *data* come back as None.  The leading
    pad byte mirrors the 8-bit offset pack_item_values starts from.
    """
    i = 8
    data = " " + data
    values = []
    end = len(data) * 8
    for size in item_sizes[is_weapon]:
        j = i + size
        if j > end:
            values.append(None)
            continue
        value = 0
        # Assemble the spanned bytes most-significant-first.
        for b in data[j >> 3: (i >> 3) - 1: -1]:
            value = (value << 8) | ord(b)
        values.append((value >> (i & 7)) &~ (0xff << size))
        i = j
    return values
def rotate_data_right(data, steps):
    """Rotate *data* right by *steps* positions (circular, steps modulo len)."""
    pivot = len(data) - (steps % len(data))
    return data[pivot:] + data[:pivot]
def rotate_data_left(data, steps):
    """Rotate *data* left by *steps* positions (circular, steps modulo len)."""
    pivot = steps % len(data)
    return data[pivot:] + data[:pivot]
def xor_data(data, key):
    """XOR *data* against a multiplicative-congruential keystream from *key*.

    The transform is its own inverse for a given key.  Builds the output
    with a join instead of quadratic string concatenation.
    """
    key &= 0xffffffff
    out = []
    for ch in data:
        key = (key * 279470273) % 4294967291
        out.append(chr((ord(ch) ^ key) & 0xff))
    return "".join(out)
def wrap_item(is_weapon, values, key):
    """Pack, checksum, rotate/XOR-obfuscate and frame one item record."""
    item = pack_item_values(is_weapon, values)
    header = struct.pack(">Bi", (is_weapon << 7) | 7, key)
    padding = "\xff" * (33 - len(item))
    # CRC over header + placeholder checksum + padded item; folded to 16 bits.
    h = binascii.crc32(header + "\xff\xff" + item + padding) & 0xffffffff
    checksum = struct.pack(">H", ((h >> 16) ^ h) & 0xffff)
    # Low 5 key bits pick the rotation; the rest seed the XOR keystream.
    body = xor_data(rotate_data_left(checksum + item, key & 31), key >> 5)
    return header + body
def unwrap_item(data):
    """Inverse of wrap_item: returns (is_weapon, values, key)."""
    version_type, key = struct.unpack(">Bi", data[: 5])
    is_weapon = version_type >> 7
    # Undo the XOR, then the rotation; skip the 2-byte checksum.
    raw = rotate_data_right(xor_data(data[5: ], key >> 5), key & 31)
    return is_weapon, unpack_item_values(is_weapon, raw[2: ]), key
def read_varint(f):
    """Read a little-endian base-128 varint from file-like *f*.

    Each byte carries 7 payload bits; a set high bit means more bytes follow.
    """
    result = 0
    shift = 0
    while 1:
        byte = ord(f.read(1))
        result |= (byte & 0x7f) << shift
        if not (byte & 0x80):
            return result
        shift += 7
def write_varint(f, i):
    """Write *i* as a little-endian base-128 varint (1-char strings) to *f*."""
    while i > 0x7f:
        f.write(chr(0x80 | (i & 0x7f)))
        i >>= 7
    f.write(chr(i))
def read_protobuf(data):
    """Parse a protobuf blob into {field_number: [[wire_type, value], ...]}."""
    fields = {}
    total = len(data)
    stream = StringIO(data)
    while stream.tell() < total:
        tag = read_varint(stream)
        wire_type = tag & 7
        value = read_protobuf_value(stream, wire_type)
        fields.setdefault(tag >> 3, []).append([wire_type, value])
    return fields
def read_protobuf_value(b, wire_type):
    """Read one value of the given protobuf wire type from stream *b*."""
    if wire_type == 0:      # varint
        return read_varint(b)
    if wire_type == 1:      # fixed 64-bit, little-endian
        return struct.unpack("<Q", b.read(8))[0]
    if wire_type == 2:      # length-delimited
        return b.read(read_varint(b))
    if wire_type == 5:      # fixed 32-bit, little-endian
        return struct.unpack("<I", b.read(4))[0]
    raise BL2Error("Unsupported wire type " + str(wire_type))
def write_protobuf(data):
    """Serialize {field_number: [[wire_type, value], ...]} back to protobuf bytes."""
    b = StringIO()
    # If the data came from a JSON file the keys will all be strings
    data = dict([(int(k), v) for (k, v) in data.items()])
    for key, entries in sorted(data.items()):
        for wire_type, value in entries:
            if type(value) is dict:
                # Nested message: recurse and emit length-delimited.
                value = write_protobuf(value)
                wire_type = 2
            elif type(value) in (list, tuple):
                # Repeated values: concatenate and emit length-delimited.
                sub_b = StringIO()
                for v in value:
                    write_protobuf_value(sub_b, wire_type, v)
                value = sub_b.getvalue()
                wire_type = 2
            write_varint(b, (key << 3) | wire_type)
            write_protobuf_value(b, wire_type, value)
    return b.getvalue()
def write_protobuf_value(b, wire_type, value):
    """Write one protobuf value of *wire_type* to stream *b* (Python 2: uses `unicode`)."""
    if wire_type == 0:
        write_varint(b, value)
    elif wire_type == 1:
        b.write(struct.pack("<Q", value))
    elif wire_type == 2:
        if type(value) is unicode:
            value = value.encode("latin1")
        write_varint(b, len(value))
        b.write(value)
    elif wire_type == 5:
        b.write(struct.pack("<I", value))
    else:
        raise BL2Error("Unsupported wire type " + str(wire_type))
def parse_zigzag(i):
    """Decode a protobuf zigzag-encoded signed integer."""
    magnitude = i >> 1
    return -1 ^ magnitude if i & 1 else magnitude
def unwrap_player_data(data):
    """Verify, LZO-decompress and huffman-decode a raw save blob.

    Expects a 20-byte SHA1 of the remainder at the front; raises BL2Error
    for console containers, bad hashes, unknown versions, or CRC mismatch.
    """
    if data[: 4] == "CON ":
        raise BL2Error("You need to use a program like Horizon or Modio to extract the SaveGame.sav file first")
    if data[: 20] != hashlib.sha1(data[20: ]).digest():
        raise BL2Error("Invalid save file")
    data = lzo1x_decompress("\xf0" + data[20: ])
    size, wsg, version = struct.unpack(">I3sI", data[: 11])
    if version != 2 and version != 0x02000000:
        raise BL2Error("Unknown save version " + str(version))
    # The version field's byte order reveals the save's endianness.
    if version == 2:
        crc, size = struct.unpack(">II", data[11: 19])
    else:
        crc, size = struct.unpack("<II", data[11: 19])
    bitstream = ReadBitstream(data[19: ])
    tree = read_huffman_tree(bitstream)
    player = huffman_decompress(tree, bitstream, size)
    if (binascii.crc32(player) & 0xffffffff) != crc:
        raise BL2Error("CRC check failed")
    return player
def wrap_player_data(player, endian=1):
    """Huffman-encode, frame, LZO-compress and SHA1-prefix the player blob.

    endian=1 writes the version/crc/size header big-endian, anything else
    little-endian (inverse of the detection in unwrap_player_data).
    """
    crc = binascii.crc32(player) & 0xffffffff
    bitstream = WriteBitstream()
    tree = make_huffman_tree(player)
    write_huffman_tree(tree, bitstream)
    huffman_compress(invert_tree(tree), player, bitstream)
    data = bitstream.getvalue() + "\x00\x00\x00\x00"
    header = struct.pack(">I3s", len(data) + 15, "WSG")
    if endian == 1:
        header = header + struct.pack(">III", 2, crc, len(player))
    else:
        header = header + struct.pack("<III", 2, crc, len(player))
    data = lzo1x_1_compress(header + data)[1: ]
    return hashlib.sha1(data).digest() + data
def expand_zeroes(src, ip, extra):
    """LZO run-length helper: each 0x00 byte adds 255 to the count.

    Returns (count, new_ip) where count = 255 * zero_run + terminator + extra.
    """
    zeros = 0
    while src[ip] == 0:
        zeros += 1
        ip += 1
    return zeros * 255 + src[ip] + extra, ip + 1
def copy_earlier(b, offset, n):
    """LZO back-reference: append *n* bytes starting *offset* back in *b*.

    Copies in chunks so overlapping references (offset < n) re-read the
    bytes just appended, which is exactly what LZO match copies require.
    """
    i = len(b) - offset
    end = i + n
    while i < end:
        chunk = b[i: i + n]
        i = i + len(chunk)
        n = n - len(chunk)
        b.extend(chunk)
def lzo1x_decompress(s):
    """Decompress an LZO1X stream and return the raw bytes as a str.

    s[0] is a marker byte and s[1:5] a length header, both skipped; a 0xff
    sentinel is prepended so decoding can start at index 1.  The `skip`
    bit flags emulate the reference C implementation's gotos: bit 0 skips
    the initial literal-run read, bit 1 skips the "first literal run"
    match, bit 2 skips the "match done / match next" handling.
    """
    dst = bytearray()
    src = bytearray("\xff" + s[5: ])
    ip = 1
    skip = 0
    # A first byte > 17 encodes an initial run of literals.
    if src[ip] > 17:
        t = src[ip] - 17; ip += 1
        if t < 4:
            skip = 3
        else:
            dst.extend(src[ip: ip + t]); ip += t
            skip = 1
    while 1:
        if not (skip & 1):
            t = src[ip]; ip += 1
            if t >= 16:
                skip = 7
            else:
                # Literal run; t == 0 means an extended (zero-expanded) length.
                if t == 0:
                    t, ip = expand_zeroes(src, ip, 15)
                dst.extend(src[ip: ip + t + 3]); ip += t + 3
        if not (skip & 2):
            # first_literal_run
            t = src[ip]; ip += 1
            if t < 16:
                copy_earlier(dst, 1 + 0x0800 + (t >> 2) + (src[ip] << 2), 3); ip += 1
        if not (skip & 4) and t < 16:
            # match_done
            # match_next
            t = src[ip - 2] & 3
            if t == 0:
                continue
            dst.extend(src[ip: ip + t]); ip += t
            t = src[ip]; ip += 1
        skip = 0
        while 1:
            if t >= 64:
                # Short match: length and distance packed into two bytes.
                copy_earlier(dst, 1 + ((t >> 2) & 7) + (src[ip] << 3), (t >> 5) + 1); ip += 1
            elif t >= 32:
                # Medium match with optional extended length.
                t &= 31
                if t == 0:
                    t, ip = expand_zeroes(src, ip, 31)
                copy_earlier(dst, 1 + ((src[ip] | (src[ip + 1] << 8)) >> 2), t + 2); ip += 2
            elif t >= 16:
                # Long-distance match; a zero distance marks end of stream.
                offset = (t & 8) << 11
                t &= 7
                if t == 0:
                    t, ip = expand_zeroes(src, ip, 7)
                offset += (src[ip] | (src[ip + 1] << 8)) >> 2; ip += 2
                if offset == 0:
                    return str(dst)
                copy_earlier(dst, offset + 0x4000, t + 2)
            else:
                copy_earlier(dst, 1 + (t >> 2) + (src[ip] << 2), 2); ip += 1
            # Low 2 bits of the byte before ip give the trailing literal count.
            t = src[ip - 2] & 3
            if t == 0:
                break
            dst.extend(src[ip: ip + t]); ip += t
            t = src[ip]; ip += 1
def read_xor32(src, p1, p2):
    """XOR of two little-endian 32-bit words read from byte offsets p1 and p2."""
    def word(p):
        return src[p] | (src[p + 1] << 8) | (src[p + 2] << 16) | (src[p + 3] << 24)
    return word(p1) ^ word(p2)
# Lookup table for locating the lowest set bit of a 32-bit value: for a
# power of two p, clz_table[p % 37] is its bit index (37 is a perfect-hash
# modulus for the 33 distinct powers; e.g. table[1] == 0, table[2] == 1,
# table[4] == 2).  Used below as clz_table[(v & -v) % 37] >> 3 to find the
# first differing *byte* of a 32-bit XOR difference.
clz_table = (
    32, 0, 1, 26, 2, 23, 27, 0, 3, 16, 24, 30, 28, 11, 0, 13, 4,
    7, 17, 0, 25, 22, 31, 15, 29, 10, 12, 6, 0, 21, 14, 9, 5,
    20, 8, 19, 18
)
def lzo1x_1_compress_core(src, dst, ti, ip_start, ip_len):
    """Compress src[ip_start:ip_start+ip_len] with the LZO1X-1 match finder,
    appending opcodes to bytearray *dst*.

    *ti* is the count of trailing literals carried over from the previous
    chunk; the return value is the new trailing-literal count for the
    caller to flush (mirrors the reference C implementation).
    """
    # 14-bit hash table mapping a 4-byte prefix hash to its last position.
    dict_entries = [0] * 16384
    in_end = ip_start + ip_len
    ip_end = ip_start + ip_len - 20
    ip = ip_start
    ii = ip_start    # start of the pending literal run
    ip += (4 - ti) if ti < 4 else 0
    ip += 1 + ((ip - ii) >> 5)
    while 1:
        # Scan forward for a 4-byte match via the hash table; the step
        # grows with the length of the literal run (the >> 5 term).
        while 1:
            if ip >= ip_end:
                return in_end - (ii - ti)
            dv = src[ip: ip + 4]
            dindex = dv[0] | (dv[1] << 8) | (dv[2] << 16) | (dv[3] << 24)
            dindex = ((0x1824429d * dindex) >> 18) & 0x3fff
            m_pos = ip_start + dict_entries[dindex]
            dict_entries[dindex] = (ip - ip_start) & 0xffff
            if dv == src[m_pos: m_pos + 4]:
                break
            ip += 1 + ((ip - ii) >> 5)
        ii -= ti; ti = 0
        # Emit the pending literal run of length t before the match.
        t = ip - ii
        if t != 0:
            if t <= 3:
                # Tiny run: packed into the low bits of the previous opcode.
                dst[-2] |= t
                dst.extend(src[ii: ii + t])
            elif t <= 16:
                dst.append(t - 3)
                dst.extend(src[ii: ii + t])
            else:
                if t <= 18:
                    dst.append(t - 3)
                else:
                    # Extended length: zero marker plus 255-per-zero encoding.
                    tt = t - 18
                    dst.append(0)
                    n, tt = divmod(tt, 255)
                    dst.extend("\x00" * n)
                    dst.append(tt)
                dst.extend(src[ii: ii + t])
            ii += t
        # Extend the 4-byte match: compare 32 bits at a time, then use the
        # clz table to find the first differing byte.
        m_len = 4
        v = read_xor32(src, ip + m_len, m_pos + m_len)
        if v == 0:
            while 1:
                m_len += 4
                v = read_xor32(src, ip + m_len, m_pos + m_len)
                if ip + m_len >= ip_end:
                    break
                elif v != 0:
                    m_len += clz_table[(v & -v) % 37] >> 3
                    break
        else:
            m_len += clz_table[(v & -v) % 37] >> 3
        m_off = ip - m_pos
        ip += m_len
        ii = ip
        # Encode the match; opcode family depends on length and distance.
        if m_len <= 8 and m_off <= 0x0800:
            m_off -= 1
            dst.append(((m_len - 1) << 5) | ((m_off & 7) << 2))
            dst.append(m_off >> 3)
        elif m_off <= 0x4000:
            m_off -= 1
            if m_len <= 33:
                dst.append(32 | (m_len - 2))
            else:
                m_len -= 33
                dst.append(32)
                n, m_len = divmod(m_len, 255)
                dst.extend("\x00" * n)
                dst.append(m_len)
            dst.append((m_off << 2) & 0xff)
            dst.append((m_off >> 6) & 0xff)
        else:
            m_off -= 0x4000
            if m_len <= 9:
                dst.append(0xff & (16 | ((m_off >> 11) & 8) | (m_len - 2)))
            else:
                m_len -= 9
                dst.append(0xff & (16 | ((m_off >> 11) & 8)))
                n, m_len = divmod(m_len, 255)
                dst.extend("\x00" * n)
                dst.append(m_len)
            dst.append((m_off << 2) & 0xff)
            dst.append((m_off >> 6) & 0xff)
def lzo1x_1_compress(s):
    """Compress *s* as an LZO1X-1 stream (as a str).

    Output layout: a 0xF0 marker byte, a 4-byte big-endian uncompressed
    length, the compressed chunks (processed 48 KiB at a time), any
    trailing literals, and the end-of-stream marker 0x11 0x00 0x00.
    """
    src = bytearray(s)
    dst = bytearray()
    ip = 0
    l = len(s)
    t = 0    # trailing literals carried between chunks
    dst.append(240)
    dst.append((l >> 24) & 0xff)
    dst.append((l >> 16) & 0xff)
    dst.append((l >> 8) & 0xff)
    dst.append( l & 0xff)
    while l > 20:
        ll = l if l <= 49152 else 49152
        ll_end = ip + ll
        # Stop chunking when fewer than 32 bytes (t + ll) remain; they are
        # flushed as trailing literals below.
        if (ll_end + ((t + ll) >> 5)) <= ll_end or (ll_end + ((t + ll) >> 5)) <= ip + ll:
            break
        t = lzo1x_1_compress_core(src, dst, t, ip, ll)
        ip += ll
        l -= ll
    t += l
    # Flush any remaining literals using the same length encoding as the core.
    if t > 0:
        ii = len(s) - t
        if len(dst) == 0 and t <= 238:
            dst.append(17 + t)
        elif t <= 3:
            dst[-2] |= t
        elif t <= 18:
            dst.append(t - 3)
        else:
            tt = t - 18
            dst.append(0)
            n, tt = divmod(tt, 255)
            dst.extend("\x00" * n)
            dst.append(tt)
        dst.extend(src[ii: ii + t])
    # End-of-stream marker (a zero-distance long match).
    dst.append(16 | 1)
    dst.append(0)
    dst.append(0)
    return str(dst)
def modify_save(data, changes, endian=1):
    """Apply *changes* (a dict of option-name -> string value) to a raw save
    file and return the re-wrapped save bytes.

    Supported keys: level, skillpoints, money, eridium, itemlevels,
    backpack, unlocks (colon-separated; only "slaughterdome" is handled).
    """
    player = read_protobuf(unwrap_player_data(data))
    if changes.has_key("level"):
        level = int(changes["level"])
        # XP window for a level: ceil(60 * (level ** 2.8 - 1)).
        lower = int(math.ceil(60 * ((level ** 2.8) - 1)))
        upper = int(math.ceil(60 * (((level + 1) ** 2.8) - 1)))
        # Field 3 appears to hold the XP total — clamp it into the new
        # level's window; field 2 is the character level.
        if player[3][0][1] not in range(lower, upper):
            player[3][0][1] = lower
        player[2] = [[0, int(changes["level"])]]
    if changes.has_key("skillpoints"):
        player[4] = [[0, int(changes["skillpoints"])]]
    if changes.has_key("money") or changes.has_key("eridium"):
        # Field 6 is a packed list of varints: index 0 money, index 1 eridium.
        raw = player[6][0][1]
        b = StringIO(raw)
        values = []
        while b.tell() < len(raw):
            values.append(read_protobuf_value(b, 0))
        if changes.has_key("money"):
            values[0] = int(changes["money"])
        if changes.has_key("eridium"):
            values[1] = int(changes["eridium"])
        player[6][0] = [0, values]
    if changes.has_key("itemlevels"):
        # An empty value means "use the character's current level".
        if changes["itemlevels"]:
            level = int(changes["itemlevels"])
        else:
            level = player[2][0][1]
        # Fields 53 and 54 hold wrapped items; positions 4 and 5 of the
        # decoded item list are its two level values.
        for field_number in (53, 54):
            for field in player[field_number]:
                field_data = read_protobuf(field[1])
                is_weapon, item, key = unwrap_item(field_data[1][0][1])
                item = item[: 4] + [level, level] + item[6: ]
                field_data[1][0][1] = wrap_item(is_weapon, item, key)
                field[1] = write_protobuf(field_data)
    if changes.has_key("backpack"):
        # Backpack size = 12 base slots + 3 per SDU; the SDU count is stored
        # in a single byte, so it is capped at 255 (777 slots total).
        size = int(changes["backpack"])
        if size > 777:
            sdus = 255
        else:
            sdus = int(math.ceil((size - 12) / 3.0))
        size = 12 + (sdus * 3)
        slots = read_protobuf(player[13][0][1])
        slots[1][0][1] = size
        player[13][0][1] = write_protobuf(slots)
        # Patch the SDU count byte at offset 7 of field 36.
        s = player[36][0][1]
        player[36][0][1] = s[: 7] + chr(sdus) + s[8: ]
    if changes.has_key("unlocks"):
        # Fields 23/24 are byte strings listing unlocked ids and pending
        # notifications; id 1 corresponds to the slaughterdome.
        unlocked, notifications = [], []
        if player.has_key(23):
            unlocked = map(ord, player[23][0][1])
        if player.has_key(24):
            notifications = map(ord, player[24][0][1])
        unlocks = changes["unlocks"].split(":")
        if "slaughterdome" in unlocks:
            if 1 not in unlocked:
                unlocked.append(1)
            if 1 not in notifications:
                notifications.append(1)
        if unlocked:
            player[23] = [[2, "".join(map(chr, unlocked))]]
        if notifications:
            player[24] = [[2, "".join(map(chr, notifications))]]
    return wrap_player_data(write_protobuf(player), endian)
def apply_crude_parsing(player, rules):
    """Decode selected raw fields of an already-parsed protobuf dict in place.

    *rules* is comma separated: "key:type" re-reads the field's bytes as a
    sequence of values of that wire type; a bare key re-parses the bytes as
    an embedded message.
    """
    for key in rules.split(","):
        if ":" in key:
            key, field_type = key.split(":", 1)
            field_type = int(field_type)
            # Missing keys are silently skipped (player.get default).
            for element in player.get(int(key), []):
                element[0] = field_type
                b = StringIO(element[1])
                end_position = len(element[1])
                value = []
                while b.tell() < end_position:
                    value.append(read_protobuf_value(b, field_type))
                element[1] = value
        else:
            for element in player.get(int(key), []):
                element[1] = read_protobuf(element[1])
def main():
    """CLI entry point: decode, encode, or modify a Borderlands 2 save.

    Reads from the first positional argument (or stdin) and writes to the
    second (or stdout); the option flags select the mode.
    """
    # NOTE(review): `usage` is defined but never passed to OptionParser.
    usage = "usage: %prog [options] [source file] [destination file]"
    p = optparse.OptionParser()
    p.add_option(
        "-d", "--decode",
        action="store_true",
        help="read from a save game, rather than creating one"
    )
    p.add_option(
        "-j", "--json",
        action="store_true",
        help="read or write save game data in JSON format, rather than raw protobufs"
    )
    p.add_option(
        "-l", "--little-endian",
        action="store_true",
        help="change the output format to little endian, to write PC-compatible save files"
    )
    p.add_option(
        "-m", "--modify", metavar="MODIFICATIONS",
        help="comma separated list of modifications to make, eg money=99999999,eridium=99"
    )
    p.add_option(
        "-p", "--parse", metavar="FIELDNUMS",
        help="perform further protobuf parsing on the specified comma separated list of keys"
    )
    options, args = p.parse_args()
    # "-" selects stdin/stdout explicitly.
    if len(args) < 1 or args[0] == "-":
        input = sys.stdin
    else:
        input = open(args[0], "rb")
    if len(args) < 2 or args[1] == "-":
        output = sys.stdout
    else:
        output = open(args[1], "wb")
    # endian == 1 -> big-endian header fields; 0 -> little-endian (PC saves).
    if options.little_endian:
        endian = 0
    else:
        endian = 1
    if options.modify is not None:
        # Parse "key=value,key2=value2" into a dict; a bare key maps to None.
        changes = {}
        if options.modify:
            for m in options.modify.split(","):
                k, v = (m.split("=", 1) + [None])[: 2]
                changes[k] = v
        output.write(modify_save(input.read(), changes, endian))
    elif options.decode:
        savegame = input.read()
        player = unwrap_player_data(savegame)
        if options.json:
            player = read_protobuf(player)
            if options.parse:
                apply_crude_parsing(player, options.parse)
            player = json.dumps(player, encoding="latin1", sort_keys=True, indent=4)
        output.write(player)
    else:
        # Encode: raw protobuf (or JSON) in, wrapped save out.
        player = input.read()
        if options.json:
            player = write_protobuf(json.loads(player, encoding="latin1"))
        savegame = wrap_player_data(player, endian)
        output.write(savegame)


if __name__ == "__main__":
    main()
| true
|
d7857e25f26cccbc57156d1a39669ac1d322da9a
|
Python
|
AlbertUlysses/Coursera-Data-Structures-and-Algorithms
|
/algorithmic toolbox/week2/lcm.py
|
UTF-8
| 258
| 3.25
| 3
|
[] |
no_license
|
# Uses python3
import sys
def lcm_naive(a, b):
    """Least common multiple of a and b, via the Euclidean GCD."""
    x, y = a, b
    while y:
        x, y = y, x % y
    return (a * b) // x
if __name__ == '__main__':
    # Read both integers from stdin.  Use a local name rather than
    # shadowing the builtin `input` as the original did.
    raw = sys.stdin.read()
    a, b = map(int, raw.split())
    print(lcm_naive(a, b))
| true
|
500dcc6130bf698f200cd238b71d3a55d3a211db
|
Python
|
wyaadarsh/LeetCode-Solutions
|
/Python3/0119-Pascals-Triangle-II/soln.py
|
UTF-8
| 333
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
class Solution:
    """Pascal's triangle, single row by index (LeetCode 119)."""

    def getRow(self, rowIndex):
        """
        :type rowIndex: int
        :rtype: List[int]
        """
        # Start from row 0 and expand: each interior entry is the sum of
        # the two adjacent entries of the previous row.
        current = [1]
        for _ in range(rowIndex):
            current = [1] + [a + b for a, b in zip(current, current[1:])] + [1]
        return current
| true
|
ab0b1f1dc038e932e591d0dbfd377cc508094936
|
Python
|
afcarl/omscs-ml4t
|
/mc3_h1/gen_data.py
|
UTF-8
| 583
| 3.078125
| 3
|
[] |
no_license
|
"""
template for generating data to fool learners (c) 2016 Tucker Balch
"""
import numpy as np
def best4LinReg():
    """Return a dataset (X, Y) that favours linear regression over random
    trees: Y is exactly the last feature column, so a linear fit is perfect."""
    features = np.random.normal(size=(100, 20), loc=0)
    labels = features[:, 19]
    return features, labels
def best4RT():
    """Return a dataset (X, Y) that favours random trees: Y is a nonlinear
    (sin-squared) function of every feature."""
    X = np.random.normal(size=(100, 4))
    # Sum sin(x)^2 column by column (same order as the original expression).
    S = np.sin(X) ** 2
    Y = S[:, 0] + S[:, 1] + S[:, 2] + S[:, 3]
    return X, Y
if __name__ == "__main__":
    # Function-call form so the module also runs under Python 3
    # (a single-argument print(...) is valid Python 2 as well).
    print("they call me Tim.")
| true
|
a46643a5f4a0377a20dfac36b7b20c899a173ff5
|
Python
|
jschnab/data-structures-algos-python
|
/membership/bloom.py
|
UTF-8
| 425
| 3.296875
| 3
|
[] |
no_license
|
SIZE = 1000 * 1000
N_HASH = 7


class BloomFilter:
    """Simple Bloom filter over string items.

    Fix: the original ``__init__`` accepted ``size`` and ``n_hash`` but
    ignored them, always using the module constants.  They are now honoured;
    the defaults preserve the old behaviour.
    """

    def __init__(self, size=SIZE, n_hash=N_HASH):
        self.size = size        # number of bit slots
        self.n_hash = n_hash    # number of hash probes per item
        self.array = [0] * size

    def _positions(self, item):
        # Derive n_hash slot indices by salting the item with the probe index.
        return (hash(item + str(i)) % self.size for i in range(self.n_hash))

    def insert(self, item):
        """Set every hash slot for *item*."""
        for pos in self._positions(item):
            self.array[pos] = 1

    def __contains__(self, item):
        """True if every hash slot for *item* is set (may false-positive,
        never false-negative)."""
        return all(self.array[pos] == 1 for pos in self._positions(item))
| true
|
fff828826885cca885403c7e53bf3f01fffe856c
|
Python
|
lukasrandom/socket-dev
|
/src/network/server.py
|
UTF-8
| 4,800
| 2.78125
| 3
|
[] |
no_license
|
import threading
import json
import struct
import socket
import language.analyse
import pkg_resources
from utils import utils
class Requests(object):
    """String constants naming the request types the server understands."""
    ACTIONS_FROM_TEXT = "actions_from_text"
class Server(object):
    """Single-client TCP server answering text-analysis requests with
    length-prefixed JSON messages (framing is defined by StreamHandler).

    The accept loop runs on a background thread; socket timeouts drive a
    poll-like retry via recursion into _start.
    """

    def __init__(self):
        super(Server, self).__init__()
        self._text_analyser = language.analyse.TextAnalyser()

    @staticmethod
    def from_configurationn():
        """Build and start a Server with ip/port taken from the config file."""
        # NOTE(review): the method name has a typo ("configurationn");
        # kept because callers may already depend on it.
        ip = utils.get_from_config("ip")
        port = utils.get_from_config("port")
        server = Server()
        server.start(ip, port)
        return server

    def start(self, ip, port):
        """Remember the endpoint and launch the accept loop on a thread."""
        self._ip = ip
        self._port = port
        print("Server Started.")
        self._thread = threading.Thread(target=self._start)
        self._close_request = False
        self._timeout_count = 0
        self._thread.start()

    def close(self):
        # Checked at the top of _start; takes effect on the next timeout.
        self._close_request = True

    def restart(self):
        # start() resets _close_request and spawns a fresh thread.
        self.close()
        self.start(self._ip, self._port)

    def _start(self):
        """Accept one client, serve it, then retry/restart on timeouts.

        NOTE(review): each 2s accept timeout recurses into _start, so long
        idle periods grow the call stack (restart() resets it after ~980
        timeouts).
        """
        if self._close_request:
            print("Server Closed")
            return
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:
            server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server_socket.bind((self._ip, self._port))
            server_socket.settimeout(2)
            server_socket.listen(1)
            try:
                connection, address = server_socket.accept()
                print("Client Connected.")
                with connection:
                    stream_handler = StreamHandler()
                    self._listen(connection, stream_handler)
                    #self.send(connection)
            except socket.timeout:
                self._timeout_count += 1
                if self._timeout_count % 20 == 0:
                    print("time out count: {}".format(self._timeout_count))
                if self._timeout_count > 980:
                    self.restart()
                else:
                    self._start()

    def _listen(self, connection, stream_handler):
        """Read framed messages and dispatch each; keep listening while the
        handler holds a partially-received message."""
        connection.settimeout(1)
        try:
            data_list = stream_handler.data_list_from_connection(connection)
            for data in data_list:
                self._process_request(connection, data)
        except socket.timeout:
            print("rec time out")
        finally:
            if stream_handler.buffer_size > 0:
                print("continue to listen.")
                self._listen(connection, stream_handler=stream_handler)

    def send(self, connection, data):
        """Frame *data* as header + JSON and send it to the client."""
        stream = StreamHandler.data_to_stream(data)
        connection.send(stream)

    def _process_request(self, connection, data):
        """Handle one decoded request dict: {"id", "request", "args"}."""
        request_id = data.get("id")
        request = data.get("request")
        args = data.get("args")
        print("ID: {} | Request: {} | Args: {}".format(request_id, request, args))
        if request == Requests.ACTIONS_FROM_TEXT:
            result = self._text_analyser.actions_from_text(args)
            data = {"id": request_id, "result": result}
            print("Sending Back: {}".format(data))
            self.send(connection, data=data)
class StreamHandler(object):
    """Length-prefixed JSON framing over a socket.

    Wire format: an 8-byte native-order unsigned long long (struct "Q")
    giving the payload length, followed by that many bytes of UTF-8 JSON.
    """

    def __init__(self):
        self._buffer = b""          # bytes received so far for the current message
        self._header_size = 8 #len(struct.pack(">Q", 1)) # should be length of 8
        self._data_size = None      # payload length announced by the header
        self._data = None
        self._data_list = []        # all messages decoded on this connection

    @property
    def buffer_size(self):
        # Non-zero means a message is partially received.
        return len(self._buffer)

    @staticmethod
    def data_to_stream(data):
        """Serialize *data* as header + UTF-8 JSON bytes ready to send."""
        data_string = json.dumps(data, ensure_ascii=False).encode("utf-8")
        # header is an packed struct storing the data size as an int
        header = struct.pack("Q", len(data_string)) # Q to pack as long which mapps to int64 in c# and has a byte aray length of 8
        return header + data_string

    def data_list_from_connection(self, connection):
        """Receive one complete message and append it to the running list.

        NOTE(review): the header recv assumes all 8 bytes arrive in one
        call (a short read or b"" on disconnect would break the unpack),
        and the payload loop subtracts the *cumulative* buffer length from
        read_size, so a third recv could be asked for a negative count.
        """
        if not self._buffer:
            try:
                self._buffer = connection.recv(self._header_size)
            except socket.timeout:
                self._buffer = b""
                return self._data_list
            header = struct.unpack("Q", self._buffer[:self._header_size])[0]
            self._data_size = header
            read_size = header
            self._buffer = b""
        else:
            # Resuming a partially-received payload.
            read_size = self._data_size - len(self._buffer)
        if read_size > 0:
            self._buffer = connection.recv(read_size)
            while len(self._buffer) < self._data_size:
                read_size -= len(self._buffer)
                self._buffer += connection.recv(read_size)
        data_string = self._buffer[:self._data_size]
        data = json.loads(data_string)
        self._buffer = b""
        self._data_list.append(data)
        return self._data_list
| true
|
99201b95819a83ecbc5653feeb660375c6711f9d
|
Python
|
raabrp/rellipsoid
|
/rellipsoid/test_rellipsoid.py
|
UTF-8
| 6,978
| 2.828125
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
from pytest import approx
import numpy as np
from .rellipsoid import _rotmat, Planet, earth
def test_rotmat():
    '''
    unit test our axis-angle representation for rotations, since we use it
    throughout

    Fix: the third "anti-cyclic" check was written as the bare tuple
    ``(x, z, y)`` instead of the call ``r(x, z, y)``, so it asserted nothing.
    '''
    x = np.array([1, 0, 0])
    y = np.array([0, 1, 0])
    z = np.array([0, 0, 1])

    def r(a, b, c):
        '''rotate a by b (pi/2), compare to c'''
        assert _rotmat(b, np.pi/2).dot(a) == approx(c)

    # cyclic
    r(x, y, -z); r(y, z, -x); r(z, x, -y)
    # cyclic, negative angle
    r(x, -y, z); r(y, -z, x); r(z, -x, y)
    # cyclic, negative vector
    r(-x, y, z); r(-y, z, x); r(-z, x, y)
    # cyclic, negative angle, negative vector
    r(-x, -y, -z); r(-y, -z, -x); r(-z, -x, -y)
    # anti-cyclic
    r(y, x, z); r(z, y, x); r(x, z, y)
    # anti-cyclic, negative angle
    r(y, -x, -z); r(z, -y, -x); r(x, -z, -y)
    # anti-cyclic, negative vector
    r(-y, x, -z); r(-z, y, -x); r(-x, z, -y)
    # anti-cyclic, negative angle, negative vector
    r(-y, -x, z); r(-z, -y, x); r(-x, -z, y)
# nominal test values (mass accounts for atmosphere)
# no need for pytest fixtures for something so simple
# Arguments match the WGS84 defining constants: semi-major axis a,
# inverse flattening 1/f, rotation rate omega, and GM — per the Planet
# signature implied here; confirm against the rellipsoid module.
n = Planet(6378137, 298.257223563, 7.292115e-5, 3986004.418e8)
# Override polar gravity with the WGS84 nominal value used by the tests.
n.gamma_p = 9.8321849378
def test_nominal_earth():
    '''compare against derived WSG84 values (Ref A.)'''
    expected = [
        (n.b, 6356752.3142),
        (n.e2, 6.69437999014e-3),
        (n.e_p, 8.2094437949696e-2),
        (n.E, 5.2185400842339e5),
        (n.b / n.a, 0.996647189335),
        (n.gamma_e, 9.7803253359),
        (n.k, 0.00193185265241),
        (n.m, 0.00344978650684),
    ]
    for actual, nominal in expected:
        assert actual == approx(nominal)
def test_rad_curve_prime_vert():
    """Radius of curvature in the prime vertical at the pole."""
    polar_value = n._rad_curve_prime_vert(np.pi / 2)
    assert polar_value == approx(6399593.6258)
def test_somigliana():
    """Somigliana formula endpoints: sin²(φ)=0 (equator) and 1 (pole)."""
    assert n._somigliana(0) == approx(n.gamma_e)
    assert n._somigliana(1) == approx(n.gamma_p)
def test_free_air_gravity():
    """Surface free-air gravity at the equator and both poles."""
    cases = [
        (0, 0, n.gamma_e),
        (np.pi / 2, 0, n.gamma_p),
        (-np.pi / 2, 0, n.gamma_p),
    ]
    for phi, h, expected in cases:
        assert n.get_free_air_gravity(phi, h) == approx(expected)
def test_cartesian_geodetic():
    '''
    coordinate transformations
    Note the loss of accuracy in the inverse transform (geo -> cartesian)
    '''
    def roundtrip(phi, h, r, z):
        # Both directions of the 2-D transform must agree.
        assert n._2d_cartesian_to_geodetic(r, z) == approx((phi, h))
        assert n._2d_geodetic_to_cartesian(phi, h) == approx((r, z), abs=1e-9)

    # equator: on, above, and below the surface
    roundtrip(0, 0, n.a, 0)
    roundtrip(0, 1000, n.a + 1000, 0)
    roundtrip(0, -1000, n.a - 1000, 0)
    # poles: on, above, and below the surface
    roundtrip(np.pi / 2, 0, 0, n.b)
    roundtrip(-np.pi / 2, 0, 0, -n.b)
    roundtrip(np.pi / 2, 1000, 0, n.b + 1000)
    roundtrip(-np.pi / 2, 1000, 0, -n.b - 1000)
    roundtrip(np.pi / 2, -1000, 0, n.b - 1000)
    roundtrip(-np.pi / 2, -1000, 0, -n.b + 1000)
def test_analytic_gravity():
    """Analytic gravity at the equator and both poles: purely radial."""
    cases = [
        (0, 0, (-n.gamma_e, 0)),
        (np.pi / 2, 0, (-n.gamma_p, 0)),
        (-np.pi / 2, 0, (-n.gamma_p, 0)),
    ]
    for phi, h, expected in cases:
        assert n.get_analytic_gravity(phi, h) == approx(expected)
def test_local_cartesian():
    '''
    More coordinate transforms. Again, geodetic -> cartesian is less accurate
    '''
    def t(phi, az, h):
        # Build the to-local / to-geodetic pair for an origin at (phi, az, h)
        # and return a checker closure for round-tripping points.
        toloc, togeo = n.prep_local_cartesian(phi, az, h)
        def s(x, y, z, phi, lamda, h):
            # high absolute tolerance (2m)
            # for _2d_geodetic_to_cartesian
            assert toloc(phi, lamda, h) == approx((x, y, z), abs=1)
            p, l, j = togeo(x, y, z)
            assert p == approx(phi, abs=1e-6)
            # high absolute tolerance (2m)
            # for _2d_geodetic_to_cartesian
            assert j == approx(h, abs=2)
            # allow for degeneracy
            if not (p == approx(np.pi / 2) or p == approx(-np.pi / 2)):
                assert l == approx(lamda)
        return s

    s = t(0, 0, 0) # at equator looking north
    s(0, 0, 0, 0, 0, 0) # origin
    s(0, n.b, -n.a, np.pi/2, np.pi/2, 0) # north pole
    s(0, -n.b, -n.a, -np.pi/2, 0, 0) # south pole
    s(n.a, 0, -n.a, 0, np.pi/2, 0) # east extreme
    s(-n.a, 0, -n.a, 0, -np.pi/2, 0) # west extreme
    s(0, 0, -2*n.a, 0, np.pi, 0) # opposite side

    # sanity check, if not numerical accuracy
    # at 40 latitude, y axis is west (x is north)
    p = 40 * np.pi / 180
    toloc, togeo = n.prep_local_cartesian(
        p, -np.pi/2, 0
    )
    phi, lamda, h = togeo(0, 0, 0) # at origin
    assert phi == approx(p)
    assert lamda == approx(0)
    assert h == approx(0)
    phi, lamda, h = togeo(0, 1e14, 0) # west (has radial component)
    assert phi < p
    assert lamda < 0
    assert h > 0
    phi, lamda, h = togeo(0, -1e14, 0) # east (has radial component)
    assert phi < p
    assert lamda > 0
    assert h > 0
    phi, lamda, h = togeo(0, 0, 1e14) # straight up
    assert h > 0
    assert phi == approx(p)
    assert lamda == approx(lamda)
    phi, lamda, h = togeo(0, 0, -2000) # straight down
    assert h < 0
    assert phi == approx(p)
    assert lamda == approx(lamda)
    phi, lamda, h = togeo(1e14, 0, 0) # north
    assert phi > p
    assert lamda == approx(lamda)
    assert h > 0
    phi, lamda, h = togeo(-1e14, 0, 0) # south
    assert phi < p
    assert lamda == approx(lamda)
    assert h > 0
    x, y, z = toloc(50 * np.pi / 180, 0, 0) # north
    assert x > 0
    assert z < 0
    assert y == approx(0, abs=1e-9) # again, geo -> cartesian
    x, y, z = toloc(30 * np.pi / 180, 0, 0) # south
    assert x < 0
    assert z < 0
    assert y == approx(0, abs=1e-9) # again, geo -> cartesian
    x, y, z = toloc(40 * np.pi / 180, -1, 0) # west
    assert x > 0
    assert z < 0
    assert y > 0
    x, y, z = toloc(40 * np.pi / 180, -4, 0) # east
    assert x > 0
    assert z < 0
    assert y < 0
def test_inertial():
    """Local-cartesian frames in inertial (non-rotating) coordinates: the
    planet's rotation shows up as displacement of a point fixed to the
    surface, and as Coriolis-like deflection of moving points."""
    # at equator, y is north
    a, toN, toF = n.prep_local_cartesian_inertial(0, 0, 0)
    ax, ay, az = a(1 / n.omega) # medium interval of time
    assert ax < 0
    assert az < 0
    assert ay == approx(0, abs=1e-9)
    ax, ay, az = a(np.pi / n.omega) # half rotation of earth
    assert ax == approx(-n.a * np.pi)
    assert ay == approx(0, abs=1e-8)
    assert az == approx(-n.a * 2)
    nx, ny, nz = toN(np.pi / 2 / n.omega, [0, 0, 0]) # quarter rotation
    assert nz > 0
    assert nx == approx(-n.a)
    assert ny == approx(0, abs=1e-9)
    nx, ny, nz = toN(np.pi / n.omega, [0, 0, 0]) # half rotation
    assert nz == approx(-2 * n.a)
    assert nx == approx(-n.a * np.pi)
    assert ny == approx(0, abs=1e-8)
    # mid-atitude
    a, toN, toF = n.prep_local_cartesian_inertial(
        40 * np.pi / 180,
        0, 0
    )
    x, y, z = toN(10, (0, 1000, 0)) # flying north @ 100 m/s
    assert x > 0 # deflected right (decreasing radius)
    assert y < 1000 # surface is rotating around poll, viewed from lat > 0
    assert z > 0 # pulled upward
    x, y, z = toN(10, (1000, 0, 0)) # flying east @ 100 m/s
    assert x < 1000 # effect of rotation > slower velocity in inertial x component
    assert y < 0 # surface is rotating around poll, viewed from lat > 0
    assert z > 0 # pulled upward
| true
|
5f44ac62881492fc28c1fa129b7a9c6a34c473c5
|
Python
|
chenchonghz/medical-backend
|
/app/Models/pe-data-mining/codes/data.py
|
UTF-8
| 11,131
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import os
import pandas as pd
def is_nan(a):
    """True when *a* renders as the string 'nan' (covers float NaN and
    pandas missing values)."""
    text = str(a)
    return text == 'nan'
class Data:
    """Loads the 2016/2017 physical-examination CSVs (basic info, lab
    results, diagnoses), intersects the people present in both years, and
    builds model-ready feature tables.  Intermediate results are cached as
    .npy files under save_path."""

    def _config(self):
        """Set the CSV file names, cache file names, and the basic-info
        columns to keep (column names are in Chinese, matching the CSVs)."""
        self.basic1 = "basic2016.csv"
        self.basic2 = "basic2017.csv"
        self.examine1 = "examine2016.csv"
        self.examine2 = "examine2017.csv"
        self.diagnosis1 = "diagnosis2016.csv"
        self.diagnosis2 = "diagnosis2017.csv"
        self.save_namelist = "namelist.npy"
        self.save_examine_list = "examine_list.npy"
        self.save_examine_list_values = "examine_list_%d.npy"
        self.basic_list = ["体检号","出生日期","性别","身高(cm)","体重(Kg)","BMI","血压(mmHg)","是否吸烟","早发心血管病家族史","内科病史","胸廓","呼吸音","心率", "甲状腺","乳腺","眼底(右)","眼底(左)","鼻"]
        self.need_normalize = ["是否吸烟"]

    def __init__(self, data_path, save_path):
        self.X = np.array([])
        self.Y = np.array([])
        self.data_path = data_path    # directory holding the raw CSV files
        self.save_path = save_path    # directory holding the .npy caches
        self._config()

    def load_namelist(self, load_from_save=True, save=True):
        """Return the exam IDs ("体检号") present in both years' basic-info
        files, reading from / writing to the .npy cache as requested."""
        save_path = os.path.join(self.save_path, self.save_namelist)
        if load_from_save and os.path.exists(save_path):
            namelist = np.load(save_path)
        else:
            basic1 = pd.read_csv(os.path.join(self.data_path, self.basic1), encoding='gbk')
            basic2 = pd.read_csv(os.path.join(self.data_path, self.basic2), encoding='gbk')
            namelist = np.array(list(set(basic1["体检号"]).intersection(set(basic2["体检号"]))))
            if save:
                np.save(save_path, namelist)
        return namelist

    def load_namelist_done___load_examined_list = None  # (placeholder removed)

    def load_examined_list(self, namelist, load_from_save=True, save=True):
        """Map each lab-test item ("检验项目") to the set of people in
        *namelist* who have a non-NaN result for it in BOTH years."""
        save_path = os.path.join(self.save_path, self.save_examine_list)
        if load_from_save and os.path.exists(save_path):
            # Cache layout: keys in examine_list.npy, one values file per key.
            examined = dict()
            keys = np.load(save_path)
            for i, k in enumerate(keys):
                examined[k] = set(np.load(os.path.join(self.save_path, self.save_examine_list_values).replace("%d", str(i))).tolist())
        else:
            examine1 = pd.read_csv(os.path.join(self.data_path, self.examine1), encoding='gbk')
            examine2 = pd.read_csv(os.path.join(self.data_path, self.examine2), encoding='gbk')
            examined1 = dict()
            examined2 = dict()
            # Per year: person -> set of items with a real result.
            for index, row in examine1.iterrows():
                if str(row["检验结果"]) == 'nan':
                    continue
                if row["体检号"] in namelist:
                    if row["体检号"] not in examined1:
                        examined1[row["体检号"]] = set()
                    examined1[row["体检号"]].add(row["检验项目"])
            for index, row in examine2.iterrows():
                if str(row["检验结果"]) == 'nan':
                    continue
                if row["体检号"] in namelist:
                    if row["体检号"] not in examined2:
                        examined2[row["体检号"]] = set()
                    examined2[row["体检号"]].add(row["检验项目"])
            # Invert: item -> people who have it in both years.
            examined = dict()
            for (k, v) in examined1.items():
                if k in examined2:
                    set1 = v
                    set2 = examined2[k]
                    s = set1.intersection(set2)
                    for v in s:
                        if v not in examined:
                            examined[v] = set()
                        examined[v].add(k)
            if save:
                np.save(save_path, np.array(list(examined.keys())))
                for i,v in enumerate(examined.values()):
                    np.save(os.path.join(self.save_path, self.save_examine_list_values).replace("%d", str(i)), np.array(list(v)))
        return examined

    def filter_names_and_objects(self, examined_list, threshold=2000):
        """Keep lab items taken by at least *threshold* people; return the
        people common to every kept item, plus the kept item names."""
        namelist = False    # False doubles as "not initialised yet"
        objects = list()
        for (k, v) in examined_list.items():
            if len(v) >= threshold:
                if namelist == False:
                    namelist = v
                else:
                    namelist = namelist.intersection(v)
                objects.append(k)
        return list(namelist), objects

    def load_data(self, namelist, objects):
        """Join basic info with pivoted lab results for both years; returns
        (table_2016, table_2017) indexed by exam ID."""
        basic1 = pd.read_csv(os.path.join(self.data_path, self.basic1), encoding='gbk')
        basic2 = pd.read_csv(os.path.join(self.data_path, self.basic2), encoding='gbk')
        basic1 = basic1[basic1["体检号"].isin(namelist)].drop_duplicates(['体检号'])[self.basic_list]
        basic2 = basic2[basic2["体检号"].isin(namelist)].drop_duplicates(['体检号'])[self.basic_list]
        examine1 = pd.read_csv(os.path.join(self.data_path, self.examine1), encoding='gbk')
        examine2 = pd.read_csv(os.path.join(self.data_path, self.examine2), encoding='gbk')
        # Pivot lab results into one column per test item, then join.
        examine1 = examine1[examine1["体检号"].isin(namelist)]
        examine1 = examine1[examine1["检验项目"].isin(objects)]
        examine1 = examine1.drop_duplicates(["体检号", "检验项目"])[["体检号", "检验项目", "检验结果"]]
        examine_result1 = pd.DataFrame(examine1).pivot(index="体检号", columns="检验项目", values="检验结果")
        all1 = basic1.set_index('体检号').join(examine_result1)
        examine2 = examine2[examine2["体检号"].isin(namelist)]
        examine2 = examine2[examine2["检验项目"].isin(objects)]
        examine2 = examine2.drop_duplicates(["体检号", "检验项目"])[["体检号", "检验项目", "检验结果"]]
        examine_result2 = pd.DataFrame(examine2).pivot(index="体检号", columns="检验项目", values="检验结果")
        all2 = basic2.set_index('体检号').join(examine_result2)
        # Load the diagnosis records (currently filtered but unused; the
        # keyword-flagging logic below is commented out).
        diagnosis1 = pd.read_csv(os.path.join(self.data_path, self.diagnosis1), encoding='gbk')
        diagnosis2 = pd.read_csv(os.path.join(self.data_path, self.diagnosis2), encoding='gbk')
        diagnosis1 = diagnosis1[diagnosis1["体检号"].isin(namelist)]
        diagnosis2 = diagnosis2[diagnosis2["体检号"].isin(namelist)]
        # key_words = ["高血压", "糖尿病", "脂肪肝"]
        # columns = []
        # for k in key_words:
        #     columns.append("诊断_" + k)
        #
        # diagnosis = pd.DataFrame(index=all1.index, columns=columns, dtype='Float32')
        # diagnosis = diagnosis.fillna(0.)
        # for index, row in diagnosis1.iterrows():
        #     if not is_nan(row["诊断"]):
        #         for k in key_words:
        #             if row["诊断"].find(k) >= 0:
        #                 diagnosis.loc[row["体检号"], "诊断_" + k] = 1.
        #
        # all1 = all1.join(diagnosis)
        #
        # diagnosis = pd.DataFrame(index=all2.index, columns=columns)
        # diagnosis = diagnosis.fillna(0.)
        # for index, row in diagnosis2.iterrows():
        #     if not is_nan(row["诊断"]):
        #         for k in key_words:
        #             if row["诊断"].find(k) >= 0:
        #                 diagnosis.loc[row["体检号"], "诊断_" + k] = 1.
        #
        # all2 = all2.join(diagnosis)
        return all1, all2

    def data_fix(self, data):
        """Clean a joined table: expand medical history into binary columns,
        parse birth year and blood pressure, binarise exam findings, and
        fix duplicated column names.  Returns the fixed DataFrame."""
        key_words = ["高血压", "高血脂", "糖尿病", "脂肪肝"]
        columns = []
        for k in key_words:
            columns.append("病史_"+k)
        history = []
        count = 0
        # One 0/1 flag per keyword found in the medical-history text.
        for index, row in data.iterrows():
            value = []
            if not is_nan(row["内科病史"]):
                for k in key_words:
                    if row["内科病史"].find(k) >= 0:
                        value.append(1.)
                    else:
                        value.append(0.)
                value = np.array(value)
            else:
                value = np.zeros(len(key_words))
            history.append(value)
        history = pd.DataFrame(history, columns=columns, index=data.index.values)
        data=pd.concat((data, history),axis=1).drop(["内科病史"], axis=1)

        def get_time(a):
            # Extract the year from a "YYYY/MM/DD ..." date string.
            if is_nan(a):
                return a
            strs = a.split(" ")
            strs = strs[0].split("/")
            return int(strs[0])
        data["出生日期"] = data.apply(lambda row: get_time(row["出生日期"]), axis=1)

        def get_num(str, delem="/", pos=0):
            # Pull the pos-th number out of a delimited string (e.g. "120/80").
            ret = float('nan')
            if not is_nan(str):
                strs = str.split(delem)
                if len(strs) > pos:
                    ret = float(strs[pos])
            return ret
        data["血压(高)"] = data.apply(lambda row: get_num(row["血压(mmHg)"], pos=0), axis=1)
        data["血压(低)"] = data.apply(lambda row: get_num(row["血压(mmHg)"], pos=1), axis=1)
        del data["血压(mmHg)"]

        def default_value(a, default="未见异常"):
            # 0 when the finding equals the "no abnormality" default, else 1.
            if is_nan(a):
                return float('nan')
            return 0 if a == default else 1
        for k in ["胸廓","呼吸音","甲状腺","乳腺","眼底(右)","眼底(左)","鼻"]:
            data[k] = data.apply(lambda row: default_value(row[k]), axis=1)
        data.rename(columns={'红细胞计数.':'红细胞计数', '白细胞计数.':'白细胞计数'}, inplace = True)
        return data

    def normalize(self, data):
        """NOTE(review): incomplete — counts the category frequencies of the
        columns in need_normalize but never stores or returns anything."""
        for k in self.need_normalize:
            counts = pd.value_counts(data[[k]].values.ravel())
            normalized_values = counts.keys().tolist()

    def gen_label(self, data1, data2, column, max, better_ratio=0.2, label="标记"):
        """Label each person 1 when *column* got worse between years:
        it crossed above *max*, or stayed above *max* without improving by
        at least *better_ratio*.  Returns a one-column DataFrame."""
        combined = data1[[column]].join(data2[[column]], lsuffix="1", rsuffix="2")
        if better_ratio > 1:
            better_ratio = 0.99
        def not_better(a, b, max):
            if float(b) > max and float(a) < max:
                return 1
            if float(b) > max and float(a) > max and float(b)/float(a) > 1 - better_ratio:
                return 1
            return 0
        combined["selected"] = combined.apply(lambda row: not_better(row[column+"1"], row[column+"2"], max), axis=1)
        return combined[["selected"]].rename(columns={"selected": label})

    def load_X(self):
        """Full pipeline: common names -> common lab items -> joined tables
        -> cleaned feature tables for 2016 and 2017."""
        namelist = self.load_namelist()
        examined_list = self.load_examined_list(namelist)
        namelist, examined_objects = self.filter_names_and_objects(examined_list)
        all1, all2 = self.load_data(namelist, examined_objects)
        all1 = self.data_fix(all1)
        all2 = self.data_fix(all2)
        return all1, all2
if __name__ == '__main__':
    # Ad-hoc smoke test against the author's local data paths.
    data = Data("D:\\Lab\\PE\\data\\csv", "D:\\Lab\\PE\\data\\processed")
    namelist = data.load_namelist()
    examined_list = data.load_examined_list(namelist, load_from_save=True, save=False)
    for (k,v) in examined_list.items():
        print(k, ":", len(v))
    namelist, examined_objects = data.filter_names_and_objects(examined_list)
    print(examined_objects)
    all1, all2 = data.load_data(namelist, examined_objects)
    data.data_fix(all1)
    data.data_fix(all2)
    print(all1.keys())
    print(all2.keys())
    # data.gen_label(all1, all2, "尿酸", 420)
| true
|
2b377163179f086df7f11a2102f795b815309d49
|
Python
|
anolsa/listenandrepeat-praat
|
/GUItesti.py
|
UTF-8
| 1,659
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
from GUI import RadioButton, RadioGroup, Label, Font, Window, TextField, Button, application
win_num = 0
tiedot = []
class TestWindow(Window):
    """Window that prints "Default" on Return and "Cancel" on Escape,
    forwarding all other key presses to the base class.  (Python 2 code.)"""
    def key_down(self, event):
        c = event.char
        if c == '\r':
            print "Default"
        elif c == '\x1b':
            print "Cancel"
        else:
            Window.key_down(self, event)
class TestTextField(TextField):
    """A TextField tagged with an identifying number."""

    def __init__(self, number, *args, **kwargs):
        TextField.__init__(self, *args, **kwargs)
        self.number = number
# Static label shown next to the name text field ("Nimi" is Finnish for "name").
nimiLabel = Label("Nimi:")
nimiLabel.position = (20, 20)

# Shared radio-button group for the (currently commented-out) rating buttons.
grp = RadioGroup()

def set_to_1():
    # NOTE(review): despite the name, this sets the group's value to 4.
    grp.value = 4
def make_window():
    """Create, populate, and show a numbered test window with one text
    field and a Show button; returns the window.

    Bug fix: ``action`` was previously the *result* of calling
    ``tiedot.append(...)`` once at construction time (i.e. None), so
    clicking the button did nothing and the field's initial empty text was
    recorded immediately.  It is now a callable that appends the field's
    current text when clicked.
    """
    global win_num
    win_num += 1
    win = TestWindow(size = (320, 200), title = "Text fields %d" % (win_num))
    win.tf1 = TestTextField(1,
        position = (nimiLabel.right + 20, 20),
        width = 200, text = "")
    buty = win.tf1.bottom + 20
    show_but = Button("Show",
        position = (20, buty),
        action = lambda: tiedot.append(repr(win.tf1.text)))
    win.add(nimiLabel)
    win.add(win.tf1)
    win.add(show_but)
    # Size the window to fit its contents.
    win.width = win.tf1.right + 20
    win.height = show_but.bottom + 20
    win.tf1.become_target()
    win.show()
    return win
win = make_window()

def sigterm(*a):
    """Signal handler: turn SIGTERM into an exception so the GUI event
    loop unwinds instead of being killed outright."""
    raise Exception("SIGTERM")

import signal
signal.signal(signal.SIGTERM, sigterm)

application().run()
| true
|
f0ad278814775cfb75328cbd0dc78b1d55292e48
|
Python
|
keithwang5/Anti-Stress-Tracker
|
/fitbitWrapper.py
|
UTF-8
| 3,330
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
import ConfigParser
import fitbit
from RecordClasses import Record
import datetime as dt
import time
config_filename = 'config.ini'
def get_heartrate_series(base_date, detail_level, start_time=None, end_time=None):
    """Return intraday heart-rate samples for *base_date* as Record objects.

    detail_level must be '1sec', '1min', or '15min'.  start_time and
    end_time should both be empty or both set; when set they restrict the
    window within the day.
    """
    valid_levels = ('1sec', '1min', '15min')
    if detail_level not in valid_levels:
        raise ValueError("Period must be either '1sec', '1min', or '15min'")
    client = __get_client()
    kwargs = dict(base_date=base_date, detail_level=detail_level)
    if start_time:
        kwargs.update(start_time=start_time, end_time=end_time)
    stats = client.intraday_time_series('activities/heart', **kwargs)
    records = []
    for sample in stats['activities-heart-intraday']['dataset']:
        # Combine the sample's clock time with the requested date.
        clock = dt.datetime.strptime(sample['time'], '%H:%M:%S').time()
        records.append(Record(sample['value'], dt.datetime.combine(base_date, clock)))
    return records
# returns sleeping ranges (start-end time of the sleep) for given dates, end date excluded
# includes both night sleep and naps
def get_sleep_ranges(start_date, end_date):
    """Return (sleep_start, sleep_end) datetime tuples for every sleep
    logged between start_date (inclusive) and end_date (exclusive).

    Includes both night sleep and naps.
    """
    client = __get_client()
    ranges = []
    total_days = (end_date - start_date).days
    for offset in range(total_days):
        day = start_date + dt.timedelta(offset)
        for entry in client.sleep(date=day)['sleep']:
            # 'startTime' carries fractional seconds; drop them before parsing.
            begin = dt.datetime.strptime(entry['startTime'].split('.')[0], "%Y-%m-%dT%H:%M:%S")
            finish = begin + dt.timedelta(minutes=entry['timeInBed'])
            ranges.append((begin, finish))
    return ranges
# Private methods #
def __store_token(token_dict):
    """Persist refreshed OAuth2 tokens back to the config file.

    Passed to the fitbit client as its refresh callback; receives a dict
    with 'access_token', 'refresh_token' and 'expires_at'.
    """
    config = ConfigParser.SafeConfigParser()
    config.read(config_filename)
    config.set('Login Parameters', 'ACCESS_TOKEN', token_dict['access_token'])
    config.set('Login Parameters', 'REFRESH_TOKEN', token_dict['refresh_token'])
    config.set('Login Parameters', 'EXPIRES_AT', str(int(token_dict['expires_at'])))
    # Context manager guarantees the file handle is closed even if
    # config.write() raises (the original leaked the handle in that case).
    with open(config_filename, 'w') as file_config:
        config.write(file_config)
    return
def __read_config():
    """Load the Fitbit OAuth2 credentials from the config file.

    Returns a 5-tuple: (client id, client secret, access token,
    refresh token, expiry timestamp), all as strings.
    """
    parser = ConfigParser.SafeConfigParser()
    parser.read(config_filename)
    section = 'Login Parameters'
    keys = ('C_ID', 'C_SECRET', 'ACCESS_TOKEN', 'REFRESH_TOKEN', 'EXPIRES_AT')
    return tuple(parser.get(section, key) for key in keys)
def __get_client():
    """Build an authenticated OAuth2 Fitbit client from the stored config.

    Token refreshes performed by the client are persisted automatically
    via the __store_token callback.
    """
    CLIENT_ID, CLIENT_SECRET, ACCESS_TOKEN, REFRESH_TOKEN, EXPIRES_AT = __read_config()
    client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True,
                           access_token=ACCESS_TOKEN,
                           refresh_token=REFRESH_TOKEN,
                           expires_at=EXPIRES_AT,
                           refresh_cb=__store_token)
    return client
| true
|
672519ec779403a12a8554f088b1fd66aaec710b
|
Python
|
TJCarpenter/Library-System
|
/scripts/main.py
|
UTF-8
| 4,755
| 3.328125
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup as bs
import requests
from random import randint, choice
import names
import csv
import time
# Number of days per month, keyed by zero-padded month string
# (February fixed at 28, matching the original day ranges).
DAYS_IN_MONTH = {
    '01': 31, '02': 28, '03': 31, '04': 30, '05': 31, '06': 30,
    '07': 31, '08': 31, '09': 30, '10': 31, '11': 30, '12': 31,
}

for i in range(1, 10000):
    time.sleep(3)  # be polite to the remote generator
    # Scrape one page of randomly generated book titles.
    url = 'https://booktitlegenerator.com/'
    r = requests.get(url)
    soup = bs(r.text, 'html.parser')
    li = soup.findAll('li')
    author_list = []  # one entry per book; an author repeats 1-7 times
    auth = 0
    for elem in li:
        # Build the author list lazily on the first title of the page.
        if not author_list:
            while len(author_list) <= len(li):
                author = names.get_full_name()
                for _ in range(randint(1, 7)):  # 1 to 7 books per author
                    author_list.append(author)
            del author_list[len(li):]  # trim the overshoot
        # Title
        title = elem.text
        # Fake ISBN-10: four dash-separated groups totalling 10 digits.
        first_sec = str(randint(0, 1))
        second_len = randint(3, 7)
        second_sec = str(randint(10 ** (second_len - 1), 10 ** second_len - 1))
        third_len = 8 - second_len  # digits left after groups 1, 2 and 4
        third_sec = str(randint(10 ** (third_len - 1), 10 ** third_len - 1))
        fourth_sec = str(randint(0, 9))
        isbn = '-'.join((first_sec, second_sec, third_sec, fourth_sec))
        # Author: next entry in the per-page rotation.
        author = author_list[auth]
        # Publisher
        publisher_co = ['Simon Wallenberg Press', 'Scribner', 'Frederick Ungar', 'J.A. Allen & Co.', 'Willmann–Bell', 'KT Publishing']
        publisher = choice(publisher_co)
        # Publication date, weighted towards recent years:
        # 10% 1920-40, 15% 1941-70, 25% 1971-90, 50% 1991-2018.
        published_dates_list = [randint(1920, 1940)]*10 + [randint(1941, 1970)]*15 + [randint(1971, 1990)]*25 + [randint(1991, 2018)]*50
        published_month = choice(list(DAYS_IN_MONTH))
        published_day = str(randint(1, DAYS_IN_MONTH[published_month])).zfill(2)
        published_date = str(choice(published_dates_list)) + '-' + published_month + '-' + published_day
        # Append the row; the 'with' block closes the handle, so no
        # explicit close() is needed (the original re-closed a closed file).
        row = [title, author, isbn, publisher, published_date]
        auth += 1
        with open('Books.csv', 'a', newline='') as out_file:
            writer = csv.writer(out_file, delimiter=',')
            writer.writerows([row])
| true
|
3602693a45d5bdbda9172a6e47150bf06d134bcf
|
Python
|
haibaokesheng/study
|
/leetcode/104.二叉树的最大深度.py
|
UTF-8
| 2,039
| 3.234375
| 3
|
[] |
no_license
|
'''
@Author: your name
@Date: 2020-04-12 11:13:18
@LastEditTime: 2020-04-12 11:19:18
@LastEditors: Please set LastEditors
@Description: In User Settings Edi
@FilePath: \刷题人生\leetcode\104.二叉树的最大深度.py
'''
#
# @lc app=leetcode.cn id=104 lang=python3
#
# [104] 二叉树的最大深度
#
# https://leetcode-cn.com/problems/maximum-depth-of-binary-tree/description/
#
# algorithms
# Easy (72.35%)
# Likes: 502
# Dislikes: 0
# Total Accepted: 155.5K
# Total Submissions: 213.6K
# Testcase Example: '[3,9,20,null,null,15,7]'
#
# 给定一个二叉树,找出其最大深度。
#
# 二叉树的深度为根节点到最远叶子节点的最长路径上的节点数。
#
# 说明: 叶子节点是指没有子节点的节点。
#
# 示例:
# 给定二叉树 [3,9,20,null,null,15,7],
#
# 3
# / \
# 9 20
# / \
# 15 7
#
# 返回它的最大深度 3 。
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def maxDepth(self, root: "TreeNode") -> int:
        """Return the maximum depth of a binary tree via level-order BFS.

        Depth is the number of nodes on the longest root-to-leaf path;
        an empty tree has depth 0.

        Improvements over the original: dead commented-out DFS removed;
        collections.deque replaces list.pop(0) (which is O(n) per pop);
        the TreeNode annotation is quoted so the module imports even when
        TreeNode is injected by the judge at runtime.
        """
        from collections import deque

        if not root:
            return 0
        depth = 0
        queue = deque([root])
        while queue:
            # Drain exactly one level per outer iteration.
            for _ in range(len(queue)):
                node = queue.popleft()
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            depth += 1
        return depth
# @lc code=end
| true
|
b040fa56b598dbf6bb94fe3a6f4ccc3ee47324ac
|
Python
|
GitNotifyTestUser/TestForGitNotify
|
/Mitsuru's Games/tron.py
|
UTF-8
| 9,005
| 3.265625
| 3
|
[] |
no_license
|
import pygame
import sys
from time import sleep
from pygame.locals import *
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
SILVER = (192, 192, 192)
GOLD = (255, 215, 0)
#INFORMATION
"""
Important Info!!!!
grid is 50x50
each grid is 10x10 pixels
start player at [3][3] and [46][46]
player vehicle is 1x1 box
blue is 1
red is 2
neither is 0
"""
def main():
    """Run the two-player Tron match: setup, countdown, then the game loop.

    Player 1 (blue) steers with the arrow keys, player 2 (red) with WASD.
    Crashing into a wall or any trail ends the round.
    """
    global FPSCLOCK, DISPLAYSURFACE
    FPS = 30
    FPSCLOCK = pygame.time.Clock()
    #initialization of game
    pygame.init()
    #size of game screen
    size = 551
    pygame.display.set_caption("Tron Game")
    DISPLAYSURFACE = pygame.display.set_mode((size, size))
    #initialization variables
    UP = "UP"
    DOWN = "DOWN"
    LEFT = "LEFT"
    RIGHT = "RIGHT"
    grid = [[0 for i in range(50)] for j in range(50)]
    player1Direction = "UP"
    player2Direction = "DOWN"
    player1Life = True
    player2Life = True
    #Player start locations
    player1X = 45
    player1Y = 45
    player2X = 4
    player2Y = 4
    #Check if players are ready
    DISPLAYSURFACE.fill(SILVER)
    FONT = pygame.font.Font('freesansbold.ttf', 18)
    keySurface = FONT.render("Press any key to start!", True, BLACK)
    keyRect = keySurface.get_rect()
    keyRect.topleft = (551/2-95, 551/2-50)
    DISPLAYSURFACE.blit(keySurface, keyRect)
    pygame.display.update()
    pygame.event.clear()
    pressAnyKey()
    # 3-2-1 countdown before the match starts.
    for x in range(3, 0, -1):
        DISPLAYSURFACE.fill(SILVER)
        keySurface = FONT.render(str(x), True, BLACK)
        keyRect = keySurface.get_rect()
        keyRect.topleft = (551/2, 551/2 - 25)
        DISPLAYSURFACE.blit(keySurface, keyRect)
        pygame.display.update()
        sleep(1)
        pygame.event.clear()
    while True:
        display(grid)
        displayPlayer(player1X, player1Y, player2X, player2Y)
        #Check for gameovers/draws
        #Check for out of bounds collision
        if player1X < 0 or player1Y < 0 or player1X > 49 or player1Y > 49:
            player1Life = False
        if player2X < 0 or player2Y < 0 or player2X > 49 or player2Y > 49:
            player2Life = False
        if not player1Life and not player2Life:
            gameDraw(grid)
        if not player1Life:
            gameOver(1, grid)
        elif not player2Life:
            gameOver(2, grid)
        #Test for collisions with enemy trail/self
        if grid[player1X][player1Y] != 0:
            player1Life = False
        if grid[player2X][player2Y] != 0:
            player2Life = False
        if not player1Life and not player2Life:
            gameDraw(grid)
        if not player1Life:
            gameOver(1, grid)
        elif not player2Life:
            gameOver(2, grid)
        #event getter
        for event in pygame.event.get():
            if event.type == QUIT:
                quitGame()
            elif event.type == KEYDOWN:
                # Steering: 180-degree reversals are disallowed so a player
                # cannot instantly run into their own trail.
                if event.key == K_LEFT and player1Direction != RIGHT:
                    player1Direction = LEFT
                elif event.key == K_RIGHT and player1Direction != LEFT:
                    player1Direction = RIGHT
                elif event.key == K_UP and player1Direction != DOWN:
                    player1Direction = UP
                elif event.key == K_DOWN and player1Direction != UP:
                    player1Direction = DOWN
                elif event.key == K_a and player2Direction != RIGHT:
                    player2Direction = LEFT
                elif event.key == K_d and player2Direction != LEFT:
                    player2Direction = RIGHT
                elif event.key == K_s and player2Direction != UP:
                    player2Direction = DOWN
                elif event.key == K_w and player2Direction != DOWN:
                    player2Direction = UP
        # Leave a trail at the current positions, then advance both heads.
        grid[player1X][player1Y] = 1
        grid[player2X][player2Y] = 2
        if player1Direction == LEFT:
            player1X -= 1
        elif player1Direction == RIGHT:
            player1X += 1
        elif player1Direction == DOWN:
            player1Y += 1
        elif player1Direction == UP:
            player1Y -= 1
        if player2Direction == LEFT:
            player2X -= 1
        elif player2Direction == RIGHT:
            player2X += 1
        elif player2Direction == DOWN:
            player2Y += 1
        elif player2Direction == UP:
            player2Y -= 1
        pygame.display.update()
        FPSCLOCK.tick(FPS)
#Displays graphics
def display(grid):
    """Redraw the background, the 51x51 grid lines and every occupied cell."""
    DISPLAYSURFACE.fill(SILVER)
    # Each cell is 10px wide plus a 1px separator, so line i sits at 11*i.
    for i in range(51):
        pos = i * 11
        pygame.draw.line(DISPLAYSURFACE, WHITE, (0, pos), (549, pos))
        pygame.draw.line(DISPLAYSURFACE, WHITE, (pos, 0), (pos, 549))
    # Paint every cell that a trail already occupies.
    for col in range(50):
        for row in range(50):
            colorSquare(col, row, grid)
def displayPlayer(player1X, player1Y, player2X, player2Y):
    """Draw both player heads, then outline each head cell in black."""
    heads = ((player1X, player1Y, BLUE), (player2X, player2Y, RED))
    for x, y, color in heads:
        colorPlayer(x, y, color)
    for x, y, _ in heads:
        colorGridAroundPlayer(x, y)
def gameOverDisplay(grid):
    """Render the final board, wait for a key press, then restart the game.

    NOTE(review): restarting by calling main() recursively grows the call
    stack with every finished round -- consider looping in the caller.
    """
    DISPLAYSURFACE.fill(SILVER)
    for x in range(0, 51):
        location = (x * 10 + x)
        pygame.draw.line(DISPLAYSURFACE, WHITE, (0, location), (549, location))
        pygame.draw.line(DISPLAYSURFACE, WHITE, (location, 0), (location, 549))
    #Color each grid that has already been occupied
    for x in range(0, 50):
        for y in range(0, 50):
            colorSquare(x, y, grid)
    pygame.display.update()
    sleep(1.5)
    pressAnyKey()
    main()
#Function that waits for a key to be pressed
def pressAnyKey():
    """Block until any key is pressed; ESC quits the game entirely."""
    pygame.event.clear()
    waiting = True
    while waiting:
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    quitGame()
                waiting = False
#function to color each grid in the game
def colorSquare(x, y, tempGrid):
    """Fill grid cell (x, y) with the color matching its occupancy code.

    Codes: 0 empty (nothing drawn), 1 blue trail, 2 red trail,
    3 gold (end-screen frame), 4 black, 5 white.

    The original's if/elif chain left ``color`` unbound (raising
    UnboundLocalError) for any unexpected code; unknown codes are now
    ignored like empty cells.
    """
    colors = {1: BLUE, 2: RED, 3: GOLD, 4: BLACK, 5: WHITE}
    color = colors.get(tempGrid[x][y])
    if color is None:
        # Empty cell (0) or unrecognised code: draw nothing.
        return
    locationX = (x * 10 + x) + 1
    locationY = (y * 10 + y) + 1
    pygame.draw.rect(DISPLAYSURFACE, color, (locationX, locationY, 10, 10))
    return
#color player
def colorPlayer(x, y, color):
    """Fill the 10x10 pixel interior of grid cell (x, y) with *color*."""
    px = x * 11 + 1
    py = y * 11 + 1
    pygame.draw.rect(DISPLAYSURFACE, color, (px, py, 10, 10))
    return
#Color the lines around the player
def colorGridAroundPlayer(x, y):
    """Outline grid cell (x, y) in black so the player head stands out."""
    left, top = x * 11, y * 11
    right, bottom = left + 11, top + 11
    edges = (((left, top), (left, bottom)),
             ((left, top), (right, top)),
             ((right, top), (right, bottom)),
             ((left, bottom), (right, bottom)))
    for start, end in edges:
        pygame.draw.line(DISPLAYSURFACE, BLACK, start, end)
#Function for gameover
def gameOver(player, grid):
    """Fill the board in the winner's color and show the end screen.

    *player* is the LOSER's number; it is flipped to the winner first.
    """
    if player == 1:
        player = 2
    elif player == 2:
        player = 1
    grid[0][0] = player
    # Flood the whole grid with the winner's color (both triangles + diagonal).
    for x in range(0, 50):
        for y in range(0, x):
            grid[x][y] = player
            grid[y][x] = player
        grid[x][x] = player
    display(grid)
    pygame.display.update()
    # Overlay a gold frame with a horizontal bar on the winner-colored board.
    for x in range(3, 47):
        grid[x][8] = 3
        grid[x][41] = 3
        #print gold box
        grid[x][24] = 3
        grid[x][25] = 3
    #print gold box
    for x in range(4, 46):
        grid[x][9] = 3
        grid[x][40] = 3
    for y in range(8, 42):
        grid[3][y] = 3
        grid[4][y] = 3
    for y in range(8, 42):
        grid[46][y] = 3
        grid [45][y] = 3
    gameOverDisplay(grid)
#Function for game draws
def gameDraw(grid):
    """Fill the board with the draw pattern (one color per triangle, black
    diagonal) and show the end screen."""
    for x in range(0, 50):
        for y in range(0, 50):
            grid[x][y] = 1
            grid[y][x] = 2
            grid[x][x] = 4
    display(grid)
    pygame.display.update()
    #printing gold box
    for x in range(3, 47):
        grid[x][13] = 3
        grid[x][36] = 3
    for x in range(4, 46):
        grid[x][14] = 3
        grid[x][35] = 3
    for y in range(13, 36):
        grid[3][y] = 3
        grid[4][y] = 3
    for y in range(13, 37):
        grid[46][y] = 3
        grid [45][y] = 3
    gameOverDisplay(grid)
#function to quit the game
def quitGame():
    # Shut down pygame cleanly, then terminate the process.
    pygame.quit()
    sys.exit()
#initiate the game
if __name__ == "__main__":
main()
| true
|
b0abd25c4e0874bdada61a5401179bcbea849aff
|
Python
|
HERoLab/FirstRobot
|
/pygameControlUI.py
|
UTF-8
| 6,471
| 3.0625
| 3
|
[] |
no_license
|
import sys, time
import socket
import json
import pygame
from pygame.locals import *
#Variable Setup.
originSpeed = 47
maxSpeed = 74
minSpeed = 20
turnSpeed = 10
brakeSpeed = 1 #The speed at which to brake per "loop"
stabilizeSpeed = 3 #The speed at which to "stabilize" turns.
leftSpeed = originSpeed
rightSpeed = originSpeed
breakDelay = 10
eventWait = 100
motorOffset = 55 #The offset for the left motor (see Arduino Program).
keysPressed = None
# # # # # # # # # # # Main UI Function # # # # # # # # # # #
def main():
    """Run the robot-operator UI (Python 2): connect to the robot over TCP,
    poll the keyboard every ``eventWait`` ms, and stream JSON wheel-speed
    packets built by encodeSpeeds().

    Controls: up/down change speed, left/right turn (one turn per key
    press), q/b/ESC end the session. With no keys held, speeds decay back
    towards originSpeed after ``breakDelay`` idle polls.
    """
    global rightSpeed, leftSpeed, originSpeed, breakDelay, eventWait
    # Initialise screen
    print "__"*10
    print "\n-- Starting the Robit Operator..."
    pygame.init()
    screen = pygame.display.set_mode((400, 250))
    fontStyle = pygame.font.SysFont("Comic Sans MS", 24)
    pygame.display.set_caption('Robit Operiter')
    # Fill background
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    background.fill((0, 0, 0))
    # Blit everything to the screen
    screen.blit(background, (0, 0))
    pygame.display.flip()
    #Final Variable Setup:
    noKeyDuration = 0
    turning = False
    #Set up the TCP connection.
    message = "\n-- What is the IP of the robit? (default: 192.168.1.100)"
    robotIP = raw_input(message) or "192.168.1.100"
    TCP_Port = 50007
    socketConnection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socketConnection.connect((robotIP, TCP_Port))
    print "\n-- Connection established!"
    running = True
    while running:
        pygame.event.pump() #Flush the last key presses.
        for event in pygame.event.get():
            try:
                if event.type == QUIT:
                    running = False
                    break
                elif event.type == KEYUP:
                    #Allow only one turn event to trigger at a time.
                    if event.key == pygame.K_LEFT:
                        turning = False
                    elif event.key == pygame.K_RIGHT:
                        turning = False
            except KeyboardInterrupt:
                running = False
        #Get the keys that are currently pressed.
        key = pygame.key.get_pressed()
        # # # # # # Session Controls # # # # # # #
        #End the session if "q" "b" or "ESC" are pressed. Also slow the 'bot.
        if key[ pygame.K_q ] or key[ pygame.K_b ] or key[ pygame.K_ESCAPE ]:
            rightSpeed = changeSpeed(originSpeed)
            leftSpeed = changeSpeed(originSpeed)
            running = False
        # # # # # Wheel Speed Controls # # # # # #
        #Increase/Decrease Speed
        if key[ pygame.K_UP ]:
            rightSpeed = incrementSpeed(1, rightSpeed)
            leftSpeed = incrementSpeed(1, leftSpeed)
        elif key[ pygame.K_DOWN ]:
            rightSpeed = incrementSpeed(-1, rightSpeed)
            leftSpeed = incrementSpeed(-1, leftSpeed)
        #Turn By Altering Speeds Choose car-turn and pivot-turn based on speed.
        if key[ pygame.K_LEFT ] and not turning:
            turning = True
            if leftSpeed+turnSpeed > maxSpeed:
                leftSpeed = changeSpeed(maxSpeed-turnSpeed)
                rightSpeed = changeSpeed(maxSpeed)
            else:
                rightSpeed = changeSpeed(leftSpeed+turnSpeed)
        if key[ pygame.K_RIGHT ] and not turning:
            turning = True
            if rightSpeed+turnSpeed > maxSpeed:
                rightSpeed = changeSpeed(maxSpeed-turnSpeed)
                leftSpeed = changeSpeed(maxSpeed)
            else:
                leftSpeed = changeSpeed(rightSpeed+turnSpeed)
        #If no key is pressed, slow to a stop.
        if noKeyPressed(key):
            if noKeyDuration > breakDelay:
                noKeyDuration = noKeyDuration*3/4
                if rightSpeed > originSpeed: rightSpeed -= brakeSpeed
                elif rightSpeed < originSpeed: rightSpeed += brakeSpeed
                if leftSpeed > originSpeed: leftSpeed -= brakeSpeed
                elif leftSpeed < originSpeed: leftSpeed += brakeSpeed
                #Allow the bot to stabilize to be moving forward as well.
                if abs(leftSpeed-rightSpeed) < stabilizeSpeed:
                    leftSpeed = (leftSpeed+rightSpeed)/2
                    rightSpeed = leftSpeed
                elif originSpeed > leftSpeed > rightSpeed: rightSpeed += stabilizeSpeed
                elif originSpeed > rightSpeed > leftSpeed: leftSpeed += stabilizeSpeed
                elif leftSpeed > rightSpeed > originSpeed: leftSpeed -= stabilizeSpeed
                elif rightSpeed > leftSpeed > originSpeed: rightSpeed -= stabilizeSpeed
        else:
            noKeyDuration += 1
        #Actually send the new speed to the bot.
        print encodeSpeeds(leftSpeed, rightSpeed)
        socketConnection.send(encodeSpeeds(leftSpeed, rightSpeed))
        #Render the UI elements.
        color = (55, 255, 100)
        left = fontStyle.render("Left: {}".format(leftSpeed-originSpeed), 1, color)
        right = fontStyle.render("Right: {}".format(rightSpeed-originSpeed), 1, color)
        direction = fontStyle.render("Direction: {}".format(getDirection()), 1, color)
        #Draw the rendered elements on the screen (strangely called "blit" in PyGame).
        screen.blit(background, (0, 0))
        screen.blit(left, (50, 50))
        screen.blit(right, (50, 80))
        screen.blit(direction, (50, 110))
        #Display (or apparently "flip") the screen.
        pygame.display.flip()
        #Add a delay so the operations don't occur too quickly.
        pygame.time.delay(eventWait)
    #Close the window.
    print "-- Quitting..."
    pygame.quit()
    sys.exit()
# # # # # # # # # # # Helper Functions # # # # # # # # # # #
def incrementSpeed(change, speed):
    """Return speed+change if the result stays strictly inside
    (minSpeed, maxSpeed); otherwise return speed unchanged."""
    proposed = speed + change
    increasing_ok = change > 0 and proposed < maxSpeed
    decreasing_ok = change < 0 and proposed > minSpeed
    if increasing_ok or decreasing_ok:
        return proposed
    return speed
def changeSpeed(newSpeed):
    """Return newSpeed if it lies in [minSpeed, maxSpeed]; otherwise warn
    and fall back to originSpeed."""
    if not (minSpeed <= newSpeed <= maxSpeed):
        print("ERROR: Cannot set speed to {}".format(newSpeed))
        return originSpeed
    return newSpeed
def getDirection():
    """Describe the robot's motion from the current global wheel speeds."""
    if leftSpeed != rightSpeed:
        # Unequal wheel speeds mean the robot is turning.
        return "Right" if leftSpeed > rightSpeed else "Left"
    if leftSpeed == originSpeed:
        return "Standing"
    return "Backward" if leftSpeed < originSpeed else "Forward"
#Check if any one of the control keys are pressed.
def noKeyPressed(key):
    """Return True when none of the control keys is currently held down."""
    control_keys = (pygame.K_LEFT, pygame.K_RIGHT, pygame.K_UP,
                    pygame.K_DOWN, pygame.K_SPACE, pygame.K_LSHIFT)
    return not any(key[k] for k in control_keys)
#Delimit JSON by dollar signs in case packets are concatenated.
def encodeSpeeds(leftSpeed, rightSpeed):
    """Serialize the wheel speeds as a '$'-terminated JSON packet so the
    receiver can split concatenated packets on the delimiter."""
    payload = {"left": int(leftSpeed), "right": int(rightSpeed)}
    return json.dumps(payload) + "$"
#If called from the command line, run the UI function.
if __name__ == "__main__": main()
| true
|
238a68f854d62d14c41d63d9b2d8eac26080b971
|
Python
|
tianchuntian/beauty_shop
|
/page/addrpage.py
|
UTF-8
| 3,967
| 2.6875
| 3
|
[] |
no_license
|
import time
from common.base import Base,open_browser
from page.accountpage import Account
from page.buynow import BuyNow
from page.goodspage import GoodsPage
from page.housepage import HousePage
from page.loginpage import LoginPage, url
class AddrPage(Base):
    "Page object for the shipping-address form shown on a user's first purchase."
    # country <select> locator
    country_loc=("name","country")
    # 'China' option inside the country dropdown
    china_loc=("xpath","// *[ @ id = 'selCountries_0'] / option[2]")
    # province dropdown locator
    province_loc=("id","selProvinces_0")
    # city dropdown locator
    city_loc=("name","city")
    # district dropdown locator
    district_loc=("name","district")
    # consignee (recipient name) input locator
    consignee_loc=("name","consignee")
    # detailed-address input locator
    detail_addr_loc=("name","address")
    # telephone input locator
    tel_loc=("name","tel")
    # email input locator
    email_loc=("name","email")
    # postcode input locator
    zipcode_loc=("name","zipcode")
    # mobile-phone input locator
    mobile_loc=("name","mobile")
    # 'ship to this address' submit-button locator
    addr_submit_loc=("class name","bnt_blue_2")
    # option elements inside the province dropdown
    province_options_loc=("css selector","select[name='province']>option")
    # option elements inside the city dropdown
    city_optins_loc=("css selector","select[name='city']>option")
    # option elements inside the district dropdown
    district_optins_loc=("css selector","select[name='district']>option")
    def click_china(self):
        # select 'China' as the country
        self.click(self.china_loc)
    def click_province(self):
        # choose a province option (index choice is handled by Base.select_by_index)
        self.select_by_index(self.province_loc,self.province_options_loc)
    def click_city(self):
        # choose a city option
        self.select_by_index(self.city_loc,self.city_optins_loc)
    def click_district(self):
        # choose a district option
        self.select_by_index(self.district_loc,self.district_optins_loc)
    def input_consigneename(self,name):
        # type the recipient's name
        self.send_keys(self.consignee_loc,name)
    def input_detail_addr(self,addr):
        # type the street address
        self.send_keys(self.detail_addr_loc,addr)
    def input_tel(self,tel):
        # type the landline number
        self.send_keys(self.tel_loc,tel)
    def input_email(self,email):
        # type the email address
        self.send_keys(self.email_loc,email)
    def input_zipcode(self,zipcode):
        # type the postcode
        self.send_keys(self.zipcode_loc,zipcode)
    def input_mobile(self,mobile):
        # type the mobile number
        self.send_keys(self.mobile_loc,mobile)
    def addr_submit(self):
        # click 'ship to this address'
        self.click(self.addr_submit_loc)
if __name__ == '__main__':
    # Manual smoke test: log in, pick a product, start a purchase, then
    # fill in and submit the shipping-address form end to end.
    driver = open_browser()
    login = LoginPage(driver)
    login.open_url(url)
    username = '诸葛亮_2'
    password = 'Test123456'
    login.input_username(username)
    login.input_password(password)
    login.submit_click()
    print(login.is_successed(username))
    login.housepage_click()
    house = HousePage(driver)
    house.phone_type_click()
    # open the Nokia N85 product page
    goodspage = GoodsPage(driver)
    goodspage.nokia_n85_click()
    # click 'buy now'
    buynow = BuyNow(driver)
    buynow.buy_now_click()
    # proceed to checkout
    account = Account(driver)
    account.go_account_click()
    # fill in the shipping address and submit it
    addrpage=AddrPage(driver)
    addrpage.click_china()
    time.sleep(1)
    print("*"*10)
    addrpage.click_province()
    time.sleep(1)
    print("*" * 10)
    addrpage.click_city()
    time.sleep(1)
    addrpage.click_district()
    name='诸葛亮_1'
    addrpage.input_consigneename(name)
    detail_addr='南阳'
    addrpage.input_detail_addr(detail_addr)
    tel=12345
    addrpage.input_tel(tel)
    email='[email protected]'
    addrpage.input_email(email)
    zipcode='6655'
    addrpage.input_zipcode(zipcode)
    mobile='15523445666'
    addrpage.input_mobile(mobile)
    addrpage.addr_submit()
| true
|
69aac504090b8ed60ea504dfde4abb8f56c7a57f
|
Python
|
LucasMaiale/Libro1-python
|
/Cap4/Ejemplo 4_2.py
|
UTF-8
| 832
| 4.09375
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
@author: guardati
Ejemplo 4_2
Solución del problema 4.1 sin usar ciclo.
En este caso se emplea una variable por cada sueldo.
"""
# Read ten salaries one at a time. The exercise deliberately avoids loops
# (see the module docstring), so each reading gets its own variable.
sueldo1 = float(input('Ingrese el sueldo 1: $'))
sueldo2 = float(input('Ingrese el sueldo 2: $'))
sueldo3 = float(input('Ingrese el sueldo 3: $'))
sueldo4 = float(input('Ingrese el sueldo 4: $'))
sueldo5 = float(input('Ingrese el sueldo 5: $'))
sueldo6 = float(input('Ingrese el sueldo 6: $'))
sueldo7 = float(input('Ingrese el sueldo 7: $'))
sueldo8 = float(input('Ingrese el sueldo 8: $'))
sueldo9 = float(input('Ingrese el sueldo 9: $'))
sueldo10 = float(input('Ingrese el sueldo 10: $'))
# The payroll is simply the sum of the ten readings.
nomina = sueldo1 + sueldo2 + sueldo3 + sueldo4 + sueldo5 + sueldo6 + sueldo7 + sueldo8 + sueldo9 + sueldo10
print('\nLa nómina que debe pagarse es: $', nomina)
| true
|
6947afb77550a3f9ebf0f822bbb96e17ba93c713
|
Python
|
BelitK/CHIRP
|
/chirp/plugins/registry/scan.py
|
UTF-8
| 3,135
| 2.625
| 3
|
[
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
] |
permissive
|
"""Main method for the Windows registry plugin."""
# Standard Python Libraries
import json
import logging
import os
from typing import Dict, List, Tuple, Union
# cisagov Libraries
from chirp.common import OUTPUT_DIR, REGISTRY, build_report
from chirp.plugins import operators
from chirp.plugins.registry.registry import enumerate_registry_values
async def check_matches(
    indicator_list: List[Tuple[str, str]], registry_key: str
) -> Tuple[int, List[Dict[str, str]], Union[str, dict]]:
    """Check for registry key matches to a list of indicators.

    :param indicator_list: A list containing tuples of keys and search strings
    :type indicator_list: List[Tuple[str,str]]
    :param registry_key: A registry key to query
    :type registry_key: str
    :return: A tuple of (hits, the search criteria, matches)
    :rtype: Tuple[int, List[Dict[str,str]], Union[str, dict]]
    """
    hits = 0
    search_criteria = []
    match = ""
    for key, search_string in indicator_list:
        _match = None
        if not match:
            # No match recorded yet: apply this term to the registry key itself.
            if operators.searcher(search_string, registry_key, key.lower()):
                _match = registry_key
        else:
            # A previous term matched: apply subsequent terms to the already
            # matched value, chaining the criteria together.
            if operators.searcher(search_string, match, key.lower()):
                _match = match
        # NOTE(review): hits is incremented for every term regardless of
        # whether it matched, so the caller's hits == len(indicator_list)
        # check appears vacuous -- confirm intent against upstream CHIRP.
        hits += 1
        search_criteria.append({"key": str(key), "search_string": search_string})
        if _match:
            match = _match
    return hits, search_criteria, match
async def _report_hits(indicator: str, vals: dict) -> None:
    """Log how many registry matches were recorded for one indicator."""
    hit_count = len(vals["matches"])
    logging.log(
        REGISTRY,
        "Found {} hit(s) for {} indicator.".format(hit_count, indicator),
    )
async def run(indicators: dict) -> None:
    """Accept a dict containing events indicators and write out to the OUTPUT_DIR specified by chirp.common.

    :param indicators: A dict containing parsed registry indicator files.
    :type indicators: dict
    """
    if not indicators:
        return
    logging.debug("(REGISTRY) Entered registry plugin.")
    report = {indicator["name"]: build_report(indicator) for indicator in indicators}
    for indicator in indicators:
        ind = indicator["indicator"]
        # Every key except the registry path itself is a search term.
        indicator_list = [(k, v) for k, v in ind.items() if k != "registry_key"]
        logging.log(REGISTRY, "Reading {}".format(ind["registry_key"]))
        async for value in enumerate_registry_values(ind["registry_key"]):
            if value == "ERROR":
                logging.log(REGISTRY, "Hit an error, exiting.")
                return
            hits, search_criteria, match = await check_matches(indicator_list, value)
            # Require every search term to register before recording.
            if hits != len(indicator_list):
                continue
            report[indicator["name"]]["_search_criteria"] = search_criteria
            if match:
                report[indicator["name"]]["matches"].append(match)
    # Plain loop instead of the original side-effect-only list comprehension.
    for name, vals in report.items():
        await _report_hits(name, vals)
    # Persist only the indicators that actually matched.
    with open(os.path.join(OUTPUT_DIR, "registry.json"), "w+") as writeout:
        writeout.write(
            json.dumps({r: report[r] for r in report if report[r]["matches"]})
        )
| true
|
a208f452c6f82bea40b4e78ca7a616d06904b841
|
Python
|
JimBae/pythonForMachineLearning
|
/ch01/pythonic_code/lambda_function.py
|
UTF-8
| 299
| 3.234375
| 3
|
[] |
no_license
|
# ref
# https://github.com/TEAMLAB-Lecture/AI-python-connect/blob/master/codes/ch_1/pythonic_code/lambda_function.py
# Demonstrates that lambda expressions are just anonymous function objects:
# each rebinding of ``f`` below replaces the previous function.
def f(x, y):
    # ordinary named function for comparison with the lambda version
    return x + y
print (f(1,4))
# Same behavior as the def above, written as a lambda.
f = lambda x, y: x + y
print (f(1,4))
f = lambda x: x ** 2
print (f(3))
# Note: under Python 3 ``/`` is true division, so this prints 1.5.
f = lambda x: x/2
print (f(3))
# A lambda can also be called immediately without being named.
print ((lambda x: x+1)(5))
| true
|
77144575b97ba92cd4a0628d544cce3ef854f0ed
|
Python
|
981377660LMT/algorithm-study
|
/20_杂题/atc競プロ/競プロ典型 90 問/082 - Counting Numbers(★3.py
|
UTF-8
| 720
| 3.234375
| 3
|
[] |
no_license
|
# 1<=L,R<=1e18
# 每个数num在 黑板上写num次 求最终的长度
# !前缀和相减 按照位数分类计算
import sys
sys.setrecursionlimit(int(1e9))
input = sys.stdin.readline
MOD = int(1e9 + 7)
L, R = map(int, input().split())
def cal(upper: int) -> int:
    """Total written length over [1, upper] when each number ``num`` is
    written on the board ``num`` times, i.e. sum of num * len(str(num)).

    Numbers are bucketed by digit count so each bucket reduces to an
    arithmetic-series sum.
    """
    total = 0
    for digits in range(1, 21):
        lo = 10 ** (digits - 1)
        hi = 10 ** digits - 1
        if hi >= upper:
            # Partial final bucket: lo..upper inclusive.
            cnt = upper - lo + 1
            total += digits * (lo + upper) * cnt // 2
            break
        # Full bucket of `digits`-digit numbers: lo..hi inclusive.
        cnt = hi - lo + 1
        total += digits * (lo + hi) * cnt // 2
    return total
print((cal(R) - cal(L - 1)) % MOD)
| true
|
574a1d3404aba0cd7099ee288799899c33facdd1
|
Python
|
NoMod-Programming/py-utility
|
/tests/test_utility.py
|
UTF-8
| 1,914
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
from unittest import TestCase
from pyutility import limit_resource, measureit
def func1a(x):
    """Allocate a list of x integers (memory-pressure fixture) and return -1."""
    _ = list(range(x))
    return -1
def func1b(x):
    # Recursive xth Fibonacci number. Deliberately exponential-time so the
    # resource-limit tests below can trigger TimeoutError for modest x
    # (see test_limit_resource_5); do not memoize or rewrite iteratively.
    if x < 3:
        return 1
    return func1b(x-1) + func1b(x-2)
def func2():
    # Always raises TypeError ("a" / 2); used to verify that the wrappers
    # propagate exceptions (test_measureit_4 / test_limit_resource_3).
    return "a" / 2
def func3(*args, **kwargs):
    """Return the positional args followed by the keyword values, as one list."""
    return [*args, *kwargs.values()]
class MeasureitTest(TestCase):
    """Tests for pyutility.measureit, which wraps a function so a call
    returns measurement data as a tuple instead of the bare result."""
    def setUp(self):
        # Rebuild a wrapped version of each fixture function per test.
        self.er_func = measureit(func2)
        self.func_m = measureit(func1a)
        self.func_t = measureit(func1b)
        self.ka_func = measureit(func3)
    def test_measureit_1(self):
        # A measured call returns a tuple, not the wrapped function's result.
        self.assertIsInstance(self.func_m(100), tuple)
    def test_measureit_2(self):
        # First element is an int, second a float (exact semantics are
        # defined by pyutility.measureit).
        x = self.func_t(10)
        self.assertIsInstance(x[0], int)
        self.assertIsInstance(x[1], float)
    def test_measureit_3(self):
        self.assertIsInstance(self.func_t(15), tuple)
    def test_measureit_4(self):
        # Exceptions raised by the wrapped function must propagate.
        self.assertRaises(Exception, self.er_func)
class LimitResourceTest(TestCase):
    """Tests for pyutility.limit_resource, which wraps a function with a
    wall-time limit (here 2 seconds) and resource constraints."""
    def setUp(self):
        # Each fixture is wrapped with a 2-second time limit.
        self.er_func = limit_resource(time=2)(func2)
        self.func_m = limit_resource(time=2)(func1a)
        self.func_t = limit_resource(time=2)(func1b)
        self.ka_func = limit_resource(time=2)(func3)
    def test_limit_resource_1(self):
        # Within limits, the wrapper returns the wrapped function's result.
        self.assertEqual(self.func_m(300), -1)
    def test_limit_resource_2(self):
        self.assertEqual(self.func_t(3), 2)
    def test_limit_resource_3(self):
        # Exceptions from the wrapped function propagate.
        self.assertRaises(Exception, self.er_func)
    def test_limit_resource_4(self):
        # A huge allocation must be cut off with MemoryError.
        self.assertRaises(MemoryError, self.func_m, 100_000_000)
    def test_limit_resource_5(self):
        # fib(50) via the exponential fixture must exceed the 2s limit.
        self.assertRaises(TimeoutError, self.func_t, 50)
    def test_limit_resource_6(self):
        # Positional and keyword arguments pass through unchanged.
        self.assertEqual(self.ka_func(
            1, 2, 3, four=4, five=5), [1, 2, 3, 4, 5])
| true
|
18192d993694962e3932e3d9bf674c897311d454
|
Python
|
sainihimanshu1999/FB-List
|
/MinimumWindowSubstring.py
|
UTF-8
| 744
| 3.65625
| 4
|
[] |
no_license
|
'''
we use simple two pointer approach in this question
'''
from collections import Counter
def window(s, t):
    """Return the minimum window of s containing every character of t
    (with multiplicity), or '' if no such window exists.

    Classic sliding-window: expand the right edge until all of t is
    covered, then shrink the left edge while coverage holds.
    """
    need = Counter(t)
    missing = len(t)          # characters of t still uncovered
    best = ''
    left = 0
    for right, ch in enumerate(s):
        if need[ch] > 0:
            missing -= 1
        need[ch] -= 1
        while missing == 0:
            if not best or right - left + 1 < len(best):
                best = s[left:right + 1]
            # Give back the leftmost character and advance the left edge.
            need[s[left]] += 1
            if need[s[left]] > 0:
                missing += 1
            left += 1
    return best
s = "ADOBECODEBANC"
t = "ABC"
print(window(s,t))
| true
|
29f39ddc016a183154ae3ce77e19f1875413df47
|
Python
|
innovationcode/searching-problems
|
/search_using_binary/count_rotation.py
|
UTF-8
| 495
| 4.3125
| 4
|
[] |
no_license
|
#Find the Rotation Count in Rotated Sorted array
def count_rotation(arr):
    """Return how many rotations were applied to an ascending sorted array,
    i.e. the index of its minimum element.

    Fixes the original's out-of-range access on arr[mid - 1] / arr[mid + 1]
    (e.g. IndexError on [2, 3, 1], where the minimum sits at the last
    index) by comparing arr[mid] against arr[high] only.
    """
    low, high = 0, len(arr) - 1
    while low < high:
        mid = (low + high) // 2
        if arr[mid] > arr[high]:
            # The rotation point (minimum) lies strictly right of mid.
            low = mid + 1
        else:
            # arr[mid..high] is sorted ascending; minimum is at mid or left.
            high = mid
    return low
arr = [9, 10, 11, 12, 15, 16, 17, 20, 45, 78, 89, 1, 2, 3, 4, 8]
print("The array rotated by " , count_rotation(arr), " rotations.")
| true
|
b54f9e4a64ae10e0f33ed5469a5058ff7a3fa52f
|
Python
|
Deep455/Python-programs-ITW1
|
/python_assignment_2/py14.py
|
UTF-8
| 380
| 3.9375
| 4
|
[] |
no_license
|
# Read a list of n integers from the user, then bubble-sort it in place.
n = int(input("enter size of list : "))
lst = []
print("enter elements : ")
for _ in range(n):
    lst.append(int(input()))
print("initially list : ")
print(lst)
# Bubble sort: after pass i the largest remaining element has bubbled to
# position n-1-i, so each inner scan can stop one slot earlier.
for i in range(n - 1):
    for j in range(n - 1 - i):
        if lst[j] > lst[j + 1]:
            # Pythonic tuple swap replaces the original temp variable.
            lst[j], lst[j + 1] = lst[j + 1], lst[j]
print("after sorting : ")
print(lst)
| true
|
6396a6b5b83b03bc72e1709884ace5e58af05d4c
|
Python
|
NLeSC/spreading_dye_sampler
|
/spreading_dye_sampler/dye_blot.py
|
UTF-8
| 3,797
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
import numpy as np
class DyeBlot:
    """A connected set of grid cells forming one 'blot' of spreading dye."""

    def __init__(self, grid_dims, cell_dims):
        # Width and height of the grid, as a 2-element list.
        self._grid_dims = grid_dims
        # Width and height of a single cell, as a 2-element list.
        self._cell_dims = cell_dims
        # Cell coordinates as (x, y) tuples, in insertion order.
        self._cells = []

    def num_cells(self):
        """Return the number of cells currently in the blot."""
        return len(self._cells)

    def for_each_cell(self, function):
        """Invoke ``function(x, y)`` once for every cell in the blot.

        Args:
            function: A callable taking the two cell coordinates.
        """
        for x, y in self._cells:
            function(x, y)
def make_blot(grid_dims, cell_dims, req_num_cells, permitted=None, squeeze=False):
"""
Create a DyeBlot on a given grid, perhaps enforcing a mask.
If permitted is specified, only cells for which permitted has a
truthy value will be selected. If squeeze is True, non-permitted
cells will simply be removed from consideration when extending
a blot; if it is False, blot construction will be stopped and
None returned if a non-permitted cell is used.
Args:
grid_dims (List): A 2-element list with width and height \
of the grid, in that order.
cell_dims (List): A 2-element list with width and height \
off the cells, in that order.
req_num_cells (Int): The required size of the blot.
permitted (np.ndarray): A 2D array of size corresponding to \
grid_dims.
Returns:
(Union[DyeBlot, None]) The constructed DyeBlot, or None if \
squeeze is False and a masked cell was used.
"""
from numpy.random import random
from numpy.random import choice
grid_width = grid_dims[0]
grid_height = grid_dims[1]
cell_width = cell_dims[0]
cell_height = cell_dims[1]
blot = DyeBlot(grid_dims, cell_dims)
if permitted is None:
permitted = np.ones(grid_dims, dtype=bool)
neighbours = {} # dict of (x, y) -> shared_edge_length
def _add_neighbour(neighbour, new_edge_length):
if neighbour in blot._cells:
return
if neighbour in neighbours:
neighbours[neighbour] += new_edge_length
else:
neighbours[neighbour] = new_edge_length
def _add_cell(cell):
blot._cells.append(cell)
_add_neighbour((cell[0] - 1, cell[1]), cell_height)
_add_neighbour((cell[0], cell[1] - 1), cell_width)
_add_neighbour((cell[0] + 1, cell[1]), cell_height)
_add_neighbour((cell[0], cell[1] + 1), cell_width)
def _draw_neighbour():
cells, lengths = zip(*neighbours.items())
indices = np.arange(0, len(cells))
probabilities = np.asarray(lengths) / float(sum(lengths))
index = choice(indices, p=probabilities)
return cells[index]
def _is_permitted(neighbour):
if neighbour[0] < 0: return False
if neighbour[0] >= grid_width: return False
if neighbour[1] < 0: return False
if neighbour[1] >= grid_height: return False
return permitted[neighbour]
cx = int(np.floor(random() * grid_width))
cy = int(np.floor(random() * grid_height))
start = cx, cy
if _is_permitted(start):
_add_cell(start)
while neighbours != {} and len(blot._cells) < req_num_cells:
neighbour = _draw_neighbour()
if _is_permitted(neighbour):
_add_cell(neighbour)
elif not squeeze:
return None
del(neighbours[neighbour])
else:
return None
return blot
if __name__ == '__main__':
blot = make_blot([1200, 1600], [30, 30], 10)
print(blot)
| true
|
7b57c6275d080416adc388abd48b4840bd795b71
|
Python
|
robsontpm/scrum-roman
|
/roman.py
|
UTF-8
| 766
| 3.5
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
import pytest
from scr1 import code, decode
# Acceptance tests:
def fib(n):
    """Return the n-th Fibonacci-like number, with fib(0) == fib(1) == 1.

    Naive recursion; fine for the small n used by these tests.
    """
    if n < 2:
        return 1
    return fib(n - 1) + fib(n - 2)
def test_fib_0_1():
assert fib(0) == 1
assert fib(1) == 1
def test_fib_up_to_10():
assert fib(2) == 2
assert fib(3) == 3
assert fib(4) == 5
assert fib(5) == 8
assert fib(6) == 13
assert fib(7) == 21
assert fib(8) == 34
assert fib(9) == 55
assert fib(10) == 89
def test_code_decode_less_than_10():
    """decode must invert code for all single-digit numbers."""
    # NOTE(review): xrange is Python 2 only — confirm this suite runs under
    # Python 2, otherwise these three tests need `range` instead.
    for i in xrange(10):
        assert i == decode(code(i))
def test_code_decode_fib_times_10():
    """Round-trip multiples of 10 scaled by Fibonacci numbers."""
    for i in xrange(10):
        num = 10 * fib(i)
        assert num == decode(code(num))
def test_code_decode_fib_times_100_plus_fib():
    """Round-trip composite values mixing hundreds and units digits."""
    for i in xrange(3, 8):
        for j in xrange(3, 8):
            num = 100 * fib(i) + fib(j)
            assert num == decode(code(num))
| true
|
d09e57f7aa83e1573c5a89d060f389e93c737844
|
Python
|
HerosJiyugaoka/Twitter
|
/twitterShooting.py
|
UTF-8
| 3,610
| 2.578125
| 3
|
[] |
no_license
|
import csv
import os
import sys
import requests
import numpy as np
import slackweb
import pandas as pd
from bs4 import BeautifulSoup
import tweepy
# Scrape article titles from the school's news page.
def scraping_tit():
    """Fetch the page and return a list of 1-element lists, each a title string.

    NOTE(review): 'url' and the CSS class '該当ダグ' ("target tag") look like
    placeholders — fill in the real values before use.
    """
    url = 'url'
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    result = []
    for top_news in soup.find_all(class_=['該当ダグ']):
        result.append([
            top_news.text
        ])
    return result
# Scrape article URLs from the school's news page.
def scraping_url():
    """Fetch the page and return a list of 1-element lists, each an absolute URL.

    NOTE(review): 'url' and the CSS class '該当タグ' ("target tag") look like
    placeholders — fill in the real values before use. Relative hrefs are
    prefixed with the site root.
    """
    url = 'url'
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    result = []
    for top_news in soup.find_all(class_=['該当タグ']):
        result.append([
            'https://e-gakkou.jp' + top_news.get('href')
        ])
    return result
# Pair each scraped title with its corresponding URL into one list of rows.
def array_con(result_tit, result_url):
    """Append each URL row onto the matching title row.

    Mutates and returns ``result_tit`` (same object), so each entry becomes
    ``[title, url]``. Generalized from the original, which hard-coded
    ``range(9)`` and crashed (IndexError) on fewer than 9 items; now any
    number of paired rows works — extra unmatched rows are left untouched.
    """
    result = result_tit
    for title_row, url_row in zip(result, result_url):
        title_row.extend(url_row)
    return result
# Persist the scraped (title, url) rows to last_log.csv for the next run.
def output_csv(result):
    """Overwrite last_log.csv with a 'Title,URL' header followed by ``result`` rows."""
    with open('last_log.csv', 'w', newline='', encoding='utf_8') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(['Title', 'URL'])
        writer.writerows(result)
# Load the previous run's rows from last_log.csv into a list of lists.
def read_csv():
    """Return last_log.csv parsed as a list of rows (no header handling).

    Raises a plain Exception (Japanese message) if the file is missing or empty.
    """
    if not os.path.exists('last_log.csv'):
        raise Exception('ファイルがありません。')
    if os.path.getsize('last_log.csv') == 0:
        raise Exception('ファイルの中身が空です。')
    # header=None keeps the 'Title,URL' header line as a data row.
    csv_list = pd.read_csv('last_log.csv', header=None).values.tolist()
    return csv_list
# Compare the freshly scraped rows against the last run and keep only new ones.
def list_diff(result, last_result):
    """Return entries of ``result`` that do not appear in ``last_result``.

    Order and duplicates within ``result`` are preserved.
    """
    return [entry for entry in result if entry not in last_result]
#slackに送信
def send_to_slack(diff_list):
text = '<!channel>\n'
for tmp in diff_list:
text += tmp[0] + '\n' + tmp[1] + '\n'
slack = slackweb.Slack(url='Slack WebHook Url')
slack.notify(text=text)
# Authenticate with Twitter and tweet the newly found blog updates.
def hp_tweet(diff_list):
    """Tweet one status listing each new (title, url) pair with hashtags.

    NOTE(review): the four credential strings are placeholders — supply real
    keys (ideally from environment variables, not source code).
    """
    API_KEY = "api_key"
    API_SECRET = "api_secret"
    ACCESS_TOKEN = "access_token_key"
    ACCESS_TOKEN_SECRET = "access_token_secret"
    auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    api = tweepy.API(auth)
    text = 'ブログ更新しました!\n'
    for tmp in diff_list:
        text += tmp[0] + '\n #ヒーローズ #自由ヶ丘 #個別指導 #学習塾 \n' + tmp[1]
    api.update_status(text)
#HPの更新がない場合定型文をTweet
def fixed_tweet():
API_KEY = "api_key"
API_SECRET = "api_secret"
ACCESS_TOKEN = "access_token_key"
ACCESS_TOKEN_SECRET = "access_token_secret"
auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
text = 'ヒーローズ自由ヶ丘校ではお問い合わせをお待ちしております!\n 是非、お気軽にご相談くださいませ。\n #ヒーローズ #自由ヶ丘 #個別指導 #学習塾 \n'
api.update_status(text)
result_tit = scraping_tit()
result_url = scraping_url()
result = array_con(result_tit,result_url)
csv_list = read_csv()
diff_list=list_diff(result, csv_list)
#更新があったらそれをTweet、slackにも。
if diff_list != []:
send_to_slack(diff_list)
hp_tweet(diff_list)
else:
fixed_tweet()
output_csv(result)
| true
|
2ccde932e8fe6c3759cbb951df0c0fee5b9f359c
|
Python
|
shackspace/body-scanner
|
/dustbin/StepperDriver.py
|
UTF-8
| 2,285
| 2.640625
| 3
|
[] |
no_license
|
import glob, sys, time, os
if not os.getegid() == 0: sys.exit("Run as root for serial port access.")
from pyA20.gpio import gpio
from pyA20.gpio import port
class StepperDriver:
def doStep(self, steps=200):
self.endSleep()
if steps > 50:
self.doRamp(steps=50)
for i in range(0, steps-50):
gpio.output(self.stepPin, 1)
time.sleep(0.001)
gpio.output(self.stepPin, 0)
time.sleep(0.001)
else:
self.doRamp(steps=steps)
self.startSleep()
def doRamp(self, steps=50):
for i in range(1, steps):
gpio.output(self.stepPin, 1)
time.sleep(0.05/i)
gpio.output(self.stepPin, 0)
time.sleep(0.05/i)
def goUp(self, steps=200):
self.setDirectionUp()
self.doStep(steps=steps)
def goDown(self, steps=200):
self.setDirectionDown()
self.doStep(steps=steps)
def goBottom(self):
self.setDirectionDown()
self.endSleep()
self.doRamp()
debounce = 0
for i in range(0, self.height-50):
if gpio.input(self.sensorPin) == 0: debounce += 1
else: debounce = 0
if debounce > 5:
self.goUp(steps=50) #Move the sledge out of the sensor
self.startSleep()
break
gpio.output(self.stepPin, 1)
time.sleep(0.001)
gpio.output(self.stepPin, 0)
time.sleep(0.001)
self.startSleep()
def goTop(self):
self.setDirectionUp()
self.endSleep()
self.doRamp()
for i in range(0, self.height-50):
gpio.output(self.stepPin, 1)
time.sleep(0.001)
gpio.output(self.stepPin, 0)
time.sleep(0.001)
self.startSleep()
def startSleep(self): gpio.output(self.sleepPin, 0)
def endSleep(self): gpio.output(self.sleepPin, 1)
def setDirectionUp(self): gpio.output(self.dirPin, 0)
def setDirectionDown(self): gpio.output(self.dirPin, 1)
def __init__(self):
gpio.init()
self.sensorPin = port.PA8 #Port for the light barrier on the column bottom
self.sleepPin = port.PA9 #Sleep Pin of the Polulu
self.stepPin = port.PA10 #Step Pin of the Polulu
self.dirPin = port.PA20 #Direction Pin of the Polulu
self.height = 8450
#Configure the Pins
gpio.setcfg(self.sensorPin, gpio.INPUT)
gpio.pullup(self.sensorPin, gpio.PULLUP)
gpio.setcfg(self.sleepPin, gpio.OUTPUT)
gpio.setcfg(self.stepPin, gpio.OUTPUT)
gpio.setcfg(self.dirPin, gpio.OUTPUT)
| true
|
14da9f538bc95fe05c2b773056b174581e9b999a
|
Python
|
DmitryMedovschikov/Programming_on_Python.Bioinformatics_Institute
|
/1. Итого по разделу/1.FinalTasks.py
|
UTF-8
| 6,639
| 4.09375
| 4
|
[] |
no_license
|
# Напишите программу, вычисляющую площадь треугольника по переданным длинам
# трёх его сторон по формуле Герона: S = sqrt(p(p−a)(p−b)(p−c)),
# где p=(a+b+c)/2 - полупериметр треугольника. На вход программе подаются
# целые числа, выводом программы должно являться вещественное число,
# соответствующее площади треугольника.
a = int(input())
b = int(input())
c = int(input())
p = (a + b + c) / 2
S = (p * (p - a) * (p - b) * (p - c)) ** (1 / 2)
print(S)
# Напишите программу, принимающую на вход целое число, которая выводит True,
# если переданное значение попадает в интервал: (−15,12]∪(14,17)∪[19,+∞)
# и False в противном случае.
num = int(input())
if -15 < num <= 12 or 14 < num < 17 or num >= 19:
print(True)
else:
print(False)
# Напишите простой калькулятор, который считывает с пользовательского ввода
# три строки: первое число, второе число и операцию, после чего применяет
# операцию к введённым числам ("первое число" "операция" "второе число") и
# выводит результат на экран.
# Поддерживаемые операции: +, -, /, *, mod, pow, div, где
# mod — это взятие остатка от деления,
# pow — возведение в степень,
# div — целочисленное деление.
# Если выполняется деление и второе число равно 0, необходимо выводить
# строку "Деление на 0!"
num_1 = float(input())
num_2 = float(input())
operation = input()
if operation == "+":
result = num_1 + num_2
print(result)
elif operation == "-":
result = num_1 - num_2
print(result)
elif operation == "/":
if num_2 == 0:
print("Деление на 0!")
else:
result = num_1 / num_2
print(result)
elif operation == "*":
result = num_1 * num_2
print(result)
elif operation == "mod":
if num_2 == 0:
print("Деление на 0!")
else:
result = num_1 % num_2
print(result)
elif operation == "pow":
result = num_1 ** num_2
print(result)
elif operation == "div":
if num_2 == 0:
print("Деление на 0!")
else:
result = num_1 // num_2
print(result)
# Комнаты бывают треугольные, прямоугольные и круглые. Требуется написать
# программу, на вход которой подаётся тип фигуры комнаты и соответствующие
# параметры, которая бы выводила площадь получившейся комнаты. Для числа π
# используют значение 3.14. Ниже представлены форматы ввода:
# треугольник
# a
# b
# c
# где a, b и c — длины сторон треугольника
#
# прямоугольник
# a
# b
# где a и b — длины сторон прямоугольника
#
# круг
# r
# где r — радиус окружности
PI = 3.14
f = input()
if f == "треугольник":
a = int(input())
b = int(input())
c = int(input())
p = (a + b + c) / 2
S = (p * (p - a) * (p - b) * (p - c)) ** (1 / 2)
print(S)
elif f == "прямоугольник":
a = int(input())
b = int(input())
S = a * b
print(S)
elif f == "круг":
r = int(input())
S = PI * r ** 2
print(S)
# Read three integers (one per line) and print them on three separate lines:
# maximum first, then minimum, then the remaining middle value. Duplicate
# inputs are allowed.
num_1 = int(input())
num_2 = int(input())
num_3 = int(input())
max_num = num_1
min_num = num_1
# A value larger than the running max cannot also be below the running min,
# so if/elif per number is sufficient.
if max_num < num_2:
    max_num = num_2
elif min_num > num_2:
    min_num = num_2
if max_num < num_3:
    max_num = num_3
elif min_num > num_3:
    min_num = num_3
# The middle value is what remains after removing the max and the min.
midl_num = (num_1 + num_2 + num_3) - (max_num + min_num)
# Bug fix: the original printed the literal string "/n" between the numbers
# on one line; the task requires three lines, one value per line.
print(max_num)
print(min_num)
print(midl_num)
# Read a non-negative integer n and print it with the correctly declined
# Russian word "программист" (programmer): 1 программист, 2 программиста,
# 5 программистов, etc. Works at least up to 1000.
num = int(input())
word = "программист"
# Russian plural rules: "-ов" for 0, 5-9, and the teens 11-14;
# no suffix for numbers ending in 1 (except 11); "-а" for 2-4.
if num % 10 == 0 or 5 <= num % 10 <= 9 or 11 <= num % 100 <= 14:
    word_tail = "ов"
elif num % 10 == 1 and num % 100 != 11:
    word_tail = ""
elif 2 <= num % 10 <= 4:
    word_tail = "а"
print(num, word + word_tail)
# Билет считается счастливым, если сумма первых трех цифр совпадает с суммой
# последних трех цифр номера билета. Необходимо написать программу, которая
# проверит равенство сумм и выведет "Счастливый", если суммы совпадают, и
# "Обычный", если суммы различны. На вход программе подаётся строка из 6 цифр.
# Выводить нужно только слово "Счастливый" или "Обычный", с большой буквы.
num = int(input())
num_1 = num // 100000
num_2 = num % 100000 // 10000
num_3 = num % 10000 // 1000
num_4 = num % 1000 // 100
num_5 = num % 100 // 10
num_6 = num % 10
if num_1 + num_2 + num_3 == num_4 + num_5 + num_6:
print("Счастливый")
else:
print("Обычный")
| true
|
8953753046a1081d6a33683778e0165f7305899e
|
Python
|
pharick/python-coursera
|
/week5/20-more-than-siblings.py
|
UTF-8
| 192
| 3.421875
| 3
|
[] |
no_license
|
# Read a line of space-separated integers and count the strict local maxima:
# interior elements larger than both neighbours (ends never count).
numbers = list(map(int, input().split()))
count = 0
for i in range(1, len(numbers) - 1):
    if numbers[i] > numbers[i - 1] and numbers[i] > numbers[i + 1]:
        count += 1
print(count)
| true
|
aca60845c457c1ab42771bb28a929bba08f24b19
|
Python
|
mmmvdb/pythonPractice
|
/os/folderRegex/folderRegex.py
|
UTF-8
| 1,512
| 3.890625
| 4
|
[] |
no_license
|
#folderRegex
# Takes a folder and regex expression supplied by argument to the script, and searches all txt files with that folder with the
# regex, displaying any matches in the console
# folderRegex.py - Search a folder for all .txt files and search them with the regex
# Usage: folderRegex.py <path> <regex expression>
# Example: folderRegex.py C:\Windows\Temp ^Hello
import sys, re, os
# ==== Gather arguments ====
if len(sys.argv) == 3:
    path = sys.argv[1]
    reString = sys.argv[2]
    print(reString)
    # Compile once; the same pattern is reused for every line of every file.
    reSearch = re.compile(reString)
    # ==== Navigate to the folder ====
    if os.path.isdir(path):
        os.chdir(path)
    else:
        # As in the original, an invalid path falls through and scans the
        # current working directory instead.
        print(path + ' is an invalid path')
    # ==== Gather all txt files ====
    for filename in os.listdir('.'):
        if filename.endswith('.txt'):
            # ==== In each file, use the regex to find a match ====
            # Bug fix: use a context manager so the handle is closed even if
            # reading raises (the original open/readlines/close leaked on error).
            with open(os.path.join(os.getcwd(), filename)) as file:
                fileLineList = file.readlines()
            for line in fileLineList:
                # 'is not None' is the idiomatic None check (was '!= None').
                if reSearch.search(line) is not None:
                    # ==== Print the result ====
                    print(filename + ': ' + line)
else:
    print('folderRegex.py - Search a folder for all .txt files and search them with the regex')
    print(' Usage: folderRegex.py <path> <regex expression>')
    print(' Example: folderRegex.py C:\Windows\System32 ^Hello')
| true
|
075fa3421b6c41b4e18086a8a35f332032957f5d
|
Python
|
allensummer/Tensorflow-Examples
|
/examples/1.Introduction/example2.py
|
UTF-8
| 430
| 3.5
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
create on Tue Nov 7 2017
@autor:allen
function:Addiction with two constans using tensorflow
"""
import tensorflow as tf
a = tf.constant(2)#定义一个常量
b = tf.constant(3)
sess = tf.Session()#连接session
print "a=2, b=3"
print "两个常量相加:%i"% sess.run(a + b)#启动图
print "两个常量相乘: %i" % sess.run(a * b)#启动图
#result
#两个常量相加:5
#两个常量相乘:6
| true
|
7c681a91ca7a99bef9a4b01b292641200a2cc0d0
|
Python
|
maryonella02/Codeforces-Laboratory
|
/10A.py
|
UTF-8
| 2,053
| 3.84375
| 4
|
[] |
no_license
|
"""Power Consumption Calculation
We have n periods of time when someones work at the laptop from start time and end time.
And p1 power consumption fo active mode, p2 power consumption for eco mode, p3 power consumption for sleep mode.
And t1 is time after someone left laptop but it is still active.
and t2 time is time period when sleep mode is on, so time interval between eco mode is on and sleep mode is on."""
n, p1, p2, p3, t1, t2 = map(int, input().split()) # take the input
ans = 0
previous = -1 # will be the value of last end value from periods
while n > 0: # for n periods of time
n -= 1
start, end = map(int, input().split()) # take the time period
ans += (end - start) * p1 # this was an active period so we multiply to p1 and add to answer
if previous != -1: # here we find if we have one more n period
x = start - previous # find time when no one worked at the laptop
if x > t1: # if x is bigger this means that t1 time laptop was active
ans += t1 * p1 # and we add this time to answer
x -= t1 # here we find the remaining time
if x > t2: #if remaining time is bigger than t2, this means that laptop go to eco mode
ans += t2 * p2 # add power laptop spend on eco mode to answer
x -= t2# from remaining time we exclude the previous period t2
ans += x * p3 # the remaining time is multiplied to power laptop spend in sleep mode, because here is no time limit
else: # if remaining time is smaller than t2, this means that x is period of time that need to be multiplied to p2
ans += x * p2 # this is computed and added to answer
else:# if x is smaller than t1 period, this means that x includes in time laptop is still active, but nobody works at him
ans += x * p1 # so x is multiply to active power spending p1
previous = end # set previous to end , to start in future iteration the needed operations if we have one more n time period
print(ans) # print the final asnwer
| true
|
6f1278010ed41704a0a5efde1bd80cea1ae16be5
|
Python
|
stevenhorsman/advent-of-code-2017
|
/day-12/digital_plumber.py
|
UTF-8
| 690
| 3.453125
| 3
|
[] |
no_license
|
import re
import networkx as nx
input_file = 'day-12/input.txt'
def create_graph(input):
    """Build an undirected networkx graph from lines like '0 <-> 1, 2'.

    Each line names a program and its comma-separated neighbours.
    NOTE(review): the parameter name shadows the builtin input().
    """
    graph = nx.Graph()
    for line in input.splitlines():
        program, neighbours = line.split(' <-> ')
        graph.add_node(program)
        # add_edge implicitly creates neighbour nodes not yet seen.
        for neighbour in [prog.strip() for prog in neighbours.split(",")]:
            graph.add_edge(program, neighbour)
    return graph
def part1(input):
    """Return the size of the connected component containing program '0'."""
    graph = create_graph(input)
    return len(nx.node_connected_component(graph, '0'))
def part2(input):
    """Return the total number of connected components (program groups)."""
    graph = create_graph(input)
    return nx.number_connected_components(graph)
if __name__ == "__main__":
with open(input_file) as f:
data = f.read()
print("Part 1: ", part1(data))
print("Part 2: ", part2(data))
| true
|
1111968353a7b48594cde712641f953d43173367
|
Python
|
tealen/TeaTasks
|
/teatasks/db_api.py
|
UTF-8
| 624
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine("sqlite:///data/test.db", echo=True)
Base = declarative_base()
class Lists(Base): # type: ignore
    """A named to-do list; parent of Items rows."""
    __tablename__ = "lists"
    list_id = Column(Integer, primary_key=True)   # surrogate primary key
    list_name = Column(String)                    # display name of the list
class Items(Base): # type: ignore
    """A single task belonging to one Lists row."""
    __tablename__ = "items"
    item_id = Column(Integer, primary_key=True)               # surrogate primary key
    list_id = Column(Integer, ForeignKey("lists.list_id"))    # owning list
    title = Column(String)                                    # short task title
    content = Column(String)                                  # free-form task body
Base.metadata.create_all(engine)
| true
|
45986ea9d7efffc415890a9349fc574bf74f9bfb
|
Python
|
ncantrell/keras-progan-inference
|
/progan_layers.py
|
UTF-8
| 1,658
| 2.53125
| 3
|
[] |
no_license
|
import tensorflow as tf
from keras.layers import Layer
import keras.backend as kb
def _pixel_norm(x, epsilon=1e-8, channel_axis=-1):
with tf.variable_scope('PixelNorm'):
return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=channel_axis, keepdims=True) + epsilon)
class PixelNorm(Layer):
def __init__(self, channel_axis=-1, **kwargs):
self.channel_axis = channel_axis
super().__init__()
def call(self, x):
return _pixel_norm(x, channel_axis=self.channel_axis)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
return {
'channel_axis': self.channel_axis,
**super().get_config()
}
def _upscale2d(x, factor=2):
# Channels last upscale
assert isinstance(factor, int) and factor >= 1
if factor == 1: return x
with tf.variable_scope('Upscale2D'):
s = x.shape
x = tf.reshape(x, [-1, s[1], 1, s[2], 1, s[3]])
x = tf.tile(x, [1, 1, 1, factor, factor, 1])
x = tf.reshape(x, [-1, s[1] * factor, s[2] * factor, s[3]])
return x
class Upscale2D(Layer):
def call(self, x):
return _upscale2d(x)
def compute_output_shape(self, input_shape):
batch_size, h, w, c = input_shape
output_shape = [batch_size, h*2, w*2, c]
return tuple(output_shape)
class ToChannelsLast(Layer):
def call(self, x):
return kb.permute_dimensions(x, [0, 2, 3, 1])
def compute_output_shape(self, input_shape):
batch_size, c, h, w = input_shape
output_shape = [batch_size, h, w, c]
return tuple(output_shape)
custom_objects = {
'Upscale2D': Upscale2D,
'PixelNorm': PixelNorm,
'ToChannelsLast': ToChannelsLast,
}
| true
|
f1dd5d850ea61c49cc3b09f76266b79b53a76287
|
Python
|
kitsmart/pythonbooklet
|
/Chapter 6/Practice Exercise 6/9 Sum list.py
|
UTF-8
| 259
| 3.640625
| 4
|
[] |
no_license
|
def sum_list(list):
    """Return the sum of all numbers in *list*.

    NOTE: the parameter name shadows the builtin list(); kept for
    backward compatibility with existing callers.
    """
    total = 0
    for value in list:
        total += value
    return total
def list_of_deviation(list):
    """Print the deviation (mean - x) of each element of *list* from the mean.

    Fixes over the original, which did not parse: missing ':' after the def,
    an invalid loop target ('for i.list in list:'), and a redundant manual
    counter. The mean is computed with the builtin sum().
    """
    mean = sum(list) / float(len(list))
    for value in list:
        print(mean - value)
list_of_deviation(1, 2, 3, 4, 5, 6, 7, 8)
| true
|
8a9d7951d7ad6239d35778a0d304584d115c65be
|
Python
|
TheShubhamJindal/movie-recommender
|
/Recomend.py
|
UTF-8
| 1,658
| 3.40625
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as nm
from collections import OrderedDict
print ("Lets do this")
movies_df = pd.read_csv('/home/prafful/Desktop/ml-latest-small/movies.csv', header=None, names=['movie_id', 'movie_title', 'movie_genre'])
movies_df = pd.concat([movies_df, movies_df.movie_genre.str.get_dummies(sep='|')], axis=1)
'''
print("Enter the movie name")
movie_name=raw_input()
#print (movies_df.iloc[1426][1])
for x in range(0,9000):
if (movie_name==movies_df.iloc[x][1]):
print (movies_df.loc[x])
'''
class Genre:
    """A movie genre paired with its preference weight."""
    def __init__(self, gen, points):
        # Conventional 'self' replaces the original's 'movie' receiver name.
        self.gen = gen
        self.points = points
    def d(self):
        # Debug helper: show the genre name and its weight.
        print (self.gen, self.points)
print ("Enter 3 Genres")
genre_1=raw_input()
genre_2=raw_input()
genre_3=raw_input()
x=Genre(genre_1,5)
y=Genre(genre_2,3)
z=Genre(genre_3,2)
movie_categories = movies_df.columns[3:]
p=[]
for i in movie_categories:
if(x.gen==i):
p.append(x.points)
elif(y.gen==i):
p.append(y.points)
elif(z.gen==i):
p.append(z.points)
else:
p.append(0)
print (p)
def dot_product(vector_1, vector_2):
    """Return the dot product of two equal-length numeric vectors (0 if empty)."""
    total = 0
    for a, b in zip(vector_1, vector_2):
        total += a * b
    return total
def get_movie_score(movie_features, p):
    """Score a movie: dot product of its genre indicator vector with profile ``p``."""
    return dot_product(movie_features, p)
def get_movie_recommendations(p, n_recommendations):
#we add a column to the movies_df dataset with the calculated score for each movie for the given user
movies_df['score'] = movies_df[movie_categories].apply(get_movie_score,
args=([p]), axis=1)
print( movies_df.sort_values(by=['score'], ascending=False)['movie_title'][:n_recommendations])
get_movie_recommendations(p, 10)
| true
|
d32dfb78aab4cd5d0f4d346adf889bf199e1362d
|
Python
|
jforty11/BE107Group1repo
|
/lab5/detect_flies.py
|
UTF-8
| 2,999
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/python
import numpy as np # Presumably something to do with numbers
import cv2 # Image processing
from matplotlib import pyplot as plt # Plotting function or something
directory = "/home/lev/Documents/College/Be107/Week5/videos_for_tracking/larvae_stills/"
prefix = "frame000"
suffix = ".jpg"
infix = [0, 1, 2, 3, 4, 5, 6, 7]
img = cv2.imread(directory + prefix + str(infix[6]) + suffix, cv2.IMREAD_GRAYSCALE)
print(directory + prefix + str(infix[6]) + suffix)
# What are the dimensions of the image?
print img
# Invert image
img = 255 - img
img2 = img # Copy
# Threshold image -- might be better to use THRESH_TRUNC
ret, thresh = cv2.threshold(img, 50, 255, cv2.THRESH_TOZERO_INV)
#cv2.imshow('Regular thresholding', thresh)
# Threshold with adaptive thresholding
thresh2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 19, 8)
#cv2.imshow('Adaptive thresholding', thresh2)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours2, hierarchy2 = cv2.findContours(thresh2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contour_mean = []
for i, e in enumerate(contours):
cont_x=[]
cont_y=[]
for i2, val in enumerate(e):
for i3, val2 in enumerate(val):
#print(e)
cont_x.append(val2[0])
cont_y.append(val2[1])
cont_mean = [np.mean(cont_x), np.mean(cont_y)]
contour_mean.append(cont_mean)
#print contours
#cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
#cv2.drawContours(img2, contours2, -1, (0, 255, 0), 3)
#cv2.imshow('dis image doe', img)
#cv2.imshow('dis inmage doeeee', img2)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# Accumulate weighted
total_img = np.zeros((480, 752))
for x in infix:
img = cv2.imread(directory + prefix + str(x) + suffix, cv2.IMREAD_GRAYSCALE)
cv2.accumulateWeighted(img, total_img, 0.1)
# If we want to dispaly total_img we have to divide by 255 since float types are assumed to scale from 0 to 1 rather than 0 to 255
#total_img /= 255
#cv2.imshow('dat booty and we call her', total_img)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
for x in infix:
img = cv2.imread(directory + prefix + str(x) + suffix, cv2.IMREAD_GRAYSCALE)
img = cv2.absdiff(img, total_img.astype('uint8'))
img = 255 - img
ret, thresh = cv2.threshold(img, 175, 255, cv2.THRESH_TOZERO_INV)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
contour_mean = []
for i, e in enumerate(contours):
cont_x=[]
cont_y=[]
for i2, val in enumerate(e):
for i3, val2 in enumerate(val):
#print(e)
cont_x.append(val2[0])
cont_y.append(val2[1])
cont_mean = [np.mean(cont_x), np.mean(cont_y)]
contour_mean.append(cont_mean)
for circ, cval in enumerate(contour_mean):
cv2.circle(img, (int(cval[0]),int(cval[1])), 8, 'red')
cv2.imshow('dflickoooo', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true
|
ea387a142facc7771d3b5b9e7466c32718fcc18e
|
Python
|
finkeler/BqFieldSizeAnalyzer
|
/TableMetadata.py
|
UTF-8
| 1,347
| 3.140625
| 3
|
[] |
no_license
|
import datetime
from collections import OrderedDict
class TableMetadata(object):
    """Snapshot of a BigQuery table's metadata (name, schema, timestamps, size)."""

    def __init__(self, table_name, json_schema, creation_time, last_modified_time, num_rows, num_bytes):
        self._name = table_name
        self._json_schema = json_schema
        self._creation_time = creation_time
        self._last_modified_time = last_modified_time
        self._num_rows = num_rows
        self._num_bytes = num_bytes

    def set_last_modified_time(self, last_modified_time):
        """Update the last-modified timestamp."""
        self._last_modified_time = last_modified_time

    def set_num_bytes(self, num_bytes):
        """Update the stored byte count."""
        self._num_bytes = num_bytes

    def set_num_rows(self, num_rows):
        """Update the stored row count."""
        self._num_rows = num_rows

    def encode(t):
        """Serialize *t* into an OrderedDict suitable for JSON output.

        Raises TypeError for non-TableMetadata objects (json.dumps default hook
        convention). Timestamps are formatted as 'YYYY-MM-DD HH:MM:SS'.
        """
        # Guard clause instead of the original if/else nesting.
        if not isinstance(t, TableMetadata):
            raise TypeError('Object of type {} is not JSON serializable'.format(t.__class__.__name__))
        stamp = '%Y-%m-%d %H:%M:%S'
        encoded = OrderedDict()
        encoded['tableName'] = t._name
        encoded['tableCreationTime'] = t._creation_time.strftime(stamp)
        encoded['tableLastModifiedTime'] = t._last_modified_time.strftime(stamp)
        encoded['tableRows'] = t._num_rows
        encoded['tableNumBytes'] = t._num_bytes
        return encoded
| true
|
55ea2505de7eb3e224a61b253c788b15fa4f29b5
|
Python
|
psarz/python-lpthw_practice_exercises-
|
/ex15.py
|
UTF-8
| 736
| 3.90625
| 4
|
[] |
no_license
|
from sys import argv
# Python 2 script (print statements, raw_input): echo a file's contents,
# then ask for the name again and echo it a second time.
script, filename = argv
# Unpack the script name and the target filename from the command line.
txt = open(filename)
# NOTE(review): handles are never closed; fine for a short-lived exercise script.
print "here's your file %r:" % filename
# Dump the whole file to the console.
print txt.read()
print "Type the file name again:"
file_again = raw_input(">")
# Re-open whatever name the user typed (may differ from argv's file).
txt_again = open(file_again)
print txt_again.read()
# Prints the second file's full contents.
| true
|
fb0b10e84c1b2019deb874beead39fcd57d5e3d8
|
Python
|
hakataramen/trial
|
/cp2.py
|
UTF-8
| 195
| 2.921875
| 3
|
[] |
no_license
|
import sys

# Simple file copy: cp2.py <source> <dest>. Echoes each copied line to stdout.
print(len(sys.argv))
print(sys.argv[1])
print(sys.argv[2])
f = sys.argv[1]
t = sys.argv[2]
# Bug fix: the original called `cp.close` without parentheses, which never
# closed the destination, and leaked the source handle. `with` closes both
# even if an error occurs mid-copy.
with open(f) as src, open(t, "w") as cp:
    for line in src:
        print(line, end=" ")
        cp.write(line)
| true
|
4286401d0d750fec4fdb4b9ae74659864a0e17b6
|
Python
|
vivekmahajan/phrase-based-decoder
|
/decoder.py
|
UTF-8
| 8,853
| 2.640625
| 3
|
[] |
no_license
|
from heap import Heap
import sys
from math import log
import copy
'''
kenlm_swig_wrapper = "/cs/natlang-sw/Linux-x86_64/NL/LM/NGRAM-SWIG-KENLM"
print "wrapper!!!!",kenlm_swig_wrapper
if ( kenlm_swig_wrapper is None ):
sys.stderr.write("Error: Environment variable NGRAM_SWIG_KENLM is not set. Exiting!!\n")
sys.exit(1)
sys.path.insert(1, kenlm_swig_wrapper)
from kenlm import *
path_lm = "/cs/natlang-data/wmt10/lm/eparl_nc_news_2m.en.lm"
lm = readLM(path_lm)
'''
class phrase_table:
    """Phrase table loaded from a ' ||| '-separated file.

    Maps source phrase -> list of (target phrase, probability-string) tuples.
    NOTE(review): dict.has_key is Python 2 only — this module targets Python 2.
    """
    def __init__(self, filename):
        # Handle is consumed immediately by parse_file (and never closed).
        self.phrase_table_file = open(filename, "r")
        self.phrase_table = self.parse_file()
    def parse_file(self):
        """Read every 'src ||| tgt ||| prob' line into the lookup dict."""
        phrase_table = {}
        for line in self.phrase_table_file:
            # line[:-1] strips the trailing newline.
            splits = line[:-1].split(' ||| ')
            if phrase_table.has_key(splits[0]):
                phrase_table[splits[0]].append((splits[1], splits[2]))
            else:
                phrase_table[splits[0]] = []
                phrase_table[splits[0]].append((splits[1], splits[2]))
        return phrase_table
class Hypothesis:
    """A partial translation hypothesis on a decoder stack.

    Tracks which source words are covered (trans_source), the target string
    built so far (dest), and the combined model scores.
    """

    def __init__(self, trans_source, dest, p_Lm, p_pt, dis, stack_id, end_d):
        # The language-model score arrives as a log10 value; store the probability.
        self.p_Lm = 10.0 ** float(p_Lm)
        self.p_pt = float(p_pt)
        self.dis = float(dis)
        # Deep copies so extending one hypothesis never mutates its parent.
        self.dest = copy.deepcopy(dest)
        self.trans_source = copy.deepcopy(trans_source)
        self.stack_id = stack_id
        self.end_d = end_d

    def get_priority(self):
        """Combined LM x phrase-table x distortion score, used for stack ordering."""
        return self.p_Lm * self.p_pt * self.dis

    def get_mld(self):
        """Same combined score; kept as a separate accessor for the callers that use it."""
        return self.p_Lm * self.p_pt * self.dis
def generate_gaps(trans_source, end_d):
    """Collect runs of untranslated source words ("gaps").

    trans_source is a list of (word, translated_flag) pairs; end_d is the index
    where the previous phrase ended. Returns a list of (words, start_index)
    tuples, keeping only gaps whose start lies within the global distortion
    limit `dis_limit` of end_d.
    NOTE(review): `dis_limit` is a module-level global defined elsewhere —
    confirm it is set before this is called. Also `len(gap) is not 0` relies
    on CPython small-int identity; `!= 0` is the correct comparison.
    """
    #calculating the gaps
    string_gaps = []
    gap = []
    start_index = -1
    for i in range(0, len(trans_source)):
        if trans_source[i][1] == 1 :
            # Hit a translated word: close off the current gap, if any.
            if len(gap) is not 0:
                if abs(start_index - end_d) < dis_limit:
                    string_gaps.append((gap, start_index))
                gap = []
                start_index = -1
        else:
            # Untranslated word: extend (or start) the current gap.
            gap.append(trans_source[i][0])
            if start_index == -1:
                start_index = i
    # Flush a gap that runs to the end of the sentence.
    if len(gap) is not 0:
        if abs(start_index - end_d) < dis_limit:
            string_gaps.append((gap, start_index))
    return string_gaps
def lang_model(destination):
    """Language-model probability of the target string.

    Currently stubbed to 1.0 (no LM contribution); the commented code below
    shows the disabled SRILM/KenLM integrations.
    """
    #return 1.0
    #return KENLangModel.queryLM(destination.split(" "),len(destination.split(" "))) # for SRILM wrapper
    '''
    l = len(destination.split(" "))
    if l > 5:
        l = 5
    return getNGramProb(lm, destination , l, 'true')
    '''
    return 1.0
def generate_all_hypothesis(hp):
    """Expand hypothesis *hp* by translating a 1-, 2- or 3-word span of any gap.

    For every untranslated gap and every uni/bi/trigram inside it that exists
    in the global phrase table ``pt``, build a new Hypothesis with updated
    coverage, destination string, scores and distortion penalty.
    Uses the module-level globals ``pt`` and ``alpha``; Python 2 only
    (``dict.has_key``).
    """
    # This will contain all the candidate expansions.
    hypothesis = []
    string_gaps = generate_gaps(hp.trans_source, hp.end_d)
    # Generate unigram, bigram and trigram expansions for each gap.
    for gap in string_gaps:
        # gap == (list_of_words, start_index_in_source)
        for i in range(0, len(gap[0])):
            # --- unigram ending at position i of the gap ---
            uni = gap[0][i]
            if pt.phrase_table.has_key(uni):
                for trans in pt.phrase_table[uni]:
                    uni_trans_source = copy.deepcopy(hp.trans_source)
                    uni_trans_source[gap[1]+i] = (gap[0][i], 1)
                    uni_dest = hp.dest + trans[0] + " "
                    uni_p_Lm = lang_model(uni_dest.rstrip())
                    uni_p_pt = hp.get_mld() * float(trans[1])
                    # Distortion: exponential penalty on jump distance.
                    uni_dis = pow(alpha, abs(hp.end_d-i-gap[1]))
                    uni_stack_id = hp.stack_id + 1
                    uni_end_d = i + gap[1]
                    hpu = Hypothesis(trans_source = uni_trans_source, dest=uni_dest, p_Lm=uni_p_Lm, p_pt=uni_p_pt, dis=uni_dis, stack_id=uni_stack_id, end_d=uni_end_d)
                    hypothesis.append(hpu)
            # --- bigram ending at position i ---
            if i > 0 :
                bi = gap[0][i-1]+" "+gap[0][i]
                if pt.phrase_table.has_key(bi):
                    for trans in pt.phrase_table[bi]:
                        bi_trans_source = copy.deepcopy(hp.trans_source)
                        bi_trans_source[gap[1]+i] = (gap[0][i], 1)
                        bi_trans_source[gap[1]+i-1] = (gap[0][i-1], 1)
                        bi_dest = hp.dest + trans[0] + " "
                        bi_p_Lm = lang_model(bi_dest.rstrip())
                        bi_p_pt = hp.get_mld() * float(trans[1])
                        bi_dis = pow(alpha, abs(hp.end_d-i-1-gap[1]))
                        bi_stack_id = hp.stack_id + 2
                        bi_end_d = i + gap[1]
                        hpb = Hypothesis(trans_source = bi_trans_source, dest=bi_dest, p_Lm=bi_p_Lm, p_pt=bi_p_pt, dis=bi_dis, stack_id=bi_stack_id, end_d=bi_end_d)
                        hypothesis.append(hpb)
            # --- trigram ending at position i ---
            if i > 1 :
                tri = gap[0][i-2]+" "+gap[0][i-1]+" "+gap[0][i]
                if pt.phrase_table.has_key(tri):
                    for trans in pt.phrase_table[tri]:
                        tri_trans_source = copy.deepcopy(hp.trans_source)
                        tri_trans_source[gap[1]+i] = (gap[0][i], 1)
                        tri_trans_source[gap[1]+i-1] = (gap[0][i-1], 1)
                        tri_trans_source[gap[1]+i-2] = (gap[0][i-2], 1)
                        tri_dest = hp.dest + trans[0] + " "
                        tri_p_Lm = lang_model(tri_dest.rstrip())
                        tri_p_pt = hp.get_mld() * float(trans[1])
                        tri_dis = pow(alpha, abs(hp.end_d-i-2-gap[1]))
                        tri_stack_id = hp.stack_id + 3
                        tri_end_d = i + gap[1]
                        hpt = Hypothesis(trans_source = tri_trans_source, dest=tri_dest, p_Lm=tri_p_Lm, p_pt=tri_p_pt, dis=tri_dis, stack_id=tri_stack_id, end_d=tri_end_d)
                        hypothesis.append(hpt)
    return hypothesis
class Decoder:
    """Beam-search stack decoder for phrase-based machine translation.

    One stack per number of translated source words; relies on the
    module-level globals ``beam`` (stack width) and an external ``Heap``
    class (min-heap keyed on hypothesis priority).
    """
    def __init__(self, phrase_table):
        self.phrase_table = phrase_table
        self.source = ""
        self.stacks = {}
    def decode(self, source):
        # Translate one space-separated sentence; returns the final stack.
        self.source = source.split(" ")
        self.clear_stacks()
        self.init_stacks()
        # Initialize the first stack with the empty hypothesis
        # (all source words untranslated).
        trans_source = []
        for i in range(0, len(self.source)):
            trans_source.append((self.source[i], 0))
        hp = Hypothesis(trans_source=trans_source, dest="", p_Lm=1, p_pt=1, dis=1, stack_id=0, end_d=0)
        self.stacks[0].push(1, hp)
        for i in range(0, len(self.source)):
            # Pop and expand every hypothesis from the i-th stack.
            #print "size of the %s stack = " % i , self.stacks[i].__len__()
            while self.stacks[i].__len__() > 0:
                hp = self.stacks[i].pop()
                for hypothesis in generate_all_hypothesis(hp):
                    stack_no = hypothesis.stack_id
                    if self.stacks[stack_no].__len__() >= beam:
                        # Stack full: replace the worst element (heap root)
                        # only if the new hypothesis scores higher.
                        root_prob = self.stacks[stack_no]._heap[0][0]
                        if root_prob < hypothesis.get_priority():
                            self.stacks[stack_no].pop()
                            self.stacks[stack_no].push(hypothesis.get_priority(), hypothesis)
                    else:
                        self.stacks[stack_no].push(hypothesis.get_priority(), hypothesis)
        # Stack indexed by full sentence length holds complete translations.
        return self.stacks[len(self.source)]
    def clear_stacks(self):
        # Drop all stacks from any previous sentence.
        del self.stacks
        self.stacks = {}
    def init_stacks(self):
        # One stack per coverage count 0..len(source).
        for i in range(0, len(self.source)+1):
            self.stacks[i] = Heap()
if __name__ == '__main__':
    # CLI entry point (Python 2): load the phrase table, decode each input
    # line and print up to n_best translations sorted best-first.
    global beam, dis_limit, pt, alpha
    if len(sys.argv) == 7:
        phrase_table_filename = sys.argv[1]
        decoder_input_filename = sys.argv[2]
        beam = int(sys.argv[3])
        alpha = float(sys.argv[4])
        dis_limit = int(sys.argv[5])
        n_best = int(sys.argv[6])
    else:
        print >> sys.stderr, "usage:python %s phrase_table_file decoder_input beam alpha dis_limit n_best" % sys.argv[0]
        sys.exit(-1)
    pt = phrase_table(phrase_table_filename)
    decoder = Decoder(pt)
    for line in open(decoder_input_filename, "r"):
        output = decoder.decode(line[:-1].rstrip())
        stack = []
        print ">>>>>>>> ", line[:-1].rstrip(), " <<<<<<<<<"
        #print "length ",output.__len__()
        if output.__len__() == 0:
            print "Could not translate"
            continue
        # Drain the min-heap so 'stack' ends worst-to-best, then pop from
        # the end to print the best translations first.
        while output.__len__() > 0:
            obj = output.pop()
            stack.append(obj)
            #print obj.dest, obj.get_priority(), obj.p_Lm
            #print output._heap[i][1].dest, output._heap[i][1].get_priority()
        for i in range(0, n_best):
            if len(stack) == 0:
                break
            obj = stack.pop()
            print obj.dest
| true
|
929ba3170f72363612087881490594282104fcfa
|
Python
|
sghosh1991/InterviewPrepPython
|
/LeetCodeProblemsMedium/162_peak_finding.py
|
UTF-8
| 900
| 3.484375
| 3
|
[] |
no_license
|
"""
"""
class Solution(object):
    def findPeakElement(self, nums):
        """Binary-search for a peak element (>= both circular neighbors).

        Returns the peak VALUE (note: LeetCode 162 asks for the index --
        kept as-is because the callers below print values), or -1 for an
        empty list.

        Fixes: ``//`` instead of ``/`` so the midpoint stays an int under
        Python 3 (``/`` would produce a float index and raise TypeError);
        local ``next`` renamed to avoid shadowing the builtin.
        :type nums: List[int]
        :rtype: int
        """
        lo = 0
        hi = len(nums) - 1
        num_elements = len(nums)
        if hi < 0:
            return -1  # empty input
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            # Neighbors wrap around circularly via modulo.
            nxt = (mid + 1) % num_elements
            prev = (mid + num_elements - 1) % num_elements
            if nums[mid] >= nums[prev] and nums[nxt] <= nums[mid]:
                return nums[mid]
            elif nums[mid] < nums[nxt]:
                lo = mid + 1
            else:
                hi = mid - 1
if __name__ == "__main__":
x = Solution()
print x.findPeakElement([])
print x.findPeakElement([1])
print x.findPeakElement([1,1])
print x.findPeakElement([8,6,5,3])
print x.findPeakElement([8,9,10,11])
print x.findPeakElement([8,6,15,1])
| true
|
bac5c6bd93c199640e9cdcf5488b36ca55fd034c
|
Python
|
zer0tonin/Ulfenkarn
|
/ulfenkarn/util.py
|
UTF-8
| 240
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
import re
mention_regex = re.compile("<@!([0-9]+)>")
def mention_to_id(mention):
match = mention_regex.findall(mention)
if len(match) == 1:
return match[0]
raise ValueError("Invalid user mention: {}".format(mention))
| true
|
2b3a51a0910d7224418ec45f185fcccdf757a467
|
Python
|
wglass/rotterdam
|
/rotterdam/team.py
|
UTF-8
| 2,379
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
import errno
import os
import signal
import sys
class Team(object):
    """Pool of forked worker processes owned by *master*.

    Workers are instances of *worker_class*, tracked in ``self.workers``
    as a {pid: worker} mapping.
    """
    def __init__(self, master, worker_class):
        self.master = master
        self.worker_class = worker_class
        self.workers = {}
    @property
    def size(self):
        # Number of currently tracked worker processes.
        return len(self.workers)
    def set_size(self, new_size):
        # Grow or shrink the pool until it matches new_size exactly.
        while len(self.workers) > new_size:
            self.remove_worker()
        while len(self.workers) < new_size:
            self.add_worker()
    def add_worker(self):
        # Fork a new worker. The parent records the child's pid and returns;
        # the child runs the worker loop and exits without returning.
        worker = self.worker_class(self.master)
        pid = os.fork()
        if pid != 0:
            self.workers[pid] = worker
            return
        try:
            worker.run()
            sys.exit(0)
        except SystemExit:
            raise
        except Exception:
            self.master.logger.exception(
                "Unhandled exception in %s process", worker.name
            )
            sys.exit(-1)
        finally:
            self.master.logger.info("%s process exiting", worker.name)
    def remove_worker(self):
        # Retire the oldest worker (smallest .age after sorting) via SIGQUIT.
        (oldest_worker_pid, _) = sorted(
            self.workers.items(),
            key=lambda i: i[1].age
        ).pop(0)
        self.workers.pop(oldest_worker_pid)
        self.send_signal(signal.SIGQUIT, oldest_worker_pid)
    def broadcast(self, signal):
        # Send the given signal to every worker.
        # NOTE(review): the parameter shadows the 'signal' module here.
        for worker_pid in self.workers:
            self.send_signal(signal, worker_pid)
    def send_signal(self, signal, worker_pid):
        # Deliver a signal; a dead pid (ESRCH) is dropped from the table.
        # NOTE(review): after a successful pop in the ESRCH branch control
        # still falls through to 'raise', re-raising the OSError -- confirm
        # whether that is intended.
        try:
            os.kill(worker_pid, signal)
        except OSError as error:
            if error.errno == errno.ESRCH:
                try:
                    self.workers.pop(worker_pid)
                except KeyError:
                    return
            raise
    def regroup(self, regenerate=True):
        # Reap exited children (non-blocking waitpid) and, if requested,
        # start one replacement worker.
        # NOTE(review): the ECHILD branch pops from self.workers while
        # iterating it; safe only because it raises immediately afterwards.
        exited_worker_pids = []
        for worker_pid in self.workers:
            try:
                pid, _ = os.waitpid(worker_pid, os.WNOHANG)
                if pid == worker_pid:
                    exited_worker_pids.append(worker_pid)
            except OSError as e:
                if e.errno == errno.ECHILD:
                    self.workers.pop(worker_pid)
                raise
        for worker_pid in exited_worker_pids:
            try:
                self.workers.pop(worker_pid)
            except KeyError:
                pass
        if regenerate:
            self.add_worker()
| true
|
2e7b0be488687ab05b790892b03c90480ea8af1e
|
Python
|
thomas-vl/airbyte
|
/airbyte-integrations/connectors/destination-cumulio/destination_cumulio/destination.py
|
UTF-8
| 5,455
| 2.734375
| 3
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] |
permissive
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from logging import Logger, getLogger
from typing import Any, Iterable, Mapping
from airbyte_cdk.destinations import Destination
from airbyte_cdk.models import AirbyteConnectionStatus, AirbyteMessage, ConfiguredAirbyteCatalog, DestinationSyncMode, Status, Type
from destination_cumulio.client import CumulioClient
from destination_cumulio.writer import CumulioWriter
logger = getLogger("airbyte")
class DestinationCumulio(Destination):
    def write(
        self,
        config: Mapping[str, Any],
        configured_catalog: ConfiguredAirbyteCatalog,
        input_messages: Iterable[AirbyteMessage],
    ) -> Iterable[AirbyteMessage]:
        """Reads the input stream of messages, config, and catalog to write data to the destination.

        Yields every state message received after all records preceding it
        have been flushed to Cumul.io, so a failed sync can resume from the
        last yielded state.

        :param config: dict matching spec.json, e.g.
            {'api_host': 'https://api.cumul.io', 'api_key': '...', 'api_token': '...'}
        :param configured_catalog: schema of the incoming data and how to persist it.
        :param input_messages: stream of messages received from the source.
        :return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs.
        """
        writer = CumulioWriter(config, configured_catalog, logger)
        for configured_stream in configured_catalog.streams:
            # Cumul.io cannot empty an existing dataset, and deleting it would
            # break dashboards built on top of it. For overwrite mode, the
            # writer pushes the first batch as a "replace" action (replacing
            # all existing data) and subsequent batches as "append".
            if configured_stream.destination_sync_mode == DestinationSyncMode.overwrite:
                writer.delete_stream_entries(configured_stream.stream.name)
        for message in input_messages:
            if message.type == Type.STATE:
                # A state message means all preceding records are durable:
                # flush buffers first, then emit the state.
                writer.flush_all()
                yield message
            elif message.type == Type.RECORD:
                record = message.record
                assert record is not None
                assert record.stream is not None
                assert record.data is not None
                writer.queue_write_operation(record.stream, record.data)
            else:
                # Ignore other message types for now.
                continue
        # Flush any records still buffered at end of stream.
        writer.flush_all()

    def check(self, logger: Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
        """Tests whether the configured Cumul.io API host/key/token combination is valid.

        :param logger: logger for debug/info/error output.
        :param config: destination configuration as specified in spec.json.
        :return: AirbyteConnectionStatus indicating Success or Failure.
        """
        try:
            client = CumulioClient(config, logger)
            # Verify access by hitting the Cumul.io authentication endpoint.
            # A data-push test is intentionally avoided as it can be slow.
            client.test_api_token()
            return AirbyteConnectionStatus(status=Status.SUCCEEDED)
        except Exception as e:
            # The Cumul.io Python SDK raises a generic "Something went wrong"
            # for request failures (e.g. 401 Unauthorized).
            # Bug fix: the original wrote `not e == "Something went wrong"`,
            # comparing the Exception OBJECT to a string -- always True -- so
            # the tailored message below was unreachable. Compare the message
            # text instead.
            if str(e) != "Something went wrong":
                return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
            return AirbyteConnectionStatus(
                status=Status.FAILED,
                message="An exception occurred: could it be that the API host is incorrect, or the API key and token are no longer valid?",
            )
| true
|
9d335e78678663016f636f47c9db749ce067f5a5
|
Python
|
kariln/Machine-Learning
|
/Linear Regression/logistic_regression_gradient_descent.py
|
UTF-8
| 544
| 3.078125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 16:55:37 2020
@author: Kari Ness
"""
"""
Logistic regression with gradient descent
"""
import pandas as pd
# Load the 1D regression train/test splits from the repo-relative path
# (paths are fixed; running from another cwd will raise FileNotFoundError).
train = pd.read_csv('dataset/regression/train_1d_reg_data.csv')
test = pd.read_csv('dataset/regression/test_1d_reg_data.csv')
class logisticRegression():
    """Skeleton for a logistic-regression model trained via gradient descent."""

    def __init__(self):
        # Weight vector; stays None until a training step assigns it.
        self.weights = None

    def getWeights(self):
        """Return the current weight vector (None before training)."""
        return self.weights
| true
|
635a1e96b113238b6718998020710c9a9ff19c1c
|
Python
|
wonjoonSeol/ScienceScape
|
/bibliotools3/scripts/merging_corpus.py
|
UTF-8
| 6,398
| 2.765625
| 3
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
import os
import datetime
'''
File: merging_corpus.py
This script merges all the data files found in 'data-wos' and writes
all parseable lines to a single file, Result/one_file_corpus.txt
'''
CONFIG = {}
"""
Writes year distributions for all spans into a CSV file.
"""
def write_year_distribution(reports_directory, years_spans):
    """Write the {year: article_count} mapping to years_distribution.csv.

    Rows are sorted by year; each (year, count) pair is also echoed to stdout.
    """
    print("reports dir: " + str(reports_directory))
    years_distribution = open(os.path.join(reports_directory, "years_distribution.csv"), "w")
    years_distribution.write("year,nb_articles\n")
    # Sort by year (the first tuple element) before writing.
    for y,n in sorted(((y,n) for (y,n) in years_spans.items()), key = lambda a: a[0]):
        years_distribution.write("%s,%s\n" %(y,n))
        print("%s: %s articles" %(y,n))
    years_distribution.close()
    print("\nYear distribution reported in %s" %os.path.join(reports_directory,"years_distribution.csv"))
"""
Counts occurrences of years within years_spans and reports the resulting year distribution.
"""
def count_occurences(one_file_corpus, reports_directory):
    """Count articles per year in the merged corpus and report the distribution.

    Reads the year from the column at CONFIG["year_index_position"]; blank
    (tab-less) lines are skipped.
    """
    # Tally the number of articles by year.
    years_spans = {}
    onefile_output = open(one_file_corpus, "r")
    onefile_output.readline() # Skip the header row
    for line in onefile_output.readlines():
        # Filter the blank lines out
        if "\t" in line:
            year = line.split("\t")[CONFIG["year_index_position"]]
            years_spans[year] = years_spans[year] + 1 if year in years_spans else 1
    onefile_output.close()
    write_year_distribution(reports_directory, years_spans) # Report year distribution for information
"""
Writes text to a file.
"""
def write_to_file(open_file, text):
    """Write *text* to the given already-open, writable file object."""
    open_file.write(text)
""" Return the number of columns present in a line of text, split by tab. """
def number_columns(line):
    """Number of tab-separated fields in *line* (1 when there is no tab)."""
    # Equivalent to len(line.split("\t")): N tabs delimit N+1 fields.
    return line.count("\t") + 1
""" Return an integer that is set to 1 if the line was stripped of a trailing tab.
Parse an input line, filtering out blank lines and ensuring that the number of columns
corresponds to the selected headers.
"""
def parse_line(l, nb_values_in_wos, parseable_lines, lines_with_errors):
    """Classify one corpus line as parseable or erroneous.

    Appends the line in place to *parseable_lines* or *lines_with_errors*
    and returns 1 when a single trailing extra tab was stripped to repair
    the line, 0 otherwise. Lines without any tab (blank separators) are
    ignored entirely.
    """
    if "\t" not in l:
        # Blank / separator line: nothing to record.
        return 0
    cols = len(l.split("\t"))
    if cols > nb_values_in_wos:
        if l.endswith("\t"):
            # Exactly one surplus trailing tab: strip it and keep the line.
            parseable_lines.append(l[:-1])
            return 1
        print("Warning! Too many columns with %s" % l[-20:])
        lines_with_errors.append(l)
    elif cols < nb_values_in_wos:
        print("Warning! Too few columns with %s" % l[-20:])
        lines_with_errors.append(l)
    else:
        parseable_lines.append(l)
    return 0
"""
Write the parsed output and errors output to result files.
"""
def write_report(parseable_lines, lines_with_errors, onefile_output, errorsfile_output):
    """Flush the parsed lines and the error lines to their output files."""
    onefile_output.write("\n".join(parseable_lines) + "\n")
    errorsfile_output.write("\n".join(lines_with_errors) + "\n")
    print("Found %s non-parseable lines, reported in wos_lines_with_errors.csv" % (len(lines_with_errors)))
""" Return a counter of all lines that were repaired in the file.
Parse an entire file, removing the headers and parsing each line individually.
"""
def parse_file(file, root, nb_values_in_wos, onefile_output, errorsfile_output):
    """Parse one WOS data file and append its lines to the merged outputs.

    Hidden files (leading '.') are skipped. Returns the number of lines
    repaired by stripping a trailing extra tab.
    """
    new_trailing_tabs = 0
    if not file.startswith('.'):
        filepath = os.path.join(root, file)
        print("Merging %s" %filepath)
        with open(filepath, "r") as f:
            # Drop the first line (column headers), then strip stray spaces
            # and carriage returns left by Windows-style line endings.
            lines = f.read().split("\n")[1:]
            lines = [l.strip(" ") for l in lines]
            lines = [l.strip("\r") for l in lines]
            parseable_lines = []
            lines_with_errors = []
            for line in lines:
                new_trailing_tabs += parse_line(line, nb_values_in_wos, parseable_lines, lines_with_errors)
            write_report(parseable_lines, lines_with_errors, onefile_output, errorsfile_output)
    return new_trailing_tabs
""" Return the final output file.
Construct output file.
"""
def prepare_output_file(one_file_corpus, wos_headers):
    """Create the merged-corpus output file, write its header line and return the open handle."""
    if not os.path.exists(os.path.dirname(one_file_corpus)):
        os.makedirs(os.path.dirname(one_file_corpus))
    onefile_output = open(one_file_corpus, "w")
    write_to_file(onefile_output, wos_headers + "\n")
    return onefile_output
"""
Construct a directory to place reports in.
"""
def prepare_report_directory(reports_directory):
    """Create the reports directory; abort if the path exists but is a file."""
    if not os.path.exists(reports_directory):
        os.mkdir(reports_directory)
    elif not os.path.isdir(reports_directory):
        print("Remove file %s or change 'reports_directory' value in config.py" %reports_directory)
        exit()
"""
Construct a file to place error reports in.
"""
def prepare_error_file(reports_directory, wos_headers):
    """Create wos_lines_with_errors.csv with a header row and return the open handle."""
    errorsfile_output = open(os.path.join(reports_directory, "wos_lines_with_errors.csv"), "w")
    write_to_file(errorsfile_output, wos_headers + "\n")
    return errorsfile_output
"""
Parse and merge all output files in the WOS corpus into one.
"""
def merge_corpus(one_file_corpus, wos_headers, reports_directory, wos_data):
    """Merge every WOS data file under *wos_data* into a single corpus file.

    Writes parseable lines to *one_file_corpus*, non-parseable ones to the
    error report, then emits the per-year article distribution.
    """
    print("wos data is " + str(wos_data))
    nb_values_in_wos = len(wos_headers.split("\t"))
    # Prepare output files/folders (write headers and have them ready for writing)
    onefile_output = prepare_output_file(one_file_corpus, wos_headers)
    prepare_report_directory(reports_directory)
    errorsfile_output = prepare_error_file(reports_directory, wos_headers)
    # Go through all the files in the WOS corpus, counting repaired lines.
    nb_extra_trailing_tab = 0
    for root, _, files in os.walk(wos_data):
        for file in files:
            print("file!")
            nb_extra_trailing_tab += parse_file(file, root, nb_values_in_wos, onefile_output, errorsfile_output)
    print("All files have been merged into %s \nRepaired %s lines with trailing extra tab \n" %(one_file_corpus, nb_extra_trailing_tab))
    onefile_output.close()
    errorsfile_output.close()
    count_occurences(one_file_corpus, reports_directory)
# -- Main script --
def run():
    # Entry point: merge the corpus using the module-level CONFIG dict,
    # which callers must populate before invoking run().
    merge_corpus(CONFIG["one_file_corpus"], CONFIG["wos_headers"], CONFIG["reports_directory"], CONFIG["wos_data"])
| true
|
5dda6d92baca823a1406f2c5ff6071498b36a63c
|
Python
|
GearL/landlord
|
/landlord/common/time_table.py
|
UTF-8
| 3,608
| 2.5625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
'''
the strategies for choosing the time_table
'''
import copy
from datetime import time
from django.utils.dateformat import time_format
from landlord.custom.table_util import generate_date_list
from landlord.custom.table_util import get_application_this_week
def generate_time_choices():
    """Build half-hour time-slot choices for a checkbox widget.

    Returns a list of (value, label) pairs covering 08:00-23:00, e.g.
    [('08:00:00', u'8点-8点30分'), ('08:30:00', u'8点30分-9点'), ...,
    ('22:30:00', u'22点30分-23点')] -- 30 entries in total. Values are
    formatted by Django's time_format with the 'H:i:s' pattern.

    Fix: ``range`` instead of the Python-2-only ``xrange`` (behaviorally
    identical here; keeps the helper importable under Python 3).
    """
    def create_format(lha, rha):
        # Format an (hour, minute) pair as 'HH:MM:SS'.
        return time_format(time(lha, rha), 'H:i:s')
    choices = list()
    ptn = u'%d点-%d点30分'
    ptn_half = u'%d点30分-%d点'
    for i in range(8, 23):
        choices.append((create_format(i, 0), ptn % (i, i)))
        choices.append((create_format(i, 30), ptn_half % (i, i + 1)))
    return choices
class Table(object):
    """Base class for a time-table building strategy.

    Subclasses set ``ident`` (numeric code used by the registry) and
    ``label`` (human-readable description) and implement create_table().
    """
    ident = None
    label = None
    def __init__(self, field):
        self.field = field
    def __unicode__(self):
        # Python 2 display form: "<ident> : <label>".
        return u'%d : %s' % (self.ident, self.label)
    def create_table(self, model, PLACE):
        # Subclasses must build and return the rendered table structure.
        raise NotImplementedError
class Stuact_Table(Table):
    """Weekly table for student-activity rooms, with three coarse time slots."""
    ident = 01
    label = 'the time_table of the stuact'
    def create_table(self, model, PLACE):
        this_week_apps = get_application_this_week(model)
        # Coarse slots: morning / afternoon / evening.
        TIME = [u'早上', u'下午', u'晚上']
        table = {}
        empty_time_dict = {time: None for time in TIME}
        # Build the empty table: one {slot: None} dict per weekday per place.
        for short_name, full_name in PLACE:
            table[full_name] = \
                [copy.copy(empty_time_dict) for i in range(7)]
        # Place each approved application into its (place, weekday, slot) cell.
        for app in this_week_apps:
            for time in app.time:
                table[app.place.name][app.date.weekday()][time] = app
        # Convert each day's dict into a list ordered by slot.
        for l, place in PLACE:
            for day in range(7):
                table[place][day] = \
                    [table[place][day][time] for time in TIME]
        # Order the places as declared in PLACE.
        content = [(place, table[place]) for l, place in PLACE]
        return {'date': generate_date_list(),
                'content': content}
class Mroom_Table(Table):
    """Weekly table for meeting rooms, with half-hour slots from 08:00 to 23:00."""
    ident = 02
    label = 'the time_table of the mroom'
    def create_table(self, model, PLACE):
        this_week_apps = get_application_this_week(model)
        # (value, label) pairs for every half-hour slot.
        TIME = generate_time_choices()
        table = {}
        empty_time_dict = {time: None for time, l in TIME}
        # Build the empty table: one {slot: None} dict per weekday per place.
        for short_name, full_name in PLACE:
            table[full_name] = \
                [copy.copy(empty_time_dict) for i in range(7)]
        # Place each approved application into its (place, weekday, slot) cell.
        for app in this_week_apps:
            for time in app.time:
                table[app.place.name][app.date.weekday()][time] = app
        # Convert each day's dict into a list ordered by slot.
        for l, place in PLACE:
            for day in range(7):
                table[place][day] = \
                    [table[place][day][time] for time, l in TIME]
        # Order the places as declared in PLACE.
        content = [(place, table[place]) for l, place in PLACE]
        return {'date': generate_date_list(),
                'time_list': tuple(l for time, l in TIME),
                'content': content}
# Registry of available table strategies, keyed by their ident code.
_TABLE_SET = [Stuact_Table, Mroom_Table]
_TABLE_MAP = dict((cls.ident, cls) for cls in _TABLE_SET)


def make_table_by_ident(ident, field):
    """Instantiate the table strategy registered under *ident*."""
    try:
        table_cls = _TABLE_MAP[ident]
    except KeyError:
        raise ValueError('unknown strategy with ident %r' % ident)
    return table_cls(field)


def make_table_choices():
    """(ident, label) pairs for every registered strategy."""
    choices = []
    for cls in _TABLE_SET:
        choices.append((cls.ident, cls.label))
    return tuple(choices)
| true
|
bb33581a8f6a967394db9e41418743feff866cf7
|
Python
|
allwak/algorithms
|
/Sprint12/Theme1/1h.py
|
UTF-8
| 180
| 3.25
| 3
|
[] |
no_license
|
# Read the first line of input.txt and report whether it is a palindrome
# when reduced to lower-case letters only.
with open('input.txt', "r") as f:
    raw = f.readline().rstrip()

letters = ''.join(ch.lower() for ch in raw if ch.isalpha())
print(letters == letters[::-1])
| true
|
a336d8cd261f2c772f7e609f34276f6b4524054c
|
Python
|
YichaoOU/HemTools
|
/bin/merge_bed.py
|
UTF-8
| 2,327
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import sys
import os
import pandas as pd
import datetime
import getpass
import uuid
import argparse
import glob
"""
Every python wrapper is supposed to be similar, since they are using the same convention.
The only thing need to be changed is the guess_input function and the argparser function.
look for ## CHANGE THE FUNCTION HERE FOR DIFFERENT WRAPPER
variable inherents from utils:
myData
myPars
myPipelines
"""
def my_args():
    """Parse CLI arguments: one or more BED files plus output/merge options.

    The default output name embeds the user name, today's date and a random
    uuid fragment to avoid collisions.
    """
    username = getpass.getuser()
    addon_string = str(uuid.uuid4()).split("-")[-1]
    mainParser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,description="merge bedfiles into one")
    mainParser.add_argument('file', type=str, nargs='+')
    mainParser.add_argument('-o',"--output", help="output table name",default=username+"_"+str(datetime.date.today())+"_"+addon_string+".bed")
    mainParser.add_argument('--cut3', help="only use first 3 columns", action='store_true')
    mainParser.add_argument('--keep_info', help="merge a bed6 file and randomly keep 4,5,6 columns if there is overlap", action='store_true')
    ##------- add parameters above ---------------------
    args = mainParser.parse_args()
    return args
def main():
    """Merge the given BED files with bedtools (via os.system shell pipelines).

    --cut3 keeps only the first 3 columns; --keep_info additionally carries
    columns 4-6 through the merge by intersecting back against the input.
    Requires a 'module load bedtools' environment.
    """
    args = my_args()
    input_files = " ".join(args.file)
    # os.system("module load bedtools; cat %s | sort -k1,1 -k2,2n - | bedtools merge -i - > %s"%(input_files,args.output))
    # os.system("module load bedtools; cat %s |cut -f 1,2,3| sort -k1,1 -k2,2n - | bedtools merge -i - > %s"%(input_files,args.output))
    if args.cut3:
        os.system("module load bedtools; cat %s | sort -k1,1 -k2,2n - |cut -f 1,2,3 | bedtools merge -i - > %s"%(input_files,args.output))
    elif args.keep_info:
        # Merge on coords, then intersect back to recover one bed6 row per
        # merged interval (drop_duplicates keeps one arbitrary overlap).
        os.system("module load bedtools; cat {0} | sort -k1,1 -k2,2n - |cut -f 1,2,3 | bedtools merge -i - > {1}.tmp;bedtools intersect -a {0} -b {1}.tmp -wa -wb > {1}.tmp.tmp".format(input_files,args.output))
        df = pd.read_csv("%s.tmp.tmp"%(args.output),sep="\t",header=None)
        df = df.drop_duplicates([6,7,8])
        df = df[[0,1,2,3,4,5]]
        df.to_csv(args.output,sep="\t",header=False,index=False)
        os.system("rm %s.tmp*"%(args.output))
    else:
        os.system("module load bedtools; cat %s | sort -k1,1 -k2,2n - | bedtools merge -i - > %s"%(input_files,args.output))
if __name__ == "__main__":
    main()
| true
|
b7f9f25595213346f8b9d3884422498fa0223fd7
|
Python
|
DavidMStraub/ckmutil
|
/ckmutil/ckm.py
|
UTF-8
| 4,267
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
"""Functions needed for the CKM quark mixing matrix."""
from math import cos,sin
from cmath import exp, sqrt
import numpy as np
def ckm_standard(t12, t13, t23, delta):
    r"""CKM matrix in the standard parametrization and standard phase
    convention.

    Parameters
    ----------
    - `t12`: CKM angle $\theta_{12}$ in radians
    - `t13`: CKM angle $\theta_{13}$ in radians
    - `t23`: CKM angle $\theta_{23}$ in radians
    - `delta`: CKM phase $\delta=\gamma$ in radians

    Returns a 3x3 complex numpy array.
    """
    s12, c12 = sin(t12), cos(t12)
    s13, c13 = sin(t13), cos(t13)
    s23, c23 = sin(t23), cos(t23)
    phase = exp(1j*delta)
    row_u = [c12*c13,
             c13*s12,
             s13/phase]
    row_c = [-(c23*s12) - c12*phase*s13*s23,
             c12*c23 - phase*s12*s13*s23,
             c13*s23]
    row_t = [-(c12*c23*phase*s13) + s12*s23,
             -(c23*phase*s12*s13) - c12*s23,
             c13*c23]
    return np.array([row_u, row_c, row_t])
def tree_to_wolfenstein(Vus, Vub, Vcb, gamma):
    r"""Convert tree-level CKM inputs to Wolfenstein parameters.

    Takes $V_{us}$, $|V_{ub}|$, $V_{cb}$ and the phase $\gamma$ (radians)
    and returns $(\lambda, A, \bar\rho, \bar\eta)$.
    """
    norm = sqrt(1 - Vub**2)
    laC = Vus / norm
    A = Vcb / norm / laC**2
    rho = Vub * cos(gamma) / A / laC**3
    eta = Vub * sin(gamma) / A / laC**3
    # rho-bar / eta-bar absorb the (1 - lambda^2/2) shift.
    shift = 1 - laC**2 / 2.
    return laC, A, rho * shift, eta * shift
def ckm_wolfenstein(laC, A, rhobar, etabar):
    r"""CKM matrix in the Wolfenstein parametrization and standard phase
    convention.

    This function does not rely on an expansion in the Cabibbo angle but
    defines, to all orders in $\lambda$,

    - $\lambda = \sin\theta_{12}$
    - $A\lambda^2 = \sin\theta_{23}$
    - $A\lambda^3(\rho-i \eta) = \sin\theta_{13}e^{-i\delta}$

    where $\rho = \bar\rho/(1-\lambda^2/2)$ and
    $\eta = \bar\eta/(1-\lambda^2/2)$.

    Parameters
    ----------
    - `laC`: Wolfenstein parameter $\lambda$ (sine of Cabibbo angle)
    - `A`: Wolfenstein parameter $A$
    - `rhobar`: Wolfenstein parameter $\bar\rho = \rho(1-\lambda^2/2)$
    - `etabar`: Wolfenstein parameter $\bar\eta = \eta(1-\lambda^2/2)$

    Returns a 3x3 complex numpy array.
    """
    # Undo the (1 - lambda^2/2) shift of the barred parameters.
    rho = rhobar/(1 - laC**2/2.)
    eta = etabar/(1 - laC**2/2.)
    # Exact (non-expanded) matrix elements; cmath.sqrt keeps them complex.
    return np.array([[sqrt(1 - laC**2)*sqrt(1 - A**2*laC**6*((-1j)*eta + rho)*((1j)*eta + rho)),
                      laC*sqrt(1 - A**2*laC**6*((-1j)*eta + rho)*((1j)*eta + rho)),
                      A*laC**3*((-1j)*eta + rho)],
                     [-(laC*sqrt(1 - A**2*laC**4)) - A**2*laC**5*sqrt(1 - laC**2)*((1j)*eta + rho),
                      sqrt(1 - laC**2)*sqrt(1 - A**2*laC**4) - A**2*laC**6*((1j)*eta + rho),
                      A*laC**2*sqrt(1 - A**2*laC**6*((-1j)*eta + rho)*((1j)*eta + rho))],
                     [A*laC**3 - A*laC**3*sqrt(1 - laC**2)*sqrt(1 - A**2*laC**4)*((1j)*eta + rho),
                      -(A*laC**2*sqrt(1 - laC**2)) - A*laC**4*sqrt(1 - A**2*laC**4)*((1j)*eta + rho),
                      sqrt(1 - A**2*laC**4)*sqrt(1 - A**2*laC**6*((-1j)*eta + rho)*((1j)*eta + rho))]])
def ckm_tree(Vus, Vub, Vcb, gamma):
    r"""CKM matrix in the tree parametrization and standard phase
    convention.

    In this parametrization, the parameters are directly measured from
    tree-level $B$ decays. It is thus particularly suited for new physics
    analyses because the tree-level decays should be dominated by the Standard
    Model. This function involves no analytical approximations.

    Relation to the standard parametrization:

    - $V_{us} = \cos \theta_{13} \sin \theta_{12}$
    - $|V_{ub}| = |\sin \theta_{13}|$
    - $V_{cb} = \cos \theta_{13} \sin \theta_{23}$
    - $\gamma=\delta$

    Parameters
    ----------
    - `Vus`: CKM matrix element $V_{us}$
    - `Vub`: Absolute value of CKM matrix element $|V_{ub}|$
    - `Vcb`: CKM matrix element $V_{cb}$
    - `gamma`: CKM phase $\gamma=\delta$ in radians

    Returns a 3x3 complex numpy array.
    """
    # Exact matrix elements in terms of the tree-level inputs;
    # cmath.sqrt keeps every entry complex.
    return np.array([[sqrt(1 - Vub**2)*sqrt(1 - Vus**2/(1 - Vub**2)),
                      Vus,
                      Vub/exp(1j*gamma)],
                     [-((sqrt(1 - Vcb**2/(1 - Vub**2))*Vus)/sqrt(1 - Vub**2)) - (Vub*exp(1j*gamma)*Vcb*sqrt(1 - Vus**2/(1 - Vub**2)))/sqrt(1 - Vub**2),
                      -((Vub*exp(1j*gamma)*Vcb*Vus)/(1 - Vub**2)) + sqrt(1 - Vcb**2/(1 - Vub**2))*sqrt(1 - Vus**2/(1 - Vub**2)),
                      Vcb],
                     [(Vcb*Vus)/(1 - Vub**2) - Vub*exp(1j*gamma)*sqrt(1 - Vcb**2/(1 - Vub**2))*sqrt(1 - Vus**2/(1 - Vub**2)),
                      -((Vub*exp(1j*gamma)*sqrt(1 - Vcb**2/(1 - Vub**2))*Vus)/sqrt(1 - Vub**2)) - (Vcb*sqrt(1 - Vus**2/(1 - Vub**2)))/sqrt(1 - Vub**2),
                      sqrt(1 - Vub**2)*sqrt(1 - Vcb**2/(1 - Vub**2))]])
| true
|
89fe8f1b7daa5dc78b07ce25f150a56a0aa837f3
|
Python
|
QGtiger/justforfun
|
/Sign_Scan/Scan_Sign.py
|
UTF-8
| 1,457
| 2.59375
| 3
|
[] |
no_license
|
"""
author:lightfish
Time:2018.11.28
note:扫码签到
"""
from tornado import web, httpserver, ioloop
from create_qr_code import get_code_by_str
import time
class IndexPageHandler(web.RequestHandler):
    """Serves the landing page (index.html)."""
    def get(self, *args, **kwargs):
        # self.write('Hello Tornado...')
        self.render('index.html')
class CodeHandler(web.RequestHandler):
    """Returns the QR-code image bytes for a fixed payload string."""
    def get(self, *args, **kwargs):
        # get_code_by_str returns an in-memory buffer; presumably a
        # BytesIO holding PNG data -- TODO confirm in create_qr_code.
        img_handler = get_code_by_str('Hello Tornado...')
        self.write(img_handler.getvalue())
class SignHandler(web.RequestHandler):
    """Sign-in form: GET renders it, POST appends the submission to User.txt."""
    def get(self, *args, **kwargs):
        self.render('sign.html')
    def post(self, *args, **kwargs):
        name = self.get_argument('name')
        department = self.get_argument('department')
        num = self.get_argument('num')
        if name and department and num:
            # Append the record; '=' * 80 is a visual separator between entries.
            with open('User.txt','a') as f:
                f.write('name: {}\ndepartment: {}\nnum: {}\n{}\n'.format(name,department,num,'='*80))
            self.write('签到成功...')
            # NOTE(review): write() followed by render() on the same response
            # is unusual for Tornado -- confirm the intended page flow.
            self.render('index.html')
        else:
            self.write('请填写正确的信息!!!')
            #time.sleep(3)
            self.render('sign.html')
# URL routing table for the sign-in app.
application = web.Application([
    (r'/index', IndexPageHandler),
    (r'/qr_code', CodeHandler),
    (r'/sign', SignHandler),
])
if __name__ == '__main__':
    # Serve on port 9000 until interrupted.
    http_server = httpserver.HTTPServer(application)
    http_server.listen(9000)
    ioloop.IOLoop.current().start()
| true
|
64502e228a3002aa941ae224e1ecfaddfa605e7a
|
Python
|
dariauzun/test-06-03-21
|
/bot.py
|
UTF-8
| 8,908
| 2.71875
| 3
|
[] |
no_license
|
"""
имеем проект развития образовательной платформы GAZ Campus - многоформатной площадки образования клиентов с лекциями, МК и встречами с экспертами
создадим чат-бот в Телеграм для отправки напоминаний клиентам и приглашений к участию
импортируем библиотеку c помощью функции import и подключаем Телеграм-бот на python
программируем чат-бот на отправку конкретного текста (приглашения), задаем время отправки через alert
напишем обработчик входящих сообщений через команды help и schedule (1 отвечает за краткую информацию о платформе, 2 высылает расписание мероприятий)
"""
import telebot
import gzcamp_bot
from telebot import types
import datetime
# TOKEN = gzcamp_bot.1883015533:AAGn0-6zRCyq9tJV0gCJ6kyj5-0l85dyT7c
bot = telebot.TeleBot('1883015533:AAGn0-6zRCyq9tJV0gCJ6kyj5-0l85dyT7c')
@bot.message_handler(commands=['start'])
def start(message):
    # Greet the user and prompt for the keyword that opens the main menu.
    bot.send_message(message.from_user.id, 'Здравствуйте! Напишите Кампус')
    #bot.register_next_step_handler(message, data)
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
    """Entry point for free-text messages: the keyword opens the main menu."""
    if message.text == "Кампус":
        # Build the main menu keyboard and hand the next reply to name().
        main_markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
        but1 = types.KeyboardButton('Формат')
        but2 = types.KeyboardButton('Месяц')
        but3 = types.KeyboardButton('Конкретное событие')
        but4 = types.KeyboardButton('Тип мероприятия')
        main_markup.add(but1, but2, but3, but4)
        bot.send_message(message.chat.id, "Что Вас интересует?", reply_markup = main_markup)
        bot.register_next_step_handler(message, name)
    else:
        bot.send_message(message.chat.id, "Я Вас не понимаю. Напишите /start.")
def name(message):
    """Dispatch the main-menu choice to the matching sub-menu handler.

    NOTE(review): the handlers ``ev`` and ``typ`` registered below are not
    defined in this chunk -- confirm they exist elsewhere in the module.
    """
    if message.text == 'Формат':
        main_markup1 = types.ReplyKeyboardMarkup(resize_keyboard=True)
        form1 = types.KeyboardButton('Online')
        form2 = types.KeyboardButton('Offline')
        main_markup1.add(form1, form2)
        bot.send_message(message.chat.id, "Выберите подходящий формат", reply_markup = main_markup1)
        bot.register_next_step_handler(message, form)
    elif message.text == 'Месяц':
        main_markup2 = types.ReplyKeyboardMarkup(resize_keyboard=True)
        month1 = types.KeyboardButton('Май')
        month2 = types.KeyboardButton('Июнь')
        month3 = types.KeyboardButton('Июль')
        main_markup2.add(month1, month2, month3)
        bot.send_message(message.chat.id, "Какой месяц Вас интересует?", reply_markup = main_markup2)
        bot.register_next_step_handler(message, month)
    elif message.text == 'Конкретное событие':
        main_markup3 = types.ReplyKeyboardMarkup(resize_keyboard=True)
        ev1 = types.KeyboardButton('Пять обязательных шагов для логистики при развитии e-commerce')
        ev2 = types.KeyboardButton('Системный подход к управлению опытом кандидата и сотрудника')
        ev3 = types.KeyboardButton('Как организовать бизнес в сфере ритейла')
        ev4 = types.KeyboardButton('Новейшие инструменты продвижения в соцсетях')
        main_markup3.add(ev1, ev2, ev3, ev4)
        bot.send_message(message.chat.id, "Какое событие Вас интересует?", reply_markup = main_markup3)
        bot.register_next_step_handler(message, ev)
    elif message.text == 'Тип мероприятия':
        main_markup4 = types.ReplyKeyboardMarkup(resize_keyboard=True)
        typ1 = types.KeyboardButton('Мастермайнд')
        typ2 = types.KeyboardButton('Разговор с экспертом')
        main_markup4.add(typ1, typ2)
        bot.send_message(message.chat.id, "Какой формат Вам интересен?", reply_markup = main_markup4)
        bot.register_next_step_handler(message, typ)
    else:
        bot.send_message(message.chat.id, "Я Вас не понимаю. Напишите /start.")
def form(message):
    """Format-selection handler: online events go to fo(), offline to orm()."""
    if message.text == 'Online':
        # Online events are identified by their date.
        main_markup5 = types.ReplyKeyboardMarkup(resize_keyboard=True)
        fo1 = types.KeyboardButton('05/06')
        fo2 = types.KeyboardButton('09/06')
        main_markup5.add(fo1, fo2)
        bot.send_message(message.chat.id, "Выберите событие", reply_markup = main_markup5)
        bot.register_next_step_handler(message, fo)
    elif message.text == 'Offline':
        # Offline events are identified by their type.
        main_markup6 = types.ReplyKeyboardMarkup(resize_keyboard=True)
        orm1 = types.KeyboardButton('Мастермайнд')
        orm2 = types.KeyboardButton('Разговор с экспертом')
        main_markup6.add(orm1, orm2)
        bot.send_message(message.chat.id, "Выберите событие", reply_markup = main_markup6)
        bot.register_next_step_handler(message, orm)
    else:
        bot.send_message(message.chat.id, "Я Вас не понимаю. Напишите /start.")
def month(message):
    """Month-selection handler: show the event dates for the chosen month.

    NOTE(review): 'Май' is offered by name() but has no branch here, and no
    register_next_step_handler is set after the date keyboard — confirm intent.
    """
    if message.text == 'Июнь':
        main_markup7 = types.ReplyKeyboardMarkup(resize_keyboard=True)
        mo1 = types.KeyboardButton('05/06')
        mo2 = types.KeyboardButton('09/06')
        main_markup7.add(mo1, mo2)
        bot.send_message(message.chat.id, "Выберите число", reply_markup = main_markup7)
    elif message.text == 'Июль':
        main_markup8 = types.ReplyKeyboardMarkup(resize_keyboard=True)
        nth1 = types.KeyboardButton('01/07')
        nth2 = types.KeyboardButton('05/07')
        # BUG FIX: previously added `orm1, orm2` — locals of form(), undefined
        # here — which raised NameError; the July buttons were never attached.
        main_markup8.add(nth1, nth2)
        bot.send_message(message.chat.id, "Выберите число", reply_markup = main_markup8)
    else:
        bot.send_message(message.chat.id, "Я Вас не понимаю. Напишите /start.")
def fo(message):
    """Online-event date handler: reply with the details for the chosen date."""
    if message.text == '05/06':
        bot.send_message(message.chat.id, "Пять обязательных шагов для логистики при развитии e-commerce, 05/06/2021, 16:00, платформа Webinar")
    elif message.text == '09/06':
        bot.send_message(message.chat.id, "Системный подход к управлению опытом кандидата и сотрудника, 09/06/2021, 16:00, платформа Webinar")
def orm(message):
    """Offline-event type handler: reply with the date/time/venue details."""
    if message.text == 'Мастермайнд':
        bot.send_message(message.chat.id, "10/06/2021, 16:00, шоу-рум ГАЗ на Белорусской")
    elif message.text == 'Разговор с экспертом':
        # BUG FIX: the reply contained Russian 'б' characters where commas
        # belong ("11/06/2021б 16:00б ...") — a keyboard-layout typo.
        bot.send_message(message.chat.id, "11/06/2021, 16:00, шоу-рум ГАЗ на Белорусской")
def ev(message):
    """Concrete-event handler: reply with the chosen event's date and venue.

    NOTE(review): the two other events offered in name() (ev3/ev4) have no
    branch here — confirm whether they should be handled too.
    """
    if message.text == 'Пять обязательных шагов для логистики при развитии e-commerce':
        bot.send_message(message.chat.id, "05/06/2021, 16:00, платформа Webinar")
    # BUG FIX: this branch compared against 'Июль' (a month name copy-pasted
    # from month()) and could never match an event title; the 09/06 reply
    # corresponds to the second event offered in name().
    elif message.text == 'Системный подход к управлению опытом кандидата и сотрудника':
        bot.send_message(message.chat.id, "09/06/2021, 16:00, платформа Webinar")
def typ(message):
    """Event-type handler: reply with the date/time/venue for the chosen type."""
    if message.text == 'Мастермайнд':
        bot.send_message(message.chat.id, "10/06/2021, 16:00, шоу-рум ГАЗ на Белорусской")
    elif message.text == 'Разговор с экспертом':
        # BUG FIX: the reply contained Russian 'б' characters where commas
        # belong ("11/06/2021б 16:00б ...") — a keyboard-layout typo.
        bot.send_message(message.chat.id, "11/06/2021, 16:00, шоу-рум ГАЗ на Белорусской")
# NOTE(review): polling() blocks while the bot runs — confirm the definitions
# below are ever reached during normal execution.
bot.polling(none_stop=True)


def send_welcome(message):
    """Prototype welcome handler; its decorator is commented out further below."""
    name = bot.get_me()
    print(name)
    bot.reply_to(message, "Welcome")


# Load the raw event data, one event per line.
with open('gazdata.txt', 'r') as f:
    lines = f.readlines()
class MyEvent(object):
    """A single scheduled event parsed from one line of gazdata.txt."""

    # Class-level defaults; event_name is overridden per-instance in __init__.
    event_name = 'Как настроить логистику для вашего бизнеса'
    event_date = datetime.date(2021, 6, 5)

    def __init__(self, line):
        # BUG FIX: the constructor was named `init` (missing dunder), so
        # MyEvent(line) raised TypeError (object() takes no arguments) and
        # this body never ran.
        self.event_name = line.split(' ')[0]
# Build one MyEvent per raw line of gazdata.txt.
# Example line: 'Как настроить логистику для вашего бизнеса', '2021, 5, 27'
events = []
for l in lines:
    events.append(MyEvent(l))  # MyEvent parses the name from the raw line
class MyUser(object):
    """A Telegram user and the events they are subscribed to."""

    name = '@uzunishe'  # default handle
    user_events = []    # class-level default, kept for backward compatibility

    def __init__(self):
        # BUG FIX: give every instance its own list. The class-level
        # `user_events = []` is shared by all instances, so one user's
        # subscriptions would leak to every other user.
        self.user_events = []
# Prototype registry mapping a Telegram handle to its user.
# NOTE(review): this stores the MyUser class itself, not an instance — confirm.
my_users = {'tg_name': MyUser}
for l in lines:
    pass  # TODO: populate my_users from the data file
#bot.polling(none_stop=True)
#@bot.message_handler(commands=['info'])
| true
|
6beb0706a1b053add0f035326a667efac7129361
|
Python
|
cuthbertLab/daseki
|
/daseki/retro/pitch.py
|
UTF-8
| 1,710
| 2.5625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Name: pitch.py
# Purpose: a single pitch event in a retrosheet eventfile event
#
# Authors: Michael Scott Cuthbert
#
# Copyright: Copyright © 2015 Michael Scott Cuthbert / cuthbertLab
# License: BSD, see license.txt
# -----------------------------------------------------------------------------
class Pitch(object):
    '''
    One pitch of a Retrosheet event. Not used yet.

    `pitchEvents` maps each single-character Retrosheet pitch code to a
    human-readable description of what happened on that pitch.
    '''
    pitchEvents = {
        '+': 'following pickoff throw by catcher',
        '*': 'following pitch was blocked by catcher',
        '.': 'play not involving the batter',
        '1': 'pickoff throw to first',
        '2': 'pickoff throw to second',
        '3': 'pickoff throw to third',
        '>': 'runner going on the pitch',
        'B': 'ball',
        'C': 'called strike',
        'F': 'foul',
        'H': 'hit batter',
        'I': 'intentional ball',
        'K': 'strike (unknown type)',
        'L': 'foul bunt',
        'M': 'missed bunt attempt',
        'N': 'no pitch (balks and interference)',
        'O': 'foul tip on bunt',
        'P': 'pitchout',
        'Q': 'swinging on pitchout',
        'S': 'swinging strike',
        'T': 'foul tip',
        'U': 'unknown or missed pitch',
        'V': 'called ball because pitcher went to mouth',
        'X': 'ball put into play by batter',
        'Y': 'ball put into play on pitchout',
    }
| true
|
6dd03f9dd9238313f7e83110836cd1b0318ded52
|
Python
|
DingGuodong/LinuxBashShellScriptForOps
|
/projects/LinuxSystemOps/AutoDevOps/pythonSelf/pyShutitOps.py
|
UTF-8
| 1,926
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/python
# encoding: utf-8
# -*- coding: utf8 -*-
"""
Created by PyCharm.
File: LinuxBashShellScriptForOps:pyShutitOps.py
User: Guodong
Create Date: 2017/6/29
Create Time: 16:12
shutit: An programmable automation tool designed for complex builds
An programmable shell-based (pexpect) automation tool designed for complex builds.
See: http://ianmiell.github.io/shutit
see also: expect or pexpect
note: works on posix system only
"""
import shutit
def shutit_example_1():
    """SSH into a host with shutit and print its hostname."""
    # use shutit for ssh
    session = shutit.create_session('bash')
    password = session.get_input('', ispass=True)  # prompt without echoing
    session.login('[email protected]', user='root', password=password)
    session.send('hostname', echo=True)
    session.logout()
def shutit_example_2():
    """SSH into two servers and warn when root-disk usage exceeds the threshold.

    NOTE(review): the check `int(capacity) < 10` warns when usage is *below*
    10% — confirm the intended comparison (the message suggests "running out").
    """
    # use shutit for ssh then command
    # Extract the use% of / as a bare number (e.g. "93").
    capacity_command = """df / | awk '{print $5}' | tail -1 | sed s/[^0-9]//"""
    session1 = shutit.create_session('bash')
    session2 = shutit.create_session('bash')
    password1 = session1.get_input('Password for server1', ispass=True)
    password2 = session2.get_input('Password for server2', ispass=True)
    session1.login('ssh [email protected]', user='you', password=password1)
    session2.login('ssh [email protected]', user='you', password=password2)
    capacity = session1.send_and_get_output(capacity_command)
    if int(capacity) < 10:
        print('RUNNING OUT OF SPACE ON server1!')
    capacity = session2.send_and_get_output(capacity_command)
    if int(capacity) < 10:
        print('RUNNING OUT OF SPACE ON server2!')
    session1.logout()
    session2.logout()
def shutit_example_3():
    """Drive an interactive telnet session to fetch / from google.com:80."""
    # use shutit for telnet
    session = shutit.create_session('bash')
    session.send('telnet', expect='>', echo=True)
    # 'scape character' matches telnet's "Escape character is ..." banner.
    session.send('open google.com 80', expect='scape character', echo=True)
    # check_exit=False: the remote end closes the connection after the response.
    session.send('GET /', echo=True, check_exit=False)
    session.logout()
| true
|
52c3c3a39894f0580b9693ab7c50d3b4d25e6a28
|
Python
|
EthanZeigler/Bobby-Tables
|
/backend/BobbyTables/src/main/python/disaster_dump.py
|
UTF-8
| 1,196
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
import psycopg2
import csv
import sys
# Load FEMA disaster rows from CSV into the disaster table and link each one
# to its county by geofib. Credentials/paths are hard-coded for local use.
conn = psycopg2.connect(database="sample_db", user = "ethanzeigler", host = "127.0.0.1", port = "5432")
cur = conn.cursor()
filename = "/Users/ethanzeigler/Downloads/Disasters_final_date_nocomma.csv"
with open(filename, 'rt') as csvfile:
    rdr = csv.reader(csvfile, delimiter=',', quotechar='|')
    count = 0  # synthetic primary key for the disaster table
    for row in rdr:
        try:
            # The BOM (\ufeff) can be glued to the first field of the file.
            cur.execute("INSERT INTO disaster (entry_id, fema_id, type, name, start_date, end_date) VALUES (%s, %s, %s, %s, to_date(%s, 'YYYY-MM-DD'), to_date(%s, 'YYYY-MM-DD'));", (count, int(row[0].replace('\ufeff', '')), row[2], row[3], row[4], row[5]))
            conn.commit()
            cur.execute("SELECT geofib FROM county WHERE name ILIKE %s AND state ILIKE %s;", (row[6].strip(), row[1]))
            response = cur.fetchall()
            if len(response) > 0:
                cur.execute("INSERT INTO county_disaster_link (geofib, disaster_id) VALUES (%s, %s);", (int(response[0][0]), int(count)))
                conn.commit()
                print("Written")
            else:
                print("No matching geofib")
        except Exception as e:
            # BUG FIX: was a bare `except:` printing only "error", which hid
            # the actual failure and also swallowed KeyboardInterrupt/SystemExit.
            print("error: %s" % e)
        count += 1
conn.close()
| true
|
891e2854b62afd0e735ab20688d2278fff7d27ad
|
Python
|
danlarsson/WiFi-tools
|
/ssid-strength-meter.py
|
UTF-8
| 1,960
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/python
'''
Messure the signal strength of a specific BSSID and present the MIN, MAX and AVERAGE values.
SA3ARK, 2017-11-06
'''
import pyshark
#sudo tcpdump -I -i enX -p
#sudo killall airportd
# ToDo: Set channel to capture on
# CHANNEL = 1
BSSID = '30:85:a9:6a:0a:df'
NR_OF_MESSUREMENTS = 200
SHOW_EVERY = 10
# Get beacon frames with correct checksum for transmitter address...
capture_filter = 'wlan.fc.type_subtype eq 8 && wlan.sa == ' + BSSID + ' && wlan.fcs.status == 1'
capture = pyshark.LiveCapture('en0', display_filter=capture_filter, monitor_mode=True)
start_ssid = ''
start_mac = ''
start_channel = 0
signal_values = []
head = False
force_result = True
for packet in capture.sniff_continuously():
tmp = packet.__dict__
wlan = tmp['layers'][1]
signal = int(tmp['layers'][1].get_field_value('signal_dbm'))
channel = int(tmp['layers'][1].get_field_value('channel'))
mac_address = (tmp['layers'][2].get_field_value("ta"))
ssid_name = (tmp['layers'][3].get_field_value("ssid"))
if mac_address != start_mac:
print 'BSSID: %s' % mac_address
start_mac = mac_address
head = True
if ssid_name != start_ssid:
print 'SSID: %s ' % ssid_name
start_ssid = ssid_name
head = True
if channel != start_channel:
print 'Channel: %i' % channel
start_channel = channel
head = True
if NR_OF_MESSUREMENTS-1 <= len(signal_values):
force_result = True
head = True
if head:
print
print 'Nr Signal MAX MIN AVG'
head = False
signal_values.append(signal)
average = sum(signal_values)/len(signal_values)
if len(signal_values)%SHOW_EVERY == 0 or force_result:
print '%-6s %s %s %s %i' % (len(signal_values), signal, min(signal_values), max(signal_values), average)
force_result = False
if NR_OF_MESSUREMENTS <= len(signal_values):
exit()
| true
|
3a7c5107c1064933c97a51c39931cad0bfcc66ae
|
Python
|
blowekamp/SimpleITK-Notebook-Answers
|
/Utilities/Hooks/RemoveIpythonNotebookOutputs.py
|
UTF-8
| 1,862
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
## This script was from a stack-overflow recommendation hosted on a github gist
"""strip outputs from an IPython Notebook
Opens a notebook, strips its output, and writes the outputless version to the original file.
Useful mainly as a git pre-commit hook for users who don't want to track output in VCS.
This does mostly the same thing as the `Clear All Output` command in the notebook UI.
"""
import io
import sys
from IPython.nbformat import current
def strip_output(nb):
    """Strip outputs and prompt numbers from a notebook object.

    Returns (nb, is_changed): is_changed tells whether anything was actually
    removed, so callers can skip rewriting notebooks that are already clean.
    """
    # BUG FIX: the docstring used to appear *after* the first statement, where
    # it is just a discarded string literal rather than the function docstring.
    is_changed = False
    nb.metadata.pop('signature', None)
    for cell in nb.worksheets[0].cells:
        if 'outputs' in cell:
            if len(cell['outputs']) != 0:
                is_changed = True
            cell['outputs'] = []
        if 'prompt_number' in cell:
            if cell['prompt_number'] is not None:
                is_changed = True
            cell['prompt_number'] = None
    return nb, is_changed
if __name__ == '__main__':
    # Usage check: the input notebook path is required.
    if len(sys.argv) < 2:
        print "USAGE: {0} <filename.ipynb> [optional_outfilename.ipynb]".format(sys.argv[0])
        print ""
        print "for i in *.ipynb; do ./{0} $i $i; done".format(sys.argv[0])
        sys.exit(-1)
    filename = sys.argv[1]
    with io.open(filename, 'r', encoding='utf8') as f:
        nb = current.read(f, 'json')
    nb_out, is_changed = strip_output(nb)
    if is_changed:
        # Only rewrite when an output path was given; otherwise warn and exit
        # non-zero so a git pre-commit hook fails and shows the fix command.
        if len(sys.argv) == 3:
            outfilename = sys.argv[2]
            with io.open(outfilename, 'w', encoding='utf8') as f:
                current.write(nb_out, f, 'json')
        else:
            print("\nWARNING: IPython Notebook Outputs not stripped!, run the following to command:")
            print("="*80)
            print("{0} {1} {1}".format(sys.argv[0],sys.argv[1]))
            sys.exit(-1)
    sys.exit(0)
| true
|
172f976e91235166ed95fd6af9646174a4d51b36
|
Python
|
vvspearlvvs/CodingTest
|
/2.프로그래머스lv1/카카오_다트게임/solution.py
|
UTF-8
| 643
| 3.3125
| 3
|
[] |
no_license
|
def solution(dartResult):
    """Kakao dart game: decode a dart record string into the total score.

    Each dart is <score 0-10><S|D|T>[*|#]:
      S/D/T raise the dart's score to the 1st/2nd/3rd power,
      '*' doubles this dart AND the previous one,
      '#' negates this dart.
    Returns the sum of the three dart scores.
    """
    score = []
    for i, num in enumerate(dartResult, 1):
        if num == "S":
            score[-1] **= 1
        elif num == "D":
            score[-1] **= 2
        elif num == "T":
            score[-1] **= 3
        elif num == "*":
            score[-1] *= 2
            if len(score) >= 2:
                # BUG FIX: '*' must double the PREVIOUS dart as well; the old
                # code applied `score[-1] *= 2` a second time, quadrupling the
                # current dart and leaving the previous one untouched
                # (solution('1S2D*3T') returned 44 instead of 37).
                score[-2] *= 2
        elif num == "#":
            score[-1] *= -1
        else:  # a digit: treat '10' as one number and skip its second char
            if dartResult[i-1:i+1] == '10':
                score.append(10)
            elif dartResult[i-2:i] != '10':
                score.append(int(num))
    return sum(score)


print(solution('1S2D*3T'))
| true
|
b7a1be63a69e0eb45fc3382636d2f75b893e7911
|
Python
|
daniel-reich/ubiquitous-fiesta
|
/ZrAnDiPTbmrJMHWHD_5.py
|
UTF-8
| 85
| 3.03125
| 3
|
[] |
no_license
|
def is_central(txt):
    """Return True if txt has odd length and its middle character is not a space.

    BUG FIX: the original returned the int 0 (from ``len(txt) % 2``) for
    even-length strings instead of a proper bool; the result is now always
    True/False.
    """
    return len(txt) % 2 == 1 and txt[len(txt) // 2] != " "
| true
|
054ebdd06335d410235ca29c91c677304023f660
|
Python
|
tiidadavena/cracking-the-coding-interview
|
/chapter-1/implementations/arraylist.py
|
UTF-8
| 2,433
| 3.4375
| 3
|
[] |
no_license
|
import timeit
import ctypes
class ArrayList:
    """Growable array backed by a plain Python list, grown via list.extend."""

    def __init__(self, capacity=2):
        self.index = 0                       # next free slot
        self.capacity = capacity             # current allocated size
        self.array_list = [None] * self.capacity

    def append(self, value):
        """Store value in the next free slot, doubling the backing list if full."""
        if self.index >= self.capacity:
            self._resize()
        self.array_list[self.index] = value
        self.index += 1

    def _resize(self):
        # Grow in place: append `capacity` empty slots, doubling the size.
        self.array_list.extend(None for _ in range(self.capacity))
        self.capacity *= 2
class ArrayListV2:
    """Growable array that reallocates a fresh backing list on resize."""

    def __init__(self, capacity=2):
        self.index = 0                       # next free slot
        self.capacity = capacity             # current allocated size
        self.array_list = [None] * self.capacity

    def append(self, value):
        """Store value in the next free slot, reallocating if full."""
        if self.index >= self.capacity:
            self._resize()
        self.array_list[self.index] = value
        self.index += 1

    def _resize(self):
        # Allocate a twice-as-large list and copy the used prefix across.
        self.capacity *= 2
        fresh = [None] * self.capacity
        fresh[:self.index] = self.array_list[:self.index]
        self.array_list = fresh
class ArrayListV3:
    """Growable array backed by a raw ctypes int64 buffer (unboxed storage)."""

    def __init__(self, capacity=2):
        self.index = 0                       # next free slot
        self.capacity = capacity             # current allocated size
        self.array_list = (ctypes.c_int64 * self.capacity)()

    def append(self, value):
        """Store value in the next free slot, reallocating the buffer if full."""
        if self.index >= self.capacity:
            self._resize()
        self.array_list[self.index] = value
        self.index += 1

    def _resize(self):
        # Allocate a twice-as-large ctypes buffer and copy the used prefix.
        self.capacity *= 2
        grown = (ctypes.c_int64 * self.capacity)()
        for pos in range(self.index):
            grown[pos] = self.array_list[pos]
        self.array_list = grown
def main():
    """Benchmark appends: built-in list vs the three ArrayList variants.

    NOTE(review): the ArrayList variants are pre-sized to n, so they never
    resize during the run, while the built-in list starts empty — confirm
    this asymmetry is intended for the comparison.
    """
    n = 100000
    # Baseline: CPython's built-in list.
    start = timeit.default_timer()
    al = []
    for i in range(n):
        al.append(i)
    stop = timeit.default_timer()
    print('Time: ', stop - start)
    # V1: resize via list.extend.
    start = timeit.default_timer()
    al = ArrayList(n)
    for i in range(n):
        al.append(i)
    stop = timeit.default_timer()
    print('Time V1: ', stop - start)
    # V2: resize via fresh list + manual copy.
    start = timeit.default_timer()
    al = ArrayListV2(n)
    for i in range(n):
        al.append(i)
    stop = timeit.default_timer()
    print('Time V2: ', stop - start)
    # V3: resize via fresh ctypes buffer + manual copy.
    start = timeit.default_timer()
    al = ArrayListV3(n)
    for i in range(n):
        al.append(i)
    stop = timeit.default_timer()
    print('Time V3: ', stop - start)


if __name__ == '__main__':
    main()
| true
|
290f6b8259a68324b8ae9057d5c1f64d4d8ee29d
|
Python
|
maiff/shit-game
|
/util.py
|
UTF-8
| 318
| 2.6875
| 3
|
[] |
no_license
|
import pygame
from pygame.locals import *
def cropimg(image, region):
    """Load an image file and return the (x1, y1, x2, y2) region as a new Surface.

    region is (left, top, right, bottom) in pixels; the returned surface has
    per-pixel alpha (SRCALPHA).
    """
    x1,y1,x2,y2 = region
    buttonStates = pygame.image.load(image).convert_alpha()
    print(region[2:])  # debug: bottom-right corner of the crop region
    cropped = pygame.Surface((x2-x1, y2-y1), flags=SRCALPHA)
    # Copy the source rectangle into the top-left of the new surface.
    cropped.blit(buttonStates, (0, 0), (x1,y1,x2-x1, y2-y1))
    return cropped
| true
|
af9feb473a68775ec2fae9e06d7cbfd04c3b3a6b
|
Python
|
chihaoui-dev/Arcade-CS-Games
|
/minigames/its_raining_beer/beer_manager.py
|
UTF-8
| 1,198
| 3.203125
| 3
|
[] |
no_license
|
import random
from beer import Beer
class BeerManager():
    """Spawns falling beers, advances them each frame, and handles cleanup."""

    def __init__(self, max_beers, default_speed, game_size):
        self.max_beers = max_beers          # cap on simultaneously live beers
        self.default_speed = default_speed  # base falling speed
        self.width, self.height = game_size
        self.beers = []
        self.ticks = 0  # frame counter used to pace spawning

    def update(self):
        """Advance one frame: maybe spawn a beer, move all, drop off-screen ones."""
        self.ticks += 1
        if len(self.beers) < self.max_beers:
            # Spawn at most one beer every 12 frames, at a random x above the screen.
            if self.ticks % 12 == 0:
                self.beers.append(Beer((random.randint(20, self.width - 20), -50), int(0.05 * self.width), self.default_speed + random.random() * 0.25))
        for beer in self.beers:
            beer.move()
        # Remove beers that fell past the bottom edge.
        self._delete_beers([beer for beer in self.beers if beer.rect.y > self.height])

    def detect_collision(self, player):
        """Remove beers the player touches and credit one point per catch."""
        old_beers = []
        for beer in self.beers:
            if player.rect.colliderect(beer.rect):
                old_beers.append(beer)
                player.score += 1
        self._delete_beers(old_beers)

    def blit(self, screen):
        """Draw every live beer onto the screen."""
        for beer in self.beers:
            beer.blit(screen)

    def _delete_beers(self, old_beers):
        # Kill the sprites and drop them from the tracking list.
        for beer in old_beers:
            beer.kill()
            self.beers.remove(beer)
| true
|
1e72c2bd94951ca304f86615b0c298ee92c250cf
|
Python
|
amirrouh/genepy
|
/parse.py
|
UTF-8
| 4,132
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
def read(link, data_dim):
    """
    This module downloads gene expression profile from NCBI GEO FTP website and parses it

    Parameters
    ----------
    link : str
        This is the link to GEO soft_full.gz file on NCBI website
    data_dim : int
        This gets number of genes needed to be considered ( data_dim = 10; only first 10 genes from the
        top of the input file will be considered in parsing data and data_dim = None means all the data)

    Returns
    -------
    dataset : numpy array
        dataset is a numpy array. Each row represents one cell gene expression data, the i_th column
        from the left shows the i_th gene expression values from the top in row input data table and
        the last column on the right shows the subset description type 0 being the first one showing
        in the input file.
    """
    # CLEANUP: removed unused `import pickle` and
    # `from matplotlib.pyplot import plot as plt`, plus a dead commented-out
    # pickle-dump block — none were used by this function.
    import gzip
    import numpy as np
    import pandas as pd
    import os

    file_name = link.split('/')[-1]
    dir_root = os.listdir()
    # Ensure the scratch directory for downloads/artifacts exists.
    if 'temp' not in dir_root:
        os.mkdir('temp')
    else:
        pass
    dir_temp = os.listdir('temp/')
    # If the input file does not exist then it will download the file, otherwise,
    # the code will use the existing file
    if file_name not in dir_temp:
        import urllib.request
        # CLEANUP: dropped a pointless `[0]` index on this discarded result.
        urllib.request.urlretrieve(link, 'temp/' + file_name)
    else:
        pass
    # Data input dimension to simplify is defined (None => imports all the genes unless number of genes are declared)
    with gzip.open('temp/' + file_name, 'rt') as f:
        # sd: subset description
        # si: subset id
        # ge: gene expression
        sd, si, ge = [], [], []
        # Obtain subset description
        for line in f:
            if "!subset_description" in line:
                sd.append(line.split('=')[1].strip())
            elif "!subset_sample_id" in line:
                si.append(line.split('=')[1].strip().split(','))
            elif "!dataset_table_begin" in line:
                break
        # Total number of samples across all subsets.
        # CLEANUP: the generator variable used to be named `sd`, shadowing the
        # subset-description list above.
        subset_number = sum(len(ids) for ids in si)
        # Read the gene info table
        for line in f:
            if "!dataset_table_begin" in line:
                break
            elif "!dataset_table_end" in line:
                break
            ge.append(line.split()[:2 + subset_number])
    ge = pd.DataFrame(ge)
    # Promote the first parsed row to the column headers.
    new_header = ge.iloc[0]
    ge.columns = new_header
    ge = ge[1:]
    ge_array = np.array(ge)
    # Expression values start at column 2 (after the two identifier columns).
    d = ge_array[:, 2:].astype(float)
    # Convert numpy array to training format for SVM solver
    data = []
    for i in range(len(d[0, :])):
        data.append(d[:data_dim, i])
    data = np.array(data)
    # Assign numbers to subset types and make a target vector for classification
    labels = []
    for i in range(0, len(sd)):
        labels.append(len(si[i]) * [i])
    # Merge the target groups (each type is a list in python,
    # this part merges the parts to have unit target vector)
    label_tmp = []
    for j in range(len(labels)):
        label_tmp += labels[j]
    labels = np.array(label_tmp)
    # dimension of input gene expression
    label_dimension = len(ge_array[0, 2:])
    labels = labels.reshape((label_dimension, 1))
    # This line joins the data and labels as a new 2D array
    dataset = np.concatenate((data, labels), axis=1)
    # This part randomly shuffles the data to be ready for training and testing purposes
    np.random.shuffle(dataset)
    # This file will be saved into temp folder in case of any need for review
    np.savetxt('temp/dataset.csv', dataset, fmt='%.3f', delimiter=',', newline='\n', header='')
    # This binary file will be save in the temp folder for faster load in other modules
    np.save('temp/dataset_binary', dataset)
    return dataset
| true
|
7a46fd0f0767f7180cef288e40c0f97fcec305ae
|
Python
|
zabcdefghijklmnopqrstuvwxy/AI-Study
|
/1.numpy/topic84/topic84.py
|
UTF-8
| 273
| 2.75
| 3
|
[] |
no_license
|
import numpy as np

# Print every N x N sub-matrix (sliding window) of a random S x S matrix.
N = 3   # window side length
S = 10  # matrix side length

arr = np.random.randint(0, 100, size=(S, S))
print(arr)

# CLEANUP: removed a dead `Z = np.zeros(shape=(3, 3))` initialization (always
# overwritten) and replaced the range(S) + bounds-check guards with direct
# ranges over the valid top-left window positions.
for j in range(S - N + 1):
    for i in range(S - N + 1):
        Z = arr[j:j + N, i:i + N]
        print(Z)
| true
|
61e3be001b4644ea40846cd2e5d95ac2af11d676
|
Python
|
nitin-gupta/Python_Apps
|
/TestCode_3_GlobalFunc_Vars/TestCode.py
|
UTF-8
| 2,511
| 3.375
| 3
|
[] |
no_license
|
# This is the test code which performs the following
# a) Detects the Python interpreter version at runtime
# b) Depending upon the Python version, calls the main function
import sys
import threading
import time, datetime
from threading import Event, Thread
import GlobalFunc
import GlobalVars
# Banner text printed at startup by main1().
l_welcome = "Welcome to the World of Python-Timer Interrupt"
i = 0  # NOTE(review): appears unused in this file — confirm before removing
WAIT_SECONDS = 0.01  # period (seconds) between timer-driven calls to foo()
class RepeatedTimer:
    """Repeat `function` every `interval` seconds on a background thread."""
    def __init__(self, interval, function, *args, **kwargs):
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.start = time.time()   # reference point so ticks stay phase-aligned
        self.event = Event()       # set by stop() to end the loop
        self.thread = Thread(target=self._target)
        self.thread.start()

    def _target(self):
        # Sleep until the next tick (or until stop() sets the event), then fire.
        while not self.event.wait(self._time):
            self.function(*self.args, **self.kwargs)

    @property
    def _time(self):
        # Seconds until the next interval boundary, measured from the original
        # start time so the schedule does not drift as calls take time.
        return self.interval - ((time.time() - self.start) % self.interval)

    def stop(self):
        """Stop the repeating timer and wait for the worker thread to exit."""
        self.event.set()
        self.thread.join()
#
class PythonSwitchStatement:
    """Emulates a switch statement by dispatching to case_<n> methods."""

    def switch(self, month):
        """Return the month name for 1-6, or 'Incorrect month' otherwise."""
        default = "Incorrect month"
        # getattr falls back to a lambda returning the default for unknown cases.
        return getattr(self, 'case_' + str(month), lambda: default)()

    def case_1(self):
        # BUG FIX: removed a stray print("January") — no other case prints,
        # and switch() is expected to return, not emit, the month name.
        return "January"

    def case_2(self):
        return "February"

    def case_3(self):
        return "March"

    def case_4(self):
        return "April"

    def case_5(self):
        return "May"

    def case_6(self):
        return "June"
# Defining main function
def main1():
print(l_welcome)
print(time.ctime())
#threading.Timer(WAIT_SECONDS, major_loop).start()
def foo():
    """Run one iteration of GlobalFunc.major_loop, then re-arm itself.

    NOTE(review): each call schedules a fresh Timer thread every WAIT_SECONDS
    (10 ms) — confirm this rate and thread churn are intended.
    """
    #print("Call foo")
    GlobalFunc.major_loop()
    threading.Timer(WAIT_SECONDS, foo).start()
# Report which interpreter is running this script.
print("Detecting Python version....")
print (sys.version)
print("Version info.")
print (sys.version_info)
# Auto Detection of Python Interpreter
'''
if(sys.version_info[0] >= 3):
    print("Result : Python Version 3")
    main1()
    major_loop()
else:
    print("Python Version 2")
# Using the special variable
# __name__
if __name__=="__main__":
    main1()
'''
if __name__ == "__main__":
    # Reset the shared state before starting the timer-driven loop.
    GlobalVars.g_state_flag = False
    GlobalVars.g_state_ctr = 0
    print(GlobalVars.g_state_flag)
    print(GlobalVars.g_state_ctr)
    foo()  # kick off the self-rescheduling timer chain
    # Keep the main thread alive; the timers run on background threads.
    while True:
        time.sleep(0.01)
        print("In while : " + str(time.ctime()))
| true
|
f2bbfaa510af6ed37384a0af86330126fcc8be82
|
Python
|
luckyzhangqian/Rumor_indentify
|
/Evaluation_index.py
|
UTF-8
| 859
| 3.296875
| 3
|
[] |
no_license
|
import numpy as np
def evalution_rate(predict_label, real_label):
    """Print accuracy, precision, recall and F1 for rumor classification.

    Assumes both arguments are numpy arrays of +1 (rumor) / -1 (non-rumor)
    labels — TODO confirm; the +/-2 sum/difference tests below rely on it.
    NOTE(review): raises ZeroDivisionError when there are no predicted or no
    actual positives. Returns None; results are printed only.
    """
    length = len(predict_label)
    addition_label = predict_label + real_label
    subtraction_label = predict_label - real_label
    tp = np.sum(addition_label == 2)      # predicted rumor, actually rumor
    fp = np.sum(subtraction_label == 2)   # predicted rumor, actually non-rumor
    fn = np.sum(subtraction_label == -2)  # predicted non-rumor, actually rumor
    tn = np.sum(addition_label == -2)     # predicted non-rumor, actually non-rumor
    accuracy_rate = (tp + tn) / length    # accuracy
    precision_rate = tp / (tp + fp)       # precision
    recall_rate = tp / (tp + fn)          # recall
    f1 = 2 * precision_rate * recall_rate / (precision_rate + recall_rate)
    print('accuracy_rate', accuracy_rate)
    print('precision_rate', precision_rate)
    print('recall_rate', recall_rate)
    print('F1', f1)
| true
|