# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0009_auto_20150517_0922'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='is_admin',
),
]
| NTsystems/NoTes-API | notes/apps/account/migrations/0010_remove_user_is_admin.py | Python | mit | 352 |
# Code generated by cfonts_to_trans_py.py
import TFTfont
_dejavu10lean = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x30\xc3\x0c\x30\xc3\x0c\x30\xc0\x00\x00\x03\x00\x00\x00\x00\x00'\
b'\x00\x33\x33\x33\x33\x33\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x01\x8c\x01\x98\x01\x98\x03\x18\x1f\xff\x03\x30\x03\x30\x06\x30\x06\x30\x3f\xfe\x06\x60\x06\x60\x0c\x60\x0c\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x30\x03\x00\xfc\x3b\x63\x30\x33\x03\x30\x1b\x00\xfc\x03\x60\x33\x03\x30\x33\x33\x61\xfc\x03\x00\x30\x03\x00\x00\x00\x00\x00'\
b'\x00\x00\x03\xc0\xc0\xcc\x18\x19\x86\x06\x30\xc0\xc6\x30\x0c\xcc\x01\x99\x80\x1e\x63\x00\x0d\xf8\x03\x33\x00\xc6\x60\x19\x86\x06\x19\x81\xc3\x30\x30\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0f\x80\x31\x80\xc0\x01\x80\x03\x80\x03\x00\x07\x00\x1b\x06\x63\x0c\xc3\x31\x83\x63\x03\x86\x07\x06\x1f\x0f\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x03\x33\x33\x30\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x18\x30\xc1\x83\x0c\x18\x30\x60\xc1\x83\x03\x06\x0c\x0c\x18\x18\x00\x00\x00'\
b'\x00\x60\x60\xc0\xc1\x83\x03\x06\x0c\x18\x30\xc1\x83\x06\x18\x30\xc0\x00\x00\x00'\
b'\x00\x00\x00\xc1\xb3\x3f\x87\x81\xf1\xf7\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x3f\xff\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x31\x80\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00'\
b'\x00\x0c\x18\x30\xc1\x83\x06\x18\x30\x61\x83\x06\x18\x30\x61\x80\x00\x00\x00\x00'\
b'\x00\x00\x78\x1c\xc1\x86\x30\x63\x03\x30\x33\x03\x30\x33\x03\x30\x33\x03\x30\x73\x86\x18\x60\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x60\x7c\x19\x80\x30\x06\x00\xc0\x18\x03\x00\x60\x0c\x01\x80\x30\x06\x00\xc0\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xf8\x38\xc0\x06\x00\x60\x06\x00\x60\x0e\x00\xc0\x18\x03\x00\x60\x0c\x01\x80\x30\x07\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xf8\x38\xe0\x06\x00\x60\x06\x00\x60\x0c\x0f\xc0\x06\x00\x60\x03\x00\x30\x06\x00\x63\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x1c\x01\xc0\x3c\x06\xc0\x6c\x0c\xc1\x8c\x18\xc3\x0c\x70\xc7\xff\x00\xc0\x0c\x00\xc0\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\xf8\x60\x0c\x01\x80\x30\x06\x00\xfe\x00\xe0\x0c\x01\x80\x30\x06\x00\xc0\x39\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x7e\x0c\x01\x80\x18\x03\x00\x30\x03\xfc\x38\x63\x83\x30\x33\x03\x30\x31\x83\x18\x60\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\xff\x00\x60\x06\x00\x60\x0c\x00\xc0\x18\x01\x80\x18\x03\x00\x30\x03\x00\x60\x06\x00\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xf8\x18\xe3\x06\x30\x63\x06\x18\x61\x8c\x0f\xc1\x86\x30\x33\x03\x30\x33\x03\x18\x61\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x78\x1c\xc3\x86\x30\x63\x03\x30\x33\x07\x38\x71\xcf\x07\xb0\x03\x00\x30\x06\x00\xc1\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x63\x00\x00\x00\x00\x03\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x63\x00\x00\x00\x00\x03\x19\x80\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x3c\x01\xe0\x0f\x00\x38\x00\x1e\x00\x03\x80\x00\xf0\x00\x1e\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\xff\x00\x00\x00\x00\x3f\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x00\x0f\x00\x01\xe0\x00\x38\x00\x0f\x00\x1e\x00\xf0\x03\x80\x1e\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x07\xc3\x18\x03\x00\xc0\x30\x18\x0c\x06\x03\x00\xc0\x30\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x07\xf8\x01\x81\x80\xe0\x18\x18\x01\x86\x1c\x18\xc6\xf3\x39\x8e\x66\x30\xcc\xc6\x19\x98\xc3\x33\x18\x66\x33\x1d\x86\x3f\xe0\xe0\x00\x0c\x00\x00\xc0\x60\x0e\x38\x00\x7c\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x00\x38\x01\xe0\x06\xc0\x1b\x00\xcc\x03\x18\x18\x60\x61\xc1\x83\x0f\xfc\x30\x18\xc0\x66\x01\x98\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\xfc\x0c\x38\x60\x63\x03\x18\x18\xc0\xc6\x0c\x3f\xe1\x81\x8c\x06\x60\x33\x01\x98\x18\xc0\xc7\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0f\xc0\xe1\xc6\x00\x18\x00\xc0\x03\x00\x0c\x00\x30\x00\xc0\x03\x00\x0c\x00\x30\x00\x60\x00\xc0\xc1\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x7f\x00\xc3\xc1\x80\xc3\x00\xc6\x01\x8c\x03\x98\x03\x30\x06\x60\x0c\xc0\x19\x80\x63\x00\xc6\x03\x0c\x0e\x1f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\xff\x30\x03\x00\x30\x03\x00\x30\x03\x00\x3f\xe3\x00\x30\x03\x00\x30\x03\x00\x30\x03\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x07\xfc\xc0\x18\x03\x00\x60\x0c\x01\x80\x3f\xc6\x00\xc0\x18\x03\x00\x60\x0c\x01\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0f\xc0\xe1\xc6\x00\x18\x00\xc0\x03\x00\x0c\x00\x30\x00\xc1\xf3\x00\xcc\x03\x30\x0c\x60\x30\xc0\xc1\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xc0\x33\x00\xcc\x03\x30\x0c\xc0\x33\x00\xcc\x03\x3f\xfc\xc0\x33\x00\xcc\x03\x30\x0c\xc0\x33\x00\xcc\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x03\x33\x33\x33\x33\x33\x33\x33\x00\x00\x00'\
b'\x03\x33\x33\x33\x33\x33\x33\x33\x33\x3e\x00'\
b'\x00\x00\xc0\xc6\x0c\x30\xc1\x8c\x0c\xc0\x6c\x03\xc0\x1c\x00\xf0\x06\xc0\x33\x01\x8c\x0c\x30\x60\xc3\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\x00\x30\x03\x00\x30\x03\x00\x30\x03\x00\x30\x03\x00\x30\x03\x00\x30\x03\x00\x30\x03\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x38\x0c\x70\x38\xe0\x71\xe0\xe3\xc3\xc7\x87\x8d\x9b\x1b\x36\x33\x6c\x67\x98\xcf\x31\x8e\x63\x00\xc6\x01\x8c\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\xc0\xce\x06\x78\x33\xe1\x9b\x0c\xdc\x66\x63\x31\x99\x8c\xcc\x36\x61\xb3\x07\x98\x3c\xc0\xe6\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x07\xc0\x38\xe0\xc0\xe3\x80\xc6\x00\xcc\x01\x98\x03\x30\x06\x60\x0c\xc0\x19\x80\x33\x00\xc3\x01\x83\x06\x03\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\xf8\x30\xe3\x06\x30\x33\x03\x30\x33\x07\x30\xe3\xf8\x30\x03\x00\x30\x03\x00\x30\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x07\xc0\x38\xe0\xc0\xe3\x80\xc6\x00\xcc\x01\x98\x03\x30\x06\x60\x0c\xc0\x19\x80\x33\x00\xc3\x01\x83\x06\x03\xf8\x01\xb0\x00\x70\x00\x70\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xfe\x03\x0e\x0c\x18\x30\x30\xc0\xc3\x03\x0c\x18\x30\xe0\xff\x03\x06\x0c\x1c\x30\x30\xc0\xe3\x01\x8c\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xfc\x18\x73\x00\x30\x03\x00\x30\x01\x80\x0f\x80\x0e\x00\x30\x03\x00\x30\x03\x30\x61\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x07\xff\xc0\xc0\x06\x00\x30\x01\x80\x0c\x00\x60\x03\x00\x18\x00\xc0\x06\x00\x30\x01\x80\x0c\x00\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\x80\xcc\x06\x60\x33\x01\x98\x0c\xc0\x66\x03\x30\x19\x80\xcc\x06\x60\x33\x01\x8c\x0c\x60\xc1\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\x80\x36\x00\xdc\x06\x30\x18\xc0\xc1\x83\x06\x0c\x18\x60\x31\x80\xc6\x03\xb0\x06\xc0\x1e\x00\x38\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x0c\x0c\xc1\xc1\x98\x38\x63\x0f\x0c\x61\xe1\x86\x36\x30\xc6\xcc\x19\x99\x83\x33\x30\x36\x36\x06\xc7\x80\xf0\xf0\x1e\x1e\x01\xc1\xc0\x38\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\x80\xce\x0c\x30\x60\xc6\x07\x60\x1b\x00\x70\x03\x80\x1c\x01\xb0\x19\xc0\xc6\x0c\x18\xc0\xe6\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\x00\xd8\x0c\x60\xc1\x86\x0c\x60\x36\x00\xf0\x07\x00\x18\x00\xc0\x06\x00\x30\x01\x80\x0c\x00\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\xff\xf0\x01\x80\x0e\x00\x30\x01\x80\x0c\x00\x60\x03\x00\x0c\x00\x60\x03\x00\x18\x00\xc0\x07\x00\x1f\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x7c\xc1\x83\x06\x0c\x18\x30\x60\xc1\x83\x06\x0c\x18\x30\x60\xf8\x00\x00\x00'\
b'\x01\x81\x83\x06\x06\x0c\x18\x38\x30\x60\xc0\xc1\x83\x03\x06\x0c\x00\x00\x00\x00'\
b'\x00\x7c\x18\x30\x60\xc1\x83\x06\x0c\x18\x30\x60\xc1\x83\x06\x0c\xf8\x00\x00\x00'\
b'\x00\x00\x01\x80\x07\x80\x1b\x80\x63\x81\x83\x86\x01\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\xfe\x00\x00'\
b'\x38\x30\x30\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x3f\x0c\x30\x03\x00\x61\xfc\xe1\x98\x33\x06\x61\xcc\x78\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\x00\x30\x03\x00\x30\x03\x7c\x3c\x63\x83\x30\x33\x03\x30\x33\x03\x30\x33\x83\x38\x63\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x1f\xce\x01\x80\x60\x0c\x01\x80\x30\x06\x00\x60\x0c\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x03\x00\x30\x03\x00\x30\x7f\x1c\x71\x83\x30\x33\x03\x30\x33\x03\x30\x31\x83\x1c\x70\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x18\x63\x06\x30\x33\x03\x3f\xf3\x00\x30\x03\x00\x18\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0f\x18\x18\x18\x7e\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x1f\x06\x39\x83\x30\x66\x0c\xc1\x98\x33\x06\x61\xc6\x38\x7f\x00\x60\x0c\x01\x9f\xe0\x00\x00\x00'\
b'\x00\x03\x00\x30\x03\x00\x30\x03\xfc\x38\x63\x86\x30\x63\x03\x30\x33\x03\x30\x33\x03\x30\x33\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x03\x00\x03\x33\x33\x33\x33\x33\x00\x00\x00'\
b'\x03\x00\x03\x33\x33\x33\x33\x33\x33\x3e\x00'\
b'\x00\x03\x00\x60\x0c\x01\x80\x30\xc6\x30\xcc\x1b\x03\xc0\x70\x0f\x01\xb0\x33\x06\x30\xc3\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x03\x33\x33\x33\x33\x33\x33\x33\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d\xf3\xe3\xc7\x8c\xe1\xc3\x30\x60\xcc\x18\x33\x06\x0c\xc1\x83\x30\x60\xcc\x18\x33\x06\x0c\xc1\x83\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x03\x7c\x3c\x63\x86\x30\x63\x03\x30\x33\x03\x30\x33\x03\x30\x33\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x18\xe3\x06\x30\x63\x03\x30\x33\x03\x30\x63\x06\x18\x60\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x03\x7c\x3c\x63\x83\x30\x33\x03\x30\x33\x03\x30\x33\x83\x38\x63\xfc\x30\x03\x00\x30\x03\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x7f\x1c\x71\x83\x30\x33\x03\x30\x33\x03\x30\x31\x83\x1c\x70\xff\x00\x30\x03\x00\x30\x03\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x01\xbc\xf0\x70\x30\x18\x0c\x06\x03\x01\x80\xc0\x60\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x07\xe3\x00\xc0\x30\x0e\x00\xf0\x06\x00\xc0\x36\x0d\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x30\x30\x30\x7f\x30\x30\x30\x30\x30\x30\x30\x30\x18\x0f\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x03\x03\x30\x33\x03\x30\x33\x03\x30\x33\x03\x30\x33\x07\x18\xf0\xfb\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xc0\xcc\x19\x83\x30\xc3\x18\x63\x06\xc0\xd8\x1e\x01\xc0\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x38\xcc\x71\x98\xe3\x33\xc6\x66\xd8\x6d\xb0\xdb\x61\xe6\xc3\xc7\x03\x8e\x07\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x60\xc6\x30\xc6\x0d\x80\xe0\x1c\x07\x80\xd8\x31\x8c\x1b\x83\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x06\x03\x30\x63\x06\x30\xc1\x8c\x18\xc0\xd8\x0d\x80\xf0\x07\x00\x70\x06\x00\x60\x0c\x03\x80\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xff\xc0\x30\x0c\x03\x80\x60\x18\x06\x01\x80\x60\x18\x03\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xf0\x30\x0c\x03\x00\xc0\x30\x0c\x03\x01\x80\xe0\x1c\x03\x00\xc0\x30\x0c\x03\x00\xc0\x3c\x00\x00\x00\x00'\
b'\x00\xc6\x31\x8c\x63\x18\xc6\x31\x8c\x63\x18\xc6\x31\x80'\
b'\x00\x0f\x01\x80\xc0\x60\x30\x18\x0c\x07\x01\x80\xe0\xe0\x60\x30\x18\x0c\x06\x03\x07\x80\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x1f\xc6\x30\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xf9\x99\x99\x99\x99\xf0\x00\x00\x00'\
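# The index below stores one little-endian 16-bit byte offset per glyph into
# the bitmap blob above; e.g. its first two entries, 0x0000 and 0x001c, bound
# the 28-byte bitmap of the first glyph.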
_dejavu10lean_index = b'\x00\x00\x1c\x00\x2d\x00\x43\x00\x6f\x00\x90\x00\xc5\x00\xef\x00'\
b'\xfa\x00\x0e\x01\x22\x01\x3e\x01\x6a\x01\x78\x01\x8c\x01\x9a\x01'\
b'\xae\x01\xcf\x01\xee\x01\x0f\x02\x30\x02\x51\x02\x70\x02\x91\x02'\
b'\xb2\x02\xd3\x02\xf4\x02\x02\x03\x10\x03\x3c\x03\x68\x03\x94\x03'\
b'\xb0\x03\xe5\x03\x0c\x04\x30\x04\x57\x04\x81\x04\xa2\x04\xc1\x04'\
b'\xe8\x04\x0f\x05\x1a\x05\x25\x05\x49\x05\x6a\x05\x94\x05\xb8\x05'\
b'\xe2\x05\x03\x06\x2d\x06\x54\x06\x75\x06\x99\x06\xbd\x06\xe4\x06'\
b'\x19\x07\x3d\x07\x61\x07\x88\x07\x9c\x07\xb0\x07\xc4\x07\xee\x07'\
b'\x0d\x08\x21\x08\x40\x08\x61\x08\x80\x08\xa1\x08\xc2\x08\xd8\x08'\
b'\xf7\x08\x18\x09\x23\x09\x2e\x09\x4d\x09\x58\x09\x8a\x09\xab\x09'\
b'\xcc\x09\xed\x09\x0e\x0a\x27\x0a\x43\x0a\x59\x0a\x7a\x0a\x99\x0a'\
b'\xc3\x0a\xe2\x0a\x03\x0b\x22\x0b\x3e\x0b\x4c\x0b\x65\x0b\x91\x0b'\
b'\x9c\x0b'
dejavu10lean = TFTfont.TFTFont(_dejavu10lean, _dejavu10lean_index, 22, 19, 96)
fonts = {"dejavu10lean":dejavu10lean,
}
| robert-hh/SSD1963-TFT-Library-for-PyBoard | fonts/dejavu10lean.py | Python | mit | 13,426 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE",
"owndb.settings"
)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| ofilipowicz/owndb | manage.py | Python | mit | 251 |
"""
stringjumble.py
Author: Dina
Credit: Me and Jazzy and Anoushka
Assignment:
The purpose of this challenge is to gain proficiency with
manipulating lists.
Write and submit a Python program that accepts a string from
the user and prints it back in three different ways:
* With all letters in reverse.
* With words in reverse order, but letters within each word in
the correct order.
* With all words in correct order, but letters reversed within
the words.
Output of your program should look like this:
Please enter a string of text (the bigger the better): There are a few techniques or tricks that you may find handy
You entered "There are a few techniques or tricks that you may find handy". Now jumble it:
ydnah dnif yam uoy taht skcirt ro seuqinhcet wef a era erehT
handy find may you that tricks or techniques few a are There
erehT era a wef seuqinhcet ro skcirt taht uoy yam dnif ydnah
"""
text = input("Please enter a string of text (the bigger the better): ")
print('You entered "' + text + '". Now jumble it: ')
#part 1 (in reverse)
print(text[::-1])
#part 2 (words right)
words = text.split(" ")
print (' '.join(words[::-1]))
#part 3 (letters reversed within each word)
for word in words:
    print(word[::-1], end=" ")
| dina-hertog/String-Jumble | stringjumble.py | Python | mit | 1,281 |
# Copyright(c) 2017, Dimitar Venkov
# @5devene, [email protected]
# www.badmonkeys.net
def tolist(x):
if hasattr(x,'__iter__'): return x
else : return [x]
def n2s(n, digits):
if digits is not None:
n = round(n, digits)
s1 = str(n)
if s1[-2:] == '.0':
s1 = s1[:-2]
return s1
def p2s(p, sep=IN[1], digits=IN[2]):
x = n2s(p.X, digits)
y = n2s(p.Y, digits)
z = n2s(p.Z, digits)
return ''.join( (x, sep, y, sep, z) )
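# Illustration with hypothetical inputs: for a point-like object with
# X=1.236, Y=5.0, Z=0.4, sep="," and digits=2, p2s returns "1.24,5,0.4" --
# n2s rounds first, then drops a trailing ".0" so whole numbers print bare.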
pts = tolist(IN[0])
OUT = map(p2s, pts)
| dimven/SpringNodes | py/Point.ToString.py | Python | mit | 473 |
from wtforms import Form, StringField, DateField, RadioField, BooleanField, validators
class TaskForm(Form):
name = StringField('Name', [validators.InputRequired()])
date = DateField('Date', [validators.InputRequired()])
done = BooleanField('Done')
priority = RadioField('Priority', coerce=int,
choices=[(-1, 'unimportant'),
(0, 'standard'),
(1, 'important')])
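# Hypothetical usage sketch (e.g. inside a Flask view; names are illustrative,
# not part of this module): the form binds request data, validates it, and
# exposes coerced values:
#
#   form = TaskForm(request.form)
#   if form.validate():
#       save_task(form.name.data, form.date.data, form.priority.data)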
| roxel/planner | app/planner/forms.py | Python | mit | 476 |
from distutils.core import setup
import py2exe
setup(console=['main.py'], py_modules=['main', 'friendship'])
| DenBaum/lolm8guesser | setup.py | Python | mit | 112 |
import unittest
import os
import logging
from datetime import date
from decimal import Decimal
from ibrokersflex import parse_flex_accounts, parse_flex_positions, parse_flex_flows
class LoadFlexResultsTestCase(unittest.TestCase):
def setUp(self):
self.tree = None
example_file_path = os.path.abspath(os.sep.join(['tests-data', 'example-result.xml']))
logging.info('loading example result file: {}'.format(example_file_path))
with open(example_file_path, 'r') as result_content:
lines = result_content.readlines()
content = ''.join(lines)
self.content = content
def test_load_accounts(self):
accounts = parse_flex_accounts(self.content)
self.assertEqual(accounts['U1812119']['nav_change'], 93262)
self.assertEqual(accounts['U1812946']['account_alias'], 'Vol 946')
self.assertEqual(accounts['U1812119']['cash'], 1520018)
self.assertEqual(accounts['U1812946']['cash'], 1515189)
total_cash = 0.
total_nav = 0.
for account_code in accounts:
total_cash += accounts[account_code]['cash']
total_nav += accounts[account_code]['nav_end']
self.assertAlmostEqual(total_cash, 8245576, places=0)
self.assertAlmostEqual(total_nav, 12176892, places=0)
def test_load_flows(self):
flows = parse_flex_flows(self.content)
self.assertTrue(date(2017, 8, 7) in flows.index)
self.assertTrue(date(2017, 8, 6) not in flows.index)
result = flows.loc[date(2017, 8, 7)]
expected = {
'U1812119': Decimal('-650000'),
'U2036485': Decimal('250000'),
}
self.assertDictEqual(result.to_dict(), expected)
def test_load_positions(self):
positions = parse_flex_positions(self.content)
self.assertEqual(positions['U1760542']['132757576']['description'], 'EUR 18SEP17')
self.assertEqual(positions['U1812038']['215313814']['description'], 'BAX 17DEC18')
self.assertEqual(positions['U1812038']['119750055']['currency'], 'USD')
self.assertEqual(positions['U1812038']['259910541']['assetCategory'], 'FUT')
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
unittest.main()
| chris-ch/lemvi-risk | tests/test_load_flex.py | Python | mit | 2,329 |
import _plotly_utils.basevalidators
class TickformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="tickformat", parent_name="indicator.gauge.axis", **kwargs
):
super(TickformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/indicator/gauge/axis/_tickformat.py | Python | mit | 478 |
#!/usr/bin/env python
import os
import json
import re
from pymongo import MongoClient
import config
def connect():
client = MongoClient()
db = client['disasters']
return db
db = connect()
data_dir = config.data_dir
def get_dpla(item):
if "object" not in item:
return None
if item["object"] == "http://fdlp.gov/images/gpo-tn.jpg":
return None
record = {
"id": item['id'],
"title": item["sourceResource"]["title"],
"thumbnail": item["object"],
"nativeView": item['isShownAt'],
"platformView": "http://dp.la/item/" + item["id"],
"provider": item["provider"]["name"],
"displayDate":
item.get("sourceResource").get("date", {}).get("displayDate"),
"dateBegin": item.get("sourceResource").get("date", {}).get("begin"),
"dateEnd": item.get("sourceResource").get("date", {}).get("end"),
"description": item.get("sourceResource").get("description"),
"rights": item.get("sourceResource").get("rights"),
"source": "dpla"
}
# account for missing data and different data types
if "collection" in item["sourceResource"]:
record["coll"] = []
try: # if list of dictionaries
record["coll"].append(item["sourceResource"]["collection"]["title"])
except: # if dictionary
for collection in item["sourceResource"]["collection"]:
if len(collection.get("title")):
record["coll"].append(collection.get("title"))
        # skip items that belong to the FEMA records collection
        if record["coll"] and record["coll"][0] == "Records of the Federal Emergency Management Agency, 1956 - 2008":
            return None
if "spatial" in item.get("sourceResource"):
record["spatial"] = []
points = []
for space in item["sourceResource"]["spatial"]:
record["spatial"].append(space)
# filter out coordinates for center of US and
# put coordinates in its own list: points
if "coordinates" in space:
if space["coordinates"] == "39.4432563782, -98.9573364258":
pass
else:
points.append(space["coordinates"])
if len(points):
record["points"] = points
if "subject" in item.get("sourceResource"):
record["subjects"] = []
for subject in item["sourceResource"]["subject"]:
record["subjects"].append(subject["name"])
return record
def get_ppoc(item):
    if re.match('.*(lithograph|drawing|photomechanical|engraving|silkscreen|wood)',
                item["medium"]) is not None:
        return None
record = {
"id" : item["pk"],
"title": item["title"],
"thumbnail": item["image"]["thumb"],
"nativeView": item["links"]["resource"],
"platformView": item["links"]["item"],
"provider": "Library of Congress Prints and Photographs Division",
"displayDate": item["created_published_date"],
"coll": item["collection"],
"subject": item["subjects"],
"source": "ppoc",
}
return record
for filename in os.listdir(data_dir):
parts = filename.split("-")
collection = parts[0]
source = parts[5].strip(".json")
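    # Hypothetical example: "sandy-2012-10-29-items-dpla.json" splits into six
    # parts, giving collection "sandy" and source "dpla". Note that str.strip
    # removes any of the characters '.', 'j', 's', 'o', 'n' from the ends,
    # not the literal ".json" suffix; it happens to work for "dpla"/"ppoc".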
filename = os.path.join(data_dir, filename)
print "loading %s" % filename
for item in json.load(open(filename)):
if source == "dpla":
record = get_dpla(item)
elif source == "ppoc":
record = get_ppoc(item)
else:
raise Exception("unknown source: %s" % source)
if record:
db[collection].insert(record)
| lwrubel/disasterview | load.py | Python | mit | 3,738 |
"""Provides helpers for Z-Wave JS device automations."""
from __future__ import annotations
from typing import cast
import voluptuous as vol
from zwave_js_server.const import ConfigurationValueType
from zwave_js_server.model.node import Node
from zwave_js_server.model.value import ConfigurationValue
NODE_STATUSES = ["asleep", "awake", "dead", "alive"]
CONF_SUBTYPE = "subtype"
CONF_VALUE_ID = "value_id"
VALUE_ID_REGEX = r"([0-9]+-[0-9]+-[0-9]+-).+"
def get_config_parameter_value_schema(node: Node, value_id: str) -> vol.Schema | None:
"""Get the extra fields schema for a config parameter value."""
config_value = cast(ConfigurationValue, node.values[value_id])
min_ = config_value.metadata.min
max_ = config_value.metadata.max
if config_value.configuration_value_type in (
ConfigurationValueType.RANGE,
ConfigurationValueType.MANUAL_ENTRY,
):
return vol.All(vol.Coerce(int), vol.Range(min=min_, max=max_))
if config_value.configuration_value_type == ConfigurationValueType.ENUMERATED:
return vol.In({int(k): v for k, v in config_value.metadata.states.items()})
return None
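# Sketch of the schemas produced above (hypothetical metadata): a RANGE or
# MANUAL_ENTRY parameter with min=0 and max=99 yields
# vol.All(vol.Coerce(int), vol.Range(min=0, max=99)), while an ENUMERATED
# parameter with states {"1": "On", "2": "Off"} yields
# vol.In({1: "On", 2: "Off"}); any other value type yields None.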
def generate_config_parameter_subtype(config_value: ConfigurationValue) -> str:
"""Generate the config parameter name used in a device automation subtype."""
parameter = str(config_value.property_)
if config_value.property_key:
parameter = f"{parameter}[{hex(config_value.property_key)}]"
return f"{parameter} ({config_value.property_name})"
| rohitranjan1991/home-assistant | homeassistant/components/zwave_js/device_automation_helpers.py | Python | mit | 1,521 |
#!/usr/bin/python
# coding: utf-8
import sys
class Solution(object):
def increasingTriplet(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
m1, m2 = sys.maxint, sys.maxint
for n in nums:
if m1 >= n:
m1 = n
elif m2 >= n:
m2 = n
else:
return True
return False
| Lanceolata/code-problems | python/leetcode_medium/Question_334_Increasing_Triplet_Subsequence.py | Python | mit | 411 |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 20:12:52 2016
@author: Hunter
"""
from scipy import misc
from scipy import ndimage
import os
import numpy as np
def crop(img_array, size):
lx, ly, lz = img_array.shape
if lx > ly + 1:
crop_img = img_array[int((lx-ly)/2): - int((lx-ly)/2), 0: ly]
else:
if ly > lx + 1:
crop_img = img_array[0: lx, int((ly-lx)/2): - int((ly-lx)/2)]
else: crop_img = img_array
newlx, newly, newlz = crop_img.shape
if newlx != size and newly != size:
crop_img = misc.imresize(crop_img, (size, size))
return crop_img
def crop_around_circle(img, c = None, default_radius = 0.2, default_background=[103.939,116.779,123.68]):
sx, sy, sz = img.shape
if c is None:
cx = sx / 2
cy = cx
radius = sx * default_radius
else:
cy, cx, radius = c
r2 = radius * radius
background = np.array(default_background)
for i in range(sx):
iDist = (i - cx) * (i - cx)
for j in range(sy):
jDist = (j - cy) * (j - cy)
if iDist + jDist > r2:
img[i,j] = background
return img
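# The per-pixel loops above are easy to follow but slow on large images; a
# vectorized NumPy equivalent (untested sketch, same semantics) would be:
#
#   ii, jj = np.ogrid[:sx, :sy]
#   img[(ii - cx) ** 2 + (jj - cy) ** 2 > radius * radius] = background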
import cv2
def get_best_hough_circle(img_path, radius_min, radius_max):
img = cv2.imread(img_path,0)
x,y = img.shape
img = cv2.medianBlur(img,5)
try:
circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=int(x * radius_min),maxRadius=int(x * radius_max))
circles = np.uint16(np.around(circles))
i = circles[0][0]
except:
i = None
return i
def zoom_crop(img, scale):
lx, ly, lz = img.shape
img = img[int((lx - lx * scale) / 2):int((lx + lx * scale) / 2), int((ly - ly * scale) / 2):int((ly + ly * scale) / 2)]
return img
def create_rotated(rotations, rotation_dir, img_array, name):
for i in range(rotations):
angle_deg = int(i * 360 / rotations)
class_name = name.split('.')[0]
suffix = name.split('.')[1]
rotated = ndimage.rotate(img_array, angle_deg, reshape=False)
rotated = zoom_crop(rotated, 0.7)
file_name = class_name + "_" + str(angle_deg) + "." + suffix
misc.imsave(os.path.join(rotation_dir, file_name), rotated)
import shutil
def crop_hough(imgPath, radius_min, radius_max):
bestCircle = get_best_hough_circle(imgPath, radius_min, radius_max)
img = misc.imread(imgPath)
return crop_around_circle(img, bestCircle)
def crop_resize_to_squares(img_folder, target_root_dir, size = 224):
squares_folder = os.path.join(target_root_dir, "square" + str(size) + "px")
shutil.rmtree(squares_folder, ignore_errors=True, onerror=None)
os.mkdir(squares_folder)
print(img_folder)
print(os.listdir(img_folder))
for each_img in os.listdir(img_folder):
img_path = os.path.join(img_folder, each_img)
print (each_img)
cropped = crop(misc.imread(img_path),size)
misc.imsave(os.path.join(squares_folder, each_img), cropped)
def create_background_cropped_training_set(img_folder, target_root_dir, background=True, crop_circle=True, zoom_factor=None, radius_min=0.13, radius_max=0.3):
description = "trainset"
if background:
description += "_original_background"
if crop_circle:
description += "_removed_background"
if zoom_factor:
description += "_zoom_" + str(zoom_factor)
background_crop_folder = os.path.join(target_root_dir, description)
shutil.rmtree(background_crop_folder, ignore_errors=True, onerror=None)
os.mkdir(background_crop_folder)
print(img_folder)
print(os.listdir(img_folder))
for each_img in os.listdir(img_folder):
img_path = os.path.join(img_folder, each_img)
print (each_img)
class_name = each_img.split(".")[0]
if background:
target_file_path = os.path.join(background_crop_folder, class_name + "_orig.jpg")
if zoom_factor is None:
shutil.copy(img_path, target_file_path)
else:
img = misc.imread(img_path)
img = zoom_crop(img, zoom_factor)
misc.imsave(target_file_path, img)
if crop_circle:
circle = crop_hough(img_path, radius_min, radius_max)
target_file_path = os.path.join(background_crop_folder, class_name + "_circle.jpg")
if zoom_factor is not None:
circle = zoom_crop(circle, zoom_factor)
misc.imsave(target_file_path, circle)
def perform_rotations(rotations, source_dir, target_root_dir):
rotation_dir = os.path.join(target_root_dir, str(rotations) + "_rotations")
shutil.rmtree(rotation_dir, ignore_errors=True, onerror=None)
os.mkdir(rotation_dir)
for each_img in os.listdir(source_dir):
img_path = os.path.join(source_dir, each_img)
create_rotated(rotations, rotation_dir, misc.imread(img_path), each_img)
from PIL import Image
def create_labels(source_dir, target_dir, label):
if not os.path.exists(target_dir):
os.mkdir(target_dir)
count = 0
for each_img in os.listdir(source_dir):
img_path = os.path.join(source_dir, each_img)
labeled_path = os.path.join(target_dir, label + "_" + str(count) + ".jpg")
im = Image.open(img_path)
im.convert('RGB').save(labeled_path, "JPEG")
count += 1
import random
def create_train_test(target_root_dir, source_dir, classes, test_percent=0.2):
train_dir = os.path.join(target_root_dir, "raw_train")
test_dir = os.path.join(target_root_dir, "raw_test")
shutil.rmtree(train_dir, ignore_errors=True, onerror=None)
os.mkdir(train_dir)
shutil.rmtree(test_dir, ignore_errors=True, onerror=None)
os.mkdir(test_dir)
print(os.listdir(source_dir))
for each_img in os.listdir(source_dir):
if each_img.split("_")[0] in classes:
thisfile = os.path.join(source_dir, each_img)
if random.random() > test_percent:
shutil.copy(thisfile, train_dir)
else:
                shutil.copy(thisfile, test_dir)
| hprovyn/keras-experiments | hough_img_prep.py | Python | mit | 6506 |
# coding: utf8
# Copyright 2014-2015 Vincent Jacques <[email protected]>
"""
When given a :class:`UpdateTable`, the connection will return a :class:`UpdateTableResponse`:
.. testsetup::
table = "LowVoltage.Tests.Doc.UpdateTable.1"
table2 = "LowVoltage.Tests.Doc.UpdateTable.2"
table3 = "LowVoltage.Tests.Doc.UpdateTable.3"
table4 = "LowVoltage.Tests.Doc.UpdateTable.4"
connection(CreateTable(table).hash_key("h", STRING).provisioned_throughput(1, 1))
connection(
CreateTable(table2).hash_key("h", STRING).provisioned_throughput(1, 1)
.global_secondary_index("gsi").hash_key("hh", STRING).range_key("rr", NUMBER).provisioned_throughput(1, 1).project_all()
)
connection(
CreateTable(table3).hash_key("h", STRING).provisioned_throughput(1, 1)
.global_secondary_index("gsi").hash_key("hh", STRING).range_key("rr", NUMBER).provisioned_throughput(1, 1).project_all()
)
connection(CreateTable(table4).hash_key("h", STRING).provisioned_throughput(1, 1))
wait_for_table_activation(connection, table)
wait_for_table_activation(connection, table2)
wait_for_table_activation(connection, table3)
wait_for_table_activation(connection, table4)
>>> r = connection(
... UpdateTable(table)
... .provisioned_throughput(2, 2)
... )
>>> r
<LowVoltage.actions.update_table.UpdateTableResponse ...>
>>> r.table_description.table_status
u'UPDATING'
Note that you can use the :func:`.wait_for_table_activation` compound to poll the table status until it's updated. See :ref:`actions-vs-compounds` in the user guide.
.. testcleanup::
wait_for_table_activation(connection, table)
wait_for_table_activation(connection, table2)
wait_for_table_activation(connection, table3)
wait_for_table_activation(connection, table4)
connection(DeleteTable(table))
connection(DeleteTable(table2))
connection(DeleteTable(table3))
connection(DeleteTable(table4))
wait_for_table_deletion(connection, table)
wait_for_table_deletion(connection, table2)
wait_for_table_deletion(connection, table3)
wait_for_table_deletion(connection, table4)
"""
import datetime
import LowVoltage as _lv
import LowVoltage.testing as _tst
from .action import Action
from .next_gen_mixins import variadic, proxy
from .next_gen_mixins import (
TableName,
)
from .return_types import TableDescription, _is_dict
class UpdateTableResponse(object):
"""
UpdateTableResponse()
The `UpdateTable response <http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html#API_UpdateTable_ResponseElements>`__.
"""
def __init__(
self,
TableDescription=None,
**dummy
):
self.__table_description = TableDescription
@property
def table_description(self):
"""
The description of the table you just updated.
:type: ``None`` or :class:`.TableDescription`
"""
if _is_dict(self.__table_description):
return TableDescription(**self.__table_description)
class UpdateTable(Action):
"""
The `UpdateTable request <http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html#API_UpdateTable_RequestParameters>`__.
"""
def __init__(self, table_name=None):
"""
Passing ``table_name`` to the constructor is like calling :meth:`table_name` on the new instance.
"""
super(UpdateTable, self).__init__("UpdateTable", UpdateTableResponse)
self.__table_name = TableName(self, table_name)
self.__attribute_definitions = {}
self._read_capacity_units = None
self._write_capacity_units = None
self.__gsis = {}
self.__active_index = self
@property
def payload(self):
# @todo Simplify, make more linear
data = {}
data.update(self.__table_name.payload)
if self.__attribute_definitions:
data["AttributeDefinitions"] = [
{"AttributeName": name, "AttributeType": typ}
for name, typ in self.__attribute_definitions.iteritems()
]
throughput = {}
if self._read_capacity_units:
throughput["ReadCapacityUnits"] = self._read_capacity_units
if self._write_capacity_units:
throughput["WriteCapacityUnits"] = self._write_capacity_units
if throughput:
data["ProvisionedThroughput"] = throughput
if self.__gsis:
data["GlobalSecondaryIndexUpdates"] = [i.payload for i in self.__gsis.itervalues()]
return data
class _Index(object):
def __init__(self, verb, name):
self._verb = verb
self.__name = name
self._hash_key = None
self._range_key = None
self._projection = None
self._read_capacity_units = None
self._write_capacity_units = None
@property
def payload(self):
data = {"IndexName": self.__name}
schema = []
if self._hash_key:
schema.append({"AttributeName": self._hash_key, "KeyType": "HASH"})
if self._range_key:
schema.append({"AttributeName": self._range_key, "KeyType": "RANGE"})
if schema:
data["KeySchema"] = schema
if isinstance(self._projection, basestring):
data["Projection"] = {"ProjectionType": self._projection}
elif self._projection:
data["Projection"] = {"ProjectionType": "INCLUDE", "NonKeyAttributes": self._projection}
throughput = {}
if self._read_capacity_units:
throughput["ReadCapacityUnits"] = self._read_capacity_units
if self._write_capacity_units:
throughput["WriteCapacityUnits"] = self._write_capacity_units
if throughput:
data["ProvisionedThroughput"] = throughput
return {self._verb: data}
@proxy
def table_name(self, table_name):
"""
See :meth:`update_global_secondary_index` for an example.
"""
return self.__table_name.set(table_name)
def hash_key(self, name, typ=None):
"""
Set the hash key in KeySchema for the active index.
If you provide a second argument, :meth:`attribute_definition` will be called as well.
:raise: :exc:`.BuilderError` if called when no index is active or if the active index is not being created.
See :meth:`create_global_secondary_index` for an example.
"""
self.__check_active_index()
self.__active_index._hash_key = name
if typ is not None:
self.attribute_definition(name, typ)
return self
def range_key(self, name, typ=None):
"""
Set the range key in KeySchema for the active index.
If you provide a second argument, :meth:`attribute_definition` will be called as well.
:raise: :exc:`.BuilderError` if called when no index is active or if the active index is not being created.
See :meth:`create_global_secondary_index` for an example.
"""
self.__check_active_index()
self.__active_index._range_key = name
if typ is not None:
self.attribute_definition(name, typ)
return self
def attribute_definition(self, name, typ):
"""
Set the type of an attribute in AttributeDefinitions.
Key attribute must be typed. See :mod:`.attribute_types` for constants to be passed to this method.
"""
self.__attribute_definitions[name] = typ
return self
def provisioned_throughput(self, read_capacity_units, write_capacity_units):
"""
Set the new provisioned throughput for the table or the active index.
See :meth:`create_global_secondary_index` for an example.
"""
self.__active_index._read_capacity_units = read_capacity_units
self.__active_index._write_capacity_units = write_capacity_units
return self
def create_global_secondary_index(self, name):
"""
Create a new GSI.
This method sets the active index: methods like :meth:`provisioned_throughput` will apply to the index.
>>> connection(
... UpdateTable(table4)
... .create_global_secondary_index("gsi")
... .hash_key("hh", STRING)
... .range_key("rr", NUMBER)
... .project_all()
... .provisioned_throughput(2, 2)
... )
<LowVoltage.actions.update_table.UpdateTableResponse ...>
"""
if name not in self.__gsis:
self.__gsis[name] = self._Index("Create", name)
self.__active_index = self.__gsis[name]
return self
def update_global_secondary_index(self, name):
"""
Update an existing GSI.
This method sets the active index: methods like :meth:`provisioned_throughput` will apply to the index.
>>> connection(
... UpdateTable()
... .table_name(table2)
... .update_global_secondary_index("gsi")
... .provisioned_throughput(2, 2)
... )
<LowVoltage.actions.update_table.UpdateTableResponse ...>
"""
if name not in self.__gsis:
self.__gsis[name] = self._Index("Update", name)
self.__active_index = self.__gsis[name]
return self
def delete_global_secondary_index(self, name):
"""
Mark a GSI for deletion.
This method does not set the active index, because there is nothing to modify.
>>> connection(
... UpdateTable(table3)
... .delete_global_secondary_index("gsi")
... )
<LowVoltage.actions.update_table.UpdateTableResponse ...>
"""
self.__gsis[name] = self._Index("Delete", name)
return self
def table(self):
"""
Reset the active index: methods like :meth:`provisioned_throughput` will apply to the table.
"""
self.__active_index = self
return self
def project_all(self):
"""
Set ProjectionType to ALL for the active index.
:raise: :exc:`.BuilderError` if called when no index is active or if the active index is not being created.
See :meth:`create_global_secondary_index` for an example.
"""
self.__check_active_index()
self.__active_index._projection = "ALL"
return self
def project_keys_only(self):
"""
Set ProjectionType to KEYS_ONLY for the active index.
:raise: :exc:`.BuilderError` if called when no index is active or if the active index is not being created.
"""
self.__check_active_index()
self.__active_index._projection = "KEYS_ONLY"
return self
@variadic(basestring)
def project(self, *attrs):
"""
Set ProjectionType to INCLUDE for the active index and add names to NonKeyAttributes.
:raise: :exc:`.BuilderError` if called when no index is active or if the active index is not being created.
"""
self.__check_active_index()
if not isinstance(self.__active_index._projection, list):
self.__active_index._projection = []
self.__active_index._projection.extend(attrs)
return self
def __check_active_index(self):
if self.__active_index is self or self.__active_index._verb != "Create":
raise _lv.BuilderError("No active index or active index not being created.")
class UpdateTableUnitTests(_tst.UnitTests):
def test_name(self):
self.assertEqual(UpdateTable("Foo").name, "UpdateTable")
def test_constructor(self):
self.assertEqual(UpdateTable("Foo").payload, {"TableName": "Foo"})
def test_table_name(self):
self.assertEqual(UpdateTable().table_name("Foo").payload, {"TableName": "Foo"})
def test_throughput(self):
self.assertEqual(
UpdateTable("Foo").provisioned_throughput(42, 43).payload,
{
"TableName": "Foo",
"ProvisionedThroughput": {"ReadCapacityUnits": 42, "WriteCapacityUnits": 43},
}
)
def test_attribute_definition(self):
self.assertEqual(
UpdateTable("Foo").attribute_definition("a", "B").payload,
{
"TableName": "Foo",
"AttributeDefinitions": [{"AttributeName": "a", "AttributeType": "B"}],
}
)
def test_create_gsi(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi"}},
],
}
)
def test_create_gsi_provisioned_throughput(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").provisioned_throughput(1, 2).payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi", "ProvisionedThroughput": {"ReadCapacityUnits": 1, "WriteCapacityUnits": 2}}},
],
}
)
def test_create_gsi_hash_key(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").hash_key("h").payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi", "KeySchema": [{"AttributeName": "h", "KeyType": "HASH"}]}},
],
}
)
def test_create_gsi_range_key(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").range_key("r").payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi", "KeySchema": [{"AttributeName": "r", "KeyType": "RANGE"}]}},
],
}
)
def test_create_gsi_hash_key_with_type(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").hash_key("h", "S").payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi", "KeySchema": [{"AttributeName": "h", "KeyType": "HASH"}]}},
],
"AttributeDefinitions": [{"AttributeName": "h", "AttributeType": "S"}]
}
)
def test_create_gsi_range_key_with_type(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").range_key("r", "N").payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi", "KeySchema": [{"AttributeName": "r", "KeyType": "RANGE"}]}},
],
"AttributeDefinitions": [{"AttributeName": "r", "AttributeType": "N"}]
}
)
def test_create_gsi_project_all(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").project_all().payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi", "Projection": {"ProjectionType": "ALL"}}},
],
}
)
def test_create_gsi_project_keys_only(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").project_keys_only().payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi", "Projection": {"ProjectionType": "KEYS_ONLY"}}},
],
}
)
def test_create_gsi_project(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").project("a", ["b", "c"]).project("d").payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi", "Projection": {"ProjectionType": "INCLUDE", "NonKeyAttributes": ["a", "b", "c", "d"]}}},
],
}
)
def test_update_gsi(self):
self.assertEqual(
UpdateTable("Foo").update_global_secondary_index("the_gsi").payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Update": {"IndexName": "the_gsi"}},
],
}
)
def test_update_gsi_provisioned_throughput(self):
self.assertEqual(
UpdateTable("Foo").update_global_secondary_index("the_gsi").provisioned_throughput(42, 43).payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Update": {"IndexName": "the_gsi", "ProvisionedThroughput": {"ReadCapacityUnits": 42, "WriteCapacityUnits": 43}}},
],
}
)
def test_delete_gsi(self):
self.assertEqual(
UpdateTable("Foo").delete_global_secondary_index("the_gsi").payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Delete": {"IndexName": "the_gsi"}},
],
}
)
def test_back_to_update_gsi_after_back_to_table(self):
self.assertEqual(
UpdateTable("Foo").update_global_secondary_index("the_gsi").table().provisioned_throughput(12, 13)
.update_global_secondary_index("the_gsi").provisioned_throughput(42, 43).payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Update": {"IndexName": "the_gsi", "ProvisionedThroughput": {"ReadCapacityUnits": 42, "WriteCapacityUnits": 43}}},
],
"ProvisionedThroughput": {"ReadCapacityUnits": 12, "WriteCapacityUnits": 13},
}
)
def test_back_to_create_gsi_after_back_to_table(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").table().provisioned_throughput(12, 13)
.create_global_secondary_index("the_gsi").provisioned_throughput(42, 43).payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi", "ProvisionedThroughput": {"ReadCapacityUnits": 42, "WriteCapacityUnits": 43}}},
],
"ProvisionedThroughput": {"ReadCapacityUnits": 12, "WriteCapacityUnits": 13},
}
)
def test_back_to_update_gsi_after_back_to_table_after_create_gsi(self):
self.assertEqual(
UpdateTable("Foo").create_global_secondary_index("the_gsi").table().provisioned_throughput(12, 13)
.update_global_secondary_index("the_gsi").provisioned_throughput(42, 43).payload,
{
"TableName": "Foo",
"GlobalSecondaryIndexUpdates": [
{"Create": {"IndexName": "the_gsi", "ProvisionedThroughput": {"ReadCapacityUnits": 42, "WriteCapacityUnits": 43}}},
],
"ProvisionedThroughput": {"ReadCapacityUnits": 12, "WriteCapacityUnits": 13},
}
)
def test_hash_key_without_active_index(self):
with self.assertRaises(_lv.BuilderError) as catcher:
UpdateTable("Foo").hash_key("h")
self.assertEqual(catcher.exception.args, ("No active index or active index not being created.",))
def test_range_key_without_active_index(self):
with self.assertRaises(_lv.BuilderError) as catcher:
UpdateTable("Foo").range_key("r")
self.assertEqual(catcher.exception.args, ("No active index or active index not being created.",))
def test_project_all_without_active_index(self):
with self.assertRaises(_lv.BuilderError) as catcher:
UpdateTable("Foo").project_all()
self.assertEqual(catcher.exception.args, ("No active index or active index not being created.",))
def test_project_without_active_index(self):
with self.assertRaises(_lv.BuilderError) as catcher:
UpdateTable("Foo").project("a")
self.assertEqual(catcher.exception.args, ("No active index or active index not being created.",))
def test_project_keys_only_without_active_index(self):
with self.assertRaises(_lv.BuilderError) as catcher:
UpdateTable("Foo").project_keys_only()
self.assertEqual(catcher.exception.args, ("No active index or active index not being created.",))
def test_hash_key_with_updating_active_index(self):
with self.assertRaises(_lv.BuilderError) as catcher:
UpdateTable("Foo").update_global_secondary_index("gsi").hash_key("h")
self.assertEqual(catcher.exception.args, ("No active index or active index not being created.",))
def test_range_key_with_updating_active_index(self):
with self.assertRaises(_lv.BuilderError) as catcher:
UpdateTable("Foo").update_global_secondary_index("gsi").range_key("r")
self.assertEqual(catcher.exception.args, ("No active index or active index not being created.",))
def test_project_all_with_updating_active_index(self):
with self.assertRaises(_lv.BuilderError) as catcher:
UpdateTable("Foo").update_global_secondary_index("gsi").project_all()
self.assertEqual(catcher.exception.args, ("No active index or active index not being created.",))
def test_project_with_updating_active_index(self):
with self.assertRaises(_lv.BuilderError) as catcher:
UpdateTable("Foo").update_global_secondary_index("gsi").project("a")
self.assertEqual(catcher.exception.args, ("No active index or active index not being created.",))
def test_project_keys_only_with_updating_active_index(self):
with self.assertRaises(_lv.BuilderError) as catcher:
UpdateTable("Foo").update_global_secondary_index("gsi").project_keys_only()
self.assertEqual(catcher.exception.args, ("No active index or active index not being created.",))
class UpdateTableResponseUnitTests(_tst.UnitTests):
def test_all_none(self):
r = UpdateTableResponse()
self.assertIsNone(r.table_description)
def test_all_set(self):
r = UpdateTableResponse(TableDescription={})
self.assertIsInstance(r.table_description, TableDescription)
| jacquev6/LowVoltage | LowVoltage/actions/update_table.py | Python | mit | 22,948 |
'''
Created on Dec 3, 2014
Based on the work from the roverplot project ().
@author: gearsad
'''
from roverpylot import rover
import LCMRover
import time
import pygame
import sys
import signal
def _signal_handler(signal, frame):
frame.f_locals['rover'].close()
sys.exit(0)
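# The handler above digs the rover object out of the interrupted frame's
# local variables, which assumes CTRL-C arrives while the main loop below
# (where `rover` is in scope) is executing.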
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Run arnerve_bot with the following parameter - botname"
sys.exit()
botname = sys.argv[1]
# Create an LCM Rover object
rover = LCMRover.LCMRover()
rover.Initialize(botname)
# Set up signal handler for CTRL-C
signal.signal(signal.SIGINT, _signal_handler)
# Loop till Quit hit
while True:
rover.Update()
time.sleep(0.01)
    rover.close()
| GearsAD/semisorted_arnerve | arnerve_bot/arnerve_bot/arnerve_bot.py | Python | mit | 828 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Order.reference'
db.add_column('orders', 'reference',
self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Order.reference'
db.delete_column('orders', 'reference')
models = {
u'uppsell.address': {
'Meta': {'object_name': 'Address', 'db_table': "'addresses'"},
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_used': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'other': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'province': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'province_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'uppsell.card': {
'Meta': {'object_name': 'Card'},
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']"}),
'expiry': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'holder': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last4': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'default': "'UNKNOWN'", 'max_length': '12'}),
'pan': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
u'uppsell.cart': {
'Meta': {'object_name': 'Cart', 'db_table': "'carts'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Store']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'uppsell.cartitem': {
'Meta': {'unique_together': "(('cart', 'product'),)", 'object_name': 'CartItem', 'db_table': "'cart_items'"},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Cart']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Listing']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'uppsell.coupon': {
'Meta': {'object_name': 'Coupon', 'db_table': "'coupons'"},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']", 'null': 'True', 'blank': 'True'}),
'discount_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'discount_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_uses': ('django.db.models.fields.PositiveIntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Product']", 'null': 'True', 'blank': 'True'}),
'product_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.ProductGroup']", 'null': 'True', 'blank': 'True'}),
'remaining': ('django.db.models.fields.PositiveIntegerField', [], {}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Store']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {}),
'valid_until': ('django.db.models.fields.DateTimeField', [], {})
},
u'uppsell.couponspend': {
'Meta': {'unique_together': "(('customer', 'coupon'),)", 'object_name': 'CouponSpend', 'db_table': "'coupon_spends'"},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Coupon']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'uppsell.customer': {
'Meta': {'object_name': 'Customer', 'db_table': "'customers'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_logged_in_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'uppsell.invoice': {
'Meta': {'object_name': 'Invoice', 'db_table': "'invoices'"},
'billing_address': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'coupon': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'order_shipping_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'order_sub_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'order_tax_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'order_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'payment_made_ts': ('django.db.models.fields.DateTimeField', [], {}),
'payment_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'products': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
'shipping_address': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'store_id': ('django.db.models.fields.IntegerField', [], {}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'upptalk_username': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user_email': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user_fullname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user_mobile_msisdn': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'uppsell.linkedaccount': {
'Meta': {'object_name': 'LinkedAccount', 'db_table': "'linked_accounts'"},
'account_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
'linked_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.LinkedAccountType']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'uppsell.linkedaccounttype': {
'Meta': {'object_name': 'LinkedAccountType', 'db_table': "'linked_account_types'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'uppsell.listing': {
'Meta': {'object_name': 'Listing', 'db_table': "'listings'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True', 'blank': 'True'}),
'features': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '24', 'decimal_places': '12'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Product']"}),
'shipping': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '24', 'decimal_places': '12'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Store']"}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'tax_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.SalesTaxRate']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'uppsell.order': {
'Meta': {'object_name': 'Order', 'db_table': "'orders'"},
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'billing_address'", 'null': 'True', 'to': u"orm['uppsell.Address']"}),
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Coupon']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']"}),
'fraud_state': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_state': ('django.db.models.fields.CharField', [], {'default': "'init'", 'max_length': '30'}),
'payment_made_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'payment_state': ('django.db.models.fields.CharField', [], {'default': "'init'", 'max_length': '30'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shipping_address'", 'null': 'True', 'to': u"orm['uppsell.Address']"}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Store']"}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'uppsell.orderevent': {
'Meta': {'object_name': 'OrderEvent', 'db_table': "'order_events'"},
'action_type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Order']"}),
'state_after': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'state_before': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'uppsell.orderitem': {
'Meta': {'object_name': 'OrderItem', 'db_table': "'order_items'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['uppsell.Order']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Listing']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'uppsell.product': {
'Meta': {'object_name': 'Product', 'db_table': "'products'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '10000'}),
'features': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.ProductGroup']"}),
'has_stock': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'provisioning_codes': ('uppsell.models.SeparatedValuesField', [], {'max_length': '5000', 'null': 'True', 'blank': 'True'}),
'shipping': ('django.db.models.fields.BooleanField', [], {}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'stock_units': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'uppsell.productgroup': {
'Meta': {'object_name': 'ProductGroup', 'db_table': "'product_groups'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'uppsell.salestaxrate': {
'Meta': {'object_name': 'SalesTaxRate'},
'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': "'10'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'20'"}),
'rate': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '6', 'decimal_places': '5'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Store']"})
},
u'uppsell.store': {
'Meta': {'object_name': 'Store', 'db_table': "'stores'"},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'default_lang': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['uppsell'] | upptalk/uppsell | uppsell/migrations/0029_auto__add_field_order_reference.py | Python | mit | 19,572 |
# -*- coding: utf-8 -*-
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.forms.widgets import ClearableFileInput
def change_widget_to_NoFullPathLinkFileInput(f):
formfield = f.formfield()
if isinstance(f, models.FileField):
formfield.widget = NoFullPathLinkFileInput()
return formfield
class NoFullPathLinkFileInput(ClearableFileInput):
"new widget that removes the link and full path from a uploaded file, for security"
# template_with_initial = u'%(initial_text)s: %(initial)s %(clear_template)s<br />%(input_text)s: %(input)s'
def render(self, name, value, attrs=None):
from Avaliacao.Questao.models import Fonte, FonteGabarito
# import pdb; pdb.set_trace()
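        # The flow below: render the bare file input first; only when an
        # already-saved file exists do we prepend a download link that shows
        # just the file name, never the full storage path.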
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = u'%(input)s'
substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
if value and hasattr(value, "instance"):
self.template_with_initial = u'%(initial_text)s: %(initial)s %(clear_template)s'
value_new = force_unicode(value).split('/')[-1]
template = self.template_with_initial
url = "#"
if isinstance(value.instance,Fonte):
url = reverse('exibir_arquivo_fonte',args=[value.instance.pk])
elif isinstance(value.instance,FonteGabarito):
url = reverse('exibir_arquivo_fonte_gabarito',args=[value.instance.pk])
substitutions['initial'] = (u'<a href="%s" target="_blank">%s</a>' % (escape(url),escape(value_new)))
return mark_safe(template % substitutions)
| arruda/amao | AMAO/libs/forms_amao/widgets.py | Python | mit | 1,947 |
# Project Euler Problem 7
# Created on: 2012-06-13
# Created by: William McDonald
import math
import time
# Short list of prime numbers under 20
primeList = [2, 3, 5, 7, 11, 13, 17, 19]
# Returns True if n is prime, otherwise False
def isPrime(n):
prime = True
for i in primeList:
if n % i == 0:
prime = False
break
if i > math.floor(math.sqrt(n)):
break
return prime
# Returns the nth prime number
def getPrime(n):
    if n <= len(primeList):
        return primeList[n - 1]
    p = primeList[-1] + 2
    while len(primeList) < n:
        if isPrime(p):
            primeList.append(p)
        p += 2
    return primeList[n - 1]
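# The primeList cache persists between calls, so repeated queries reuse all
# previously computed primes (simple memoized trial division).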
start = time.time()
ans = getPrime(10001)
cost = time.time() - start
print(ans)
print("Time: {}".format(cost)) | WalrusCow/euler | Solutions/problem07.py | Python | mit | 912 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 France-IOI, MIT license
#
# http://opensource.org/licenses/MIT
# This companion tool to the taskgrader prints an evaluation report in a
# human-readable form.
import json, os, sys
def showCaptureReport(report):
"""Print a captureReport."""
print("""File `%(name)s`: (size %(sizeKb)dKb, truncated: %(wasTruncated)s)
`%(data)s`""" % report)
def showExecutionReport(originalReport, name='program'):
"""Print an executionReport."""
# Fill non-required fields with default values
report = {
"memoryUsedKb": -1,
"exitSig": -1,
"continueOnError": False,
"stdout": {'data': ''},
"stderr": {'data': ''},
"files": [],
"noFeedback": False
}
report.update(originalReport)
# Make summary string
if report['exitCode'] == 0:
success = 'success'
else:
if report['wasKilled'] or report['exitSig'] > 0:
success = 'killed, exitcode %(exitCode)d, signal %(exitSig)d' % report
else:
success = 'failure, exitcode %d' % report['exitCode']
if report['continueOnError']:
success += ' (ignored)'
# Execution information
print("Execution %s: %s" % (name, success))
print("""Cmd: %(commandLine)s
Exit code %(exitCode)d, sig %(exitSig)d / cached: %(wasCached)s continueOnError: %(continueOnError)s, noFeedback: %(noFeedback)s
Time: %(timeTakenMs)gms (real %(realTimeTakenMs)gms, limit %(timeLimitMs)gms) / memory: %(memoryUsedKb)gKb (limit %(memoryLimitKb)gKb)""" % report)
# Stdout / stderr
if report['stdout']['data']:
showCaptureReport(report['stdout'])
if report['stderr']['data']:
showCaptureReport(report['stderr'])
if not report['stdout']['data'] and not report['stderr']['data']:
print("no output")
# Captured files
if len(report['files']) > 0:
print("Files captured:")
for fileReport in report['files']:
showCaptureReport(fileReport)
print('')
def showEvaluationReport(json):
"""Print a full evaluation report."""
# Generators
if len(json['generators']) > 0:
print("* Generators compilation")
for generator in json['generators']:
showExecutionReport(generator['compilationExecution'], name="generator '%s' compilation" % generator['id'])
# Generations
if len(json['generations']) > 0:
print("* Generations")
for generation in json['generations']:
showExecutionReport(generation['generatorExecution'], name="generation '%s'" % generation['id'])
if 'outputGeneratorExecution' in generation:
showExecutionReport(generation['outputGeneratorExecution'], name="output generation '%s'" % generation['id'])
# Sanitizer and checker
print("* Sanitizer and checker")
showExecutionReport(json['sanitizer'], name='sanitizer compilation')
showExecutionReport(json['checker'], name='checker compilation')
# Solutions
if len(json['solutions']) > 0:
print("* Solutions compilation")
for solution in json['solutions']:
showExecutionReport(solution['compilationExecution'], name="solution '%s' compilation" % solution['id'])
# Executions
for execution in json['executions']:
print("* Execution %(id)s (solution %(name)s)" % execution)
if len(execution['testsReports']) == 0:
print("No test report.")
continue
for testReport in execution['testsReports']:
print("-> Test %s" % testReport['name'])
showExecutionReport(testReport['sanitizer'], name="sanitizer on test '%s'" % testReport['name'])
if 'execution' in testReport:
showExecutionReport(testReport['execution'], name="solution '%s' on test '%s'" % (execution['name'], testReport['name']))
else:
print("Test rejected, solution not executed.")
if 'checker' in testReport:
showExecutionReport(testReport['checker'], name="checker on test '%s'" % testReport['name'])
else:
print("Solution returned an error, answer not checker.")
if __name__ == '__main__':
# Read stdin
inputData = sys.stdin.read()
try:
resultJson = json.loads(inputData)
    except ValueError:
if inputData.strip() != '':
print('Error: no valid JSON data read from stdin. Received:')
print(inputData)
sys.exit(1)
showEvaluationReport(resultJson)
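    # Typical invocation (assumed workflow): pipe the taskgrader's JSON
    # output into this script, e.g.  taskgrader < task.json | ./fullReport.py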
| France-ioi/taskgrader | tools/stdGrade/fullReport.py | Python | mit | 4,579 |
#!c:\python36\python.exe
# -*- coding: UTF-8 -*-
"""
expander.py: this is the pyexpander application.
NOTE: THIS IS A MODIFIED VERSION OF PYEXPANDER as of July 2018
License: https://bitbucket.org/goetzpf/pyexpander/src/b466de6fd801545650edfa790a18f022dc7e151a/LICENSE?at=default&fileviewer=file-view-default
Original: http://pyexpander.sourceforge.net/
"""
# pylint: disable=C0322,C0103,R0903
from optparse import OptionParser
#import string
import os.path
import sys
import pyexpander.lib as pyexpander
# version of the program:
__version__= "1.8.3" #VERSION#
assert __version__==pyexpander.__version__
def process_files(options,args):
"""process all the command line options."""
my_globals={}
if options.eval is not None:
for expr in options.eval:
# pylint: disable=W0122
exec(expr, my_globals)
# pylint: enable=W0122
filelist= []
if options.file is not None:
filelist= options.file
if len(args)>0: # extra arguments
filelist.extend(args)
if len(filelist)<=0:
pyexpander.expandFile(None,
my_globals,
options.simple_vars,
options.auto_continuation,
options.auto_indent,
options.include,
options.no_stdin_msg)
else:
for f in filelist:
# all files are expanded in a single scope:
my_globals= \
pyexpander.expandFile(f,
my_globals,
options.simple_vars,
options.auto_continuation,
options.auto_indent,
options.include)
def script_shortname():
"""return the name of this script without a path component."""
return os.path.basename(sys.argv[0])
def print_summary():
"""print a short summary of the scripts function."""
print(("%-20s: a powerful macro expension language "+\
"based on python ...\n") % script_shortname())
def main():
"""The main function.
parse the command-line options and perform the command
"""
# command-line options and command-line help:
usage = "usage: %prog [options] {files}"
parser = OptionParser(usage=usage,
version="%%prog %s" % __version__,
description="expands macros in a file "+\
"with pyexpander.")
parser.add_option("--summary",
action="store_true",
help="Print a summary of the function of the program.",
)
parser.add_option("-f", "--file",
action="append",
type="string",
help="Specify a FILE to process. This "
"option may be used more than once "
"to process more than one file but note "
"than this option is not really needed. "
"Files can also be specified directly after "
"the other command line options. If not given, "
"the program gets it's input from stdin.",
metavar="FILE"
)
parser.add_option("--eval",
action="append",
type="string",
help="Evaluate PYTHONEXPRESSION in global context.",
metavar="PYTHONEXPRESSION"
)
parser.add_option("-I", "--include",
action="append",
type="string",
help="Add PATH to the list of include paths.",
metavar="PATH"
)
parser.add_option("-s", "--simple-vars",
action="store_true",
help="Allow variables without brackets.",
)
parser.add_option("-a", "--auto-continuation",
action="store_true",
help="Assume '\' at the end of lines with commands",
)
parser.add_option("-i", "--auto-indent",
action="store_true",
help="Automatically indent macros.",
)
parser.add_option("--no-stdin-msg",
action="store_true",
help= "Do not print a message on stderr when the "
"program is reading it's input from stdin."
)
# x= sys.argv
(options, args) = parser.parse_args()
# options: the options-object
# args: list of left-over args
if options.summary:
print_summary()
sys.exit(0)
process_files(options,args)
sys.exit(0)
if __name__ == "__main__":
main()
| Aquafina-water-bottle/Command-Compiler-Unlimited | fena_pyexpander_rewrite/expander3.py | Python | mit | 5,013 |
#!python
# -*- coding: utf-8 -*-
"""File: smoothing.py
Description:
Smoothing techniques are included here
History:
0.1.0 The first version.
"""
__version__ = '0.1.0'
__author__ = 'SpaceLis'
import dataset
from anatool.dm.db import GEOTWEET
def smoothing_by_city(twt_lst, city):
"""Smooth the tweet set"""
pid_set = set()
for twt in twt_lst:
pid_set.add(twt['place_id'])
for pid in pid_set:
#FIXME
pass
def rand_sel(twt, plc):
"""probably select a tweet"""
#FIXME
def merge(dst_lst, twt_lst, pid):
    tid_pool = set(dst_lst.distinct('id'))  # existing tweet ids, to avoid duplicates
    for twt in twt_lst:
        twt['place_id'] = pid
        if twt['id'] not in tid_pool:
            dst_lst.append(twt)
return dst_lst
def cate_smooth(twt_lst, ratio, sel, lmd=None):  # lmd is currently unused
"""Smoothing the dataset by place category"""
rst_lst = dataset.Dataset()
pid_lst = twt_lst.distinct('place_id')
twt_dist = twt_lst.groupfunc('place_id', len)
    tid_set = set(twt_lst.distinct('id'))
pid_set = set(pid_lst)
for pid in pid_lst:
plc = dataset.loadrows(GEOTWEET, ('id', 'lat', 'lng', 'super_category'), \
('id = \'{0}\''.format(pid),), 'place')
plc_type = plc[0]['super_category']
tmp_lst = list()
cand = dataset.type_random(plc_type)
for twt in cand:
if twt['id'] not in tid_set and twt['place_id'] not in pid_lst:
if sel(twt, plc):
twt['place_id'] = pid
tid_set.add(twt['id'])
pid_set.add(twt['place_id'])
tmp_lst.append(twt)
if len(tmp_lst) >= ratio * twt_dist[pid]: break
rst_lst.extend(tmp_lst)
rst_lst.extend(twt_lst)
return rst_lst
if __name__ == '__main__':
twt_lst = cate_smooth(dataset.loadrows(GEOTWEET, ('text', 'place_id'), ('place_id = \'0002ac59702e20cf\'',)), 10, lambda x, y: True)
print '----------------------'
for twt in twt_lst:
print twt
| spacelis/anatool | anatool/analysis/smoothing.py | Python | mit | 2,088 |
# Author: Arturo
# Date: 20/August/2017
# Description: Variables and data types
# Contact: @Jav_Arturo
x = "Hola"
for i in range(len(x)-1,-1,-1):
print(x[i])
print(range(len(x)-1,-1,-1))
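# Note: range(len(x)-1, -1, -1) walks the indices backwards, which is why
# the loop above prints "Hola" one character per line in reverse order.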
print("data camp")
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Print out second element from areas
print(areas[1])
# Print out last element from areas
print(areas[9])
# Print out the area of the living room
print(areas[5])
| Jav10/Python | C-digosPython/docPrueba.py | Python | mit | 522 |
# Uses python3
def calc_fib(n):
if (n <= 1):
return n
fibs = [1 for i in range(n+1)]
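    # The table is seeded with 1s, so fibs[i] ends up holding Fib(i+1);
    # returning fibs[n-1] below therefore yields Fib(n).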
for i in range(2, n+1):
fibs[i] = fibs[i-1] + fibs[i-2]
return fibs[n-1]
n = int(input())
print(calc_fib(n))
| euccas/CodingPuzzles-Python | course/ucsd_algorithm_toolbox/fibonacci_small_dp.py | Python | mit | 228 |
from ..entry import Entry
from .csv import load as _load
from .csv import dump as _dump
from .csv import Writer as _Writer
content_type = 'text/tab-separated-values; charset=utf-8'
def load(self, text, fieldnames=None):
"""Entry from TSV representation."""
_load(self, text, delimiter="\t", quotechar="", lineterminator='\n')
def dump(self):
"""TSV representation."""
return _dump(self, delimiter="\t", quotechar=None, lineterminator='\n')
class Writer(_Writer):
def __init__(self, stream, fieldnames):
_Writer.__init__(self, stream,
fieldnames=fieldnames,
delimiter="\t", quotechar=None, lineterminator='\n')
Entry.tsv = property(dump, load)
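# Usage sketch (assuming an Entry with fields "a" and "b"): reading entry.tsv
# serializes the entry with tab delimiters, and assigning a TSV string to
# entry.tsv re-populates the entry through load().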
| byrondover/entry | entry/representations/tsv.py | Python | mit | 731 |
#
# Copyright (c) 2012 Atis Elsts
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Components for Atmega-based platforms
#
from avr import *
| IECS/MansOS | tools/seal/components/atmega.py | Python | mit | 1,393 |
# pylint: disable=C0111
# pylint: disable=W0401
# pylint: disable=W0614
"""
Development settings.
In the development environment:
- Debug mode is enabled
- The secret key is hardcoded in the file
- The django-debug-toolbar is configured
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = True
TEMPLATES[0]['OPTIONS']['debug'] = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '::1', 'testserver']
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = 'yoloyolo123'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
STATIC_URL = '/static/'
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
INSTALLED_APPS += ['debug_toolbar']
INTERNAL_IPS = ['127.0.0.1']
DEBUG_TOOLBAR_CONFIG = {
}
| chaosdorf/chaospizza | src/config/settings/dev.py | Python | mit | 1,078 |
#coding=utf-8
from redis import Redis
from qpush.conf import redis as rds_conf
class AppDao(object):
def get_appkey_by_appid(self, appid):
rds = Redis(rds_conf['host'], rds_conf['port'], rds_conf['db'])
return rds.get("qpush:appid:%d:appkey" % appid)
def delete_app_by_appid(self, appid):
rds = Redis(rds_conf['host'], rds_conf['port'], rds_conf['db'])
return rds.delete("qpush:appid:%d:appkey" % appid)
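    # Note: each call opens a fresh Redis connection; a shared module-level
    # client could be reused instead, at the cost of less isolation per call.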
| tiaotiao/qpush | qpush/models/app.py | Python | mit | 450 |
from .exceptions import (NotZippedError, NotIndexedError) | moonso/filter_variants | filter_variants/warnings/__init__.py | Python | mit | 57 |
#!/usr/bin/env python
"""
Your task is to complete the 'porsche_query' function and in particular the query
to find all autos where the manufacturer field matches "Porsche".
Please modify only 'porsche_query' function, as only that will be taken into account.
Your code will be run against a MongoDB instance that we have provided.
If you want to run this code locally on your machine,
you have to install MongoDB and download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials at
the following link:
https://www.udacity.com/wiki/ud032
"""
def porsche_query():
    # Please fill in the query to find all autos manufactured by Porsche.
query = {"manufacturer": "Porsche"}
return query
# Do not edit code below this line in the online code editor.
# Code here is for local use on your own computer.
def get_db(db_name):
# For local use
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def find_porsche(db, query):
# For local use
return db.autos.find(query)
if __name__ == "__main__":
# For local use
db = get_db('examples')
query = porsche_query()
p = find_porsche(db, query)
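    # Illustrative only: print a few of the matching documents.
    for i, auto in enumerate(p):
        if i >= 3:
            break
        print(auto)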
| onyb/mooca | Udacity/UD032_Data_Wrangling_with_MongoDB/Lesson_4/10-Finding_Porsche/find_porsche.py | Python | mit | 1,246 |
"""
Django settings for ibc project.
Generated by 'django-admin startproject' using Django 1.11.12.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import datetime
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k7xqb1b)3(q*s8p9kfomy)w&5r$vq@w$&px_pzji=z_&#n=gek'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sorl.thumbnail',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'django_filters',
'core',
'pmm',
'min',
'rte',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ibc.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ibc.wsgi.application'
AUTH_USER_MODEL = 'core.User'
# Default JWT preferences
JWT_AUTH = {
'JWT_PAYLOAD_HANDLER': 'core.serializers.token.jwt_payload_handler',
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=5*1800),
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
}, {
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
}, {
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
}, {
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Rest framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
'core.permissions.BaseModelPermissions',
),
'DEFAULT_FILTER_BACKENDS': (
'rest_framework.filters.SearchFilter',
'django_filters.rest_framework.DjangoFilterBackend',
),
}
| sauli6692/ibc-server | ibc/settings/common.py | Python | mit | 3,865 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# python-boilerplate documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
# NOTE: the hyphenated project name is not a valid Python identifier; the
# importable package directory is assumed to be named python_boilerplate.
import python_boilerplate
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python-boilerplate'
copyright = u'2015, Jeremiah Lant'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = python_boilerplate.__version__
# The full version, including alpha/beta/rc tags.
release = python_boilerplate.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-boilerplatedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'python-boilerplate.tex',
u'python-boilerplate Documentation',
u'Jeremiah Lant', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python-boilerplate',
u'python-boilerplate Documentation',
[u'Jeremiah Lant'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'python-boilerplate',
u'python-boilerplate Documentation',
u'Jeremiah Lant',
'python-boilerplate',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| jlant/playground | python/hello-cookiecutter/python-boilerplate/docs/conf.py | Python | mit | 8,534 |
#!/usr/bin/python
"""
Author:rockylinux
E-mail:[email protected]
"""
import commands
import time
# Display the bandwidth of each network interface.
# Returns a list containing the bandwidth of each interface:
# [interface, rx_bytes, tx_bytes]
# The bandwidth values are in bytes.
class BANDWIDTH:
"""
constructor function
"""
def __init__(self):
self.__name = 'BANDWIDTH'
def getdata(self):
"""
Get the execute results
"""
interfaces = commands.getstatusoutput('ls /sys/class/net')[1]
interfaces = interfaces.split('\n')
        time.sleep(5)
results = []
for iface in interfaces:
            tx_start = commands.getstatusoutput("cat /sys/class/net/" + iface + "/statistics/tx_bytes")[1]
            rx_start = commands.getstatusoutput("cat /sys/class/net/" + iface + "/statistics/rx_bytes")[1]
            time.sleep(2)
tx_end = commands.getstatusoutput("cat /sys/class/net/" + iface + "/statistics/tx_bytes")[1]
rx_end = commands.getstatusoutput("cat /sys/class/net/" + iface + "/statistics/rx_bytes")[1]
results.append([iface, int(rx_end.strip()) - int(rx_start.strip()), int(tx_end.strip()) - int(tx_start.strip())])
tmp = []
for i in results:
if i[1] and i[2]:
tmp = [j for j in i]
break
        # fall back to a zeroed default when no interface showed any traffic
if len(tmp) == 0:
tmp = ['wlan0', 0, 0]
        print tmp
        return tmp
def testgetdata(self, test):
"""
Test whether the function is work
"""
if type(test) == type([]):
for i in test:
print i
else:
print test
if __name__ == '__main__':
OBJ = BANDWIDTH()
DATA = OBJ.getdata()
#OBJ.testgetdata(DATA)
| china-x-orion/infoeye | tools/bandwidth.py | Python | mit | 2,017 |
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from SocketServer import ThreadingMixIn
import threading
import argparse
import re
import cgi
import os
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
if None != re.search('/api/values/*', self.path):
recordID = int(self.path.split('/')[-1])
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
            self.wfile.write(str(recordID))
_remote_commands = {
"ITUNES": [
"""
osascript -e 'tell application "System Events"
tell application "iTunes" to activate
end tell'
""",
"""
osascript -e 'tell application "System Events"
tell application "iTunes" to activate
key code 123 using command down
end tell'
""",
"""
osascript -e 'tell application "System Events"
tell application "iTunes" to activate
key code 49
end tell'
""",
"""
osascript -e 'tell application "System Events"
tell application "iTunes" to activate
key code 124 using command down
end tell'
"""
],
"KEYNOTE": [
"""
osascript -e 'tell application "System Events"
tell application "Keynote" to activate
end tell'
""",
"""
osascript -e 'tell application "System Events"
tell application "Keynote" to activate
key code 123 using command down
end tell'
""",
"""
osascript -e 'tell application "System Events"
tell application "Keynote" to activate
key code 35 using {command down, option down}
end tell'
""",
"""
osascript -e 'tell application "System Events"
tell application "Keynote" to activate
key code 124 using command down
end tell'
"""
],
"POWERPOINT": [
"""
osascript -e 'tell application "Microsoft PowerPoint"
activate
            end tell'
""",
"""
osascript -e 'tell application "Microsoft PowerPoint"
activate
go to previous slide slide show view of slide show window 1
end tell'
""",
"""
osascript -e 'tell application "Microsoft PowerPoint"
activate
run slide show slide show settings of active presentation
end tell'
""",
"""
osascript -e 'tell application "Microsoft PowerPoint"
activate
go to next slide slide show view of slide show window 1
end tell'
"""
]
}
#### Change command to KEYNOTE, POWERPOINT or ITUNES ###
cmd = _remote_commands["ITUNES"][recordID]
os.system(cmd)
else:
self.send_response(400, 'Bad Request: record does not exist')
self.send_header('Content-Type', 'application/json')
self.end_headers()
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
def shutdown(self):
self.socket.close()
HTTPServer.shutdown(self)
class SimpleHttpServer():
def __init__(self, ip, port):
self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)
def start(self):
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def waitForThread(self):
self.server_thread.join()
def stop(self):
self.server.shutdown()
self.waitForThread()
if __name__=='__main__':
server = SimpleHttpServer('', 1337)
print 'HTTP Server Running...........'
server.start()
server.waitForThread()
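    # Example (assumption): with the server running, a GET request drives the
    # focused app, e.g.  curl http://localhost:1337/api/values/2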
| orviwan/ControlR-for-Pebble---Mac-Companion | ControlR.py | Python | mit | 3,462 |
# Code generated by cfonts_to_trans_py.py
import TFTfont
_dejavu12 = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\xc6\x31\x8c\x63\x18\xc6\x30\x00\x63\x00\x00\x00\x00'\
b'\x00\x33\x33\x33\x33\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x8c\x01\x8c\x01\x88\x01\x18\x1f\xff\x1f\xff\x03\x10\x03\x30\x02\x30\x3f\xfe\x3f\xfe\x06\x20\x04\x60\x0c\x60\x0c\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x40\x08\x07\xc1\xfc\x74\x8c\x81\x90\x1f\x01\xf8\x0b\x81\x30\x26\x45\xcf\xf0\xfc\x02\x00\x40\x08\x00\x00\x00'\
b'\x00\x00\x1c\x04\x1b\x02\x18\xc2\x0c\x63\x06\x31\x03\x19\x00\xd8\x80\x38\x8e\x00\x8d\x80\x4c\x60\x46\x30\x63\x18\x21\x8c\x20\x6c\x10\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0f\x00\x3f\x00\xe2\x01\x80\x03\x00\x07\x00\x0f\x00\x3f\x0c\xe7\x39\x87\x63\x07\xc6\x07\x07\x1f\x0f\xf7\x07\x87\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x03\x33\x33\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x30\x86\x18\x43\x0c\x30\xc3\x0c\x30\xc1\x86\x18\x20\xc0\x00'\
b'\x00\xc1\x06\x18\x20\xc3\x0c\x30\xc3\x0c\x31\x86\x18\x43\x00\x00'\
b'\x00\x04\x02\x11\x16\xb0\xe0\x70\xd6\x88\x84\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x01\x80\x06\x00\x18\x00\x60\x01\x80\xff\xf3\xff\xc0\x60\x01\x80\x06\x00\x18\x00\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x33\x36\x40\x00'\
b'\x00\x00\x00\x00\x00\x00\x01\xf7\xc0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x33\x00\x00\x00'\
b'\x00\x0c\x38\x60\xc1\x86\x0c\x18\x60\xc1\x86\x0c\x18\x70\xc0\x00\x00\x00\x00'\
b'\x00\x01\xe0\x7e\x18\x63\x0c\xc0\xd8\x1b\x03\x60\x6c\x0d\x81\xb0\x33\x0c\x61\x87\xe0\x78\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\xe0\xfc\x19\x80\x30\x06\x00\xc0\x18\x03\x00\x60\x0c\x01\x80\x30\x06\x07\xf8\xff\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0f\xc7\xf9\x07\x00\xc0\x30\x0c\x07\x03\x81\xc0\x60\x70\x38\x1c\x07\xfd\xff\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x07\xe1\xfe\x20\xe0\x0c\x01\x80\x70\x78\x0f\x80\x18\x01\x80\x30\x06\x81\xdf\xf1\xf8\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x1c\x03\xc0\x2c\x06\xc0\x4c\x0c\xc1\x8c\x10\xc3\x0c\x60\xc7\xff\x7f\xf0\x0c\x00\xc0\x0c\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0f\xe3\xf8\xc0\x30\x0c\x03\xf0\xfe\x21\xc0\x30\x0c\x03\x00\xd0\x67\xf8\xf8\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xf0\x7f\x1c\x23\x00\xe0\x18\x03\x7c\x7f\xce\x1d\x81\xb0\x36\x06\x61\xcf\xf0\x7c\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0f\xfd\xff\x80\x60\x0c\x01\x80\x60\x0c\x01\x80\x60\x0c\x01\x80\x60\x0c\x01\x80\x60\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\xf0\xff\x38\x76\x06\xc0\xcc\x30\xfc\x3f\xc6\x19\x81\xb0\x36\x06\xe1\xcf\xf0\xfc\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\xe0\xff\x38\x66\x06\xc0\xd8\x1b\x87\x3f\xe3\xcc\x01\x80\x70\x0c\x43\x8f\xe0\xf0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x33\x00\x00\x00\x33\x00\x00\x00'\
b'\x00\x00\x00\x33\x00\x00\x00\x33\x36\x40\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x03\xc0\x3f\x03\xf0\x3f\x00\x78\x00\xfc\x00\x3f\x00\x0f\xc0\x03\xc0\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\xff\x3f\xfe\x00\x00\x00\x01\xff\xf3\xff\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x02\x00\x07\x80\x07\xe0\x01\xf8\x00\x7e\x00\x3c\x01\xf8\x1f\x81\xf8\x07\x80\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x1f\x1f\xc8\x70\x18\x0c\x0e\x0e\x0e\x0e\x06\x03\x01\x80\x00\x60\x30\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x3f\x00\x1f\xfc\x07\x83\xc1\xc0\x1c\x70\x01\x8c\x3b\x1b\x0f\xe3\x63\x9c\x6c\x61\x8d\x8c\x33\xb1\xce\xe6\x1f\xf8\x61\xdc\x0e\x00\x00\xe0\x10\x0e\x0e\x00\xff\x80\x07\xc0\x00\x00\x00\x00\x00'\
b'\x00\x00\x38\x01\xc0\x0e\x00\xd8\x06\xc0\x77\x03\x18\x18\xc1\x83\x0c\x18\x7f\xc7\xff\x30\x19\x80\xd8\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\xf8\x3f\xc3\x0e\x30\x63\x06\x30\xe3\xfc\x3f\xc3\x06\x30\x33\x03\x30\x33\x07\x3f\xe3\xfc\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x3f\x87\xfe\x38\x13\x00\x38\x01\x80\x0c\x00\x60\x03\x00\x18\x00\xe0\x03\x00\x0e\x04\x3f\xe0\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xff\x03\xff\x0c\x1e\x30\x18\xc0\x73\x00\xcc\x03\x30\x0c\xc0\x33\x00\xcc\x07\x30\x18\xc1\xe3\xff\x0f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x07\xfc\xff\x98\x03\x00\x60\x0c\x01\xff\x3f\xe6\x00\xc0\x18\x03\x00\x60\x0f\xf9\xff\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0f\xf3\xfc\xc0\x30\x0c\x03\x00\xfe\x3f\x8c\x03\x00\xc0\x30\x0c\x03\x00\xc0\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x1f\xc1\xff\x8f\x02\x30\x01\xc0\x06\x00\x18\x00\x60\x7d\x81\xf6\x00\xdc\x03\x30\x0c\x70\x30\xff\x81\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\x80\xcc\x06\x60\x33\x01\x98\x0c\xc0\x67\xff\x3f\xf9\x80\xcc\x06\x60\x33\x01\x98\x0c\xc0\x66\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x03\x33\x33\x33\x33\x33\x33\x33\x00\x00\x00'\
b'\x03\x33\x33\x33\x33\x33\x33\x33\x37\xec\x00'\
b'\x00\x01\x83\x8c\x38\x63\x83\x38\x1b\x80\xf8\x07\x80\x3c\x01\xf0\x0d\xc0\x67\x03\x1c\x18\x70\xc1\xc6\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x06\x00\xc0\x18\x03\x00\x60\x0c\x01\x80\x30\x06\x00\xc0\x18\x03\x00\x60\x0f\xf9\xff\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x78\x3c\xf0\x79\xe0\xf3\x63\x66\xc6\xcd\x8d\x99\xb3\x33\x66\x66\xcc\xc7\x19\x8e\x33\x1c\x66\x00\xcc\x01\x98\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\xc0\xcf\x06\x78\x33\xe1\x9b\x0c\xdc\x66\x63\x33\x99\x8c\xcc\x76\x61\xb3\x0f\x98\x3c\xc0\xe6\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0f\xc0\x7f\xe0\xe1\xc3\x00\xce\x01\xd8\x01\xb0\x03\x60\x06\xc0\x0d\x80\x1b\x80\x73\x00\xc3\x87\x07\xfe\x03\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x07\xf0\xff\x18\x73\x06\x60\xcc\x19\x87\x3f\xc7\xf0\xc0\x18\x03\x00\x60\x0c\x01\x80\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0f\xc0\x7f\xe0\xe1\xe3\x00\xce\x01\xd8\x01\xb0\x03\x60\x06\xc0\x0d\x80\x1b\x80\x73\x00\xc3\x87\x07\xfc\x03\xf0\x00\x70\x00\x70\x00\x60\x00\x00\x00\x00'\
b'\x00\x01\xfc\x0f\xf0\x61\xc3\x06\x18\x30\xc3\x87\xf8\x3f\x81\x8e\x0c\x38\x60\xc3\x06\x18\x18\xc0\xc6\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\xf0\xff\x38\x26\x00\xc0\x1c\x01\xf0\x1f\x80\x38\x01\x80\x30\x06\x81\xdf\xf1\xfc\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0f\xff\xff\xf0\x60\x06\x00\x60\x06\x00\x60\x06\x00\x60\x06\x00\x60\x06\x00\x60\x06\x00\x60\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\x80\xcc\x06\x60\x33\x01\x98\x0c\xc0\x66\x03\x30\x19\x80\xcc\x06\x60\x33\x83\x8c\x18\x7f\xc0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x06\x00\xd8\x0c\xc0\x66\x03\x18\x30\xc1\x86\x0c\x18\xc0\xc6\x07\x70\x1b\x00\xd8\x03\x80\x1c\x00\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1e\x0d\x83\xc1\x98\x78\x63\x0f\x0c\x63\x31\x8c\x66\x30\xcc\xcc\x19\x99\x83\x61\xb0\x6c\x36\x0f\x87\x80\xf0\xf0\x1e\x1e\x03\x81\xc0\x70\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x01\xc1\xc6\x0c\x38\xc0\xce\x03\x60\x1e\x00\x70\x03\x80\x3c\x01\xf0\x19\x81\xc6\x0c\x38\xc0\xce\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0e\x07\x60\x63\x0c\x39\xc1\x98\x1f\x80\xf0\x06\x00\x60\x06\x00\x60\x06\x00\x60\x06\x00\x60\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\xff\xdf\xfe\x00\x60\x06\x00\x70\x07\x00\x70\x03\x00\x30\x03\x80\x38\x01\x80\x18\x01\xff\xef\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\xf3\xcc\x30\xc3\x0c\x30\xc3\x0c\x30\xc3\x0c\x30\xf3\xc0\x00'\
b'\x01\x83\x83\x06\x0c\x0c\x18\x30\x30\x60\xc0\xc1\x83\x07\x06\x00\x00\x00\x00'\
b'\x00\xf3\xc3\x0c\x30\xc3\x0c\x30\xc3\x0c\x30\xc3\x0c\xf3\xc0\x00'\
b'\x00\x00\x07\x00\x36\x01\x8c\x0c\x18\x60\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\xff\xff\xc0'\
b'\x60\xc1\x83\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x07\xc3\xf8\x87\x00\xc7\xf3\xfd\x83\x60\xd8\x77\xfc\xfb\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x03\x00\x30\x03\x00\x30\x03\x7c\x3f\xe3\x86\x30\x33\x03\x30\x33\x03\x30\x33\x86\x3f\xe3\x7c\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x03\xe3\xfc\xe1\x70\x18\x06\x01\x80\x70\x0e\x13\xfc\x3e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x01\x80\x30\x06\x3e\xcf\xf9\x87\x60\x6c\x0d\x81\xb0\x36\x06\x61\xcf\xf8\xfb\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x1f\x0f\xf1\x87\x60\x6f\xfd\xff\xb0\x06\x00\x70\x47\xf8\x7e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x3c\xf9\x83\x1f\xff\x98\x30\x60\xc1\x83\x06\x0c\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x3e\xcf\xf9\x87\x60\x6c\x0d\x81\xb0\x36\x06\x61\xcf\xf8\xfb\x00\x64\x18\xff\x0f\x80\x00'\
b'\x00\x06\x00\xc0\x18\x03\x00\x6f\x0f\xf1\xc7\x30\x66\x0c\xc1\x98\x33\x06\x60\xcc\x19\x83\x00\x00\x00\x00\x00\x00\x00'\
b'\x03\x30\x03\x33\x33\x33\x33\x33\x00\x00\x00'\
b'\x03\x30\x03\x33\x33\x33\x33\x33\x33\xee\x00'\
b'\x00\x06\x00\xc0\x18\x03\x00\x61\xcc\x71\x9c\x37\x07\xc0\xf0\x1f\x03\x70\x67\x0c\x71\x87\x00\x00\x00\x00\x00\x00\x00'\
b'\x03\x33\x33\x33\x33\x33\x33\x33\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0d\xe3\xc3\xfd\xf8\xe3\xc7\x30\x60\xcc\x18\x33\x06\x0c\xc1\x83\x30\x60\xcc\x18\x33\x06\x0c\xc1\x83\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x6f\x0f\xf1\xc7\x30\x66\x0c\xc1\x98\x33\x06\x60\xcc\x19\x83\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x1e\x0f\xf1\x86\x60\x6c\x0d\x81\xb0\x36\x06\x61\x8f\xf0\x78\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x03\x7c\x3f\xe3\x86\x30\x33\x03\x30\x33\x03\x30\x33\x86\x3f\xe3\x7c\x30\x03\x00\x30\x03\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x3e\xcf\xf9\x87\x60\x6c\x0d\x81\xb0\x36\x06\x61\xcf\xf8\xfb\x00\x60\x0c\x01\x80\x30\x00'\
b'\x00\x00\x00\x00\x00\x37\x3f\x38\x30\x30\x30\x30\x30\x30\x30\x30\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xf9\xfe\xc1\x60\x3e\x07\xc0\x70\x1a\x0d\xfe\x7c\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x30\x30\x30\x7f\x7f\x30\x30\x30\x30\x30\x30\x30\x3f\x1f\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x60\xcc\x19\x83\x30\x66\x0c\xc1\x98\x33\x06\x71\xc7\xf8\x7b\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x01\x80\xd8\x33\x06\x60\xc6\x30\xc6\x0d\x81\xb0\x36\x03\x80\x70\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x61\xc3\x61\xc3\x31\x46\x33\x66\x33\x66\x33\x66\x1a\x2c\x1e\x3c\x1e\x3c\x0c\x18\x0c\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x01\xc1\xdc\x71\xdc\x1b\x01\xc0\x38\x0f\x81\xb0\x63\x1c\x77\x07\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x01\x80\xd8\x33\x06\x71\x86\x30\xee\x0d\x81\xb0\x1c\x03\x80\x60\x0c\x03\x81\xe0\x38\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x1f\xf7\xfc\x06\x03\x01\xc0\xe0\x70\x18\x0e\x07\xfd\xff\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x3c\x0f\x81\x80\x30\x06\x00\xc0\x18\x03\x03\xc0\x78\x03\x80\x30\x06\x00\xc0\x18\x03\x00\x7c\x07\x80\x00\x00'\
b'\x00\xc6\x31\x8c\x63\x18\xc6\x31\x8c\x63\x18\xc6\x31\x80'\
b'\x00\x0f\x03\xe0\x18\x06\x01\x80\x60\x18\x07\x00\xf0\x3c\x1c\x06\x01\x80\x60\x18\x06\x0f\x83\xc0\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x82\x7f\xfc\xc3\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xf9\x99\x99\x99\x99\xf0\x00\x00\x00'\
_dejavu12_index = b'\x00\x00\x1b\x00\x29\x00\x3e\x00\x68\x00\x85\x00\xb2\x00\xda\x00'\
b'\xe5\x00\xf5\x00\x05\x01\x1d\x01\x42\x01\x4d\x01\x5d\x01\x68\x01'\
b'\x7b\x01\x98\x01\xb5\x01\xd0\x01\xed\x01\x0d\x02\x28\x02\x45\x02'\
b'\x62\x02\x7f\x02\x9c\x02\xa7\x02\xb2\x02\xda\x02\x02\x03\x2a\x03'\
b'\x42\x03\x74\x03\x97\x03\xb7\x03\xda\x03\xff\x03\x1c\x04\x37\x04'\
b'\x5c\x04\x7f\x04\x8a\x04\x95\x04\xb8\x04\xd5\x04\xfd\x04\x20\x05'\
b'\x48\x05\x65\x05\x8d\x05\xb0\x05\xcd\x05\xed\x05\x10\x06\x33\x06'\
b'\x65\x06\x88\x06\xa8\x06\xcb\x06\xdb\x06\xee\x06\xfe\x06\x23\x07'\
b'\x3e\x07\x4e\x07\x69\x07\x89\x07\xa4\x07\xc1\x07\xde\x07\xf1\x07'\
b'\x0e\x08\x2b\x08\x36\x08\x41\x08\x5e\x08\x69\x08\x99\x08\xb6\x08'\
b'\xd3\x08\xf3\x08\x10\x09\x25\x09\x3d\x09\x52\x09\x6f\x09\x8c\x09'\
b'\xb6\x09\xd3\x09\xf0\x09\x0b\x0a\x28\x0a\x36\x0a\x51\x0a\x79\x0a'\
b'\x84\x0a'
dejavu12 = TFTfont.TFTFont(_dejavu12, _dejavu12_index, 21, 19, 96)
fonts = {"dejavu12":dejavu12,
}
| robert-hh/SSD1963-TFT-Library-for-PyBoard | fonts/dejavu12.py | Python | mit | 12,278 |
# -*- coding: utf-8 -*-
#******************************************************************************
# (C) 2008 Ableton AG
#******************************************************************************
__docformat__ = "restructuredtext en"
from turbomail.message import Message
from turbomail.control import interface
from genshi.template.loader import package
from genshi.template import MarkupTemplate, NewTextTemplate
#-------------------------------------------------------------------------------
class TemplateMessage(Message):
"""
This is a genshi-based template mail renderer.
"""
# we need the staticmethod to prevent self being bound as first param
loader = staticmethod(package("abl.devtools", ""))
def __init__(self, **kwargs):
"""
This is a genshi-based template mail renderer.
It derives from L{Message} and introduces three
additional keyword-args:
- "html" for a MarkupTemplate
- "text" for a TextTemplate
- "subject" for a TextTemplate to use for the subject
The templates are currently always loaded relative
to the package::
abl.devtools
"""
html = kwargs.pop("html", None)
text = kwargs.pop("text", None)
subject = kwargs.pop("subject", None)
super(TemplateMessage, self).__init__(**kwargs)
self._html_template = self._text_template = None
if html is not None:
_, _, inf, _ = self.loader(html)
self._html_template = MarkupTemplate(inf)
if text is not None:
_, _, inf, _ = self.loader(text)
self._text_template = NewTextTemplate(inf)
else:
self._text_template = None
if subject is not None:
_, _, inf, _ = self.loader(subject)
self._subject_template = NewTextTemplate(inf)
else:
self._subject_template = None
def render(self, **values):
if self._html_template is not None:
self.rich = self._html_template.generate(**values).render('html', doctype='html')
if self._text_template is not None:
self.plain = self._text_template.generate(**values).render()
if self._subject_template is not None:
subject = self._subject_template.generate(**values).render()
# ensure the subject is just one line
subject = " ".join(subject.split("\n"))
self.subject = subject
def send(self):
interface.send(self)
def configure(conf):
"""
Configures the turbomail system.
"""
default_conf = {
"manager" : "immediate",
"transport" : "smtp",
"smtp.server" : "localhost",
"message.encoding" : "utf-8",
"utf8qp.on" : True,
}
default_conf.update(conf)
for key in default_conf.keys():
default_conf["mail." + key] = default_conf[key]
del default_conf[key]
default_conf["mail.on"] = True
interface.start(default_conf)
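# Example usage (a sketch; template paths and addresses are assumptions,
# resolved relative to the abl.devtools package):
#
#     configure({"smtp.server": "mail.example.com"})
#     msg = TemplateMessage(author="noreply@example.com",
#                           to="user@example.com",
#                           html="templates/welcome.html",
#                           subject="templates/subject.txt")
#     msg.render(name="Ada")
#     msg.send()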
| AbletonAG/abl.robot | abl/robot/mail.py | Python | mit | 3,077 |
# -*- coding: utf-8 -*-
from datetime import datetime as dt
import traceback, os, sys, random
# turbogears imports
from tg import expose, redirect, validate, flash, request, response
from tg.controllers import CUSTOM_CONTENT_TYPE
# third party imports
from paste.fileapp import FileApp
from pylons.controllers.util import forward
from repoze.what import predicates, authorize
from repoze.what.predicates import not_anonymous, in_group, has_permission
# project specific imports
from ordering.lib.base import BaseController
from ordering.model import *
from ordering.util.common import *
from ordering.util.excel_helper import *
from ordering.widgets.order import *
__all__=['ReportController']
class ReportController(BaseController):
#Uncomment this line if your controller requires an authenticated user
allow_only=authorize.not_anonymous()
# allow_only=check_anonymous()
@expose('ordering.templates.report.index')
@tabFocus(tab_type="report")
def index(self):
try:
report_form=order_report
return dict(report_form=report_form, values={})
except:
traceback.print_exc()
@expose()
def export(self, **kw):
result_data=[]
additionInfo=[]
current=dt.now()
dateStr=current.today().strftime("%Y%m%d")
fileDir=os.path.join(os.path.abspath(os.path.curdir), "report_download", "%s"%dateStr)
if not os.path.exists(fileDir): os.makedirs(fileDir)
timeStr=current.time().strftime("%H%M%S")
rn=random.randint(0, 10000)
username=request.identity['repoze.who.userid']
filename=os.path.join(fileDir, "%s_%s_%d.xls"%(username, timeStr, rn))
templatePath=os.path.join(os.path.abspath(os.path.curdir), "report_download/TEMPLATE/JCP_TEMPLATE.xls")
pe=JCPExcel(templatePath=templatePath, destinationPath=filename)
query_fields={"orderDate": "Order Date",
"customerPO": "PO Number",
"tel": "Tel",
"requestShipDate": "Request Ship Date",
"shipMethod": "Ship Method",
"billCompany": "Bill Company",
"billAddress": "Bill Address",
"billAttn": "Bill Attn",
"billTel": "Bill Tel",
"billFax": "Bill Fax",
"shipCompany": "Ship Company",
"shipAddress": "Ship Address",
"shipAttn": "Ship Attn",
"shipTel": "Ship Tel",
"shipFax": "Ship Fax",
"brandStyle": "Brand Style",
"itemCode": "Item Code",
"itemDescription": "Item Description",
"careInstr": "Care Instr",
"fabricContent": "Fabric Content",
"origin": "Origin",
"supplier": "Supplier",
"rnCode": "RN Code",
"wplCode": "WPL Code",
"specialInstr": "Special Instr",
#"labelCode": "Label System",
}
if kw:
for k, v in kw.iteritems():
if kw[k]:
additionItem=query_fields[k]+": "+kw[k]
additionInfo.append(additionItem)
try:
results=self._query_result(**kw)
if results:
for result in results:
result_data.append(self._format_value(result))
pe.inputData(additionInfo=additionInfo, data=result_data)
pe.outputData()
return serveFile(unicode(filename))
except:
traceback.print_exc()
if pe: pe.clearData()
flash("Error occur in the Excel Exporting !")
raise redirect("report")
def _format_value(self, jcp_form):
fields=['header', 'orderDate', 'customerPO',
'billCompany', 'billAddress',
'billAttn', 'billTel', 'billFax', 'billEmail',
'shipCompany', 'shipAddress', 'shipAttn', 'shipTel',
'shipFax', 'shipEmail',
'origin',
'rnCode', 'wplCode', 'specialInstr', #'labelCode'
]
results=[]
# for index, attr in enumerate(fields):
for attr in fields:
result=getattr(jcp_form, attr)
if isinstance(result, basestring): result=result.decode("utf8")
elif isinstance(result, dt): result=Date2Text(result).decode("utf8")
elif isinstance(result, JCPHeaderPO): result=jcp_form.header.poNo
# results.append(jcp_form[index])
results.append(result)
return results
def _query_result(self, **kw):
try:
conditions=[]
if kw.get("orderDate", False):
date=dt.strptime(kw.get("orderDate", '2009-12-1200:00:00')+"00:00:00", "%Y-%m-%d%H:%M:%S")
conditions.append(JCPOrderForm.orderDate>=date)
if kw.get("customerPO", False):
conditions.append(JCPOrderForm.customerPO.like("%%%s%%"%kw.get("customerPO", "")))
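                # "%%%s%%" % value renders as %value%: the doubled percent
                # signs escape the literal % wildcards used by SQL LIKE.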
if kw.get("labelCode", False):
conditions.append(JCPOrderForm.labelCode.like("%%%s%%"%kw.get("labelCode", "")))
if kw.get("tel", False):
conditions.append(JCPOrderForm.tel.like("%%%s%%"%kw.get("tel", "")))
if kw.get("requestShipDate", False):
date=dt.strptime(kw.get("requestShipDate", '2009-12-1200:00:00')+"23:59:59", "%Y-%m-%d%H:%M:%S")
conditions.append(JCPOrderForm.requestShipDate==date)
if kw.get("shipMethod", False):
conditions.append(JCPOrderForm.shipMethod.like("%%%s%%"%kw.get("shipMethod", "")))
if kw.get("billCompany", False):
conditions.append(JCPOrderForm.billCompany.like("%%%s%%"%kw.get("billCompany", "")))
if kw.get("billAddress", False):
conditions.append(JCPOrderForm.billAddress.like("%%%s%%"%kw.get("billAddress", "")))
if kw.get("billAttn", False):
conditions.append(JCPOrderForm.billAttn.like("%%%s%%"%kw.get("billAttn", "")))
if kw.get("billTel", False):
conditions.append(JCPOrderForm.billTel.like("%%%s%%"%kw.get("billTel", "")))
if kw.get("billFax", False):
conditions.append(JCPOrderForm.billFax.like("%%%s%%"%kw.get("billFax", "")))
if kw.get("shipCompany", False):
conditions.append(JCPOrderForm.shipCompany.like("%%%s%%"%kw.get("shipCompany", "")))
if kw.get("shipAddress", False):
conditions.append(JCPOrderForm.shipAddress.like("%%%s%%"%kw.get("shipAddress", "")))
if kw.get("shipAttn", False):
conditions.append(JCPOrderForm.shipAttn.like("%%%s%%"%kw.get("shipAttn", "")))
if kw.get("shipTel", False):
conditions.append(JCPOrderForm.shipTel.like("%%%s%%"%kw.get("shipTel", "")))
if kw.get("shipFax", False):
conditions.append(JCPOrderForm.shipFax.like("%%%s%%"%kw.get("shipFax", "")))
if kw.get("brandStyle", False):
conditions.append(JCPOrderForm.brandStyle.like("%%%s%%"%kw.get("brandStyle", "")))
if kw.get("itemCode", False):
conditions.append(JCPOrderForm.itemCodeid==JCPItemCodeMaster.id)
conditions.append(JCPItemCodeMaster.id==kw.get("itemCode", ""))
if kw.get("itemDescription", False):
conditions.append(JCPOrderForm.itemDescription.like("%%%s%%"%kw.get("itemDescription", "")))
if kw.get("careInstr", False):
conditions.append(JCPOrderForm.careInstr.like("%%%s%%"%kw.get("careInstr", "")))
if kw.get("fabricContent", False):
conditions.append(JCPOrderForm.fabricContent.like("%%%s%%"%kw.get("fabricContent", "")))
if kw.get("origin", False):
conditions.append(JCPOrderForm.origin.like("%%%s%%"%kw.get("origin", "")))
if kw.get("supplier", False):
conditions.append(JCPOrderForm.supplier.like("%%%s%%"%kw.get("supplier", "")))
if kw.get("rnCode", False):
conditions.append(JCPOrderForm.rnCode.like("%%%s%%"%kw.get("rnCode", "")))
if kw.get("wplCode", False):
conditions.append(JCPOrderForm.wplCode.like("%%%s%%"%kw.get("wplCode", "")))
if kw.get("specialInstr", False):
conditions.append(JCPOrderForm.specialInstr.like("%%%s%%"%kw.get("specialInstr", "")))
if not authorize.has_permission('VIEW_FULL_ORDER') and request.identity["user"].belong_to_customer_id:
conditions.append(JCPOrderForm.headerId==JCPHeaderPO.id)
conditions.append(JCPHeaderPO.customer_id==request.identity["user"].belong_to_customer_id)
if len(conditions):
obj=DBSession.query(JCPOrderForm)
for condition in conditions: obj=obj.filter(condition)
result=obj.filter(JCPOrderForm.active==0).all()
else:
result=DBSession.query(JCPOrderForm).filter(JCPOrderForm.active==0).all()
# conn = DBSession.connection()
#
# result = conn.execute("""select header.po_no,
# form.order_date,
# form.po_no,
# form.bill_company,
# form.bill_address,
# form.bill_attn,
# form.bill_tel,
# form.bill_fax,
# form.bill_email,
# form.ship_company,
# form.ship_address,
# form.ship_attn,
# form.ship_tel,
# form.ship_fax,
# form.ship_email,
# sum(detail.quantity),
# detail.sub,
# detail.lot,
# detail.stock
# from jcp_header_po header,
# jcp_detail_po detail,
# jcp_order_form form
# where header.active = 0
# and header.order_type = 'AUTO'
# and header.combo_order in ('2', '3')
# and header.id = form.header_id
# and header.id = detail.header_id
# and header.po_date > '2013-1-1 00:00:00'
# and header.po_date < '2013-6-1 00:00:00'
# group by detail.sub,
# detail.lot,
# detail.stock,
# header.po_no,
# form.order_date,
# form.po_no,
# form.bill_company,
# form.bill_address,
# form.bill_attn,
# form.bill_tel,
# form.bill_fax,
# form.bill_email,
# form.ship_company,
# form.ship_address,
# form.ship_attn,
# form.ship_tel,
# form.ship_fax,
# form.ship_email
# order by form.order_date""")
# print '*' * 20, '\n', dir(result[0])
return result
except:
traceback.print_exc()
| LamCiuLoeng/jcp | ordering/controllers/report.py | Python | mit | 11,714 |
UP = b'\x01'
DEL = b'\x02'
MOV = b'\x03'
| onitu/onitu | onitu/referee/cmd.py | Python | mit | 41 |
from django.conf.urls import patterns, url
from webapp.testapp.views import hello
urlpatterns = patterns('',
url('^$', hello),
)
| tctimmeh/embed-cherrypy-django | webapp/testproj/urls.py | Python | mit | 135 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import yaml
def merge_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise ValueError("Error merging variables: '{}' and '{}'".format(
type(a).__name__, type(b).__name__
))
result = a.copy()
for key, val in b.items():
if isinstance(result.get(key), dict):
result[key] = merge_dicts(a[key], b[key])
else:
result[key] = val
return result
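# Worked example (illustrative): nested dicts are merged key-by-key, with
# values from the second argument winning on conflicts:
#
#     merge_dicts({"a": 1, "env": {"X": 1}}, {"env": {"Y": 2}})
#     # -> {"a": 1, "env": {"X": 1, "Y": 2}}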
def load_manifest(manifest_file):
with open(manifest_file) as f:
manifest = yaml.load(f)
if 'inherit' in manifest:
inherit_file = os.path.join(os.path.dirname(manifest_file), manifest.pop('inherit'))
manifest = merge_dicts(load_manifest(inherit_file), manifest)
return manifest
def load_variables(vars_files):
variables = {}
for vars_file in vars_files:
with open(vars_file) as f:
variables = merge_dicts(variables, yaml.load(f))
return {
k.upper(): json.dumps(v) if isinstance(v, (dict, list)) else v
for k, v in variables.items()
}
def paas_manifest(manifest_file, *vars_files):
manifest = load_manifest(manifest_file)
variables = load_variables(vars_files)
for key in manifest.get('env', {}):
if key in variables:
manifest['env'][key] = variables[key]
return yaml.dump(manifest, default_flow_style=False, allow_unicode=True)
if __name__ == "__main__":
print('---') # noqa
print(paas_manifest(*sys.argv[1:])) # noqa
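# Example invocation (file names are illustrative): the first argument is the
# manifest template, any further arguments are variable files merged in order:
#
#     python generate_manifest.py manifest.yml vars/common.yml vars/prod.yml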
| gov-cjwaszczuk/notifications-admin | scripts/generate_manifest.py | Python | mit | 1,590 |
#!/usr/bin/python
'''
Computer Club of WMU Mailer
by cpg
This is a library of functions for managing an email list and sending out
emails to the proper list.
It uses a json file to store the email list in an array called "mailing_list".
This array is composed of objects containing a key for "email" and
"subscriptions".
The "subscriptions" key has an array of subscriptions for its value. This
array's items should correspond to directories in the data directory in this
repo.
'''
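# Example mailing list file matching the structure described above
# (contents are illustrative):
#
# {
#     "mailing_list": [
#         {"email": "user@example.com", "subscriptions": ["announcements"]}
#     ]
# }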
import json
def load_mailing_list(mailing_list_filename):
''' Gets the mailing list from a json file. '''
with open(mailing_list_filename, 'r') as mailing_list_file:
return json.load(mailing_list_file)["mailing_list"]
def save_mailing_list(mailing_list, mailing_list_filename):
''' Saves a mailing list to a file, overwriting. '''
with open(mailing_list_filename, 'w') as mailing_list_file:
        json.dump({"mailing_list": mailing_list}, mailing_list_file)
def _find_entry(mailing_list, email):
    ''' Returns the entry dict for the given email address, or None. '''
    return next((e for e in mailing_list if e["email"] == email), None)
def add_email(mailing_list, email):
    ''' Adds an email address to the mailing list, with no subscriptions. '''
    if _find_entry(mailing_list, email) is None:
        mailing_list.append({"email": email, "subscriptions": []})
def add_subscription(mailing_list, email, subscription):
    ''' Adds a subscription associated with the given email address. '''
    entry = _find_entry(mailing_list, email)
    if entry is not None and subscription not in entry["subscriptions"]:
        entry["subscriptions"].append(subscription)
def remove_subscription(mailing_list, email, subscription):
    ''' Removes a subscription from the given email address. '''
    entry = _find_entry(mailing_list, email)
    if entry is not None and subscription in entry["subscriptions"]:
        entry["subscriptions"].remove(subscription)
def get_emails(mailing_list, subscription):
''' Gets a list of emails that have signed up for a given subscription. '''
# Array to hold email addresses
emails = []
    # For every email entry in the mailing list, check whether the subscription
# is in the subscriptions array. If it is, add it to the emails list.
for email_entry in mailing_list:
if subscription in email_entry["subscriptions"]:
emails.append(email_entry["email"])
return emails
| cpgillem/markdown_publisher | mailer.py | Python | mit | 2,094 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import sys
import math
class ServoClass:
'''
    Represents a servo connected at pin "pin".
'''
def save_decorator(f):
def inner_func(self, *args, **kw):
try:
return f(self, *args, **kw)
finally:
self.do_save()
return inner_func
def __init__(self, pin=None):
self.__max_value = 200
self.__min_value = 60
self.__start_value = 140
self.__servo_file = '/dev/servoblaster'
self.angle = self.__start_value
if pin < 0 or pin > 7:
            raise ValueError('pin must be between 0 and 7')
self.pin = pin
self.do_save()
def __str__(self):
return "Servo nr: {0}, Angle: {1}".format(self.pin, self.angle)
@save_decorator
def angle_reset(self):
'''
Return to start value
'''
self.angle = self.__start_value
@save_decorator
def increase(self):
'''
Reduces the angle of the servo by 1
'''
self.angle = max(self.angle - 1, self.__min_value)
@save_decorator
def decrease(self):
'''
Increases the angle of servo by 1
'''
self.angle = min(self.angle + 1, self.__max_value)
@save_decorator
def go_random(self):
'''
Positions the servo to a random position between the min and max values.
        Mostly used for demo purposes.
'''
minrandom = self.angle-30
maxrandom = self.angle+30
if minrandom < self.__min_value:
maxrandom += self.__min_value - minrandom
minrandom = self.__min_value
if maxrandom > self.__max_value:
minrandom += self.__max_value - maxrandom
maxrandom = self.__max_value
self.angle = random.randint(minrandom, maxrandom)
#self.angle = random.randint(self.angle-40 if self.angle-40 >= self.__min_value, self.angle+40 if self.angle+40 <= self.__max_value)
@save_decorator
def go_circle(self, angle, servo):
'''
Positions the servo to the specified angle.
'''
if servo == 0:
self.angle = int(10*math.sin(math.radians(angle)) + self.__min_value + (self.__max_value / 2) -20)
else:
self.angle = int(10*math.cos(math.radians(angle)) + self.__min_value + (self.__max_value / 2) - 20)
def do_save(self):
'''
Writes the angle to the file used by servoblaster kernel module
'''
if sys.flags.debug:
sys.stderr.write('{0}={1}\n'.format(self.pin, self.angle))
else:
with open(self.__servo_file, 'w') as sblaster:
sblaster.write('{0}={1}\n'.format(self.pin, self.angle))
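# Example usage (a sketch; writes to /dev/servoblaster unless Python is run
# with -d, in which case positions are echoed to stderr instead):
#
#     servo = ServoClass(pin=0)
#     servo.increase()   # step towards the minimum angle
#     servo.go_random()  # demo mode
#     print(servo)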
| twistedretard/LaserSimulatedSecurityTurret | src/turret/servo.py | Python | mit | 2,812 |
# copied from http://docs.sqlalchemy.org/en/latest/orm/tutorial.html to spare
# creation of a pretend table.
from .base import BaseB
from sqlalchemy import Column, Integer, String
class Person (BaseB):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
nameb = Column(String(32))
fullname = Column(String(50))
password = Column(String(40)) | whiteavian/data_compare | data_compare/test/model/db_b/person.py | Python | mit | 380 |
# -*- coding: utf-8 -*-
try:
from django.db.models import Model
from django.db.models.query import QuerySet
has_django = True
except:
has_django = False
try:
import mongoengine
has_mongoengine = True
except ImportError:
has_mongoengine = False
from serializer import SerializedObject
__all__ = ()
class Preformatter(object):
def handle_value(self, value):
if getattr(type(value), '__name__', 'n/a') == 'dict':
return self.handle_dict(value)
elif getattr(type(value), '__name__', 'n/a') == 'list':
return self.handle_list(value)
else:
return self.parse_value(value)
def parse_value(self, value):
if has_django and isinstance(value, (Model, QuerySet)):
value = SerializedObject(value)
if has_mongoengine and isinstance(value,
(mongoengine.document.BaseDocument,
mongoengine.queryset.QuerySet)):
value = SerializedObject(value)
if isinstance(value, SerializedObject):
return value.to_python()
return value
def handle_list(self, old_list):
new_list = []
for item in old_list:
new_list.append(self.handle_value(item))
return new_list
def handle_dict(self, old_dict):
new_dict = {}
for key, value in old_dict.iteritems():
new_dict[key] = self.handle_value(value)
return new_dict
def run(self, result):
return self.handle_value(result) | flosch/simpleapi | simpleapi/server/preformat.py | Python | mit | 1,523 |
#!/usr/bin/env python
"""Sorts results by max-age, ignoring preloaded entries."""
import sys
import json
import ast
from urlparse import urlparse
import re
import operator
def get_hsts_preloads():
"""Get the latest HSTS preloads in Chromium HEAD."""
preloads = []
# This is just the latest from
# https://code.google.com/p/chromium/codesearch#chromium/src/net/http/transport_security_state_static.json
# with comments removed. TODO: Script updating this.
loaded = json.load(open('transport_security_state_static.json'))
for entry in loaded['entries']:
if entry.get('mode') == 'force-https':
preloads.append(entry['name'])
return preloads
def get_url_and_max_age(line):
"""
Extracts a mapping from URL to max age of the HSTS entry. If there are
multiple max ages, go with the lowest one.
"""
parsed = ast.literal_eval(line)
if not isinstance(parsed, dict):
sys.stderr.write('Malformed entry, ignoring: ' + line + '\n')
return {}
# This is not very optimal; parsed should always have only one entry.
return {urlparse(k).hostname: get_max_age_(v) for k, v in parsed.iteritems()}
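# Worked example (illustrative): a scraped line such as
#   "{'https://example.com/': ['max-age=31536000; includeSubDomains']}"
# yields {'example.com': 31536000}.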
def get_max_age_(directives):
"""Gets the max age time of the HSTS directive."""
max_ages = []
max_age_re = re.compile(r'max-age=(\d+)')
# TODO: Ignore the directive entirely if it is not valid RFC 6797 syntax.
if not isinstance(directives, list):
sys.stderr.write('Malformed directives, ignoring: ' + str(directives))
return 0
for directive in directives:
ages = max_age_re.findall(directive)
if len(ages) != 1:
sys.stderr.write('Malformed directive, ignoring: ' + str(directive))
return 0
else:
max_ages.append(int(ages[0]))
# Per RFC6797, only the first directive is processed. Since we don't know
# what order the UA will see them, pick the lowest one.
return min(max_ages)
def main():
"""Process the scraped results, order by max-age."""
results = sys.argv[1]
preloads = get_hsts_preloads()
results_dict = {}
with open(results) as results_file:
for line in results_file:
results_dict.update(get_url_and_max_age(line))
# Filter out preloaded entries. TODO: Check includeSubdomains
filtered = {k: v for (k, v) in results_dict.iteritems()
if not k in preloads}
# A list of tuples ordered from highest to lowest max-age
# final = sorted(filtered.items(), key=operator.itemgetter(1), reverse=True)
for host, time in filtered.items():
if time >= 86400:
print '"http://' + host + ':443/favicon.ico",'
if __name__ == "__main__":
main()
| diracdeltas/sniffly | util/process.py | Python | mit | 2,731 |
iTunes = {
    'artists': {
        'The Neighbourhood': {
            # album name
            'Wiped Out!': {
                # title of song : duration
                'Prey': 3.22,
                'Cry Baby': 4.02,
                'A Moment of Silence': 2.05
            }
        },
        'Kendrick Lamar': {
            'To Pimp a Butterfly': {
                'King Kunta': 3.54,
                'Alright': 3.39,
                'u': 4.28
            }
        },
        'BANKS': {
            'Goddess': {
                'Waiting Game': 3.27,
                'This Is What It Feels Like': 5.02,
                'Brain': 4.42
            }
        },
        'The Acid': {
            'Liminal': {
                'Animal': 4.37,
                'Ra': 3.22,
                'Basic Instinct': 4.49
            }
        },
        'The Arctic Monkeys': {
            'AM': {
                'Do I Wanna Know': 4.32,
                'Arabella': 3.27,
                'R U Mine?': 3.21
            }
        }
    }
} | ArtezGDA/text-IO | Heike/iTunes.py | Python | mit | 726 |
from django.conf import settings
GATEWAY_URL = ""
#GATEWAY_TEST_URL = u"http://shopping.maroctelecommerce.com/test/gateway/paiement.asp"
GATEWAY_TEST_URL = settings.MT_URL
STORE_ID = settings.MT_STORE_ID
SECRET = settings.MT_SECRET
LANG = getattr(settings, "MT_LANG", "EN")
| coulix/django-maroc-telecommerce | maroc_telecommerce/settings.py | Python | mit | 277 |
"""jiphy/parser.py
Contains the basic Jiphy code parser.
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from .pie_slice import *
class Parser(object):
"""Defines the direct interaction between jiphy and the file content"""
def __init__(self, code):
self.index = 0
self.code = code
self.output = []
def text_till(self, strings, keep_index=False):
"""Returns all text till it encounters the given string (or one of the given strings)"""
if isinstance(strings, str):
strings = [strings]
original_index = self.index
text = ""
matched_string = ""
while self.more:
test_against = self.characters(len(max(strings, key=len)))
for string in strings:
if string.startswith("^"):
if test_against[0] in (" ", "\t", "\n", ")", "(") and test_against[1:].startswith(string[1:]):
matched_string = string
break
if test_against.startswith(string):
matched_string = string
break
if matched_string:
break
text += self.pop()
self += 1
if keep_index:
self.index = original_index
return (text, matched_string)
def __getitem__(self, index):
return self.code[index]
def text_after(self, start, match_on):
"""Returns all text till it encounters the given string (or one of the given strings)"""
text = ""
index = start - 1
while index > 0:
text = self.code[index:start]
if text.startswith(match_on):
return text.lstrip(match_on)
index -= 1
return text.lstrip(match_on)
def pop(self):
"""removes the current character then moves to the next one, returning the current character"""
char = self.code[self.index]
self.index += 1
return char
def characters(self, numberOfCharacters):
"""Returns characters at index + number of characters"""
return self.code[self.index:self.index + numberOfCharacters]
def __iadd__(self, other):
self.index += other
return self
def __isub__(self, other):
self.index -= other
return self
@property
def more(self):
"""Returns true if there is more code to parse"""
return self.index < len(self)
def __len__(self):
return len(self.code)
def behind(self, start, difference):
"""Returns the specified number of characters behind 'start'"""
return self.code[start - difference: start]
def ahead(self, start, difference):
"""Returns the specified number of characters in front of 'start'"""
return self.code[start: start + difference]
def next_content(self, start, amount=1):
"""Returns the next non-whitespace characters"""
while start < len(self.code) and self.code[start] in (' ', '\t', '\n'):
start += 1
return self.code[start: start + amount]
def prev_content(self, start, amount=1):
"""Returns the prev non-whitespace characters"""
while start > 0 and self.code[start] in (' ', '\t', '\n'):
start -= 1
return self.code[(start or amount) - amount: start]
def __str__(self):
return "".join(self.output)
| timothycrosley/jiphy | jiphy/parser.py | Python | mit | 4,548 |
# Django settings for test_project project.
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db',
},
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = '8%&n%y*+x=hqypq58zuxx@7tfehdgar9gq-&-q6f1g(j!vbksz'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django_tenant_templates.loaders.TenantFileSystemLoader',
'django_tenant_templates.loaders.TenantAppLoader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'tests.integration.test_project.middleware.TenantMiddleware',
'django_tenant_templates.middleware.TenantMiddleware',
)
ROOT_URLCONF = 'tests.integration.test_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tests.integration.test_project.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
# This is just for LiveServerTestCase
STATIC_URL = 'static/'
INSTALLED_APPS = (
'tests.integration.test_app',
)
| grampajoe/django-tenant-templates | tests/integration/test_project/settings.py | Python | mit | 1,287 |
#!/usr/bin/env python
"""Test suite for rstcheck."""
from __future__ import unicode_literals
import unittest
import rstcheck
# We don't do this in the module itself to avoid mutation.
rstcheck.ignore_sphinx()
class Tests(unittest.TestCase):
def assert_lines_equal(self, line_numbers, results):
self.assertEqual(set(line_numbers), set(dict(results)))
def test_parse_gcc_style_error_message(self):
self.assertEqual(
(32, 'error message'),
rstcheck.parse_gcc_style_error_message(
'filename:32:7: error message',
filename='filename'))
def test_parse_gcc_style_error_message_with_no_column(self):
self.assertEqual(
(32, 'error message'),
rstcheck.parse_gcc_style_error_message(
'filename:32: error message',
filename='filename',
has_column=False))
def test_parse_gcc_style_error_message_with_parsing_error(self):
with self.assertRaises(ValueError):
rstcheck.parse_gcc_style_error_message(
':32:3 error message',
filename='filename')
with self.assertRaises(IndexError):
rstcheck.parse_gcc_style_error_message(
'filename:32: error message',
filename='filename',
has_column=True)
def test_check(self):
self.assert_lines_equal(
[6],
rstcheck.check(
"""\
Test
====
.. code:: python
print(
"""))
def test_check_code_block(self):
self.assert_lines_equal(
[6],
rstcheck.check(
"""\
Test
====
.. code-block:: python
print(
"""))
def test_check_json(self):
self.assert_lines_equal(
[7],
rstcheck.check(
"""\
Test
====
.. code-block:: json
{
'abc': 123
}
"""))
def test_check_json_with_ignore(self):
self.assert_lines_equal(
[],
rstcheck.check(
"""\
Test
====
.. code-block:: json
{
'abc': 123
}
.. rstcheck: ignore-language=json,python,rst
"""))
def test_check_json_with_unmatched_ignores_only(self):
self.assert_lines_equal(
[7],
rstcheck.check(
"""\
Test
====
.. code-block:: json
{
'abc': 123
}
.. rstcheck: ignore-language=cpp,python,rst
"""))
def test_check_json_with_bad_ignore(self):
self.assert_lines_equal(
[7, 10],
rstcheck.check(
"""\
Test
====
.. code-block:: json
{
'abc': 123
}
.. rstcheck: ignore-language json,python,rst
"""))
def test_check_xml(self):
self.assert_lines_equal(
[8],
rstcheck.check(
"""\
Test
====
.. code-block:: xml
<?xml version="1.0" encoding="UTF-8"?>
<root>
</abc>123<abc>
</root>
"""))
def test_check_xml_with_ignore(self):
self.assert_lines_equal(
[],
rstcheck.check(
"""\
Test
====
.. code-block:: xml
<?xml version="1.0" encoding="UTF-8"?>
<root>
</abc>123<abc>
</root>
.. rstcheck: ignore-language=xml,python,rst
"""))
def test_check_xml_with_unmatched_ignores_only(self):
self.assert_lines_equal(
[8],
rstcheck.check(
"""\
Test
====
.. code-block:: xml
<?xml version="1.0" encoding="UTF-8"?>
<root>
</abc>123<abc>
</root>
.. rstcheck: ignore-language=cpp,python,rst
"""))
def test_check_xml_with_bad_ignore(self):
self.assert_lines_equal(
[8, 11],
rstcheck.check(
"""\
Test
====
.. code-block:: xml
<?xml version="1.0" encoding="UTF-8"?>
<root>
</abc>123<abc>
</root>
.. rstcheck: ignore-language xml,python,rst
"""))
def test_check_regex(self):
self.assert_lines_equal(
[6],
rstcheck.check(
"""\
Test
====
.. code-block:: regex
[0-9]++
"""))
def test_check_regex_with_ignore(self):
self.assert_lines_equal(
[],
rstcheck.check(
"""\
Test
====
.. code-block:: regex
[0-9]++
.. rstcheck: ignore-language=regex,python,rst
"""))
def test_check_regex_with_unmatched_ignores_only(self):
self.assert_lines_equal(
[6],
rstcheck.check(
"""\
Test
====
.. code-block:: regex
[0-9]++
.. rstcheck: ignore-language=cpp,python,rst
"""))
def test_check_regex_with_bad_ignore(self):
self.assert_lines_equal(
[6, 8],
rstcheck.check(
"""\
Test
====
.. code-block:: regex
[0-9]++
.. rstcheck: ignore-language regex,python,rst
"""))
def test_check_with_extra_blank_lines_before(self):
self.assert_lines_equal(
[8],
rstcheck.check(
"""\
Test
====
.. code-block:: python
print(
"""))
def test_check_with_extra_blank_lines_after(self):
self.assert_lines_equal(
[6],
rstcheck.check(
"""\
Test
====
.. code-block:: python
print(
"""))
def test_check_with_extra_blank_lines_before_and_after(self):
self.assert_lines_equal(
[8],
rstcheck.check(
"""\
Test
====
.. code-block:: python
print(
"""))
def test_check_rst(self):
self.assert_lines_equal(
[2],
rstcheck.check(
"""\
Test
===
"""))
def test_check_rst_report_level(self):
self.assert_lines_equal(
[],
rstcheck.check(
"""\
Test
===
""",
report_level=5))
def test_check_nested_rst(self):
self.assert_lines_equal(
[32],
rstcheck.check(
"""\
Test
====
.. code-block:: rst
Test
====
.. code-block:: rst
Test
====
.. code-block:: rst
Test
====
.. code-block:: rst
Test
====
.. code-block:: rst
Test
====
.. code-block:: python
print(
"""))
@unittest.skipIf(not rstcheck.SPHINX_INSTALLED,
'Requires Sphinx')
def test_ignore_sphinx_directives(self):
self.assert_lines_equal(
[],
rstcheck.check(
"""\
.. toctree::
:maxdepth: 2
intro
strings
datatypes
numeric
(many more documents listed here)
.. highlight:: python
:linenothreshold: 5
::
print('Hello')
.. code-block:: ruby
:linenos:
puts "Hello!"
.. code-block:: python
:linenos:
:emphasize-lines: 3,5
def some_function():
interesting = False
print('This line is highlighted.')
print('This one is not...')
print('...but this one is.')
.. literalinclude:: rstcheck.py
:language: python
:linenos:
"""))
def test_check_doctest(self):
self.assert_lines_equal(
[5],
rstcheck.check(
"""\
Testing
=======
>>> x = 1
>>>> x
1
"""))
def test_check_doctest_do_not_crash_when_indented(self):
"""docutils does not provide line number when indented."""
list(rstcheck.check(
"""\
Testing
=======
>>> x = 1
>>>> x
1
"""))
def test_check_doctest_with_ignore(self):
self.assert_lines_equal(
[],
rstcheck.check(
"""\
Testing
=======
>>> x = 1
>>>> x
1
.. rstcheck: ignore-language=doctest
"""))
@unittest.skipIf(rstcheck.SPHINX_INSTALLED,
'Does not work with Sphinx')
def test_check_doctest_in_code(self):
self.assert_lines_equal(
[7],
rstcheck.check(
"""\
Testing
=======
.. code:: doctest
>>> x = 1
>>>> x
1
"""))
def test_check_doctest_in_code_block(self):
self.assert_lines_equal(
[7],
rstcheck.check(
"""\
Testing
=======
.. code-block:: doctest
>>> x = 1
>>>> x
1
"""))
def test_check_doctest_in_python_code_block(self):
"""I'm not sure if this is correct, but I've seen people do it."""
self.assert_lines_equal(
[7],
rstcheck.check(
"""\
Testing
=======
.. code-block:: python
>>> x = 1
>>>> x
1
"""))
def main():
with rstcheck.enable_sphinx_if_possible():
unittest.main()
if __name__ == '__main__':
main()
| sameersingh7/rstcheck | test_rstcheck.py | Python | mit | 8,793 |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# courses :
url(r'^', include('teacher.apps.public.urls')),
# dashboard:
url(r'^js/accounts/', include('teacher.apps.accounts.urls')),
url(r'^accounts/', include('teacher.apps.accounts.urls')),
# dashboard:
url(r'^js/dashboard/', include('teacher.apps.dashboard.urls')),
url(r'^dashboard/', include('teacher.apps.dashboard.urls')),
# courses:
url(r'^js/course/', include('teacher.apps.courses.urls')),
url(r'^course/', include('teacher.apps.courses.urls')),
# chapter
url(r'^js/chapter/', include('teacher.apps.courses.chapters.urls')),
url(r'^chapter/', include('teacher.apps.courses.chapters.urls')),
# video
url(r'^js/video/', include('teacher.apps.courses.videos.urls')),
url(r'^video/', include('teacher.apps.courses.videos.urls')),
# classroom
url(r'^js/classroom/', include('teacher.apps.courses.classroom.urls')),
url(r'^classroom/', include('teacher.apps.courses.classroom.urls')),
# collaboration
url(r'^js/collaboration/', include('teacher.apps.collaboration.urls')),
url(r'^collaboration/', include('teacher.apps.collaboration.urls')),
# inbox
url(r'^js/inbox/', include('teacher.apps.inbox.urls')),
url(r'^inbox/', include('teacher.apps.inbox.urls')),
# document
url(r'^js/document/', include('teacher.apps.courses.documents.urls')),
url(r'^document/', include('teacher.apps.courses.documents.urls')),
# collaboration
url(r'^js/forum/', include('teacher.apps.courses.forum.urls')),
url(r'^forum/', include('teacher.apps.courses.forum.urls')),
# collaboration
url(r'^js/thread/', include('teacher.apps.courses.forum.urls')),
url(r'^thread/', include('teacher.apps.courses.forum.urls'))
)
| houssemFat/MeeM-Dev | teacher/urls.py | Python | mit | 1,930 |
from django import template
from Grundgeruest.models import Nutzer
register = template.Library()
@register.inclusion_tag('Grundgeruest/kopfzeile_knopf.html')
def kopfleiste_knoepfe(user):
""" Der tag erwartet von der Funktion ein dict, in dem die Liste der
url-text-Paare für die Knöpfe der Kopfleiste steht """
return {'knoepfe': Nutzer.knoepfe_kopf(user)}
@register.inclusion_tag('Grundgeruest/menueleiste_knopf.html')
def menueleiste_knoepfe(user):
""" gibt ein dict zurück, in dem die Liste der url-text-Paare für die
Knöpfe der Menüleiste steht """
return {'knoepfe': Nutzer.knoepfe_menü(user)}
| wmles/olymp | Grundgeruest/templatetags/nutzerdaten.py | Python | mit | 645 |
# -*- coding: utf-8 -*-
from unittest import TestCase
from sklearn.tree import DecisionTreeClassifier
from tests.estimator.classifier.Classifier import Classifier
from tests.language.Go import Go
class DecisionTreeClassifierGoTest(Go, Classifier, TestCase):
def setUp(self):
super(DecisionTreeClassifierGoTest, self).setUp()
self.estimator = DecisionTreeClassifier(random_state=0)
def tearDown(self):
super(DecisionTreeClassifierGoTest, self).tearDown()
| nok/sklearn-porter | tests/estimator/classifier/DecisionTreeClassifier/DecisionTreeClassifierGoTest.py | Python | mit | 493 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
import numpy as np
import easytile.core as mut
def feature_to_div_bounds(dim_ranges, n_tilings, n_divss, offsetss, feature):
indices = np.arange(feature.shape[0], dtype=np.int)[feature]
    # assumed completion: np.unravel_index maps the flat feature indices back
    # to per-tiling division coordinates
    div_coords = np.unravel_index(indices, n_divss)
    return div_coords
class NumpyTestCase(TestCase):
def assertArrayEqual(self, a, b): # pylint: disable=no-self-use
if not np.array_equal(a, b):
raise AssertionError(
"Arrays differ: {} != {}\nDifference: {}".format(a, b, b - a))
class TestFeature(NumpyTestCase):
def setUp(self):
self.feature_1dim = mut.make_feature_fn(
[[0, 20]], 3, [5], [[0], [-2], [-4.5]])
def test_features_1dim(self):
self.assertArrayEqual(
self.feature_1dim(11),
np.array([0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
dtype=np.bool))
class TestMakeFeatureFn(TestCase):
def test_bad_offset_fails_1dim(self):
with self.assertRaises(AssertionError):
mut.make_feature_fn([[-5, 34]], 3, [5], [[-10]])
| rmoehn/cartpole | easytile/tests/test_core.py | Python | mit | 1,152 |
#!/bin/python3
import sys
n,k,q = input().strip().split(' ')
n,k,q = [int(n),int(k),int(q)]
a = [int(a_temp) for a_temp in input().strip().split(' ')]
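# After k right rotations, the element now at index m originally sat at
# index (m - k) mod n, so each query is answered with a single lookup.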
for i in range(q):
m = int(input().strip())
print(a[(m-k)%len(a)]) | vipmunot/HackerRank | Algorithms/Circular Array Rotation.py | Python | mit | 228 |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 16 15:02:05 2016
@author: Ryan Jones
"""
import os
import csv
import pandas as pd
import numpy as np
import shutil
import pdb
directory = os.getcwd()
# from the database
Geographies = pd.read_csv(os.path.join(directory, 'inputs', 'Geographies.csv'))
GeographiesData = pd.read_csv(os.path.join(directory, 'inputs', 'GeographiesData.csv'))
GeographyMapKeys = pd.read_csv(os.path.join(directory, 'inputs', 'GeographyMapKeys.csv'))
# "human legible" formatting
GeographiesSpatialJoin = pd.read_csv(os.path.join(directory, 'inputs', 'GeographiesSpatialJoin.csv'))
# Parse inputs and set up mapping
id_to_gau = dict(Geographies.values)
gau_to_id = {v: k for k, v in id_to_gau.iteritems()}
gaus = [id_to_gau[id] for id in GeographiesData['geography_id'].values]
GeographiesDataDict = dict(zip(zip(gaus, GeographiesData['name']), GeographiesData['id'].values))
MapKeysDict = dict(zip(GeographyMapKeys['name'], GeographyMapKeys['id']))
SpatialJoinColumns = GeographiesSpatialJoin.columns
gau_columns = [c for c in SpatialJoinColumns if c in gau_to_id]
map_key_columns = [c for c in SpatialJoinColumns if c in GeographyMapKeys['name'].values]
##########
# tables to produce
##########
# GeographyIntersection
# GeographyIntersectionData
# GeographyMap
##########
GeographyIntersection = [['id']]+[[i] for i in range(1, len(GeographiesSpatialJoin)+1)]
GeographyIntersectionData = []
GeographyMap = []
# Iterate through each row of the spatial join table
for row in GeographiesSpatialJoin.iterrows():
intersection_id = row[0]+1
# iterate down the columns in each row
for col in row[1].iteritems():
if col[0] in gau_columns:
if col[1] is not np.nan:
try:
print GeographiesDataDict[col]
except:
pdb.set_trace()
GeographyIntersectionData.append([intersection_id, GeographiesDataDict[col]])
elif col[0] in map_key_columns:
GeographyMap.append([intersection_id, MapKeysDict[col[0]], col[1]])
else:
raise ValueError('column {} not found in Geographies'.format(col[0]))
# add id column to the results
GeographyIntersectionData = [row+[i+1] for i, row in enumerate(GeographyIntersectionData)]
GeographyMap = [row+[i+1] for i, row in enumerate(GeographyMap)]
# add a header to the results
GeographyIntersectionData = [['intersection_id', 'gau_id', 'id']]+GeographyIntersectionData
GeographyMap = [['intersection_id', 'geography_map_key_id', 'value', 'id']]+GeographyMap
# write the results to the outputs folder
def csv_write(path, data):
with open(path, 'wb') as outfile:
csvwriter = csv.writer(outfile)
for row in data:
csvwriter.writerow(row)
csv_write(os.path.join(directory, 'outputs', 'GeographyIntersection.csv'), GeographyIntersection)
csv_write(os.path.join(directory, 'outputs', 'GeographyIntersectionData.csv'), GeographyIntersectionData)
csv_write(os.path.join(directory, 'outputs', 'GeographyMap.csv'), GeographyMap)
shutil.copyfile(os.path.join(directory, 'inputs', 'Geographies.csv'), os.path.join(directory, 'outputs', 'Geographies.csv'))
shutil.copyfile(os.path.join(directory, 'inputs', 'GeographiesData.csv'), os.path.join(directory, 'outputs', 'GeographiesData.csv'))
shutil.copyfile(os.path.join(directory, 'inputs', 'GeographyMapKeys.csv'), os.path.join(directory, 'outputs', 'GeographyMapKeys.csv')) | energyPATHWAYS/energyPATHWAYS | model_building_tools/create_geography_tables/create_geography_tables.py | Python | mit | 3,480 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import global_search
from frappe.test_runner import make_test_objects
import frappe.utils
class TestGlobalSearch(unittest.TestCase):
def setUp(self):
global_search.setup_global_search_table()
self.assertTrue('__global_search' in frappe.db.get_tables())
doctype = "Event"
global_search.reset()
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
make_property_setter(doctype, "subject", "in_global_search", 1, "Int")
make_property_setter(doctype, "event_type", "in_global_search", 1, "Int")
make_property_setter(doctype, "roles", "in_global_search", 1, "Int")
make_property_setter(doctype, "repeat_on", "in_global_search", 0, "Int")
def tearDown(self):
frappe.db.sql("DELETE FROM `tabProperty Setter` WHERE `doc_type`='Event'")
frappe.clear_cache(doctype='Event')
frappe.db.sql('DELETE FROM `tabEvent`')
frappe.db.sql('DELETE FROM `__global_search`')
make_test_objects('Event')
frappe.db.commit()
def insert_test_events(self):
frappe.db.sql('DELETE FROM `tabEvent`')
phrases = ['"The Sixth Extinction II: Amor Fati" is the second episode of the seventh season of the American science fiction.',
'After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. ',
'Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy.']
for text in phrases:
frappe.get_doc(dict(
doctype='Event',
subject=text,
repeat_on='Every Month',
starts_on=frappe.utils.now_datetime())).insert()
frappe.db.commit()
def test_search(self):
self.insert_test_events()
results = global_search.search('awakens')
self.assertTrue('After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. ' in results[0].content)
results = global_search.search('extraterrestrial')
self.assertTrue('Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy.' in results[0].content)
results = global_search.search('awakens & duty & alien')
self.assertTrue('After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. ' in results[0].content)
def test_update_doc(self):
self.insert_test_events()
test_subject = 'testing global search'
event = frappe.get_doc('Event', frappe.get_all('Event')[0].name)
event.subject = test_subject
event.save()
frappe.db.commit()
results = global_search.search('testing global search')
self.assertTrue('testing global search' in results[0].content)
def test_update_fields(self):
self.insert_test_events()
results = global_search.search('Every Month')
self.assertEqual(len(results), 0)
doctype = "Event"
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
make_property_setter(doctype, "repeat_on", "in_global_search", 1, "Int")
global_search.rebuild_for_doctype(doctype)
results = global_search.search('Every Month')
self.assertEqual(len(results), 3)
def test_delete_doc(self):
self.insert_test_events()
event_name = frappe.get_all('Event')[0].name
event = frappe.get_doc('Event', event_name)
test_subject = event.subject
results = global_search.search(test_subject)
self.assertEqual(len(results), 1)
frappe.delete_doc('Event', event_name)
results = global_search.search(test_subject)
self.assertEqual(len(results), 0)
def test_insert_child_table(self):
frappe.db.sql('delete from tabEvent')
phrases = ['Hydrus is a small constellation in the deep southern sky. ',
'It was first depicted on a celestial atlas by Johann Bayer in his 1603 Uranometria. ',
'The French explorer and astronomer Nicolas Louis de Lacaille charted the brighter stars and gave their Bayer designations in 1756. ',
'Its name means "male water snake", as opposed to Hydra, a much larger constellation that represents a female water snake. ',
'It remains below the horizon for most Northern Hemisphere observers.',
'The brightest star is the 2.8-magnitude Beta Hydri, also the closest reasonably bright star to the south celestial pole. ',
'Pulsating between magnitude 3.26 and 3.33, Gamma Hydri is a variable red giant some 60 times the diameter of our Sun. ',
'Lying near it is VW Hydri, one of the brightest dwarf novae in the heavens. ',
'Four star systems have been found to have exoplanets to date, most notably HD 10180, which could bear up to nine planetary companions.']
for text in phrases:
doc = frappe.get_doc({
'doctype':'Event',
'subject': text,
'starts_on': frappe.utils.now_datetime()
})
doc.insert()
frappe.db.commit()
def test_get_field_value(self):
cases = [
{
"case_type": "generic",
"data": '''
<style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans';
-webkit-text-stroke: #000000} span.s1 {font-kerning: none} </style>
<script>
var options = {
foo: "bar"
}
</script>
<p class="p1"><span class="s1">Contrary to popular belief, Lorem Ipsum is not simply random text. It has
roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock,
a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur,
from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source.
Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero,
written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum,
"Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.</span></p>
''',
"result": ('Description : Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical '
'Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, '
'looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word '
'in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum '
'et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular '
'during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.')
},
{
"case_type": "with_style",
"data": '''
<style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans';
-webkit-text-stroke: #000000} span.s1 {font-kerning: none} </style>Lorem Ipsum Dolor Sit Amet
''',
"result": "Description : Lorem Ipsum Dolor Sit Amet"
},
{
"case_type": "with_script",
"data": '''
<script>
var options = {
foo: "bar"
}
</script>
Lorem Ipsum Dolor Sit Amet
''',
"result": "Description : Lorem Ipsum Dolor Sit Amet"
}
]
for case in cases:
doc = frappe.get_doc({
'doctype':'Event',
'subject': 'Lorem Ipsum',
'starts_on': frappe.utils.now_datetime(),
'description': case["data"]
})
field_as_text = ''
for field in doc.meta.fields:
if field.fieldname == 'description':
field_as_text = global_search.get_formatted_value(doc.description, field)
self.assertEqual(case["result"], field_as_text)
| RicardoJohann/frappe | frappe/tests/test_global_search.py | Python | mit | 7,852 |
"""
Assorted helper functions for the legal-networks project: Court Listener REST
API access, CSV/JSON I/O, list and dict manipulation, and NPV calculations.
"""
__author__ = 'brendan'
import os
import numpy as np
import csv
import ast
import requests
import json
username = 'unc_networks'
password = 'UNCSTATS'
def json_to_dict(json_path):
with open(json_path) as data_file:
data = json.load(data_file)
return data
def url_to_dict(url):
"""
:param url: String representing a json-style object on Court Listener's REST API
:return: html_as_dict, a dictionary of the data on the HTML page
"""
response = requests.get(url, auth=(username, password))
html = response.text
html = html.replace('false', 'False')
html = html.replace('true', 'True')
html = html.replace('null', 'None')
html_as_dict = ast.literal_eval(html)
return html_as_dict
def csv_to_list(directory, filename, has_header=0, want_header=0):
if directory[-1] == '/' or filename[0] == '/':
pass
else:
directory += '/'
the_file = open(directory + filename)
filelist = list(the_file)
the_file.close()
if has_header == 1 and want_header == 0:
first_row = 1
else:
first_row = 0
for i in range(len(filelist)):
filelist[i] = filelist[i][:-1].rsplit(',')
filelist = filelist[first_row:]
return filelist
def list_to_dict(the_list, has_header=0, want_header=0):
return_dict = {}
if has_header == 1 and want_header == 0:
first_row = 1
else:
first_row = 0
for row in the_list[first_row:]:
if len(row[1:]) > 1:
values = row[1:]
else:
values = row[1]
return_dict[row[0]] = values
return return_dict
def column_to_list(the_list, column_index, has_header=0, want_header=0):
output = []
if has_header == 1 and want_header == 0:
first_row = 1
else:
first_row = 0
for row in the_list[first_row:]:
output += [row[column_index]]
return output
def print_dict(dict):
for element in dict:
print element, ": ", dict[element]
def print_list(input_list):
for row in input_list:
print row
def compare_dicts(list_of_dicts):
"""
Requires that dicts have the same keys
"""
for element in list_of_dicts[0]:
print element, ": ", [dict_i[element] for dict_i in list_of_dicts]
def dict_values(dict, tuple_index, tuple_index_value):
"""
:param dict: a dictionary whose keys are a tuple
:param tuple_index: index of tuple that is of interest
:param tuple_index_value: value required of tuple at tuple_index
:return: list of appropriate keys of dict & corresponding values
"""
keys = []
values = []
for tuple in dict:
tuple_value_of_interest = tuple[tuple_index]
if tuple_value_of_interest == tuple_index_value:
keys.append(tuple)
values.append(dict[tuple])
else:
pass
return keys, values
def sum_x_to_y(dictionary, x, y):
total = 0
for i in range(x, y+1):
total += dictionary[i]
return total
def cumulate(input_dict, lifetime=float("inf")):
output = {}
start = min(input_dict.keys())
end = max(input_dict.keys())
for year in range(start, end+1):
cum_to_now = sum_x_to_y(input_dict, start, year)
if year - start >= lifetime:
retired_years = year - start - lifetime
ret_to_now = sum_x_to_y(input_dict, start, start + retired_years)
else:
ret_to_now = 0
output[year] = cum_to_now - ret_to_now
return output
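# e.g. cumulate({2000: 1, 2001: 2, 2002: 3}) -> {2000: 1, 2001: 3, 2002: 6};
# with a finite lifetime, additions older than `lifetime` years are retired.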
def unique(the_list):
elements = []
for element in the_list:
if element in elements:
pass
else:
elements.append(element)
return elements
def list_to_csv(directory_and_filename, list):
if directory_and_filename[-4:] == '.csv':
directory_and_filename = directory_and_filename[:-4]
with open(directory_and_filename + '.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
for row in list:
try:
spamwriter.writerow(row)
except UnicodeEncodeError:
new_row = []
for element in row:
if type(element) is unicode:
new_row.append(element.encode('utf-8'))
else:
new_row.append(element)
csvfile.close()
def add_row_to_csv(directory, filename, row, columns_to_skip):
row = ['' for i in range(columns_to_skip)] + row
with open(directory + '/' + filename + '.csv', 'a') as csvfile:
spamwriter = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(row)
csvfile.close()
def add_version_to_filename(directory, filename):
if directory[-1] == '/':
pass
else:
directory += '/'
file_name = directory + filename
counter = 1
file_name_parts = os.path.splitext(file_name) # returns ('/path/file', '.ext')
while os.path.isfile(file_name):
file_name = file_name_parts[0] + '_' + str(counter) + file_name_parts[1]
counter += 1
return file_name
def list_value_finder(list, key, key_index, value_index):
for row in list:
if row[key_index] == key:
value = row[value_index]
break
return value
def column_of_list(list,column_index):
column_values = []
for row in list:
column_values.append(row[column_index])
return column_values
def table_to_tupled_dictionary(list, num_col_headers, Bool_has_header_row):
num_rows = len(list)
num_cols = len(list[0])
tupled_dictionary = {}
if num_col_headers > 0:
header_col_values = column_of_list(list, 0)
print "header_col_values = ", header_col_values
else:
header_col_values = [i for i in range(0, num_rows)]
print "header_col_values = ", header_col_values
if Bool_has_header_row == 1:
header_row_values = list[0]
print "header_row_values = ", header_row_values
else:
header_row_values = [j for j in range(0, num_cols)]
print "header_row_values = ", header_row_values
for i in range(1, num_rows):
for j in range(1, num_cols):
print "i = ", i
print "j = ", j
tupled_dictionary[(header_col_values[i], header_row_values[j])] = list[i][j]
print (header_col_values[i], header_row_values[j]), " : ", list[i][j]
return tupled_dictionary
def dict_to_list(dictionary):
output_list = []
for key in dictionary:
key_list = [str(key)]
value_list = [dictionary[key]]
output_row = key_list + value_list
output_list.append(output_row)
return output_list
def comparison_test(dict1, dict2):
success_bool = True
max_delta = 0
for element in dict1:
if dict1[element] != dict2[element]:
delta = abs(dict1[element] - dict2[element])
if delta > max_delta:
max_delta = delta
success_bool = False
print "Largest delta: ", max_delta
return success_bool
def remove_by_element(input_list, element_to_remove, all_or_first):
if all_or_first == "all":
list_so_far = []
for element in input_list:
if element == element_to_remove:
pass
else:
list_so_far.append(element)
return list_so_far
elif all_or_first == "first":
for i in range(len(input_list)):
if i == len(input_list) - 1 and input_list[i] == element_to_remove:
return input_list[:i]
elif input_list[i] == element_to_remove:
return input_list[:i] + input_list[i+1:]
def pairwise_add_lists(list1, list2):
summed_list = []
for i in range(len(list1)):
summed_list.append(list1[i] + list2[i])
return summed_list
def pairwise_add_dicts(dict1, dict2):
summed_dict = {}
for key in dict1:
summed_dict[key] = dict1[key] + dict2[key]
return summed_dict
def pairwise_divide_lists(nums, denoms):
divided_list = []
for i in range(len(nums)):
divided_list.append(float(nums[i])/float(denoms[i]))
return divided_list
def remove_by_index(input_list, index_to_remove):
output_list = input_list[:index_to_remove] + input_list[index_to_remove+1:]
return output_list
def index_finder(list_with_header_row, column_header):
for j in range(len(list_with_header_row[0])):
if list_with_header_row[0][j] == column_header:
return j
print "Column header %s not found." % column_header
def calc_excel_npv(rate, values):
orig_npv = np.npv(rate, values)
excel_npv = orig_npv/(1+rate)
return excel_npv
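# np.npv treats the first cash flow as occurring at t=0, whereas Excel's NPV
# discounts every value by at least one period; dividing by (1 + rate)
# reconciles the two conventions.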
def dictionary_to_xlnpv(rate, dictionary, years):
values = [dictionary[year] for year in years]
excel_npv = calc_excel_npv(rate, values)
return excel_npv
def isnumeric(num):
try:
float(num)
return True
except ValueError:
return False
def delete_filenames_starting_with(directory, start_string):
for filename in os.listdir(directory):
if filename.startswith(start_string):
os.remove(os.path.join(directory, filename))
def has_duplicates(list):
elements = []
for element in list:
if element in elements:
print 'Duplicate detected.'
return True
else:
elements.append(element)
print "No duplicates detected."
return False
def find_dict_key(dict, value):
for key in dict:
if dict[key] == value:
return key
print "Value not found."
return None
def sum_of_list_column(list, column_index, rows_to_skip):
total = 0
for row in list[rows_to_skip:]:
total += row[column_index]
return total
def avg_of_list_column(list, column_index, rows_to_skip=0):
total = 0
row_count = 0
for row in list[rows_to_skip:]:
total += float(row[column_index])
row_count += 1
if row_count > 0:
return float(total)/float(row_count)
else:
print "Div by 0 error"
def CreateTotalRow(List,RowsToSkip,ColsToSkip):
TotalRow = []
for j in range(ColsToSkip, len(List[0])):
TotalRow.append(sum_of_list_column(List, j, RowsToSkip))
return TotalRow
def Flatten(ListOfLists):
ReturnList = [item for sublist in ListOfLists for item in sublist]
return ReturnList
def PrintTabularResults(header_list, data_list):
import tabulate
tabulated_data = tabulate.tabulate(data_list, tuple(header_list))
print tabulated_data
return tabulated_data
def TransposeTable(table):
transposed_table = [[x[i] for x in table] for i in range(len(table[0]))]
return transposed_table
def InterfaceCountdown(SecondsToCountdown):
import time
SecsLeft = int(SecondsToCountdown)
while SecsLeft > 0:
print "... %d ..." % SecsLeft
time.sleep(1)
SecsLeft -= 1
def SortListByColumn(List,ColumnIndex):
from operator import itemgetter
sorted_List = sorted(List, key=itemgetter(ColumnIndex))
return sorted_List
def SortListByRow(List,RowIndex):
List = zip(*List)
List.sort(key=lambda x: x[RowIndex])
List = zip(*List)
return List
def PercentileOfList(List, Percentile, Ascending=True):
"""
:param List: list of values [value1, .... , valueN]
:param Percentile: desired percentile of List (inclusive)
    :param Ascending: if True, return the k smallest values; else the k largest
    :return: the k = int(Percentile*len(List)) smallest (or largest) values,
             e.g. PercentileOfList([5, 1, 3, 2, 4], 0.4) -> [1, 2]
"""
n = len(List)
k = int(Percentile*n) # Number of data points
ascending_list = sorted(List)
if Ascending is True:
return ascending_list[:k]
else:
return ascending_list[::-1][:k]
| brschneidE3/LegalNetworks | python_code/helper_functions.py | Python | mit | 11,787 |
# coding=utf-8
from .simulator import Simulator
| vialette/ultrastorage | ultrastorage/simulator/__init__.py | Python | mit | 49 |
import copy
def heap_sort(sorting_list):
step = 0
ls = copy.deepcopy(sorting_list)
    def sift_down(start, end, step):
        root = start
        while True:
            child = 2 * root + 1
            if child > end:
                break
            if child + 1 <= end and ls[child] < ls[child+1]:
                child += 1
            if ls[root] < ls[child]:
                step += 1
                (ls[root], ls[child]) = (ls[child], ls[root])
                root = child
            else:
                break
        # integers are passed by value, so the updated swap count must be
        # returned to the caller; the original in-place increments were lost
        return step
    #create heap: sift down from the last internal node up to the root
    for start in xrange((len(ls)-2)/2, -1, -1):
        step = sift_down(start, len(ls) - 1, step)
    #sort: swap the max to the end, then restore the heap on the prefix
    for end in xrange(len(ls)-1, 0, -1):
        (ls[0], ls[end]) = (ls[end], ls[0])
        step += 1
        step = sift_down(0, end-1, step)
return (ls, step)
if __name__ == "__main__":
sList=[ 13, 14, 94, 33, 82, 25, 59, 94, 65, 23, 45, 27, 73, 25, 39, 10 ]
print(heap_sort(sList)) | JoshOY/DataStructureCourseDesign | PROB10/my_sort/heapSort.py | Python | mit | 948 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ApplicationInsightsManagementClientConfiguration(Configuration):
"""Configuration for ApplicationInsightsManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
"""
def __init__(
self,
credential: "AsyncTokenCredential",
**kwargs: Any
) -> None:
super(ApplicationInsightsManagementClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
self.credential = credential
self.api_version = "2020-06-02-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-applicationinsights/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| Azure/azure-sdk-for-python | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2020_06_02_preview/aio/_configuration.py | Python | mit | 3,020 |
# -*- coding: utf-8 -*-
from django import forms
from sgce.models import Event
from decimal import Decimal
class EnrollmentForm(forms.Form):
event_id = forms.CharField(required=False)
total_price = forms.DecimalField(required=False)
off_price = forms.DecimalField(required=False)
total_points = forms.DecimalField(required=False)
token = forms.CharField(max_length=5, required=False)
cc_number = forms.CharField(required=False)
cc_date = forms.CharField(required=False)
cc_cod = forms.CharField(max_length=3, required=False)
def __init__(self, event_id, activities, *args, **kwargs):
super(EnrollmentForm, self).__init__(*args, **kwargs)
self.fields['event_id'].initial = event_id
self.fields['activities'] = forms.MultipleChoiceField(choices=[ (a.id, a.name) for a in activities])
self.fields['total_price'].initial = sum(a.price for a in activities)
self.fields['off_price'].initial = self.fields['total_price'].initial - self.fields['total_price'].initial * Decimal('0.2')
self.fields['total_points'].initial = sum(a.points for a in activities)
def clean_token(self):
data = self.cleaned_data['token']
event = Event.objects.get(id=self.fields['event_id'].initial)
if not data:
return data
try:
voucher = event.vouchers.get(token=data)
except:
raise forms.ValidationError("Voucher inválido, verifique se o token foi digitado corretamente.")
if voucher.used:
raise forms.ValidationError("O voucher especificado já está em uso, verifique se o token foi digitado corretamente.")
return data
| ramonsaraiva/sgce | sgce/sgceusr/forms.py | Python | mit | 1,547 |
"""
Anyone who's tried to get through the A Song of Ice and Fire books written by George R.R. Martin (the basis for the HBO
show Game of Thrones) knows that while the books are generally excellent, there are a lot of characters. A staggering
number, in fact, and it can be very hard to remember who's who and who is related to who and who had an incestuous
relationship with what sister or brother.
So, today, we make that a little bit easier! What follows at the end here is a list of 50+ characters from the books and
a list detailing how they're related. Each character is given a two-letter code (for instance "AA" or "BQ") and a
specification of their gender ("M" or "F"), and then what follows is a list detailing how they're related to the other
characters.
To make things simple, there's only one "basic" relationship, which is "A is parent to B", written as "->". So, for
instance, if Arya Stark has the code "AI" and Eddard Stark has the code "AB", then "AB->AI" means "Eddard Stark is
parent to Arya Stark". Each person may have 0, 1 or 2 parents specified somewhere in the list, but no more.
(I should point out here that this family tree contains no spoilers. This is the family tree as it stands in the
beginning of Book 1, though some of the characters you won't meet until much later. For those of you who've read the
books or seen the show, please don't put any spoilers in the comments, even in hidden spoiler text.)
Write a program that parses this list, and can then answer questions about the relationships between people. Here is a
list of functions you should write:
ancestors(person) which gives the direct ancestors of that person (parents, grand-parents, great-grand-parents,
etc.). For instance, ancestors("Asha Greyjoy") should return ["Balon Greyjoy", "Alannys Harlaw", "Quellon Greyjoy"].
What is the result of ancestors("Daenerys Targaryen")?
descendants(person) which gives you the direct descendants of that person (children, grand-children,
great-grand-children, etc.). What is the result of descendants("Jaehaerys Targaryen")?
brothers(person) and sisters(person) which gives the brothers and sisters of the specified person (including
half-brothers and half-sisters, though you could write special functions for full siblings and half siblings if you
want).
aunts(person) and uncles(person) which gives you the aunts and uncles of the specified person.
cousins(person), which gives you the 1st cousins of the specified person.
Bonus: As a bonus, write a function called relationship(person1, person2) which returns person1's relationshipt to
person2 as a string (i.e. "Grandfather", "1st cousin", "Brother", "Great uncle", "Not related" etc.). As with all
bonuses on /r/dailyprogrammer, this is entirely optional. EDIT: Since this chart gives no indication about who is
married to whom, you can safely exclude all familial relationships that somehow involves marriage. That means that
relationship("Eddard Stark", "Catelyn Tully") should return "Not related", and you can also skip all
brother/sister/mother/father in-laws. Only relationships "by blood", so to speak.
And now, here is the family tree of some of the major characters in A Song of Ice and Fire:
AA = Rickard Stark (M) AB = Eddard Stark (M) AC = Catelyn Tully (F)
AD = Brandon Stark (M) AE = Benjen Stark (M) AF = Jon Snow (M)
AG = Robb Stark (M) AH = Sansa Stark (F) AI = Arya Stark (F)
AJ = Bran Stark (M) AK = Rickon Stark (M) AL = Hoster Tully (M)
AM = Minisa Whent (F) AN = Edmure Tully (M) AO = Lysa Tully (F)
AP = Jon Arryn (M) AQ = Robert Arryn (M) AR = Tytos Lannister (M)
AS = Tywin Lannister (M) AT = Joanna Lannister (F) AU = Kevan Lannister (M)
AV = Cersei Lannister (F) AW = Jamie Lannister (M) AX = Tyrion Lannister (M)
AY = Robert Baratheon (M) AZ = Joffrey Baratheon (M) BA = Myrcella Baratheon (F)
BB = Tommen Baratheon (M) BC = Lancel Lannister (M) BD = Steffon Baratheon (M)
BE = Stannis Baratheon (M) BF = Selyse Florent (F) BG = Shireen Baratheon (F)
BH = Renly Baratheon (M) BI = Jaehaerys Targaryen (M) BJ = Aerys Targaryen (M)
BK = Rhaella Targaryen (F) BL = Rhaegar Targaryen (M) BM = Elia Martell (F)
BN = Rhaenys Targaryen (F) BO = Aegon Targaryen (M) BP = Viserys Targaryen (M)
BQ = Daenerys Targaryen (F) BR = Quellon Greyjoy (M) BS = Balon Greyjoy (M)
BT = Euron Greyjoy (M) BU = Victarion Greyjoy (M) BV = Urrigon Greyjoy (M)
BW = Aeron Greyjoy (M) BX = Rodrik Greyjoy (M) BY = Maron Greyjoy (M)
BZ = Asha Greyjoy (F) CA = Theon Greyjoy (M) CB = Alannys Harlaw (F)
---------------------------------------------------------------------------------------
AA->AB, AA->AD, AA->AE, AB->AF, AB->AG, AB->AH, AB->AI, AB->AJ, AB->AK, AC->AG,
AC->AH, AC->AI, AC->AJ, AC->AK, AL->AC, AL->AN, AL->AO, AM->AC, AM->AN, AM->AO,
AO->AQ, AP->AQ, AR->AS, AR->AU, AS->AV, AS->AW, AS->AX, AT->AV, AT->AW, AT->AX,
AU->BC, AV->AZ, AV->BA, AV->BB, AY->AZ, AY->BA, AY->BB, BD->AY, BD->BE, BD->BH,
BE->BG, BF->BG, BI->BJ, BI->BK, BJ->BL, BJ->BP, BJ->BQ, BK->BL, BK->BP, BK->BQ,
BL->BN, BL->BO, BM->BN, BM->BO, BR->BS, BR->BT, BR->BU, BR->BV, BR->BW, BS->BX,
BS->BY, BS->BZ, BS->CA, CB->BX, CB->BY, CB->BZ, CB->CA
"""
import re
class Character:
def __init__(self, name, gender):
self.name = name
self.gender = gender
self.parents = []
self.children = []
def extract_data(input_string):
char_dict = {}
code_dict = {}
for char in re.findall('([A-Z]{2} = [A-Za-z ]* \([FM]\))', input_string):
code = char[:2]
name = char[5:-4]
gender = char[-2]
code_dict[code] = name
char_dict[name] = Character(name, gender)
for rel in re.findall('([A-Z]{2}->[A-Z]{2})', input_string):
parent = code_dict[rel[:2]]
child = code_dict[rel[-2:]]
char_dict[parent].children.append(child)
char_dict[child].parents.append(parent)
return char_dict
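# Illustrative behaviour (hand-worked, not from the original solution): given
# the snippet "AB = Eddard Stark (M) AF = Jon Snow (M) AB->AF", extract_data
# returns a dict where char_dict["Eddard Stark"].children == ["Jon Snow"] and
# char_dict["Jon Snow"].parents == ["Eddard Stark"].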
def ancestors(char_dict, person):
parents = char_dict[person].parents
res = set(parents[:])
for p in parents:
res.update(ancestors(char_dict, p))
return list(res)
def descendants(char_dict, person):
children = char_dict[person].children
res = set(children[:])
for c in children:
res.update(descendants(char_dict, c))
return list(res)
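# Hand-derived from the chart above (a sanity check, not authoritative output;
# element order may vary because sets are unordered):
#   ancestors(data, "Daenerys Targaryen") -> Aerys, Rhaella and Jaehaerys Targaryen
#   descendants(data, "Jaehaerys Targaryen") -> Aerys, Rhaella, Rhaegar,
#       Viserys, Daenerys, Rhaenys and Aegon Targaryen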
def siblings(char_dict, person, gender=None, fullness=0):
"""fullness=0 all siblings
fullness=1 full siblings only
fullness=2 half siblings only
"""
parents = char_dict[person].parents
if gender:
children = []
for p in parents:
res = []
for c in char_dict[p].children:
if char_dict[c].gender == gender:
res.append(c)
children.append(res)
else:
children = [char_dict[p].children for p in parents]
if len(children) == 1:
children.append([])
answer = []
if fullness == 0:
answer = list(set(children[0]) | set(children[1]))
elif fullness == 1:
answer = list(set(children[0]) & set(children[1]))
elif fullness == 2:
answer = list(set(children[0]) ^ set(children[1]))
if person in answer:
answer.remove(person)
return answer
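# Worked example (hand-checked against the chart; illustrative only): Sansa
# Stark has both Eddard (AB) and Catelyn (AC) as parents, so fullness=1
# (intersection of the two parents' children) yields her full siblings Robb,
# Arya, Bran and Rickon, while fullness=2 (symmetric difference) leaves only
# her half-brother Jon Snow.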
def parents_siblings(char_dict, person, gender=None):
parents = set(char_dict[person].parents)
grandparents = [char_dict[p].parents for p in parents]
grandparents = {p for sublist in grandparents for p in sublist}
# return list({char_dict[g].children for g in grandparents})
    res = set()
for g in grandparents:
if gender:
for c in char_dict[g].children:
if char_dict[c].gender == gender:
res.update([c])
else:
res.update(char_dict[g].children)
    return list(res - parents)
def cousins(char_dict, person):
upper_list = parents_siblings(char_dict, person)
return list({c for upper in upper_list for c in char_dict[upper].children})
def relationship(person1, person2):
    # optional bonus from the problem statement above; left unimplemented
    pass
def main():
input_string = ("AA = Rickard Stark (M) AB = Eddard Stark (M) AC = Catelyn Tully (F) "
"AD = Brandon Stark (M) AE = Benjen Stark (M) AF = Jon Snow (M) "
"AG = Robb Stark (M) AH = Sansa Stark (F) AI = Arya Stark (F) "
"AJ = Bran Stark (M) AK = Rickon Stark (M) AL = Hoster Tully (M) "
"AM = Minisa Whent (F) AN = Edmure Tully (M) AO = Lysa Tully (F) "
"AP = Jon Arryn (M) AQ = Robert Arryn (M) AR = Tytos Lannister (M) "
"AS = Tywin Lannister (M) AT = Joanna Lannister (F) AU = Kevan Lannister (M) "
"AV = Cersei Lannister (F) AW = Jamie Lannister (M) AX = Tyrion Lannister (M) "
"AY = Robert Baratheon (M) AZ = Joffrey Baratheon (M) BA = Myrcella Baratheon (F) "
"BB = Tommen Baratheon (M) BC = Lancel Lannister (M) BD = Steffon Baratheon (M) "
"BE = Stannis Baratheon (M) BF = Selyse Florent (F) BG = Shireen Baratheon (F) "
"BH = Renly Baratheon (M) BI = Jaehaerys Targaryen (M) BJ = Aerys Targaryen (M) "
"BK = Rhaella Targaryen (F) BL = Rhaegar Targaryen (M) BM = Elia Martell (F) "
"BN = Rhaenys Targaryen (F) BO = Aegon Targaryen (M) BP = Viserys Targaryen (M) "
"BQ = Daenerys Targaryen (F) BR = Quellon Greyjoy (M) BS = Balon Greyjoy (M) "
"BT = Euron Greyjoy (M) BU = Victarion Greyjoy (M) BV = Urrigon Greyjoy (M) "
"BW = Aeron Greyjoy (M) BX = Rodrik Greyjoy (M) BY = Maron Greyjoy (M) "
"BZ = Asha Greyjoy (F) CA = Theon Greyjoy (M) CB = Alannys Harlaw (F) "
"--------------------------------------------------------------------------------------- "
"AA->AB, AA->AD, AA->AE, AB->AF, AB->AG, AB->AH, AB->AI, AB->AJ, AB->AK, AC->AG, "
"AC->AH, AC->AI, AC->AJ, AC->AK, AL->AC, AL->AN, AL->AO, AM->AC, AM->AN, AM->AO, "
"AO->AQ, AP->AQ, AR->AS, AR->AU, AS->AV, AS->AW, AS->AX, AT->AV, AT->AW, AT->AX, "
"AU->BC, AV->AZ, AV->BA, AV->BB, AY->AZ, AY->BA, AY->BB, BD->AY, BD->BE, BD->BH, "
"BE->BG, BF->BG, BI->BJ, BI->BK, BJ->BL, BJ->BP, BJ->BQ, BK->BL, BK->BP, BK->BQ, "
"BL->BN, BL->BO, BM->BN, BM->BO, BR->BS, BR->BT, BR->BU, BR->BV, BR->BW, BS->BX, "
"BS->BY, BS->BZ, BS->CA, CB->BX, CB->BY, CB->BZ, CB->CA")
data = extract_data(input_string)
print(ancestors(data, "Daenerys Targaryen"))
print(descendants(data, "Jaehaerys Targaryen"))
print(siblings(data, "Jon Snow", gender=None, fullness=0))
print(parents_siblings(data, "Joffrey Baratheon", gender=None))
print(cousins(data, "Jamie Lannister"))
if __name__ == "__main__":
main()
| DayGitH/Python-Challenges | DailyProgrammer/DP20120615B.py | Python | mit | 11,381 |
# -*- coding: utf-8 -*-
from .gni import parse, search, details
from .gnr import datasources, resolve
| sckott/pytaxize | pytaxize/gn/__init__.py | Python | mit | 103 |
"""
Serialization functions for the SimpleMRS format.
"""
# Author: Michael Wayne Goodman <[email protected]>
from __future__ import print_function
from collections import deque, defaultdict
import re
from warnings import warn
from delphin.mrs import Xmrs, Mrs
from delphin.mrs.components import (
ElementaryPredication, Pred, Lnk, HandleConstraint, IndividualConstraint,
sort_vid_split, var_sort, var_re, hcons, icons
)
from delphin.mrs.config import (HANDLESORT, CONSTARG_ROLE)
from delphin.mrs.util import rargname_sortkey
from delphin.exceptions import (
XmrsDeserializationError as XDE,
XmrsError,
XmrsWarning
)
try:
from pygments import highlight as highlight_
from pygments.formatters import TerminalFormatter
from delphin.extra.highlight import SimpleMrsLexer, mrs_colorscheme
lexer = SimpleMrsLexer()
formatter = TerminalFormatter(bg='dark', colorscheme=mrs_colorscheme)
def highlight(text):
return highlight_(text, lexer, formatter)
except ImportError:
# warnings.warn
def highlight(text):
return text
# versions are:
# * 1.0 long running standard
# * 1.1 added support for MRS-level lnk, surface and EP-level surface
_default_version = 1.1
_latest_version = 1.1
_valid_hcons = ['qeq', 'lheq', 'outscopes']
# pretty-print options
_default_mrs_delim = '\n'
##############################################################################
##############################################################################
# Pickle-API methods
def load(fh, single=False, version=_default_version,
strict=False, errors='warn'):
"""
Deserialize SimpleMRSs from a file (handle or filename)
Args:
fh: filename or file object
single: if `True`, only return the first read [Xmrs] object
        strict: deprecated; a `True` value is the same as
            `errors='strict'`, and a `False` value is the same as
            `errors='warn'`
errors: if `strict`, ill-formed MRSs raise an error; if
`warn`, raise a warning instead; if `ignore`, do not warn
or raise errors for ill-formed MRSs
Returns:
a generator of [Xmrs] objects (unless the *single* option is
`True`)
"""
if isinstance(fh, str):
s = open(fh, 'r').read()
else:
s = fh.read()
return loads(s, single=single, version=version,
strict=strict, errors=errors)
def loads(s, single=False, version=_default_version,
strict=False, errors='warn'):
"""
Deserialize SimpleMRS string representations
Args:
s: a SimpleMRS string
single: if `True`, only return the first read [Xmrs] object
Returns:
a generator of [Xmrs] objects (unless the *single* option is
`True`)
"""
ms = deserialize(s, version=version, strict=strict, errors=errors)
if single:
return next(ms)
else:
return ms
def dump(fh, ms, single=False, version=_default_version,
pretty_print=False, color=False, **kwargs):
"""
Serialize [Xmrs] objects to a SimpleMRS representation and write
to a file
Args:
fh: filename or file object
ms: an iterator of [Xmrs] objects to serialize (unless the
*single* option is `True`)
single: if `True`, treat *ms* as a single [Xmrs] object
instead of as an iterator
pretty_print: if `True`, the output is formatted to be easier
to read
color: if `True`, colorize the output with ANSI color codes
Returns:
None
"""
print(dumps(ms,
single=single,
version=version,
pretty_print=pretty_print,
color=color,
**kwargs),
file=fh)
def dumps(ms, single=False, version=_default_version,
pretty_print=False, color=False, **kwargs):
"""
Serialize an [Xmrs] object to a SimpleMRS representation
Args:
ms: an iterator of [Xmrs] objects to serialize (unless the
*single* option is `True`)
single: if `True`, treat *ms* as a single [Xmrs] object instead
of as an iterator
pretty_print: if `True`, the output is formatted to be easier to
read
color: if `True`, colorize the output with ANSI color codes
Returns:
a SimpleMrs string representation of a corpus of [Xmrs]
"""
if single:
ms = [ms]
return serialize(ms, version=version,
pretty_print=pretty_print, color=color, **kwargs)
# for convenience
load_one = lambda fh, **kwargs: load(fh, single=True, **kwargs)
loads_one = lambda s, **kwargs: loads(s, single=True, **kwargs)
dump_one = lambda fh, m, **kwargs: dump(fh, m, single=True, **kwargs)
dumps_one = lambda m, **kwargs: dumps(m, single=True, **kwargs)
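# Illustrative round trip (added example, not part of the original module; the
# MRS string is a minimal hypothetical input):
#     m = loads_one('[ TOP: h0 RELS: < > HCONS: < > ]')
#     print(dumps_one(m, pretty_print=True))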
##############################################################################
##############################################################################
# Deserialization
# The _tokenizer has 3 sub-regexen:
# the first is for strings (e.g. "_dog_n_rel", "\"quoted string\"")
# the second looks for unquoted type preds (lookahead for space or lnk)
# the second is for args, variables, preds, etc (e.g. ARG1, _dog_n_rel, x4)
# the last is for contentful punctuation (e.g. [ ] < > : # @)
_tokenizer = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*"'
r'|_(?:[^\s<]|<(?![-0-9:#@ ]*>))*'
r'|[^\s:#@\[\]"<>]+'
r'|[:#@\[\]<>])')
def tokenize(string):
"""Split the SimpleMrs string into tokens."""
return deque(_tokenizer.findall(string))
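# For illustration (added example, not from the original module):
#     tokenize('[ LTOP: h0 INDEX: e2 ]')
# returns deque(['[', 'LTOP', ':', 'h0', 'INDEX', ':', 'e2', ']'])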
def _invalid_token_error(token, expected):
raise XDE('Invalid token: "{}"\tExpected: "{}"'.format(token, expected))
def deserialize(string, version=_default_version, strict=True, errors='warn'):
if strict:
        warn(
'strict=True parameter is deprecated; use errors=\'strict\'',
DeprecationWarning
)
errors = 'strict'
# FIXME: consider buffering this so we don't read the whole string at once
tokens = tokenize(string)
while tokens:
yield _read_mrs(tokens, version, errors)
def _read_literals(tokens, *toks):
for tok in toks:
token = tokens.popleft()
if token != tok:
raise XDE(
'Expected \'{}\': {}'.format(tok, ' '.join([tok] + list(tokens)))
)
def _read_mrs(tokens, version, errors):
#return read_mrs(tokens)
try:
_read_literals(tokens, '[')
top = idx = surface = lnk = None
vars_ = {}
if version >= 1.1:
if tokens[0] == '<':
lnk = _read_lnk(tokens)
if tokens[0].startswith('"'): # and tokens[0].endswith('"'):
surface = tokens.popleft()[1:-1] # get rid of first quotes
if tokens[0].upper() in ('LTOP', 'TOP'):
tokens.popleft() # LTOP / TOP
_read_literals(tokens, ':')
top = tokens.popleft()
vars_[top] = []
if tokens[0].upper() == 'INDEX':
tokens.popleft() # INDEX
_read_literals(tokens, ':')
idx = tokens.popleft()
vars_[idx] = _read_props(tokens)
rels = _read_rels(tokens, vars_)
hcons = _read_cons(tokens, 'HCONS', vars_)
icons = _read_cons(tokens, 'ICONS', vars_)
_read_literals(tokens, ']')
# at this point, we could uniquify proplists in vars_, but most
# likely it isn't necessary, and might night harm things if we
# leave potential dupes in there. let's see how it plays out.
m = Xmrs(top=top, index=idx, eps=rels,
hcons=hcons, icons=icons, vars=vars_,
lnk=lnk, surface=surface)
except IndexError:
_unexpected_termination_error()
if errors != 'ignore':
try:
m.validate()
except XmrsError as ex:
if errors == 'warn':
warn(str(ex), XmrsWarning)
elif errors == 'strict':
raise
return m
def _read_props(tokens):
props = []
if tokens[0] == '[':
tokens.popleft() # [
vartype = tokens.popleft() # this gets discarded though
while tokens[0] != ']':
key = tokens.popleft()
_read_literals(tokens, ':')
val = tokens.popleft()
props.append((key, val))
tokens.popleft() # ]
return props
def _read_rels(tokens, vars_):
rels = None
nid = 10000
if tokens[0].upper() == 'RELS':
rels = []
tokens.popleft() # RELS
_read_literals(tokens, ':', '<')
while tokens[0] != '>':
rels.append(_read_ep(tokens, nid, vars_))
nid += 1
tokens.popleft() # >
return rels
def _read_ep(tokens, nid, vars_):
# reassign these locally to avoid global lookup
CARG = CONSTARG_ROLE
_var_re = var_re
# begin parsing
_read_literals(tokens, '[')
pred = Pred.string_or_grammar_pred(tokens.popleft())
lnk = _read_lnk(tokens)
surface = label = None
if tokens[0].startswith('"'):
surface = tokens.popleft()[1:-1] # get rid of first quotes
if tokens[0].upper() == 'LBL':
tokens.popleft() # LBL
_read_literals(tokens, ':')
label = tokens.popleft()
vars_[label] = []
args = {}
while tokens[0] != ']':
role = tokens.popleft().upper()
_read_literals(tokens, ':')
val = tokens.popleft()
if _var_re.match(val) is not None and role.upper() != CARG:
props = _read_props(tokens)
if val not in vars_:
vars_[val] = []
vars_[val].extend(props)
args[role] = val
tokens.popleft() # ]
return (nid, pred, label, args, lnk, surface)
def _read_cons(tokens, constype, vars_):
cons = None
if tokens[0].upper() == constype:
cons = []
tokens.popleft() # (H|I)CONS
_read_literals(tokens, ':', '<')
while tokens[0] != '>':
left = tokens.popleft()
lprops = _read_props(tokens)
reln = tokens.popleft().lower()
rght = tokens.popleft()
rprops = _read_props(tokens)
cons.append((left, reln, rght))
# update properties
if left not in vars_: vars_[left] = []
vars_[left].extend(lprops)
if rght not in vars_: vars_[rght] = []
            vars_[rght].extend(rprops)
tokens.popleft() # >
return cons
def _read_lnk(tokens):
"""Read and return a tuple of the pred's lnk type and lnk value,
if a pred lnk is specified."""
# < FROM : TO > or < FROM # TO > or < TOK... > or < @ EDGE >
lnk = None
if tokens[0] == '<':
tokens.popleft() # we just checked this is a left angle
if tokens[0] == '>':
pass # empty <> brackets the same as no lnk specified
# edge lnk: ['@', EDGE, ...]
elif tokens[0] == '@':
tokens.popleft() # remove the @
lnk = Lnk.edge(tokens.popleft()) # edge lnks only have one number
# character span lnk: [FROM, ':', TO, ...]
elif tokens[1] == ':':
lnk = Lnk.charspan(tokens.popleft(), tokens[1])
tokens.popleft() # this should be the colon
tokens.popleft() # and this is the cto
# chart vertex range lnk: [FROM, '#', TO, ...]
elif tokens[1] == '#':
lnk = Lnk.chartspan(tokens.popleft(), tokens[1])
tokens.popleft() # this should be the hash
tokens.popleft() # and this is the to vertex
# tokens lnk: [(TOK,)+ ...]
else:
lnkdata = []
while tokens[0] != '>':
lnkdata.append(int(tokens.popleft()))
lnk = Lnk.tokens(lnkdata)
_read_literals(tokens, '>')
return lnk
def _unexpected_termination_error():
raise XDE('Invalid MRS: Unexpected termination.')
##############################################################################
##############################################################################
# Encoding
def serialize(ms, version=_default_version, pretty_print=False, color=False):
"""Serialize an MRS structure into a SimpleMRS string."""
delim = '\n' if pretty_print else _default_mrs_delim
output = delim.join(
_serialize_mrs(m, version=version, pretty_print=pretty_print)
for m in ms
)
if color:
output = highlight(output)
return output
def _serialize_mrs(m, version=_default_version, pretty_print=False):
# note that varprops is modified as a side-effect of the lower
# functions
varprops = {v: vd['props'] for v, vd in m._vars.items() if vd['props']}
toks = []
if version >= 1.1:
header_toks = []
if m.lnk is not None and m.lnk.data != (-1, -1): # don't do <-1:-1>
header_toks.append(_serialize_lnk(m.lnk))
if m.surface is not None:
header_toks.append('"{}"'.format(m.surface))
if header_toks:
toks.append(' '.join(header_toks))
if m.top is not None:
toks.append(_serialize_argument(
'TOP' if version >= 1.1 else 'LTOP', m.top, varprops
))
if m.index is not None:
toks.append(_serialize_argument(
'INDEX', m.index, varprops
))
delim = ' ' if not pretty_print else '\n '
toks.append('RELS: < {eps} >'.format(
eps=delim.join(_serialize_ep(ep, varprops, version=version)
for ep in m.eps())
))
toks += [_serialize_hcons(hcons(m))]
icons_ = icons(m)
if icons_: # make unconditional for "ICONS: < >"
toks += [_serialize_icons(icons_)]
delim = ' ' if not pretty_print else '\n '
return '{} {} {}'.format('[', delim.join(toks), ']')
def _serialize_argument(rargname, value, varprops):
"""Serialize an MRS argument into the SimpleMRS format."""
_argument = '{rargname}: {value}{props}'
props = ''
if value in varprops:
props = ' [ {} ]'.format(
' '.join(
[var_sort(value)] +
list(map('{0[0]}: {0[1]}'.format,
[(k.upper(), v) for k, v in varprops[value]]))
)
)
del varprops[value] # only print props once
return _argument.format(
rargname=rargname,
value=str(value),
props=props
)
def _serialize_ep(ep, varprops, version=_default_version):
"""Serialize an Elementary Predication into the SimpleMRS encoding."""
# ('nodeid', 'pred', 'label', 'args', 'lnk', 'surface', 'base')
args = ep[3]
arglist = ' '.join([_serialize_argument(rarg, args[rarg], varprops)
for rarg in sorted(args, key=rargname_sortkey)])
if version < 1.1 or len(ep) < 6 or ep[5] is None:
surface = ''
else:
surface = ' "%s"' % ep[5]
lnk = None if len(ep) < 5 else ep[4]
pred = ep[1]
predstr = pred.string
return '[ {pred}{lnk}{surface} LBL: {label}{s}{args} ]'.format(
pred=predstr,
lnk=_serialize_lnk(lnk),
surface=surface,
label=str(ep[2]),
s=' ' if arglist else '',
args=arglist
)
def _serialize_lnk(lnk):
"""Serialize a predication lnk to surface form into the SimpleMRS
encoding."""
s = ""
if lnk is not None:
s = '<'
if lnk.type == Lnk.CHARSPAN:
cfrom, cto = lnk.data
s += ''.join([str(cfrom), ':', str(cto)])
elif lnk.type == Lnk.CHARTSPAN:
cfrom, cto = lnk.data
s += ''.join([str(cfrom), '#', str(cto)])
elif lnk.type == Lnk.TOKENS:
s += ' '.join([str(t) for t in lnk.data])
elif lnk.type == Lnk.EDGE:
s += ''.join(['@', str(lnk.data)])
s += '>'
return s
def _serialize_hcons(hcons):
"""Serialize [HandleConstraints] into the SimpleMRS encoding."""
toks = ['HCONS:', '<']
for hc in hcons:
toks.extend(hc)
# reln = hcon[1]
# toks += [hcon[0], rel, str(hcon.lo)]
toks += ['>']
return ' '.join(toks)
def _serialize_icons(icons):
"""Serialize [IndividualConstraints] into the SimpleMRS encoding."""
toks = ['ICONS:', '<']
for ic in icons:
toks.extend(ic)
# toks += [str(icon.left),
# icon.relation,
# str(icon.right)]
toks += ['>']
return ' '.join(toks)
| matichorvat/pydelphin | delphin/mrs/simplemrs.py | Python | mit | 16,588 |
import pytest
import skitai
import confutil
import threading
import time
import sys
def enforce ():
time.sleep (2)
ex = skitai.was.executors
ex.executors [1].maintern (time.time ())
ex.executors [1].shutdown ()
def foo (a, timeout = 0):
time.sleep (timeout)
return a
def test_was_process (app):
@app.route ("/")
def index (was, timeout = 0):
task = was.Process (foo, 'hello', int (timeout))
return task.fetch ()
@app.route ("/1")
def index2 (was, timeout = 0):
tasks = was.Tasks ([was.Process (foo, 'hello', int (timeout))])
return tasks.fetch ()[0]
with app.test_client ("/", confutil.getroot ()) as cli:
resp = cli.get ("/")
assert resp.status_code == 200
assert resp.data == 'hello'
resp = cli.get ("/1")
assert resp.status_code == 200
assert resp.data == 'hello'
def test_was_thread (app):
@app.route ("/")
def index (was, timeout = 0):
task = was.Thread (foo, 'hello', int (timeout))
return task.fetch ()
@app.route ("/1")
def index2 (was, timeout = 0):
tasks = was.Tasks ([was.Thread (foo, ['hello'], int (timeout))])
return tasks.one ()[0]
with app.test_client ("/", confutil.getroot ()) as cli:
resp = cli.get ("/")
assert resp.status_code == 200
assert resp.data == 'hello'
resp = cli.get ("/1")
assert resp.status_code == 200
assert resp.data == 'hello'
def test_was_async_requests (app):
@app.route ("/")
def index (was, timeout = 0):
def respond (was, task):
assert task.fetch () == "hello"
return was.API ("201 Created", data = task.fetch ())
return was.Process (foo, 'hello', int (timeout)).then (respond)
with app.test_client ("/", confutil.getroot ()) as cli:
threading.Thread (target = enforce).start ()
resp = cli.get ("/")
assert resp.status_code == 201
assert resp.data ['data'] == 'hello'
time.sleep (3)
threading.Thread (target = enforce).start ()
resp = cli.get ("/?timeout=5")
assert resp.status_code == 502
| hansroh/skitai | tests/level3/test_executors_then.py | Python | mit | 2,268 |
import unittest
import hail as hl
from hail.utils.java import Env, scala_object
from ..helpers import *
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
def create_backward_compatibility_files():
import os
all_values_table, all_values_matrix_table = create_all_values_datasets()
file_version = Env.hail().expr.ir.FileFormat.version().toString()
supported_codecs = scala_object(Env.hail().io, 'BufferSpec').specs()
table_dir = resource(os.path.join('backward_compatability', str(file_version), 'table'))
if not os.path.exists(table_dir):
os.makedirs(table_dir)
matrix_table_dir = resource(os.path.join('backward_compatability', str(file_version), 'matrix_table'))
if not os.path.exists(matrix_table_dir):
os.makedirs(matrix_table_dir)
i = 0
for codec in supported_codecs:
all_values_table.write(os.path.join(table_dir, f'{i}.ht'), overwrite=True, _codec_spec=codec.toString())
all_values_matrix_table.write(os.path.join(matrix_table_dir, f'{i}.hmt'), overwrite=True,
_codec_spec=codec.toString())
i += 1
class Tests(unittest.TestCase):
@unittest.skip # comment this line to generate files for new versions
def test_write(self):
create_backward_compatibility_files()
@skip_when_service_backend('''intermittent worker failure:
> assert backward_compatible_same(all_values_table, ds)
Caused by: java.lang.AssertionError: assertion failed
at scala.Predef$.assert(Predef.scala:208)
at is.hail.io.BlockingInputBuffer.ensure(InputBuffers.scala:389)
at is.hail.io.BlockingInputBuffer.readInt(InputBuffers.scala:412)
at __C1210collect_distributed_array.__m1218INPLACE_DECODE_r_binary_TO_r_binary(Unknown Source)
at __C1210collect_distributed_array.__m1217INPLACE_DECODE_r_struct_of_r_binaryEND_TO_r_tuple_of_r_binaryEND(Unknown Source)
at __C1210collect_distributed_array.__m1216INPLACE_DECODE_r_struct_of_r_struct_of_r_binaryENDEND_TO_r_struct_of_r_tuple_of_r_binaryENDEND(Unknown Source)
at __C1210collect_distributed_array.__m1215DECODE_r_struct_of_r_struct_of_r_struct_of_r_binaryENDENDEND_TO_SBaseStructPointer(Unknown Source)
at __C1210collect_distributed_array.apply(Unknown Source)
at __C1210collect_distributed_array.apply(Unknown Source)
at is.hail.backend.BackendUtils.$anonfun$collectDArray$2(BackendUtils.scala:31)
at is.hail.utils.package$.using(package.scala:627)
at is.hail.annotations.RegionPool.scopedRegion(RegionPool.scala:144)
at is.hail.backend.BackendUtils.$anonfun$collectDArray$1(BackendUtils.scala:30)
at is.hail.backend.service.Worker$.main(Worker.scala:120)
at is.hail.backend.service.Worker.main(Worker.scala)
... 11 more''')
def test_backward_compatability(self):
import os
def backward_compatible_same(current, old):
if isinstance(current, hl.Table):
current = current.select_globals(*old.globals)
current = current.select(*old.row_value)
else:
current = current.select_globals(*old.globals)
current = current.select_rows(*old.row_value)
current = current.select_cols(*old.col_value)
current = current.select_entries(*old.entry)
return current._same(old)
all_values_table, all_values_matrix_table = create_all_values_datasets()
resource_dir = resource('backward_compatability')
fs = hl.current_backend().fs
versions = [os.path.basename(x['path']) for x in fs.ls(resource_dir)]
n = 0
for v in versions:
table_dir = os.path.join(resource_dir, v, 'table')
i = 0
f = os.path.join(table_dir, '{}.ht'.format(i))
while fs.exists(f):
ds = hl.read_table(f)
assert backward_compatible_same(all_values_table, ds)
i += 1
f = os.path.join(table_dir, '{}.ht'.format(i))
n += 1
matrix_table_dir = os.path.join(resource_dir, v, 'matrix_table')
i = 0
f = os.path.join(matrix_table_dir, '{}.hmt'.format(i))
while fs.exists(f):
ds = hl.read_matrix_table(f)
assert backward_compatible_same(all_values_matrix_table, ds)
i += 1
f = os.path.join(matrix_table_dir, '{}.hmt'.format(i))
n += 1
assert n == 88
| hail-is/hail | hail/python/test/hail/matrixtable/test_file_formats.py | Python | mit | 4,479 |
import json
import sublime
import subprocess
import os
import sys
from .helpers import fmtpos
main_protocol_version = 3
class MerlinExc(Exception):
""" Exception returned by merlin. """
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Failure(MerlinExc):
""" Failure exception. """
pass
class Error(MerlinExc):
""" Error exception. """
pass
class MerlinException(MerlinExc):
""" Standard exception. """
pass
class MerlinProcess(object):
"""
This class launches a merlin process and send/receive commands to
synchronise buffer, autocomplete...
"""
def clear(self):
self._settings = None
self._binary_path = None
self._last_commands = []
self._verbosity_counter = (None, None)
def __init__(self):
self.clear()
def settings(self):
if self._settings is None:
self._settings = sublime.load_settings("Merlin.sublime-settings")
return self._settings
def binary_path(self):
"""
        Return the path of the ocamlmerlin binary.
"""
if self._binary_path is None:
merlin_path = self.settings().get('ocamlmerlin_path')
if merlin_path:
self._binary_path = merlin_path
else:
# For Mac OS X, add the path for homebrew
if "/usr/local/bin" not in os.environ['PATH'].split(os.pathsep):
os.environ['PATH'] += os.pathsep + "/usr/local/bin"
opam_process = subprocess.Popen('opam config var bin', stdout=subprocess.PIPE, shell=True)
opam_bin_path = opam_process.stdout.read().decode('utf-8').rstrip() + '/ocamlmerlin'
if os.path.isfile(opam_bin_path) and os.access(opam_bin_path, os.X_OK):
self._binary_path = opam_bin_path
else:
self._binary_path = 'ocamlmerlin'
return self._binary_path
def store_last_command(self, command, response, errors):
if self._last_commands and self._last_commands[0] == (command, None, None):
self._last_commands[0] = (command, response, errors)
else:
self._last_commands.insert(0, (command, response, errors))
if len(self._last_commands) > 5:
self._last_commands.pop()
def track_verbosity(self, key, args):
if key:
if key is True:
key = args
if self._verbosity_counter[0] == key:
self._verbosity_counter = (key, self._verbosity_counter[1]+1)
else:
self._verbosity_counter = (key, 0)
return ["-verbosity", str(self._verbosity_counter[1])]
else:
return []
def exec(self, arguments, binary_path=None, input=""):
""" Start a merlin process. """
try:
if binary_path is None:
binary_path = self.binary_path()
command = [binary_path]
command.extend(arguments)
self.store_last_command(command, None, None)
# win32 means windows, either 64 or 32 bits.
# Note that owing to a long-standing bug in Python, stderr must be given
# (see https://bugs.python.org/issue3905)
if sys.platform == "win32":
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=info,
universal_newlines=True,
)
else:
process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
print(command)
(response, errors) = process.communicate(input=input)
self.store_last_command(command, response, errors)
return response
except (OSError, FileNotFoundError) as e:
print("Failed starting ocamlmerlin. Please ensure that ocamlmerlin"
"binary is executable.")
raise e
def restart(self):
self.exec(["server", "stop-server"])
self.clear()
def command(self, args, binary_path=None, filename=None, extensions=None, packages=None, dot_merlins=None, input=None, other_flags=None, debug=False, build_path=None, source_path=None, track_verbosity=None):
"""
Send a command to merlin and wait to return the results.
Raise an exception if merlin returned an error message.
"""
cmdline = ["server"]
cmdline.extend(args)
if filename:
cmdline.extend(["-filename", filename])
verbosity = self.track_verbosity(track_verbosity, args)
cmdline.extend(verbosity)
for ext in extensions or []:
cmdline.extend(["-extension",ext])
for pkg in packages or []:
cmdline.extend(["-package",pkg])
for dm in dot_merlins or []:
cmdline.extend(["-dot-merlin", dm])
for path in build_path or []:
cmdline.extend(["-build-path", path])
for path in source_path or []:
cmdline.extend(["-source-path", path])
if debug:
cmdline.extend(["-log-file", "-"])
flags = self.settings().get('ocamlmerlin_flags') or []
cmdline.extend(flags)
if other_flags:
cmdline.extend(other_flags)
result = self.exec(cmdline, binary_path=binary_path, input=input)
print(result)
result = json.loads(result)
class_ = result['class']
content = result['value']
for msg in result['notifications']:
print("merlin: {}".format(msg))
if class_ == "return":
return content
elif class_ == "failure":
raise Failure(content)
elif class_ == "error":
raise Error(content)
elif class_ == "exception":
raise MerlinException(content)
class MerlinView(object):
"""
This class wraps commands local to a view/buffer
"""
def __init__(self, process, view):
self.process = process
self.view = view
def command(self, args, track_verbosity=None):
settings = self.view.settings()
return self.process.command(
args,
binary_path=settings.get("ocamlmerlin_path"),
dot_merlins=settings.get("ocamlmerlin_dot_merlins"),
extensions=settings.get("ocamlmerlin_extensions"),
filename=self.view.file_name(),
input=self.view.substr(sublime.Region(0, self.view.size())),
other_flags=settings.get("ocamlmerlin_flags"),
packages=settings.get("ocamlmerlin_packages"),
build_path=settings.get("ocamlmerlin_buildpath"),
source_path=settings.get("ocamlmerlin_sourcepath"),
track_verbosity=track_verbosity
)
def complete_cursor(self, base, line, col):
""" Return possible completions at the current cursor position. """
with_doc = self.process.settings().get("ocamlmerlin_complete_with_doc")
cmd = ["complete-prefix"]
cmd.extend(["-position", fmtpos((line,col)), "-prefix", base])
cmd.extend(["-doc", (with_doc and "y" or "n")])
return self.command(cmd, track_verbosity=True)
def report_errors(self):
"""
Return all errors detected by merlin while parsing the current file.
"""
return self.command(["errors"])
def find_list(self):
""" List all possible external modules to load. """
return self.command(['findlib-list'])
def set_packages(self, packages):
""" Find and load external modules. """
self.view.settings().set("ocamlmerlin_packages", packages)
def project(self):
"""
Returns a tuple
(dot_merlins, failures)
where dot_merlins is a list of loaded .merlin files
and failures is the list of errors which occured during loading
"""
result = self.send_query("project", "get")
return (result['result'], result['failures'])
# Path management
def list_build_path(self):
return self.view.settings().get("ocamlmerlin_buildpath") or []
def add_build_path(self, path):
paths = self.list_build_path()
paths.append(path)
self.view.settings.set("ocamlmerlin_buildpath", paths)
def list_source_path(self):
return self.view.settings().get("ocamlmerlin_sourcepath") or []
def add_source_path(self, path):
paths = self.list_source_path()
paths.append(path)
self.view.settings.set("ocamlmerlin_sourcepath", paths)
# File selection
def which_path(self, names):
cmd = ["path-of-source"]
for name in names:
cmd.extend(["-file",name])
return self.command(cmd)
def which_with_ext(self, extensions):
cmd = ["list-modules"]
for ext in extensions:
cmd.extend(["-ext",ext])
return self.command(cmd)
# Type information
def type_enclosing(self, line, col):
cmd = ["type-enclosing", "-position", fmtpos((line,col))]
return self.command(cmd, track_verbosity=True)
# Extensions management
def extension_list(self):
return self.command(["extension-list"])
def extension_enable(self, exts):
merlin_exts = self.view.settings().get("ocamlmerlin_extensions")
for ext in exts:
if not ext in merlin_exts:
merlin_exts.append(ext)
self.view.settings().set("ocamlmerlin_extensions", merlin_exts)
def extension_disable(self, exts):
merlin_exts = self.view.settings().get("ocamlmerlin_extensions")
for ext in exts:
if ext in merlin_exts:
merlin_exts.remove(ext)
self.view.settings().set("ocamlmerlin_extensions", merlin_exts)
def locate(self, line, col, ident="", kind="mli"):
cmd = ["locate", "-look-for", kind]
if not (line is None or col is None):
cmd.extend(["-prefix", ident])
return self.command(cmd)
| def-lkb/sublime-text-merlin | merlin/process.py | Python | mit | 10,719 |
from django.core.management.base import BaseCommand
from django.utils import autoreload
import os
import sys
import time
INPROGRESS_FILE = 'testing.inprogress'
def get_test_command():
"""
Return an instance of the Command class to use.
This method can be patched in to run a test command other than the on in
core Django. For example, to make a runtester for South:
from django.core.management.commands import runtester
from django.core.management.commands.runtester import Command
def get_test_command():
from south.management.commands.test import Command as TestCommand
return TestCommand()
runtester.get_test_command = get_test_command
"""
from test_extensions.management.commands.test import Command as TestCommand
return TestCommand()
def my_reloader_thread():
"""
Wait for a test run to complete before exiting.
"""
# If a file is saved while tests are being run, the base reloader just
# kills the process. This is bad because it wedges the database and then
# the user is prompted to delete the database. Instead, wait for
# INPROGRESS_FILE to disappear, then exit. Exiting the thread will then
# rerun the suite.
while autoreload.RUN_RELOADER:
if autoreload.code_changed():
while os.path.exists(INPROGRESS_FILE):
time.sleep(1)
sys.exit(3) # force reload
time.sleep(1)
# monkeypatch the reloader_thread function with the one above
autoreload.reloader_thread = my_reloader_thread
class Command(BaseCommand):
option_list = BaseCommand.option_list
help = "Starts a command that tests upon saving files."
args = '[optional apps to test]'
# Validation is called explicitly each time the suite is run
requires_model_validation = False
def handle(self, *args, **options):
if os.path.exists(INPROGRESS_FILE):
os.remove(INPROGRESS_FILE)
def inner_run():
try:
open(INPROGRESS_FILE, 'wb').close()
test_command = get_test_command()
test_command.handle(*args, **options)
finally:
if os.path.exists(INPROGRESS_FILE):
os.remove(INPROGRESS_FILE)
autoreload.main(inner_run)
| garethr/django-test-extensions | src/test_extensions/management/commands/runtester.py | Python | mit | 2,305 |
# vim: fdm=marker
'''
author: Fabio Zanini/Richard Neher
date: 25/04/2015
content: Data access module HIV patients.
'''
# Modules
from collections import Counter
from operator import itemgetter
import numpy as np
import pandas as pd
from Bio import SeqIO,Seq
from .samples import *
from .af_tools import *
from .sequence import alpha, alphaa
# Classes
class Patient(pd.Series):
'''
Class providing access to longitudinal sequencing data of HIV-1 populations
in participants of the HIVEVO study. The class contains time-ordered samples
and access methods to single nucleotide variants, pair frequencies, and genomic
    features of the HIV populations.
'''
_metadata = ['samples',
'_cd4',
'_viral_load',
'_times',
'_n_templates_dilutions',
'_n_templates_viral_load',
'initial_sequence',
'pos_to_feature',
'reference',
'annotation',
]
def __init__(self, *args, **kwargs):
'''Initialize a patient with all his samples'''
include_cell = kwargs.pop('include_cell', False)
super(Patient, self).__init__(*args, **kwargs)
self.samples = sorted(load_samples_sequenced(patients=[self.name]),
key=itemgetter('days since infection'))
self._cd4 = [x['CD4+ count'] for x in self.samples]
self._viral_load = [x['viral load'] for x in self.samples]
# We take 400 ul of serum
# We typically have 6 reactions with that total volume (plus the F4 dilution
# series, but each of those uses only 0.1x template which is very little)
self._n_templates_viral_load = np.array([x*0.4/6.1 for x in self._viral_load], dtype = float)
self._n_templates_dilutions = np.ma.masked_invalid([x.get_n_templates_dilutions() for x in self.samples])
self._times = []
self.reference = self.load_reference()
# translate genbank encoded sequence features into a dictionary
self.annotation = {x.qualifiers['note'][-1]: x for x in self.reference.features}
self._initial_consensus_noinsertions()
# TODO: this is experimental
self.positions_to_features(sources=['annotations'])
@classmethod
def load(cls, pname):
from .filenames import get_table_filename
patients = pd.read_csv(get_table_filename('patients'),
sep='\t',
index_col=0)
if pname not in patients.index:
raise ValueError('Patient '+str(pname)+' not found')
return cls(patients.loc[pname])
@property
def _constructor(self):
return Patient
@property
def _constructor_expanddim(self):
# TODO: implement pd.DataFrame for several patients at once
raise NotImplementedError
@property
def viral_load(self):
return self._viral_load
@property
def cd4(self):
return self._cd4
@property
def dsi(self):
return self.times(unit='days')
@property
def msi(self):
return self.times(unit='month')
@property
def ysi(self):
return self.times(unit='year')
def times(self, unit='days'):
'''Get the times from transmission'''
delta_days = [float(s['days since infection']) for s in self.samples]
if unit.startswith('day'):
return np.array(delta_days, dtype=float)
elif unit.startswith('year'):
return np.array(delta_days, dtype=float)/365.24
elif unit.startswith('month'):
return np.array(delta_days, dtype=float)/365.24*12
else:
raise ValueError("bad time unit")
@property
def n_templates_dilutions(self):
'''Get the time course of the number of templates to PCR, limiting depth'''
return self._n_templates_dilutions
@property
def n_templates_viral_load(self):
'''Get the number of templates, estimated from the viral load'''
return self._n_templates_viral_load
@property
def initial_sample(self):
'''The initial sample used as a mapping reference'''
return self.samples[0]
def load_reference(self):
from .filenames import get_initial_reference_filename
return SeqIO.read(get_initial_reference_filename(self.name, "genomewide", format='gb'), 'gb')
def _region_to_indices(self,region):
'''returns a list of positions corresponding to a genomic region'''
if region=='genomewide':
return np.arange(len(self.reference))
elif region in self.annotation:
return np.array([int(x) for x in self.annotation[region]], dtype=int)
else:
raise ValueError('no annotation with name '+region)
def _annotation_to_fragment_indices(self, anno):
'''
returns coordinates of a region specified in the annotation
in terms of the fragments F1 to F6. This is needed to extract
region specific allele counts, frequencies etc.
returns a dict containing 'length', 'start' (of the region in the genome)
and for each fragment involved 'F1': (indices in the region of interest, indices on the fragment)
'''
coordinates = {}
region_indices = self._region_to_indices(anno)
coordinates['start'] = min(region_indices)
coordinates['length'] = len(region_indices)
fragments = ['F'+str(i) for i in range(1,7)]
# if requested region is a fragment, return only this fragment
if anno in fragments:
coordinates[anno] = (np.arange(coordinates['length']), np.arange(coordinates['length']))
# loop over fragments and extract the indices of the region on this fragment
else:
for frag in fragments:
# indices of the fragment
frag_ind = set(self._region_to_indices(frag))
# intersection of region and fragment positions
region_indices_on_fragment = sorted(frag_ind.intersection(region_indices))
# attach indices in region and on fragment
if len(region_indices_on_fragment):
anno_indices_self = np.arange(coordinates['length'])[np.in1d(region_indices, region_indices_on_fragment)]
coordinates[frag] = (anno_indices_self,
np.array(region_indices_on_fragment)- int(self.annotation[frag].location.start))
return coordinates
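    # Illustrative shape of the returned mapping (hypothetical numbers, shown
    # only to make the structure concrete): for a region spanning F2 and F3,
    #     {'start': 1879, 'length': 297,
    #      'F2': (indices within the region, indices on fragment F2),
    #      'F3': (indices within the region, indices on fragment F3)}
    # where each tuple holds two equal-length integer index arrays.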
def get_coverage_trajectories(self, region, **kwargs):
'''Get coverage as a function of time'''
coordinates = self._annotation_to_fragment_indices(region)
cov = np.ma.array([tmp_sample.get_coverage(coordinates, **kwargs) for tmp_sample in self.samples])
return cov
def get_allele_count_trajectories(self, region, safe=False, type='nuc', **kwargs):
'''Get the allele count trajectories from files
Args:
region (str): region to study, a fragment or a genomic feature (e.g. V3)
type (str): 'nuc' for nucleotides, 'aa' for amino acids
**kwargs: passed down to the function (VERBOSE, etc.).
Note: the genomewide counts are currently saved to file.
Examples:
1. Nucleotides from a certain region:
patient.get_allele_count_trajectories('PR', type='nuc')
2. Amino acids from a certain region:
patient.get_allele_count_trajectories('PR', type='aa')
'''
if type == 'nuc':
coordinates = self._annotation_to_fragment_indices(region)
elif type == 'aa':
coordinates = {region: None}
else:
raise ValueError('Data type not understood')
act = np.ma.array([tmp_sample.get_allele_counts(coordinates, type=type, **kwargs)
for tmp_sample in self.samples],
hard_mask=True,
shrink=False)
if len(act.mask.shape) < 1:
act.mask = np.zeros_like(act, dtype=bool)
return act
def get_insertion_trajectories(self, region, **kwargs):
'''Get insertion trajectories
Returns:
ict (pd.Series): multiindexed array of insertion counts
To manipulate the result, pandas methods are recommended, e.g.
ict_matrix = ict.unstack('DSI').fillna(0).T
'''
coordinates = self._annotation_to_fragment_indices(region)
ict = Counter()
for tmp_sample in self.samples:
time = tmp_sample['days since infection']
ic = tmp_sample.get_insertions(coordinates, **kwargs)
for (position, insertion), value in ic.items():
ict[(time, position, insertion)] = value
ict = pd.Series(ict, name='insertions')
if len(ict):
            ict.index.names = ['DSI', 'position', 'insertion']
return ict
def get_allele_frequency_trajectories(self, region, safe=False, error_rate=2e-3, type='nuc', **kwargs):
        '''Get the allele frequency trajectories from files
Args:
region (str): region to study, a fragment or a genomic feature (e.g. V3)
type (str): 'nuc' for nucleotides, 'aa' for amino acids
**kwargs: passed down to the function (VERBOSE, etc.).
Note: the genomewide counts are currently saved to file.
Examples:
1. Nucleotides from a certain region:
                patient.get_allele_frequency_trajectories('PR', type='nuc')
2. Amino acids from a certain region:
                patient.get_allele_frequency_trajectories('PR', type='aa')
'''
if type == 'nuc':
coordinates = self._annotation_to_fragment_indices(region)
elif type == 'aa':
coordinates = {region: None}
else:
raise ValueError('Data type not understood')
aft = np.ma.array([tmp_sample.get_allele_frequencies(coordinates, type=type, **kwargs)
for tmp_sample in self.samples], hard_mask=True, shrink=False)
# set very low frequencies to zero, these are likely sequencing errors
aft[aft<error_rate]=0
if len(aft.mask.shape)<1:
aft.mask = np.zeros_like(aft, dtype=bool)
return aft
def get_constrained(self, region):
if region in self.annotation and self.annotation[region].type in ['gene', 'protein']:
return np.array([self.pos_to_feature[pos]['RNA']>0 \
or self.pos_to_feature[pos]['gene']>1
for pos in self.annotation[region]])
else:
print(region, "is not a valid protein or gene")
return None
def get_gaps_by_codon(self, region, pad=0, threshold=0.1):
if region in self.annotation and self.annotation[region].type in ['gene', 'protein']:
aft = self.get_allele_frequency_trajectories(region)
gap_index = list(alpha).index(b'-')
gaps = np.zeros(aft.shape[-1],dtype=bool)
for ci in range(0, aft.shape[-1],3):
if np.any(aft[:,gap_index,ci:ci+3]>threshold):
gaps[max(0,ci-3*pad):ci+3*(1+pad)]=True
return gaps
else:
print(region,"is not a valid protein or gene")
return None
def get_syn_mutations(self, region, mask_constrained=True):
if region in self.annotation and self.annotation[region].type in ['gene', 'protein']:
try :
aft = self.get_allele_frequency_trajectories(region)
if len(aft.mask.shape) == 0:
aft_valid = np.ones((aft.shape[0], aft.shape[-1]), dtype=bool)
else:
aft_valid = ~np.array([af.mask.sum(axis=0) for af in aft], dtype=bool)
gaps = self.get_gaps_by_codon(region)
initial_seq = self.get_initial_sequence(region)
consensi = []
for af in aft:
tmp = consensus(af)
tmp[gaps]='N'
consensi.append(tmp)
cons_aa = np.array([np.fromstring(Seq.translate(''.join(cons.astype('U'))),
dtype='S1') for cons in consensi])
no_substitution = np.repeat(np.array([len(np.unique(col[ind]))==1
for ind, col in zip(aft_valid.T[::3], cons_aa.T)], dtype=bool), 3)
syn_muts = np.zeros(aft.shape[1:], dtype=bool)
for pos in range(aft.shape[-1]):
ci = pos//3
rf = pos%3
codon = ''.join(initial_seq[ci*3:(ci+1)*3].astype("U"))
for ni,nuc in enumerate(alpha[:4].astype("U")):
mod_codon = codon[:rf] + nuc + codon[rf+1:]
try:
syn_muts[ni,pos] = (Seq.translate(codon)==Seq.translate(mod_codon))\
*no_substitution[pos]
except:
syn_muts[ni,pos] = False
if mask_constrained:
syn_muts[:,self.get_constrained(region)] = False
return syn_muts
except:
import ipdb; ipdb.set_trace()
else:
print(region,"is not a valid protein or gene")
return None
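    # Hypothetical usage sketch: the returned matrix has one row per allele and
    # one column per position; syn_muts[ni, pos] is True when substituting
    # nucleotide alpha[ni] at position pos leaves the amino acid unchanged, e.g.
    #   syn = patient.get_syn_mutations('pol')
    #   syn[:, :9]   # synonymous substitution options in the first three codons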
def _initial_consensus_noinsertions(self, region='genomewide', VERBOSE=0, type='nuc'):
        '''Make the initial consensus from allele frequencies, keeping coordinates and masked positions.
        Sets self.initial_indices and self.initial_sequence for the genomewide nucleotide case; otherwise the consensus indices are returned.
'''
aft = self.get_allele_frequency_trajectories(region, type=type)
tmp_alpha = alpha if type=='nuc' else alphaa
# Fill the masked positions with N...
mask_index = len(tmp_alpha)-1
cons_ind = aft[0].argmax(axis=0)
cons_ind[aft.mask[0].max(axis=0)] = mask_index
for af_later in aft[1:]:
cons_ind_later = af_later.argmax(axis=0)
cons_ind_later[af_later.mask.max(axis=0)] = mask_index
ind_Ns = (cons_ind == mask_index) & (cons_ind_later != mask_index)
if ind_Ns.sum():
cons_ind[ind_Ns] = cons_ind_later[ind_Ns]
if region == 'genomewide' and type == 'nuc':
self.initial_indices = cons_ind
self.initial_sequence = alpha[cons_ind]
else:
return cons_ind
def get_initial_indices(self, region, type='nuc'):
if type=='nuc':
if region == 'genomewide':
return self.initial_indices.copy()
elif region in self.annotation:
return np.array([self.initial_indices[pos] for pos in self.annotation[region]])
else:
print("Not a valid annotation:", region)
return None
elif type=='aa':
return self._initial_consensus_noinsertions(region, type=type)
def get_initial_sequence(self, region, type='nuc'):
tmp_ind = self.get_initial_indices(region, type=type)
tmp_alpha = alpha if type=='nuc' else alphaa
if tmp_ind is not None:
return tmp_alpha[tmp_ind]
else:
return None
def get_diversity(self, region):
aft = self.get_allele_frequency_trajectories(region)
return np.array(list(map(diversity, aft)))
def get_consensi(self, region):
aft = self.get_allele_frequency_trajectories(region)
return [''.join(consensus(x).astype("U")) for x in aft]
def get_divergence(self, region):
aft = self.get_allele_frequency_trajectories(region)
region_initial_indices = self.initial_indices[self._region_to_indices(region)]
return np.array([divergence(x,region_initial_indices) for x in aft])
def map_to_external_reference(self, roi, refname='HXB2', in_patient=True):
'''
Map positions in the patient to a reference genomewide
Args:
roi -- region of interest given as a string or a tuple (start, end)
refname -- reference to compare to
in_patient -- specifies whether the (start, end) refers to reference or patient coordinates
returns:
a (len(roi), 3) array with reference coordinates in first column,
patient coordinates in second
roi coordinates in third column
'''
from .filenames import get_coordinate_map_filename
coo_fn = get_coordinate_map_filename(self.name, refname=refname)
genomewide_map = np.loadtxt(coo_fn, dtype=int)
if roi in self.annotation:
roi_pos = np.array([x for x in self.annotation[roi]], dtype=int)
ind = np.in1d(genomewide_map[:,1], roi_pos)
roi_indices = np.in1d(roi_pos, genomewide_map[:,1]).nonzero()[0]
return np.vstack((genomewide_map[ind].T, [roi_indices])).T
elif roi == "genomewide":
return np.vstack((genomewide_map.T, [genomewide_map[:,1]])).T
else:
try:
start, stop = list(map(int, roi))
start_ind = np.searchsorted(genomewide_map[:,in_patient], start)
stop_ind = np.searchsorted(genomewide_map[:,in_patient], stop)
return np.vstack((genomewide_map[start_ind:stop_ind].T,
[genomewide_map[start_ind:stop_ind, in_patient] - start])).T
except:
raise ValueError("ROI not understood")
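    # Hypothetical usage sketch: each row of the returned array links one
    # position across coordinate systems, e.g.
    #   m = patient.map_to_external_reference('PR', refname='HXB2')
    #   m[:, 0]   # reference (HXB2) coordinates
    #   m[:, 1]   # patient coordinates
    #   m[:, 2]   # coordinates within the region of interest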
def map_to_external_reference_aminoacids(self, roi, refname='HXB2', in_patient=True):
'''Map positions in the patient to a reference, for amino acids in a protein region
Args:
roi -- region of interest, a string or a triple (protein, start, stop)
refname -- reference to compare to
in_patient -- specifies whether the (start, end) refers to reference or patient coordinates
returns:
a (len(roi), 2) array with reference protein coordinates in first column,
patient protein coordinates in second
'''
from .filenames import get_coordinate_map_filename, get_custom_reference_filename
coo_fn = get_coordinate_map_filename(self.name, refname=refname)
genomewide_map = np.loadtxt(coo_fn, dtype=int)
if roi in self.annotation:
region = roi
else:
region = roi[0]
# translate genbank encoded sequence features into a dictionary
seq = SeqIO.read(get_custom_reference_filename(refname, format='gb'), format='genbank')
ref_annotation = {x.qualifiers['note'][-1]:x for x in seq.features}
ref_region_pos = list(ref_annotation[region])
pat_region_pos = list(self.annotation[region])
# this is asymmetric because we want the region as defined in the patient
ind = genomewide_map[np.in1d(genomewide_map[:,1], pat_region_pos)]
# take protein coordinates
# NOTE: because the genomewide map may have non-codon gaps, we should check the other two
# codon positions, but this might give rise to incongruities. It's ok for usual cases
ind_tmp = []
for r, p in ind[::3]:
try:
r = ref_region_pos.index(r)
p = pat_region_pos.index(p)
except ValueError:
continue
ind_tmp.append([r, p])
ind = np.array(ind_tmp, int) // 3
if roi not in self.annotation:
start, stop = list(map(int, roi[1:]))
ind = ind[(ind[:, in_patient] >= start) & (ind[:, in_patient] < stop)]
return ind
    # TODO: the following is experimental. It was meant as an easy way to get an
    # idea of what kind of features a given site is involved in
def positions_to_features(self, sources=['annotations']):
'''
        map of positions to features, including the number of genes, RNA structures, etc. each position is part of
        Parameters:
            sources: list of sources to take the features from. Allowed sources are
                'annotations', 'shape', 'disorder', 'accessibility', 'structural'.
'''
self.pos_to_feature = [{} for nuc in self.reference]
# Add info from genomic annotations
if 'annotations' in sources:
for posfea in self.pos_to_feature:
posfea['gene'] = 0
posfea['RNA'] = 0
posfea['LTR'] = 0
posfea['codons'] = []
posfea['protein_codon'] = []
for fname, feature in self.annotation.items():
for ii, pos in enumerate(feature):
if feature.type=='gene':
self.pos_to_feature[pos]['gene']+=1
self.pos_to_feature[pos]['codons'].append((fname, ii//3, ii%3))
elif feature.type=='protein':
self.pos_to_feature[pos]['protein_codon'].append((fname, ii//3, ii%3))
elif 'LTR' in fname:
self.pos_to_feature[pos]['LTR']+=1
elif feature.type=='RNA_structure':
self.pos_to_feature[pos]['RNA']+=1
# Add info from SHAPE (Siegfried et al. 2014)
if 'shape' in sources:
for posfea in self.pos_to_feature:
posfea['RNA pairing probability'] = None
posfea['RNA partner'] = None
from .external import load_pairing_probability_NL43
pp = load_pairing_probability_NL43()
m = self.map_to_external_reference('genomewide', 'NL4-3')[:, :2]
md = dict(m)
for pos_ref, pos_pat in m:
if pos_ref in pp.index:
                    # NOTE: sometimes more than one pairing partner is reported for a position
tmp = pp.loc[pos_ref]
if isinstance(tmp, pd.Series):
partner = tmp['partner']
prob = tmp['probability']
else:
tmp = tmp.set_index('partner')['probability']
partner = tmp.argmax()
prob = tmp.loc[partner]
self.pos_to_feature[pos_pat]['RNA pairing probability'] = prob
if partner in md:
self.pos_to_feature[pos_pat]['RNA partner'] = md[partner]
else:
self.pos_to_feature[pos_pat]['RNA partner'] = 'missing'
if 'disorder' in sources:
from .external import load_disorder_scores_HXB2
            # the following returns a dictionary for each protein that contains a list of positions
            # and values extracted from the tables by Li et al. Positions are (presumably) zero-based HXB2 codons.
dscores = load_disorder_scores_HXB2()
for prot in dscores:
# ignore tat and rev since they are split
if prot in self.annotation and prot not in ['tat', 'rev']:
m = self.map_to_external_reference(prot, 'HXB2')
# remove trailing stop codon from the protein
if prot in ['p6', 'IN', 'gp41', 'vif', 'nef', 'vpu','vpr']:
m=m[:-3]
hxb2_codons = (m[:,0] - m[0,0])[::3]//3
try:
# loop over every codon that aligns to HXB2
for pos, val in zip(m[0::3,1], dscores[prot]['val'][hxb2_codons]):
for ii in range(3): # add the same score to every position in the codon
if 'disorder' not in self.pos_to_feature[pos+ii]:
self.pos_to_feature[pos+ii]['disorder']={}
self.pos_to_feature[pos+ii]['disorder'][prot] = val
except:
print("Can't import disorder scores")
#import ipdb; ipdb.set_trace()
if 'accessibility' in sources:
from .external import load_accessibility
ascores = load_accessibility()
for prot in ascores:
# ignore tat and rev since they are split
if prot in self.annotation and prot not in ['tat', 'rev']:
m = self.map_to_external_reference(prot, 'HXB2')
# remove trailing stop codon from the protein
if prot in ['p6', 'IN', 'gp41', 'vif', 'nef', 'vpu','vpr']:
m=m[:-3]
try:
# loop over position value pairs
for pos, val in ascores[prot]:
for ii in range(3): # loop over positions in codon
nuc_pos = m[0,0] + pos*3 + ii #nucleotide position in hxb2
if nuc_pos in m[:,0]: #if maps to patient
# find index and corresponding position in patient
nuc_ii = np.searchsorted(m[:,0], nuc_pos)
pat_pos = m[nuc_ii,1]
if 'accessibility' not in self.pos_to_feature[pat_pos]:
self.pos_to_feature[pat_pos]['accessibility']={}
self.pos_to_feature[pat_pos]['accessibility'][prot] = val
except:
import ipdb; ipdb.set_trace()
if 'structural' in sources:
from .external import load_structural_effects_NL43
struct_scores, cons_seqs = load_structural_effects_NL43()
if struct_scores is not None:
for prot in struct_scores:
# ignore tat and rev since they are split
if prot in self.annotation and prot not in ['tat', 'rev']:
m = self.map_to_external_reference(prot, 'NL4-3')
# remove trailing stop codon from the protein
if prot in ['p6', 'IN', 'gp41', 'vif', 'nef', 'vpu','vpr']:
m=m[:-3]
try:
# loop over position value pairs
for pi, (pos, val) in enumerate(struct_scores[prot]):
for ii in range(3): # loop over positions in codon
nuc_pos = m[0,0] + pos*3 + ii #nucleotide position in hxb2
if prot=='pol': # they start numbering at the start of the PR
nuc_pos+=56*3
if nuc_pos in m[:,0]: #if maps to patient
# find index and corresponding position in patient
nuc_ii = np.searchsorted(m[:,0], nuc_pos)
pat_pos = m[nuc_ii,1]
if 'structural' not in self.pos_to_feature[pat_pos]:
self.pos_to_feature[pat_pos]['structural']={}
self.pos_to_feature[pat_pos]['ref']={}
self.pos_to_feature[pat_pos]['structural'][prot] = val
self.pos_to_feature[pat_pos]['ref'][prot] = cons_seqs[prot][pi]
except:
import ipdb; ipdb.set_trace()
else:
print("couldn load structural scores")
def get_fragment_depth(self, pad=False, limit_to_dilution = False):
c = self._annotation_to_fragment_indices('genomewide')
depth = np.ma.array([s.fragment_depth(c,cov_min=100, var_min=0.05, min_points=10)
for s in self.samples])
if pad:
for si in range(len(self.samples)):
depth[si][depth.mask[si]] = self.n_templates_dilutions[si]
depth.mask[si] = False
if limit_to_dilution:
for si in range(len(self.samples)):
depth[si] = np.minimum(depth[si], self.n_templates_dilutions[si])
return depth
def get_hla_type(self, MHC=1):
        '''Get a list with the patient's HLA alleles
Parameters:
MHC (None/1/2): MHC class I/II only, or all
'''
if MHC == 1:
loci = ('A', 'B', 'C')
elif MHC == 2:
loci = ('DRB1', 'DRQ1')
else:
loci = ('A', 'B', 'C', 'DRB1', 'DRQ1')
hla = np.concatenate([[locus+self['HLA-'+locus],
locus+self['HLA-'+locus+'-2']]
for locus in loci]).tolist()
return hla
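    # Hypothetical usage sketch: class I typing yields two alleles for each of
    # the A, B and C loci, e.g.
    #   patient.get_hla_type(MHC=1)   # ['A..', 'A..', 'B..', 'B..', 'C..', 'C..']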
def get_ctl_epitopes(self,
regions=['gag', 'pol',
'gp120', 'gp41',
'vif', 'vpr', 'vpu', 'nef'],
kind='mhci=80',
):
'''Get list of CTL epitopes
Parameters:
regions (list): restrict to epitopes within these regions
kind (str): LANL/epitoolkit/mhci=<n>, where <n> is the cutoff for
the MHCi predicted list: the first <n> entries are taken.
'''
# Get epitope table for patient HLA
if kind == 'LANL':
from hivwholeseq.cross_sectional.ctl_epitope_map import (get_ctl_epitope_map,
get_ctl_epitope_hla)
ctl_table_main = get_ctl_epitope_map(species='human')
hla = self.get_hla_type(MHC=1)
ctl_table_main = get_ctl_epitope_hla(ctl_table_main, hla)
del ctl_table_main['HXB2 start']
del ctl_table_main['HXB2 end']
del ctl_table_main['HXB2 DNA Contig']
del ctl_table_main['Protein']
del ctl_table_main['Subprotein']
elif 'mhci=' in kind:
n_entries = int(kind[5:])
from .filenames import get_ctl_epitope_map_filename
ctl_table_main = pd.read_csv(get_ctl_epitope_map_filename(self.name),
skiprows=3,
sep='\t',
usecols=['peptide'],
# NOTE: top epitopes only, this is a parameter
nrows=n_entries,
)
ctl_table_main.drop_duplicates(inplace=True)
ctl_table_main.rename(columns={'peptide': 'Epitope'}, inplace=True)
else:
raise ValueError('kind of CTL table not understood')
data = []
for region in regions:
# Restrict epitope table to founder virus sequence
fea = self.annotation[region]
regpos = fea.location.nofuzzy_start
seq = fea.extract(self.reference)
prot = str(seq.seq.translate())
ind = [i for i, epi in enumerate(ctl_table_main['Epitope']) if epi in prot]
ctl_table = ctl_table_main.iloc[ind].copy()
# Set position in region
            # NOTE: the same epitope can occur more than once in a protein, so we use
# regular expressions for that
import re
tmp = []
for epi in ctl_table['Epitope']:
for match in re.finditer(epi, prot):
pos = match.start()
tmp.append({'Epitope': epi,
'start_region': 3 * pos,
'end_region': 3 * (pos + len(epi)),
})
ctl_table = pd.DataFrame(tmp)
if not len(ctl_table):
continue
# Set position genomewide
ctl_table['start'] = ctl_table['start_region'] + regpos
ctl_table['end'] = ctl_table['end_region'] + regpos
# Set start/end positions in HXB2 coordinates
comap = dict(self.map_to_external_reference(region)[:, ::-2])
poss = []
for x in ctl_table['start_region']:
while True:
if x in comap:
poss.append(comap[x])
break
elif x < 0:
poss.append(-1)
break
x -= 1
ctl_table['start_HXB2'] = np.array(poss, int)
poss = []
for x in ctl_table['end_region']:
while True:
if x in comap:
poss.append(comap[x])
break
elif x > 10000:
poss.append(-1)
break
x += 1
ctl_table['end_HXB2'] = np.array(poss, int)
# Filter out epitopes for which we cannot find an HXB2 position
ctl_table = ctl_table.loc[(ctl_table[['start_HXB2', 'end_HXB2']] != -1).all(axis=1)]
ctl_table['region'] = region
data.append(ctl_table)
ctl_table = pd.concat(data).sort('start_HXB2')
ctl_table.index = list(range(len(ctl_table)))
return ctl_table
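    # Hypothetical usage sketch: the resulting table carries region-local,
    # genomewide and HXB2 coordinates for each epitope, e.g.
    #   epis = patient.get_ctl_epitopes(regions=['nef'], kind='mhci=80')
    #   epis[['Epitope', 'start_HXB2', 'end_HXB2']]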
def get_haplotype_alignment(self, region):
'''Get alignment of minor haplotypes from whole infection
Parameters:
region (str or ROI): if a string, it indicates a genomic region
(e.g. V3) and the haplotype alignment is read from file. If a
ROI, i.e. a triple (region, start, end), it extracts the haplotypes
de novo from the BAM files (TODO).
TODO: ROI de novo computation not implemented yet.
'''
if isinstance(region, str):
from Bio import AlignIO
from .filenames import get_haplotype_alignment_filename
fn = get_haplotype_alignment_filename(self.name, region, 'fasta')
return AlignIO.read(fn, 'fasta')
else:
raise NotImplementedError
def get_haplotype_alignment_for_insertion(self, position, length=40):
'''Get alignment of minor haplotypes close to an insertion'''
def find_closest_alignment_filename():
import os
from .filenames import get_haplotype_alignment_filename
for dist in range(10000):
for start in [position - dist, position + dist]:
region = 'insertion_'+str(start)+'-'+str(start+length)
fn = get_haplotype_alignment_filename(self.name, region, 'fasta')
if os.path.isfile(fn):
return {'start': start,
'filename': fn}
else:
raise IOError('No alignment found')
from Bio import AlignIO
datum = find_closest_alignment_filename()
datum['ali'] = AlignIO.read(datum['filename'], 'fasta')
return datum
| neherlab/HIVEVO_access | hivevo/patients.py | Python | mit | 35,292 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def gcd(a, b):
'''
    Calculate the greatest common divisor using Euclid's (subtraction-based) gcd algorithm.
Ref: http://www.cut-the-knot.org/blue/Euclid.shtml
'''
if a == 0 or b == 0:
return 0
while(b != 0):
if a > b:
a -= b
else:
b -= a
return a
def mod_gcd(a, b):
'''
    Calculate the greatest common divisor using the modulo operator.
'''
if a == b:
return a
if a == 0 or b == 0:
return 0
if a < b:
a, b = b, a
while(b != 0):
r = a % b
a = b
b = r
return a
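# Illustrative example (not part of the original script): both implementations
# agree, e.g. gcd(54, 24) == mod_gcd(54, 24) == 6, since 54 = 2 * 3**3 and
# 24 = 2**3 * 3 share the factors 2 and 3.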
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description='Calculate the greatest common divisor.')
parser.add_argument('a', type=int, help='argument 1, type integer.')
parser.add_argument('b', type=int, help='argument 2, type integer.')
parser.add_argument('--mod', action='store_true', help='use modulo operator to calc gcd.')
args = parser.parse_args()
if args.mod:
print mod_gcd(args.a, args.b)
else:
print gcd(args.a, args.b)
# Use the following line to get the elapsed time.
#python -m timeit -s "from gcd import gcd" -n 1 -r 1 "gcd(1000000, 8)"
| weichen2046/algorithm-study | algorithms/python/miscellaneous/gcd.py | Python | mit | 1,303 |
from time import sleep
dick = '''
-----------------------
----------OO-----------
-------OOOOOOOO--------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
-----OOOOOOOOOOOO------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
------OOOOOOOOOO-------
-OOOOOOOOOOOOOOOOOOOOO-
OOOOOOOOOOOOOOOOOOOOOOO
OOOOOOOOOOOOOOOOOOOOOOO
OOOOOOOOOOOOOOOOOOOOOOO
OOOOOOOOOOOOOOOOOOOOOOO
-OOOOOOOOOOOOOOOOOOOOO-
-----------------------
'''
dicklines = dick.split('\n')
for line in dicklines:
print line
sleep(0.3) | mitchrule/Miscellaneous | Misc/dick.py | Python | mit | 731 |
from __future__ import print_function
from google.cloud import vision
client = vision.Client()
image = client.image(filename='res/text.jpg')
texts = image.detect_text()
print(texts[0].locale)
for text in texts:
print(text.description)
| y-wan/cloud-vision-python | text_detection.py | Python | mit | 239 |
from collections import defaultdict
from datetime import timedelta
import time
from django.conf import settings
from django.utils import timezone
from django.contrib.auth import get_user_model
from channels.db import database_sync_to_async
try:
import aioredis
except ImportError:
aioredis = None
User = get_user_model()
MAX_AGE_SECONDS = 60
MAX_AGE = timedelta(seconds=MAX_AGE_SECONDS)
class RedisContext:
async def __aenter__(self):
self.redis = await aioredis.create_redis(settings.REDIS_URL)
return self.redis
async def __aexit__(self, exc_type, exc, tb):
self.redis.close()
await self.redis.wait_closed()
def get_presence_manager(room):
if aioredis is None or not getattr(settings, 'REDIS_URL', None):
return DummyUserPresenceManager(room)
return RedisUserPresenceManager(room)
class BaseUserPresenceManager():
def __init__(self, room):
self.room = room
async def touch(self, user):
raise NotImplementedError
async def list_present(self):
raise NotImplementedError
async def is_present(self, user):
raise NotImplementedError
async def remove(self, user):
raise NotImplementedError
async def expire(self):
raise NotImplementedError
class DummyUserPresenceManager(BaseUserPresenceManager):
presence = defaultdict(dict)
def __init__(self, room):
super().__init__(room)
async def touch(self, user):
self.presence[self.room][user.id] = timezone.now()
def _list_present_user_ids(self):
self._expire()
now = timezone.now()
for user_id, timestamp in self.presence[self.room].items():
if timestamp + MAX_AGE >= now:
yield user_id
def _list_present_users(self):
return list(User.objects.filter(
id__in=self._list_present_user_ids()
))
async def list_present(self):
return await database_sync_to_async(self._list_present_users)()
async def is_present(self, user):
return user.id in self.presence[self.room]
async def remove(self, user):
if self.room in self.presence:
if user.id in self.presence[self.room]:
del self.presence[self.room][user.id]
def _expire(self):
now = timezone.now()
self.presence[self.room] = {
uid: timestamp for uid, timestamp in
self.presence[self.room].items()
if timestamp + MAX_AGE >= now
}
async def expire(self):
self._expire()
class RedisUserPresenceManager(BaseUserPresenceManager):
@property
def key(self):
return 'froide_presence_{}'.format(self.room)
# Wait for Python 3.8
# @asynccontextmanager
# async def get_redis(self):
# redis = await aioredis.create_redis(settings.REDIS_URL)
# yield redis
# await redis.wait_closed()
get_redis = RedisContext
def get_time(self):
return int(time.time())
def _is_expired(self, score):
return self.get_time() - int(score) > MAX_AGE_SECONDS
async def touch(self, user):
async with self.get_redis() as redis:
await redis.zadd(self.key, self.get_time(), user.id)
async def _list_present_user_ids(self):
async with self.get_redis() as redis:
has_expired = False
async for user_id, score in redis.izscan(self.key):
if self._is_expired(score):
has_expired = True
continue
yield user_id
if has_expired:
await self._expire(redis)
def _list_present_users(self, user_ids):
return list(User.objects.filter(
id__in=user_ids
))
async def list_present(self):
user_ids = [x async for x in self._list_present_user_ids()]
return await database_sync_to_async(self._list_present_users)(user_ids)
    async def is_present(self, user):
        async with self.get_redis() as redis:
            score = await redis.zscore(self.key, user.id)
            if score is None:
                return False
            if self._is_expired(score):
                await self._remove(redis, user)
                return False
            return True
async def _remove(self, redis, user):
await redis.zrem(self.key, user.id)
async def remove(self, user):
async with self.get_redis() as redis:
await self._remove(redis, user)
async def _expire(self, redis):
max_val = self.get_time() - MAX_AGE_SECONDS
await redis.zremrangebyscore(self.key, max=max_val)
async def expire(self):
async with self.get_redis() as redis:
await self._expire(redis)
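# Design note / hypothetical usage sketch: presence is kept in one Redis sorted
# set per room, with the UNIX timestamp of the last heartbeat as the score, so
# touch/expire map to single ZADD/ZREMRANGEBYSCORE calls, e.g.
#   manager = get_presence_manager('room-1')
#   await manager.touch(user)             # record a heartbeat
#   users = await manager.list_present()  # everyone seen within MAX_AGE_SECONDS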
| stefanw/froide | froide/helper/presence.py | Python | mit | 4,757 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'User.middle_name'
db.add_column('fandjango_user', 'middle_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True), keep_default=False)
# Adding field 'User.timezone'
db.add_column('fandjango_user', 'timezone', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Adding field 'User.quotes'
db.add_column('fandjango_user', 'quotes', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'User.middle_name'
db.delete_column('fandjango_user', 'middle_name')
# Deleting field 'User.timezone'
db.delete_column('fandjango_user', 'timezone')
# Deleting field 'User.quotes'
db.delete_column('fandjango_user', 'quotes')
models = {
'fandjango.oauthtoken': {
'Meta': {'object_name': 'OAuthToken'},
'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued_at': ('django.db.models.fields.DateTimeField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'fandjango.user': {
'Meta': {'object_name': 'User'},
'authorized': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {}),
'facebook_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'hometown': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_seen_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'oauth_token': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fandjango.OAuthToken']", 'unique': 'True'}),
'political_views': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'profile_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'quotes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'relationship_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'verified': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['fandjango']
| jgorset/fandjango | fandjango/migrations/0005_auto__add_field_user_middle_name__add_field_user_timezone__add_field_u.py | Python | mit | 4,458 |
"""
Piezo sensitivity analysis module.
"""
import warnings
import numpy as np
from monty.dev import requires
import pymatgen.io.phonopy
from pymatgen.core.tensors import Tensor
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer as sga
try:
from phonopy import Phonopy
from phonopy.harmonic import dynmat_to_fc as dyntofc
except ImportError:
Phonopy = None
__author__ = "Handong Ling"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Handong Ling"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "Feb, 2019"
class BornEffectiveCharge:
"""
This class describes the Nx3x3 born effective charge tensor
"""
def __init__(self, structure, bec, pointops, tol=1e-3):
"""
        Create a BornEffectiveCharge object defined by a structure and the
        point operations of the structure's atomic sites.
        Args:
            structure (Structure): structure the tensor refers to
            bec (Nx3x3 array-like): the born effective charge tensor
            pointops: point operations of the structure's atomic sites
            tol (float): tolerance for the charge-neutrality check
"""
self.structure = structure
self.bec = bec
self.pointops = pointops
self.BEC_operations = None
        if np.abs(np.sum(self.bec)) >= tol:
            warnings.warn("Input born effective charge tensor does not satisfy charge neutrality")
def get_BEC_operations(self, eigtol=1e-05, opstol=1e-03):
"""
Returns the symmetry operations which maps the tensors
belonging to equivalent sites onto each other in the form
[site index 1, site index 2, [Symmops mapping from site
index 1 to site index 2]]
Args:
eigtol (float): tolerance for determining if two sites are
related by symmetry
opstol (float): tolerance for determining if a symmetry
operation relates two sites
Return:
list of symmetry operations mapping equivalent sites and
the indexes of those sites.
"""
bec = self.bec
struc = self.structure
ops = sga(struc).get_symmetry_operations(cartesian=True)
uniquepointops = []
for op in ops:
uniquepointops.append(op)
for ops in self.pointops:
for op in ops:
if op not in uniquepointops:
uniquepointops.append(op)
passed = []
relations = []
for site, val in enumerate(bec):
unique = 1
eig1, vecs1 = np.linalg.eig(val)
index = np.argsort(eig1)
neweig = np.real([eig1[index[0]], eig1[index[1]], eig1[index[2]]])
for index, p in enumerate(passed):
if np.allclose(neweig, p[1], atol=eigtol):
relations.append([site, index])
unique = 0
passed.append([site, p[0], neweig])
break
if unique == 1:
relations.append([site, site])
passed.append([site, neweig])
BEC_operations = []
for atom, r in enumerate(relations):
BEC_operations.append(r)
BEC_operations[atom].append([])
for op in uniquepointops:
new = op.transform_tensor(self.bec[relations[atom][1]])
# Check the matrix it references
if np.allclose(new, self.bec[r[0]], atol=opstol):
BEC_operations[atom][2].append(op)
self.BEC_operations = BEC_operations
def get_rand_BEC(self, max_charge=1):
"""
Generate a random born effective charge tensor which obeys a structure's
symmetry and the acoustic sum rule
Args:
max_charge (float): maximum born effective charge value
Return:
np.array Born effective charge tensor
"""
struc = self.structure
symstruc = sga(struc)
symstruc = symstruc.get_symmetrized_structure()
l = len(struc)
BEC = np.zeros((l, 3, 3))
for atom, ops in enumerate(self.BEC_operations):
if ops[0] == ops[1]:
temp_tensor = Tensor(np.random.rand(3, 3) - 0.5)
temp_tensor = sum([temp_tensor.transform(symm_op) for symm_op in self.pointops[atom]]) / len(
self.pointops[atom]
)
BEC[atom] = temp_tensor
else:
tempfcm = np.zeros([3, 3])
for op in ops[2]:
tempfcm += op.transform_tensor(BEC[self.BEC_operations[atom][1]])
BEC[ops[0]] = tempfcm
if len(ops[2]) != 0:
BEC[ops[0]] = BEC[ops[0]] / len(ops[2])
# Enforce Acoustic Sum
disp_charge = np.einsum("ijk->jk", BEC) / l
add = np.zeros([l, 3, 3])
for atom, ops in enumerate(self.BEC_operations):
if ops[0] == ops[1]:
temp_tensor = Tensor(disp_charge)
temp_tensor = sum([temp_tensor.transform(symm_op) for symm_op in self.pointops[atom]]) / len(
self.pointops[atom]
)
add[ops[0]] = temp_tensor
else:
temp_tensor = np.zeros([3, 3])
for op in ops[2]:
temp_tensor += op.transform_tensor(add[self.BEC_operations[atom][1]])
add[ops[0]] = temp_tensor
if len(ops) != 0:
add[ops[0]] = add[ops[0]] / len(ops[2])
BEC = BEC - add
return BEC * max_charge
class InternalStrainTensor:
"""
This class describes the Nx3x3x3 internal tensor defined by a
structure, point operations of the structure's atomic sites.
"""
def __init__(self, structure, ist, pointops, tol=1e-3):
"""
Create an InternalStrainTensor object.
        Args:
            structure (Structure): structure the tensor refers to
            ist (Nx3x3x3 array-like): the internal strain tensor
            pointops: point operations of the structure's atomic sites
"""
self.structure = structure
self.ist = ist
self.pointops = pointops
self.IST_operations = None
obj = self.ist
        if not (np.abs(obj - np.transpose(obj, (0, 1, 3, 2))) < tol).all():
warnings.warn("Input internal strain tensor does " "not satisfy standard symmetries")
def get_IST_operations(self, opstol=1e-03):
"""
Returns the symmetry operations which maps the tensors
belonging to equivalent sites onto each other in the form
[site index 1, site index 2, [Symmops mapping from site
index 1 to site index 2]]
Args:
opstol (float): tolerance for determining if a symmetry
operation relates two sites
Return:
list of symmetry operations mapping equivalent sites and
the indexes of those sites.
"""
struc = self.structure
ops = sga(struc).get_symmetry_operations(cartesian=True)
uniquepointops = []
for op in ops:
uniquepointops.append(op)
for ops in self.pointops:
for op in ops:
if op not in uniquepointops:
uniquepointops.append(op)
IST_operations = []
for atom in range(len(self.ist)): # pylint: disable=C0200
IST_operations.append([])
for j in range(0, atom):
for op in uniquepointops:
new = op.transform_tensor(self.ist[j])
# Check the matrix it references
if np.allclose(new, self.ist[atom], atol=opstol):
IST_operations[atom].append([j, op])
self.IST_operations = IST_operations
def get_rand_IST(self, max_force=1):
"""
Generate a random internal strain tensor which obeys a structure's
symmetry and the acoustic sum rule
Args:
            max_force (float): maximum internal strain value
Return:
InternalStrainTensor object
"""
l = len(self.structure)
IST = np.zeros((l, 3, 3, 3))
for atom, ops in enumerate(self.IST_operations):
temp_tensor = np.zeros([3, 3, 3])
for op in ops:
temp_tensor += op[1].transform_tensor(IST[op[0]])
if len(ops) == 0:
temp_tensor = Tensor(np.random.rand(3, 3, 3) - 0.5)
for dim in range(3):
temp_tensor[dim] = (temp_tensor[dim] + temp_tensor[dim].T) / 2
temp_tensor = sum([temp_tensor.transform(symm_op) for symm_op in self.pointops[atom]]) / len(
self.pointops[atom]
)
IST[atom] = temp_tensor
if len(ops) != 0:
IST[atom] = IST[atom] / len(ops)
return IST * max_force
class ForceConstantMatrix:
"""
This class describes the NxNx3x3 force constant matrix defined by a
structure, point operations of the structure's atomic sites, and the
shared symmetry operations between pairs of atomic sites.
"""
def __init__(self, structure, fcm, pointops, sharedops, tol=1e-3):
"""
        Create a ForceConstantMatrix object.
        Args:
            structure (Structure): structure the matrix refers to
            fcm (NxNx3x3 array-like): the force constant matrix
            pointops: point operations of the structure's atomic sites
            sharedops: point operations shared by pairs of atomic sites
"""
self.structure = structure
self.fcm = fcm
self.pointops = pointops
self.sharedops = sharedops
self.FCM_operations = None
def get_FCM_operations(self, eigtol=1e-05, opstol=1e-05):
"""
Returns the symmetry operations which maps the tensors
belonging to equivalent sites onto each other in the form
[site index 1a, site index 1b, site index 2a, site index 2b,
[Symmops mapping from site index 1a, 1b to site index 2a, 2b]]
Args:
eigtol (float): tolerance for determining if two sites are
related by symmetry
opstol (float): tolerance for determining if a symmetry
operation relates two sites
Return:
list of symmetry operations mapping equivalent sites and
the indexes of those sites.
"""
struc = self.structure
ops = sga(struc).get_symmetry_operations(cartesian=True)
uniquepointops = []
for op in ops:
uniquepointops.append(op)
for ops in self.pointops:
for op in ops:
if op not in uniquepointops:
uniquepointops.append(op)
passed = []
relations = []
for atom1 in range(len(self.fcm)): # pylint: disable=C0200
for atom2 in range(atom1, len(self.fcm)):
unique = 1
eig1, vecs1 = np.linalg.eig(self.fcm[atom1][atom2])
index = np.argsort(eig1)
neweig = np.real([eig1[index[0]], eig1[index[1]], eig1[index[2]]])
for entry, p in enumerate(passed):
if np.allclose(neweig, p[2], atol=eigtol):
relations.append([atom1, atom2, p[0], p[1]])
unique = 0
break
if unique == 1:
relations.append([atom1, atom2, atom2, atom1])
passed.append([atom1, atom2, np.real(neweig)])
FCM_operations = []
for entry, r in enumerate(relations):
FCM_operations.append(r)
FCM_operations[entry].append([])
good = 0
for op in uniquepointops:
new = op.transform_tensor(self.fcm[r[2]][r[3]])
if np.allclose(new, self.fcm[r[0]][r[1]], atol=opstol):
FCM_operations[entry][4].append(op)
good = 1
if r[0] == r[3] and r[1] == r[2]:
good = 1
if r[0] == r[2] and r[1] == r[3]:
good = 1
if good == 0:
FCM_operations[entry] = [
r[0],
r[1],
r[3],
r[2],
]
FCM_operations[entry].append([])
for op in uniquepointops:
new = op.transform_tensor(self.fcm[r[2]][r[3]])
if np.allclose(
new.T,
self.fcm[r[0]][r[1]],
atol=opstol,
):
FCM_operations[entry][4].append(op)
self.FCM_operations = FCM_operations
return FCM_operations
def get_unstable_FCM(self, max_force=1):
"""
        Generate an unsymmetrized force constant matrix
        Args:
            max_force (float): maximum force constant value
Return:
numpy array representing the force constant matrix
"""
struc = self.structure
operations = self.FCM_operations
# set max force in reciprocal space
numsites = len(struc.sites)
D = (1 / max_force) * 2 * (np.ones([numsites * 3, numsites * 3]))
for op in operations:
same = 0
transpose = 0
if op[0] == op[1] and op[0] == op[2] and op[0] == op[3]:
same = 1
if op[0] == op[3] and op[1] == op[2]:
transpose = 1
if transpose == 0 and same == 0:
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = np.zeros([3, 3])
D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = np.zeros([3, 3])
for symop in op[4]:
tempfcm = D[3 * op[2] : 3 * op[2] + 3, 3 * op[3] : 3 * op[3] + 3]
tempfcm = symop.transform_tensor(tempfcm)
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] += tempfcm
if len(op[4]) != 0:
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = D[
3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
] / len(op[4])
D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = D[
3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
].T
continue
temp_tensor = Tensor(np.random.rand(3, 3) - 0.5) * max_force
temp_tensor_sum = sum([temp_tensor.transform(symm_op) for symm_op in self.sharedops[op[0]][op[1]]])
temp_tensor_sum = temp_tensor_sum / (len(self.sharedops[op[0]][op[1]]))
if op[0] != op[1]:
for pair in range(len(op[4])):
temp_tensor2 = temp_tensor_sum.T
temp_tensor2 = op[4][pair].transform_tensor(temp_tensor2)
temp_tensor_sum = (temp_tensor_sum + temp_tensor2) / 2
else:
temp_tensor_sum = (temp_tensor_sum + temp_tensor_sum.T) / 2
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = temp_tensor_sum
D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = temp_tensor_sum.T
return D
def get_symmetrized_FCM(self, unsymmetrized_fcm, max_force=1):
"""
        Generate a symmetrized force constant matrix from an unsymmetrized matrix
        Args:
            unsymmetrized_fcm (numpy array): unsymmetrized force constant matrix
            max_force (float): maximum force constant value
Return:
3Nx3N numpy array representing the force constant matrix
"""
operations = self.FCM_operations
D = unsymmetrized_fcm
for op in operations:
same = 0
transpose = 0
            if op[0] == op[1] and op[0] == op[2] and op[0] == op[3]:
same = 1
if op[0] == op[3] and op[1] == op[2]:
transpose = 1
if transpose == 0 and same == 0:
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = np.zeros([3, 3])
for symop in op[4]:
tempfcm = D[3 * op[2] : 3 * op[2] + 3, 3 * op[3] : 3 * op[3] + 3]
tempfcm = symop.transform_tensor(tempfcm)
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] += tempfcm
if len(op[4]) != 0:
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = D[
3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
] / len(op[4])
D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = D[
3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
].T
continue
temp_tensor = Tensor(D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3])
temp_tensor_sum = sum([temp_tensor.transform(symm_op) for symm_op in self.sharedops[op[0]][op[1]]])
if len(self.sharedops[op[0]][op[1]]) != 0:
temp_tensor_sum = temp_tensor_sum / (len(self.sharedops[op[0]][op[1]]))
# Apply the proper transformation if there is an equivalent already
if op[0] != op[1]:
for pair in range(len(op[4])):
temp_tensor2 = temp_tensor_sum.T
temp_tensor2 = op[4][pair].transform_tensor(temp_tensor2)
temp_tensor_sum = (temp_tensor_sum + temp_tensor2) / 2
else:
temp_tensor_sum = (temp_tensor_sum + temp_tensor_sum.T) / 2
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = temp_tensor_sum
D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = temp_tensor_sum.T
return D
def get_stable_FCM(self, fcm, fcmasum=10):
"""
        Generate a symmetrized force constant matrix that obeys the object's symmetry
constraints, has no unstable modes and also obeys the acoustic sum rule through an
iterative procedure
Args:
fcm (numpy array): unsymmeterized force constant matrix
fcmasum (int): number of iterations to attempt to obey the acoustic sum
rule
Return:
3Nx3N numpy array representing the force constant matrix
"""
check = 0
count = 0
while check == 0:
# if resymmetrizing brings back unstable modes 20 times, the method breaks
if count > 20:
check = 1
break
eigs, vecs = np.linalg.eig(fcm)
maxeig = np.max(-1 * eigs)
eigsort = np.argsort(np.abs(eigs))
for i in range(3, len(eigs)):
if eigs[eigsort[i]] > 1e-06:
eigs[eigsort[i]] = -1 * maxeig * np.random.rand()
diag = np.real(np.eye(len(fcm)) * eigs)
fcm = np.real(np.matmul(np.matmul(vecs, diag), vecs.T))
fcm = self.get_symmetrized_FCM(fcm)
fcm = self.get_asum_FCM(fcm)
eigs, vecs = np.linalg.eig(fcm)
unstable_modes = 0
eigsort = np.argsort(np.abs(eigs))
for i in range(3, len(eigs)):
if eigs[eigsort[i]] > 1e-06:
unstable_modes = 1
if unstable_modes == 1:
count = count + 1
continue
check = 1
return fcm
# acoustic sum
def get_asum_FCM(self, fcm, numiter=15):
"""
        Generate a symmetrized force constant matrix that obeys the object's symmetry
constraints and obeys the acoustic sum rule through an iterative procedure
Args:
            fcm (numpy array): 3Nx3N unsymmetrized force constant matrix
numiter (int): number of iterations to attempt to obey the acoustic sum
rule
Return:
numpy array representing the force constant matrix
"""
# set max force in reciprocal space
operations = self.FCM_operations
numsites = len(self.structure)
D = np.ones([numsites * 3, numsites * 3])
for num in range(numiter):
X = np.real(fcm)
# symmetry operations
pastrow = 0
total = np.zeros([3, 3])
for col in range(numsites):
total = total + X[0:3, col * 3 : col * 3 + 3]
total = total / (numsites)
for op in operations:
same = 0
transpose = 0
if op[0] == op[1] and op[0] == op[2] and op[0] == op[3]:
same = 1
if op[0] == op[3] and op[1] == op[2]:
transpose = 1
if transpose == 0 and same == 0:
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = np.zeros([3, 3])
for symop in op[4]:
tempfcm = D[3 * op[2] : 3 * op[2] + 3, 3 * op[3] : 3 * op[3] + 3]
tempfcm = symop.transform_tensor(tempfcm)
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] += tempfcm
if len(op[4]) != 0:
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = D[
3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
] / len(op[4])
D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = D[
3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3
].T
continue
# Get the difference in the sum up to this point
currrow = op[0]
if currrow != pastrow:
total = np.zeros([3, 3])
for col in range(numsites):
total = total + X[currrow * 3 : currrow * 3 + 3, col * 3 : col * 3 + 3]
for col in range(currrow):
total = total - D[currrow * 3 : currrow * 3 + 3, col * 3 : col * 3 + 3]
total = total / (numsites - currrow)
pastrow = currrow
# Apply the point symmetry operations of the site
temp_tensor = Tensor(total)
temp_tensor_sum = sum([temp_tensor.transform(symm_op) for symm_op in self.sharedops[op[0]][op[1]]])
if len(self.sharedops[op[0]][op[1]]) != 0:
temp_tensor_sum = temp_tensor_sum / (len(self.sharedops[op[0]][op[1]]))
# Apply the proper transformation if there is an equivalent already
if op[0] != op[1]:
for pair in range(len(op[4])):
temp_tensor2 = temp_tensor_sum.T
temp_tensor2 = op[4][pair].transform_tensor(temp_tensor2)
temp_tensor_sum = (temp_tensor_sum + temp_tensor2) / 2
else:
temp_tensor_sum = (temp_tensor_sum + temp_tensor_sum.T) / 2
D[3 * op[0] : 3 * op[0] + 3, 3 * op[1] : 3 * op[1] + 3] = temp_tensor_sum
D[3 * op[1] : 3 * op[1] + 3, 3 * op[0] : 3 * op[0] + 3] = temp_tensor_sum.T
fcm = fcm - D
return fcm
@requires(Phonopy, "phonopy not installed!")
def get_rand_FCM(self, asum=15, force=10):
"""
        Generate a symmetrized force constant matrix from an unsymmetrized matrix
that has no unstable modes and also obeys the acoustic sum rule through an
iterative procedure
Args:
force (float): maximum force constant
asum (int): number of iterations to attempt to obey the acoustic sum
rule
Return:
NxNx3x3 np.array representing the force constant matrix
"""
numsites = len(self.structure.sites)
structure = pymatgen.io.phonopy.get_phonopy_structure(self.structure)
pnstruc = Phonopy(structure, np.eye(3), np.eye(3))
dyn = self.get_unstable_FCM(force)
dyn = self.get_stable_FCM(dyn)
dyn = np.reshape(dyn, (numsites, 3, numsites, 3)).swapaxes(1, 2)
dynmass = np.zeros([len(self.structure), len(self.structure), 3, 3])
masses = []
for j in range(numsites):
masses.append(self.structure.sites[j].specie.atomic_mass)
dynmass = np.zeros([numsites, numsites, 3, 3])
for m in range(numsites):
for n in range(numsites):
dynmass[m][n] = dyn[m][n] * np.sqrt(masses[m]) * np.sqrt(masses[n])
supercell = pnstruc.get_supercell()
primitive = pnstruc.get_primitive()
converter = dyntofc.DynmatToForceConstants(primitive, supercell)
dyn = np.reshape(np.swapaxes(dynmass, 1, 2), (numsites * 3, numsites * 3))
converter.set_dynamical_matrices(dynmat=[dyn])
converter.run()
fc = converter.get_force_constants()
return fc
def get_piezo(BEC, IST, FCM, rcond=0.0001):
"""
Generate a random piezoelectric tensor based on a structure and corresponding
symmetry
Args:
BEC (numpy array): Nx3x3 array representing the born effective charge tensor
IST (numpy array): Nx3x3x3 array representing the internal strain tensor
        FCM (numpy array): NxNx3x3 array representing the force constant matrix
        rcond (float): condition for excluding eigenvalues in the pseudoinverse
Return:
3x3x3 calculated Piezo tensor
"""
numsites = len(BEC)
temp_fcm = np.reshape(np.swapaxes(FCM, 1, 2), (numsites * 3, numsites * 3))
eigs, vecs = np.linalg.eig(temp_fcm)
K = np.linalg.pinv(
-temp_fcm,
rcond=np.abs(eigs[np.argsort(np.abs(eigs))[2]]) / np.abs(eigs[np.argsort(np.abs(eigs))[-1]]) + rcond,
)
K = np.reshape(K, (numsites, 3, numsites, 3)).swapaxes(1, 2)
return np.einsum("ikl,ijlm,jmno->kno", BEC, K, IST) * 16.0216559424
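# A short note restating what the einsum above computes (the factor
# 16.0216559424 is, presumably, the e/A^2 -> C/m^2 unit conversion):
#   e_kno = sum_{i,j,l,m} Z_{ikl} * (K^+)_{ijlm} * IST_{jmno}
# where Z is the born effective charge tensor, K^+ the pseudo-inverse of the
# (negated) force constant matrix, and IST the internal strain tensor.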
@requires(Phonopy, "phonopy not installed!")
def rand_piezo(struc, pointops, sharedops, BEC, IST, FCM, anumiter=10):
"""
Generate a random piezoelectric tensor based on a structure and corresponding
symmetry
Args:
struc (pymatgen structure): structure whose symmetry operations the piezo tensor must obey
pointops: list of point operations obeyed by a single atomic site
sharedops: list of point operations shared by a pair of atomic sites
BEC (numpy array): Nx3x3 array representing the born effective charge tensor
IST (numpy array): Nx3x3x3 array representing the internal strain tensor
        FCM (numpy array): NxNx3x3 array representing the force constant matrix
anumiter (int): number of iterations for acoustic sum rule convergence
Return:
        list in the form of [Nx3x3 random born effective charge tensor,
Nx3x3x3 random internal strain tensor, NxNx3x3 random force constant matrix, 3x3x3 piezo tensor]
"""
bec = BornEffectiveCharge(struc, BEC, pointops)
bec.get_BEC_operations()
rand_BEC = bec.get_rand_BEC()
ist = InternalStrainTensor(struc, IST, pointops)
ist.get_IST_operations()
rand_IST = ist.get_rand_IST()
fcm = ForceConstantMatrix(struc, FCM, pointops, sharedops)
fcm.get_FCM_operations()
rand_FCM = fcm.get_rand_FCM()
P = get_piezo(rand_BEC, rand_IST, rand_FCM) * 16.0216559424 / struc.volume
return (rand_BEC, rand_IST, rand_FCM, P)
| gmatteo/pymatgen | pymatgen/analysis/piezo_sensitivity.py | Python | mit | 27,600 |
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from elasticsearch import Elasticsearch
def count_words_at_url(url):
resp = requests.get(url)
return len(resp.text.split())
def simplework(url):
return url
def indexpage(url):
try:
resp = requests.get(url)
soup = BeautifulSoup(resp.text, 'html.parser')
soup.get_text()
es = Elasticsearch()
es.index(index="bc", doc_type='webpage', body={"timestamp": datetime.now(),"text":soup.get_text(),"url":url})
except requests.ConnectionError:
#log error
return False
return True
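# Hypothetical follow-up sketch (not part of the original worker): indexed
# pages can be queried back from the same index, e.g.
#   es = Elasticsearch()
#   es.search(index="bc", body={"query": {"match": {"text": "breadcrumbs"}}})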
def dobookmarks(filename):
soup = BeautifulSoup(file(filename).read(), 'html.parser')
for anchor in soup.findAll('a', href=True):
indexpage(anchor['href'])
return True
| andreydelpozo2/breadcrumbs | src/indexworker.py | Python | mit | 821 |
from camera import Camera
from database import COLMAPDatabase
from image import Image
from scene_manager import SceneManager
from rotation import Quaternion, DualQuaternion
| trueprice/pycolmap | pycolmap/__init__.py | Python | mit | 173 |
# djangocms_concurrent_users/__init__.py
__version__ = '0.0.5'
default_app_config = 'djangocms_concurrent_users.apps.ConcurrentUsersConfig' | Blueshoe/djangocms-concurrent-users | djangocms_concurrent_users/__init__.py | Python | mit | 139 |
from django.conf import settings
from django.contrib import auth
from django.contrib.auth import load_backend
from django.core.exceptions import ImproperlyConfigured
from serverauth.backends import ServerAuthBackend
class ServerAuthMiddleware(object):
"""
Middleware for utilizing Web-server-provided authentication. Based on
django.contrib.auth.RemoteUserMiddleware, but enables other fields in the
user profile to be filled out from metadata, and only requires
authentication-related environment variables to be set on the login page.
"""
def process_request(self, request):
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django server auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the ServerAuthMiddleware class.")
if settings.SERVER_USER not in request.META:
# If the required variable isn't available, don't touch the request:
# the user may already be logged in via a session.
return
# If the user is already authenticated and that user is the user we are
# getting passed in the headers, then the correct user is already
# persisted in the session and we don't need to continue.
if request.user.is_authenticated():
if request.user.get_username() == self.extract_username(request):
return
else:
# An authenticated user is associated with the request, but
# it does not match the authorized user in the header.
self._remove_invalid_user(request)
# We are seeing this user for the first time in this session, attempt
# to authenticate the user.
user = auth.authenticate(request=request)
if user:
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.user = user
auth.login(request, user)
def extract_username(self, request):
"""
Asks the backend to parse the username out of the request."
"""
backend_str = request.session[auth.BACKEND_SESSION_KEY]
backend = auth.load_backend(backend_str)
return backend.extract_username(request)
def _remove_invalid_user(self, request):
"""
Removes the current authenticated user in the request which is invalid
but only if the user is authenticated via the ServerAuthBackend.
"""
try:
stored_backend = load_backend(request.session.get(auth.BACKEND_SESSION_KEY, ''))
except ImportError:
# backend failed to load
auth.logout(request)
else:
if isinstance(stored_backend, ServerAuthBackend):
auth.logout(request)
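# Hypothetical configuration sketch (setting names inferred from the code
# above): the middleware goes after Django's AuthenticationMiddleware, with
# SERVER_USER naming the request.META key set by the web server, e.g.
#   MIDDLEWARE_CLASSES = [
#       ...,
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'serverauth.middleware.ServerAuthMiddleware',
#   ]
#   SERVER_USER = 'REMOTE_USER'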
| btidor/django-serverauth | serverauth/middleware.py | Python | mit | 3,099 |
import glob
from PIL import Image
File_list = glob.glob('C:/PythonImage/*.png')
for i in File_list:
print(i)
| BD823/pcover | Work/Work3/6imageload&face detection using openCV (nam,yang)/Image_Load_Test.py | Python | mit | 112 |
#!/usr/bin/python
'''
ispuapi test script
'''
__author__ = 'vickydasta'
try:
from lib.ispuapi import aqi
except ImportError:
print "ispuapi is not on this dir, see doc/README.md"
import matplotlib.pyplot as plt
# the city code for Pekanbaru is 'PKU'.lower()
kota = 'pku'
data = aqi(kota)
plt.plot(data)
plt.xlabel('Time (in hours)')
plt.ylabel('PM10 level (in ug/m3)')
plt.show()
| vickydasta/ispuapi | test.py | Python | mit | 415 |
"""
~~~~~~~~~~~~~~~~~~~~
A simple GIF encoder
~~~~~~~~~~~~~~~~~~~~
Structure of a GIF file: (in the order they appear in the file)
1. always begins with the logical screen descriptor.
2. then follows the global color table.
3. then follows the loop control block (specify the number of loops).
for a static image this block is not necessary.
4. then follows the image data of the frames,
each frame can be further divided into:
(i) a graphics control block that specify the delay and
transparent color of this frame.
static frames don't have this block.
(ii) the image descriptor.
(iii) the LZW compressed data of the pixels.
    5. finally the trailer '0x3B'.
Reference for the GIF89a specification:
http://giflib.sourceforge.net/whatsinagif/index.html
"""
from collections import OrderedDict
from struct import pack
__all__ = [
"screen_descriptor",
"loop_control_block",
"graphics_control_block",
"image_descriptor",
"rectangle",
"pause",
"parse_image",
"lzw_compress",
]
def screen_descriptor(width, height, color_depth):
"""
This block specifies both the size of the image and its global color table.
"""
byte = 0b10000000 | (color_depth - 1) | (color_depth - 1) << 4
return pack("<6s2H3B", b"GIF89a", width, height, byte, 0, 0)
def loop_control_block(loop):
"""This block specifies the number of loops (0 means loop infinitely).
"""
return pack("<3B8s3s2BHB", 0x21, 0xFF, 11, b"NETSCAPE", b"2.0", 3, 1, loop, 0)
def graphics_control_block(delay, trans_index=None):
"""
This block specifies the delay and transparent color of the coming frame.
`trans_index=None` means there is no transparent color in this frame.
For static frames this block is not necessary.
"""
if trans_index is None:
return pack("<4BH2B", 0x21, 0xF9, 4, 0b00000100, delay, 0, 0)
return pack("<4BH2B", 0x21, 0xF9, 4, 0b00000101, delay, trans_index, 0)
def image_descriptor(left, top, width, height, byte=0):
"""
This block specifies the position of the coming frame (relative to the window)
and whether it has a local color table or not.
"""
return pack("<B4HB", 0x2C, left, top, width, height, byte)
def rectangle(left, top, width, height, color):
"""Paint a rectangle with given color.
"""
descriptor = image_descriptor(left, top, width, height)
data = lzw_compress([color] * width * height, mcl=2)
return descriptor + data
def pause(delay, trans_index=0):
"""A 1x1 invisible frame that can be used for padding delay time in
an animation.
"""
control = graphics_control_block(delay, trans_index)
pixel1x1 = rectangle(0, 0, 1, 1, trans_index)
return control + pixel1x1
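# A minimal end-to-end sketch (illustrative, not part of the module API): the
# blocks above concatenate into a complete single-frame GIF file, e.g.
#
#   palette = bytearray([0, 0, 0, 255, 0, 0] + [0] * (3 * 256 - 6))  # 256 RGB entries
#   blob = (screen_descriptor(10, 10, 8) + palette
#           + rectangle(0, 0, 10, 10, 1)   # a 10x10 square in color index 1 (red)
#           + b'\x3B')                     # the trailer
#   with open('out.gif', 'wb') as f:
#       f.write(blob)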
def parse_image(img):
"""
Parse a gif image and get its palette and LZW compressed pixel data.
`img` must be an instance of `PIL.Image.Image` class and has .gif format.
"""
data = list(img.getdata())
colors = OrderedDict()
indices = []
count = 0
for c in data:
if c not in colors:
colors[c] = count
indices.append(count)
count += 1
else:
indices.append(colors[c])
palette = []
for c in colors:
palette += c
# here we do not bother about how many colors are actually in the image,
# we simply use full 256 colors.
if len(palette) < 3 * 256:
palette += [0] * (3 * 256 - len(palette))
descriptor = image_descriptor(0, 0, img.size[0], img.size[1], 0b10000111)
compressed_data = lzw_compress(indices, mcl=8)
return descriptor + bytearray(palette) + compressed_data
class DataBlock(object):
"""
Write bits into a bytearray and then pack this bytearray into data blocks.
This class is used in the Lempel-Ziv-Welch compression algorithm when
encoding maze into frames.
"""
def __init__(self):
self._bitstream = bytearray() # write bits into this array
self._nbits = 0 # a counter holds how many bits have been written
def encode_bits(self, num, size):
"""
Given a number `num`, encode it as a binary string of length `size`,
and pack it at the end of bitstream.
Example: num = 3, size = 5. The binary string for 3 is '00011' (it's
'0b00011' in python), here we padded extra zeros at the left to make
its length to be 5. The tricky part is that in a gif file, the encoded
binary data stream increases from lower (least significant) bits to higher
(most significant) bits, so we have to reverse it as '11000' and pack
this string at the end of bitstream!
"""
string = bin(num)[2:].zfill(size)
for digit in reversed(string):
if len(self._bitstream) * 8 == self._nbits:
self._bitstream.append(0)
if digit == "1":
self._bitstream[-1] |= 1 << (self._nbits % 8)
self._nbits += 1
def dump_bytes(self):
"""
Pack the LZW encoded image data into blocks.
Each block is of length <= 255 and is preceded by a byte in 0-255 that
indicates the length of this block. Each time after this function is
called `_nbits` and `_bitstream` are reset to 0 and empty.
"""
bytestream = bytearray()
while len(self._bitstream) > 255:
bytestream.append(255)
bytestream.extend(self._bitstream[:255])
self._bitstream = self._bitstream[255:]
if len(self._bitstream) > 0:
bytestream.append(len(self._bitstream))
bytestream.extend(self._bitstream)
self._nbits = 0
self._bitstream = bytearray()
return bytestream
stream = DataBlock()
def lzw_compress(input_data, mcl):
"""
The Lempel-Ziv-Welch compression algorithm used in the GIF89a specification.
:param input_data: a 1-d list consists of integers in range [0, 255],
these integers are the indices of the colors of the pixels
in the global color table. We do not check the validity of
this input data here for efficiency.
:param mcl: minimum code length for compression, it's an integer between
2 and 12.
GIF allows the minimum code length as small as 2 and as large as 12.
Even there are only two colors, the minimum code length must be at least 2.
    Note this is not actually the smallest code length used in the encoding
    process: the minimum code length only tells us how many bits are needed
    for the distinct colors of the image, and we still have to account for
    the two special codes `end` and `clear`. Therefore the actual smallest
    code length that will be used is one more than `mcl`.
"""
clear_code = 1 << mcl
end_code = clear_code + 1
max_codes = 4096
code_length = mcl + 1
next_code = end_code + 1
# the default initial dict
code_table = {(i,): i for i in range(1 << mcl)}
# output the clear code
stream.encode_bits(clear_code, code_length)
pattern = ()
for c in input_data:
pattern += (c,)
if pattern not in code_table:
# add new code to the table
code_table[pattern] = next_code
# output the prefix
stream.encode_bits(code_table[pattern[:-1]], code_length)
pattern = (c,) # suffix becomes the current pattern
next_code += 1
if next_code == 2 ** code_length + 1:
code_length += 1
if next_code == max_codes:
next_code = end_code + 1
stream.encode_bits(clear_code, code_length)
code_length = mcl + 1
code_table = {(i,): i for i in range(1 << mcl)}
stream.encode_bits(code_table[pattern], code_length)
stream.encode_bits(end_code, code_length)
return bytearray([mcl]) + stream.dump_bytes() + bytearray([0])
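
# A minimal sketch (not part of the original module): compress a tiny
# 2-color index stream and inspect the packed result. The first byte is
# the minimum code length and the trailing 0x00 is the block terminator.
if __name__ == "__main__":
    demo = lzw_compress([0, 1, 0, 1] * 4, mcl=2)
    print(demo[0], hex(demo[-1]), len(demo))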
| neozhaoliang/pywonderland | src/gifmaze/gifmaze/encoder.py | Python | mit | 8,012 |
from django.template.defaultfilters import filesizeformat
"""
Human readable file size
source: http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
"""
unit_prefixes1 = ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']
unit_prefixes2 = ['','K','M','G','T','P','E','Z']
def sizeof_fmt(num, suffix='B'):
    # Delegate to Django's filesizeformat, replacing the non-breaking space
    # it inserts and returning UTF-8 encoded bytes.
    return filesizeformat(float(num)).replace(u"\u00A0", " ").encode("UTF-8")
    # Pure-Python fallback (unused, kept for reference):
    # for unit in unit_prefixes2:
    #     if abs(num) < 1024.0:
    #         return "%3.1f %s%s" % (num, unit, suffix)
    #     num /= 1024.0
    # return "%.1f%s%s" % (num, 'Yi', suffix)
def comma_sep_number(num):
"""Add thousands separator. 10000 -> 10,000"""
if num is None:
return None
return "{:,}".format(num)
| IQSS/miniverse | dv_apps/utils/byte_size.py | Python | mit | 777 |
import asyncio
import aiohs2
import pandas as pd
import subprocess
import urllib
import re
import logging
from functools import wraps
coroutine = asyncio.coroutine
logger = logging.getLogger(__name__)
hive_type_map = {
'BOOLEAN': pd.np.dtype(bool),
'BINARY': pd.np.dtype(bytes),
'TINYINT': pd.np.dtype(int),
'SMALLINT': pd.np.dtype(int),
'INT': pd.np.dtype(int),
'BIGINT': pd.np.dtype(int),
'FLOAT': pd.np.dtype(float),
'DOUBLE': pd.np.dtype(float),
'DECIMAL': pd.np.dtype(float),
'TIMESTAMP': pd.np.dtype('datetime64[ms]'),
'DATE': pd.np.dtype('datetime64[ms]'),
'STRING': pd.np.dtype(str),
'VARCHAR': pd.np.dtype(str),
'CHAR': pd.np.dtype(str),
'ARRAY': pd.np.dtype(list),
'MAP': pd.np.dtype(dict),
'STRUCT': pd.np.dtype(object),
'UNIONTYPE': pd.np.dtype(object),
}
hive_nils = ['(null)', 'null', 'none', '']
def hive_isnull(val):
return val.str.lower().isin(hive_nils)
class Framer:
def __init__(self, columns, dtypes, fill_values=None):
self.columns = columns
self.dtypes = dtypes
self.offset = 0
self.fill_values = fill_values or {}
self.warns = set()
@staticmethod
def get_dtype(typ):
try:
return hive_type_map[typ.rsplit('<', 1)[0].rsplit('_', 1)[0]]
except KeyError:
logger.warning('Unknown type %r for hive request', typ)
return pd.np.dtype(object)
@classmethod
@coroutine
def by_cursor(cls, cur, hql, **kws):
yield from cur.execute(hql)
schema = (yield from cur.getSchema())
if schema is None:
columns = dtypes = None
else:
columns = pd.Index([nfo['columnName'] for nfo in schema])
dtypes = [cls.get_dtype(nfo['type']) for nfo in schema]
return cls(columns, dtypes, **kws)
@coroutine
def __call__(self, coro):
raw = yield from coro
return self.mk_df(raw)
def mk_df(self, raw, na_vals=None):
if self.columns is None:
if raw is None:
return None
else:
if raw is not None and '__schema__' not in self.warns:
logger.warning('no schema, but got data from hive')
self.warns.add('__schema__')
return pd.DataFrame(raw, dtype=object)
df = pd.DataFrame(raw or None, # self.empty,
columns=self.columns, dtype=object)
if na_vals is not None:
df[df.isin(na_vals)] = None
df.index += self.offset
self.offset += len(df)
for col, val in self.fill_values.items():
if col in df:
df[col] = df[col].fillna(val)
# if self.empty is None:
# local.empty = df[:0].copy()
for col, typ in zip(self.columns, self.dtypes):
try:
if typ == pd.np.dtype(str):
# don't convert str again, as it will turn None into 'None'
continue
if typ == pd.np.dtype('datetime64[ms]'):
try:
df[col] = df[col].astype(int)
except ValueError:
pass
df[col] = df[col].astype(typ)
except (TypeError, ValueError) as e:
first = not bool(self.warns)
if col not in self.warns:
logger.warning('Cannot convert %r to %r (%s)', col, typ, e)
self.warns.add(col)
if first:
logger.warning('consider passing fill_values={%r: ...} '
'as argument to your request', col)
return df
class RawHDFSChunker:
def __init__(self, hive, table, partitions, fill_values=None,
sep=None, nl='\n', strip='\r\t'):
self.hive = hive
self.table = table
self.partitions = partitions[:]
self.fill_values = fill_values
self.partition = None
self.framer = None
self.proc = None
self.tail = b''
self.sep = sep
self.nl = nl
self.strip = strip
self.sel = slice(None)
self.fill = []
@coroutine
def next_part(self):
yield from self.close()
self.partition = self.partitions.pop(0)
self.framer, self.proc = yield from self.hive._raw_hdfs(
self.table, self.partition, fill_values=self.fill_values)
self.tail = b''
@coroutine
def chunker(self):
chunk = None
while self.partition or self.partitions:
if not self.partition:
yield from self.next_part()
chunk = yield from self.proc.stdout.read(24000000)
if not chunk:
self.partition = None
if self.tail:
chunk = self.tail
self.tail = b''
break
else:
continue
split = (self.tail + chunk).rsplit(b'\n', 1)
if len(split) == 1:
self.tail = chunk
else:
chunk, self.tail = split
break
if chunk:
chunk = chunk.decode()
            if True:  # FIXME: decide when to (re)initialize; was guarded by `self.sep is None`
self.l = len(self.framer.columns)
if self.l == 1:
self.sep = sep = '\x01'
else:
                    # guess the separator
for sep in ['\x01', '\t', ';', ',', ' | ', ' ']:
if sep in chunk:
self.sep = sep
break
else:
                        raise ValueError('No separator found')
if sep == '\t':
self.strip = '\r'
elif sep == ' | ':
self.strip = ' \r\t'
lines = (pd.Series(chunk.split(self.nl))
.str.strip(self.strip).str.split(self.sep))
l = int(lines.str.len().median())
diff = l - self.l
a = 0
b = 0
while diff > 0:
if hive_isnull(lines.str[a]).all():
a += 1
diff -= 1
elif hive_isnull(lines.str[-(b+1)]).all():
b += 1
diff -= 1
else:
b += diff
diff = 0
if diff < 0:
self.fill = [None] * (-diff)
diff = 0
self.sel = slice(a or None, l - b if b else None)
raw = (cs + self.fill
for cs in (l.strip(self.strip).split(self.sep)[self.sel]
for l in chunk.split(self.nl))
if cs)
            return self.framer.mk_df(raw, na_vals=['',
                                                   '\\N', 'n/a',
                                                   'NaN', 'nan',
                                                   '(null)', 'null'])
else:
return None
def iter(self):
try:
while True:
fut = asyncio.async(self.chunker())
yield fut
if fut.result() is None:
break
finally:
yield self.close()
@coroutine
def close(self):
if self.proc and self.proc.returncode is None:
try:
self.proc.send_signal(subprocess.signal.SIGINT)
except ProcessLookupError:
pass
yield from self.proc.wait()
class AioHive:
def __init__(self, host=None, port=10000, config=None, hadoop='hadoop'):
"""
coroutine based hive client
Parameters
==========
host : str
host of the hiveserver2 to connect to
config : str
hive-site.xml to extract hive.metastore.uris
port : int, default 10000
port of the hiveserver2
hadoop : str, optional
hadoop executable for raw hdfs access
"""
if (host is None and config is None) or (config and host):
            raise TypeError('Exactly one of the host and config arguments '
                            'has to be supplied')
if config:
import xml.etree.ElementTree as ET
cfg = ET.parse(config)
for res in cfg.iter('property'):
if res.findtext('name') == 'hive.metastore.uris':
uri = res.findtext('value')
host = uri.split('://')[-1].split(':')[0]
break
else:
raise ValueError(
"could not find 'hive.metastore.uris' in config")
self.cli = aiohs2.Client(host=host, port=port)
self.config = config
self.hadoop = hadoop
@coroutine
def execute(self, *rqs):
""" execute request without looking at returns """
cur = yield from self.cli.cursor()
try:
for rq in rqs:
yield from cur.execute(rq)
finally:
yield from cur.close()
@coroutine
def fetch(self, hql, chunk_size=10000, fill_values=None):
""" execute request and fetch answer as DataFrame """
cur = yield from self.cli.cursor()
try:
framer = yield from Framer.by_cursor(cur, hql,
fill_values=fill_values)
return (yield from framer(cur.fetch(maxRows=chunk_size)))
finally:
yield from cur.close()
    @coroutine
    def iter(self, hql, chunk_size=10000, fill_values=None):
""" execute request and iterate over chunks of resulting DataFrame """
cur = yield from self.cli.cursor()
framer = yield from Framer.by_cursor(cur, hql,
fill_values=fill_values)
chunks = cur.iter(maxRows=chunk_size)
def iter_chunks():
try:
for chunk in chunks:
# here we yield the coroutine that will fetch the data
# and put in in a frame
yield framer(chunk)
finally:
yield framer(cur.close())
return iter_chunks()
@coroutine
def close(self):
yield from self.cli.close()
@coroutine
def _raw_hdfs(self, table, partition=True, fill_values=None):
if partition is True:
rq = 'describe formatted {table}'
else:
rq = 'describe formatted {table} partition ({partition})'
info = (yield from self.fetch(
rq.format(table=table, partition=partition))).fillna('').applymap(str.strip)
i0, i1, *_ = pd.np.flatnonzero(info.col_name == '')
schema = info[i0+1:i1]
location = info.query('col_name == "Location:"').data_type
if location.empty:
            raise KeyError('table {} does not seem to be materialized'
                           .format(table))
proc = yield from asyncio.create_subprocess_exec(
self.hadoop, 'fs', '-ls', '-R', *location.values,
stdout=subprocess.PIPE)
out = (yield from proc.stdout.read()).decode().split('\n')
location = [f
                    for f in (re.split(r'\s+', l, 7)[-1]
for l in out if l.startswith('-'))
if f.rsplit('/', 1)[-1][0] not in '._']
        if not location:
            raise KeyError('table {} does not seem to be filled'
                           .format(table))
columns = schema.col_name
dtypes = (schema.data_type
.str.split('(').str[0]
.str.split('<').str[0].str.upper()
.apply(hive_type_map.__getitem__))
framer = Framer(columns, dtypes, fill_values=fill_values)
proc = yield from asyncio.create_subprocess_exec(
self.hadoop, 'fs', '-text', *location,
stdout=subprocess.PIPE)
return framer, proc
@coroutine
def raw(self, table, fill_values=None, **partitions):
if '.' in table:
db, table = table.rsplit('.', 1)
yield from self.execute('use {db}'.format(db=db))
        part_error = None
        try:
            parts = yield from self.fetch('show partitions {}'.format(table))
            if parts.empty:
                parts = None
        except aiohs2.error.Pyhs2Exception as e:
            parts = None
            # keep a reference: `e` goes out of scope after the except block
            part_error = e
        if parts is None:
            if partitions:
                raise part_error or KeyError(
                    'table {} has no partitions'.format(table))
            select = [True]
else:
parts = (parts
.applymap(urllib.parse.unquote)
.partition.str.split('/', expand=True)
.unstack().str.split('=', expand=True)
.reset_index().set_index(['level_1', 0])[1]
.unstack())
sel = pd.Series(not bool(partitions), index=parts.index)
for name, val in partitions.items():
if name not in parts.columns:
raise KeyError('no partition info {} in {}', name, table)
if isinstance(val, str):
val = [val]
for v in val:
sel |= parts[name].str.contains(v)
select = list((parts[sel].columns.values[None, :]
+ "='" + parts[sel] + "'")
.apply(', '.join, axis=1))
rhc = RawHDFSChunker(self, table, select,
fill_values=fill_values)
return rhc.iter()
class SyncedHive:
def __init__(self, *args, hive=None, **kws):
"""
synced wrapper around the asyncio hive class
Parameters
==========
host : str
host of the hiveserver2 to connect to
config : str
hive-site.xml to extract hive.metastore.uris
port : int, default 10000
port of the hiveserver2
hive : AioHive, optional
existing async hive client
"""
self.hive = hive or AioHive(*args, **kws)
self.loop = asyncio.get_event_loop()
def run(self, coro):
return self.loop.run_until_complete(coro)
def synced(name):
func = getattr(AioHive, name)
@wraps(func)
def synced(self, *args, **kws):
return self.run(func(self.hive, *args, **kws))
return synced
execute = synced('execute')
fetch = synced('fetch')
close = synced('close')
def iter(self, *args, **kws):
it = self.run(self.hive.iter(*args, **kws))
try:
for chunk in it:
data = self.run(chunk)
if data is not None and not data.empty:
yield data
except BaseException as e:
# ensure close is run
self.run(it.throw(e))
raise e
def raw(self, *args, **kws):
it = self.run(self.hive.raw(*args, **kws))
try:
for chunk in it:
data = self.run(chunk)
if data is not None and not data.empty:
yield data
except BaseException as e:
# ensure close is run
self.run(it.throw(e))
raise e
Hive = SyncedHive
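
# Minimal usage sketch (assumed host and query names, not part of the
# original module):
#
#   hive = Hive(host="hive.example.com")   # or Hive(config="hive-site.xml")
#   df = hive.fetch("select * from some_db.some_table limit 10")
#   for chunk in hive.iter("select * from some_db.big_table"):
#       ...                                # each chunk is a pandas DataFrame
#   hive.close()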
| wabu/pyhive | hive/__init__.py | Python | mit | 15,462 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class StorageManagementClientConfiguration(Configuration):
"""Configuration for StorageManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(StorageManagementClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2018-02-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-storage/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_02_01/_configuration.py | Python | mit | 3,231 |
#!/usr/bin/env python2.7
from __future__ import print_function
import sys
import time
from threading import Thread
from pymesos import MesosExecutorDriver, Executor, decode_data
class JupyterHubExecutor(Executor):
"""
May not be necessary
"""
def launchTask(self, driver, task):
def run_task(task):
update = {
'task_id': {
"value": task['task_id']['value']
},
'state': 'TASK_RUNNING',
'timestamp': time.time()
}
driver.sendStatusUpdate(update)
print(decode_data(task['data']), file=sys.stderr)
time.sleep(30)
update = {
'task_id': {
'value': task['task_id']['value']
},
'state': 'TASK_FINISHED',
'timestamp': time.time()
}
driver.sendStatusUpdate(update)
thread = Thread(target=run_task, args=(task,))
thread.start()
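
if __name__ == '__main__':
    # Minimal wiring sketch following the usual pymesos pattern; the agent
    # endpoint is provided by Mesos through the environment, so no extra
    # configuration is assumed here.
    driver = MesosExecutorDriver(JupyterHubExecutor())
    driver.run()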
| tanderegg/mesos-spawner | mesos_spawner/executor.py | Python | mit | 1,027 |
from flask import Flask
import flask.ext.testing as testing
from mould.app import create_app
from mould.migration import Migration
import config
class TestCaseDBException(BaseException):
pass
class TestCase(testing.TestCase):
def create_app(self):
app = Flask(config.APP_NAME)
app = create_app(app)
app.config["TESTING"] = True
if app.config.get('TEST_DB_URI') is None:
raise TestCaseDBException("No TEST_DB_URI specified in config.py!")
app.config['SQLALCHEMY_DATABASE_URI'] = app.config.get('TEST_DB_URI')
self.migration = Migration(app)
return app
def setUp(self):
self.migration.migrate('up')
def tearDown(self):
self.migration.migrate('base')
| kates/mould | mould/testcase.py | Python | mit | 755 |
#!/usr/bin/env python
# Copyright (c) 2014 Eugene Zhuk.
# Use of this source code is governed by the MIT license that can be found
# in the LICENSE file.
"""Checks AWS usage.
This script retrieves and displays an estimated total statement amount for
the specified billing period.
Usage:
./check_usage.py [options]
"""
import boto.beanstalk
import boto.cloudformation
import boto.cloudfront
import boto.cloudsearch2
import boto.cloudtrail
import boto.datapipeline
import boto.dynamodb2
import boto.ec2
import boto.ec2.autoscale
import boto.ec2.cloudwatch
import boto.elasticache
import boto.elastictranscoder
import boto.emr
import boto.glacier
import boto.iam
import boto.kinesis
import boto.opsworks
import boto.rds2
import boto.redshift
import boto.route53
import boto.s3
import boto.ses
import boto.sns
import boto.sdb
import boto.sqs
import boto.swf
import boto.vpc
import csv
import itertools
import optparse
import re
import sys
import time
from boto.ec2.cloudwatch import MetricAlarm
class Error(Exception):
pass
class InstanceState(object):
"""Represents the state of an instance.
"""
PENDING = 0
RUNNING = 16
SHUTTING_DOWN = 32
TERMINATED = 48
STOPPING = 64
STOPPED = 80
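    # The numeric values mirror the low byte of the EC2 API's instance
    # state codes (the high byte carries internal cleanup flags).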
def connect(service, regions):
"""Establishes connections to the specified service.
"""
if regions is not None:
return [service.connect_to_region(r.name) for r in service.regions()
if r.name in regions]
else:
return [service.connect_to_region(r.name) for r in service.regions()
if not r.name.startswith(('us-gov-', 'cn-'))]
def print_items(items, labels):
if 1 == len(labels):
return '{0} {1}{2}'.format(items, labels[0], 's'[1 == items:])
else:
return '{0} {1}'.format(items, labels[1 != items])
def print_two_items(items1, labels1, items2, labels2):
return '{0}{1}'.format(print_items(items1, labels1),
' [{0} {1}]'.format(items2, labels2) if 0 != items2 else '')
def print_two_items2(items1, labels1, items2, labels2):
return '{0}{1}'.format(print_items(items1, labels1),
' [{0}]'.format(print_items(items2, labels2)) if 0 != items2 else '')
def flatten(x):
return itertools.chain.from_iterable(x)
def get_ec2_usage(regions):
cs = connect(boto.ec2, regions)
instances = list(flatten(x.instances for c in cs
for x in c.get_all_reservations()))
running = sum(InstanceState.RUNNING == i.state_code
for i in instances)
print print_two_items(len(instances), ['EC2 Instances'], running, 'running')
print print_items(sum(len(c.get_all_reserved_instances()) for c in cs),
['EC2 Reserved Instance'])
print print_items(sum(len(c.get_all_spot_instance_requests()) for c in cs),
['EC2 Spot Instance Request'])
volumes = list(flatten(c.get_all_volumes() for c in cs))
size = sum(v.size for v in volumes)
print print_two_items(len(volumes), ['EBS Volume'], size, 'GB')
print print_items(sum(len(c.get_all_snapshots(owner=['self'])) for c in cs),
['EBS Snapshot'])
print print_items(sum(len(c.get_all_images(owners=['self'])) for c in cs),
['Amazon Machine Image'])
print print_items(sum(len(c.get_all_network_interfaces()) for c in cs),
['Network Interface'])
print print_items(len(list(flatten(c.get_all_addresses() for c in cs))),
['Elastic IP Address', 'Elastic IP Addresses'])
groups = dict((x.name, x) for c in cs for x in c.get_all_security_groups())
print print_items(len(groups), ['Security Group'])
print print_items(sum(len(c.get_all_key_pairs()) for c in cs), ['Key Pair'])
print print_items(sum(len(c.get_all_tags()) for c in cs), ['Tag'])
def get_autoscale_usage(regions):
cs = connect(boto.ec2.autoscale, regions)
print print_items(sum(len(c.get_all_groups()) for c in cs),
['Auto Scaling Group'])
print print_items(len(list(flatten(c.get_all_autoscaling_instances()
for c in cs))), ['Auto Scaling Instance'])
print print_items(sum(len(c.get_all_launch_configurations()) for c in cs),
['Auto Scaling Launch Configuration'])
print print_items(sum(len(c.get_all_policies()) for c in cs),
['Auto Scaling Policy', 'Auto Scaling Policies'])
print print_items(sum(len(c.get_all_tags()) for c in cs),
['Auto Scaling Tag'])
def get_elb_usage(regions):
cs = connect(boto.ec2.elb, regions)
balancers = list(flatten(c.get_all_load_balancers() for c in cs))
    print print_two_items2(len(balancers), ['Elastic Load Balancer'],
        sum(len(b.instances) for b in balancers), ['instance'])
def get_vpc_usage(regions):
cs = connect(boto.vpc, regions)
vpcs = list(flatten(c.get_all_vpcs() for c in cs))
print print_two_items(len(vpcs), ['Virtual Private Cloud'],
sum(v.is_default for v in vpcs), 'default')
print print_items(sum(len(c.get_all_internet_gateways()) for c in cs),
['Internet Gateway'])
print print_items(sum(len(c.get_all_customer_gateways()) for c in cs),
['Customer Gateway'])
print print_items(sum(len(c.get_all_vpn_gateways()) for c in cs),
['VPN Gateway'])
print print_items(sum(len(c.get_all_subnets()) for c in cs), ['Subnet'])
def get_route53_usage(regions):
cs = connect(boto.route53, regions)
zones = dict((x.id, x) for c in cs for x in c.get_zones())
records = sum(len(v.get_records()) for k, v in zones.iteritems())
print print_two_items2(len(zones), ['Route53 Hosted Zone'],
records, ['record'])
def get_s3_usage(regions):
cs = connect(boto.s3, regions)
buckets = dict((x.name, x) for c in cs for x in c.get_all_buckets())
size = sum(x.size for x in list(flatten(v.get_all_keys()
for k, v in buckets.iteritems())))
print '{0}{1}'.format(print_items(len(buckets), ['S3 Bucket']),
' [{0:.3f} GB]'.format(size / float(1024 * 1024 * 1024))
if 0 != size else '')
def get_glacier_usage(regions):
cs = connect(boto.glacier, regions)
vaults = list(flatten(c.list_vaults() for c in cs))
size = sum(v.size_in_bytes for v in vaults)
print print_items(len(vaults), ['Glacier Vault'])
print '{0}{1}' \
.format(print_items(sum(v.number_of_archives for v in vaults),
['Glacier Archive']),
' [{0} GB]'.format(size / float(1024 * 1024 * 1024))
if 0 != size else '')
def get_cloudfront_usage():
c = boto.connect_cloudfront()
distrs = c.get_all_distributions()
objects = len(list(flatten(d.get_distribution().get_objects()
for d in distrs)))
print print_two_items2(len(distrs), ['CloudFront Distribution'],
objects, ['object'])
def get_sdb_usage(regions):
cs = connect(boto.sdb, regions)
domains = sum(len(c.get_all_domains()) for c in cs)
print print_items(domains, ['SimpleDB Domain'])
def get_rds_usage(regions):
cs = connect(boto.rds2, regions)
instances = list(flatten(c.describe_db_instances()
['DescribeDBInstancesResponse']
['DescribeDBInstancesResult']
['DBInstances'] for c in cs))
available = sum(i['DBInstanceStatus'] == 'available' for i in instances)
print print_two_items(len(instances), ['RDS Instance'],
available, 'available')
print print_items(sum(len(c.describe_reserved_db_instances()
['DescribeReservedDBInstancesResponse']
['DescribeReservedDBInstancesResult']
['ReservedDBInstances']) for c in cs), ['RDS Reserved Instance'])
print print_items(sum(len(c.describe_db_snapshots()
['DescribeDBSnapshotsResponse']
['DescribeDBSnapshotsResult']
['DBSnapshots']) for c in cs), ['RDS Snapshot'])
def get_dynamodb_usage(regions):
cs = connect(boto.dynamodb2, regions)
tables = list(flatten([boto.dynamodb2.table.Table(t)] for c in cs
for t in c.list_tables()['TableNames']))
items = sum(t.count() for t in tables)
print print_two_items2(len(tables), ['DynamoDB Table'],
items, ['item'])
def get_elasticache_usage(regions):
cs = connect(boto.elasticache, regions)
clusters = list(flatten(c.describe_cache_clusters()
['DescribeCacheClustersResponse']
['DescribeCacheClustersResult']
['CacheClusters'] for c in cs))
print print_items(len(clusters), ['ElastiCache Cluster'])
def get_redshift_usage(regions):
cs = connect(boto.redshift, regions)
clusters = list(flatten(c.describe_clusters()
['DescribeClustersResponse']
['DescribeClustersResult']
['Clusters'] for c in cs))
print print_items(len(clusters), ['Redshift Cluster'])
snapshots = list(flatten(c.describe_cluster_snapshots()
['DescribeClusterSnapshotsResponse']
['DescribeClusterSnapshotsResult']
['Snapshots'] for c in cs))
print print_items(len(snapshots), ['Redshift Snapshot'])
def get_datapipeline_usage(regions):
cs = connect(boto.datapipeline, regions)
pipelines = list(flatten(c.list_pipelines()['pipelineIdList']
for c in cs))
objects = list(flatten(c.get_pipeline_definition(p)['pipelineObjects']
for c in cs for p in pipelines))
print print_two_items2(len(pipelines), ['Data Pipeline'],
len(objects), ['object'])
def get_emr_usage(regions):
cs = connect(boto.emr, regions)
clusters = list(flatten([c.describe_cluster(s.id)] for c in cs
for s in c.list_clusters().clusters))
print '{0} [{1} terminated]' \
.format(print_items(len(clusters), ['EMR Cluster']),
sum('TERMINATED' == c.status.state for c in clusters))
def get_kinesis_usage(regions):
cs = connect(boto.kinesis, regions)
streams = list(flatten(c.list_streams()['StreamNames'] for c in cs))
shards = sum(len(c.describe_stream(s)
['StreamDescription']
['Shards']) for c in cs for s in streams)
print print_two_items2(len(streams), ['Kinesis Stream'],
shards, ['shard'])
def get_cloudsearch_usage(regions):
cs = connect(boto.cloudsearch2, regions)
domains = list(flatten(c.list_domain_names()
['ListDomainNamesResponse']
['ListDomainNamesResult']
['DomainNames'] for c in cs))
print print_items(len(domains), ['CloudSearch Domain'])
def get_elastictranscoder_usage(regions):
cs = connect(boto.elastictranscoder, regions)
pipelines = list(flatten(c.list_pipelines()['Pipelines'] for c in cs))
jobs = list(flatten(c.list_jobs_by_status('Progressing')
['Jobs'] for c in cs))
print print_items(len(pipelines), ['Elastic Transcoder Pipeline'])
print print_items(len(jobs), ['Elastic Transcoder Job'])
def get_ses_usage(regions):
cs = connect(boto.ses, regions)
print print_items(len(list(flatten(c.list_identities()
['ListIdentitiesResponse']
['ListIdentitiesResult']
['Identities'] for c in cs))), ['SES Identity', 'SES Identities'])
def get_sns_usage(regions):
cs = connect(boto.sns, regions)
print print_items(sum(len(c.get_all_topics()
['ListTopicsResponse']
['ListTopicsResult']
['Topics']) for c in cs), ['SNS Topic'])
print print_items(sum(len(c.get_all_subscriptions()
['ListSubscriptionsResponse']
['ListSubscriptionsResult']
['Subscriptions']) for c in cs), ['SNS Subscription'])
print print_items(sum(len(c.list_platform_applications()
['ListPlatformApplicationsResponse']
['ListPlatformApplicationsResult']
['PlatformApplications']) for c in cs), ['SNS Platform Application'])
def get_sqs_usage(regions):
cs = connect(boto.sqs, regions)
queues = list(flatten(c.get_all_queues() for c in cs))
messages = sum(q.count() for q in queues)
print print_two_items2(len(queues), ['SQS Queue'], messages, ['message'])
def get_swf_usage(regions):
cs = connect(boto.swf, regions)
domains = list(flatten(c.list_domains('REGISTERED')
['domainInfos'] for c in cs))
print print_items(len(domains), ['SWF Domain'])
def get_iam_usage(regions):
cs = connect(boto.iam, regions)
users = list(flatten(c.get_all_users()
['list_users_response']
['list_users_result']
['users'] for c in cs))
groups = list(flatten(c.get_all_groups()
['list_groups_response']
['list_groups_result']
['groups'] for c in cs))
print print_items(len(users), ['IAM User'])
print print_items(len(groups), ['IAM Group'])
def get_beanstalk_usage(regions):
cs = connect(boto.beanstalk, regions)
apps = list(flatten(c.describe_applications()
['DescribeApplicationsResponse']
['DescribeApplicationsResult']
['Applications'] for c in cs))
print print_items(len(apps), ['Elastic Beanstalk Application'])
def get_cloudformation_usage(regions):
cs = connect(boto.cloudformation, regions)
stacks = list(flatten(c.describe_stacks() for c in cs))
print print_items(len(stacks), ['CloudFormation Stack'])
def get_cloudtrail_usage(regions):
cs = connect(boto.cloudtrail, regions)
trails = list(flatten(c.describe_trails()
['trailList'] for c in cs))
print print_items(len(trails), ['CloudTrail Trail'])
def get_cloudwatch_usage(regions):
cs = connect(boto.ec2.cloudwatch, regions)
alarms = list(flatten(c.describe_alarms() for c in cs))
triggered = sum(a.state_value == MetricAlarm.ALARM for a in alarms)
print print_two_items(len(alarms), ['CloudWatch Alarm'],
triggered, 'triggered')
def get_opsworks_usage(regions):
c = boto.connect_opsworks()
print print_items(len(c.describe_stacks()['Stacks']), ['OpsWorks Stack'])
def _get_time_period(period):
return time.strftime('%Y-%m', time.gmtime()) if period is None else period
def _get_billing_data(bucket_name, time_period, regions):
cs = connect(boto.s3, regions)
bucket = list(c.lookup(bucket_name) for c in cs)[0]
if bucket is None:
raise Error('could not find \'{0}\''.format(bucket_name))
data = ''
for key in bucket.list():
if re.match(r'(\w+)-aws-billing-csv-{0}.csv' \
.format(_get_time_period(time_period)), key.name):
data = key.get_contents_as_string()
break
if not data:
raise Error('could not find billing data for this month')
return data
def _parse_billing_data(data):
cost = dict()
total = list()
doc = csv.reader(data.rstrip('\n').split('\n'), delimiter=',')
for row in doc:
code = row[12]
if code and code != 'ProductCode':
value = float(row[28])
if value >= 0:
if not code in cost:
cost[code] = [row[13].split(' ', 1)[1], value, row[23]]
else:
cost[code][1] += value
if row[3] == 'StatementTotal':
total.extend([['Cost', float(row[24]), row[23]],
['Credit', float(row[25]), row[23]],
['Total', float(row[28]), row[23]]])
return cost, total
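# Column indices used above (inferred from the parsing logic against AWS's
# detailed billing CSV layout): row[3]=RecordType, row[12]=ProductCode,
# row[13]=ProductName, row[23]=CurrencyCode, and row[24]/row[25]/row[28]
# carry the statement cost/credit/total on the 'StatementTotal' row.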
def get_aws_cost(bucket_name, time_period, regions):
data = _get_billing_data(bucket_name, time_period, regions)
cost, total = _parse_billing_data(data)
print '---'
for k, v in cost.items():
print '{0:<30} {1:>8.2f} {2}'.format(v[0], v[1], v[2])
for v in total:
print '{0:>29}: {1:>8.2f} {2}'.format(v[0], v[1], v[2])
def main():
parser = optparse.OptionParser('Usage: %prog [options]')
parser.add_option('-b', '--bucket', dest='bucket',
help='The name of the S3 bucket that holds billing reports. This '
'option is required.')
parser.add_option('-p', '--period', dest='period',
help='The billing period to check the usage for (e.g., \'2014-02\' '
'without quotes). Defaults to the current billing period if '
'not specified.')
    parser.add_option('-r', '--region', dest='regions', action='append',
        help='The name of a region to check usage for. May be specified '
             'multiple times.')
(opts, args) = parser.parse_args()
if 0 != len(args) or opts.bucket is None:
parser.print_help()
return 1
try:
get_ec2_usage(opts.regions)
get_autoscale_usage(opts.regions)
get_elb_usage(opts.regions)
get_vpc_usage(opts.regions)
get_route53_usage(opts.regions)
get_s3_usage(opts.regions)
get_glacier_usage(opts.regions)
get_cloudfront_usage()
get_sdb_usage(opts.regions)
get_rds_usage(opts.regions)
get_dynamodb_usage(opts.regions)
get_elasticache_usage(opts.regions)
get_redshift_usage(opts.regions)
get_datapipeline_usage(opts.regions)
get_emr_usage(opts.regions)
get_kinesis_usage(opts.regions)
get_cloudsearch_usage(opts.regions)
get_elastictranscoder_usage(opts.regions)
get_ses_usage(opts.regions)
get_sns_usage(opts.regions)
get_sqs_usage(opts.regions)
get_swf_usage(opts.regions)
get_beanstalk_usage(opts.regions)
get_cloudformation_usage(opts.regions)
get_cloudtrail_usage(opts.regions)
get_cloudwatch_usage(opts.regions)
get_opsworks_usage(opts.regions)
get_iam_usage(opts.regions)
get_aws_cost(opts.bucket, opts.period, opts.regions)
except (Error, Exception), err:
sys.stderr.write('[ERROR] {0}\n'.format(err))
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| ezhuk/aws-tools | billing/check_usage.py | Python | mit | 17,533 |
class Player(object):
"""Holds player information and scores"""
def __init__(self, name):
self.current_score = 0
self.name = name
| johnoleary/Farkel | player.py | Python | mit | 136 |
__author__ = 'parker'
import unittest
from src.parsers.base_parser import *
class BaseParserTest(unittest.TestCase):
def setUp(self):
self.mp = BaseParser()
def test_title_cleaner(self):
t, y = self.mp.clean_title('"!Next?" (1994)')
self.assertEqual(t, "!Next?")
self.assertEqual(y, "1994")
t, y = self.mp.clean_title('Anno 2006 (2007)')
self.assertEqual(t, "Anno 2006")
self.assertEqual(y, "2007")
t, y = self.mp.clean_title('"10 Years Younger" (2004/II)')
self.assertEqual(t, "10 Years Younger (2004/II)")
self.assertEqual(y, '2004')
t, y = self.mp.clean_title('SOMEMOVIE (????)')
self.assertEqual(y, None)
def test_year_cleaner(self):
self.assertEqual('1999', self.mp.get_start_date_from_year_group('1999-????'))
self.assertEqual('1999', self.mp.get_start_date_from_year_group('1999'))
self.assertEqual(None, self.mp.get_start_date_from_year_group('????'))
self.assertEqual(None, self.mp.get_start_date_from_year_group(None))
def test_actor_name_cleaner(self):
        self.assertEqual('Bob', self.mp.clean_person_name('Boberson, Bob')[0])
        self.assertEqual('Boberson', self.mp.clean_person_name('Boberson, Bob')[1])
        self.assertEqual('The Goofy Names', self.mp.clean_person_name('The Goofy Names')[0])
        self.assertEqual(None, self.mp.clean_person_name('The Goofy Names')[1])
def test_episode_name_cleaner(self):
ename, eseason, enum = self.mp.clean_episode_info("'Crazy Name'", '(#3.1)')
self.assertEqual('Crazy Name', ename)
self.assertEqual(3, eseason)
self.assertEqual(1, enum)
ename, eseason, enum = self.mp.clean_episode_info('"ANOTHER CRAZY NAME"', '(#300.12)')
self.assertEqual('ANOTHER CRAZY NAME', ename)
self.assertEqual(300, eseason)
self.assertEqual(12, enum)
ename, eseason, enum = self.mp.clean_episode_info('"ANOTHER CRAZY NAME"', '(#3.12)')
self.assertEqual(3, eseason)
self.assertEqual(12, enum)
ename, eseason, enum = self.mp.clean_episode_info('"ANOTHER CRAZY NAME"', '(#f.1200)')
self.assertEqual(None, eseason)
self.assertEqual(1200, enum)
ename, eseason, enum = self.mp.clean_episode_info('"ANOTHER CRAZY NAME"', '(#5.wamp)')
self.assertEqual(5, eseason)
self.assertEqual(None, enum)
ename, eseason, enum = self.mp.clean_episode_info('"ANOTHER CRAZY NAME"', 'uhoh')
self.assertEqual(None, eseason)
self.assertEqual(None, enum) | parkercoleman/imdb_parser | test/parsers/base_parser_test.py | Python | mit | 2,602 |
from random import randint
from random import choice
def montyhall(playerchoice):
prize = randint(1,3)
if (prize == 1):
noluck1 = randint(2,3)
if (noluck1 == 2):
noluck2 = 3
else:
noluck2 = 2
if (prize == 2):
noluck1 = choice([1,3])
if (noluck1 == 1):
noluck2 = 3
else:
noluck2 = 1
if (prize == 3):
noluck1 = randint(1,2)
if (noluck1 == 1):
noluck2 = 2
else:
noluck2 = 1
"out of the two remaining doors, pick the one that does not have\
prize behind"
if (playerchoice == prize):
openeddoor = choice([noluck1, noluck2])
if (playerchoice == noluck1):
openeddoor = noluck2
else:
openeddoor = noluck1
newplayerchoice = [i for i in [1,2,3] if (i != playerchoice and
i != openeddoor)][0]
win = (newplayerchoice == prize)
return win
def test(num):
wincount = 0
newnum = num
for i in range(1,num+1):
pchoice = randint(1,3)
print("Trial #" + str(i))
i += 1
win = montyhall(pchoice)
if (win == True):
wincount += 1
print("Win!")
if (win == False):
print("Lose.")
print("-----------")
print("By swapping, we won " + str(wincount) + " times in " + str(newnum)\
+ " trials.")
print("The possibility of Winning = " + "%.2f" % (wincount/num*100) + "%.\n")
repeat = input("Please enter:\n" + "'y' to try again\n" +
"'c' to change the number of trials\n" +
"'n' to stop\n")
    while (repeat != "n" and repeat != "N"):
        if (repeat == "y" or repeat == "Y"):
            test(num)
            repeat = "n"
        elif (repeat == "c" or repeat == "C"):
            newnum = int(input("Number of trials = "))
            test(newnum)
            repeat = "n"
        else:
            repeat = input("Please enter the correct value. (y/n/c)")
return
def init():
num = int(input("Please enter the number of trials you want to take: "))
return test(num)
init()
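
# Sanity check: switching wins exactly when the initial pick was wrong, so
# the observed win rate should approach 2/3 as the number of trials grows.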
| BrilliantLC/monty-hall-py | montyhall.py | Python | mit | 2,293 |
import requests as r
from bs4 import BeautifulSoup as bs
import json
from queue import Queue
import threading
import re
import time
import random
import os
pageUrl_pattern = r'(http(s)?://)(www\.ipeen\.com\.tw/search/taiwan/000/1-0-0-0/\?p=)(\d+)'
def all_restaurant_list(page_url):
print(page_url)
if re.match(pageUrl_pattern, page_url) == None:
print('pageURL is not found')
else:
        try:
            res = r.get(page_url)
        except r.exceptions.RequestException:
            # HTTPError was previously referenced without being imported;
            # RequestException also covers connection failures
            return None
res.encoding="utf-8"
soup = bs(res.text, 'lxml')
initialization_count_number = random.randint(1,4)
count_number = initialization_count_number
HOST = 'http://www.ipeen.com.tw'
all_restaurant_in_h3_list = soup.findAll('h3', {'id':re.compile('shop_h3_\d\d?')})
        try:
            for restaurant in all_restaurant_in_h3_list:
                if not restaurant.span:
                    if count_number % 4 == 1:
                        f1.write(HOST + restaurant.a['href'] + '\n')
                    elif count_number % 4 == 2:
                        f2.write(HOST + restaurant.a['href'] + '\n')
                    elif count_number % 4 == 3:
                        f3.write(HOST + restaurant.a['href'] + '\n')
                    else:
                        f4.write(HOST + restaurant.a['href'] + '\n')
                    # advance the round-robin counter so the links are
                    # spread across the four output files
                    count_number += 1
        except Exception:
            print('[ERROR]IOexception!')
class AWSTimeLimitError(Exception):
def __init__(self,msg):
self.message=msg
def __str__(self):
return self.message
def getExecutionTime(startTime):
    # Raise AWSTimeLimitError once roughly 60 seconds have elapsed, leaving
    # headroom before the hosting environment's hard execution limit.
    if time.time() - startTime >= 60:
        raise AWSTimeLimitError('Time is running out')
def reduce_AllPagesListSplitBlock_function(q,startTime):
try:
rf = open('all_pages_list_block4.txt', 'r',encoding='utf8')
pages_str = rf.read()
    except FileNotFoundError:
        print("[ERROR]No such file or directory: 'all_pages_list_block4.txt'")
        raise
if (pages_str==''):
print('no url!')
rf.close()
with open('success_all_pages_list_block4.txt', 'w', encoding='utf8') as wsf:
wsf.write('success')
os.remove('all_pages_list_block4.txt')
else:
pages_list = pages_str.split('\n')
for pages in range(len(pages_list)):
q.put(pages_list.pop())
if __name__ == '__main__':
startTime = time.time()
q = Queue()
t1 = threading.Thread(target=reduce_AllPagesListSplitBlock_function, args=(q,startTime,))
t1.start()
t1.join()
f1 = open('all_restaurant_list_block1.txt', 'a+',encoding='utf8')
f2 = open('all_restaurant_list_block2.txt', 'a+',encoding='utf8')
f3 = open('all_restaurant_list_block3.txt', 'a+',encoding='utf8')
f4 = open('all_restaurant_list_block4.txt', 'a+',encoding='utf8')
while not q.empty():
try:
getExecutionTime(startTime)
all_restaurant_list(q.get())
except AWSTimeLimitError:
with open('all_pages_list_block4.txt', 'w', encoding='utf8') as wf:
while not q.empty():
page = q.get()
wf.write(page + '\n')
if q.empty():
with open('success_all_pages_list_block4.txt', 'w', encoding='utf8') as wsf:
wsf.write('success')
#os.remove('all_pages_list_block4.txt')
f1.close()
f2.close()
f3.close()
f4.close()
endTime = time.time()
totalExecutionTime = str(endTime-startTime)
print('[INFO]good')
print('[INFO]TotalExecutionTime = ' + totalExecutionTime) | nick800608/TeamProject-FoodRecipe | ipeen_allRestaurantList_crawler_block4.py | Python | mit | 3,811 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^route/', views.route, name='route'),
]
| davideberdin/ocr-navigator | backend/navigator/urls.py | Python | mit | 122 |
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class EmbeddedXmlDocuments(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
NONE = "EMBEDDED-XML-NONE"
DM_FOR_PROGRAMMES = "EMBEDDED-XML-DM-FOR-PROGRAMMES"
allowable_values = [NONE, DM_FOR_PROGRAMMES] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""EmbeddedXmlDocuments - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EmbeddedXmlDocuments):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EmbeddedXmlDocuments):
return True
return self.to_dict() != other.to_dict()
| Telestream/telestream-cloud-python-sdk | telestream_cloud_qc_sdk/telestream_cloud_qc/models/embedded_xml_documents.py | Python | mit | 2,848 |
#!/usr/bin/env python
import os
import sys
import warnings
import django
from django.conf import settings
from django.test.utils import get_runner
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", DeprecationWarning)
warnings.simplefilter("error", PendingDeprecationWarning)
# Make runtime warning errors to ensure no usage of error prone patterns.
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')
warnings.filterwarnings("ignore", "Using or importing the ABCs from 'collections' instead of from 'collections.abc' is "
"deprecated, and in 3.8 it will stop working", module='django')
warnings.filterwarnings("ignore", "Using or importing the ABCs from 'collections' instead of from 'collections.abc' is "
"deprecated since Python 3.3,and in 3.9 it will stop working", module='django')
if __name__ == "__main__":
print("Python:", sys.version)
print("Django:", django.get_version(django.VERSION))
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(sys.argv[1:] or ["tests"])
sys.exit(bool(failures))
| nshafer/django-hashid-field | runtests.py | Python | mit | 1,404 |
"""Test configuration."""
import mock
import pytest
from fixtures.common import *
@pytest.fixture(scope="session")
def mocked_get_context():
    """Mock argo.schema._get_context to return an empty dict."""
    patcher = mock.patch("argo.schema._get_context")
    mocked = patcher.start()
    mocked.return_value = {}
    yield mocked
    # stop the session-wide patch during teardown
    patcher.stop()
# .. _persister_example:
import os
from ..serialize import serialize, deserialize
class FilesystemPersister(object):
@classmethod
def load_cassette(cls, cassette_path, serializer):
try:
with open(cassette_path) as f:
cassette_content = f.read()
except IOError:
raise ValueError("Cassette not found.")
cassette = deserialize(cassette_content, serializer)
return cassette
@staticmethod
def save_cassette(cassette_path, cassette_dict, serializer):
data = serialize(cassette_dict, serializer)
dirname, filename = os.path.split(cassette_path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
with open(cassette_path, "w") as f:
f.write(data)
| Azure/azure-sdk-for-python | tools/vcrpy/vcr/persisters/filesystem.py | Python | mit | 801 |
"""
This helper command will import any python callable on your python path and
call it with the supplied arguments.
Use `yawn.task.decorators.make_task` to
"""
import importlib
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Execute a python callable'
def add_arguments(self, parser):
parser.add_argument('module', help='The python module to import, i.e. animal.bird')
parser.add_argument('callable', help='The python callable to invoke, i.e. Swallow')
parser.add_argument('argument', nargs='*', help='Arguments to pass to the callable')
def handle(self, *args, **options):
self.stdout.write('Importing module %s' % options['module'])
module_ = importlib.import_module(options['module'])
arguments = ''
if options['argument']:
arguments = "'{}'".format("', '".join(options['argument']))
self.stdout.write('Calling %s(%s)' % (options['callable'], arguments))
getattr(module_, options['callable'])(*options['argument'])
self.stdout.write('Execution complete')
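
# Example invocation using the names from the help text above:
#   ./manage.py exec animal.bird Swallow african unladen
# which is equivalent to:
#   from animal.bird import Swallow
#   Swallow('african', 'unladen')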
| aclowes/yawn | yawn/management/commands/exec.py | Python | mit | 1,112 |
"""Filename globbing utility."""
import sys
import os
import re
import fnmatch
__all__ = ["glob", "iglob"]
def glob(pathname):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
return list(iglob(pathname))
def iglob(pathname):
    """Return an iterator which yields the paths matching a pathname pattern.
    The pattern may contain simple shell-style wildcards a la fnmatch.
    """
if not has_magic(pathname):
if os.path.lexists(pathname):
yield pathname
return
dirname, basename = os.path.split(pathname)
if not dirname:
for name in glob1(None, basename):
yield name
return
if has_magic(dirname):
dirs = iglob(dirname)
else:
dirs = [dirname]
if has_magic(basename):
glob_in_dir = glob1
else:
glob_in_dir = glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob1(dirname, pattern):
if not dirname:
if isinstance(pattern, bytes):
dirname = bytes(os.curdir, 'ASCII')
else:
dirname = os.curdir
try:
names = os.listdir(dirname)
except os.error:
return []
if pattern[0] != '.':
names = [x for x in names if x[0] != '.']
return fnmatch.filter(names, pattern)
def glob0(dirname, basename):
if basename == '':
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if os.path.isdir(dirname):
return [basename]
else:
if os.path.lexists(os.path.join(dirname, basename)):
return [basename]
return []
magic_check = re.compile('[*?[]')
magic_check_bytes = re.compile(b'[*?[]')
def has_magic(s):
if isinstance(s, bytes):
match = magic_check_bytes.search(s)
else:
match = magic_check.search(s)
return match is not None
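
# Example: glob('./[0-9].*') returns a list such as ['./1.gif', './2.txt'],
# while iglob('./[0-9].*') yields the same names lazily.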
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/glob.py | Python | mit | 2,272 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gupiao3.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import db_mysql
class Ui_Stock(object):
def setupUi(self, Stock):
Stock.setObjectName("Stock")
Stock.resize(1389, 960)
self.tabWidget = QtWidgets.QTabWidget(Stock)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 1401, 961))
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.tableWidget_A = QtWidgets.QTableWidget(self.tab)
self.tableWidget_A.setGeometry(QtCore.QRect(40, 30, 1321, 811))
self.tableWidget_A.setObjectName("tableWidget_A")
self.tableWidget_A.setColumnCount(13)
self.tableWidget_A.setRowCount(18)
self.tableWidget_A.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget_A.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableWidget_A.verticalHeader().setVisible(False)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(9, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(10, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(11, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(12, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(13, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(14, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(15, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(16, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setVerticalHeaderItem(17, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(9, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(10, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(11, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_A.setHorizontalHeaderItem(12, item)
self.tableWidget_A.horizontalHeader().setDefaultSectionSize(99)
self.PageUpButton_A = QtWidgets.QPushButton(self.tab)
self.PageUpButton_A.setGeometry(QtCore.QRect(750, 840, 126, 33))
self.PageUpButton_A.setObjectName("PageUpButton_A")
self.PageDownButton_A = QtWidgets.QPushButton(self.tab)
self.PageDownButton_A.setGeometry(QtCore.QRect(880, 840, 126, 33))
self.PageDownButton_A.setObjectName("PageDownButton_A")
self.PageCount_A = QtWidgets.QLabel(self.tab)
self.PageCount_A.setGeometry(QtCore.QRect(530, 850, 89, 23))
self.PageCount_A.setObjectName("PageCount_A")
self.Current_Page_A = QtWidgets.QLabel(self.tab)
self.Current_Page_A.setGeometry(QtCore.QRect(640, 850, 89, 23))
self.Current_Page_A.setObjectName("Current_Page_A")
self.InquirelineEdit_A = QtWidgets.QLineEdit(self.tab)
self.InquirelineEdit_A.setGeometry(QtCore.QRect(1002, 0, 151, 33))
self.InquirelineEdit_A.setObjectName("InquirelineEdit_A")
self.InquireButton_A = QtWidgets.QPushButton(self.tab)
self.InquireButton_A.setGeometry(QtCore.QRect(1150, 0, 126, 33))
self.InquireButton_A.setObjectName("InquireButton_A")
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.Current_Page_B = QtWidgets.QLabel(self.tab_2)
self.Current_Page_B.setGeometry(QtCore.QRect(640, 850, 89, 23))
self.Current_Page_B.setObjectName("Current_Page_B")
self.InquirelineEdit_B = QtWidgets.QLineEdit(self.tab_2)
self.InquirelineEdit_B.setGeometry(QtCore.QRect(1002, 0, 151, 33))
self.InquirelineEdit_B.setObjectName("InquirelineEdit_B")
self.InquireButton_B = QtWidgets.QPushButton(self.tab_2)
self.InquireButton_B.setGeometry(QtCore.QRect(1150, 0, 126, 33))
self.InquireButton_B.setObjectName("InquireButton_B")
self.PageUpButton_B = QtWidgets.QPushButton(self.tab_2)
self.PageUpButton_B.setGeometry(QtCore.QRect(750, 840, 126, 33))
self.PageUpButton_B.setObjectName("PageUpButton_B")
self.PageDownButton_B = QtWidgets.QPushButton(self.tab_2)
self.PageDownButton_B.setGeometry(QtCore.QRect(880, 840, 126, 33))
self.PageDownButton_B.setObjectName("PageDownButton_B")
self.tableWidget_B = QtWidgets.QTableWidget(self.tab_2)
self.tableWidget_B.setGeometry(QtCore.QRect(40, 30, 1321, 811))
self.tableWidget_B.setObjectName("tableWidget_B")
self.tableWidget_B.setColumnCount(13)
self.tableWidget_B.setRowCount(18)
self.tableWidget_B.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget_B.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableWidget_B.verticalHeader().setVisible(False)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(9, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(10, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(11, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(12, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(13, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(14, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(15, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(16, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setVerticalHeaderItem(17, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(9, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(10, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(11, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_B.setHorizontalHeaderItem(12, item)
self.tableWidget_B.horizontalHeader().setDefaultSectionSize(99)
self.PageCount_B = QtWidgets.QLabel(self.tab_2)
self.PageCount_B.setGeometry(QtCore.QRect(530, 850, 89, 23))
self.PageCount_B.setObjectName("PageCount_B")
self.tabWidget.addTab(self.tab_2, "")
self.retranslateUi(Stock)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Stock)
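    # All user-visible strings are set below so Qt's translation system can
    # localise them; the labels are Chinese because the app targets the
    # Shanghai/Shenzhen exchanges.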
def retranslateUi(self, Stock):
_translate = QtCore.QCoreApplication.translate
Stock.setWindowTitle(_translate("Stock", "股票查询"))
        # Row headers are just the row numbers 1-18.
        for row in range(18):
            self.tableWidget_A.verticalHeaderItem(row).setText(_translate("Stock", str(row + 1)))
        # Column labels, left to right: code, name, latest price, change rate,
        # change amount, 5-minute change, volume (lots), turnover (10k CNY),
        # turnover rate, amplitude, volume ratio, order-book (bid/ask) ratio,
        # P/E ratio.
        column_labels = ["代码", "简称", "最新价", "涨幅率", "涨幅额", "5分钟涨幅",
                         "成交量(手)", "成交额(万)", "换手率", "振幅", "量比",
                         "委比", "市盈率"]
        for col, label in enumerate(column_labels):
            self.tableWidget_A.horizontalHeaderItem(col).setText(_translate("Stock", label))
        # Sorting by header click is connected with new-style signals in MY_UI.initui().
self.PageUpButton_A.setText(_translate("Stock", "上一页"))
self.PageDownButton_A.setText(_translate("Stock", "下一页"))
self.PageCount_A.setText(_translate("Stock", "TextLabel"))
self.Current_Page_A.setText(_translate("Stock", "TextLabel"))
self.InquirelineEdit_A.setText(_translate("Stock", "请输入股票代码"))
self.InquireButton_A.setText(_translate("Stock", "查询"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Stock", "沪深A股"))
self.Current_Page_B.setText(_translate("Stock", "TextLabel"))
self.InquirelineEdit_B.setText(_translate("Stock", "请输入股票代码"))
self.InquireButton_B.setText(_translate("Stock", "查询"))
self.PageUpButton_B.setText(_translate("Stock", "上一页"))
self.PageDownButton_B.setText(_translate("Stock", "下一页"))
        for row in range(18):
            self.tableWidget_B.verticalHeaderItem(row).setText(_translate("Stock", str(row + 1)))
        # The B-share tab shows the same thirteen column labels as the A-share tab.
        for col, label in enumerate(column_labels):
            self.tableWidget_B.horizontalHeaderItem(col).setText(_translate("Stock", label))
self.PageCount_B.setText(_translate("Stock", "TextLabel"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Stock", "沪深B股"))
class pagemanage(object):
    """Tracks the current result page and clamps page turning at both ends."""
    def __init__(self):
        self.currentpage = 1
        self.nextpage = 2
        self.prevpage = 0
        self.sumpage = 179  # total number of result pages
    def page_turning_down(self):
        # Advance one page, stopping at the last page.
        if self.currentpage == self.sumpage:
            return self.sumpage
        self.currentpage = self.currentpage + 1
        self.nextpage = self.nextpage + 1
        self.prevpage = self.prevpage + 1
        return self.currentpage
    def page_turning_up(self):
        # Go back one page, stopping at the first page.
        if self.currentpage == 1:
            return 1
        self.currentpage = self.currentpage - 1
        self.nextpage = self.nextpage - 1
        self.prevpage = self.prevpage - 1
        return self.currentpage
    def get_currentpage(self):
        return self.currentpage
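# Main widget: glues the generated Ui_Stock layout to the MySQL-backed stock data.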
class MY_UI(QtWidgets.QWidget, Ui_Stock):
    def __init__(self):
        super(MY_UI, self).__init__()
        self.setupUi(self)
        self.initui()
        self.pagemanager_A = pagemanage()
        self.pagemanager_B = pagemanage()
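        # db_mysql.Mysql is defined elsewhere in this project; judging from its
        # use below, it is assumed to expose .order / .sortby attributes plus a
        # .fetch(page) method returning one page of rows of 13 string fields.
        # The constructor argument is the MySQL table name:
        # 沪深A股 = Shanghai/Shenzhen A shares, 沪深B股 = B shares.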
self.dbA = db_mysql.Mysql('沪深A股')
self.dbB = db_mysql.Mysql('沪深B股')
self.initdataA()
self.initdataB()
def initui(self):
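        # A header click starts a descending sort; each sort handler then swaps
        # itself for the opposite-direction handler (see the Sort* methods below).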
self.tableWidget_A.horizontalHeader().sectionClicked.connect(self.SortA_desc)
self.tableWidget_B.horizontalHeader().sectionClicked.connect(self.SortB_desc)
self.PageUpButton_A.clicked.connect(self.PageUpButton_A_clicked)
self.PageDownButton_A.clicked.connect(self.PageDownButton_A_clicked)
self.PageUpButton_B.clicked.connect(self.PageUpButton_B_clicked)
self.PageDownButton_B.clicked.connect(self.PageDownButton_B_clicked)
self.InquireButton_A.clicked.connect(self.InquireButton_A_clicked)
    # Maps a clicked header column to the MySQL ORDER BY expression used for it.
    # Numeric values stored as text are coerced with "+0"; columns 0 and 1
    # (code and name) keep the default order.
    SORT_ORDER = {0: 'default', 1: 'default', 2: '最新价', 3: '(涨跌幅+0)',
                  4: '(涨跌额+0)', 5: '(5分钟涨幅+0)', 6: '成交量', 7: '成交额',
                  8: '(换手率+0)', 9: '(振幅+0)', 10: '量比', 11: '(委比+0)',
                  12: '市盈率'}
    def SortA_asc(self, column):
        self.dbA.order = self.SORT_ORDER[column]
        self.dbA.sortby = 'asc'
        # Swap the handler so the next header click sorts descending.
        self.tableWidget_A.horizontalHeader().sectionClicked.disconnect(self.SortA_asc)
        self.tableWidget_A.horizontalHeader().sectionClicked.connect(self.SortA_desc)
        self.initdataA()
    def SortA_desc(self, column):
        self.dbA.order = self.SORT_ORDER[column]
        self.dbA.sortby = 'desc'
        self.tableWidget_A.horizontalHeader().sectionClicked.disconnect(self.SortA_desc)
        self.tableWidget_A.horizontalHeader().sectionClicked.connect(self.SortA_asc)
        self.initdataA()
    def SortB_asc(self, column):
        self.dbB.order = self.SORT_ORDER[column]
        self.dbB.sortby = 'asc'
        self.tableWidget_B.horizontalHeader().sectionClicked.disconnect(self.SortB_asc)
        self.tableWidget_B.horizontalHeader().sectionClicked.connect(self.SortB_desc)
        self.initdataB()
    def SortB_desc(self, column):
        self.dbB.order = self.SORT_ORDER[column]
        self.dbB.sortby = 'desc'
        self.tableWidget_B.horizontalHeader().sectionClicked.disconnect(self.SortB_desc)
        self.tableWidget_B.horizontalHeader().sectionClicked.connect(self.SortB_asc)
        self.initdataB()
    def initdataA(self):
        # Load the current page of A-share rows into the table.
        datas = self.dbA.fetch(self.pagemanager_A.get_currentpage())
        self.settabledataA(datas)
    def initdataB(self):
        # Load the current page of B-share rows into the table.
        datas = self.dbB.fetch(self.pagemanager_B.get_currentpage())
        self.settabledataB(datas)
    def settabledataA(self, datas):
        # Repopulate the A-share table: one widget item per cell, 13 columns per row.
        self.tableWidget_A.clearContents()
        for i, row in enumerate(datas):
            for j in range(13):
                self.tableWidget_A.setItem(i, j, QtWidgets.QTableWidgetItem(row[j]))
    def settabledataB(self, datas):
        # Repopulate the B-share table the same way.
        self.tableWidget_B.clearContents()
        for i, row in enumerate(datas):
            for j in range(13):
                self.tableWidget_B.setItem(i, j, QtWidgets.QTableWidgetItem(row[j]))
    def PageUpButton_A_clicked(self):
        datas = self.dbA.fetch(self.pagemanager_A.page_turning_up())
        self.settabledataA(datas)
    def PageDownButton_A_clicked(self):
        datas = self.dbA.fetch(self.pagemanager_A.page_turning_down())
        self.settabledataA(datas)
    def PageUpButton_B_clicked(self):
        datas = self.dbB.fetch(self.pagemanager_B.page_turning_up())
        self.settabledataB(datas)
    def PageDownButton_B_clicked(self):
        datas = self.dbB.fetch(self.pagemanager_B.page_turning_down())
        self.settabledataB(datas)
    def InquireButton_A_clicked(self, checked=False):
        # Stub: QPushButton.clicked passes a "checked" bool; echo it until the
        # stock-code lookup is implemented.
        print(checked)
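# Launch the window standalone for a quick manual check.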
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
ui = MY_UI()
ui.show()
sys.exit(app.exec_())